author    | Russell King <rmk+kernel@armlinux.org.uk> | 2017-04-05 18:43:03 -0400
committer | Russell King <rmk+kernel@armlinux.org.uk> | 2017-04-05 18:43:03 -0400
commit    | 3872fe83a2fbb7366daa93ca533a22138e2d483e (patch)
tree      | e9c7c7a4a09a551011286a866235594b76cb73af /drivers
parent    | 3cc070c1c81948b33ebe2ea68cd39307ce2b312d (diff)
parent    | 974310d047f3c7788a51d10c8d255eebdb1fa857 (diff)
Merge branch 'kprobe-fixes' of https://git.linaro.org/people/tixy/kernel into fixes
Diffstat (limited to 'drivers')
102 files changed, 1592 insertions, 860 deletions
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 219b90bc0922..f15900132912 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void); | |||
41 | void acpi_container_init(void); | 41 | void acpi_container_init(void); |
42 | void acpi_memory_hotplug_init(void); | 42 | void acpi_memory_hotplug_init(void); |
43 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | 43 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
44 | void pci_ioapic_remove(struct acpi_pci_root *root); | ||
44 | int acpi_ioapic_remove(struct acpi_pci_root *root); | 45 | int acpi_ioapic_remove(struct acpi_pci_root *root); |
45 | #else | 46 | #else |
47 | static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; } | ||
46 | static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } | 48 | static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } |
47 | #endif | 49 | #endif |
48 | #ifdef CONFIG_ACPI_DOCK | 50 | #ifdef CONFIG_ACPI_DOCK |
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c index 6d7ce6e12aaa..1120dfd625b8 100644 --- a/drivers/acpi/ioapic.c +++ b/drivers/acpi/ioapic.c | |||
@@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle) | |||
206 | return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; | 206 | return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; |
207 | } | 207 | } |
208 | 208 | ||
209 | int acpi_ioapic_remove(struct acpi_pci_root *root) | 209 | void pci_ioapic_remove(struct acpi_pci_root *root) |
210 | { | 210 | { |
211 | int retval = 0; | ||
212 | struct acpi_pci_ioapic *ioapic, *tmp; | 211 | struct acpi_pci_ioapic *ioapic, *tmp; |
213 | 212 | ||
214 | mutex_lock(&ioapic_list_lock); | 213 | mutex_lock(&ioapic_list_lock); |
215 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { | 214 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { |
216 | if (root->device->handle != ioapic->root_handle) | 215 | if (root->device->handle != ioapic->root_handle) |
217 | continue; | 216 | continue; |
218 | |||
219 | if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) | ||
220 | retval = -EBUSY; | ||
221 | |||
222 | if (ioapic->pdev) { | 217 | if (ioapic->pdev) { |
223 | pci_release_region(ioapic->pdev, 0); | 218 | pci_release_region(ioapic->pdev, 0); |
224 | pci_disable_device(ioapic->pdev); | 219 | pci_disable_device(ioapic->pdev); |
225 | pci_dev_put(ioapic->pdev); | 220 | pci_dev_put(ioapic->pdev); |
226 | } | 221 | } |
222 | } | ||
223 | mutex_unlock(&ioapic_list_lock); | ||
224 | } | ||
225 | |||
226 | int acpi_ioapic_remove(struct acpi_pci_root *root) | ||
227 | { | ||
228 | int retval = 0; | ||
229 | struct acpi_pci_ioapic *ioapic, *tmp; | ||
230 | |||
231 | mutex_lock(&ioapic_list_lock); | ||
232 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { | ||
233 | if (root->device->handle != ioapic->root_handle) | ||
234 | continue; | ||
235 | if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) | ||
236 | retval = -EBUSY; | ||
227 | if (ioapic->res.flags && ioapic->res.parent) | 237 | if (ioapic->res.flags && ioapic->res.parent) |
228 | release_resource(&ioapic->res); | 238 | release_resource(&ioapic->res); |
229 | list_del(&ioapic->list); | 239 | list_del(&ioapic->list); |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index bf601d4df8cf..919be0aa2578 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
648 | 648 | ||
649 | pci_stop_root_bus(root->bus); | 649 | pci_stop_root_bus(root->bus); |
650 | 650 | ||
651 | WARN_ON(acpi_ioapic_remove(root)); | 651 | pci_ioapic_remove(root); |
652 | |||
653 | device_set_run_wake(root->bus->bridge, false); | 652 | device_set_run_wake(root->bus->bridge, false); |
654 | pci_acpi_remove_bus_pm_notifier(device); | 653 | pci_acpi_remove_bus_pm_notifier(device); |
655 | 654 | ||
656 | pci_remove_root_bus(root->bus); | 655 | pci_remove_root_bus(root->bus); |
656 | WARN_ON(acpi_ioapic_remove(root)); | ||
657 | 657 | ||
658 | dmar_device_remove(device->handle); | 658 | dmar_device_remove(device->handle); |
659 | 659 | ||
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 10aed84244f5..939641d6e262 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
@@ -50,7 +50,7 @@ | |||
50 | the slower the port i/o. In some cases, setting | 50 | the slower the port i/o. In some cases, setting |
51 | this to zero will speed up the device. (default -1) | 51 | this to zero will speed up the device. (default -1) |
52 | 52 | ||
53 | major You may use this parameter to overide the | 53 | major You may use this parameter to override the |
54 | default major number (46) that this driver | 54 | default major number (46) that this driver |
55 | will use. Be sure to change the device | 55 | will use. Be sure to change the device |
56 | name as well. | 56 | name as well. |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 644ba0888bd4..9cfd2e06a649 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -61,7 +61,7 @@ | |||
61 | first drive found. | 61 | first drive found. |
62 | 62 | ||
63 | 63 | ||
64 | major You may use this parameter to overide the | 64 | major You may use this parameter to override the |
65 | default major number (45) that this driver | 65 | default major number (45) that this driver |
66 | will use. Be sure to change the device | 66 | will use. Be sure to change the device |
67 | name as well. | 67 | name as well. |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ed93e8badf56..14c5d32f5d8b 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -59,7 +59,7 @@ | |||
59 | the slower the port i/o. In some cases, setting | 59 | the slower the port i/o. In some cases, setting |
60 | this to zero will speed up the device. (default -1) | 60 | this to zero will speed up the device. (default -1) |
61 | 61 | ||
62 | major You may use this parameter to overide the | 62 | major You may use this parameter to override the |
63 | default major number (47) that this driver | 63 | default major number (47) that this driver |
64 | will use. Be sure to change the device | 64 | will use. Be sure to change the device |
65 | name as well. | 65 | name as well. |
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 5db955fe3a94..3b5882bfb736 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c | |||
@@ -84,7 +84,7 @@ | |||
84 | the slower the port i/o. In some cases, setting | 84 | the slower the port i/o. In some cases, setting |
85 | this to zero will speed up the device. (default -1) | 85 | this to zero will speed up the device. (default -1) |
86 | 86 | ||
87 | major You may use this parameter to overide the | 87 | major You may use this parameter to override the |
88 | default major number (97) that this driver | 88 | default major number (97) that this driver |
89 | will use. Be sure to change the device | 89 | will use. Be sure to change the device |
90 | name as well. | 90 | name as well. |
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 61fc6824299a..e815312a00ad 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c | |||
@@ -61,7 +61,7 @@ | |||
61 | the slower the port i/o. In some cases, setting | 61 | the slower the port i/o. In some cases, setting |
62 | this to zero will speed up the device. (default -1) | 62 | this to zero will speed up the device. (default -1) |
63 | 63 | ||
64 | major You may use this parameter to overide the | 64 | major You may use this parameter to override the |
65 | default major number (96) that this driver | 65 | default major number (96) that this driver |
66 | will use. Be sure to change the device | 66 | will use. Be sure to change the device |
67 | name as well. | 67 | name as well. |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4d6807723798..517838b65964 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v) | |||
120 | 120 | ||
121 | /* Feature bits */ | 121 | /* Feature bits */ |
122 | 122 | ||
123 | #define RBD_FEATURE_LAYERING (1<<0) | 123 | #define RBD_FEATURE_LAYERING (1ULL<<0) |
124 | #define RBD_FEATURE_STRIPINGV2 (1<<1) | 124 | #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) |
125 | #define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) | 125 | #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) |
126 | #define RBD_FEATURE_DATA_POOL (1<<7) | 126 | #define RBD_FEATURE_DATA_POOL (1ULL<<7) |
127 | |||
127 | #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ | 128 | #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ |
128 | RBD_FEATURE_STRIPINGV2 | \ | 129 | RBD_FEATURE_STRIPINGV2 | \ |
129 | RBD_FEATURE_EXCLUSIVE_LOCK | \ | 130 | RBD_FEATURE_EXCLUSIVE_LOCK | \ |
@@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) | |||
499 | return is_lock_owner; | 500 | return is_lock_owner; |
500 | } | 501 | } |
501 | 502 | ||
503 | static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) | ||
504 | { | ||
505 | return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); | ||
506 | } | ||
507 | |||
502 | static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); | 508 | static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); |
503 | static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); | 509 | static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); |
504 | static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); | 510 | static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); |
505 | static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); | 511 | static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); |
512 | static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL); | ||
506 | 513 | ||
507 | static struct attribute *rbd_bus_attrs[] = { | 514 | static struct attribute *rbd_bus_attrs[] = { |
508 | &bus_attr_add.attr, | 515 | &bus_attr_add.attr, |
509 | &bus_attr_remove.attr, | 516 | &bus_attr_remove.attr, |
510 | &bus_attr_add_single_major.attr, | 517 | &bus_attr_add_single_major.attr, |
511 | &bus_attr_remove_single_major.attr, | 518 | &bus_attr_remove_single_major.attr, |
519 | &bus_attr_supported_features.attr, | ||
512 | NULL, | 520 | NULL, |
513 | }; | 521 | }; |
514 | 522 | ||
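The feature-bit macros above are widened to 1ULL because the feature masks are 64-bit values (the new supported_features attribute prints them with 0x%llx), and a plain int literal cannot express bits at or above bit 31. A minimal standalone sketch of the difference, assuming only that future feature bits may sit in the upper half of a u64 (the bit positions here are illustrative, not rbd's):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A plain int literal is 32-bit: 1 << 31 overflows the signed type
	 * and 1 << 40 cannot be expressed at all.  The ULL suffix keeps the
	 * whole computation in 64 bits. */
	uint64_t low_bit  = 1ULL << 2;   /* e.g. an EXCLUSIVE_LOCK-style bit */
	uint64_t high_bit = 1ULL << 40;  /* hypothetical future feature bit */
	uint64_t mask = low_bit | high_bit;

	printf("mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}
```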
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e27d89a36c34..dceb5edd1e54 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -1189,6 +1189,8 @@ static int zram_add(void) | |||
1189 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); | 1189 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
1190 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | 1190 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); |
1191 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; | 1191 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
1192 | zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE; | ||
1193 | zram->disk->queue->limits.chunk_sectors = 0; | ||
1192 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); | 1194 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); |
1193 | /* | 1195 | /* |
1194 | * zram_bio_discard() will clear all logical blocks if logical block | 1196 | * zram_bio_discard() will clear all logical blocks if logical block |
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index a5b1eb276c0b..e6d0d271c58c 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/sched.h> | 9 | #include <linux/sched/signal.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 1ef26403bcc8..0ab024918907 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -313,13 +313,6 @@ static int random_read_wakeup_bits = 64; | |||
313 | static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; | 313 | static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * The minimum number of seconds between urandom pool reseeding. We | ||
317 | * do this to limit the amount of entropy that can be drained from the | ||
318 | * input pool even if there are heavy demands on /dev/urandom. | ||
319 | */ | ||
320 | static int random_min_urandom_seed = 60; | ||
321 | |||
322 | /* | ||
323 | * Originally, we used a primitive polynomial of degree .poolwords | 316 | * Originally, we used a primitive polynomial of degree .poolwords |
324 | * over GF(2). The taps for various sizes are defined below. They | 317 | * over GF(2). The taps for various sizes are defined below. They |
325 | * were chosen to be evenly spaced except for the last tap, which is 1 | 318 | * were chosen to be evenly spaced except for the last tap, which is 1 |
@@ -409,7 +402,6 @@ static struct poolinfo { | |||
409 | */ | 402 | */ |
410 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); | 403 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); |
411 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | 404 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); |
412 | static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); | ||
413 | static struct fasync_struct *fasync; | 405 | static struct fasync_struct *fasync; |
414 | 406 | ||
415 | static DEFINE_SPINLOCK(random_ready_list_lock); | 407 | static DEFINE_SPINLOCK(random_ready_list_lock); |
@@ -467,7 +459,6 @@ struct entropy_store { | |||
467 | int entropy_count; | 459 | int entropy_count; |
468 | int entropy_total; | 460 | int entropy_total; |
469 | unsigned int initialized:1; | 461 | unsigned int initialized:1; |
470 | unsigned int limit:1; | ||
471 | unsigned int last_data_init:1; | 462 | unsigned int last_data_init:1; |
472 | __u8 last_data[EXTRACT_SIZE]; | 463 | __u8 last_data[EXTRACT_SIZE]; |
473 | }; | 464 | }; |
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; | |||
485 | static struct entropy_store input_pool = { | 476 | static struct entropy_store input_pool = { |
486 | .poolinfo = &poolinfo_table[0], | 477 | .poolinfo = &poolinfo_table[0], |
487 | .name = "input", | 478 | .name = "input", |
488 | .limit = 1, | ||
489 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), | 479 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), |
490 | .pool = input_pool_data | 480 | .pool = input_pool_data |
491 | }; | 481 | }; |
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = { | |||
493 | static struct entropy_store blocking_pool = { | 483 | static struct entropy_store blocking_pool = { |
494 | .poolinfo = &poolinfo_table[1], | 484 | .poolinfo = &poolinfo_table[1], |
495 | .name = "blocking", | 485 | .name = "blocking", |
496 | .limit = 1, | ||
497 | .pull = &input_pool, | 486 | .pull = &input_pool, |
498 | .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), | 487 | .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), |
499 | .pool = blocking_pool_data, | 488 | .pool = blocking_pool_data, |
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) | |||
855 | spin_unlock_irqrestore(&primary_crng.lock, flags); | 844 | spin_unlock_irqrestore(&primary_crng.lock, flags); |
856 | } | 845 | } |
857 | 846 | ||
858 | static inline void maybe_reseed_primary_crng(void) | ||
859 | { | ||
860 | if (crng_init > 2 && | ||
861 | time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL)) | ||
862 | crng_reseed(&primary_crng, &input_pool); | ||
863 | } | ||
864 | |||
865 | static inline void crng_wait_ready(void) | 847 | static inline void crng_wait_ready(void) |
866 | { | 848 | { |
867 | wait_event_interruptible(crng_init_wait, crng_ready()); | 849 | wait_event_interruptible(crng_init_wait, crng_ready()); |
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
1220 | r->entropy_count > r->poolinfo->poolfracbits) | 1202 | r->entropy_count > r->poolinfo->poolfracbits) |
1221 | return; | 1203 | return; |
1222 | 1204 | ||
1223 | if (r->limit == 0 && random_min_urandom_seed) { | ||
1224 | unsigned long now = jiffies; | ||
1225 | |||
1226 | if (time_before(now, | ||
1227 | r->last_pulled + random_min_urandom_seed * HZ)) | ||
1228 | return; | ||
1229 | r->last_pulled = now; | ||
1230 | } | ||
1231 | |||
1232 | _xfer_secondary_pool(r, nbytes); | 1205 | _xfer_secondary_pool(r, nbytes); |
1233 | } | 1206 | } |
1234 | 1207 | ||
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
1236 | { | 1209 | { |
1237 | __u32 tmp[OUTPUT_POOL_WORDS]; | 1210 | __u32 tmp[OUTPUT_POOL_WORDS]; |
1238 | 1211 | ||
1239 | /* For /dev/random's pool, always leave two wakeups' worth */ | ||
1240 | int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4; | ||
1241 | int bytes = nbytes; | 1212 | int bytes = nbytes; |
1242 | 1213 | ||
1243 | /* pull at least as much as a wakeup */ | 1214 | /* pull at least as much as a wakeup */ |
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
1248 | trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, | 1219 | trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, |
1249 | ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); | 1220 | ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); |
1250 | bytes = extract_entropy(r->pull, tmp, bytes, | 1221 | bytes = extract_entropy(r->pull, tmp, bytes, |
1251 | random_read_wakeup_bits / 8, rsvd_bytes); | 1222 | random_read_wakeup_bits / 8, 0); |
1252 | mix_pool_bytes(r, tmp, bytes); | 1223 | mix_pool_bytes(r, tmp, bytes); |
1253 | credit_entropy_bits(r, bytes*8); | 1224 | credit_entropy_bits(r, bytes*8); |
1254 | } | 1225 | } |
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work) | |||
1276 | static size_t account(struct entropy_store *r, size_t nbytes, int min, | 1247 | static size_t account(struct entropy_store *r, size_t nbytes, int min, |
1277 | int reserved) | 1248 | int reserved) |
1278 | { | 1249 | { |
1279 | int entropy_count, orig; | 1250 | int entropy_count, orig, have_bytes; |
1280 | size_t ibytes, nfrac; | 1251 | size_t ibytes, nfrac; |
1281 | 1252 | ||
1282 | BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); | 1253 | BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); |
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
1285 | retry: | 1256 | retry: |
1286 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 1257 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); |
1287 | ibytes = nbytes; | 1258 | ibytes = nbytes; |
1288 | /* If limited, never pull more than available */ | 1259 | /* never pull more than available */ |
1289 | if (r->limit) { | 1260 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); |
1290 | int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); | ||
1291 | 1261 | ||
1292 | if ((have_bytes -= reserved) < 0) | 1262 | if ((have_bytes -= reserved) < 0) |
1293 | have_bytes = 0; | 1263 | have_bytes = 0; |
1294 | ibytes = min_t(size_t, ibytes, have_bytes); | 1264 | ibytes = min_t(size_t, ibytes, have_bytes); |
1295 | } | ||
1296 | if (ibytes < min) | 1265 | if (ibytes < min) |
1297 | ibytes = 0; | 1266 | ibytes = 0; |
1298 | 1267 | ||
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, | |||
1912 | static int min_read_thresh = 8, min_write_thresh; | 1881 | static int min_read_thresh = 8, min_write_thresh; |
1913 | static int max_read_thresh = OUTPUT_POOL_WORDS * 32; | 1882 | static int max_read_thresh = OUTPUT_POOL_WORDS * 32; |
1914 | static int max_write_thresh = INPUT_POOL_WORDS * 32; | 1883 | static int max_write_thresh = INPUT_POOL_WORDS * 32; |
1884 | static int random_min_urandom_seed = 60; | ||
1915 | static char sysctl_bootid[16]; | 1885 | static char sysctl_bootid[16]; |
1916 | 1886 | ||
1917 | /* | 1887 | /* |
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = { | |||
2042 | }; | 2012 | }; |
2043 | #endif /* CONFIG_SYSCTL */ | 2013 | #endif /* CONFIG_SYSCTL */ |
2044 | 2014 | ||
2045 | static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; | 2015 | struct batched_entropy { |
2046 | 2016 | union { | |
2047 | int random_int_secret_init(void) | 2017 | u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)]; |
2048 | { | 2018 | u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)]; |
2049 | get_random_bytes(random_int_secret, sizeof(random_int_secret)); | 2019 | }; |
2050 | return 0; | 2020 | unsigned int position; |
2051 | } | 2021 | }; |
2052 | |||
2053 | static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) | ||
2054 | __aligned(sizeof(unsigned long)); | ||
2055 | 2022 | ||
2056 | /* | 2023 | /* |
2057 | * Get a random word for internal kernel use only. Similar to urandom but | 2024 | * Get a random word for internal kernel use only. The quality of the random |
2058 | * with the goal of minimal entropy pool depletion. As a result, the random | 2025 | * number is either as good as RDRAND or as good as /dev/urandom, with the |
2059 | * value is not cryptographically secure but for several uses the cost of | 2026 | * goal of being quite fast and not depleting entropy. |
2060 | * depleting entropy is too high | ||
2061 | */ | 2027 | */ |
2062 | unsigned int get_random_int(void) | 2028 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); |
2029 | u64 get_random_u64(void) | ||
2063 | { | 2030 | { |
2064 | __u32 *hash; | 2031 | u64 ret; |
2065 | unsigned int ret; | 2032 | struct batched_entropy *batch; |
2066 | 2033 | ||
2067 | if (arch_get_random_int(&ret)) | 2034 | #if BITS_PER_LONG == 64 |
2035 | if (arch_get_random_long((unsigned long *)&ret)) | ||
2068 | return ret; | 2036 | return ret; |
2037 | #else | ||
2038 | if (arch_get_random_long((unsigned long *)&ret) && | ||
2039 | arch_get_random_long((unsigned long *)&ret + 1)) | ||
2040 | return ret; | ||
2041 | #endif | ||
2069 | 2042 | ||
2070 | hash = get_cpu_var(get_random_int_hash); | 2043 | batch = &get_cpu_var(batched_entropy_u64); |
2071 | 2044 | if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { | |
2072 | hash[0] += current->pid + jiffies + random_get_entropy(); | 2045 | extract_crng((u8 *)batch->entropy_u64); |
2073 | md5_transform(hash, random_int_secret); | 2046 | batch->position = 0; |
2074 | ret = hash[0]; | 2047 | } |
2075 | put_cpu_var(get_random_int_hash); | 2048 | ret = batch->entropy_u64[batch->position++]; |
2076 | 2049 | put_cpu_var(batched_entropy_u64); | |
2077 | return ret; | 2050 | return ret; |
2078 | } | 2051 | } |
2079 | EXPORT_SYMBOL(get_random_int); | 2052 | EXPORT_SYMBOL(get_random_u64); |
2080 | 2053 | ||
2081 | /* | 2054 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); |
2082 | * Same as get_random_int(), but returns unsigned long. | 2055 | u32 get_random_u32(void) |
2083 | */ | ||
2084 | unsigned long get_random_long(void) | ||
2085 | { | 2056 | { |
2086 | __u32 *hash; | 2057 | u32 ret; |
2087 | unsigned long ret; | 2058 | struct batched_entropy *batch; |
2088 | 2059 | ||
2089 | if (arch_get_random_long(&ret)) | 2060 | if (arch_get_random_int(&ret)) |
2090 | return ret; | 2061 | return ret; |
2091 | 2062 | ||
2092 | hash = get_cpu_var(get_random_int_hash); | 2063 | batch = &get_cpu_var(batched_entropy_u32); |
2093 | 2064 | if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { | |
2094 | hash[0] += current->pid + jiffies + random_get_entropy(); | 2065 | extract_crng((u8 *)batch->entropy_u32); |
2095 | md5_transform(hash, random_int_secret); | 2066 | batch->position = 0; |
2096 | ret = *(unsigned long *)hash; | 2067 | } |
2097 | put_cpu_var(get_random_int_hash); | 2068 | ret = batch->entropy_u32[batch->position++]; |
2098 | 2069 | put_cpu_var(batched_entropy_u32); | |
2099 | return ret; | 2070 | return ret; |
2100 | } | 2071 | } |
2101 | EXPORT_SYMBOL(get_random_long); | 2072 | EXPORT_SYMBOL(get_random_u32); |
2102 | 2073 | ||
2103 | /** | 2074 | /** |
2104 | * randomize_page - Generate a random, page aligned address | 2075 | * randomize_page - Generate a random, page aligned address |
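The get_random_u64()/get_random_u32() helpers introduced above replace the per-call MD5 hashing with a per-CPU batch that is refilled from the ChaCha20 CRNG one block at a time and then handed out word by word. The sketch below is a simplified, single-threaded model of that batching only: no per-CPU storage, no locking, and a stand-in fill_block() where the kernel calls extract_crng(); all names here are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 64  /* stand-in for CHACHA20_BLOCK_SIZE */

struct batched_entropy {
	uint64_t entropy_u64[BLOCK_SIZE / sizeof(uint64_t)];
	unsigned int position;
};

/* Stand-in for extract_crng(): fill one block with pseudo-random bytes. */
static void fill_block(uint8_t *buf, size_t len)
{
	static uint64_t x = 0x9e3779b97f4a7c15ULL;
	for (size_t i = 0; i < len; i++) {
		x ^= x << 13; x ^= x >> 7; x ^= x << 17; /* xorshift, demo only */
		buf[i] = (uint8_t)x;
	}
}

static struct batched_entropy batch; /* the kernel keeps one of these per CPU */

static uint64_t get_random_u64_model(void)
{
	/* Refill the whole block only when the previous one is used up. */
	if (batch.position % (sizeof(batch.entropy_u64) / sizeof(uint64_t)) == 0) {
		fill_block((uint8_t *)batch.entropy_u64, sizeof(batch.entropy_u64));
		batch.position = 0;
	}
	return batch.entropy_u64[batch.position++];
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%016llx\n", (unsigned long long)get_random_u64_model());
	return 0;
}
```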
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a47543281864..38b9fdf854a4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -2532,4 +2532,5 @@ static int __init cpufreq_core_init(void) | |||
2532 | 2532 | ||
2533 | return 0; | 2533 | return 0; |
2534 | } | 2534 | } |
2535 | module_param(off, int, 0444); | ||
2535 | core_initcall(cpufreq_core_init); | 2536 | core_initcall(cpufreq_core_init); |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b1fbaa30ae04..3d37219a0dd7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -377,6 +377,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits) | |||
377 | intel_pstate_init_limits(limits); | 377 | intel_pstate_init_limits(limits); |
378 | limits->min_perf_pct = 100; | 378 | limits->min_perf_pct = 100; |
379 | limits->min_perf = int_ext_tofp(1); | 379 | limits->min_perf = int_ext_tofp(1); |
380 | limits->min_sysfs_pct = 100; | ||
380 | } | 381 | } |
381 | 382 | ||
382 | static DEFINE_MUTEX(intel_pstate_driver_lock); | 383 | static DEFINE_MUTEX(intel_pstate_driver_lock); |
@@ -968,11 +969,20 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) | |||
968 | } | 969 | } |
969 | 970 | ||
970 | static void intel_pstate_update_policies(void) | 971 | static void intel_pstate_update_policies(void) |
972 | __releases(&intel_pstate_limits_lock) | ||
973 | __acquires(&intel_pstate_limits_lock) | ||
971 | { | 974 | { |
975 | struct perf_limits *saved_limits = limits; | ||
972 | int cpu; | 976 | int cpu; |
973 | 977 | ||
978 | mutex_unlock(&intel_pstate_limits_lock); | ||
979 | |||
974 | for_each_possible_cpu(cpu) | 980 | for_each_possible_cpu(cpu) |
975 | cpufreq_update_policy(cpu); | 981 | cpufreq_update_policy(cpu); |
982 | |||
983 | mutex_lock(&intel_pstate_limits_lock); | ||
984 | |||
985 | limits = saved_limits; | ||
976 | } | 986 | } |
977 | 987 | ||
978 | /************************** debugfs begin ************************/ | 988 | /************************** debugfs begin ************************/ |
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
1180 | 1190 | ||
1181 | limits->no_turbo = clamp_t(int, input, 0, 1); | 1191 | limits->no_turbo = clamp_t(int, input, 0, 1); |
1182 | 1192 | ||
1183 | mutex_unlock(&intel_pstate_limits_lock); | ||
1184 | |||
1185 | intel_pstate_update_policies(); | 1193 | intel_pstate_update_policies(); |
1186 | 1194 | ||
1195 | mutex_unlock(&intel_pstate_limits_lock); | ||
1196 | |||
1187 | mutex_unlock(&intel_pstate_driver_lock); | 1197 | mutex_unlock(&intel_pstate_driver_lock); |
1188 | 1198 | ||
1189 | return count; | 1199 | return count; |
@@ -1217,10 +1227,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
1217 | limits->max_perf_pct); | 1227 | limits->max_perf_pct); |
1218 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); | 1228 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); |
1219 | 1229 | ||
1220 | mutex_unlock(&intel_pstate_limits_lock); | ||
1221 | |||
1222 | intel_pstate_update_policies(); | 1230 | intel_pstate_update_policies(); |
1223 | 1231 | ||
1232 | mutex_unlock(&intel_pstate_limits_lock); | ||
1233 | |||
1224 | mutex_unlock(&intel_pstate_driver_lock); | 1234 | mutex_unlock(&intel_pstate_driver_lock); |
1225 | 1235 | ||
1226 | return count; | 1236 | return count; |
@@ -1254,10 +1264,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
1254 | limits->min_perf_pct); | 1264 | limits->min_perf_pct); |
1255 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); | 1265 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); |
1256 | 1266 | ||
1257 | mutex_unlock(&intel_pstate_limits_lock); | ||
1258 | |||
1259 | intel_pstate_update_policies(); | 1267 | intel_pstate_update_policies(); |
1260 | 1268 | ||
1269 | mutex_unlock(&intel_pstate_limits_lock); | ||
1270 | |||
1261 | mutex_unlock(&intel_pstate_driver_lock); | 1271 | mutex_unlock(&intel_pstate_driver_lock); |
1262 | 1272 | ||
1263 | return count; | 1273 | return count; |
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) | |||
1874 | 1884 | ||
1875 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); | 1885 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); |
1876 | pstate = clamp_t(int, pstate, min_perf, max_perf); | 1886 | pstate = clamp_t(int, pstate, min_perf, max_perf); |
1877 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); | ||
1878 | return pstate; | 1887 | return pstate; |
1879 | } | 1888 | } |
1880 | 1889 | ||
1881 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) | 1890 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) |
1882 | { | 1891 | { |
1883 | pstate = intel_pstate_prepare_request(cpu, pstate); | ||
1884 | if (pstate == cpu->pstate.current_pstate) | 1892 | if (pstate == cpu->pstate.current_pstate) |
1885 | return; | 1893 | return; |
1886 | 1894 | ||
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | |||
1900 | 1908 | ||
1901 | update_turbo_state(); | 1909 | update_turbo_state(); |
1902 | 1910 | ||
1911 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | ||
1912 | trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); | ||
1903 | intel_pstate_update_pstate(cpu, target_pstate); | 1913 | intel_pstate_update_pstate(cpu, target_pstate); |
1904 | 1914 | ||
1905 | sample = &cpu->sample; | 1915 | sample = &cpu->sample; |
@@ -2132,16 +2142,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
2132 | mutex_lock(&intel_pstate_limits_lock); | 2142 | mutex_lock(&intel_pstate_limits_lock); |
2133 | 2143 | ||
2134 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { | 2144 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { |
2145 | pr_debug("set performance\n"); | ||
2135 | if (!perf_limits) { | 2146 | if (!perf_limits) { |
2136 | limits = &performance_limits; | 2147 | limits = &performance_limits; |
2137 | perf_limits = limits; | 2148 | perf_limits = limits; |
2138 | } | 2149 | } |
2139 | if (policy->max >= policy->cpuinfo.max_freq && | ||
2140 | !limits->no_turbo) { | ||
2141 | pr_debug("set performance\n"); | ||
2142 | intel_pstate_set_performance_limits(perf_limits); | ||
2143 | goto out; | ||
2144 | } | ||
2145 | } else { | 2150 | } else { |
2146 | pr_debug("set powersave\n"); | 2151 | pr_debug("set powersave\n"); |
2147 | if (!perf_limits) { | 2152 | if (!perf_limits) { |
@@ -2152,7 +2157,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
2152 | } | 2157 | } |
2153 | 2158 | ||
2154 | intel_pstate_update_perf_limits(policy, perf_limits); | 2159 | intel_pstate_update_perf_limits(policy, perf_limits); |
2155 | out: | 2160 | |
2156 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { | 2161 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
2157 | /* | 2162 | /* |
2158 | * NOHZ_FULL CPUs need this as the governor callback may not | 2163 | * NOHZ_FULL CPUs need this as the governor callback may not |
@@ -2198,9 +2203,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | |||
2198 | unsigned int max_freq, min_freq; | 2203 | unsigned int max_freq, min_freq; |
2199 | 2204 | ||
2200 | max_freq = policy->cpuinfo.max_freq * | 2205 | max_freq = policy->cpuinfo.max_freq * |
2201 | limits->max_sysfs_pct / 100; | 2206 | perf_limits->max_sysfs_pct / 100; |
2202 | min_freq = policy->cpuinfo.max_freq * | 2207 | min_freq = policy->cpuinfo.max_freq * |
2203 | limits->min_sysfs_pct / 100; | 2208 | perf_limits->min_sysfs_pct / 100; |
2204 | cpufreq_verify_within_limits(policy, min_freq, max_freq); | 2209 | cpufreq_verify_within_limits(policy, min_freq, max_freq); |
2205 | } | 2210 | } |
2206 | 2211 | ||
@@ -2243,13 +2248,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
2243 | 2248 | ||
2244 | cpu = all_cpu_data[policy->cpu]; | 2249 | cpu = all_cpu_data[policy->cpu]; |
2245 | 2250 | ||
2246 | /* | ||
2247 | * We need sane value in the cpu->perf_limits, so inherit from global | ||
2248 | * perf_limits limits, which are seeded with values based on the | ||
2249 | * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up. | ||
2250 | */ | ||
2251 | if (per_cpu_limits) | 2251 | if (per_cpu_limits) |
2252 | memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits)); | 2252 | intel_pstate_init_limits(cpu->perf_limits); |
2253 | 2253 | ||
2254 | policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; | 2254 | policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; |
2255 | policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 2255 | policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; |
@@ -2301,7 +2301,6 @@ static struct cpufreq_driver intel_pstate = { | |||
2301 | static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | 2301 | static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) |
2302 | { | 2302 | { |
2303 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | 2303 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2304 | struct perf_limits *perf_limits = limits; | ||
2305 | 2304 | ||
2306 | update_turbo_state(); | 2305 | update_turbo_state(); |
2307 | policy->cpuinfo.max_freq = limits->turbo_disabled ? | 2306 | policy->cpuinfo.max_freq = limits->turbo_disabled ? |
@@ -2309,15 +2308,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | |||
2309 | 2308 | ||
2310 | cpufreq_verify_within_cpu_limits(policy); | 2309 | cpufreq_verify_within_cpu_limits(policy); |
2311 | 2310 | ||
2312 | if (per_cpu_limits) | ||
2313 | perf_limits = cpu->perf_limits; | ||
2314 | |||
2315 | mutex_lock(&intel_pstate_limits_lock); | ||
2316 | |||
2317 | intel_pstate_update_perf_limits(policy, perf_limits); | ||
2318 | |||
2319 | mutex_unlock(&intel_pstate_limits_lock); | ||
2320 | |||
2321 | return 0; | 2311 | return 0; |
2322 | } | 2312 | } |
2323 | 2313 | ||
@@ -2370,6 +2360,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, | |||
2370 | wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, | 2360 | wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, |
2371 | pstate_funcs.get_val(cpu, target_pstate)); | 2361 | pstate_funcs.get_val(cpu, target_pstate)); |
2372 | } | 2362 | } |
2363 | freqs.new = target_pstate * cpu->pstate.scaling; | ||
2373 | cpufreq_freq_transition_end(policy, &freqs, false); | 2364 | cpufreq_freq_transition_end(policy, &freqs, false); |
2374 | 2365 | ||
2375 | return 0; | 2366 | return 0; |
@@ -2383,8 +2374,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |||
2383 | 2374 | ||
2384 | target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); | 2375 | target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); |
2385 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); | 2376 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
2377 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | ||
2386 | intel_pstate_update_pstate(cpu, target_pstate); | 2378 | intel_pstate_update_pstate(cpu, target_pstate); |
2387 | return target_freq; | 2379 | return target_pstate * cpu->pstate.scaling; |
2388 | } | 2380 | } |
2389 | 2381 | ||
2390 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) | 2382 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
@@ -2437,8 +2429,11 @@ static int intel_pstate_register_driver(void) | |||
2437 | 2429 | ||
2438 | intel_pstate_init_limits(&powersave_limits); | 2430 | intel_pstate_init_limits(&powersave_limits); |
2439 | intel_pstate_set_performance_limits(&performance_limits); | 2431 | intel_pstate_set_performance_limits(&performance_limits); |
2440 | limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ? | 2432 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) && |
2441 | &performance_limits : &powersave_limits; | 2433 | intel_pstate_driver == &intel_pstate) |
2434 | limits = &performance_limits; | ||
2435 | else | ||
2436 | limits = &powersave_limits; | ||
2442 | 2437 | ||
2443 | ret = cpufreq_register_driver(intel_pstate_driver); | 2438 | ret = cpufreq_register_driver(intel_pstate_driver); |
2444 | if (ret) { | 2439 | if (ret) { |
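In the hunks above, intel_pstate_update_policies() now drops intel_pstate_limits_lock around the cpufreq_update_policy() loop (the __releases/__acquires annotations tell sparse about it), because the policy-update path ends up back in intel_pstate_set_policy(), which takes the same mutex. A hedged, standalone illustration of that unlock-around-callback pattern using a plain pthread mutex (the names are invented for the example; this is not the driver code):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;
static int limits_generation;

/* Stand-in for cpufreq_update_policy(): it takes the lock itself. */
static void update_policy(int cpu)
{
	pthread_mutex_lock(&limits_lock);
	printf("cpu%d sees limits generation %d\n", cpu, limits_generation);
	pthread_mutex_unlock(&limits_lock);
}

/* Called with limits_lock held; drops and re-takes it around the
 * callbacks so they do not deadlock on the same (non-recursive) lock. */
static void update_policies_locked(int ncpus)
{
	pthread_mutex_unlock(&limits_lock);
	for (int cpu = 0; cpu < ncpus; cpu++)
		update_policy(cpu);
	pthread_mutex_lock(&limits_lock);
}

int main(void)
{
	pthread_mutex_lock(&limits_lock);
	limits_generation = 1;
	update_policies_locked(4);
	pthread_mutex_unlock(&limits_lock);
	return 0;
}
```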
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 43a0c8a26ab0..00a16ab601cb 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c | |||
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data, | |||
82 | void cryp_flush_inoutfifo(struct cryp_device_data *device_data) | 82 | void cryp_flush_inoutfifo(struct cryp_device_data *device_data) |
83 | { | 83 | { |
84 | /* | 84 | /* |
85 | * We always need to disble the hardware before trying to flush the | 85 | * We always need to disable the hardware before trying to flush the |
86 | * FIFO. This is something that isn't written in the design | 86 | * FIFO. This is something that isn't written in the design |
87 | * specification, but we have been informed by the hardware designers | 87 | * specification, but we have been informed by the hardware designers |
88 | * that this must be done. | 88 | * that this must be done. |
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 349dc3e1e52e..974c5a31a005 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c | |||
@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void) | |||
65 | bool systab_found; | 65 | bool systab_found; |
66 | 66 | ||
67 | efi_mm.pgd = pgd_alloc(&efi_mm); | 67 | efi_mm.pgd = pgd_alloc(&efi_mm); |
68 | mm_init_cpumask(&efi_mm); | ||
68 | init_new_context(NULL, &efi_mm); | 69 | init_new_context(NULL, &efi_mm); |
69 | 70 | ||
70 | systab_found = false; | 71 | systab_found = false; |
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 6def402bf569..5da36e56b36a 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c | |||
@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) | |||
45 | size = sizeof(secboot); | 45 | size = sizeof(secboot); |
46 | status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, | 46 | status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, |
47 | NULL, &size, &secboot); | 47 | NULL, &size, &secboot); |
48 | if (status == EFI_NOT_FOUND) | ||
49 | return efi_secureboot_mode_disabled; | ||
48 | if (status != EFI_SUCCESS) | 50 | if (status != EFI_SUCCESS) |
49 | goto out_efi_err; | 51 | goto out_efi_err; |
50 | 52 | ||
@@ -78,7 +80,5 @@ secure_boot_enabled: | |||
78 | 80 | ||
79 | out_efi_err: | 81 | out_efi_err: |
80 | pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); | 82 | pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); |
81 | if (status == EFI_NOT_FOUND) | ||
82 | return efi_secureboot_mode_disabled; | ||
83 | return efi_secureboot_mode_unknown; | 83 | return efi_secureboot_mode_unknown; |
84 | } | 84 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6abb238b25c9..4120b351a8e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
2094 | } | 2094 | } |
2095 | 2095 | ||
2096 | r = amdgpu_late_init(adev); | 2096 | r = amdgpu_late_init(adev); |
2097 | if (r) | 2097 | if (r) { |
2098 | if (fbcon) | ||
2099 | console_unlock(); | ||
2098 | return r; | 2100 | return r; |
2101 | } | ||
2099 | 2102 | ||
2100 | /* pin cursors */ | 2103 | /* pin cursors */ |
2101 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 2104 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 75fc376ba735..f7adbace428a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -59,9 +59,10 @@ | |||
59 | * - 3.7.0 - Add support for VCE clock list packet | 59 | * - 3.7.0 - Add support for VCE clock list packet |
60 | * - 3.8.0 - Add support raster config init in the kernel | 60 | * - 3.8.0 - Add support raster config init in the kernel |
61 | * - 3.9.0 - Add support for memory query info about VRAM and GTT. | 61 | * - 3.9.0 - Add support for memory query info about VRAM and GTT. |
62 | * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags | ||
62 | */ | 63 | */ |
63 | #define KMS_DRIVER_MAJOR 3 | 64 | #define KMS_DRIVER_MAJOR 3 |
64 | #define KMS_DRIVER_MINOR 9 | 65 | #define KMS_DRIVER_MINOR 10 |
65 | #define KMS_DRIVER_PATCHLEVEL 0 | 66 | #define KMS_DRIVER_PATCHLEVEL 0 |
66 | 67 | ||
67 | int amdgpu_vram_limit = 0; | 68 | int amdgpu_vram_limit = 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 51d759463384..106cf83c2e6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, | |||
202 | bool kernel = false; | 202 | bool kernel = false; |
203 | int r; | 203 | int r; |
204 | 204 | ||
205 | /* reject invalid gem flags */ | ||
206 | if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | ||
207 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | ||
208 | AMDGPU_GEM_CREATE_CPU_GTT_USWC | | ||
209 | AMDGPU_GEM_CREATE_VRAM_CLEARED| | ||
210 | AMDGPU_GEM_CREATE_SHADOW | | ||
211 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { | ||
212 | r = -EINVAL; | ||
213 | goto error_unlock; | ||
214 | } | ||
215 | /* reject invalid gem domains */ | ||
216 | if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU | | ||
217 | AMDGPU_GEM_DOMAIN_GTT | | ||
218 | AMDGPU_GEM_DOMAIN_VRAM | | ||
219 | AMDGPU_GEM_DOMAIN_GDS | | ||
220 | AMDGPU_GEM_DOMAIN_GWS | | ||
221 | AMDGPU_GEM_DOMAIN_OA)) { | ||
222 | r = -EINVAL; | ||
223 | goto error_unlock; | ||
224 | } | ||
225 | |||
205 | /* create a gem object to contain this object in */ | 226 | /* create a gem object to contain this object in */ |
206 | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | | 227 | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | |
207 | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { | 228 | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { |
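The checks added above whitelist the user-supplied flag and domain words: any bit outside the OR of the known constants fails the ioctl with -EINVAL instead of being passed through. A small standalone sketch of that mask test (the flag names and values below are invented for illustration):

```c
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define FLAG_CPU_ACCESS  (1u << 0)
#define FLAG_NO_CPU      (1u << 1)
#define FLAG_USWC        (1u << 2)
#define KNOWN_FLAGS      (FLAG_CPU_ACCESS | FLAG_NO_CPU | FLAG_USWC)

static int validate_flags(uint32_t flags)
{
	/* Reject any bit that is not part of the known set. */
	if (flags & ~KNOWN_FLAGS)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("0x3 -> %d\n", validate_flags(0x3)); /* known bits only: 0 */
	printf("0x9 -> %d\n", validate_flags(0x9)); /* bit 3 unknown: -EINVAL */
	return 0;
}
```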
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31375bdde6f1..011800f621c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
788 | } | 788 | } |
789 | } | 789 | } |
790 | 790 | ||
791 | /* disble sdma engine before programing it */ | 791 | /* disable sdma engine before programing it */ |
792 | sdma_v3_0_ctx_switch_enable(adev, false); | 792 | sdma_v3_0_ctx_switch_enable(adev, false); |
793 | sdma_v3_0_enable(adev, false); | 793 | sdma_v3_0_enable(adev, false); |
794 | 794 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c8baab9bee0d..ba58f1b11d1e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -148,6 +148,9 @@ static const struct edid_quirk { | |||
148 | 148 | ||
149 | /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ | 149 | /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ |
150 | { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, | 150 | { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, |
151 | |||
152 | /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ | ||
153 | { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, | ||
151 | }; | 154 | }; |
152 | 155 | ||
153 | /* | 156 | /* |
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 4a6a2ed65732..b7d7721e72fa 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c | |||
@@ -41,6 +41,54 @@ enum { | |||
41 | INTEL_GVT_PCI_BAR_MAX, | 41 | INTEL_GVT_PCI_BAR_MAX, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | /* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one | ||
45 | * byte) byte by byte in standard pci configuration space. (not the full | ||
46 | * 256 bytes.) | ||
47 | */ | ||
48 | static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { | ||
49 | [PCI_COMMAND] = 0xff, 0x07, | ||
50 | [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */ | ||
51 | [PCI_CACHE_LINE_SIZE] = 0xff, | ||
52 | [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff, | ||
53 | [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff, | ||
54 | [PCI_INTERRUPT_LINE] = 0xff, | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * vgpu_pci_cfg_mem_write - write virtual cfg space memory | ||
59 | * | ||
60 | * Use this function to write virtual cfg space memory. | ||
61 | * For standard cfg space, only RW bits can be changed, | ||
62 | * and we emulates the RW1C behavior of PCI_STATUS register. | ||
63 | */ | ||
64 | static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, | ||
65 | u8 *src, unsigned int bytes) | ||
66 | { | ||
67 | u8 *cfg_base = vgpu_cfg_space(vgpu); | ||
68 | u8 mask, new, old; | ||
69 | int i = 0; | ||
70 | |||
71 | for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { | ||
72 | mask = pci_cfg_space_rw_bmp[off + i]; | ||
73 | old = cfg_base[off + i]; | ||
74 | new = src[i] & mask; | ||
75 | |||
76 | /** | ||
77 | * The PCI_STATUS high byte has RW1C bits, here | ||
78 | * emulates clear by writing 1 for these bits. | ||
79 | * Writing a 0b to RW1C bits has no effect. | ||
80 | */ | ||
81 | if (off + i == PCI_STATUS + 1) | ||
82 | new = (~new & old) & mask; | ||
83 | |||
84 | cfg_base[off + i] = (old & ~mask) | new; | ||
85 | } | ||
86 | |||
87 | /* For other configuration space directly copy as it is. */ | ||
88 | if (i < bytes) | ||
89 | memcpy(cfg_base + off + i, src + i, bytes - i); | ||
90 | } | ||
91 | |||
44 | /** | 92 | /** |
45 | * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read | 93 | * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read |
46 | * | 94 | * |
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, | |||
123 | u8 changed = old ^ new; | 171 | u8 changed = old ^ new; |
124 | int ret; | 172 | int ret; |
125 | 173 | ||
126 | memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); | 174 | vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); |
127 | if (!(changed & PCI_COMMAND_MEMORY)) | 175 | if (!(changed & PCI_COMMAND_MEMORY)) |
128 | return 0; | 176 | return 0; |
129 | 177 | ||
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
237 | { | 285 | { |
238 | int ret; | 286 | int ret; |
239 | 287 | ||
288 | if (vgpu->failsafe) | ||
289 | return 0; | ||
290 | |||
240 | if (WARN_ON(bytes > 4)) | 291 | if (WARN_ON(bytes > 4)) |
241 | return -EINVAL; | 292 | return -EINVAL; |
242 | 293 | ||
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
274 | if (ret) | 325 | if (ret) |
275 | return ret; | 326 | return ret; |
276 | 327 | ||
277 | memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); | 328 | vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); |
278 | break; | 329 | break; |
279 | default: | 330 | default: |
280 | memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); | 331 | vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); |
281 | break; | 332 | break; |
282 | } | 333 | } |
283 | return 0; | 334 | return 0; |
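The vgpu_pci_cfg_mem_write() helper above applies a per-byte writability mask and, for the high byte of PCI_STATUS (mask 0xf9 in the bitmap), write-1-to-clear semantics: writing 1 clears a bit, writing 0 leaves it alone, hence new = (~new & old) & mask. A standalone numeric walk-through of that expression (the concrete values are chosen only for illustration):

```c
#include <stdio.h>
#include <stdint.h>

/* Model one RW1C byte: 'old' is the current status byte, 'write' is the
 * guest's write, 'mask' marks which bits are writable at all. */
static uint8_t rw1c_write(uint8_t old, uint8_t write, uint8_t mask)
{
	uint8_t new = write & mask;          /* ignore writes to reserved bits */
	new = (uint8_t)(~new & old) & mask;  /* writing 1 clears, 0 preserves */
	return (uint8_t)((old & ~mask) | new);
}

int main(void)
{
	/* Status byte 0xF1 with writable mask 0xF9: writing 0x80 clears
	 * only bit 7 and leaves the other set bits untouched. */
	printf("0x%02x\n", rw1c_write(0xF1, 0x80, 0xF9)); /* prints 0x71 */
	return 0;
}
```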
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b9c8e2407682..7ae6e2b241c8 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id) | |||
668 | if (d_info == NULL) | 668 | if (d_info == NULL) |
669 | return; | 669 | return; |
670 | 670 | ||
671 | gvt_err("opcode=0x%x %s sub_ops:", | 671 | gvt_dbg_cmd("opcode=0x%x %s sub_ops:", |
672 | cmd >> (32 - d_info->op_len), d_info->name); | 672 | cmd >> (32 - d_info->op_len), d_info->name); |
673 | 673 | ||
674 | for (i = 0; i < d_info->nr_sub_op; i++) | 674 | for (i = 0; i < d_info->nr_sub_op; i++) |
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s) | |||
693 | int cnt = 0; | 693 | int cnt = 0; |
694 | int i; | 694 | int i; |
695 | 695 | ||
696 | gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" | 696 | gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" |
697 | " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, | 697 | " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, |
698 | s->ring_id, s->ring_start, s->ring_start + s->ring_size, | 698 | s->ring_id, s->ring_start, s->ring_start + s->ring_size, |
699 | s->ring_head, s->ring_tail); | 699 | s->ring_head, s->ring_tail); |
700 | 700 | ||
701 | gvt_err(" %s %s ip_gma(%08lx) ", | 701 | gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", |
702 | s->buf_type == RING_BUFFER_INSTRUCTION ? | 702 | s->buf_type == RING_BUFFER_INSTRUCTION ? |
703 | "RING_BUFFER" : "BATCH_BUFFER", | 703 | "RING_BUFFER" : "BATCH_BUFFER", |
704 | s->buf_addr_type == GTT_BUFFER ? | 704 | s->buf_addr_type == GTT_BUFFER ? |
705 | "GTT" : "PPGTT", s->ip_gma); | 705 | "GTT" : "PPGTT", s->ip_gma); |
706 | 706 | ||
707 | if (s->ip_va == NULL) { | 707 | if (s->ip_va == NULL) { |
708 | gvt_err(" ip_va(NULL)"); | 708 | gvt_dbg_cmd(" ip_va(NULL)"); |
709 | return; | 709 | return; |
710 | } | 710 | } |
711 | 711 | ||
712 | gvt_err(" ip_va=%p: %08x %08x %08x %08x\n", | 712 | gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n", |
713 | s->ip_va, cmd_val(s, 0), cmd_val(s, 1), | 713 | s->ip_va, cmd_val(s, 0), cmd_val(s, 1), |
714 | cmd_val(s, 2), cmd_val(s, 3)); | 714 | cmd_val(s, 2), cmd_val(s, 3)); |
715 | 715 | ||
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8fde880c39..5419ae6ec633 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { | ||
87 | { | ||
88 | /* EDID with 1024x768 as its resolution */ | ||
89 | /*Header*/ | ||
90 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | ||
91 | /* Vendor & Product Identification */ | ||
92 | 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, | ||
93 | /* Version & Revision */ | ||
94 | 0x01, 0x04, | ||
95 | /* Basic Display Parameters & Features */ | ||
96 | 0xa5, 0x34, 0x20, 0x78, 0x23, | ||
97 | /* Color Characteristics */ | ||
98 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, | ||
99 | /* Established Timings: maximum resolution is 1024x768 */ | ||
100 | 0x21, 0x08, 0x00, | ||
101 | /* Standard Timings. All invalid */ | ||
102 | 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, | ||
103 | 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, | ||
104 | /* 18 Byte Data Blocks 1: invalid */ | ||
105 | 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, | ||
106 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, | ||
107 | /* 18 Byte Data Blocks 2: invalid */ | ||
108 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, | ||
109 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | ||
110 | /* 18 Byte Data Blocks 3: invalid */ | ||
111 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, | ||
112 | 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, | ||
113 | /* 18 Byte Data Blocks 4: invalid */ | ||
114 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, | ||
115 | 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, | ||
116 | /* Extension Block Count */ | ||
117 | 0x00, | ||
118 | /* Checksum */ | ||
119 | 0xef, | ||
120 | }, | ||
121 | { | ||
86 | /* EDID with 1920x1200 as its resolution */ | 122 | /* EDID with 1920x1200 as its resolution */ |
87 | static unsigned char virtual_dp_monitor_edid[] = { | 123 | /*Header*/ |
88 | /*Header*/ | 124 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, |
89 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, | 125 | /* Vendor & Product Identification */ |
90 | /* Vendor & Product Identification */ | 126 | 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, |
91 | 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, | 127 | /* Version & Revision */ |
92 | /* Version & Revision */ | 128 | 0x01, 0x04, |
93 | 0x01, 0x04, | 129 | /* Basic Display Parameters & Features */ |
94 | /* Basic Display Parameters & Features */ | 130 | 0xa5, 0x34, 0x20, 0x78, 0x23, |
95 | 0xa5, 0x34, 0x20, 0x78, 0x23, | 131 | /* Color Characteristics */ |
96 | /* Color Characteristics */ | 132 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, |
97 | 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, | 133 | /* Established Timings: maximum resolution is 1024x768 */ |
98 | /* Established Timings: maximum resolution is 1024x768 */ | 134 | 0x21, 0x08, 0x00, |
99 | 0x21, 0x08, 0x00, | 135 | /* |
100 | /* | 136 | * Standard Timings. |
101 | * Standard Timings. | 137 | * below new resolutions can be supported: |
102 | * below new resolutions can be supported: | 138 | * 1920x1080, 1280x720, 1280x960, 1280x1024, |
103 | * 1920x1080, 1280x720, 1280x960, 1280x1024, | 139 | * 1440x900, 1600x1200, 1680x1050 |
104 | * 1440x900, 1600x1200, 1680x1050 | 140 | */ |
105 | */ | 141 | 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, |
106 | 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, | 142 | 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, |
107 | 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, | 143 | /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ |
108 | /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ | 144 | 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, |
109 | 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, | 145 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, |
110 | 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, | 146 | /* 18 Byte Data Blocks 2: invalid */ |
111 | /* 18 Byte Data Blocks 2: invalid */ | 147 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, |
112 | 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, | 148 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
113 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, | 149 | /* 18 Byte Data Blocks 3: invalid */ |
114 | /* 18 Byte Data Blocks 3: invalid */ | 150 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, |
115 | 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, | 151 | 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, |
116 | 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, | 152 | /* 18 Byte Data Blocks 4: invalid */ |
117 | /* 18 Byte Data Blocks 4: invalid */ | 153 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, |
118 | 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, | 154 | 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, |
119 | 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, | 155 | /* Extension Block Count */ |
120 | /* Extension Block Count */ | 156 | 0x00, |
121 | 0x00, | 157 | /* Checksum */ |
122 | /* Checksum */ | 158 | 0x45, |
123 | 0x45, | 159 | }, |
124 | }; | 160 | }; |
125 | 161 | ||
126 | #define DPCD_HEADER_SIZE 0xb | 162 | #define DPCD_HEADER_SIZE 0xb |
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
140 | vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | | 176 | vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | |
141 | SDE_PORTE_HOTPLUG_SPT); | 177 | SDE_PORTE_HOTPLUG_SPT); |
142 | 178 | ||
143 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) | 179 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { |
144 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; | 180 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; |
181 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; | ||
182 | } | ||
145 | 183 | ||
146 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) | 184 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { |
147 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; | 185 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; |
186 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; | ||
187 | } | ||
148 | 188 | ||
149 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) | 189 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { |
150 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; | 190 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; |
191 | vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; | ||
192 | } | ||
151 | 193 | ||
152 | if (IS_SKYLAKE(dev_priv) && | 194 | if (IS_SKYLAKE(dev_priv) && |
153 | intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { | 195 | intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { |
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) | |||
160 | GEN8_PORT_DP_A_HOTPLUG; | 202 | GEN8_PORT_DP_A_HOTPLUG; |
161 | else | 203 | else |
162 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; | 204 | vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; |
205 | |||
206 | vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; | ||
163 | } | 207 | } |
164 | } | 208 | } |
165 | 209 | ||
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) | |||
175 | } | 219 | } |
176 | 220 | ||
177 | static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, | 221 | static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, |
178 | int type) | 222 | int type, unsigned int resolution) |
179 | { | 223 | { |
180 | struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); | 224 | struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); |
181 | 225 | ||
226 | if (WARN_ON(resolution >= GVT_EDID_NUM)) | ||
227 | return -EINVAL; | ||
228 | |||
182 | port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); | 229 | port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); |
183 | if (!port->edid) | 230 | if (!port->edid) |
184 | return -ENOMEM; | 231 | return -ENOMEM; |
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, | |||
189 | return -ENOMEM; | 236 | return -ENOMEM; |
190 | } | 237 | } |
191 | 238 | ||
192 | memcpy(port->edid->edid_block, virtual_dp_monitor_edid, | 239 | memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], |
193 | EDID_SIZE); | 240 | EDID_SIZE); |
194 | port->edid->data_valid = true; | 241 | port->edid->data_valid = true; |
195 | 242 | ||
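setup_virtual_dp_monitor() now indexes virtual_dp_monitor_edid[] by the requested resolution and rejects out-of-range values before touching the array. A standalone sketch of that select-with-bounds-check shape follows; the enum values and table contents are placeholders, not the driver's data.

#include <stdio.h>
#include <string.h>

enum vgpu_edid { EDID_1024_768, EDID_1920_1200, EDID_NUM };

static const unsigned char edid_blocks[EDID_NUM][128] = {
        { 0x00, 0xff },         /* placeholder 1024x768 block */
        { 0x00, 0xff },         /* placeholder 1920x1200 block */
};

/* Copy the EDID for the given resolution, refusing out-of-range indexes. */
static int copy_edid(unsigned char *dst, unsigned int resolution)
{
        if (resolution >= EDID_NUM)
                return -1;
        memcpy(dst, edid_blocks[resolution], sizeof(edid_blocks[resolution]));
        return 0;
}

int main(void)
{
        unsigned char buf[128];

        printf("valid: %d, out of range: %d\n",
               copy_edid(buf, EDID_1920_1200), copy_edid(buf, 7));
        return 0;
}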
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) | |||
322 | * Zero on success, negative error code if failed. | 369 | * Zero on success, negative error code if failed. |
323 | * | 370 | * |
324 | */ | 371 | */ |
325 | int intel_vgpu_init_display(struct intel_vgpu *vgpu) | 372 | int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) |
326 | { | 373 | { |
327 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 374 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
328 | 375 | ||
329 | intel_vgpu_init_i2c_edid(vgpu); | 376 | intel_vgpu_init_i2c_edid(vgpu); |
330 | 377 | ||
331 | if (IS_SKYLAKE(dev_priv)) | 378 | if (IS_SKYLAKE(dev_priv)) |
332 | return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D); | 379 | return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, |
380 | resolution); | ||
333 | else | 381 | else |
334 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); | 382 | return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, |
383 | resolution); | ||
335 | } | 384 | } |
336 | 385 | ||
337 | /** | 386 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index 8b234ea961f6..d73de22102e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h | |||
@@ -154,10 +154,28 @@ struct intel_vgpu_port { | |||
154 | int type; | 154 | int type; |
155 | }; | 155 | }; |
156 | 156 | ||
157 | enum intel_vgpu_edid { | ||
158 | GVT_EDID_1024_768, | ||
159 | GVT_EDID_1920_1200, | ||
160 | GVT_EDID_NUM, | ||
161 | }; | ||
162 | |||
163 | static inline char *vgpu_edid_str(enum intel_vgpu_edid id) | ||
164 | { | ||
165 | switch (id) { | ||
166 | case GVT_EDID_1024_768: | ||
167 | return "1024x768"; | ||
168 | case GVT_EDID_1920_1200: | ||
169 | return "1920x1200"; | ||
170 | default: | ||
171 | return ""; | ||
172 | } | ||
173 | } | ||
174 | |||
157 | void intel_gvt_emulate_vblank(struct intel_gvt *gvt); | 175 | void intel_gvt_emulate_vblank(struct intel_gvt *gvt); |
158 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); | 176 | void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); |
159 | 177 | ||
160 | int intel_vgpu_init_display(struct intel_vgpu *vgpu); | 178 | int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution); |
161 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu); | 179 | void intel_vgpu_reset_display(struct intel_vgpu *vgpu); |
162 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); | 180 | void intel_vgpu_clean_display(struct intel_vgpu *vgpu); |
163 | 181 | ||
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 1cb29b2d7dc6..933a7c211a1c 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c | |||
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) | |||
80 | int ret; | 80 | int ret; |
81 | 81 | ||
82 | size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; | 82 | size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; |
83 | firmware = vmalloc(size); | 83 | firmware = vzalloc(size); |
84 | if (!firmware) | 84 | if (!firmware) |
85 | return -ENOMEM; | 85 | return -ENOMEM; |
86 | 86 | ||
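The firmware.c change swaps vmalloc() for vzalloc() because the buffer is only partially filled before being exposed through sysfs; without zeroing, the untouched bytes would leak stale kernel memory. A rough userspace analogue of the same rule, with calloc() standing in for vzalloc():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        size_t size = 4096;
        unsigned char *buf = calloc(1, size);   /* zeroed, like vzalloc() */

        if (!buf)
                return 1;
        memcpy(buf, "header", 6);               /* only part of buf is written */
        /* The remaining bytes are guaranteed to be 0, not leftover heap data. */
        printf("buf[100] = %d\n", buf[100]);
        free(buf);
        return 0;
}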
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 28c92346db0e..6a5ff23ded90 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
1825 | gma = g_gtt_index << GTT_PAGE_SHIFT; | 1825 | gma = g_gtt_index << GTT_PAGE_SHIFT; |
1826 | 1826 | ||
1827 | /* the VM may configure the whole GM space when ballooning is used */ | 1827 | /* the VM may configure the whole GM space when ballooning is used */ |
1828 | if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma), | 1828 | if (!vgpu_gmadr_is_valid(vgpu, gma)) |
1829 | "vgpu%d: found oob ggtt write, offset %x\n", | ||
1830 | vgpu->id, off)) { | ||
1831 | return 0; | 1829 | return 0; |
1832 | } | ||
1833 | 1830 | ||
1834 | ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); | 1831 | ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); |
1835 | 1832 | ||
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
2015 | return create_scratch_page_tree(vgpu); | 2012 | return create_scratch_page_tree(vgpu); |
2016 | } | 2013 | } |
2017 | 2014 | ||
2015 | static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) | ||
2016 | { | ||
2017 | struct list_head *pos, *n; | ||
2018 | struct intel_vgpu_mm *mm; | ||
2019 | |||
2020 | list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { | ||
2021 | mm = container_of(pos, struct intel_vgpu_mm, list); | ||
2022 | if (mm->type == type) { | ||
2023 | vgpu->gvt->gtt.mm_free_page_table(mm); | ||
2024 | list_del(&mm->list); | ||
2025 | list_del(&mm->lru_list); | ||
2026 | kfree(mm); | ||
2027 | } | ||
2028 | } | ||
2029 | } | ||
2030 | |||
2018 | /** | 2031 | /** |
2019 | * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization | 2032 | * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization |
2020 | * @vgpu: a vGPU | 2033 | * @vgpu: a vGPU |
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
2027 | */ | 2040 | */ |
2028 | void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) | 2041 | void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) |
2029 | { | 2042 | { |
2030 | struct list_head *pos, *n; | ||
2031 | struct intel_vgpu_mm *mm; | ||
2032 | |||
2033 | ppgtt_free_all_shadow_page(vgpu); | 2043 | ppgtt_free_all_shadow_page(vgpu); |
2034 | release_scratch_page_tree(vgpu); | 2044 | release_scratch_page_tree(vgpu); |
2035 | 2045 | ||
2036 | list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { | 2046 | intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); |
2037 | mm = container_of(pos, struct intel_vgpu_mm, list); | 2047 | intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); |
2038 | vgpu->gvt->gtt.mm_free_page_table(mm); | ||
2039 | list_del(&mm->list); | ||
2040 | list_del(&mm->lru_list); | ||
2041 | kfree(mm); | ||
2042 | } | ||
2043 | } | 2048 | } |
2044 | 2049 | ||
2045 | static void clean_spt_oos(struct intel_gvt *gvt) | 2050 | static void clean_spt_oos(struct intel_gvt *gvt) |
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) | |||
2322 | int i; | 2327 | int i; |
2323 | 2328 | ||
2324 | ppgtt_free_all_shadow_page(vgpu); | 2329 | ppgtt_free_all_shadow_page(vgpu); |
2330 | |||
2331 | /* Shadow pages are only created when there is no page | ||
2332 | * table tracking data, so remove page tracking data after | ||
2333 | * removing the shadow pages. | ||
2334 | */ | ||
2335 | intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); | ||
2336 | |||
2325 | if (!dmlr) | 2337 | if (!dmlr) |
2326 | return; | 2338 | return; |
2327 | 2339 | ||
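The new intel_vgpu_free_mm() helper walks the mm list with a safe iterator so entries of one type can be unlinked and freed mid-walk, and intel_vgpu_clean_gtt()/intel_vgpu_reset_gtt() now free PPGTT and GGTT entries separately. A standalone sketch of that remove-while-iterating pattern, using a plain singly linked list in place of the kernel's list_head:

#include <stdlib.h>
#include <stdio.h>

struct mm_node {
        int type;                       /* e.g. PPGTT vs GGTT */
        struct mm_node *next;
};

/* Free every node of the given type without breaking the traversal. */
static void free_mm_of_type(struct mm_node **head, int type)
{
        struct mm_node **link = head;

        while (*link) {
                struct mm_node *cur = *link;

                if (cur->type == type) {
                        *link = cur->next;      /* unlink before freeing */
                        free(cur);
                } else {
                        link = &cur->next;
                }
        }
}

int main(void)
{
        struct mm_node *head = NULL;
        int types[] = { 0, 1, 0, 1 };

        for (int i = 0; i < 4; i++) {
                struct mm_node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->type = types[i];
                n->next = head;
                head = n;
        }
        free_mm_of_type(&head, 1);      /* drop all type-1 entries, keep type-0 */
        for (struct mm_node *n = head; n; n = n->next)
                printf("kept type %d\n", n->type);
        return 0;
}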
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index e227caf5859e..23791920ced1 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
@@ -143,6 +143,8 @@ struct intel_vgpu { | |||
143 | int id; | 143 | int id; |
144 | unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ | 144 | unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ |
145 | bool active; | 145 | bool active; |
146 | bool pv_notified; | ||
147 | bool failsafe; | ||
146 | bool resetting; | 148 | bool resetting; |
147 | void *sched_data; | 149 | void *sched_data; |
148 | 150 | ||
@@ -203,18 +205,18 @@ struct intel_gvt_firmware { | |||
203 | }; | 205 | }; |
204 | 206 | ||
205 | struct intel_gvt_opregion { | 207 | struct intel_gvt_opregion { |
206 | void __iomem *opregion_va; | 208 | void *opregion_va; |
207 | u32 opregion_pa; | 209 | u32 opregion_pa; |
208 | }; | 210 | }; |
209 | 211 | ||
210 | #define NR_MAX_INTEL_VGPU_TYPES 20 | 212 | #define NR_MAX_INTEL_VGPU_TYPES 20 |
211 | struct intel_vgpu_type { | 213 | struct intel_vgpu_type { |
212 | char name[16]; | 214 | char name[16]; |
213 | unsigned int max_instance; | ||
214 | unsigned int avail_instance; | 215 | unsigned int avail_instance; |
215 | unsigned int low_gm_size; | 216 | unsigned int low_gm_size; |
216 | unsigned int high_gm_size; | 217 | unsigned int high_gm_size; |
217 | unsigned int fence; | 218 | unsigned int fence; |
219 | enum intel_vgpu_edid resolution; | ||
218 | }; | 220 | }; |
219 | 221 | ||
220 | struct intel_gvt { | 222 | struct intel_gvt { |
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params { | |||
317 | __u64 low_gm_sz; /* in MB */ | 319 | __u64 low_gm_sz; /* in MB */ |
318 | __u64 high_gm_sz; /* in MB */ | 320 | __u64 high_gm_sz; /* in MB */ |
319 | __u64 fence_sz; | 321 | __u64 fence_sz; |
322 | __u64 resolution; | ||
320 | __s32 primary; | 323 | __s32 primary; |
321 | __u64 vgpu_id; | 324 | __u64 vgpu_id; |
322 | }; | 325 | }; |
@@ -449,6 +452,11 @@ struct intel_gvt_ops { | |||
449 | }; | 452 | }; |
450 | 453 | ||
451 | 454 | ||
455 | enum { | ||
456 | GVT_FAILSAFE_UNSUPPORTED_GUEST, | ||
457 | GVT_FAILSAFE_INSUFFICIENT_RESOURCE, | ||
458 | }; | ||
459 | |||
452 | #include "mpt.h" | 460 | #include "mpt.h" |
453 | 461 | ||
454 | #endif | 462 | #endif |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 1d450627ff65..8e43395c748a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt, | |||
121 | info->size = size; | 121 | info->size = size; |
122 | info->length = (i + 4) < end ? 4 : (end - i); | 122 | info->length = (i + 4) < end ? 4 : (end - i); |
123 | info->addr_mask = addr_mask; | 123 | info->addr_mask = addr_mask; |
124 | info->ro_mask = ro_mask; | ||
124 | info->device = device; | 125 | info->device = device; |
125 | info->read = read ? read : intel_vgpu_default_mmio_read; | 126 | info->read = read ? read : intel_vgpu_default_mmio_read; |
126 | info->write = write ? write : intel_vgpu_default_mmio_write; | 127 | info->write = write ? write : intel_vgpu_default_mmio_write; |
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg) | |||
150 | #define fence_num_to_offset(num) \ | 151 | #define fence_num_to_offset(num) \ |
151 | (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) | 152 | (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) |
152 | 153 | ||
154 | |||
155 | static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) | ||
156 | { | ||
157 | switch (reason) { | ||
158 | case GVT_FAILSAFE_UNSUPPORTED_GUEST: | ||
159 | pr_err("Detected your guest driver doesn't support GVT-g.\n"); | ||
160 | break; | ||
161 | case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: | ||
162 | pr_err("Graphics resource is not enough for the guest\n"); | ||
163 | default: | ||
164 | break; | ||
165 | } | ||
166 | pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); | ||
167 | vgpu->failsafe = true; | ||
168 | } | ||
169 | |||
153 | static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, | 170 | static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, |
154 | unsigned int fence_num, void *p_data, unsigned int bytes) | 171 | unsigned int fence_num, void *p_data, unsigned int bytes) |
155 | { | 172 | { |
156 | if (fence_num >= vgpu_fence_sz(vgpu)) { | 173 | if (fence_num >= vgpu_fence_sz(vgpu)) { |
157 | gvt_err("vgpu%d: found oob fence register access\n", | 174 | |
158 | vgpu->id); | 175 | /* If the guest accesses OOB fence regs without touching |
159 | gvt_err("vgpu%d: total fence num %d access fence num %d\n", | 176 | * pv_info first, treat it as a guest that does not |
160 | vgpu->id, vgpu_fence_sz(vgpu), fence_num); | 177 | * support GVT-g and let the vgpu enter failsafe mode. |
178 | */ | ||
179 | if (!vgpu->pv_notified) | ||
180 | enter_failsafe_mode(vgpu, | ||
181 | GVT_FAILSAFE_UNSUPPORTED_GUEST); | ||
182 | |||
183 | if (!vgpu->mmio.disable_warn_untrack) { | ||
184 | gvt_err("vgpu%d: found oob fence register access\n", | ||
185 | vgpu->id); | ||
186 | gvt_err("vgpu%d: total fence %d, access fence %d\n", | ||
187 | vgpu->id, vgpu_fence_sz(vgpu), | ||
188 | fence_num); | ||
189 | } | ||
161 | memset(p_data, 0, bytes); | 190 | memset(p_data, 0, bytes); |
191 | return -EINVAL; | ||
162 | } | 192 | } |
163 | return 0; | 193 | return 0; |
164 | } | 194 | } |
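The reworked sanitize_fence_mmio_access() uses an out-of-range fence access from a guest that never touched pv_info as the signal to enter failsafe mode, instead of only logging. A standalone sketch of that policy; the struct and field names here are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

struct vgpu_state {
        bool pv_notified;
        bool failsafe;
        unsigned int fence_count;
};

/* Reject out-of-range fence accesses; flip the one-way failsafe flag if the
 * guest never announced GVT-g awareness via pv_info. */
static int check_fence_access(struct vgpu_state *v, unsigned int fence_num)
{
        if (fence_num >= v->fence_count) {
                if (!v->pv_notified && !v->failsafe) {
                        fprintf(stderr, "guest looks unaware of GVT-g, entering failsafe\n");
                        v->failsafe = true;
                }
                return -1;
        }
        return 0;
}

int main(void)
{
        struct vgpu_state v = { .pv_notified = false, .failsafe = false, .fence_count = 4 };

        printf("in range: %d\n", check_fence_access(&v, 2));
        printf("out of range: %d (failsafe=%d)\n", check_fence_access(&v, 9), v.failsafe);
        return 0;
}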
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
369 | return 0; | 399 | return 0; |
370 | } | 400 | } |
371 | 401 | ||
402 | /* sorted in ascending order */ | ||
403 | static i915_reg_t force_nonpriv_white_list[] = { | ||
404 | GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) | ||
405 | GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) | ||
406 | GEN8_CS_CHICKEN1,//_MMIO(0x2580) | ||
407 | _MMIO(0x2690), | ||
408 | _MMIO(0x2694), | ||
409 | _MMIO(0x2698), | ||
410 | _MMIO(0x4de0), | ||
411 | _MMIO(0x4de4), | ||
412 | _MMIO(0x4dfc), | ||
413 | GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010) | ||
414 | _MMIO(0x7014), | ||
415 | HDC_CHICKEN0,//_MMIO(0x7300) | ||
416 | GEN8_HDC_CHICKEN1,//_MMIO(0x7304) | ||
417 | _MMIO(0x7700), | ||
418 | _MMIO(0x7704), | ||
419 | _MMIO(0x7708), | ||
420 | _MMIO(0x770c), | ||
421 | _MMIO(0xb110), | ||
422 | GEN8_L3SQCREG4,//_MMIO(0xb118) | ||
423 | _MMIO(0xe100), | ||
424 | _MMIO(0xe18c), | ||
425 | _MMIO(0xe48c), | ||
426 | _MMIO(0xe5f4), | ||
427 | }; | ||
428 | |||
429 | /* a simple bsearch */ | ||
430 | static inline bool in_whitelist(unsigned int reg) | ||
431 | { | ||
432 | int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); | ||
433 | i915_reg_t *array = force_nonpriv_white_list; | ||
434 | |||
435 | while (left < right) { | ||
436 | int mid = (left + right)/2; | ||
437 | |||
438 | if (reg > array[mid].reg) | ||
439 | left = mid + 1; | ||
440 | else if (reg < array[mid].reg) | ||
441 | right = mid; | ||
442 | else | ||
443 | return true; | ||
444 | } | ||
445 | return false; | ||
446 | } | ||
447 | |||
448 | static int force_nonpriv_write(struct intel_vgpu *vgpu, | ||
449 | unsigned int offset, void *p_data, unsigned int bytes) | ||
450 | { | ||
451 | u32 reg_nonpriv = *(u32 *)p_data; | ||
452 | int ret = -EINVAL; | ||
453 | |||
454 | if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) { | ||
455 | gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", | ||
456 | vgpu->id, offset, bytes); | ||
457 | return ret; | ||
458 | } | ||
459 | |||
460 | if (in_whitelist(reg_nonpriv)) { | ||
461 | ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, | ||
462 | bytes); | ||
463 | } else { | ||
464 | gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n", | ||
465 | vgpu->id, reg_nonpriv); | ||
466 | } | ||
467 | return ret; | ||
468 | } | ||
469 | |||
372 | static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 470 | static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
373 | void *p_data, unsigned int bytes) | 471 | void *p_data, unsigned int bytes) |
374 | { | 472 | { |
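force_nonpriv_write() only lets the guest redirect FORCE_NONPRIV registers to whitelisted offsets, and because the list is kept sorted a plain binary search is enough. A standalone sketch of that lookup with illustrative offsets:

#include <stdbool.h>
#include <stdio.h>

static const unsigned int whitelist[] = {
        0x20ec, 0x2248, 0x2580, 0x2690, 0x7010, 0xb118, 0xe100,
};

/* Binary search over the sorted whitelist. */
static bool in_whitelist(unsigned int reg)
{
        int left = 0, right = (int)(sizeof(whitelist) / sizeof(whitelist[0]));

        while (left < right) {
                int mid = left + (right - left) / 2;

                if (reg > whitelist[mid])
                        left = mid + 1;
                else if (reg < whitelist[mid])
                        right = mid;
                else
                        return true;
        }
        return false;
}

int main(void)
{
        printf("0x2580 allowed: %d\n", in_whitelist(0x2580));
        printf("0x1234 allowed: %d\n", in_whitelist(0x1234));
        return 0;
}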
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
1001 | if (invalid_read) | 1099 | if (invalid_read) |
1002 | gvt_err("invalid pvinfo read: [%x:%x] = %x\n", | 1100 | gvt_err("invalid pvinfo read: [%x:%x] = %x\n", |
1003 | offset, bytes, *(u32 *)p_data); | 1101 | offset, bytes, *(u32 *)p_data); |
1102 | vgpu->pv_notified = true; | ||
1004 | return 0; | 1103 | return 0; |
1005 | } | 1104 | } |
1006 | 1105 | ||
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) | |||
1039 | char vmid_str[20]; | 1138 | char vmid_str[20]; |
1040 | char display_ready_str[20]; | 1139 | char display_ready_str[20]; |
1041 | 1140 | ||
1042 | snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready); | 1141 | snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready); |
1043 | env[0] = display_ready_str; | 1142 | env[0] = display_ready_str; |
1044 | 1143 | ||
1045 | snprintf(vmid_str, 20, "VMID=%d", vgpu->id); | 1144 | snprintf(vmid_str, 20, "VMID=%d", vgpu->id); |
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1078 | case _vgtif_reg(execlist_context_descriptor_lo): | 1177 | case _vgtif_reg(execlist_context_descriptor_lo): |
1079 | case _vgtif_reg(execlist_context_descriptor_hi): | 1178 | case _vgtif_reg(execlist_context_descriptor_hi): |
1080 | break; | 1179 | break; |
1180 | case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): | ||
1181 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); | ||
1182 | break; | ||
1081 | default: | 1183 | default: |
1082 | gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", | 1184 | gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", |
1083 | offset, bytes, data); | 1185 | offset, bytes, data); |
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1203 | u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); | 1305 | u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); |
1204 | 1306 | ||
1205 | switch (cmd) { | 1307 | switch (cmd) { |
1206 | case 0x6: | 1308 | case GEN9_PCODE_READ_MEM_LATENCY: |
1207 | /** | 1309 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { |
1208 | * "Read memory latency" command on gen9. | 1310 | /** |
1209 | * Below memory latency values are read | 1311 | * "Read memory latency" command on gen9. |
1210 | * from skylake platform. | 1312 | * Below memory latency values are read |
1211 | */ | 1313 | * from skylake platform. |
1212 | if (!*data0) | 1314 | */ |
1213 | *data0 = 0x1e1a1100; | 1315 | if (!*data0) |
1214 | else | 1316 | *data0 = 0x1e1a1100; |
1215 | *data0 = 0x61514b3d; | 1317 | else |
1318 | *data0 = 0x61514b3d; | ||
1319 | } | ||
1320 | break; | ||
1321 | case SKL_PCODE_CDCLK_CONTROL: | ||
1322 | if (IS_SKYLAKE(vgpu->gvt->dev_priv)) | ||
1323 | *data0 = SKL_CDCLK_READY_FOR_CHANGE; | ||
1216 | break; | 1324 | break; |
1217 | case 0x5: | 1325 | case GEN6_PCODE_READ_RC6VIDS: |
1218 | *data0 |= 0x1; | 1326 | *data0 |= 0x1; |
1219 | break; | 1327 | break; |
1220 | } | 1328 | } |
1221 | 1329 | ||
1222 | gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", | 1330 | gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", |
1223 | vgpu->id, value, *data0); | 1331 | vgpu->id, value, *data0); |
1224 | 1332 | /** | |
1225 | value &= ~(1 << 31); | 1333 | * PCODE_READY clear means ready for pcode read/write, |
1334 | * PCODE_ERROR_MASK clear means no error happened. In GVT-g we | ||
1335 | * always emulate as pcode read/write success and ready for access | ||
1336 | * anytime, since we don't touch real physical registers here. | ||
1337 | */ | ||
1338 | value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK); | ||
1226 | return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); | 1339 | return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); |
1227 | } | 1340 | } |
1228 | 1341 | ||
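The mailbox_write() rework dispatches on the PCODE command, fills the data register with a canned response, then clears the READY and ERROR bits so the guest sees a completed request without any real hardware access. A standalone sketch of that shape; bit positions and command values below are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define PCODE_READY             (1u << 31)
#define PCODE_ERROR_MASK        0xffu
#define CMD_READ_MEM_LATENCY    0x6
#define CMD_READ_RC6VIDS        0x5

/* Emulate one mailbox transaction: answer the command, report success. */
static uint32_t emulate_mailbox(uint32_t cmd_reg, uint32_t *data)
{
        switch (cmd_reg & 0xff) {
        case CMD_READ_MEM_LATENCY:
                *data = *data ? 0x61514b3d : 0x1e1a1100;  /* canned latency values */
                break;
        case CMD_READ_RC6VIDS:
                *data |= 0x1;
                break;
        }
        /* not busy, no error */
        return cmd_reg & ~(PCODE_READY | PCODE_ERROR_MASK);
}

int main(void)
{
        uint32_t data = 0;
        uint32_t cmd = PCODE_READY | CMD_READ_MEM_LATENCY;

        cmd = emulate_mailbox(cmd, &data);
        printf("cmd=0x%08x data=0x%08x\n", cmd, data);
        return 0;
}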
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1318 | bool enable_execlist; | 1431 | bool enable_execlist; |
1319 | 1432 | ||
1320 | write_vreg(vgpu, offset, p_data, bytes); | 1433 | write_vreg(vgpu, offset, p_data, bytes); |
1434 | |||
1435 | /* When PPGTT mode is enabled, check whether the guest has | ||
1436 | * accessed pvinfo; if not, treat it as a non-GVT-g-aware guest | ||
1437 | * and stop emulating its cfg space, MMIO, GTT, etc. | ||
1438 | */ | ||
1439 | if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || | ||
1440 | (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) | ||
1441 | && !vgpu->pv_notified) { | ||
1442 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); | ||
1443 | return 0; | ||
1444 | } | ||
1321 | if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) | 1445 | if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) |
1322 | || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { | 1446 | || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { |
1323 | enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); | 1447 | enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); |
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | |||
1400 | #define MMIO_GM(reg, d, r, w) \ | 1524 | #define MMIO_GM(reg, d, r, w) \ |
1401 | MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) | 1525 | MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) |
1402 | 1526 | ||
1527 | #define MMIO_GM_RDR(reg, d, r, w) \ | ||
1528 | MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w) | ||
1529 | |||
1403 | #define MMIO_RO(reg, d, f, rm, r, w) \ | 1530 | #define MMIO_RO(reg, d, f, rm, r, w) \ |
1404 | MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) | 1531 | MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) |
1405 | 1532 | ||
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | |||
1419 | #define MMIO_RING_GM(prefix, d, r, w) \ | 1546 | #define MMIO_RING_GM(prefix, d, r, w) \ |
1420 | MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) | 1547 | MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) |
1421 | 1548 | ||
1549 | #define MMIO_RING_GM_RDR(prefix, d, r, w) \ | ||
1550 | MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w) | ||
1551 | |||
1422 | #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ | 1552 | #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ |
1423 | MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) | 1553 | MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) |
1424 | 1554 | ||
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1427 | struct drm_i915_private *dev_priv = gvt->dev_priv; | 1557 | struct drm_i915_private *dev_priv = gvt->dev_priv; |
1428 | int ret; | 1558 | int ret; |
1429 | 1559 | ||
1430 | MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); | 1560 | MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, |
1561 | intel_vgpu_reg_imr_handler); | ||
1431 | 1562 | ||
1432 | MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); | 1563 | MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); |
1433 | MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); | 1564 | MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); |
1434 | MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); | 1565 | MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); |
1435 | MMIO_D(SDEISR, D_ALL); | 1566 | MMIO_D(SDEISR, D_ALL); |
1436 | 1567 | ||
1437 | MMIO_RING_D(RING_HWSTAM, D_ALL); | 1568 | MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1438 | 1569 | ||
1439 | MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); | 1570 | MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); |
1440 | MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); | 1571 | MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); |
1441 | MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); | 1572 | MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); |
1442 | MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); | 1573 | MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); |
1443 | 1574 | ||
1444 | #define RING_REG(base) (base + 0x28) | 1575 | #define RING_REG(base) (base + 0x28) |
1445 | MMIO_RING_D(RING_REG, D_ALL); | 1576 | MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1446 | #undef RING_REG | 1577 | #undef RING_REG |
1447 | 1578 | ||
1448 | #define RING_REG(base) (base + 0x134) | 1579 | #define RING_REG(base) (base + 0x134) |
1449 | MMIO_RING_D(RING_REG, D_ALL); | 1580 | MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1450 | #undef RING_REG | 1581 | #undef RING_REG |
1451 | 1582 | ||
1452 | MMIO_GM(0x2148, D_ALL, NULL, NULL); | 1583 | MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); |
1453 | MMIO_GM(CCID, D_ALL, NULL, NULL); | 1584 | MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); |
1454 | MMIO_GM(0x12198, D_ALL, NULL, NULL); | 1585 | MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL); |
1455 | MMIO_D(GEN7_CXT_SIZE, D_ALL); | 1586 | MMIO_D(GEN7_CXT_SIZE, D_ALL); |
1456 | 1587 | ||
1457 | MMIO_RING_D(RING_TAIL, D_ALL); | 1588 | MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1458 | MMIO_RING_D(RING_HEAD, D_ALL); | 1589 | MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1459 | MMIO_RING_D(RING_CTL, D_ALL); | 1590 | MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1460 | MMIO_RING_D(RING_ACTHD, D_ALL); | 1591 | MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1461 | MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); | 1592 | MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); |
1462 | 1593 | ||
1463 | /* RING MODE */ | 1594 | /* RING MODE */ |
1464 | #define RING_REG(base) (base + 0x29c) | 1595 | #define RING_REG(base) (base + 0x29c) |
1465 | MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write); | 1596 | MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, |
1597 | ring_mode_mmio_write); | ||
1466 | #undef RING_REG | 1598 | #undef RING_REG |
1467 | 1599 | ||
1468 | MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL); | 1600 | MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1469 | MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL); | 1601 | NULL, NULL); |
1602 | MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | ||
1603 | NULL, NULL); | ||
1470 | MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, | 1604 | MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, |
1471 | ring_timestamp_mmio_read, NULL); | 1605 | ring_timestamp_mmio_read, NULL); |
1472 | MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, | 1606 | MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, |
1473 | ring_timestamp_mmio_read, NULL); | 1607 | ring_timestamp_mmio_read, NULL); |
1474 | 1608 | ||
1475 | MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); | 1609 | MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1476 | MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); | 1610 | MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1611 | NULL, NULL); | ||
1477 | MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1612 | MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1478 | 1613 | MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | |
1479 | MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); | 1614 | MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1480 | MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); | 1615 | |
1481 | MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL); | 1616 | MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1482 | MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL); | 1617 | MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1483 | MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); | 1618 | MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1484 | MMIO_D(GAM_ECOCHK, D_ALL); | 1619 | MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1485 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL); | 1620 | MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1621 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
1622 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | ||
1623 | NULL, NULL); | ||
1486 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1624 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1487 | MMIO_D(0x9030, D_ALL); | 1625 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1488 | MMIO_D(0x20a0, D_ALL); | 1626 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1489 | MMIO_D(0x2420, D_ALL); | 1627 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1490 | MMIO_D(0x2430, D_ALL); | 1628 | MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1491 | MMIO_D(0x2434, D_ALL); | 1629 | MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1492 | MMIO_D(0x2438, D_ALL); | 1630 | MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1493 | MMIO_D(0x243c, D_ALL); | 1631 | MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1494 | MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); | 1632 | MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1495 | MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1633 | MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1496 | MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); | 1634 | MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1497 | 1635 | ||
1498 | /* display */ | 1636 | /* display */ |
1499 | MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); | 1637 | MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); |
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
2022 | MMIO_D(FORCEWAKE_ACK, D_ALL); | 2160 | MMIO_D(FORCEWAKE_ACK, D_ALL); |
2023 | MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); | 2161 | MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); |
2024 | MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); | 2162 | MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); |
2025 | MMIO_D(GTFIFODBG, D_ALL); | 2163 | MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2026 | MMIO_D(GTFIFOCTL, D_ALL); | 2164 | MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2027 | MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); | 2165 | MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); |
2028 | MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); | 2166 | MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); |
2029 | MMIO_D(ECOBUS, D_ALL); | 2167 | MMIO_D(ECOBUS, D_ALL); |
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
2080 | 2218 | ||
2081 | MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); | 2219 | MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); |
2082 | 2220 | ||
2083 | MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL); | 2221 | MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW); |
2084 | MMIO_D(GEN6_PCODE_DATA, D_ALL); | 2222 | MMIO_D(GEN6_PCODE_DATA, D_ALL); |
2085 | MMIO_D(0x13812c, D_ALL); | 2223 | MMIO_D(0x13812c, D_ALL); |
2086 | MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); | 2224 | MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); |
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
2159 | MMIO_D(0x1a054, D_ALL); | 2297 | MMIO_D(0x1a054, D_ALL); |
2160 | 2298 | ||
2161 | MMIO_D(0x44070, D_ALL); | 2299 | MMIO_D(0x44070, D_ALL); |
2162 | 2300 | MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL); | |
2163 | MMIO_D(0x215c, D_HSW_PLUS); | ||
2164 | MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); | 2301 | MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2165 | MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); | 2302 | MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2166 | MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); | 2303 | MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2167 | MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); | 2304 | MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2168 | 2305 | ||
2169 | MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); | 2306 | MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL); |
2170 | MMIO_D(GEN7_OACONTROL, D_HSW); | 2307 | MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL); |
2171 | MMIO_D(0x2b00, D_BDW_PLUS); | 2308 | MMIO_D(0x2b00, D_BDW_PLUS); |
2172 | MMIO_D(0x2360, D_BDW_PLUS); | 2309 | MMIO_D(0x2360, D_BDW_PLUS); |
2173 | MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); | 2310 | MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2174 | MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL); | 2311 | MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2175 | MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL); | 2312 | MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2176 | 2313 | ||
2177 | MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2314 | MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2178 | MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2315 | MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2179 | MMIO_D(BCS_SWCTRL, D_ALL); | 2316 | MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
2180 | 2317 | ||
2181 | MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2318 | MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2182 | MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2319 | MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2183 | MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2320 | MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2184 | MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2321 | MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2185 | MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2322 | MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2186 | MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2323 | MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2187 | MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2324 | MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2188 | MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2325 | MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2189 | MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2326 | MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2190 | MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2327 | MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2191 | MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); | 2328 | MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); |
2192 | MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); | 2329 | MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); |
2193 | MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); | 2330 | MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); |
2194 | MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); | 2331 | MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); |
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
2196 | MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); | 2333 | MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); |
2197 | MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2334 | MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2198 | 2335 | ||
2336 | MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | ||
2337 | MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); | ||
2338 | MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
2339 | MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
2340 | MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
2341 | MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
2342 | MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL); | ||
2343 | MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2344 | MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2345 | MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2346 | MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2199 | return 0; | 2347 | return 0; |
2200 | } | 2348 | } |
2201 | 2349 | ||
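Most of the init_generic_mmio_info() changes replace MMIO_D/MMIO_GM entries with the *_DFH/*_RDR variants so the registers carry F_CMD_ACCESS, letting the command parser accept LRI writes to them. A standalone sketch of a flag-carrying register table and the kind of lookup it enables; the flag bits and offsets here are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define F_CMD_ACCESS    (1u << 0)
#define F_MODE_MASK     (1u << 1)

struct mmio_info {
        unsigned int offset;
        unsigned int flags;
};

static const struct mmio_info table[] = {
        { 0x20a0, F_CMD_ACCESS },
        { 0x229c, F_MODE_MASK | F_CMD_ACCESS },
        { 0x44070, 0 },
};

/* Only registers flagged F_CMD_ACCESS may be written from command buffers. */
static bool cmd_access_allowed(unsigned int offset)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].offset == offset)
                        return table[i].flags & F_CMD_ACCESS;
        return false;
}

int main(void)
{
        printf("0x20a0: %d, 0x44070: %d\n",
               cmd_access_allowed(0x20a0), cmd_access_allowed(0x44070));
        return 0;
}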
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2204 | struct drm_i915_private *dev_priv = gvt->dev_priv; | 2352 | struct drm_i915_private *dev_priv = gvt->dev_priv; |
2205 | int ret; | 2353 | int ret; |
2206 | 2354 | ||
2207 | MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, | 2355 | MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL, |
2208 | intel_vgpu_reg_imr_handler); | 2356 | intel_vgpu_reg_imr_handler); |
2209 | 2357 | ||
2210 | MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); | 2358 | MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); |
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2269 | MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, | 2417 | MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, |
2270 | intel_vgpu_reg_master_irq_handler); | 2418 | intel_vgpu_reg_master_irq_handler); |
2271 | 2419 | ||
2272 | MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2420 | MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, |
2273 | MMIO_D(0x1c134, D_BDW_PLUS); | 2421 | F_CMD_ACCESS, NULL, NULL); |
2274 | 2422 | MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | |
2275 | MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2423 | |
2276 | MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2424 | MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, |
2277 | MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); | 2425 | NULL, NULL); |
2278 | MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2426 | MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, |
2279 | MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2427 | F_CMD_ACCESS, NULL, NULL); |
2280 | MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2428 | MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); |
2281 | MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write); | 2429 | MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, |
2282 | MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, | 2430 | NULL, NULL); |
2283 | NULL, NULL); | 2431 | MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, |
2284 | MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, | 2432 | F_CMD_ACCESS, NULL, NULL); |
2285 | NULL, NULL); | 2433 | MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS, |
2434 | F_CMD_ACCESS, NULL, NULL); | ||
2435 | MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, | ||
2436 | ring_mode_mmio_write); | ||
2437 | MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2438 | F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | ||
2439 | MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2440 | F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | ||
2286 | MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, | 2441 | MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, |
2287 | ring_timestamp_mmio_read, NULL); | 2442 | ring_timestamp_mmio_read, NULL); |
2288 | 2443 | ||
2289 | MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); | 2444 | MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2290 | 2445 | ||
2291 | #define RING_REG(base) (base + 0xd0) | 2446 | #define RING_REG(base) (base + 0xd0) |
2292 | MMIO_RING_F(RING_REG, 4, F_RO, 0, | 2447 | MMIO_RING_F(RING_REG, 4, F_RO, 0, |
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2303 | #undef RING_REG | 2458 | #undef RING_REG |
2304 | 2459 | ||
2305 | #define RING_REG(base) (base + 0x234) | 2460 | #define RING_REG(base) (base + 0x234) |
2306 | MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); | 2461 | MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, |
2307 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL); | 2462 | NULL, NULL); |
2463 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0, | ||
2464 | ~0LL, D_BDW_PLUS, NULL, NULL); | ||
2308 | #undef RING_REG | 2465 | #undef RING_REG |
2309 | 2466 | ||
2310 | #define RING_REG(base) (base + 0x244) | 2467 | #define RING_REG(base) (base + 0x244) |
2311 | MMIO_RING_D(RING_REG, D_BDW_PLUS); | 2468 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2312 | MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS); | 2469 | MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, |
2470 | NULL, NULL); | ||
2313 | #undef RING_REG | 2471 | #undef RING_REG |
2314 | 2472 | ||
2315 | #define RING_REG(base) (base + 0x370) | 2473 | #define RING_REG(base) (base + 0x370) |
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2331 | MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); | 2489 | MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); |
2332 | MMIO_D(0x1c054, D_BDW_PLUS); | 2490 | MMIO_D(0x1c054, D_BDW_PLUS); |
2333 | 2491 | ||
2492 | MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); | ||
2493 | |||
2334 | MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); | 2494 | MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); |
2335 | MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); | 2495 | MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); |
2336 | 2496 | ||
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2341 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); | 2501 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); |
2342 | #undef RING_REG | 2502 | #undef RING_REG |
2343 | 2503 | ||
2344 | MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); | 2504 | MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); |
2345 | MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); | 2505 | MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); |
2346 | 2506 | ||
2347 | MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2507 | MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2348 | 2508 | ||
2349 | MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); | 2509 | MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); |
2350 | MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); | 2510 | MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); |
2351 | MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW); | 2511 | MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); |
2352 | 2512 | ||
2353 | MMIO_D(WM_MISC, D_BDW); | 2513 | MMIO_D(WM_MISC, D_BDW); |
2354 | MMIO_D(BDW_EDP_PSR_BASE, D_BDW); | 2514 | MMIO_D(BDW_EDP_PSR_BASE, D_BDW); |
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2362 | MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); | 2522 | MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); |
2363 | MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); | 2523 | MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); |
2364 | 2524 | ||
2365 | MMIO_D(0xfdc, D_BDW); | 2525 | MMIO_D(0xfdc, D_BDW_PLUS); |
2366 | MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2526 | MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
2367 | MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); | 2527 | NULL, NULL); |
2368 | MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); | 2528 | MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
2529 | NULL, NULL); | ||
2530 | MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2369 | 2531 | ||
2370 | MMIO_D(0xb1f0, D_BDW); | 2532 | MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2371 | MMIO_D(0xb1c0, D_BDW); | 2533 | MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2372 | MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2534 | MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2373 | MMIO_D(0xb100, D_BDW); | 2535 | MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2374 | MMIO_D(0xb10c, D_BDW); | 2536 | MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2375 | MMIO_D(0xb110, D_BDW); | 2537 | MMIO_D(0xb110, D_BDW); |
2376 | 2538 | ||
2377 | MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2539 | MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, |
2378 | MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2540 | NULL, force_nonpriv_write); |
2379 | MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2541 | |
2380 | MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2542 | MMIO_D(0x22040, D_BDW_PLUS); |
2543 | MMIO_D(0x44484, D_BDW_PLUS); | ||
2544 | MMIO_D(0x4448c, D_BDW_PLUS); | ||
2381 | 2545 | ||
2382 | MMIO_D(0x83a4, D_BDW); | 2546 | MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2383 | MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); | 2547 | MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); |
2384 | 2548 | ||
2385 | MMIO_D(0x8430, D_BDW); | 2549 | MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2386 | 2550 | ||
2387 | MMIO_D(0x110000, D_BDW_PLUS); | 2551 | MMIO_D(0x110000, D_BDW_PLUS); |
2388 | 2552 | ||
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2394 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2558 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2395 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2559 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2396 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2560 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2397 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); | 2561 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2398 | 2562 | ||
2399 | MMIO_D(0x2248, D_BDW); | 2563 | MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL); |
2400 | 2564 | ||
2565 | MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2566 | MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2567 | MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2568 | MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2569 | MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2570 | MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2571 | MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2572 | MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2573 | MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2401 | return 0; | 2574 | return 0; |
2402 | } | 2575 | } |
2403 | 2576 | ||
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2420 | MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); | 2593 | MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); |
2421 | MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); | 2594 | MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); |
2422 | 2595 | ||
2423 | MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write); | ||
2424 | MMIO_D(0xa210, D_SKL_PLUS); | 2596 | MMIO_D(0xa210, D_SKL_PLUS); |
2425 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2597 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
2426 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2598 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2578 | MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); | 2750 | MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); |
2579 | 2751 | ||
2580 | MMIO_D(0xd08, D_SKL); | 2752 | MMIO_D(0xd08, D_SKL); |
2581 | MMIO_D(0x20e0, D_SKL); | 2753 | MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); |
2582 | MMIO_D(0x20ec, D_SKL); | 2754 | MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2583 | 2755 | ||
2584 | /* TRTT */ | 2756 | /* TRTT */ |
2585 | MMIO_D(0x4de0, D_SKL); | 2757 | MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2586 | MMIO_D(0x4de4, D_SKL); | 2758 | MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2587 | MMIO_D(0x4de8, D_SKL); | 2759 | MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2588 | MMIO_D(0x4dec, D_SKL); | 2760 | MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2589 | MMIO_D(0x4df0, D_SKL); | 2761 | MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2590 | MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); | 2762 | MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); |
2591 | MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); | 2763 | MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); |
2592 | 2764 | ||
2593 | MMIO_D(0x45008, D_SKL); | 2765 | MMIO_D(0x45008, D_SKL); |
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2611 | MMIO_D(0x65f08, D_SKL); | 2783 | MMIO_D(0x65f08, D_SKL); |
2612 | MMIO_D(0x320f0, D_SKL); | 2784 | MMIO_D(0x320f0, D_SKL); |
2613 | 2785 | ||
2614 | MMIO_D(_REG_VCS2_EXCC, D_SKL); | 2786 | MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); |
2615 | MMIO_D(0x70034, D_SKL); | 2787 | MMIO_D(0x70034, D_SKL); |
2616 | MMIO_D(0x71034, D_SKL); | 2788 | MMIO_D(0x71034, D_SKL); |
2617 | MMIO_D(0x72034, D_SKL); | 2789 | MMIO_D(0x72034, D_SKL); |
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2624 | MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); | 2796 | MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); |
2625 | 2797 | ||
2626 | MMIO_D(0x44500, D_SKL); | 2798 | MMIO_D(0x44500, D_SKL); |
2799 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2800 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, | ||
2801 | NULL, NULL); | ||
2627 | return 0; | 2802 | return 0; |
2628 | } | 2803 | } |
2629 | 2804 | ||
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0f7f5d97f582..84d801638ede 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, | |||
96 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; | 96 | struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; |
97 | dma_addr_t daddr; | 97 | dma_addr_t daddr; |
98 | 98 | ||
99 | page = pfn_to_page(pfn); | 99 | if (unlikely(!pfn_valid(pfn))) |
100 | if (is_error_page(page)) | ||
101 | return -EFAULT; | 100 | return -EFAULT; |
102 | 101 | ||
102 | page = pfn_to_page(pfn); | ||
103 | daddr = dma_map_page(dev, page, 0, PAGE_SIZE, | 103 | daddr = dma_map_page(dev, page, 0, PAGE_SIZE, |
104 | PCI_DMA_BIDIRECTIONAL); | 104 | PCI_DMA_BIDIRECTIONAL); |
105 | if (dma_mapping_error(dev, daddr)) | 105 | if (dma_mapping_error(dev, daddr)) |
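The kvmgt.c fix checks pfn_valid() before calling pfn_to_page(), since converting an invalid pfn first and only then testing the result with is_error_page() is not safe. A standalone sketch of that validate-before-convert ordering, with a small array standing in for the kernel's page map:

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16
static int page_table[NPAGES];

static bool pfn_valid_sketch(unsigned long pfn)
{
        return pfn < NPAGES;
}

static int *pfn_to_page_sketch(unsigned long pfn)
{
        return &page_table[pfn];        /* only safe after pfn_valid_sketch() */
}

static int map_page(unsigned long pfn)
{
        if (!pfn_valid_sketch(pfn))     /* reject before dereferencing */
                return -1;              /* mirrors returning -EFAULT */
        *pfn_to_page_sketch(pfn) = 1;
        return 0;
}

int main(void)
{
        printf("valid pfn: %d, bogus pfn: %d\n", map_page(3), map_page(1000));
        return 0;
}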
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, | |||
295 | return 0; | 295 | return 0; |
296 | 296 | ||
297 | return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" | 297 | return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" |
298 | "fence: %d\n", | 298 | "fence: %d\nresolution: %s\n", |
299 | BYTES_TO_MB(type->low_gm_size), | 299 | BYTES_TO_MB(type->low_gm_size), |
300 | BYTES_TO_MB(type->high_gm_size), | 300 | BYTES_TO_MB(type->high_gm_size), |
301 | type->fence); | 301 | type->fence, vgpu_edid_str(type->resolution)); |
302 | } | 302 | } |
303 | 303 | ||
304 | static MDEV_TYPE_ATTR_RO(available_instances); | 304 | static MDEV_TYPE_ATTR_RO(available_instances); |
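description_show() now appends the vGPU type's resolution string (via vgpu_edid_str()) to the sysfs description. A standalone sketch of the resulting output format; the struct layout and sample values are illustrative.

#include <stdio.h>

struct vgpu_type {
        unsigned int low_gm_mb, high_gm_mb, fence;
        const char *resolution;
};

/* Format the sysfs description, including the new resolution field. */
static int describe(char *buf, size_t len, const struct vgpu_type *t)
{
        return snprintf(buf, len,
                        "low_gm_size: %uMB\nhigh_gm_size: %uMB\n"
                        "fence: %u\nresolution: %s\n",
                        t->low_gm_mb, t->high_gm_mb, t->fence, t->resolution);
}

int main(void)
{
        struct vgpu_type t = { 64, 384, 4, "1920x1200" };
        char buf[128];

        describe(buf, sizeof(buf), &t);
        fputs(buf, stdout);
        return 0;
}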
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 4df078bc5d04..60b698cb8365 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) | |||
57 | (reg >= gvt->device_info.gtt_start_offset \ | 57 | (reg >= gvt->device_info.gtt_start_offset \ |
58 | && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) | 58 | && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) |
59 | 59 | ||
60 | static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, | ||
61 | void *p_data, unsigned int bytes, bool read) | ||
62 | { | ||
63 | struct intel_gvt *gvt = NULL; | ||
64 | void *pt = NULL; | ||
65 | unsigned int offset = 0; | ||
66 | |||
67 | if (!vgpu || !p_data) | ||
68 | return; | ||
69 | |||
70 | gvt = vgpu->gvt; | ||
71 | mutex_lock(&gvt->lock); | ||
72 | offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); | ||
73 | if (reg_is_mmio(gvt, offset)) { | ||
74 | if (read) | ||
75 | intel_vgpu_default_mmio_read(vgpu, offset, p_data, | ||
76 | bytes); | ||
77 | else | ||
78 | intel_vgpu_default_mmio_write(vgpu, offset, p_data, | ||
79 | bytes); | ||
80 | } else if (reg_is_gtt(gvt, offset) && | ||
81 | vgpu->gtt.ggtt_mm->virtual_page_table) { | ||
82 | offset -= gvt->device_info.gtt_start_offset; | ||
83 | pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset; | ||
84 | if (read) | ||
85 | memcpy(p_data, pt, bytes); | ||
86 | else | ||
87 | memcpy(pt, p_data, bytes); | ||
88 | |||
89 | } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { | ||
90 | struct intel_vgpu_guest_page *gp; | ||
91 | |||
92 | /* Since failsafe mode is entered early during guest boot, the | ||
93 | * guest may not have had a chance to set up its PPGTT tables, | ||
94 | * so there should not be any write-protected pages. Keep the | ||
95 | * wp-related code here in case it needs handling in the future. | ||
96 | */ | ||
97 | gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); | ||
98 | if (gp) { | ||
99 | /* remove write protection to prevent future traps */ | ||
100 | intel_vgpu_clean_guest_page(vgpu, gp); | ||
101 | if (read) | ||
102 | intel_gvt_hypervisor_read_gpa(vgpu, pa, | ||
103 | p_data, bytes); | ||
104 | else | ||
105 | intel_gvt_hypervisor_write_gpa(vgpu, pa, | ||
106 | p_data, bytes); | ||
107 | } | ||
108 | } | ||
109 | mutex_unlock(&gvt->lock); | ||
110 | } | ||
111 | |||
60 | /** | 112 | /** |
61 | * intel_vgpu_emulate_mmio_read - emulate MMIO read | 113 | * intel_vgpu_emulate_mmio_read - emulate MMIO read |
62 | * @vgpu: a vGPU | 114 | * @vgpu: a vGPU |
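failsafe_emulate_mmio_rw() classifies the faulting address: plain MMIO goes through the default read/write emulation, GTT-range accesses are copied straight to or from the virtual page table, and anything else is dropped. A standalone sketch of that dispatch, with flat arrays standing in for the vreg file and virtual GTT; sizes and layout are illustrative.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MMIO_SIZE       0x1000u
#define GTT_START       0x1000u
#define GTT_SIZE        0x1000u

static uint8_t vreg[MMIO_SIZE];         /* stands in for the vgpu vregs */
static uint8_t virtual_gtt[GTT_SIZE];   /* stands in for virtual_page_table */

static void failsafe_rw(uint32_t offset, void *data, size_t bytes, int read)
{
        if (offset + bytes <= MMIO_SIZE) {
                /* default MMIO emulation: mirror the virtual register file */
                if (read)
                        memcpy(data, vreg + offset, bytes);
                else
                        memcpy(vreg + offset, data, bytes);
        } else if (offset >= GTT_START && offset + bytes <= GTT_START + GTT_SIZE) {
                uint8_t *pt = virtual_gtt + (offset - GTT_START);

                if (read)
                        memcpy(data, pt, bytes);
                else
                        memcpy(pt, data, bytes);
        }
        /* anything else is silently dropped in failsafe mode */
}

int main(void)
{
        uint32_t val = 0xdeadbeef, out = 0;

        failsafe_rw(0x100, &val, sizeof(val), 0);
        failsafe_rw(0x100, &out, sizeof(out), 1);
        printf("readback: 0x%08x\n", out);
        return 0;
}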
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
75 | unsigned int offset = 0; | 127 | unsigned int offset = 0; |
76 | int ret = -EINVAL; | 128 | int ret = -EINVAL; |
77 | 129 | ||
130 | |||
131 | if (vgpu->failsafe) { | ||
132 | failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); | ||
133 | return 0; | ||
134 | } | ||
78 | mutex_lock(&gvt->lock); | 135 | mutex_lock(&gvt->lock); |
79 | 136 | ||
80 | if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { | 137 | if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { |
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, | |||
188 | u32 old_vreg = 0, old_sreg = 0; | 245 | u32 old_vreg = 0, old_sreg = 0; |
189 | int ret = -EINVAL; | 246 | int ret = -EINVAL; |
190 | 247 | ||
248 | if (vgpu->failsafe) { | ||
249 | failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
191 | mutex_lock(&gvt->lock); | 253 | mutex_lock(&gvt->lock); |
192 | 254 | ||
193 | if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { | 255 | if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { |
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, | |||
236 | 298 | ||
237 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); | 299 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); |
238 | if (!mmio && !vgpu->mmio.disable_warn_untrack) | 300 | if (!mmio && !vgpu->mmio.disable_warn_untrack) |
239 | gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n", | 301 | gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n", |
240 | vgpu->id, offset, bytes, *(u32 *)p_data); | 302 | vgpu->id, offset, bytes, *(u32 *)p_data); |
241 | 303 | ||
242 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { | 304 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { |
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) | |||
322 | 384 | ||
323 | /* set the bit 0:2(Core C-State ) to C0 */ | 385 | /* set the bit 0:2(Core C-State ) to C0 */ |
324 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; | 386 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; |
387 | |||
388 | vgpu->mmio.disable_warn_untrack = false; | ||
325 | } | 389 | } |
326 | 390 | ||
327 | /** | 391 | /** |
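For orientation, the failsafe path added above short-circuits normal emulation: once vgpu->failsafe is set, accesses are serviced by the default MMIO handlers or copied straight to/from the shadow GGTT, and the caller returns 0 unconditionally. Below is a minimal sketch of that dispatch shape; the demo_* names and the struct layout are invented for illustration and are not the in-tree types.

#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_vgpu {
	struct mutex lock;
	bool failsafe;
	u8 *mmio_defaults;	/* flat backing store for default MMIO */
	void *gtt_shadow;	/* plain memory backing the virtual GGTT */
	u64 mmio_size;
	u64 gtt_start;
	u64 gtt_size;
};

static void demo_failsafe_rw(struct demo_vgpu *vgpu, u64 offset,
			     void *data, unsigned int bytes, bool read)
{
	mutex_lock(&vgpu->lock);
	if (offset < vgpu->mmio_size) {
		/* default MMIO handling: mirror into the flat table */
		if (read)
			memcpy(data, vgpu->mmio_defaults + offset, bytes);
		else
			memcpy(vgpu->mmio_defaults + offset, data, bytes);
	} else if (offset >= vgpu->gtt_start &&
		   offset < vgpu->gtt_start + vgpu->gtt_size) {
		/* GGTT range: operate directly on the shadow table */
		void *pt = vgpu->gtt_shadow + (offset - vgpu->gtt_start);

		if (read)
			memcpy(data, pt, bytes);
		else
			memcpy(pt, data, bytes);
	}
	mutex_unlock(&vgpu->lock);
}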
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d9fb41ab7119..5d1caf9daba9 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) | 28 | static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) |
29 | { | 29 | { |
30 | void __iomem *host_va = vgpu->gvt->opregion.opregion_va; | ||
31 | u8 *buf; | 30 | u8 *buf; |
32 | int i; | 31 | int i; |
33 | 32 | ||
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) | |||
43 | if (!vgpu_opregion(vgpu)->va) | 42 | if (!vgpu_opregion(vgpu)->va) |
44 | return -ENOMEM; | 43 | return -ENOMEM; |
45 | 44 | ||
46 | memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, | 45 | memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, |
47 | INTEL_GVT_OPREGION_SIZE); | 46 | INTEL_GVT_OPREGION_SIZE); |
48 | 47 | ||
49 | for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) | 48 | for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) |
50 | vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; | 49 | vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; |
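For context on the opregion hunk above: memcpy_fromio() is required when the source is an __iomem mapping, whereas a plain memcpy() is correct once the host OpRegion is kept in ordinary kernel memory. An illustrative helper showing the two cases (the function name and the bool switch are made up for this sketch):

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_copy_opregion(void *dst, const void *host_va,
			       const void __iomem *host_io,
			       size_t len, bool source_is_iomem)
{
	if (source_is_iomem)
		memcpy_fromio(dst, host_io, len);	/* ioremap()ed source */
	else
		memcpy(dst, host_va, len);		/* regular memory */
}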
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 2b3a642284b6..73f052a4f424 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c | |||
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = { | |||
53 | {RCS, _MMIO(0x24d4), 0, false}, | 53 | {RCS, _MMIO(0x24d4), 0, false}, |
54 | {RCS, _MMIO(0x24d8), 0, false}, | 54 | {RCS, _MMIO(0x24d8), 0, false}, |
55 | {RCS, _MMIO(0x24dc), 0, false}, | 55 | {RCS, _MMIO(0x24dc), 0, false}, |
56 | {RCS, _MMIO(0x24e0), 0, false}, | ||
57 | {RCS, _MMIO(0x24e4), 0, false}, | ||
58 | {RCS, _MMIO(0x24e8), 0, false}, | ||
59 | {RCS, _MMIO(0x24ec), 0, false}, | ||
60 | {RCS, _MMIO(0x24f0), 0, false}, | ||
61 | {RCS, _MMIO(0x24f4), 0, false}, | ||
62 | {RCS, _MMIO(0x24f8), 0, false}, | ||
63 | {RCS, _MMIO(0x24fc), 0, false}, | ||
56 | {RCS, _MMIO(0x7004), 0xffff, true}, | 64 | {RCS, _MMIO(0x7004), 0xffff, true}, |
57 | {RCS, _MMIO(0x7008), 0xffff, true}, | 65 | {RCS, _MMIO(0x7008), 0xffff, true}, |
58 | {RCS, _MMIO(0x7000), 0xffff, true}, | 66 | {RCS, _MMIO(0x7000), 0xffff, true}, |
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = { | |||
76 | {RCS, _MMIO(0x24d4), 0, false}, | 84 | {RCS, _MMIO(0x24d4), 0, false}, |
77 | {RCS, _MMIO(0x24d8), 0, false}, | 85 | {RCS, _MMIO(0x24d8), 0, false}, |
78 | {RCS, _MMIO(0x24dc), 0, false}, | 86 | {RCS, _MMIO(0x24dc), 0, false}, |
87 | {RCS, _MMIO(0x24e0), 0, false}, | ||
88 | {RCS, _MMIO(0x24e4), 0, false}, | ||
89 | {RCS, _MMIO(0x24e8), 0, false}, | ||
90 | {RCS, _MMIO(0x24ec), 0, false}, | ||
91 | {RCS, _MMIO(0x24f0), 0, false}, | ||
92 | {RCS, _MMIO(0x24f4), 0, false}, | ||
93 | {RCS, _MMIO(0x24f8), 0, false}, | ||
94 | {RCS, _MMIO(0x24fc), 0, false}, | ||
79 | {RCS, _MMIO(0x7004), 0xffff, true}, | 95 | {RCS, _MMIO(0x7004), 0xffff, true}, |
80 | {RCS, _MMIO(0x7008), 0xffff, true}, | 96 | {RCS, _MMIO(0x7008), 0xffff, true}, |
81 | {RCS, _MMIO(0x7000), 0xffff, true}, | 97 | {RCS, _MMIO(0x7000), 0xffff, true}, |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d6b6d0efdd1a..d3a56c949025 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
139 | struct intel_vgpu_workload *workload = | 139 | struct intel_vgpu_workload *workload = |
140 | scheduler->current_workload[req->engine->id]; | 140 | scheduler->current_workload[req->engine->id]; |
141 | 141 | ||
142 | if (unlikely(!workload)) | ||
143 | return NOTIFY_OK; | ||
144 | |||
142 | switch (action) { | 145 | switch (action) { |
143 | case INTEL_CONTEXT_SCHEDULE_IN: | 146 | case INTEL_CONTEXT_SCHEDULE_IN: |
144 | intel_gvt_load_render_mmio(workload->vgpu, | 147 | intel_gvt_load_render_mmio(workload->vgpu, |
@@ -148,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
148 | case INTEL_CONTEXT_SCHEDULE_OUT: | 151 | case INTEL_CONTEXT_SCHEDULE_OUT: |
149 | intel_gvt_restore_render_mmio(workload->vgpu, | 152 | intel_gvt_restore_render_mmio(workload->vgpu, |
150 | workload->ring_id); | 153 | workload->ring_id); |
154 | /* If the status is -EINPROGRESS, this workload hit no | ||
155 | * issue during dispatch, so clear the status to zero | ||
156 | * for good once SCHEDULE_OUT arrives. If the status is | ||
157 | * NOT -EINPROGRESS, something went wrong during | ||
158 | * dispatch and the status must not be overwritten | ||
159 | * with zero. | ||
160 | */ | ||
161 | if (workload->status == -EINPROGRESS) | ||
162 | workload->status = 0; | ||
151 | atomic_set(&workload->shadow_ctx_active, 0); | 163 | atomic_set(&workload->shadow_ctx_active, 0); |
152 | break; | 164 | break; |
153 | default: | 165 | default: |
@@ -359,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
359 | workload = scheduler->current_workload[ring_id]; | 371 | workload = scheduler->current_workload[ring_id]; |
360 | vgpu = workload->vgpu; | 372 | vgpu = workload->vgpu; |
361 | 373 | ||
362 | if (!workload->status && !vgpu->resetting) { | 374 | /* For the workload w/ request, needs to wait for the context |
375 | * switch to make sure request is completed. | ||
376 | * For the workload w/o request, directly complete the workload. | ||
377 | */ | ||
378 | if (workload->req) { | ||
363 | wait_event(workload->shadow_ctx_status_wq, | 379 | wait_event(workload->shadow_ctx_status_wq, |
364 | !atomic_read(&workload->shadow_ctx_active)); | 380 | !atomic_read(&workload->shadow_ctx_active)); |
365 | 381 | ||
366 | update_guest_context(workload); | 382 | i915_gem_request_put(fetch_and_zero(&workload->req)); |
383 | |||
384 | if (!workload->status && !vgpu->resetting) { | ||
385 | update_guest_context(workload); | ||
367 | 386 | ||
368 | for_each_set_bit(event, workload->pending_events, | 387 | for_each_set_bit(event, workload->pending_events, |
369 | INTEL_GVT_EVENT_MAX) | 388 | INTEL_GVT_EVENT_MAX) |
370 | intel_vgpu_trigger_virtual_event(vgpu, event); | 389 | intel_vgpu_trigger_virtual_event(vgpu, event); |
390 | } | ||
371 | } | 391 | } |
372 | 392 | ||
373 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", | 393 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", |
@@ -397,7 +417,6 @@ static int workload_thread(void *priv) | |||
397 | int ring_id = p->ring_id; | 417 | int ring_id = p->ring_id; |
398 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 418 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
399 | struct intel_vgpu_workload *workload = NULL; | 419 | struct intel_vgpu_workload *workload = NULL; |
400 | long lret; | ||
401 | int ret; | 420 | int ret; |
402 | bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); | 421 | bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); |
403 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | 422 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
@@ -446,23 +465,24 @@ static int workload_thread(void *priv) | |||
446 | 465 | ||
447 | gvt_dbg_sched("ring id %d wait workload %p\n", | 466 | gvt_dbg_sched("ring id %d wait workload %p\n", |
448 | workload->ring_id, workload); | 467 | workload->ring_id, workload); |
449 | 468 | retry: | |
450 | lret = i915_wait_request(workload->req, | 469 | i915_wait_request(workload->req, |
451 | 0, MAX_SCHEDULE_TIMEOUT); | 470 | 0, MAX_SCHEDULE_TIMEOUT); |
452 | if (lret < 0) { | 471 | /* I915 has replay mechanism and a request will be replayed |
453 | workload->status = lret; | 472 | * if there is i915 reset. So the seqno will be updated anyway. |
454 | gvt_err("fail to wait workload, skip\n"); | 473 | * If the seqno is not updated yet after waiting, which means |
455 | } else { | 474 | * the replay may still be in progress and we can wait again. |
456 | workload->status = 0; | 475 | */ |
476 | if (!i915_gem_request_completed(workload->req)) { | ||
477 | gvt_dbg_sched("workload %p not completed, wait again\n", | ||
478 | workload); | ||
479 | goto retry; | ||
457 | } | 480 | } |
458 | 481 | ||
459 | complete: | 482 | complete: |
460 | gvt_dbg_sched("will complete workload %p, status: %d\n", | 483 | gvt_dbg_sched("will complete workload %p, status: %d\n", |
461 | workload, workload->status); | 484 | workload, workload->status); |
462 | 485 | ||
463 | if (workload->req) | ||
464 | i915_gem_request_put(fetch_and_zero(&workload->req)); | ||
465 | |||
466 | complete_current_workload(gvt, ring_id); | 486 | complete_current_workload(gvt, ring_id); |
467 | 487 | ||
468 | if (need_force_wake) | 488 | if (need_force_wake) |
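The workload_thread hunk above replaces the single timed wait with a retry loop: after a GPU reset the request is replayed, so the thread keeps waiting until the completion check really holds before treating the workload as done. The shape of that loop is sketched below with stub declarations standing in for the i915 request API; the demo_* names are placeholders, not real symbols.

#include <linux/types.h>

struct demo_request;

/* stubs for illustration only */
void demo_wait_request(struct demo_request *req);	/* blocks; may return early across a reset */
bool demo_request_completed(const struct demo_request *req);

static void demo_wait_until_done(struct demo_request *req)
{
	/* a reset can replay the request, so a single wait is not enough */
	do {
		demo_wait_request(req);
	} while (!demo_request_completed(req));
}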
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 95a97aa0051e..41cfa5ccae84 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) | |||
64 | WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); | 64 | WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); |
65 | } | 65 | } |
66 | 66 | ||
67 | static struct { | ||
68 | unsigned int low_mm; | ||
69 | unsigned int high_mm; | ||
70 | unsigned int fence; | ||
71 | enum intel_vgpu_edid edid; | ||
72 | char *name; | ||
73 | } vgpu_types[] = { | ||
74 | /* Fixed vGPU type table */ | ||
75 | { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, | ||
76 | { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, | ||
77 | { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, | ||
78 | { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, | ||
79 | }; | ||
80 | |||
67 | /** | 81 | /** |
68 | * intel_gvt_init_vgpu_types - initialize vGPU type list | 82 | * intel_gvt_init_vgpu_types - initialize vGPU type list |
69 | * @gvt : GVT device | 83 | * @gvt : GVT device |
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
78 | unsigned int min_low; | 92 | unsigned int min_low; |
79 | 93 | ||
80 | /* vGPU type name is defined as GVTg_Vx_y which contains | 94 | /* vGPU type name is defined as GVTg_Vx_y which contains |
81 | * physical GPU generation type and 'y' means maximum vGPU | 95 | * physical GPU generation type (e.g V4 as BDW server, V5 as |
82 | * instances user can create on one physical GPU for this | 96 | * SKL server). |
83 | * type. | ||
84 | * | 97 | * |
85 | * Depend on physical SKU resource, might see vGPU types like | 98 | * Depend on physical SKU resource, might see vGPU types like |
86 | * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create | 99 | * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create |
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
92 | */ | 105 | */ |
93 | low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; | 106 | low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; |
94 | high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; | 107 | high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; |
95 | num_types = 4; | 108 | num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]); |
96 | 109 | ||
97 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), | 110 | gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), |
98 | GFP_KERNEL); | 111 | GFP_KERNEL); |
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
101 | 114 | ||
102 | min_low = MB_TO_BYTES(32); | 115 | min_low = MB_TO_BYTES(32); |
103 | for (i = 0; i < num_types; ++i) { | 116 | for (i = 0; i < num_types; ++i) { |
104 | if (low_avail / min_low == 0) | 117 | if (low_avail / vgpu_types[i].low_mm == 0) |
105 | break; | 118 | break; |
106 | gvt->types[i].low_gm_size = min_low; | 119 | |
107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); | 120 | gvt->types[i].low_gm_size = vgpu_types[i].low_mm; |
108 | gvt->types[i].fence = 4; | 121 | gvt->types[i].high_gm_size = vgpu_types[i].high_mm; |
109 | gvt->types[i].max_instance = min(low_avail / min_low, | 122 | gvt->types[i].fence = vgpu_types[i].fence; |
110 | high_avail / gvt->types[i].high_gm_size); | 123 | gvt->types[i].resolution = vgpu_types[i].edid; |
111 | gvt->types[i].avail_instance = gvt->types[i].max_instance; | 124 | gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, |
125 | high_avail / vgpu_types[i].high_mm); | ||
112 | 126 | ||
113 | if (IS_GEN8(gvt->dev_priv)) | 127 | if (IS_GEN8(gvt->dev_priv)) |
114 | sprintf(gvt->types[i].name, "GVTg_V4_%u", | 128 | sprintf(gvt->types[i].name, "GVTg_V4_%s", |
115 | gvt->types[i].max_instance); | 129 | vgpu_types[i].name); |
116 | else if (IS_GEN9(gvt->dev_priv)) | 130 | else if (IS_GEN9(gvt->dev_priv)) |
117 | sprintf(gvt->types[i].name, "GVTg_V5_%u", | 131 | sprintf(gvt->types[i].name, "GVTg_V5_%s", |
118 | gvt->types[i].max_instance); | 132 | vgpu_types[i].name); |
119 | 133 | ||
120 | min_low <<= 1; | 134 | gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n", |
121 | gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", | 135 | i, gvt->types[i].name, |
122 | i, gvt->types[i].name, gvt->types[i].max_instance, | ||
123 | gvt->types[i].avail_instance, | 136 | gvt->types[i].avail_instance, |
124 | gvt->types[i].low_gm_size, | 137 | gvt->types[i].low_gm_size, |
125 | gvt->types[i].high_gm_size, gvt->types[i].fence); | 138 | gvt->types[i].high_gm_size, gvt->types[i].fence, |
139 | vgpu_edid_str(gvt->types[i].resolution)); | ||
126 | } | 140 | } |
127 | 141 | ||
128 | gvt->num_types = i; | 142 | gvt->num_types = i; |
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
138 | { | 152 | { |
139 | int i; | 153 | int i; |
140 | unsigned int low_gm_avail, high_gm_avail, fence_avail; | 154 | unsigned int low_gm_avail, high_gm_avail, fence_avail; |
141 | unsigned int low_gm_min, high_gm_min, fence_min, total_min; | 155 | unsigned int low_gm_min, high_gm_min, fence_min; |
142 | 156 | ||
143 | /* Need to depend on maxium hw resource size but keep on | 157 | /* Need to depend on maxium hw resource size but keep on |
144 | * static config for now. | 158 | * static config for now. |
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
154 | low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; | 168 | low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; |
155 | high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; | 169 | high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; |
156 | fence_min = fence_avail / gvt->types[i].fence; | 170 | fence_min = fence_avail / gvt->types[i].fence; |
157 | total_min = min(min(low_gm_min, high_gm_min), fence_min); | 171 | gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min), |
158 | gvt->types[i].avail_instance = min(gvt->types[i].max_instance, | 172 | fence_min); |
159 | total_min); | ||
160 | 173 | ||
161 | gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", | 174 | gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n", |
162 | i, gvt->types[i].name, gvt->types[i].max_instance, | 175 | i, gvt->types[i].name, |
163 | gvt->types[i].avail_instance, gvt->types[i].low_gm_size, | 176 | gvt->types[i].avail_instance, gvt->types[i].low_gm_size, |
164 | gvt->types[i].high_gm_size, gvt->types[i].fence); | 177 | gvt->types[i].high_gm_size, gvt->types[i].fence); |
165 | } | 178 | } |
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
248 | if (ret) | 261 | if (ret) |
249 | goto out_detach_hypervisor_vgpu; | 262 | goto out_detach_hypervisor_vgpu; |
250 | 263 | ||
251 | ret = intel_vgpu_init_display(vgpu); | 264 | ret = intel_vgpu_init_display(vgpu, param->resolution); |
252 | if (ret) | 265 | if (ret) |
253 | goto out_clean_gtt; | 266 | goto out_clean_gtt; |
254 | 267 | ||
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
312 | param.low_gm_sz = type->low_gm_size; | 325 | param.low_gm_sz = type->low_gm_size; |
313 | param.high_gm_sz = type->high_gm_size; | 326 | param.high_gm_sz = type->high_gm_size; |
314 | param.fence_sz = type->fence; | 327 | param.fence_sz = type->fence; |
328 | param.resolution = type->resolution; | ||
315 | 329 | ||
316 | /* XXX current param based on MB */ | 330 | /* XXX current param based on MB */ |
317 | param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); | 331 | param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); |
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
387 | populate_pvinfo_page(vgpu); | 401 | populate_pvinfo_page(vgpu); |
388 | intel_vgpu_reset_display(vgpu); | 402 | intel_vgpu_reset_display(vgpu); |
389 | 403 | ||
390 | if (dmlr) | 404 | if (dmlr) { |
391 | intel_vgpu_reset_cfg_space(vgpu); | 405 | intel_vgpu_reset_cfg_space(vgpu); |
406 | /* only clear failsafe mode on a device-model-level (dmlr) reset */ | ||
407 | vgpu->failsafe = false; | ||
408 | vgpu->pv_notified = false; | ||
409 | } | ||
392 | } | 410 | } |
393 | 411 | ||
394 | vgpu->resetting = false; | 412 | vgpu->resetting = false; |
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index e10a4eda4078..1144e0c9e894 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c | |||
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb) | |||
65 | switch (format) { | 65 | switch (format) { |
66 | case DRM_FORMAT_RGB565: | 66 | case DRM_FORMAT_RGB565: |
67 | dev_dbg(drm->dev, "Setting up RGB565 mode\n"); | 67 | dev_dbg(drm->dev, "Setting up RGB565 mode\n"); |
68 | ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT); | ||
69 | ctrl |= CTRL_SET_WORD_LENGTH(0); | 68 | ctrl |= CTRL_SET_WORD_LENGTH(0); |
70 | ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf); | 69 | ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf); |
71 | break; | 70 | break; |
72 | case DRM_FORMAT_XRGB8888: | 71 | case DRM_FORMAT_XRGB8888: |
73 | dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); | 72 | dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); |
74 | ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT); | ||
75 | ctrl |= CTRL_SET_WORD_LENGTH(3); | 73 | ctrl |= CTRL_SET_WORD_LENGTH(3); |
76 | /* Do not use packed pixels = one pixel per word instead. */ | 74 | /* Do not use packed pixels = one pixel per word instead. */ |
77 | ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7); | 75 | ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7); |
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb) | |||
87 | return 0; | 85 | return 0; |
88 | } | 86 | } |
89 | 87 | ||
88 | static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb) | ||
89 | { | ||
90 | struct drm_crtc *crtc = &mxsfb->pipe.crtc; | ||
91 | struct drm_device *drm = crtc->dev; | ||
92 | u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; | ||
93 | u32 reg; | ||
94 | |||
95 | reg = readl(mxsfb->base + LCDC_CTRL); | ||
96 | |||
97 | if (mxsfb->connector.display_info.num_bus_formats) | ||
98 | bus_format = mxsfb->connector.display_info.bus_formats[0]; | ||
99 | |||
100 | reg &= ~CTRL_BUS_WIDTH_MASK; | ||
101 | switch (bus_format) { | ||
102 | case MEDIA_BUS_FMT_RGB565_1X16: | ||
103 | reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT); | ||
104 | break; | ||
105 | case MEDIA_BUS_FMT_RGB666_1X18: | ||
106 | reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT); | ||
107 | break; | ||
108 | case MEDIA_BUS_FMT_RGB888_1X24: | ||
109 | reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT); | ||
110 | break; | ||
111 | default: | ||
112 | dev_err(drm->dev, "Unknown media bus format %d\n", bus_format); | ||
113 | break; | ||
114 | } | ||
115 | writel(reg, mxsfb->base + LCDC_CTRL); | ||
116 | } | ||
117 | |||
90 | static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) | 118 | static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) |
91 | { | 119 | { |
92 | u32 reg; | 120 | u32 reg; |
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) | |||
168 | vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; | 196 | vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; |
169 | if (m->flags & DRM_MODE_FLAG_PVSYNC) | 197 | if (m->flags & DRM_MODE_FLAG_PVSYNC) |
170 | vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; | 198 | vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; |
171 | if (bus_flags & DRM_BUS_FLAG_DE_HIGH) | 199 | /* Make sure Data Enable is high active by default */ |
200 | if (!(bus_flags & DRM_BUS_FLAG_DE_LOW)) | ||
172 | vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; | 201 | vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; |
173 | if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) | 202 | /* |
203 | * DRM_BUS_FLAG_PIXDATA_ defines are controller centric, | ||
204 | * the controller's VDCTRL0_DOTCLK bit is display centric. | ||
205 | * Drive on positive edge -> display samples on falling edge | ||
206 | * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING | ||
207 | */ | ||
208 | if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) | ||
174 | vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; | 209 | vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; |
175 | 210 | ||
176 | writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); | 211 | writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); |
177 | 212 | ||
213 | mxsfb_set_bus_fmt(mxsfb); | ||
214 | |||
178 | /* Frame length in lines. */ | 215 | /* Frame length in lines. */ |
179 | writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1); | 216 | writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1); |
180 | 217 | ||
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) | |||
184 | VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal), | 221 | VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal), |
185 | mxsfb->base + LCDC_VDCTRL2); | 222 | mxsfb->base + LCDC_VDCTRL2); |
186 | 223 | ||
187 | writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) | | 224 | writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) | |
188 | SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end), | 225 | SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start), |
189 | mxsfb->base + LCDC_VDCTRL3); | 226 | mxsfb->base + LCDC_VDCTRL3); |
190 | 227 | ||
191 | writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay), | 228 | writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay), |
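Two things change in the mxsfb CRTC code above: the bus width is now derived from the connector's reported media bus format, and the VDCTRL3 wait counts are corrected to cover the sync pulse plus the back porch rather than the back porch alone. The corrected arithmetic, written as stand-alone helpers (the helper names are illustrative):

/* horizontal wait count = hsync pulse length + horizontal back porch */
static unsigned int demo_h_wait_cnt(unsigned int crtc_htotal,
				    unsigned int crtc_hsync_start)
{
	return crtc_htotal - crtc_hsync_start;
}

/* vertical wait count = vsync pulse length + vertical back porch */
static unsigned int demo_v_wait_cnt(unsigned int crtc_vtotal,
				    unsigned int crtc_vsync_start)
{
	return crtc_vtotal - crtc_vsync_start;
}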
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index cdfbe0284635..ff6d6a6f842e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c | |||
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, | |||
102 | { | 102 | { |
103 | struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); | 103 | struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); |
104 | 104 | ||
105 | drm_panel_prepare(mxsfb->panel); | ||
105 | mxsfb_crtc_enable(mxsfb); | 106 | mxsfb_crtc_enable(mxsfb); |
107 | drm_panel_enable(mxsfb->panel); | ||
106 | } | 108 | } |
107 | 109 | ||
108 | static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) | 110 | static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) |
109 | { | 111 | { |
110 | struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); | 112 | struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); |
111 | 113 | ||
114 | drm_panel_disable(mxsfb->panel); | ||
112 | mxsfb_crtc_disable(mxsfb); | 115 | mxsfb_crtc_disable(mxsfb); |
116 | drm_panel_unprepare(mxsfb->panel); | ||
113 | } | 117 | } |
114 | 118 | ||
115 | static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, | 119 | static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, |
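The mxsfb_drv.c hunk above brackets the CRTC with the panel calls so that power sequencing is symmetric: prepare the panel before scanout starts, enable its backlight only once the controller is running, and tear down in the mirror order. A minimal sketch of that sequence is shown below; the demo_ctrl_* controller hooks are placeholders for the driver's own start/stop code.

#include <drm/drm_panel.h>

struct demo_ctrl;
void demo_ctrl_start(struct demo_ctrl *c);	/* placeholder: start scanout */
void demo_ctrl_stop(struct demo_ctrl *c);	/* placeholder: stop scanout */

static void demo_pipe_enable(struct demo_ctrl *c, struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* power rails up, panel ready */
	demo_ctrl_start(c);		/* valid video signal present */
	drm_panel_enable(panel);	/* backlight on last */
}

static void demo_pipe_disable(struct demo_ctrl *c, struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off first */
	demo_ctrl_stop(c);
	drm_panel_unprepare(panel);	/* power rails down last */
}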
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c index fa8d17399407..b8e81422d4e2 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_out.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c | |||
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm, | |||
112 | 112 | ||
113 | int mxsfb_create_output(struct drm_device *drm) | 113 | int mxsfb_create_output(struct drm_device *drm) |
114 | { | 114 | { |
115 | struct mxsfb_drm_private *mxsfb = drm->dev_private; | ||
115 | struct device_node *ep_np = NULL; | 116 | struct device_node *ep_np = NULL; |
116 | struct of_endpoint ep; | 117 | struct of_endpoint ep; |
117 | int ret; | 118 | int ret; |
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm) | |||
127 | } | 128 | } |
128 | } | 129 | } |
129 | 130 | ||
131 | if (!mxsfb->panel) | ||
132 | return -EPROBE_DEFER; | ||
133 | |||
130 | return 0; | 134 | return 0; |
131 | } | 135 | } |
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h index 31d62cd0d3d7..66a6ba9ec533 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define CTRL_DATA_SELECT (1 << 16) | 44 | #define CTRL_DATA_SELECT (1 << 16) |
45 | #define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) | 45 | #define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) |
46 | #define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) | 46 | #define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) |
47 | #define CTRL_BUS_WIDTH_MASK (0x3 << 10) | ||
47 | #define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) | 48 | #define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) |
48 | #define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) | 49 | #define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) |
49 | #define CTRL_MASTER (1 << 5) | 50 | #define CTRL_MASTER (1 << 5) |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index b5bfbe50bd87..b0ff304ce3dc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c | |||
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) | |||
32 | { | 32 | { |
33 | const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; | 33 | const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; |
34 | struct rcar_du_device *rcdu = crtc->group->dev; | 34 | struct rcar_du_device *rcdu = crtc->group->dev; |
35 | struct vsp1_du_lif_config cfg = { | ||
36 | .width = mode->hdisplay, | ||
37 | .height = mode->vdisplay, | ||
38 | }; | ||
35 | struct rcar_du_plane_state state = { | 39 | struct rcar_du_plane_state state = { |
36 | .state = { | 40 | .state = { |
37 | .crtc = &crtc->crtc, | 41 | .crtc = &crtc->crtc, |
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) | |||
66 | */ | 70 | */ |
67 | crtc->group->need_restart = true; | 71 | crtc->group->need_restart = true; |
68 | 72 | ||
69 | vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay); | 73 | vsp1_du_setup_lif(crtc->vsp->vsp, &cfg); |
70 | } | 74 | } |
71 | 75 | ||
72 | void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) | 76 | void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) |
73 | { | 77 | { |
74 | vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0); | 78 | vsp1_du_setup_lif(crtc->vsp->vsp, NULL); |
75 | } | 79 | } |
76 | 80 | ||
77 | void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) | 81 | void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) |
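The rcar-du change above follows the new vsp1_du_setup_lif() prototype: the caller passes a vsp1_du_lif_config describing the frame to bring the pipeline up, or NULL to tear it down. A small hedged wrapper showing the convention (the wrapper itself is illustrative, not driver code):

#include <linux/device.h>
#include <media/vsp1.h>

static int demo_setup_lif(struct device *vsp, unsigned int width,
			  unsigned int height, bool enable)
{
	struct vsp1_du_lif_config cfg = {
		.width = width,
		.height = height,
	};

	/* a NULL config means "disable the LIF pipeline" */
	return vsp1_du_setup_lif(vsp, enable ? &cfg : NULL);
}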
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 81a80c82f1bd..bd0d1988feb2 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel) | |||
543 | /* | 543 | /* |
544 | * In case a device driver's probe() fails (e.g., | 544 | * In case a device driver's probe() fails (e.g., |
545 | * util_probe() -> vmbus_open() returns -ENOMEM) and the device is | 545 | * util_probe() -> vmbus_open() returns -ENOMEM) and the device is |
546 | * rescinded later (e.g., we dynamically disble an Integrated Service | 546 | * rescinded later (e.g., we dynamically disable an Integrated Service |
547 | * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): | 547 | * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): |
548 | * here we should skip most of the below cleanup work. | 548 | * here we should skip most of the below cleanup work. |
549 | */ | 549 | */ |
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 0652281662a8..78792b4d6437 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c | |||
@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
465 | u8 *tmp_buf; | 465 | u8 *tmp_buf; |
466 | int len = 0; | 466 | int len = 0; |
467 | int xfersz = brcmstb_i2c_get_xfersz(dev); | 467 | int xfersz = brcmstb_i2c_get_xfersz(dev); |
468 | u32 cond, cond_per_msg; | ||
468 | 469 | ||
469 | if (dev->is_suspended) | 470 | if (dev->is_suspended) |
470 | return -EBUSY; | 471 | return -EBUSY; |
@@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
481 | pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); | 482 | pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); |
482 | 483 | ||
483 | if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) | 484 | if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) |
484 | brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); | 485 | cond = ~COND_START_STOP; |
485 | else | 486 | else |
486 | brcmstb_set_i2c_start_stop(dev, | 487 | cond = COND_RESTART | COND_NOSTOP; |
487 | COND_RESTART | COND_NOSTOP); | 488 | |
489 | brcmstb_set_i2c_start_stop(dev, cond); | ||
488 | 490 | ||
489 | /* Send slave address */ | 491 | /* Send slave address */ |
490 | if (!(pmsg->flags & I2C_M_NOSTART)) { | 492 | if (!(pmsg->flags & I2C_M_NOSTART)) { |
@@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
497 | } | 499 | } |
498 | } | 500 | } |
499 | 501 | ||
502 | cond_per_msg = cond; | ||
503 | |||
500 | /* Perform data transfer */ | 504 | /* Perform data transfer */ |
501 | while (len) { | 505 | while (len) { |
502 | bytes_to_xfer = min(len, xfersz); | 506 | bytes_to_xfer = min(len, xfersz); |
503 | 507 | ||
504 | if (len <= xfersz && i == (num - 1)) | 508 | if (len <= xfersz) { |
505 | brcmstb_set_i2c_start_stop(dev, | 509 | if (i == (num - 1)) |
506 | ~(COND_START_STOP)); | 510 | cond_per_msg = cond_per_msg & |
511 | ~(COND_RESTART | COND_NOSTOP); | ||
512 | else | ||
513 | cond_per_msg = cond; | ||
514 | } else { | ||
515 | cond_per_msg = (cond_per_msg & ~COND_RESTART) | | ||
516 | COND_NOSTOP; | ||
517 | } | ||
518 | |||
519 | brcmstb_set_i2c_start_stop(dev, cond_per_msg); | ||
507 | 520 | ||
508 | rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, | 521 | rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, |
509 | bytes_to_xfer, pmsg); | 522 | bytes_to_xfer, pmsg); |
@@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, | |||
512 | 525 | ||
513 | len -= bytes_to_xfer; | 526 | len -= bytes_to_xfer; |
514 | tmp_buf += bytes_to_xfer; | 527 | tmp_buf += bytes_to_xfer; |
528 | |||
529 | cond_per_msg = COND_NOSTART | COND_NOSTOP; | ||
515 | } | 530 | } |
516 | } | 531 | } |
517 | 532 | ||
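The brcmstb rework above computes the START/STOP condition per chunk instead of programming it once per message: the first chunk keeps the condition chosen for the message, intermediate chunks neither start nor stop, and only the final chunk of the final message is allowed to issue a STOP. The selection logic in isolation, with made-up flag names (the real driver uses its COND_* defines):

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_RESTART	BIT(0)
#define DEMO_NOSTART	BIT(1)
#define DEMO_NOSTOP	BIT(2)

static u32 demo_chunk_cond(u32 msg_cond, bool first_chunk,
			   bool last_chunk, bool last_msg)
{
	if (!first_chunk)	/* chunks after the first: no new START */
		msg_cond = DEMO_NOSTART | DEMO_NOSTOP;
	if (!last_chunk)	/* more data follows: never STOP here */
		return (msg_cond & ~DEMO_RESTART) | DEMO_NOSTOP;
	if (last_msg)		/* very last chunk of the transfer: allow STOP */
		return msg_cond & ~(DEMO_RESTART | DEMO_NOSTOP);
	return msg_cond;	/* last chunk of a middle message */
}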
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index c1db3a5a340f..d9aaf1790e0e 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h | |||
@@ -88,6 +88,7 @@ struct dw_i2c_dev { | |||
88 | void __iomem *base; | 88 | void __iomem *base; |
89 | struct completion cmd_complete; | 89 | struct completion cmd_complete; |
90 | struct clk *clk; | 90 | struct clk *clk; |
91 | struct reset_control *rst; | ||
91 | u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); | 92 | u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); |
92 | struct dw_pci_controller *controller; | 93 | struct dw_pci_controller *controller; |
93 | int cmd_err; | 94 | int cmd_err; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 6ce431323125..79c4b4ea0539 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/pm_runtime.h> | 38 | #include <linux/pm_runtime.h> |
39 | #include <linux/property.h> | 39 | #include <linux/property.h> |
40 | #include <linux/io.h> | 40 | #include <linux/io.h> |
41 | #include <linux/reset.h> | ||
41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
42 | #include <linux/acpi.h> | 43 | #include <linux/acpi.h> |
43 | #include <linux/platform_data/i2c-designware.h> | 44 | #include <linux/platform_data/i2c-designware.h> |
@@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
199 | dev->irq = irq; | 200 | dev->irq = irq; |
200 | platform_set_drvdata(pdev, dev); | 201 | platform_set_drvdata(pdev, dev); |
201 | 202 | ||
203 | dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); | ||
204 | if (IS_ERR(dev->rst)) { | ||
205 | if (PTR_ERR(dev->rst) == -EPROBE_DEFER) | ||
206 | return -EPROBE_DEFER; | ||
207 | } else { | ||
208 | reset_control_deassert(dev->rst); | ||
209 | } | ||
210 | |||
202 | if (pdata) { | 211 | if (pdata) { |
203 | dev->clk_freq = pdata->i2c_scl_freq; | 212 | dev->clk_freq = pdata->i2c_scl_freq; |
204 | } else { | 213 | } else { |
@@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
235 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { | 244 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { |
236 | dev_err(&pdev->dev, | 245 | dev_err(&pdev->dev, |
237 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); | 246 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); |
238 | return -EINVAL; | 247 | r = -EINVAL; |
248 | goto exit_reset; | ||
239 | } | 249 | } |
240 | 250 | ||
241 | r = i2c_dw_eval_lock_support(dev); | 251 | r = i2c_dw_eval_lock_support(dev); |
242 | if (r) | 252 | if (r) |
243 | return r; | 253 | goto exit_reset; |
244 | 254 | ||
245 | dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; | 255 | dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; |
246 | 256 | ||
@@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
286 | } | 296 | } |
287 | 297 | ||
288 | r = i2c_dw_probe(dev); | 298 | r = i2c_dw_probe(dev); |
289 | if (r && !dev->pm_runtime_disabled) | 299 | if (r) |
290 | pm_runtime_disable(&pdev->dev); | 300 | goto exit_probe; |
291 | 301 | ||
292 | return r; | 302 | return r; |
303 | |||
304 | exit_probe: | ||
305 | if (!dev->pm_runtime_disabled) | ||
306 | pm_runtime_disable(&pdev->dev); | ||
307 | exit_reset: | ||
308 | if (!IS_ERR_OR_NULL(dev->rst)) | ||
309 | reset_control_assert(dev->rst); | ||
310 | return r; | ||
293 | } | 311 | } |
294 | 312 | ||
295 | static int dw_i2c_plat_remove(struct platform_device *pdev) | 313 | static int dw_i2c_plat_remove(struct platform_device *pdev) |
@@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) | |||
306 | pm_runtime_put_sync(&pdev->dev); | 324 | pm_runtime_put_sync(&pdev->dev); |
307 | if (!dev->pm_runtime_disabled) | 325 | if (!dev->pm_runtime_disabled) |
308 | pm_runtime_disable(&pdev->dev); | 326 | pm_runtime_disable(&pdev->dev); |
327 | if (!IS_ERR_OR_NULL(dev->rst)) | ||
328 | reset_control_assert(dev->rst); | ||
309 | 329 | ||
310 | return 0; | 330 | return 0; |
311 | } | 331 | } |
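The designware platform driver now manages an optional reset line: fetch it at probe, bail out with -EPROBE_DEFER if the reset controller is not ready yet, deassert it while the device is in use, and re-assert it on every error path and in remove. A condensed sketch of that pattern, trimmed to the reset-specific parts (demo_* names are placeholders):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int demo_get_and_deassert_reset(struct platform_device *pdev,
				       struct reset_control **rst)
{
	*rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(*rst)) {
		/* defer if the reset provider has not probed yet */
		if (PTR_ERR(*rst) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		return 0;	/* no usable reset line: carry on without it */
	}

	reset_control_deassert(*rst);
	return 0;
}

static void demo_put_reset(struct reset_control *rst)
{
	/* undo the deassert on error paths and in remove */
	if (!IS_ERR_OR_NULL(rst))
		reset_control_assert(rst);
}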
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index cbd93ce0661f..736a82472101 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c | |||
@@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) | |||
457 | 457 | ||
458 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); | 458 | int_status = readl(i2c->regs + HSI2C_INT_STATUS); |
459 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); | 459 | writel(int_status, i2c->regs + HSI2C_INT_STATUS); |
460 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
461 | 460 | ||
462 | /* handle interrupt related to the transfer status */ | 461 | /* handle interrupt related to the transfer status */ |
463 | if (i2c->variant->hw == HSI2C_EXYNOS7) { | 462 | if (i2c->variant->hw == HSI2C_EXYNOS7) { |
@@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) | |||
482 | goto stop; | 481 | goto stop; |
483 | } | 482 | } |
484 | 483 | ||
484 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
485 | if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { | 485 | if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { |
486 | i2c->state = -EAGAIN; | 486 | i2c->state = -EAGAIN; |
487 | goto stop; | 487 | goto stop; |
488 | } | 488 | } |
489 | } else if (int_status & HSI2C_INT_I2C) { | 489 | } else if (int_status & HSI2C_INT_I2C) { |
490 | trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS); | ||
490 | if (trans_status & HSI2C_NO_DEV_ACK) { | 491 | if (trans_status & HSI2C_NO_DEV_ACK) { |
491 | dev_dbg(i2c->dev, "No ACK from device\n"); | 492 | dev_dbg(i2c->dev, "No ACK from device\n"); |
492 | i2c->state = -ENXIO; | 493 | i2c->state = -ENXIO; |
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 2aa61bbbd307..73b97c71a484 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c | |||
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len) | |||
175 | wdata1 |= *buf++ << ((i - 4) * 8); | 175 | wdata1 |= *buf++ << ((i - 4) * 8); |
176 | 176 | ||
177 | writel(wdata0, i2c->regs + REG_TOK_WDATA0); | 177 | writel(wdata0, i2c->regs + REG_TOK_WDATA0); |
178 | writel(wdata0, i2c->regs + REG_TOK_WDATA1); | 178 | writel(wdata1, i2c->regs + REG_TOK_WDATA1); |
179 | 179 | ||
180 | dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, | 180 | dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, |
181 | wdata0, wdata1, len); | 181 | wdata0, wdata1, len); |
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 4a7d9bc2142b..45d61714c81b 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c | |||
@@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = { | |||
172 | .max_comb_2nd_msg_len = 31, | 172 | .max_comb_2nd_msg_len = 31, |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static const struct i2c_adapter_quirks mt8173_i2c_quirks = { | ||
176 | .max_num_msgs = 65535, | ||
177 | .max_write_len = 65535, | ||
178 | .max_read_len = 65535, | ||
179 | .max_comb_1st_msg_len = 65535, | ||
180 | .max_comb_2nd_msg_len = 65535, | ||
181 | }; | ||
182 | |||
183 | static const struct mtk_i2c_compatible mt6577_compat = { | 175 | static const struct mtk_i2c_compatible mt6577_compat = { |
184 | .quirks = &mt6577_i2c_quirks, | 176 | .quirks = &mt6577_i2c_quirks, |
185 | .pmic_i2c = 0, | 177 | .pmic_i2c = 0, |
@@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = { | |||
199 | }; | 191 | }; |
200 | 192 | ||
201 | static const struct mtk_i2c_compatible mt8173_compat = { | 193 | static const struct mtk_i2c_compatible mt8173_compat = { |
202 | .quirks = &mt8173_i2c_quirks, | ||
203 | .pmic_i2c = 0, | 194 | .pmic_i2c = 0, |
204 | .dcm = 1, | 195 | .dcm = 1, |
205 | .auto_restart = 1, | 196 | .auto_restart = 1, |
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index 8f11d347b3ec..c811af4c8d81 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c | |||
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data) | |||
218 | } | 218 | } |
219 | 219 | ||
220 | if (riic->is_last || riic->err) { | 220 | if (riic->is_last || riic->err) { |
221 | riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); | 221 | riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER); |
222 | writeb(ICCR2_SP, riic->base + RIIC_ICCR2); | 222 | writeb(ICCR2_SP, riic->base + RIIC_ICCR2); |
223 | } else { | ||
224 | /* Transfer is complete, but do not send STOP */ | ||
225 | riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER); | ||
226 | complete(&riic->msg_done); | ||
223 | } | 227 | } |
224 | 228 | ||
225 | return IRQ_HANDLED; | 229 | return IRQ_HANDLED; |
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 83768e85a919..2178266bca79 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c | |||
@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) | |||
429 | while (muxc->num_adapters) { | 429 | while (muxc->num_adapters) { |
430 | struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; | 430 | struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; |
431 | struct i2c_mux_priv *priv = adap->algo_data; | 431 | struct i2c_mux_priv *priv = adap->algo_data; |
432 | struct device_node *np = adap->dev.of_node; | ||
432 | 433 | ||
433 | muxc->adapter[muxc->num_adapters] = NULL; | 434 | muxc->adapter[muxc->num_adapters] = NULL; |
434 | 435 | ||
@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc) | |||
438 | 439 | ||
439 | sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); | 440 | sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); |
440 | i2c_del_adapter(adap); | 441 | i2c_del_adapter(adap); |
442 | of_node_put(np); | ||
441 | kfree(priv); | 443 | kfree(priv); |
442 | } | 444 | } |
443 | } | 445 | } |
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 1eef56a89b1f..f96601268f71 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c | |||
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = { | |||
198 | 198 | ||
199 | static int __init crossbar_of_init(struct device_node *node) | 199 | static int __init crossbar_of_init(struct device_node *node) |
200 | { | 200 | { |
201 | int i, size, max = 0, reserved = 0, entry; | 201 | u32 max = 0, entry, reg_size; |
202 | int i, size, reserved = 0; | ||
202 | const __be32 *irqsr; | 203 | const __be32 *irqsr; |
203 | int ret = -ENOMEM; | 204 | int ret = -ENOMEM; |
204 | 205 | ||
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node) | |||
275 | if (!cb->register_offsets) | 276 | if (!cb->register_offsets) |
276 | goto err_irq_map; | 277 | goto err_irq_map; |
277 | 278 | ||
278 | of_property_read_u32(node, "ti,reg-size", &size); | 279 | of_property_read_u32(node, "ti,reg-size", ®_size); |
279 | 280 | ||
280 | switch (size) { | 281 | switch (reg_size) { |
281 | case 1: | 282 | case 1: |
282 | cb->write = crossbar_writeb; | 283 | cb->write = crossbar_writeb; |
283 | break; | 284 | break; |
@@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node) | |||
303 | continue; | 304 | continue; |
304 | 305 | ||
305 | cb->register_offsets[i] = reserved; | 306 | cb->register_offsets[i] = reserved; |
306 | reserved += size; | 307 | reserved += reg_size; |
307 | } | 308 | } |
308 | 309 | ||
309 | of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); | 310 | of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 23201004fd7a..f77f840d2b5f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | |||
1601 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | 1601 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) | ||
1605 | { | ||
1606 | struct its_node *its = data; | ||
1607 | |||
1608 | /* On QDF2400, the size of the ITE is 16 bytes */ | ||
1609 | its->ite_size = 16; | ||
1610 | } | ||
1611 | |||
1604 | static const struct gic_quirk its_quirks[] = { | 1612 | static const struct gic_quirk its_quirks[] = { |
1605 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | 1613 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
1606 | { | 1614 | { |
@@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = { | |||
1618 | .init = its_enable_quirk_cavium_23144, | 1626 | .init = its_enable_quirk_cavium_23144, |
1619 | }, | 1627 | }, |
1620 | #endif | 1628 | #endif |
1629 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | ||
1630 | { | ||
1631 | .desc = "ITS: QDF2400 erratum 0065", | ||
1632 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | ||
1633 | .mask = 0xffffffff, | ||
1634 | .init = its_enable_quirk_qdf2400_e0065, | ||
1635 | }, | ||
1636 | #endif | ||
1621 | { | 1637 | { |
1622 | } | 1638 | } |
1623 | }; | 1639 | }; |
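The new entry above plugs into the generic ITS quirk table: a quirk fires when the implementation's IIDR, after masking, matches the entry, and its init callback then adjusts per-implementation parameters (here, a 16-byte ITE). The matching rule on its own, as a sketch; the in-tree table is sentinel-terminated, while this illustration takes an explicit count.

#include <linux/types.h>

struct demo_quirk {
	const char *desc;
	u32 iidr;	/* expected value after masking */
	u32 mask;
	void (*init)(void *data);
};

static void demo_apply_quirks(const struct demo_quirk *quirks, int n,
			      u32 iidr, void *data)
{
	int i;

	for (i = 0; i < n; i++) {
		/* a quirk fires when the masked IIDR matches its entry */
		if ((iidr & quirks[i].mask) == quirks[i].iidr)
			quirks[i].init(data);
	}
}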
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c index 409849165838..f64a36007800 100644 --- a/drivers/isdn/hisax/st5481_b.c +++ b/drivers/isdn/hisax/st5481_b.c | |||
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode) | |||
239 | } | 239 | } |
240 | } | 240 | } |
241 | } else { | 241 | } else { |
242 | // Disble B channel interrupts | 242 | // Disable B channel interrupts |
243 | st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); | 243 | st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); |
244 | 244 | ||
245 | // Disable B channel FIFOs | 245 | // Disable B channel FIFOs |
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index a126919ed102..5d13930f0f22 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | #include <linux/blkdev.h> | 5 | #include <linux/blkdev.h> |
6 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
7 | #include <linux/blkdev.h> | ||
8 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
9 | #include <linux/sched/clock.h> | 8 | #include <linux/sched/clock.h> |
10 | #include <linux/llist.h> | 9 | #include <linux/llist.h> |
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 7a681d8202c7..4442e478db72 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h | |||
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
256 | * | 256 | * |
257 | * The actual DAP implementation may be restricted to only one of the modes. | 257 | * The actual DAP implementation may be restricted to only one of the modes. |
258 | * A compiler warning or error will be generated if the DAP implementation | 258 | * A compiler warning or error will be generated if the DAP implementation |
259 | * overides or cannot handle the mode defined below. | 259 | * overrides or cannot handle the mode defined below. |
260 | * | ||
261 | */ | 260 | */ |
262 | #ifndef DRXDAP_SINGLE_MASTER | 261 | #ifndef DRXDAP_SINGLE_MASTER |
263 | #define DRXDAP_SINGLE_MASTER 1 | 262 | #define DRXDAP_SINGLE_MASTER 1 |
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
272 | * | 271 | * |
273 | * This maximum size may be restricted by the actual DAP implementation. | 272 | * This maximum size may be restricted by the actual DAP implementation. |
274 | * A compiler warning or error will be generated if the DAP implementation | 273 | * A compiler warning or error will be generated if the DAP implementation |
275 | * overides or cannot handle the chunksize defined below. | 274 | * overrides or cannot handle the chunksize defined below. |
276 | * | 275 | * |
277 | * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data | 276 | * Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data |
278 | * buffer. Do not undefine or choose too large, unless your system is able to | 277 | * buffer. Do not undefine or choose too large, unless your system is able to |
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
292 | * | 291 | * |
293 | * This maximum size may be restricted by the actual DAP implementation. | 292 | * This maximum size may be restricted by the actual DAP implementation. |
294 | * A compiler warning or error will be generated if the DAP implementation | 293 | * A compiler warning or error will be generated if the DAP implementation |
295 | * overides or cannot handle the chunksize defined below. | 294 | * overrides or cannot handle the chunksize defined below. |
296 | * | ||
297 | */ | 295 | */ |
298 | #ifndef DRXDAP_MAX_RCHUNKSIZE | 296 | #ifndef DRXDAP_MAX_RCHUNKSIZE |
299 | #define DRXDAP_MAX_RCHUNKSIZE 60 | 297 | #define DRXDAP_MAX_RCHUNKSIZE 60 |
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index b4b583f7137a..b4c0f10fc3b0 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c | |||
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); | |||
54 | /** | 54 | /** |
55 | * vsp1_du_setup_lif - Setup the output part of the VSP pipeline | 55 | * vsp1_du_setup_lif - Setup the output part of the VSP pipeline |
56 | * @dev: the VSP device | 56 | * @dev: the VSP device |
57 | * @width: output frame width in pixels | 57 | * @cfg: the LIF configuration |
58 | * @height: output frame height in pixels | ||
59 | * | 58 | * |
60 | * Configure the output part of VSP DRM pipeline for the given frame @width and | 59 | * Configure the output part of VSP DRM pipeline for the given frame @cfg.width |
61 | * @height. This sets up formats on the BRU source pad, the WPF0 sink and source | 60 | * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink |
62 | * pads, and the LIF sink pad. | 61 | * and source pads, and the LIF sink pad. |
63 | * | 62 | * |
64 | * As the media bus code on the BRU source pad is conditioned by the | 63 | * As the media bus code on the BRU source pad is conditioned by the |
65 | * configuration of the BRU sink 0 pad, we also set up the formats on all BRU | 64 | * configuration of the BRU sink 0 pad, we also set up the formats on all BRU |
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init); | |||
69 | * | 68 | * |
70 | * Return 0 on success or a negative error code on failure. | 69 | * Return 0 on success or a negative error code on failure. |
71 | */ | 70 | */ |
72 | int vsp1_du_setup_lif(struct device *dev, unsigned int width, | 71 | int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg) |
73 | unsigned int height) | ||
74 | { | 72 | { |
75 | struct vsp1_device *vsp1 = dev_get_drvdata(dev); | 73 | struct vsp1_device *vsp1 = dev_get_drvdata(dev); |
76 | struct vsp1_pipeline *pipe = &vsp1->drm->pipe; | 74 | struct vsp1_pipeline *pipe = &vsp1->drm->pipe; |
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
79 | unsigned int i; | 77 | unsigned int i; |
80 | int ret; | 78 | int ret; |
81 | 79 | ||
82 | dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", | 80 | if (!cfg) { |
83 | __func__, width, height); | 81 | /* NULL configuration means the CRTC is being disabled, stop |
84 | |||
85 | if (width == 0 || height == 0) { | ||
86 | /* Zero width or height means the CRTC is being disabled, stop | ||
87 | * the pipeline and turn the light off. | 82 | * the pipeline and turn the light off. |
88 | */ | 83 | */ |
89 | ret = vsp1_pipeline_stop(pipe); | 84 | ret = vsp1_pipeline_stop(pipe); |
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
108 | return 0; | 103 | return 0; |
109 | } | 104 | } |
110 | 105 | ||
106 | dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", | ||
107 | __func__, cfg->width, cfg->height); | ||
108 | |||
111 | /* Configure the format at the BRU sinks and propagate it through the | 109 | /* Configure the format at the BRU sinks and propagate it through the |
112 | * pipeline. | 110 | * pipeline. |
113 | */ | 111 | */ |
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
117 | for (i = 0; i < bru->entity.source_pad; ++i) { | 115 | for (i = 0; i < bru->entity.source_pad; ++i) { |
118 | format.pad = i; | 116 | format.pad = i; |
119 | 117 | ||
120 | format.format.width = width; | 118 | format.format.width = cfg->width; |
121 | format.format.height = height; | 119 | format.format.height = cfg->height; |
122 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; | 120 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; |
123 | format.format.field = V4L2_FIELD_NONE; | 121 | format.format.field = V4L2_FIELD_NONE; |
124 | 122 | ||
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
133 | } | 131 | } |
134 | 132 | ||
135 | format.pad = bru->entity.source_pad; | 133 | format.pad = bru->entity.source_pad; |
136 | format.format.width = width; | 134 | format.format.width = cfg->width; |
137 | format.format.height = height; | 135 | format.format.height = cfg->height; |
138 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; | 136 | format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; |
139 | format.format.field = V4L2_FIELD_NONE; | 137 | format.format.field = V4L2_FIELD_NONE; |
140 | 138 | ||
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width, | |||
180 | /* Verify that the format at the output of the pipeline matches the | 178 | /* Verify that the format at the output of the pipeline matches the |
181 | * requested frame size and media bus code. | 179 | * requested frame size and media bus code. |
182 | */ | 180 | */ |
183 | if (format.format.width != width || format.format.height != height || | 181 | if (format.format.width != cfg->width || |
182 | format.format.height != cfg->height || | ||
184 | format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { | 183 | format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { |
185 | dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); | 184 | dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); |
186 | return -EPIPE; | 185 | return -EPIPE; |
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 393dccaabdd0..1688893a65bb 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c | |||
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) | |||
436 | return -ERESTARTSYS; | 436 | return -ERESTARTSYS; |
437 | 437 | ||
438 | ir = irctls[iminor(inode)]; | 438 | ir = irctls[iminor(inode)]; |
439 | mutex_unlock(&lirc_dev_lock); | ||
440 | |||
439 | if (!ir) { | 441 | if (!ir) { |
440 | retval = -ENODEV; | 442 | retval = -ENODEV; |
441 | goto error; | 443 | goto error; |
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file) | |||
476 | } | 478 | } |
477 | 479 | ||
478 | error: | 480 | error: |
479 | mutex_unlock(&lirc_dev_lock); | ||
480 | |||
481 | nonseekable_open(inode, file); | 481 | nonseekable_open(inode, file); |
482 | 482 | ||
483 | return retval; | 483 | return retval; |
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index b109f8246b96..ec4b25bd2ec2 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c | |||
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, | |||
176 | { | 176 | { |
177 | u8 tolerance, config; | 177 | u8 tolerance, config; |
178 | struct nvt_dev *nvt = dev->priv; | 178 | struct nvt_dev *nvt = dev->priv; |
179 | unsigned long flags; | ||
179 | int i; | 180 | int i; |
180 | 181 | ||
181 | /* hardcode the tolerance to 10% */ | 182 | /* hardcode the tolerance to 10% */ |
182 | tolerance = DIV_ROUND_UP(count, 10); | 183 | tolerance = DIV_ROUND_UP(count, 10); |
183 | 184 | ||
184 | spin_lock(&nvt->lock); | 185 | spin_lock_irqsave(&nvt->lock, flags); |
185 | 186 | ||
186 | nvt_clear_cir_wake_fifo(nvt); | 187 | nvt_clear_cir_wake_fifo(nvt); |
187 | nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); | 188 | nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); |
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev, | |||
203 | 204 | ||
204 | nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); | 205 | nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); |
205 | 206 | ||
206 | spin_unlock(&nvt->lock); | 207 | spin_unlock_irqrestore(&nvt->lock, flags); |
207 | } | 208 | } |
208 | 209 | ||
209 | static ssize_t wakeup_data_show(struct device *dev, | 210 | static ssize_t wakeup_data_show(struct device *dev, |
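The nuvoton-cir hunk above converts a plain spin_lock() into spin_lock_irqsave() in nvt_write_wakeup_codes(): the same nvt->lock is also taken from the interrupt handler, so the process-context path has to disable local interrupts while holding it, otherwise an IRQ landing on the same CPU would deadlock on the already-held lock. A minimal, self-contained sketch of the two sides of that rule (hw_lock and hw_fifo_level are placeholders, not driver symbols):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(hw_lock);
static unsigned int hw_fifo_level;

/* process context, e.g. a sysfs store handler */
static void queue_wakeup_data(unsigned int count)
{
	unsigned long flags;

	spin_lock_irqsave(&hw_lock, flags);	/* also masks local IRQs */
	hw_fifo_level = count;
	spin_unlock_irqrestore(&hw_lock, flags);
}

/* hard interrupt context: local IRQs are already disabled here */
static irqreturn_t hw_irq(int irq, void *dev_id)
{
	spin_lock(&hw_lock);
	hw_fifo_level = 0;
	spin_unlock(&hw_lock);

	return IRQ_HANDLED;
}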
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 2424946740e6..d84533699668 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c | |||
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev) | |||
1663 | { | 1663 | { |
1664 | int rc; | 1664 | int rc; |
1665 | struct rc_map *rc_map; | 1665 | struct rc_map *rc_map; |
1666 | u64 rc_type; | ||
1666 | 1667 | ||
1667 | if (!dev->map_name) | 1668 | if (!dev->map_name) |
1668 | return -EINVAL; | 1669 | return -EINVAL; |
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev) | |||
1677 | if (rc) | 1678 | if (rc) |
1678 | return rc; | 1679 | return rc; |
1679 | 1680 | ||
1680 | if (dev->change_protocol) { | 1681 | rc_type = BIT_ULL(rc_map->rc_type); |
1681 | u64 rc_type = (1ll << rc_map->rc_type); | ||
1682 | 1682 | ||
1683 | if (dev->change_protocol) { | ||
1683 | rc = dev->change_protocol(dev, &rc_type); | 1684 | rc = dev->change_protocol(dev, &rc_type); |
1684 | if (rc < 0) | 1685 | if (rc < 0) |
1685 | goto out_table; | 1686 | goto out_table; |
1686 | dev->enabled_protocols = rc_type; | 1687 | dev->enabled_protocols = rc_type; |
1687 | } | 1688 | } |
1688 | 1689 | ||
1690 | if (dev->driver_type == RC_DRIVER_IR_RAW) | ||
1691 | ir_raw_load_modules(&rc_type); | ||
1692 | |||
1689 | set_bit(EV_KEY, dev->input_dev->evbit); | 1693 | set_bit(EV_KEY, dev->input_dev->evbit); |
1690 | set_bit(EV_REP, dev->input_dev->evbit); | 1694 | set_bit(EV_REP, dev->input_dev->evbit); |
1691 | set_bit(EV_MSC, dev->input_dev->evbit); | 1695 | set_bit(EV_MSC, dev->input_dev->evbit); |
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev) | |||
1777 | dev->input_name ?: "Unspecified device", path ?: "N/A"); | 1781 | dev->input_name ?: "Unspecified device", path ?: "N/A"); |
1778 | kfree(path); | 1782 | kfree(path); |
1779 | 1783 | ||
1780 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | ||
1781 | rc = rc_setup_rx_device(dev); | ||
1782 | if (rc) | ||
1783 | goto out_dev; | ||
1784 | } | ||
1785 | |||
1786 | if (dev->driver_type == RC_DRIVER_IR_RAW || | 1784 | if (dev->driver_type == RC_DRIVER_IR_RAW || |
1787 | dev->driver_type == RC_DRIVER_IR_RAW_TX) { | 1785 | dev->driver_type == RC_DRIVER_IR_RAW_TX) { |
1788 | if (!raw_init) { | 1786 | if (!raw_init) { |
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev) | |||
1791 | } | 1789 | } |
1792 | rc = ir_raw_event_register(dev); | 1790 | rc = ir_raw_event_register(dev); |
1793 | if (rc < 0) | 1791 | if (rc < 0) |
1794 | goto out_rx; | 1792 | goto out_dev; |
1793 | } | ||
1794 | |||
1795 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | ||
1796 | rc = rc_setup_rx_device(dev); | ||
1797 | if (rc) | ||
1798 | goto out_raw; | ||
1795 | } | 1799 | } |
1796 | 1800 | ||
1797 | /* Allow the RC sysfs nodes to be accessible */ | 1801 | /* Allow the RC sysfs nodes to be accessible */ |
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev) | |||
1803 | 1807 | ||
1804 | return 0; | 1808 | return 0; |
1805 | 1809 | ||
1806 | out_rx: | 1810 | out_raw: |
1807 | rc_free_rx_device(dev); | 1811 | ir_raw_event_unregister(dev); |
1808 | out_dev: | 1812 | out_dev: |
1809 | device_del(&dev->dev); | 1813 | device_del(&dev->dev); |
1810 | out_unlock: | 1814 | out_unlock: |
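The rc-main hunk above reorders registration so rc_setup_rx_device() runs after ir_raw_event_register(), and the error labels now unwind in exactly the reverse order of setup (out_raw calls ir_raw_event_unregister() before falling through to device_del()). A minimal sketch of that goto-based unwinding pattern; step_a/step_b/step_c and struct thing are stand-ins, not rc-core functions:

#include <linux/types.h>
#include <linux/errno.h>

struct thing {
	bool a_done;
	bool b_done;
};

static int step_a(struct thing *t)  { t->a_done = true;  return 0; }
static void undo_a(struct thing *t) { t->a_done = false; }
static int step_b(struct thing *t)  { t->b_done = true;  return 0; }
static void undo_b(struct thing *t) { t->b_done = false; }
static int step_c(struct thing *t)  { return t->a_done && t->b_done ? 0 : -EINVAL; }

static int register_everything(struct thing *t)
{
	int rc;

	rc = step_a(t);			/* e.g. device_add()            */
	if (rc)
		return rc;

	rc = step_b(t);			/* e.g. ir_raw_event_register() */
	if (rc)
		goto out_a;

	rc = step_c(t);			/* e.g. rc_setup_rx_device()    */
	if (rc)
		goto out_b;

	return 0;

out_b:
	undo_b(t);			/* undo in reverse order of setup */
out_a:
	undo_a(t);
	return rc;
}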
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 923fb2299553..41b54e40176c 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c | |||
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg) | |||
487 | ir_raw_event_handle(serial_ir.rcdev); | 487 | ir_raw_event_handle(serial_ir.rcdev); |
488 | } | 488 | } |
489 | 489 | ||
490 | /* Needed by serial_ir_probe() */ | ||
491 | static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf, | ||
492 | unsigned int count); | ||
493 | static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle); | ||
494 | static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier); | ||
495 | static int serial_ir_open(struct rc_dev *rcdev); | ||
496 | static void serial_ir_close(struct rc_dev *rcdev); | ||
497 | |||
490 | static int serial_ir_probe(struct platform_device *dev) | 498 | static int serial_ir_probe(struct platform_device *dev) |
491 | { | 499 | { |
500 | struct rc_dev *rcdev; | ||
492 | int i, nlow, nhigh, result; | 501 | int i, nlow, nhigh, result; |
493 | 502 | ||
503 | rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW); | ||
504 | if (!rcdev) | ||
505 | return -ENOMEM; | ||
506 | |||
507 | if (hardware[type].send_pulse && hardware[type].send_space) | ||
508 | rcdev->tx_ir = serial_ir_tx; | ||
509 | if (hardware[type].set_send_carrier) | ||
510 | rcdev->s_tx_carrier = serial_ir_tx_carrier; | ||
511 | if (hardware[type].set_duty_cycle) | ||
512 | rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; | ||
513 | |||
514 | switch (type) { | ||
515 | case IR_HOMEBREW: | ||
516 | rcdev->input_name = "Serial IR type home-brew"; | ||
517 | break; | ||
518 | case IR_IRDEO: | ||
519 | rcdev->input_name = "Serial IR type IRdeo"; | ||
520 | break; | ||
521 | case IR_IRDEO_REMOTE: | ||
522 | rcdev->input_name = "Serial IR type IRdeo remote"; | ||
523 | break; | ||
524 | case IR_ANIMAX: | ||
525 | rcdev->input_name = "Serial IR type AnimaX"; | ||
526 | break; | ||
527 | case IR_IGOR: | ||
528 | rcdev->input_name = "Serial IR type IgorPlug"; | ||
529 | break; | ||
530 | } | ||
531 | |||
532 | rcdev->input_phys = KBUILD_MODNAME "/input0"; | ||
533 | rcdev->input_id.bustype = BUS_HOST; | ||
534 | rcdev->input_id.vendor = 0x0001; | ||
535 | rcdev->input_id.product = 0x0001; | ||
536 | rcdev->input_id.version = 0x0100; | ||
537 | rcdev->open = serial_ir_open; | ||
538 | rcdev->close = serial_ir_close; | ||
539 | rcdev->dev.parent = &serial_ir.pdev->dev; | ||
540 | rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; | ||
541 | rcdev->driver_name = KBUILD_MODNAME; | ||
542 | rcdev->map_name = RC_MAP_RC6_MCE; | ||
543 | rcdev->min_timeout = 1; | ||
544 | rcdev->timeout = IR_DEFAULT_TIMEOUT; | ||
545 | rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; | ||
546 | rcdev->rx_resolution = 250000; | ||
547 | |||
548 | serial_ir.rcdev = rcdev; | ||
549 | |||
550 | setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, | ||
551 | (unsigned long)&serial_ir); | ||
552 | |||
494 | result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, | 553 | result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, |
495 | share_irq ? IRQF_SHARED : 0, | 554 | share_irq ? IRQF_SHARED : 0, |
496 | KBUILD_MODNAME, &hardware); | 555 | KBUILD_MODNAME, &hardware); |
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev) | |||
516 | return -EBUSY; | 575 | return -EBUSY; |
517 | } | 576 | } |
518 | 577 | ||
519 | setup_timer(&serial_ir.timeout_timer, serial_ir_timeout, | ||
520 | (unsigned long)&serial_ir); | ||
521 | |||
522 | result = hardware_init_port(); | 578 | result = hardware_init_port(); |
523 | if (result < 0) | 579 | if (result < 0) |
524 | return result; | 580 | return result; |
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev) | |||
552 | sense ? "low" : "high"); | 608 | sense ? "low" : "high"); |
553 | 609 | ||
554 | dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); | 610 | dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); |
555 | return 0; | 611 | |
612 | return devm_rc_register_device(&dev->dev, rcdev); | ||
556 | } | 613 | } |
557 | 614 | ||
558 | static int serial_ir_open(struct rc_dev *rcdev) | 615 | static int serial_ir_open(struct rc_dev *rcdev) |
@@ -723,7 +780,6 @@ static void serial_ir_exit(void) | |||
723 | 780 | ||
724 | static int __init serial_ir_init_module(void) | 781 | static int __init serial_ir_init_module(void) |
725 | { | 782 | { |
726 | struct rc_dev *rcdev; | ||
727 | int result; | 783 | int result; |
728 | 784 | ||
729 | switch (type) { | 785 | switch (type) { |
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void) | |||
754 | sense = !!sense; | 810 | sense = !!sense; |
755 | 811 | ||
756 | result = serial_ir_init(); | 812 | result = serial_ir_init(); |
757 | if (result) | ||
758 | return result; | ||
759 | |||
760 | rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW); | ||
761 | if (!rcdev) { | ||
762 | result = -ENOMEM; | ||
763 | goto serial_cleanup; | ||
764 | } | ||
765 | |||
766 | if (hardware[type].send_pulse && hardware[type].send_space) | ||
767 | rcdev->tx_ir = serial_ir_tx; | ||
768 | if (hardware[type].set_send_carrier) | ||
769 | rcdev->s_tx_carrier = serial_ir_tx_carrier; | ||
770 | if (hardware[type].set_duty_cycle) | ||
771 | rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle; | ||
772 | |||
773 | switch (type) { | ||
774 | case IR_HOMEBREW: | ||
775 | rcdev->input_name = "Serial IR type home-brew"; | ||
776 | break; | ||
777 | case IR_IRDEO: | ||
778 | rcdev->input_name = "Serial IR type IRdeo"; | ||
779 | break; | ||
780 | case IR_IRDEO_REMOTE: | ||
781 | rcdev->input_name = "Serial IR type IRdeo remote"; | ||
782 | break; | ||
783 | case IR_ANIMAX: | ||
784 | rcdev->input_name = "Serial IR type AnimaX"; | ||
785 | break; | ||
786 | case IR_IGOR: | ||
787 | rcdev->input_name = "Serial IR type IgorPlug"; | ||
788 | break; | ||
789 | } | ||
790 | |||
791 | rcdev->input_phys = KBUILD_MODNAME "/input0"; | ||
792 | rcdev->input_id.bustype = BUS_HOST; | ||
793 | rcdev->input_id.vendor = 0x0001; | ||
794 | rcdev->input_id.product = 0x0001; | ||
795 | rcdev->input_id.version = 0x0100; | ||
796 | rcdev->open = serial_ir_open; | ||
797 | rcdev->close = serial_ir_close; | ||
798 | rcdev->dev.parent = &serial_ir.pdev->dev; | ||
799 | rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER; | ||
800 | rcdev->driver_name = KBUILD_MODNAME; | ||
801 | rcdev->map_name = RC_MAP_RC6_MCE; | ||
802 | rcdev->min_timeout = 1; | ||
803 | rcdev->timeout = IR_DEFAULT_TIMEOUT; | ||
804 | rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; | ||
805 | rcdev->rx_resolution = 250000; | ||
806 | |||
807 | serial_ir.rcdev = rcdev; | ||
808 | |||
809 | result = rc_register_device(rcdev); | ||
810 | |||
811 | if (!result) | 813 | if (!result) |
812 | return 0; | 814 | return 0; |
813 | serial_cleanup: | 815 | |
814 | serial_ir_exit(); | 816 | serial_ir_exit(); |
815 | return result; | 817 | return result; |
816 | } | 818 | } |
@@ -818,7 +820,6 @@ serial_cleanup: | |||
818 | static void __exit serial_ir_exit_module(void) | 820 | static void __exit serial_ir_exit_module(void) |
819 | { | 821 | { |
820 | del_timer_sync(&serial_ir.timeout_timer); | 822 | del_timer_sync(&serial_ir.timeout_timer); |
821 | rc_unregister_device(serial_ir.rcdev); | ||
822 | serial_ir_exit(); | 823 | serial_ir_exit(); |
823 | } | 824 | } |
824 | 825 | ||
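The serial_ir hunks above move rc_dev allocation and registration into probe() via the devm_ helpers (devm_rc_allocate_device()/devm_rc_register_device()), which is why the module exit path no longer calls rc_unregister_device() by hand: device-managed resources are released automatically when the device is unbound. A minimal, generic platform-driver skeleton showing the same devm_ pattern; it is not serial_ir itself and all names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct skel_priv {
	int irq;
};

static irqreturn_t skel_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int skel_probe(struct platform_device *pdev)
{
	struct skel_priv *priv;
	int ret;

	/* freed automatically on unbind or probe failure */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* released automatically as well, so no remove() teardown needed */
	ret = devm_request_irq(&pdev->dev, priv->irq, skel_irq, 0,
			       KBUILD_MODNAME, priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static struct platform_driver skel_driver = {
	.probe	= skel_probe,
	.driver	= { .name = "skel" },
};
module_platform_driver(skel_driver);

MODULE_LICENSE("GPL");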
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 6ca502d834b4..4f42d57f81d9 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c | |||
@@ -68,6 +68,7 @@ | |||
68 | struct dw2102_state { | 68 | struct dw2102_state { |
69 | u8 initialized; | 69 | u8 initialized; |
70 | u8 last_lock; | 70 | u8 last_lock; |
71 | u8 data[MAX_XFER_SIZE + 4]; | ||
71 | struct i2c_client *i2c_client_demod; | 72 | struct i2c_client *i2c_client_demod; |
72 | struct i2c_client *i2c_client_tuner; | 73 | struct i2c_client *i2c_client_tuner; |
73 | 74 | ||
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], | |||
661 | int num) | 662 | int num) |
662 | { | 663 | { |
663 | struct dvb_usb_device *d = i2c_get_adapdata(adap); | 664 | struct dvb_usb_device *d = i2c_get_adapdata(adap); |
664 | u8 obuf[0x40], ibuf[0x40]; | 665 | struct dw2102_state *state; |
665 | 666 | ||
666 | if (!d) | 667 | if (!d) |
667 | return -ENODEV; | 668 | return -ENODEV; |
669 | |||
670 | state = d->priv; | ||
671 | |||
668 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) | 672 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) |
669 | return -EAGAIN; | 673 | return -EAGAIN; |
674 | if (mutex_lock_interruptible(&d->data_mutex) < 0) { | ||
675 | mutex_unlock(&d->i2c_mutex); | ||
676 | return -EAGAIN; | ||
677 | } | ||
670 | 678 | ||
671 | switch (num) { | 679 | switch (num) { |
672 | case 1: | 680 | case 1: |
673 | switch (msg[0].addr) { | 681 | switch (msg[0].addr) { |
674 | case SU3000_STREAM_CTRL: | 682 | case SU3000_STREAM_CTRL: |
675 | obuf[0] = msg[0].buf[0] + 0x36; | 683 | state->data[0] = msg[0].buf[0] + 0x36; |
676 | obuf[1] = 3; | 684 | state->data[1] = 3; |
677 | obuf[2] = 0; | 685 | state->data[2] = 0; |
678 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) | 686 | if (dvb_usb_generic_rw(d, state->data, 3, |
687 | state->data, 0, 0) < 0) | ||
679 | err("i2c transfer failed."); | 688 | err("i2c transfer failed."); |
680 | break; | 689 | break; |
681 | case DW2102_RC_QUERY: | 690 | case DW2102_RC_QUERY: |
682 | obuf[0] = 0x10; | 691 | state->data[0] = 0x10; |
683 | if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) | 692 | if (dvb_usb_generic_rw(d, state->data, 1, |
693 | state->data, 2, 0) < 0) | ||
684 | err("i2c transfer failed."); | 694 | err("i2c transfer failed."); |
685 | msg[0].buf[1] = ibuf[0]; | 695 | msg[0].buf[1] = state->data[0]; |
686 | msg[0].buf[0] = ibuf[1]; | 696 | msg[0].buf[0] = state->data[1]; |
687 | break; | 697 | break; |
688 | default: | 698 | default: |
689 | /* always i2c write*/ | 699 | /* always i2c write*/ |
690 | obuf[0] = 0x08; | 700 | state->data[0] = 0x08; |
691 | obuf[1] = msg[0].addr; | 701 | state->data[1] = msg[0].addr; |
692 | obuf[2] = msg[0].len; | 702 | state->data[2] = msg[0].len; |
693 | 703 | ||
694 | memcpy(&obuf[3], msg[0].buf, msg[0].len); | 704 | memcpy(&state->data[3], msg[0].buf, msg[0].len); |
695 | 705 | ||
696 | if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, | 706 | if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3, |
697 | ibuf, 1, 0) < 0) | 707 | state->data, 1, 0) < 0) |
698 | err("i2c transfer failed."); | 708 | err("i2c transfer failed."); |
699 | 709 | ||
700 | } | 710 | } |
701 | break; | 711 | break; |
702 | case 2: | 712 | case 2: |
703 | /* always i2c read */ | 713 | /* always i2c read */ |
704 | obuf[0] = 0x09; | 714 | state->data[0] = 0x09; |
705 | obuf[1] = msg[0].len; | 715 | state->data[1] = msg[0].len; |
706 | obuf[2] = msg[1].len; | 716 | state->data[2] = msg[1].len; |
707 | obuf[3] = msg[0].addr; | 717 | state->data[3] = msg[0].addr; |
708 | memcpy(&obuf[4], msg[0].buf, msg[0].len); | 718 | memcpy(&state->data[4], msg[0].buf, msg[0].len); |
709 | 719 | ||
710 | if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, | 720 | if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4, |
711 | ibuf, msg[1].len + 1, 0) < 0) | 721 | state->data, msg[1].len + 1, 0) < 0) |
712 | err("i2c transfer failed."); | 722 | err("i2c transfer failed."); |
713 | 723 | ||
714 | memcpy(msg[1].buf, &ibuf[1], msg[1].len); | 724 | memcpy(msg[1].buf, &state->data[1], msg[1].len); |
715 | break; | 725 | break; |
716 | default: | 726 | default: |
717 | warn("more than 2 i2c messages at a time is not handled yet."); | 727 | warn("more than 2 i2c messages at a time is not handled yet."); |
718 | break; | 728 | break; |
719 | } | 729 | } |
730 | mutex_unlock(&d->data_mutex); | ||
720 | mutex_unlock(&d->i2c_mutex); | 731 | mutex_unlock(&d->i2c_mutex); |
721 | return num; | 732 | return num; |
722 | } | 733 | } |
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | |||
844 | static int su3000_power_ctrl(struct dvb_usb_device *d, int i) | 855 | static int su3000_power_ctrl(struct dvb_usb_device *d, int i) |
845 | { | 856 | { |
846 | struct dw2102_state *state = (struct dw2102_state *)d->priv; | 857 | struct dw2102_state *state = (struct dw2102_state *)d->priv; |
847 | u8 obuf[] = {0xde, 0}; | 858 | int ret = 0; |
848 | 859 | ||
849 | info("%s: %d, initialized %d", __func__, i, state->initialized); | 860 | info("%s: %d, initialized %d", __func__, i, state->initialized); |
850 | 861 | ||
851 | if (i && !state->initialized) { | 862 | if (i && !state->initialized) { |
863 | mutex_lock(&d->data_mutex); | ||
864 | |||
865 | state->data[0] = 0xde; | ||
866 | state->data[1] = 0; | ||
867 | |||
852 | state->initialized = 1; | 868 | state->initialized = 1; |
853 | /* reset board */ | 869 | /* reset board */ |
854 | return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); | 870 | ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0); |
871 | mutex_unlock(&d->data_mutex); | ||
855 | } | 872 | } |
856 | 873 | ||
857 | return 0; | 874 | return ret; |
858 | } | 875 | } |
859 | 876 | ||
860 | static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) | 877 | static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) |
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) | |||
1309 | return 0; | 1326 | return 0; |
1310 | } | 1327 | } |
1311 | 1328 | ||
1312 | static int su3000_frontend_attach(struct dvb_usb_adapter *d) | 1329 | static int su3000_frontend_attach(struct dvb_usb_adapter *adap) |
1313 | { | 1330 | { |
1314 | u8 obuf[3] = { 0xe, 0x80, 0 }; | 1331 | struct dvb_usb_device *d = adap->dev; |
1315 | u8 ibuf[] = { 0 }; | 1332 | struct dw2102_state *state = d->priv; |
1333 | |||
1334 | mutex_lock(&d->data_mutex); | ||
1335 | |||
1336 | state->data[0] = 0xe; | ||
1337 | state->data[1] = 0x80; | ||
1338 | state->data[2] = 0; | ||
1316 | 1339 | ||
1317 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1340 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1318 | err("command 0x0e transfer failed."); | 1341 | err("command 0x0e transfer failed."); |
1319 | 1342 | ||
1320 | obuf[0] = 0xe; | 1343 | state->data[0] = 0xe; |
1321 | obuf[1] = 0x02; | 1344 | state->data[1] = 0x02; |
1322 | obuf[2] = 1; | 1345 | state->data[2] = 1; |
1323 | 1346 | ||
1324 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1347 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1325 | err("command 0x0e transfer failed."); | 1348 | err("command 0x0e transfer failed."); |
1326 | msleep(300); | 1349 | msleep(300); |
1327 | 1350 | ||
1328 | obuf[0] = 0xe; | 1351 | state->data[0] = 0xe; |
1329 | obuf[1] = 0x83; | 1352 | state->data[1] = 0x83; |
1330 | obuf[2] = 0; | 1353 | state->data[2] = 0; |
1331 | 1354 | ||
1332 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1355 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1333 | err("command 0x0e transfer failed."); | 1356 | err("command 0x0e transfer failed."); |
1334 | 1357 | ||
1335 | obuf[0] = 0xe; | 1358 | state->data[0] = 0xe; |
1336 | obuf[1] = 0x83; | 1359 | state->data[1] = 0x83; |
1337 | obuf[2] = 1; | 1360 | state->data[2] = 1; |
1338 | 1361 | ||
1339 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1362 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1340 | err("command 0x0e transfer failed."); | 1363 | err("command 0x0e transfer failed."); |
1341 | 1364 | ||
1342 | obuf[0] = 0x51; | 1365 | state->data[0] = 0x51; |
1343 | 1366 | ||
1344 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1367 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
1345 | err("command 0x51 transfer failed."); | 1368 | err("command 0x51 transfer failed."); |
1346 | 1369 | ||
1347 | d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, | 1370 | mutex_unlock(&d->data_mutex); |
1348 | &d->dev->i2c_adap); | 1371 | |
1349 | if (d->fe_adap[0].fe == NULL) | 1372 | adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, |
1373 | &d->i2c_adap); | ||
1374 | if (adap->fe_adap[0].fe == NULL) | ||
1350 | return -EIO; | 1375 | return -EIO; |
1351 | 1376 | ||
1352 | if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, | 1377 | if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, |
1353 | &dw2104_ts2020_config, | 1378 | &dw2104_ts2020_config, |
1354 | &d->dev->i2c_adap)) { | 1379 | &d->i2c_adap)) { |
1355 | info("Attached DS3000/TS2020!"); | 1380 | info("Attached DS3000/TS2020!"); |
1356 | return 0; | 1381 | return 0; |
1357 | } | 1382 | } |
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d) | |||
1360 | return -EIO; | 1385 | return -EIO; |
1361 | } | 1386 | } |
1362 | 1387 | ||
1363 | static int t220_frontend_attach(struct dvb_usb_adapter *d) | 1388 | static int t220_frontend_attach(struct dvb_usb_adapter *adap) |
1364 | { | 1389 | { |
1365 | u8 obuf[3] = { 0xe, 0x87, 0 }; | 1390 | struct dvb_usb_device *d = adap->dev; |
1366 | u8 ibuf[] = { 0 }; | 1391 | struct dw2102_state *state = d->priv; |
1392 | |||
1393 | mutex_lock(&d->data_mutex); | ||
1367 | 1394 | ||
1368 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1395 | state->data[0] = 0xe; |
1396 | state->data[1] = 0x87; | ||
1397 | state->data[2] = 0x0; | ||
1398 | |||
1399 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) | ||
1369 | err("command 0x0e transfer failed."); | 1400 | err("command 0x0e transfer failed."); |
1370 | 1401 | ||
1371 | obuf[0] = 0xe; | 1402 | state->data[0] = 0xe; |
1372 | obuf[1] = 0x86; | 1403 | state->data[1] = 0x86; |
1373 | obuf[2] = 1; | 1404 | state->data[2] = 1; |
1374 | 1405 | ||
1375 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1406 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1376 | err("command 0x0e transfer failed."); | 1407 | err("command 0x0e transfer failed."); |
1377 | 1408 | ||
1378 | obuf[0] = 0xe; | 1409 | state->data[0] = 0xe; |
1379 | obuf[1] = 0x80; | 1410 | state->data[1] = 0x80; |
1380 | obuf[2] = 0; | 1411 | state->data[2] = 0; |
1381 | 1412 | ||
1382 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1413 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1383 | err("command 0x0e transfer failed."); | 1414 | err("command 0x0e transfer failed."); |
1384 | 1415 | ||
1385 | msleep(50); | 1416 | msleep(50); |
1386 | 1417 | ||
1387 | obuf[0] = 0xe; | 1418 | state->data[0] = 0xe; |
1388 | obuf[1] = 0x80; | 1419 | state->data[1] = 0x80; |
1389 | obuf[2] = 1; | 1420 | state->data[2] = 1; |
1390 | 1421 | ||
1391 | if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) | 1422 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1392 | err("command 0x0e transfer failed."); | 1423 | err("command 0x0e transfer failed."); |
1393 | 1424 | ||
1394 | obuf[0] = 0x51; | 1425 | state->data[0] = 0x51; |
1395 | 1426 | ||
1396 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1427 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
1397 | err("command 0x51 transfer failed."); | 1428 | err("command 0x51 transfer failed."); |
1398 | 1429 | ||
1399 | d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, | 1430 | mutex_unlock(&d->data_mutex); |
1400 | &d->dev->i2c_adap, NULL); | 1431 | |
1401 | if (d->fe_adap[0].fe != NULL) { | 1432 | adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, |
1402 | if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, | 1433 | &d->i2c_adap, NULL); |
1403 | &d->dev->i2c_adap, &tda18271_config)) { | 1434 | if (adap->fe_adap[0].fe != NULL) { |
1435 | if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60, | ||
1436 | &d->i2c_adap, &tda18271_config)) { | ||
1404 | info("Attached TDA18271HD/CXD2820R!"); | 1437 | info("Attached TDA18271HD/CXD2820R!"); |
1405 | return 0; | 1438 | return 0; |
1406 | } | 1439 | } |
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d) | |||
1410 | return -EIO; | 1443 | return -EIO; |
1411 | } | 1444 | } |
1412 | 1445 | ||
1413 | static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d) | 1446 | static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap) |
1414 | { | 1447 | { |
1415 | u8 obuf[] = { 0x51 }; | 1448 | struct dvb_usb_device *d = adap->dev; |
1416 | u8 ibuf[] = { 0 }; | 1449 | struct dw2102_state *state = d->priv; |
1450 | |||
1451 | mutex_lock(&d->data_mutex); | ||
1417 | 1452 | ||
1418 | if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) | 1453 | state->data[0] = 0x51; |
1454 | |||
1455 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) | ||
1419 | err("command 0x51 transfer failed."); | 1456 | err("command 0x51 transfer failed."); |
1420 | 1457 | ||
1421 | d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config, | 1458 | mutex_unlock(&d->data_mutex); |
1422 | &d->dev->i2c_adap); | ||
1423 | 1459 | ||
1424 | if (d->fe_adap[0].fe == NULL) | 1460 | adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach, |
1461 | &s421_m88rs2000_config, | ||
1462 | &d->i2c_adap); | ||
1463 | |||
1464 | if (adap->fe_adap[0].fe == NULL) | ||
1425 | return -EIO; | 1465 | return -EIO; |
1426 | 1466 | ||
1427 | if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, | 1467 | if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe, |
1428 | &dw2104_ts2020_config, | 1468 | &dw2104_ts2020_config, |
1429 | &d->dev->i2c_adap)) { | 1469 | &d->i2c_adap)) { |
1430 | info("Attached RS2000/TS2020!"); | 1470 | info("Attached RS2000/TS2020!"); |
1431 | return 0; | 1471 | return 0; |
1432 | } | 1472 | } |
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap) | |||
1439 | { | 1479 | { |
1440 | struct dvb_usb_device *d = adap->dev; | 1480 | struct dvb_usb_device *d = adap->dev; |
1441 | struct dw2102_state *state = d->priv; | 1481 | struct dw2102_state *state = d->priv; |
1442 | u8 obuf[3] = { 0xe, 0x80, 0 }; | ||
1443 | u8 ibuf[] = { 0 }; | ||
1444 | struct i2c_adapter *i2c_adapter; | 1482 | struct i2c_adapter *i2c_adapter; |
1445 | struct i2c_client *client; | 1483 | struct i2c_client *client; |
1446 | struct i2c_board_info board_info; | 1484 | struct i2c_board_info board_info; |
1447 | struct m88ds3103_platform_data m88ds3103_pdata = {}; | 1485 | struct m88ds3103_platform_data m88ds3103_pdata = {}; |
1448 | struct ts2020_config ts2020_config = {}; | 1486 | struct ts2020_config ts2020_config = {}; |
1449 | 1487 | ||
1450 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1488 | mutex_lock(&d->data_mutex); |
1489 | |||
1490 | state->data[0] = 0xe; | ||
1491 | state->data[1] = 0x80; | ||
1492 | state->data[2] = 0x0; | ||
1493 | |||
1494 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) | ||
1451 | err("command 0x0e transfer failed."); | 1495 | err("command 0x0e transfer failed."); |
1452 | 1496 | ||
1453 | obuf[0] = 0xe; | 1497 | state->data[0] = 0xe; |
1454 | obuf[1] = 0x02; | 1498 | state->data[1] = 0x02; |
1455 | obuf[2] = 1; | 1499 | state->data[2] = 1; |
1456 | 1500 | ||
1457 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1501 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1458 | err("command 0x0e transfer failed."); | 1502 | err("command 0x0e transfer failed."); |
1459 | msleep(300); | 1503 | msleep(300); |
1460 | 1504 | ||
1461 | obuf[0] = 0xe; | 1505 | state->data[0] = 0xe; |
1462 | obuf[1] = 0x83; | 1506 | state->data[1] = 0x83; |
1463 | obuf[2] = 0; | 1507 | state->data[2] = 0; |
1464 | 1508 | ||
1465 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1509 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1466 | err("command 0x0e transfer failed."); | 1510 | err("command 0x0e transfer failed."); |
1467 | 1511 | ||
1468 | obuf[0] = 0xe; | 1512 | state->data[0] = 0xe; |
1469 | obuf[1] = 0x83; | 1513 | state->data[1] = 0x83; |
1470 | obuf[2] = 1; | 1514 | state->data[2] = 1; |
1471 | 1515 | ||
1472 | if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0) | 1516 | if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0) |
1473 | err("command 0x0e transfer failed."); | 1517 | err("command 0x0e transfer failed."); |
1474 | 1518 | ||
1475 | obuf[0] = 0x51; | 1519 | state->data[0] = 0x51; |
1476 | 1520 | ||
1477 | if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0) | 1521 | if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0) |
1478 | err("command 0x51 transfer failed."); | 1522 | err("command 0x51 transfer failed."); |
1479 | 1523 | ||
1524 | mutex_unlock(&d->data_mutex); | ||
1525 | |||
1480 | /* attach demod */ | 1526 | /* attach demod */ |
1481 | m88ds3103_pdata.clk = 27000000; | 1527 | m88ds3103_pdata.clk = 27000000; |
1482 | m88ds3103_pdata.i2c_wr_max = 33; | 1528 | m88ds3103_pdata.i2c_wr_max = 33; |
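The dw2102 hunks above replace the on-stack obuf/ibuf arrays with a data[] buffer embedded in the kmalloc'ed device state: stack memory is not guaranteed to be DMA-capable (notably with virtually mapped stacks), and because the buffer is now shared it is serialized with d->data_mutex everywhere it is used. A minimal sketch of that pattern; struct usb_dev_state, do_transfer() and send_command() are illustrative names, not the dvb-usb API:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/errno.h>

#define XFER_SIZE 64

struct usb_dev_state {
	/* mutex_init() is done once when the state is allocated */
	struct mutex data_mutex;
	u8 data[XFER_SIZE];	/* heap-backed, DMA-safe scratch buffer */
};

/* stand-in for the real USB transfer helper */
static int do_transfer(struct usb_dev_state *st, int wlen, int rlen)
{
	return 0;
}

static int send_command(struct usb_dev_state *st, u8 cmd, u8 arg)
{
	int ret;

	mutex_lock(&st->data_mutex);	/* one user of st->data at a time */

	st->data[0] = cmd;
	st->data[1] = arg;
	ret = do_transfer(st, 2, 1);	/* the reply lands in st->data too */

	mutex_unlock(&st->data_mutex);
	return ret;
}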
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 6fb773dbcd0c..93be82fc338a 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
@@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, | |||
219 | int write, unsigned long *paddr, int *pageshift) | 219 | int write, unsigned long *paddr, int *pageshift) |
220 | { | 220 | { |
221 | pgd_t *pgdp; | 221 | pgd_t *pgdp; |
222 | pmd_t *pmdp; | 222 | p4d_t *p4dp; |
223 | pud_t *pudp; | 223 | pud_t *pudp; |
224 | pmd_t *pmdp; | ||
224 | pte_t pte; | 225 | pte_t pte; |
225 | 226 | ||
226 | pgdp = pgd_offset(vma->vm_mm, vaddr); | 227 | pgdp = pgd_offset(vma->vm_mm, vaddr); |
227 | if (unlikely(pgd_none(*pgdp))) | 228 | if (unlikely(pgd_none(*pgdp))) |
228 | goto err; | 229 | goto err; |
229 | 230 | ||
230 | pudp = pud_offset(pgdp, vaddr); | 231 | p4dp = p4d_offset(pgdp, vaddr); |
232 | if (unlikely(p4d_none(*p4dp))) | ||
233 | goto err; | ||
234 | |||
235 | pudp = pud_offset(p4dp, vaddr); | ||
231 | if (unlikely(pud_none(*pudp))) | 236 | if (unlikely(pud_none(*pudp))) |
232 | goto err; | 237 | goto err; |
233 | 238 | ||
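The grufault hunk above adds the p4d level that 5-level page tables introduce between pgd and pud; any code that walks page tables by hand now goes pgd -> p4d -> pud -> pmd -> pte, and on configurations where a level is folded the extra step compiles down to nearly nothing. A minimal sketch of the full walk, with huge-page handling and locking omitted (walk_to_pte is an illustrative name):

#include <linux/mm.h>
#include <linux/errno.h>

static int walk_to_pte(struct mm_struct *mm, unsigned long vaddr, pte_t *out)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, vaddr);
	if (pgd_none(*pgd))
		return -EFAULT;

	p4d = p4d_offset(pgd, vaddr);		/* new level */
	if (p4d_none(*p4d))
		return -EFAULT;

	pud = pud_offset(p4d, vaddr);		/* now takes the p4d entry */
	if (pud_none(*pud))
		return -EFAULT;

	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd))
		return -EFAULT;

	*out = *pte_offset_kernel(pmd, vaddr);	/* leaf entry */
	return 0;
}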
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1ae872bfc3ba..747645c74134 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor) | |||
186 | } | 186 | } |
187 | 187 | ||
188 | /* | 188 | /* |
189 | * Send write disble instruction to the chip. | 189 | * Send write disable instruction to the chip. |
190 | */ | 190 | */ |
191 | static inline int write_disable(struct spi_nor *nor) | 191 | static inline int write_disable(struct spi_nor *nor) |
192 | { | 192 | { |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 6d31f92ef2b6..84ac50f92c9c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp { | |||
1162 | struct ib_mac_iocb_rsp { | 1162 | struct ib_mac_iocb_rsp { |
1163 | u8 opcode; /* 0x20 */ | 1163 | u8 opcode; /* 0x20 */ |
1164 | u8 flags1; | 1164 | u8 flags1; |
1165 | #define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ | 1165 | #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ |
1166 | #define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ | 1166 | #define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ |
1167 | #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ | 1167 | #define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ |
1168 | #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ | 1168 | #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ |
1169 | #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ | 1169 | #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ |
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 993b650ef275..44f774c12fb2 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c | |||
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | |||
132 | struct device *dev = pci->dev; | 132 | struct device *dev = pci->dev; |
133 | struct resource *res; | 133 | struct resource *res; |
134 | 134 | ||
135 | /* If using the PHY framework, doesn't need to get other resource */ | ||
136 | if (ep->using_phy) | ||
137 | return 0; | ||
138 | |||
139 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); | 135 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); |
140 | if (!ep->mem_res) | 136 | if (!ep->mem_res) |
141 | return -ENOMEM; | 137 | return -ENOMEM; |
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | |||
145 | if (IS_ERR(ep->mem_res->elbi_base)) | 141 | if (IS_ERR(ep->mem_res->elbi_base)) |
146 | return PTR_ERR(ep->mem_res->elbi_base); | 142 | return PTR_ERR(ep->mem_res->elbi_base); |
147 | 143 | ||
144 | /* If using the PHY framework, doesn't need to get other resource */ | ||
145 | if (ep->using_phy) | ||
146 | return 0; | ||
147 | |||
148 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 148 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
149 | ep->mem_res->phy_base = devm_ioremap_resource(dev, res); | 149 | ep->mem_res->phy_base = devm_ioremap_resource(dev, res); |
150 | if (IS_ERR(ep->mem_res->phy_base)) | 150 | if (IS_ERR(ep->mem_res->phy_base)) |
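The pci-exynos hunk above moves the "using the PHY framework" early return to after the elbi region has been mapped, because that region is needed in both modes and only the remaining resources are legacy-only. A minimal sketch of the resulting ordering; struct ep_ctx and get_mem_resources() are illustrative, not the driver's types:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

struct ep_ctx {
	bool using_phy;
	void __iomem *elbi_base;	/* needed in both modes */
	void __iomem *phy_base;		/* only without the PHY framework */
};

static int get_mem_resources(struct platform_device *pdev, struct ep_ctx *ep)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ep->elbi_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ep->elbi_base))
		return PTR_ERR(ep->elbi_base);

	/* everything below is only needed in the legacy (non-PHY) case */
	if (ep->using_phy)
		return 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ep->phy_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ep->phy_base))
		return PTR_ERR(ep->phy_base);

	return 0;
}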
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 973472c23d89..1dfa10cc566b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, | |||
478 | 478 | ||
479 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | 479 | static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) |
480 | { | 480 | { |
481 | struct pci_dev *child, *parent = link->pdev; | 481 | struct pci_dev *child = link->downstream, *parent = link->pdev; |
482 | struct pci_bus *linkbus = parent->subordinate; | 482 | struct pci_bus *linkbus = parent->subordinate; |
483 | struct aspm_register_info upreg, dwreg; | 483 | struct aspm_register_info upreg, dwreg; |
484 | 484 | ||
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) | |||
491 | 491 | ||
492 | /* Get upstream/downstream components' register state */ | 492 | /* Get upstream/downstream components' register state */ |
493 | pcie_get_aspm_reg(parent, &upreg); | 493 | pcie_get_aspm_reg(parent, &upreg); |
494 | child = pci_function_0(linkbus); | ||
495 | pcie_get_aspm_reg(child, &dwreg); | 494 | pcie_get_aspm_reg(child, &dwreg); |
496 | link->downstream = child; | ||
497 | 495 | ||
498 | /* | 496 | /* |
499 | * If ASPM not supported, don't mess with the clocks and link, | 497 | * If ASPM not supported, don't mess with the clocks and link, |
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) | |||
800 | INIT_LIST_HEAD(&link->children); | 798 | INIT_LIST_HEAD(&link->children); |
801 | INIT_LIST_HEAD(&link->link); | 799 | INIT_LIST_HEAD(&link->link); |
802 | link->pdev = pdev; | 800 | link->pdev = pdev; |
801 | link->downstream = pci_function_0(pdev->subordinate); | ||
803 | 802 | ||
804 | /* | 803 | /* |
805 | * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe | 804 | * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f754453fe754..673683660b5c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); | |||
2174 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); | 2174 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); |
2175 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, | 2175 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, |
2176 | quirk_blacklist_vpd); | 2176 | quirk_blacklist_vpd); |
2177 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); | ||
2177 | 2178 | ||
2178 | /* | 2179 | /* |
2179 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the | 2180 | * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index f8e9e1c2b2f6..c978be5eb9eb 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
@@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in | |||
422 | return 0; | 422 | return 0; |
423 | } | 423 | } |
424 | 424 | ||
425 | static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) | ||
426 | { | ||
427 | struct msm_pinctrl *pctrl = gpiochip_get_data(chip); | ||
428 | const struct msm_pingroup *g; | ||
429 | u32 val; | ||
430 | |||
431 | g = &pctrl->soc->groups[offset]; | ||
432 | |||
433 | val = readl(pctrl->regs + g->ctl_reg); | ||
434 | |||
435 | /* 0 = output, 1 = input */ | ||
436 | return val & BIT(g->oe_bit) ? 0 : 1; | ||
437 | } | ||
438 | |||
425 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) | 439 | static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) |
426 | { | 440 | { |
427 | const struct msm_pingroup *g; | 441 | const struct msm_pingroup *g; |
@@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) | |||
510 | static struct gpio_chip msm_gpio_template = { | 524 | static struct gpio_chip msm_gpio_template = { |
511 | .direction_input = msm_gpio_direction_input, | 525 | .direction_input = msm_gpio_direction_input, |
512 | .direction_output = msm_gpio_direction_output, | 526 | .direction_output = msm_gpio_direction_output, |
527 | .get_direction = msm_gpio_get_direction, | ||
513 | .get = msm_gpio_get, | 528 | .get = msm_gpio_get, |
514 | .set = msm_gpio_set, | 529 | .set = msm_gpio_set, |
515 | .request = gpiochip_generic_request, | 530 | .request = gpiochip_generic_request, |
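The pinctrl-msm hunk above adds a ->get_direction() callback to the gpio_chip; gpiolib's convention for this hook at the time of this commit is to return 0 for a line configured as output and 1 for input, which is what the output-enable bit test implements. A minimal sketch of such a callback; the register layout, CTL_OE_BIT and the my_gpio_* names are illustrative:

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct my_gpio {
	struct gpio_chip chip;
	void __iomem *regs;
};

#define CTL_REG(offset)	((offset) * 0x10)
#define CTL_OE_BIT	9

static int my_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct my_gpio *g = gpiochip_get_data(chip);
	u32 val = readl(g->regs + CTL_REG(offset));

	/* 0 = output, 1 = input */
	return val & BIT(CTL_OE_BIT) ? 0 : 1;
}

/* wired into the chip template as in the hunk: .get_direction = my_gpio_get_direction */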
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c index 77a0236ee781..83f8864fa76a 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c | |||
@@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = { | |||
390 | UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, | 390 | UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140, |
391 | 140, UNIPHIER_PIN_DRV_1BIT, | 391 | 140, UNIPHIER_PIN_DRV_1BIT, |
392 | 140, UNIPHIER_PIN_PULL_DOWN), | 392 | 140, UNIPHIER_PIN_PULL_DOWN), |
393 | UNIPHIER_PINCTRL_PIN(141, "TCON0", 141, | 393 | UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141, |
394 | 141, UNIPHIER_PIN_DRV_1BIT, | 394 | 141, UNIPHIER_PIN_DRV_1BIT, |
395 | 141, UNIPHIER_PIN_PULL_DOWN), | 395 | 141, UNIPHIER_PIN_PULL_DOWN), |
396 | UNIPHIER_PINCTRL_PIN(142, "TCON1", 142, | 396 | UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142, |
397 | 142, UNIPHIER_PIN_DRV_1BIT, | 397 | 142, UNIPHIER_PIN_DRV_1BIT, |
398 | 142, UNIPHIER_PIN_PULL_DOWN), | 398 | 142, UNIPHIER_PIN_PULL_DOWN), |
399 | UNIPHIER_PINCTRL_PIN(143, "TCON2", 143, | 399 | UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143, |
400 | 143, UNIPHIER_PIN_DRV_1BIT, | 400 | 143, UNIPHIER_PIN_DRV_1BIT, |
401 | 143, UNIPHIER_PIN_PULL_DOWN), | 401 | 143, UNIPHIER_PIN_PULL_DOWN), |
402 | UNIPHIER_PINCTRL_PIN(144, "TCON3", 144, | 402 | UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144, |
403 | 144, UNIPHIER_PIN_DRV_1BIT, | 403 | 144, UNIPHIER_PIN_DRV_1BIT, |
404 | 144, UNIPHIER_PIN_PULL_DOWN), | 404 | 144, UNIPHIER_PIN_PULL_DOWN), |
405 | UNIPHIER_PINCTRL_PIN(145, "TCON4", 145, | 405 | UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145, |
406 | 145, UNIPHIER_PIN_DRV_1BIT, | 406 | 145, UNIPHIER_PIN_DRV_1BIT, |
407 | 145, UNIPHIER_PIN_PULL_DOWN), | 407 | 145, UNIPHIER_PIN_PULL_DOWN), |
408 | UNIPHIER_PINCTRL_PIN(146, "TCON5", 146, | 408 | UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146, |
409 | 146, UNIPHIER_PIN_DRV_1BIT, | 409 | 146, UNIPHIER_PIN_DRV_1BIT, |
410 | 146, UNIPHIER_PIN_PULL_DOWN), | 410 | 146, UNIPHIER_PIN_PULL_DOWN), |
411 | UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, | 411 | UNIPHIER_PINCTRL_PIN(147, "PWMA", 147, |
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 109e2c99e6c1..95d8f25cbcca 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit) | |||
6278 | * does not disable its parity logic prior to | 6278 | * does not disable its parity logic prior to |
6279 | * the start of the reset. This may cause a | 6279 | * the start of the reset. This may cause a |
6280 | * parity error to be detected and thus a | 6280 | * parity error to be detected and thus a |
6281 | * spurious SERR or PERR assertion. Disble | 6281 | * spurious SERR or PERR assertion. Disable |
6282 | * PERR and SERR responses during the CHIPRST. | 6282 | * PERR and SERR responses during the CHIPRST. |
6283 | */ | 6283 | */ |
6284 | mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); | 6284 | mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index c7839f6c35cc..d277e8620e3e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -3075,23 +3075,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) | |||
3075 | put_device(&sdkp->dev); | 3075 | put_device(&sdkp->dev); |
3076 | } | 3076 | } |
3077 | 3077 | ||
3078 | struct sd_devt { | ||
3079 | int idx; | ||
3080 | struct disk_devt disk_devt; | ||
3081 | }; | ||
3082 | |||
3083 | static void sd_devt_release(struct disk_devt *disk_devt) | ||
3084 | { | ||
3085 | struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt, | ||
3086 | disk_devt); | ||
3087 | |||
3088 | spin_lock(&sd_index_lock); | ||
3089 | ida_remove(&sd_index_ida, sd_devt->idx); | ||
3090 | spin_unlock(&sd_index_lock); | ||
3091 | |||
3092 | kfree(sd_devt); | ||
3093 | } | ||
3094 | |||
3095 | /** | 3078 | /** |
3096 | * sd_probe - called during driver initialization and whenever a | 3079 | * sd_probe - called during driver initialization and whenever a |
3097 | * new scsi device is attached to the system. It is called once | 3080 | * new scsi device is attached to the system. It is called once |
@@ -3113,7 +3096,6 @@ static void sd_devt_release(struct disk_devt *disk_devt) | |||
3113 | static int sd_probe(struct device *dev) | 3096 | static int sd_probe(struct device *dev) |
3114 | { | 3097 | { |
3115 | struct scsi_device *sdp = to_scsi_device(dev); | 3098 | struct scsi_device *sdp = to_scsi_device(dev); |
3116 | struct sd_devt *sd_devt; | ||
3117 | struct scsi_disk *sdkp; | 3099 | struct scsi_disk *sdkp; |
3118 | struct gendisk *gd; | 3100 | struct gendisk *gd; |
3119 | int index; | 3101 | int index; |
@@ -3139,13 +3121,9 @@ static int sd_probe(struct device *dev) | |||
3139 | if (!sdkp) | 3121 | if (!sdkp) |
3140 | goto out; | 3122 | goto out; |
3141 | 3123 | ||
3142 | sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL); | ||
3143 | if (!sd_devt) | ||
3144 | goto out_free; | ||
3145 | |||
3146 | gd = alloc_disk(SD_MINORS); | 3124 | gd = alloc_disk(SD_MINORS); |
3147 | if (!gd) | 3125 | if (!gd) |
3148 | goto out_free_devt; | 3126 | goto out_free; |
3149 | 3127 | ||
3150 | do { | 3128 | do { |
3151 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) | 3129 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) |
@@ -3161,11 +3139,6 @@ static int sd_probe(struct device *dev) | |||
3161 | goto out_put; | 3139 | goto out_put; |
3162 | } | 3140 | } |
3163 | 3141 | ||
3164 | atomic_set(&sd_devt->disk_devt.count, 1); | ||
3165 | sd_devt->disk_devt.release = sd_devt_release; | ||
3166 | sd_devt->idx = index; | ||
3167 | gd->disk_devt = &sd_devt->disk_devt; | ||
3168 | |||
3169 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); | 3142 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); |
3170 | if (error) { | 3143 | if (error) { |
3171 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); | 3144 | sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); |
@@ -3205,12 +3178,11 @@ static int sd_probe(struct device *dev) | |||
3205 | return 0; | 3178 | return 0; |
3206 | 3179 | ||
3207 | out_free_index: | 3180 | out_free_index: |
3208 | put_disk_devt(&sd_devt->disk_devt); | 3181 | spin_lock(&sd_index_lock); |
3209 | sd_devt = NULL; | 3182 | ida_remove(&sd_index_ida, index); |
3183 | spin_unlock(&sd_index_lock); | ||
3210 | out_put: | 3184 | out_put: |
3211 | put_disk(gd); | 3185 | put_disk(gd); |
3212 | out_free_devt: | ||
3213 | kfree(sd_devt); | ||
3214 | out_free: | 3186 | out_free: |
3215 | kfree(sdkp); | 3187 | kfree(sdkp); |
3216 | out: | 3188 | out: |
@@ -3271,7 +3243,10 @@ static void scsi_disk_release(struct device *dev) | |||
3271 | struct scsi_disk *sdkp = to_scsi_disk(dev); | 3243 | struct scsi_disk *sdkp = to_scsi_disk(dev); |
3272 | struct gendisk *disk = sdkp->disk; | 3244 | struct gendisk *disk = sdkp->disk; |
3273 | 3245 | ||
3274 | put_disk_devt(disk->disk_devt); | 3246 | spin_lock(&sd_index_lock); |
3247 | ida_remove(&sd_index_ida, sdkp->index); | ||
3248 | spin_unlock(&sd_index_lock); | ||
3249 | |||
3275 | disk->private_data = NULL; | 3250 | disk->private_data = NULL; |
3276 | put_disk(disk); | 3251 | put_disk(disk); |
3277 | put_device(&sdkp->device->sdev_gendev); | 3252 | put_device(&sdkp->device->sdev_gendev); |
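The sd hunks above drop the reference-counted sd_devt wrapper and go back to managing the disk index directly with the ida allocator: allocation retries around ida_pre_get()/ida_get_new(), and both that step and ida_remove() run under sd_index_lock because this older ida API is not self-locking. A minimal sketch of that allocate/release pair (index_ida, index_lock and the helper names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDA(index_ida);
static DEFINE_SPINLOCK(index_lock);

static int get_index(int *index)
{
	int error;

	do {
		if (!ida_pre_get(&index_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&index_lock);
		error = ida_get_new(&index_ida, index);
		spin_unlock(&index_lock);
	} while (error == -EAGAIN);	/* preallocated node raced away, retry */

	return error;
}

static void put_index(int index)
{
	spin_lock(&index_lock);
	ida_remove(&index_ida, index);
	spin_unlock(&index_lock);
}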
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 7f8cf875157c..65a285631994 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c | |||
@@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) | |||
336 | if (likely((port < TOTAL_NUMBER_OF_PORTS) && | 336 | if (likely((port < TOTAL_NUMBER_OF_PORTS) && |
337 | cvm_oct_device[port])) { | 337 | cvm_oct_device[port])) { |
338 | struct net_device *dev = cvm_oct_device[port]; | 338 | struct net_device *dev = cvm_oct_device[port]; |
339 | struct octeon_ethernet *priv = netdev_priv(dev); | ||
340 | 339 | ||
341 | /* | 340 | /* |
342 | * Only accept packets for devices that are | 341 | * Only accept packets for devices that are |
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig index e61e4ca064a8..74094fff4367 100644 --- a/drivers/staging/vc04_services/Kconfig +++ b/drivers/staging/vc04_services/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config BCM2835_VCHIQ | 1 | config BCM2835_VCHIQ |
2 | tristate "Videocore VCHIQ" | 2 | tristate "Videocore VCHIQ" |
3 | depends on HAS_DMA | 3 | depends on HAS_DMA |
4 | depends on OF | ||
4 | depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) | 5 | depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) |
5 | default y | 6 | default y |
6 | help | 7 | help |
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 1bacbc3b19a0..e94aea8c0d05 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c | |||
@@ -114,7 +114,7 @@ | |||
114 | #define DEFAULT_TX_BUF_COUNT 3 | 114 | #define DEFAULT_TX_BUF_COUNT 3 |
115 | 115 | ||
116 | struct n_hdlc_buf { | 116 | struct n_hdlc_buf { |
117 | struct n_hdlc_buf *link; | 117 | struct list_head list_item; |
118 | int count; | 118 | int count; |
119 | char buf[1]; | 119 | char buf[1]; |
120 | }; | 120 | }; |
@@ -122,8 +122,7 @@ struct n_hdlc_buf { | |||
122 | #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) | 122 | #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) |
123 | 123 | ||
124 | struct n_hdlc_buf_list { | 124 | struct n_hdlc_buf_list { |
125 | struct n_hdlc_buf *head; | 125 | struct list_head list; |
126 | struct n_hdlc_buf *tail; | ||
127 | int count; | 126 | int count; |
128 | spinlock_t spinlock; | 127 | spinlock_t spinlock; |
129 | }; | 128 | }; |
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list { | |||
136 | * @backup_tty - TTY to use if tty gets closed | 135 | * @backup_tty - TTY to use if tty gets closed |
137 | * @tbusy - reentrancy flag for tx wakeup code | 136 | * @tbusy - reentrancy flag for tx wakeup code |
138 | * @woke_up - FIXME: describe this field | 137 | * @woke_up - FIXME: describe this field |
139 | * @tbuf - currently transmitting tx buffer | ||
140 | * @tx_buf_list - list of pending transmit frame buffers | 138 | * @tx_buf_list - list of pending transmit frame buffers |
141 | * @rx_buf_list - list of received frame buffers | 139 | * @rx_buf_list - list of received frame buffers |
142 | * @tx_free_buf_list - list unused transmit frame buffers | 140 | * @tx_free_buf_list - list unused transmit frame buffers |
@@ -149,7 +147,6 @@ struct n_hdlc { | |||
149 | struct tty_struct *backup_tty; | 147 | struct tty_struct *backup_tty; |
150 | int tbusy; | 148 | int tbusy; |
151 | int woke_up; | 149 | int woke_up; |
152 | struct n_hdlc_buf *tbuf; | ||
153 | struct n_hdlc_buf_list tx_buf_list; | 150 | struct n_hdlc_buf_list tx_buf_list; |
154 | struct n_hdlc_buf_list rx_buf_list; | 151 | struct n_hdlc_buf_list rx_buf_list; |
155 | struct n_hdlc_buf_list tx_free_buf_list; | 152 | struct n_hdlc_buf_list tx_free_buf_list; |
@@ -159,6 +156,8 @@ struct n_hdlc { | |||
159 | /* | 156 | /* |
160 | * HDLC buffer list manipulation functions | 157 | * HDLC buffer list manipulation functions |
161 | */ | 158 | */ |
159 | static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, | ||
160 | struct n_hdlc_buf *buf); | ||
162 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, | 161 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, |
163 | struct n_hdlc_buf *buf); | 162 | struct n_hdlc_buf *buf); |
164 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); | 163 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); |
@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty) | |||
208 | { | 207 | { |
209 | struct n_hdlc *n_hdlc = tty2n_hdlc(tty); | 208 | struct n_hdlc *n_hdlc = tty2n_hdlc(tty); |
210 | struct n_hdlc_buf *buf; | 209 | struct n_hdlc_buf *buf; |
211 | unsigned long flags; | ||
212 | 210 | ||
213 | while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) | 211 | while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) |
214 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); | 212 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); |
215 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags); | ||
216 | if (n_hdlc->tbuf) { | ||
217 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf); | ||
218 | n_hdlc->tbuf = NULL; | ||
219 | } | ||
220 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); | ||
221 | } | 213 | } |
222 | 214 | ||
223 | static struct tty_ldisc_ops n_hdlc_ldisc = { | 215 | static struct tty_ldisc_ops n_hdlc_ldisc = { |
@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) | |||
283 | } else | 275 | } else |
284 | break; | 276 | break; |
285 | } | 277 | } |
286 | kfree(n_hdlc->tbuf); | ||
287 | kfree(n_hdlc); | 278 | kfree(n_hdlc); |
288 | 279 | ||
289 | } /* end of n_hdlc_release() */ | 280 | } /* end of n_hdlc_release() */ |
@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
402 | n_hdlc->woke_up = 0; | 393 | n_hdlc->woke_up = 0; |
403 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); | 394 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); |
404 | 395 | ||
405 | /* get current transmit buffer or get new transmit */ | 396 | tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); |
406 | /* buffer from list of pending transmit buffers */ | ||
407 | |||
408 | tbuf = n_hdlc->tbuf; | ||
409 | if (!tbuf) | ||
410 | tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list); | ||
411 | |||
412 | while (tbuf) { | 397 | while (tbuf) { |
413 | if (debuglevel >= DEBUG_LEVEL_INFO) | 398 | if (debuglevel >= DEBUG_LEVEL_INFO) |
414 | printk("%s(%d)sending frame %p, count=%d\n", | 399 | printk("%s(%d)sending frame %p, count=%d\n", |
@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
420 | 405 | ||
421 | /* rollback was possible and has been done */ | 406 | /* rollback was possible and has been done */ |
422 | if (actual == -ERESTARTSYS) { | 407 | if (actual == -ERESTARTSYS) { |
423 | n_hdlc->tbuf = tbuf; | 408 | n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); |
424 | break; | 409 | break; |
425 | } | 410 | } |
426 | /* if transmit error, throw frame away by */ | 411 | /* if transmit error, throw frame away by */ |
@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
435 | 420 | ||
436 | /* free current transmit buffer */ | 421 | /* free current transmit buffer */ |
437 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); | 422 | n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); |
438 | 423 | ||
439 | /* this tx buffer is done */ | ||
440 | n_hdlc->tbuf = NULL; | ||
441 | |||
442 | /* wait up sleeping writers */ | 424 | /* wait up sleeping writers */ |
443 | wake_up_interruptible(&tty->write_wait); | 425 | wake_up_interruptible(&tty->write_wait); |
444 | 426 | ||
@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
448 | if (debuglevel >= DEBUG_LEVEL_INFO) | 430 | if (debuglevel >= DEBUG_LEVEL_INFO) |
449 | printk("%s(%d)frame %p pending\n", | 431 | printk("%s(%d)frame %p pending\n", |
450 | __FILE__,__LINE__,tbuf); | 432 | __FILE__,__LINE__,tbuf); |
451 | 433 | ||
452 | /* buffer not accepted by driver */ | 434 | /* |
453 | /* set this buffer as pending buffer */ | 435 | * the buffer was not accepted by driver, |
454 | n_hdlc->tbuf = tbuf; | 436 | * return it back into tx queue |
437 | */ | ||
438 | n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf); | ||
455 | break; | 439 | break; |
456 | } | 440 | } |
457 | } | 441 | } |
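The n_hdlc hunks above and below replace the hand-rolled head/tail singly-linked buffer lists with struct list_head queues, and a buffer the tty driver did not accept is now pushed back onto the head of the tx queue (n_hdlc_buf_return) instead of being parked in a separate tbuf pointer. A minimal sketch of such a spinlock-protected queue; the payload and names are simplified relative to the driver:

#include <linux/list.h>
#include <linux/spinlock.h>

struct buf {
	struct list_head list_item;
	int count;
};

struct buf_list {
	struct list_head list;		/* INIT_LIST_HEAD() at setup time */
	int count;
	spinlock_t spinlock;
};

static void buf_put(struct buf_list *bl, struct buf *b)	/* append to tail */
{
	unsigned long flags;

	spin_lock_irqsave(&bl->spinlock, flags);
	list_add_tail(&b->list_item, &bl->list);
	bl->count++;
	spin_unlock_irqrestore(&bl->spinlock, flags);
}

static void buf_return(struct buf_list *bl, struct buf *b)	/* push back to head */
{
	unsigned long flags;

	spin_lock_irqsave(&bl->spinlock, flags);
	list_add(&b->list_item, &bl->list);
	bl->count++;
	spin_unlock_irqrestore(&bl->spinlock, flags);
}

static struct buf *buf_get(struct buf_list *bl)			/* pop from head */
{
	unsigned long flags;
	struct buf *b;

	spin_lock_irqsave(&bl->spinlock, flags);
	b = list_first_entry_or_null(&bl->list, struct buf, list_item);
	if (b) {
		list_del(&b->list_item);
		bl->count--;
	}
	spin_unlock_irqrestore(&bl->spinlock, flags);

	return b;
}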
@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
749 | int error = 0; | 733 | int error = 0; |
750 | int count; | 734 | int count; |
751 | unsigned long flags; | 735 | unsigned long flags; |
752 | 736 | struct n_hdlc_buf *buf = NULL; | |
737 | |||
753 | if (debuglevel >= DEBUG_LEVEL_INFO) | 738 | if (debuglevel >= DEBUG_LEVEL_INFO) |
754 | printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", | 739 | printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", |
755 | __FILE__,__LINE__,cmd); | 740 | __FILE__,__LINE__,cmd); |
@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
763 | /* report count of read data available */ | 748 | /* report count of read data available */ |
764 | /* in next available frame (if any) */ | 749 | /* in next available frame (if any) */ |
765 | spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); | 750 | spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); |
766 | if (n_hdlc->rx_buf_list.head) | 751 | buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list, |
767 | count = n_hdlc->rx_buf_list.head->count; | 752 | struct n_hdlc_buf, list_item); |
753 | if (buf) | ||
754 | count = buf->count; | ||
768 | else | 755 | else |
769 | count = 0; | 756 | count = 0; |
770 | spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); | 757 | spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); |
@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
776 | count = tty_chars_in_buffer(tty); | 763 | count = tty_chars_in_buffer(tty); |
777 | /* add size of next output frame in queue */ | 764 | /* add size of next output frame in queue */ |
778 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); | 765 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); |
779 | if (n_hdlc->tx_buf_list.head) | 766 | buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list, |
780 | count += n_hdlc->tx_buf_list.head->count; | 767 | struct n_hdlc_buf, list_item); |
768 | if (buf) | ||
769 | count += buf->count; | ||
781 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); | 770 | spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); |
782 | error = put_user(count, (int __user *)arg); | 771 | error = put_user(count, (int __user *)arg); |
783 | break; | 772 | break; |
@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, | |||
825 | poll_wait(filp, &tty->write_wait, wait); | 814 | poll_wait(filp, &tty->write_wait, wait); |
826 | 815 | ||
827 | /* set bits for operations that won't block */ | 816 | /* set bits for operations that won't block */ |
828 | if (n_hdlc->rx_buf_list.head) | 817 | if (!list_empty(&n_hdlc->rx_buf_list.list)) |
829 | mask |= POLLIN | POLLRDNORM; /* readable */ | 818 | mask |= POLLIN | POLLRDNORM; /* readable */ |
830 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) | 819 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) |
831 | mask |= POLLHUP; | 820 | mask |= POLLHUP; |
832 | if (tty_hung_up_p(filp)) | 821 | if (tty_hung_up_p(filp)) |
833 | mask |= POLLHUP; | 822 | mask |= POLLHUP; |
834 | if (!tty_is_writelocked(tty) && | 823 | if (!tty_is_writelocked(tty) && |
835 | n_hdlc->tx_free_buf_list.head) | 824 | !list_empty(&n_hdlc->tx_free_buf_list.list)) |
836 | mask |= POLLOUT | POLLWRNORM; /* writable */ | 825 | mask |= POLLOUT | POLLWRNORM; /* writable */ |
837 | } | 826 | } |
838 | return mask; | 827 | return mask; |
@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void) | |||
856 | spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); | 845 | spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); |
857 | spin_lock_init(&n_hdlc->rx_buf_list.spinlock); | 846 | spin_lock_init(&n_hdlc->rx_buf_list.spinlock); |
858 | spin_lock_init(&n_hdlc->tx_buf_list.spinlock); | 847 | spin_lock_init(&n_hdlc->tx_buf_list.spinlock); |
859 | 848 | ||
849 | INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list); | ||
850 | INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list); | ||
851 | INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list); | ||
852 | INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list); | ||
853 | |||
860 | /* allocate free rx buffer list */ | 854 | /* allocate free rx buffer list */ |
861 | for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { | 855 | for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { |
862 | buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); | 856 | buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); |
@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void) | |||
884 | } /* end of n_hdlc_alloc() */ | 878 | } /* end of n_hdlc_alloc() */ |
885 | 879 | ||
886 | /** | 880 | /** |
881 | * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list | ||
882 | * @buf_list - pointer to the buffer list | ||
883 | * @buf - pointer to the buffer | ||
884 | */ | ||
885 | static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list, | ||
886 | struct n_hdlc_buf *buf) | ||
887 | { | ||
888 | unsigned long flags; | ||
889 | |||
890 | spin_lock_irqsave(&buf_list->spinlock, flags); | ||
891 | |||
892 | list_add(&buf->list_item, &buf_list->list); | ||
893 | buf_list->count++; | ||
894 | |||
895 | spin_unlock_irqrestore(&buf_list->spinlock, flags); | ||
896 | } | ||
897 | |||
898 | /** | ||
887 | * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list | 899 | * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list |
888 | * @list - pointer to buffer list | 900 | * @buf_list - pointer to buffer list |
889 | * @buf - pointer to buffer | 901 | * @buf - pointer to buffer |
890 | */ | 902 | */ |
891 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, | 903 | static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list, |
892 | struct n_hdlc_buf *buf) | 904 | struct n_hdlc_buf *buf) |
893 | { | 905 | { |
894 | unsigned long flags; | 906 | unsigned long flags; |
895 | spin_lock_irqsave(&list->spinlock,flags); | 907 | |
896 | 908 | spin_lock_irqsave(&buf_list->spinlock, flags); | |
897 | buf->link=NULL; | 909 | |
898 | if (list->tail) | 910 | list_add_tail(&buf->list_item, &buf_list->list); |
899 | list->tail->link = buf; | 911 | buf_list->count++; |
900 | else | 912 | |
901 | list->head = buf; | 913 | spin_unlock_irqrestore(&buf_list->spinlock, flags); |
902 | list->tail = buf; | ||
903 | (list->count)++; | ||
904 | |||
905 | spin_unlock_irqrestore(&list->spinlock,flags); | ||
906 | |||
907 | } /* end of n_hdlc_buf_put() */ | 914 | } /* end of n_hdlc_buf_put() */ |
908 | 915 | ||
909 | /** | 916 | /** |
910 | * n_hdlc_buf_get - remove and return an HDLC buffer from list | 917 | * n_hdlc_buf_get - remove and return an HDLC buffer from list |
911 | * @list - pointer to HDLC buffer list | 918 | * @buf_list - pointer to HDLC buffer list |
912 | * | 919 | * |
913 | * Remove and return an HDLC buffer from the head of the specified HDLC buffer | 920 | * Remove and return an HDLC buffer from the head of the specified HDLC buffer |
914 | * list. | 921 | * list. |
915 | * Returns a pointer to HDLC buffer if available, otherwise %NULL. | 922 | * Returns a pointer to HDLC buffer if available, otherwise %NULL. |
916 | */ | 923 | */ |
917 | static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) | 924 | static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list) |
918 | { | 925 | { |
919 | unsigned long flags; | 926 | unsigned long flags; |
920 | struct n_hdlc_buf *buf; | 927 | struct n_hdlc_buf *buf; |
921 | spin_lock_irqsave(&list->spinlock,flags); | 928 | |
922 | 929 | spin_lock_irqsave(&buf_list->spinlock, flags); | |
923 | buf = list->head; | 930 | |
931 | buf = list_first_entry_or_null(&buf_list->list, | ||
932 | struct n_hdlc_buf, list_item); | ||
924 | if (buf) { | 933 | if (buf) { |
925 | list->head = buf->link; | 934 | list_del(&buf->list_item); |
926 | (list->count)--; | 935 | buf_list->count--; |
927 | } | 936 | } |
928 | if (!list->head) | 937 | |
929 | list->tail = NULL; | 938 | spin_unlock_irqrestore(&buf_list->spinlock, flags); |
930 | |||
931 | spin_unlock_irqrestore(&list->spinlock,flags); | ||
932 | return buf; | 939 | return buf; |
933 | |||
934 | } /* end of n_hdlc_buf_get() */ | 940 | } /* end of n_hdlc_buf_get() */ |
935 | 941 | ||
936 | static char hdlc_banner[] __initdata = | 942 | static char hdlc_banner[] __initdata = |
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index b4f86c219db1..7a17aedbf902 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
@@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port) | |||
1031 | if (ourport->dma) { | 1031 | if (ourport->dma) { |
1032 | ret = s3c24xx_serial_request_dma(ourport); | 1032 | ret = s3c24xx_serial_request_dma(ourport); |
1033 | if (ret < 0) { | 1033 | if (ret < 0) { |
1034 | dev_warn(port->dev, "DMA request failed\n"); | 1034 | dev_warn(port->dev, |
1035 | return ret; | 1035 | "DMA request failed, DMA will not be used\n"); |
1036 | devm_kfree(port->dev, ourport->dma); | ||
1037 | ourport->dma = NULL; | ||
1036 | } | 1038 | } |
1037 | } | 1039 | } |
1038 | 1040 | ||
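Editor's note on the samsung.c hunk above: a failed DMA request no longer aborts startup; the driver frees the DMA state, clears the pointer and continues in PIO mode. Below is a minimal userspace sketch of that fallback pattern. The port structure and the request_dma()/port_startup() helpers are hypothetical stand-ins, not the serial driver's API.

/*
 * If the optional fast path (DMA) cannot be set up, release its state,
 * warn once and keep going with the slow path instead of failing.
 */
#include <stdio.h>
#include <stdlib.h>

struct port {
	void *dma;	/* NULL means "use PIO" */
};

static int request_dma(struct port *p)
{
	(void)p;
	return -1;	/* pretend the DMA channel is unavailable */
}

static int port_startup(struct port *p)
{
	p->dma = malloc(64);
	if (p->dma && request_dma(p) < 0) {
		fprintf(stderr, "DMA request failed, DMA will not be used\n");
		free(p->dma);
		p->dma = NULL;	/* the rest of the driver checks this */
	}
	return 0;		/* startup still succeeds */
}

int main(void)
{
	struct port p;

	port_startup(&p);
	printf("using %s\n", p.dma ? "DMA" : "PIO");
	return 0;
}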
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 2092e46b1380..f8d0747810e7 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, | |||
250 | val = dwc3_omap_read_utmi_ctrl(omap); | 250 | val = dwc3_omap_read_utmi_ctrl(omap); |
251 | val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; | 251 | val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; |
252 | dwc3_omap_write_utmi_ctrl(omap, val); | 252 | dwc3_omap_write_utmi_ctrl(omap, val); |
253 | break; | ||
253 | 254 | ||
254 | case OMAP_DWC3_VBUS_OFF: | 255 | case OMAP_DWC3_VBUS_OFF: |
255 | val = dwc3_omap_read_utmi_ctrl(omap); | 256 | val = dwc3_omap_read_utmi_ctrl(omap); |
@@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap) | |||
392 | { | 393 | { |
393 | u32 reg; | 394 | u32 reg; |
394 | struct device_node *node = omap->dev->of_node; | 395 | struct device_node *node = omap->dev->of_node; |
395 | int utmi_mode = 0; | 396 | u32 utmi_mode = 0; |
396 | 397 | ||
397 | reg = dwc3_omap_read_utmi_ctrl(omap); | 398 | reg = dwc3_omap_read_utmi_ctrl(omap); |
398 | 399 | ||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4db97ecae885..0d75158e43fe 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1342,6 +1342,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |||
1342 | if (r == req) { | 1342 | if (r == req) { |
1343 | /* wait until it is processed */ | 1343 | /* wait until it is processed */ |
1344 | dwc3_stop_active_transfer(dwc, dep->number, true); | 1344 | dwc3_stop_active_transfer(dwc, dep->number, true); |
1345 | |||
1346 | /* | ||
1347 | * If request was already started, this means we had to | ||
1348 | * stop the transfer. With that we also need to ignore | ||
1349 | * all TRBs used by the request, however TRBs can only | ||
1350 | * be modified after completion of END_TRANSFER | ||
1351 | * command. So what we do here is that we wait for | ||
1352 | * END_TRANSFER completion and only after that, we jump | ||
1353 | * over TRBs by clearing HWO and incrementing dequeue | ||
1354 | * pointer. | ||
1355 | * | ||
1356 | * Note that we have 2 possible types of transfers here: | ||
1357 | * | ||
1358 | * i) Linear buffer request | ||
1359 | * ii) SG-list based request | ||
1360 | * | ||
1361 | * SG-list based requests will have r->num_pending_sgs | ||
1362 | * set to a valid number (> 0). Linear requests, | ||
1363 | * normally use a single TRB. | ||
1364 | * | ||
1365 | * For each of these two cases, if r->unaligned flag is | ||
1366 | * set, one extra TRB has been used to align transfer | ||
1367 | * size to wMaxPacketSize. | ||
1368 | * | ||
1369 | * All of these cases need to be taken into | ||
1370 | * consideration so we don't mess up our TRB ring | ||
1371 | * pointers. | ||
1372 | */ | ||
1373 | wait_event_lock_irq(dep->wait_end_transfer, | ||
1374 | !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), | ||
1375 | dwc->lock); | ||
1376 | |||
1377 | if (!r->trb) | ||
1378 | goto out1; | ||
1379 | |||
1380 | if (r->num_pending_sgs) { | ||
1381 | struct dwc3_trb *trb; | ||
1382 | int i = 0; | ||
1383 | |||
1384 | for (i = 0; i < r->num_pending_sgs; i++) { | ||
1385 | trb = r->trb + i; | ||
1386 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1387 | dwc3_ep_inc_deq(dep); | ||
1388 | } | ||
1389 | |||
1390 | if (r->unaligned) { | ||
1391 | trb = r->trb + r->num_pending_sgs + 1; | ||
1392 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1393 | dwc3_ep_inc_deq(dep); | ||
1394 | } | ||
1395 | } else { | ||
1396 | struct dwc3_trb *trb = r->trb; | ||
1397 | |||
1398 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1399 | dwc3_ep_inc_deq(dep); | ||
1400 | |||
1401 | if (r->unaligned) { | ||
1402 | trb = r->trb + 1; | ||
1403 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1404 | dwc3_ep_inc_deq(dep); | ||
1405 | } | ||
1406 | } | ||
1345 | goto out1; | 1407 | goto out1; |
1346 | } | 1408 | } |
1347 | dev_err(dwc->dev, "request %p was not queued to %s\n", | 1409 | dev_err(dwc->dev, "request %p was not queued to %s\n", |
@@ -1352,6 +1414,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, | |||
1352 | 1414 | ||
1353 | out1: | 1415 | out1: |
1354 | /* giveback the request */ | 1416 | /* giveback the request */ |
1417 | dep->queued_requests--; | ||
1355 | dwc3_gadget_giveback(dep, req, -ECONNRESET); | 1418 | dwc3_gadget_giveback(dep, req, -ECONNRESET); |
1356 | 1419 | ||
1357 | out0: | 1420 | out0: |
@@ -2126,12 +2189,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2126 | return 1; | 2189 | return 1; |
2127 | } | 2190 | } |
2128 | 2191 | ||
2129 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | ||
2130 | return 1; | ||
2131 | |||
2132 | count = trb->size & DWC3_TRB_SIZE_MASK; | 2192 | count = trb->size & DWC3_TRB_SIZE_MASK; |
2133 | req->remaining += count; | 2193 | req->remaining += count; |
2134 | 2194 | ||
2195 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | ||
2196 | return 1; | ||
2197 | |||
2135 | if (dep->direction) { | 2198 | if (dep->direction) { |
2136 | if (count) { | 2199 | if (count) { |
2137 | trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); | 2200 | trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); |
@@ -3228,15 +3291,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc) | |||
3228 | 3291 | ||
3229 | int dwc3_gadget_suspend(struct dwc3 *dwc) | 3292 | int dwc3_gadget_suspend(struct dwc3 *dwc) |
3230 | { | 3293 | { |
3231 | int ret; | ||
3232 | |||
3233 | if (!dwc->gadget_driver) | 3294 | if (!dwc->gadget_driver) |
3234 | return 0; | 3295 | return 0; |
3235 | 3296 | ||
3236 | ret = dwc3_gadget_run_stop(dwc, false, false); | 3297 | dwc3_gadget_run_stop(dwc, false, false); |
3237 | if (ret < 0) | ||
3238 | return ret; | ||
3239 | |||
3240 | dwc3_disconnect_gadget(dwc); | 3298 | dwc3_disconnect_gadget(dwc); |
3241 | __dwc3_gadget_stop(dwc); | 3299 | __dwc3_gadget_stop(dwc); |
3242 | 3300 | ||
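Editor's note on the dwc3_gadget_ep_dequeue() hunk above: the added comment explains that every TRB belonging to a cancelled request must have its hardware-own (HWO) bit cleared and the software dequeue pointer stepped past it, including the extra alignment TRB for unaligned transfers. The sketch below shows only that bookkeeping on a made-up ring; the structures, flag values and ring size are illustrative, not the dwc3 definitions.

/*
 * Clear the HWO bit of each TRB that covered the cancelled request and
 * advance the software dequeue index past it.
 */
#include <stdio.h>

#define RING_SIZE	8
#define TRB_CTRL_HWO	(1u << 0)

struct trb {
	unsigned int ctrl;
};

static struct trb ring[RING_SIZE];
static unsigned int deq;	/* software dequeue index */

static void skip_trbs(struct trb *first, int ntrbs)
{
	int i;

	for (i = 0; i < ntrbs; i++) {
		first[i].ctrl &= ~TRB_CTRL_HWO;	/* hand the TRB back to software */
		deq = (deq + 1) % RING_SIZE;	/* step over it */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i].ctrl = TRB_CTRL_HWO;

	/* a cancelled request that used 2 data TRBs plus 1 alignment TRB */
	skip_trbs(&ring[deq], 2 + 1);

	printf("dequeue index now %u, trb0 ctrl %#x\n", deq, ring[0].ctrl);
	return 0;
}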
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 3129bcf74d7d..265e223ab645 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h | |||
@@ -28,23 +28,23 @@ struct dwc3; | |||
28 | #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) | 28 | #define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) |
29 | 29 | ||
30 | /* DEPCFG parameter 1 */ | 30 | /* DEPCFG parameter 1 */ |
31 | #define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) | 31 | #define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) |
32 | #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) | 32 | #define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) |
33 | #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) | 33 | #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) |
34 | #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) | 34 | #define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) |
35 | #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) | 35 | #define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) |
36 | #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) | 36 | #define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) |
37 | #define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) | 37 | #define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) |
38 | #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) | 38 | #define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) |
39 | #define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) | 39 | #define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) |
40 | #define DWC3_DEPCFG_BULK_BASED (1 << 30) | 40 | #define DWC3_DEPCFG_BULK_BASED (1 << 30) |
41 | #define DWC3_DEPCFG_FIFO_BASED (1 << 31) | 41 | #define DWC3_DEPCFG_FIFO_BASED (1 << 31) |
42 | 42 | ||
43 | /* DEPCFG parameter 0 */ | 43 | /* DEPCFG parameter 0 */ |
44 | #define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) | 44 | #define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) |
45 | #define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) | 45 | #define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) |
46 | #define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) | 46 | #define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) |
47 | #define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) | 47 | #define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) |
48 | #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) | 48 | #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) |
49 | /* This applies for core versions earlier than 1.94a */ | 49 | /* This applies for core versions earlier than 1.94a */ |
50 | #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) | 50 | #define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) |
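Editor's note on the gadget.h hunk above: the DEPCFG helpers now mask their argument so an oversized value cannot spill into neighbouring bit fields of the register word. A small standalone example of the difference, using a hypothetical 4-bit field at bit 22 (the field name and width are made up for illustration):

/*
 * Without the mask, a 5-bit value written into a 4-bit field sets a
 * bit that belongs to the next field; with the mask it is clipped.
 */
#include <stdio.h>

#define FIELD_UNMASKED(n)	((n) << 22)		/* old style */
#define FIELD_MASKED(n)		(((n) & 0xf) << 22)	/* new style, 4-bit field */

int main(void)
{
	unsigned int burst = 0x1f;	/* too large for a 4-bit field */

	printf("unmasked: %#010x\n", FIELD_UNMASKED(burst));	/* spills into bit 26 */
	printf("masked:   %#010x\n", FIELD_MASKED(burst));	/* confined to bits 22..25 */
	return 0;
}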
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 78c44979dde3..cbff3b02840d 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, | |||
269 | ret = unregister_gadget(gi); | 269 | ret = unregister_gadget(gi); |
270 | if (ret) | 270 | if (ret) |
271 | goto err; | 271 | goto err; |
272 | kfree(name); | ||
272 | } else { | 273 | } else { |
273 | if (gi->composite.gadget_driver.udc_name) { | 274 | if (gi->composite.gadget_driver.udc_name) { |
274 | ret = -EBUSY; | 275 | ret = -EBUSY; |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a5b7cd615698..a0085571824d 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
1834 | spin_lock_irqsave(&func->ffs->eps_lock, flags); | 1834 | spin_lock_irqsave(&func->ffs->eps_lock, flags); |
1835 | while(count--) { | 1835 | while(count--) { |
1836 | struct usb_endpoint_descriptor *ds; | 1836 | struct usb_endpoint_descriptor *ds; |
1837 | struct usb_ss_ep_comp_descriptor *comp_desc = NULL; | ||
1838 | int needs_comp_desc = false; | ||
1837 | int desc_idx; | 1839 | int desc_idx; |
1838 | 1840 | ||
1839 | if (ffs->gadget->speed == USB_SPEED_SUPER) | 1841 | if (ffs->gadget->speed == USB_SPEED_SUPER) { |
1840 | desc_idx = 2; | 1842 | desc_idx = 2; |
1841 | else if (ffs->gadget->speed == USB_SPEED_HIGH) | 1843 | needs_comp_desc = true; |
1844 | } else if (ffs->gadget->speed == USB_SPEED_HIGH) | ||
1842 | desc_idx = 1; | 1845 | desc_idx = 1; |
1843 | else | 1846 | else |
1844 | desc_idx = 0; | 1847 | desc_idx = 0; |
@@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
1855 | 1858 | ||
1856 | ep->ep->driver_data = ep; | 1859 | ep->ep->driver_data = ep; |
1857 | ep->ep->desc = ds; | 1860 | ep->ep->desc = ds; |
1861 | |||
1862 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + | ||
1863 | USB_DT_ENDPOINT_SIZE); | ||
1864 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | ||
1865 | |||
1866 | if (needs_comp_desc) | ||
1867 | ep->ep->comp_desc = comp_desc; | ||
1868 | |||
1858 | ret = usb_ep_enable(ep->ep); | 1869 | ret = usb_ep_enable(ep->ep); |
1859 | if (likely(!ret)) { | 1870 | if (likely(!ret)) { |
1860 | epfile->ep = ep; | 1871 | epfile->ep = ep; |
@@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
2253 | 2264 | ||
2254 | if (len < sizeof(*d) || | 2265 | if (len < sizeof(*d) || |
2255 | d->bFirstInterfaceNumber >= ffs->interfaces_count || | 2266 | d->bFirstInterfaceNumber >= ffs->interfaces_count || |
2256 | d->Reserved1) | 2267 | !d->Reserved1) |
2257 | return -EINVAL; | 2268 | return -EINVAL; |
2258 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) | 2269 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) |
2259 | if (d->Reserved2[i]) | 2270 | if (d->Reserved2[i]) |
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 27ed51b5082f..29b41b5dee04 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c | |||
@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) | |||
258 | memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); | 258 | memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); |
259 | v4l2_event_queue(&uvc->vdev, &v4l2_event); | 259 | v4l2_event_queue(&uvc->vdev, &v4l2_event); |
260 | 260 | ||
261 | /* Pass additional setup data to userspace */ | ||
262 | if (uvc->event_setup_out && uvc->event_length) { | ||
263 | uvc->control_req->length = uvc->event_length; | ||
264 | return usb_ep_queue(uvc->func.config->cdev->gadget->ep0, | ||
265 | uvc->control_req, GFP_ATOMIC); | ||
266 | } | ||
267 | |||
268 | return 0; | 261 | return 0; |
269 | } | 262 | } |
270 | 263 | ||
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index a2615d64d07c..a2c916869293 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *); | |||
84 | 84 | ||
85 | /* /dev/gadget/$CHIP represents ep0 and the whole device */ | 85 | /* /dev/gadget/$CHIP represents ep0 and the whole device */ |
86 | enum ep0_state { | 86 | enum ep0_state { |
87 | /* DISBLED is the initial state. | 87 | /* DISABLED is the initial state. */ |
88 | */ | ||
89 | STATE_DEV_DISABLED = 0, | 88 | STATE_DEV_DISABLED = 0, |
90 | 89 | ||
91 | /* Only one open() of /dev/gadget/$CHIP; only one file tracks | 90 | /* Only one open() of /dev/gadget/$CHIP; only one file tracks |
@@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1782 | 1781 | ||
1783 | spin_lock_irq (&dev->lock); | 1782 | spin_lock_irq (&dev->lock); |
1784 | value = -EINVAL; | 1783 | value = -EINVAL; |
1785 | if (dev->buf) | 1784 | if (dev->buf) { |
1785 | kfree(kbuf); | ||
1786 | goto fail; | 1786 | goto fail; |
1787 | } | ||
1787 | dev->buf = kbuf; | 1788 | dev->buf = kbuf; |
1788 | 1789 | ||
1789 | /* full or low speed config */ | 1790 | /* full or low speed config */ |
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 11bbce28bc23..2035906b8ced 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
@@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
610 | { | 610 | { |
611 | struct usba_ep *ep = to_usba_ep(_ep); | 611 | struct usba_ep *ep = to_usba_ep(_ep); |
612 | struct usba_udc *udc = ep->udc; | 612 | struct usba_udc *udc = ep->udc; |
613 | unsigned long flags, ept_cfg, maxpacket; | 613 | unsigned long flags, maxpacket; |
614 | unsigned int nr_trans; | 614 | unsigned int nr_trans; |
615 | 615 | ||
616 | DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); | 616 | DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); |
@@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
630 | ep->is_in = 0; | 630 | ep->is_in = 0; |
631 | 631 | ||
632 | DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", | 632 | DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", |
633 | ep->ep.name, ept_cfg, maxpacket); | 633 | ep->ep.name, ep->ept_cfg, maxpacket); |
634 | 634 | ||
635 | if (usb_endpoint_dir_in(desc)) { | 635 | if (usb_endpoint_dir_in(desc)) { |
636 | ep->is_in = 1; | 636 | ep->is_in = 1; |
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index c60abe3a68f9..8cabc5944d5f 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev) | |||
1031 | int rc; | 1031 | int rc; |
1032 | 1032 | ||
1033 | dum = *((void **)dev_get_platdata(&pdev->dev)); | 1033 | dum = *((void **)dev_get_platdata(&pdev->dev)); |
1034 | /* Clear usb_gadget region for new registration to udc-core */ | ||
1035 | memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); | ||
1034 | dum->gadget.name = gadget_name; | 1036 | dum->gadget.name = gadget_name; |
1035 | dum->gadget.ops = &dummy_ops; | 1037 | dum->gadget.ops = &dummy_ops; |
1036 | dum->gadget.max_speed = USB_SPEED_SUPER; | 1038 | dum->gadget.max_speed = USB_SPEED_SUPER; |
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 85504419ab31..3828c2ec8623 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep) | |||
1146 | */ | 1146 | */ |
1147 | while (!list_empty(&ep->queue)) { | 1147 | while (!list_empty(&ep->queue)) { |
1148 | struct net2280_request *req; | 1148 | struct net2280_request *req; |
1149 | u32 tmp; | 1149 | u32 req_dma_count; |
1150 | 1150 | ||
1151 | req = list_entry(ep->queue.next, | 1151 | req = list_entry(ep->queue.next, |
1152 | struct net2280_request, queue); | 1152 | struct net2280_request, queue); |
1153 | if (!req->valid) | 1153 | if (!req->valid) |
1154 | break; | 1154 | break; |
1155 | rmb(); | 1155 | rmb(); |
1156 | tmp = le32_to_cpup(&req->td->dmacount); | 1156 | req_dma_count = le32_to_cpup(&req->td->dmacount); |
1157 | if ((tmp & BIT(VALID_BIT)) != 0) | 1157 | if ((req_dma_count & BIT(VALID_BIT)) != 0) |
1158 | break; | 1158 | break; |
1159 | 1159 | ||
1160 | /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" | 1160 | /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" |
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep) | |||
1163 | */ | 1163 | */ |
1164 | if (unlikely(req->td->dmadesc == 0)) { | 1164 | if (unlikely(req->td->dmadesc == 0)) { |
1165 | /* paranoia */ | 1165 | /* paranoia */ |
1166 | tmp = readl(&ep->dma->dmacount); | 1166 | u32 const ep_dmacount = readl(&ep->dma->dmacount); |
1167 | if (tmp & DMA_BYTE_COUNT_MASK) | 1167 | |
1168 | if (ep_dmacount & DMA_BYTE_COUNT_MASK) | ||
1168 | break; | 1169 | break; |
1169 | /* single transfer mode */ | 1170 | /* single transfer mode */ |
1170 | dma_done(ep, req, tmp, 0); | 1171 | dma_done(ep, req, req_dma_count, 0); |
1171 | num_completed++; | 1172 | num_completed++; |
1172 | break; | 1173 | break; |
1173 | } else if (!ep->is_in && | 1174 | } else if (!ep->is_in && |
1174 | (req->req.length % ep->ep.maxpacket) && | 1175 | (req->req.length % ep->ep.maxpacket) && |
1175 | !(ep->dev->quirks & PLX_PCIE)) { | 1176 | !(ep->dev->quirks & PLX_PCIE)) { |
1176 | 1177 | ||
1177 | tmp = readl(&ep->regs->ep_stat); | 1178 | u32 const ep_stat = readl(&ep->regs->ep_stat); |
1178 | /* AVOID TROUBLE HERE by not issuing short reads from | 1179 | /* AVOID TROUBLE HERE by not issuing short reads from |
1179 | * your gadget driver. That helps avoid errata 0121, | 1180 | * your gadget driver. That helps avoid errata 0121, |
1180 | * 0122, and 0124; not all cases trigger the warning. | 1181 | * 0122, and 0124; not all cases trigger the warning. |
1181 | */ | 1182 | */ |
1182 | if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { | 1183 | if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) { |
1183 | ep_warn(ep->dev, "%s lost packet sync!\n", | 1184 | ep_warn(ep->dev, "%s lost packet sync!\n", |
1184 | ep->ep.name); | 1185 | ep->ep.name); |
1185 | req->req.status = -EOVERFLOW; | 1186 | req->req.status = -EOVERFLOW; |
1186 | } else { | 1187 | } else { |
1187 | tmp = readl(&ep->regs->ep_avail); | 1188 | u32 const ep_avail = readl(&ep->regs->ep_avail); |
1188 | if (tmp) { | 1189 | if (ep_avail) { |
1189 | /* fifo gets flushed later */ | 1190 | /* fifo gets flushed later */ |
1190 | ep->out_overflow = 1; | 1191 | ep->out_overflow = 1; |
1191 | ep_dbg(ep->dev, | 1192 | ep_dbg(ep->dev, |
1192 | "%s dma, discard %d len %d\n", | 1193 | "%s dma, discard %d len %d\n", |
1193 | ep->ep.name, tmp, | 1194 | ep->ep.name, ep_avail, |
1194 | req->req.length); | 1195 | req->req.length); |
1195 | req->req.status = -EOVERFLOW; | 1196 | req->req.status = -EOVERFLOW; |
1196 | } | 1197 | } |
1197 | } | 1198 | } |
1198 | } | 1199 | } |
1199 | dma_done(ep, req, tmp, 0); | 1200 | dma_done(ep, req, req_dma_count, 0); |
1200 | num_completed++; | 1201 | num_completed++; |
1201 | } | 1202 | } |
1202 | 1203 | ||
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c index e1335ad5bce9..832c4fdbe985 100644 --- a/drivers/usb/gadget/udc/pxa27x_udc.c +++ b/drivers/usb/gadget/udc/pxa27x_udc.c | |||
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev) | |||
2534 | usb_del_gadget_udc(&udc->gadget); | 2534 | usb_del_gadget_udc(&udc->gadget); |
2535 | pxa_cleanup_debugfs(udc); | 2535 | pxa_cleanup_debugfs(udc); |
2536 | 2536 | ||
2537 | if (!IS_ERR_OR_NULL(udc->transceiver)) | 2537 | if (!IS_ERR_OR_NULL(udc->transceiver)) { |
2538 | usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); | 2538 | usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); |
2539 | usb_put_phy(udc->transceiver); | 2539 | usb_put_phy(udc->transceiver); |
2540 | } | ||
2540 | 2541 | ||
2541 | udc->transceiver = NULL; | 2542 | udc->transceiver = NULL; |
2542 | the_controller = NULL; | 2543 | the_controller = NULL; |
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 414e3c376dbb..5302f988e7e6 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
@@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
350 | 350 | ||
351 | case USB_PORT_FEAT_SUSPEND: | 351 | case USB_PORT_FEAT_SUSPEND: |
352 | dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); | 352 | dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); |
353 | if (valid_port(wIndex)) { | 353 | if (valid_port(wIndex) && ohci_at91->sfr_regmap) { |
354 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, | 354 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, |
355 | 1); | 355 | 1); |
356 | return 0; | 356 | return 0; |
@@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
393 | 393 | ||
394 | case USB_PORT_FEAT_SUSPEND: | 394 | case USB_PORT_FEAT_SUSPEND: |
395 | dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); | 395 | dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); |
396 | if (valid_port(wIndex)) { | 396 | if (valid_port(wIndex) && ohci_at91->sfr_regmap) { |
397 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, | 397 | ohci_at91_port_suspend(ohci_at91->sfr_regmap, |
398 | 0); | 398 | 0); |
399 | return 0; | 399 | return 0; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 363d125300ea..2b4a00fa735d 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) | |||
109 | xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); | 109 | xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); |
110 | 110 | ||
111 | /* xhci 1.1 controllers have the HCCPARAMS2 register */ | 111 | /* xhci 1.1 controllers have the HCCPARAMS2 register */ |
112 | if (hci_version > 100) { | 112 | if (hci_version > 0x100) { |
113 | temp = readl(&xhci->cap_regs->hcc_params2); | 113 | temp = readl(&xhci->cap_regs->hcc_params2); |
114 | xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); | 114 | xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); |
115 | xhci_dbg(xhci, " HC %s Force save context capability", | 115 | xhci_dbg(xhci, " HC %s Force save context capability", |
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 9066ec9e0c2e..67d5dc79b6b5 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c | |||
@@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk, | |||
382 | 382 | ||
383 | static int xhci_mtk_setup(struct usb_hcd *hcd); | 383 | static int xhci_mtk_setup(struct usb_hcd *hcd); |
384 | static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { | 384 | static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { |
385 | .extra_priv_size = sizeof(struct xhci_hcd), | ||
386 | .reset = xhci_mtk_setup, | 385 | .reset = xhci_mtk_setup, |
387 | }; | 386 | }; |
388 | 387 | ||
@@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev) | |||
678 | goto power_off_phys; | 677 | goto power_off_phys; |
679 | } | 678 | } |
680 | 679 | ||
681 | if (HCC_MAX_PSA(xhci->hcc_params) >= 4) | ||
682 | xhci->shared_hcd->can_do_streams = 1; | ||
683 | |||
684 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | 680 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); |
685 | if (ret) | 681 | if (ret) |
686 | goto put_usb3_hcd; | 682 | goto put_usb3_hcd; |
687 | 683 | ||
684 | if (HCC_MAX_PSA(xhci->hcc_params) >= 4) | ||
685 | xhci->shared_hcd->can_do_streams = 1; | ||
686 | |||
688 | ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); | 687 | ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); |
689 | if (ret) | 688 | if (ret) |
690 | goto dealloc_usb2_hcd; | 689 | goto dealloc_usb2_hcd; |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6d33b42ffcf5..bd02a6cd8e2c 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev) | |||
286 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 286 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
287 | struct clk *clk = xhci->clk; | 287 | struct clk *clk = xhci->clk; |
288 | 288 | ||
289 | xhci->xhc_state |= XHCI_STATE_REMOVING; | ||
290 | |||
289 | usb_remove_hcd(xhci->shared_hcd); | 291 | usb_remove_hcd(xhci->shared_hcd); |
290 | usb_phy_shutdown(hcd->usb_phy); | 292 | usb_phy_shutdown(hcd->usb_phy); |
291 | 293 | ||
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index a59fafb4b329..74436f8ca538 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c | |||
@@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd) | |||
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { | 1310 | static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { |
1311 | .extra_priv_size = sizeof(struct xhci_hcd), | ||
1312 | .reset = tegra_xhci_setup, | 1311 | .reset = tegra_xhci_setup, |
1313 | }; | 1312 | }; |
1314 | 1313 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6d6c46000e56..50aee8b7718b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) | |||
868 | 868 | ||
869 | spin_lock_irqsave(&xhci->lock, flags); | 869 | spin_lock_irqsave(&xhci->lock, flags); |
870 | 870 | ||
871 | /* disble usb3 ports Wake bits*/ | 871 | /* disable usb3 ports Wake bits */ |
872 | port_index = xhci->num_usb3_ports; | 872 | port_index = xhci->num_usb3_ports; |
873 | port_array = xhci->usb3_ports; | 873 | port_array = xhci->usb3_ports; |
874 | while (port_index--) { | 874 | while (port_index--) { |
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) | |||
879 | writel(t2, port_array[port_index]); | 879 | writel(t2, port_array[port_index]); |
880 | } | 880 | } |
881 | 881 | ||
882 | /* disble usb2 ports Wake bits*/ | 882 | /* disable usb2 ports Wake bits */ |
883 | port_index = xhci->num_usb2_ports; | 883 | port_index = xhci->num_usb2_ports; |
884 | port_array = xhci->usb2_ports; | 884 | port_array = xhci->usb2_ports; |
885 | while (port_index--) { | 885 | while (port_index--) { |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 095778ff984d..37c63cb39714 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface, | |||
781 | iface_desc = interface->cur_altsetting; | 781 | iface_desc = interface->cur_altsetting; |
782 | dev->product_id = le16_to_cpu(udev->descriptor.idProduct); | 782 | dev->product_id = le16_to_cpu(udev->descriptor.idProduct); |
783 | 783 | ||
784 | if (iface_desc->desc.bNumEndpoints < 1) { | ||
785 | dev_err(&interface->dev, "Invalid number of endpoints\n"); | ||
786 | retval = -EINVAL; | ||
787 | goto error; | ||
788 | } | ||
789 | |||
790 | /* set up the endpoint information */ | 784 | /* set up the endpoint information */ |
791 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { | 785 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { |
792 | endpoint = &iface_desc->endpoint[i].desc; | 786 | endpoint = &iface_desc->endpoint[i].desc; |
@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface, | |||
797 | /* this one will match for the IOWarrior56 only */ | 791 | /* this one will match for the IOWarrior56 only */ |
798 | dev->int_out_endpoint = endpoint; | 792 | dev->int_out_endpoint = endpoint; |
799 | } | 793 | } |
794 | |||
795 | if (!dev->int_in_endpoint) { | ||
796 | dev_err(&interface->dev, "no interrupt-in endpoint found\n"); | ||
797 | retval = -ENODEV; | ||
798 | goto error; | ||
799 | } | ||
800 | |||
801 | if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) { | ||
802 | if (!dev->int_out_endpoint) { | ||
803 | dev_err(&interface->dev, "no interrupt-out endpoint found\n"); | ||
804 | retval = -ENODEV; | ||
805 | goto error; | ||
806 | } | ||
807 | } | ||
808 | |||
800 | /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ | 809 | /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ |
801 | dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); | 810 | dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); |
802 | if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && | 811 | if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && |
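Editor's note on the iowarrior hunk above: instead of only rejecting interfaces with zero endpoints, probe() now verifies that the specific endpoints it will later dereference actually exist. Below is a simplified userspace sketch of that probe-time validation; the descriptor structures and the probe() signature are made-up stand-ins, not the USB core types.

/*
 * Scan the interface's endpoints, remember the ones the driver needs,
 * and refuse to bind if a required endpoint is missing rather than
 * dereferencing a NULL pointer later.
 */
#include <stdio.h>

#define EP_DIR_IN	0x80

struct endpoint {
	unsigned char addr;	/* bit 7 set = IN endpoint */
};

struct interface {
	int num_endpoints;
	struct endpoint ep[4];
};

static int probe(const struct interface *intf, int needs_out)
{
	const struct endpoint *int_in = NULL, *int_out = NULL;
	int i;

	for (i = 0; i < intf->num_endpoints; i++) {
		if (intf->ep[i].addr & EP_DIR_IN)
			int_in = &intf->ep[i];
		else
			int_out = &intf->ep[i];
	}

	if (!int_in) {
		fprintf(stderr, "no interrupt-in endpoint found\n");
		return -1;
	}
	if (needs_out && !int_out) {
		fprintf(stderr, "no interrupt-out endpoint found\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct interface intf = { .num_endpoints = 1, .ep = { { .addr = 0x81 } } };

	/* device variant that also needs an OUT endpoint: probe fails */
	printf("probe: %d\n", probe(&intf, 1));
	return 0;
}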
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4e18600dc9b4..91f66d68bcb7 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c | |||
@@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
375 | if (of_get_property(np, "dynamic-power-switching", NULL)) | 375 | if (of_get_property(np, "dynamic-power-switching", NULL)) |
376 | hub->conf_data2 |= BIT(7); | 376 | hub->conf_data2 |= BIT(7); |
377 | 377 | ||
378 | if (of_get_property(np, "oc-delay-100us", NULL)) { | 378 | if (!of_property_read_u32(np, "oc-delay-us", property_u32)) { |
379 | hub->conf_data2 &= ~BIT(5); | 379 | if (*property_u32 == 100) { |
380 | hub->conf_data2 &= ~BIT(4); | 380 | /* 100 us*/ |
381 | } else if (of_get_property(np, "oc-delay-4ms", NULL)) { | 381 | hub->conf_data2 &= ~BIT(5); |
382 | hub->conf_data2 &= ~BIT(5); | 382 | hub->conf_data2 &= ~BIT(4); |
383 | hub->conf_data2 |= BIT(4); | 383 | } else if (*property_u32 == 4000) { |
384 | } else if (of_get_property(np, "oc-delay-8ms", NULL)) { | 384 | /* 4 ms */ |
385 | hub->conf_data2 |= BIT(5); | 385 | hub->conf_data2 &= ~BIT(5); |
386 | hub->conf_data2 &= ~BIT(4); | 386 | hub->conf_data2 |= BIT(4); |
387 | } else if (of_get_property(np, "oc-delay-16ms", NULL)) { | 387 | } else if (*property_u32 == 16000) { |
388 | hub->conf_data2 |= BIT(5); | 388 | /* 16 ms */ |
389 | hub->conf_data2 |= BIT(4); | 389 | hub->conf_data2 |= BIT(5); |
390 | hub->conf_data2 |= BIT(4); | ||
391 | } else { | ||
392 | /* 8 ms (DEFAULT) */ | ||
393 | hub->conf_data2 |= BIT(5); | ||
394 | hub->conf_data2 &= ~BIT(4); | ||
395 | } | ||
390 | } | 396 | } |
391 | 397 | ||
392 | if (of_get_property(np, "compound-device", NULL)) | 398 | if (of_get_property(np, "compound-device", NULL)) |
@@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
432 | } | 438 | } |
433 | } | 439 | } |
434 | 440 | ||
435 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; | ||
436 | if (!of_property_read_u32(np, "max-sp-power", property_u32)) | ||
437 | hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
438 | 250); | ||
439 | |||
440 | hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; | ||
441 | if (!of_property_read_u32(np, "max-bp-power", property_u32)) | ||
442 | hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
443 | 250); | ||
444 | |||
445 | hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; | ||
446 | if (!of_property_read_u32(np, "max-sp-current", property_u32)) | ||
447 | hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
448 | 250); | ||
449 | |||
450 | hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; | ||
451 | if (!of_property_read_u32(np, "max-bp-current", property_u32)) | ||
452 | hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2, | ||
453 | 250); | ||
454 | |||
455 | hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; | 441 | hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; |
456 | if (!of_property_read_u32(np, "power-on-time", property_u32)) | 442 | if (!of_property_read_u32(np, "power-on-time-ms", property_u32)) |
457 | hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2, | 443 | hub->power_on_time = min_t(u8, *property_u32 / 2, 255); |
458 | 255); | ||
459 | 444 | ||
460 | if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) | 445 | if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) |
461 | hub->lang_id = USB251XB_DEF_LANGUAGE_ID; | 446 | hub->lang_id = USB251XB_DEF_LANGUAGE_ID; |
@@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
492 | /* The following parameters are currently not exposed to devicetree, but | 477 | /* The following parameters are currently not exposed to devicetree, but |
493 | * may be as soon as needed. | 478 | * may be as soon as needed. |
494 | */ | 479 | */ |
480 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; | ||
481 | hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS; | ||
482 | hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF; | ||
483 | hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS; | ||
495 | hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; | 484 | hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; |
496 | hub->boost_up = USB251XB_DEF_BOOST_UP; | 485 | hub->boost_up = USB251XB_DEF_BOOST_UP; |
497 | hub->boost_x = USB251XB_DEF_BOOST_X; | 486 | hub->boost_x = USB251XB_DEF_BOOST_X; |
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index db68156568e6..b3b33cf7ddf6 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c | |||
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = { | |||
33 | }; | 33 | }; |
34 | MODULE_DEVICE_TABLE(i2c, isp1301_id); | 34 | MODULE_DEVICE_TABLE(i2c, isp1301_id); |
35 | 35 | ||
36 | static const struct of_device_id isp1301_of_match[] = { | ||
37 | {.compatible = "nxp,isp1301" }, | ||
38 | { }, | ||
39 | }; | ||
40 | MODULE_DEVICE_TABLE(of, isp1301_of_match); | ||
41 | |||
36 | static struct i2c_client *isp1301_i2c_client; | 42 | static struct i2c_client *isp1301_i2c_client; |
37 | 43 | ||
38 | static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) | 44 | static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) |
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client) | |||
130 | static struct i2c_driver isp1301_driver = { | 136 | static struct i2c_driver isp1301_driver = { |
131 | .driver = { | 137 | .driver = { |
132 | .name = DRV_NAME, | 138 | .name = DRV_NAME, |
139 | .of_match_table = of_match_ptr(isp1301_of_match), | ||
133 | }, | 140 | }, |
134 | .probe = isp1301_probe, | 141 | .probe = isp1301_probe, |
135 | .remove = isp1301_remove, | 142 | .remove = isp1301_remove, |
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index ab78111e0968..6537d3ca2797 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
@@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb) | |||
1500 | return -1; | 1500 | return -1; |
1501 | 1501 | ||
1502 | /* handle each oob command */ | 1502 | /* handle each oob command */ |
1503 | for (i = 0; i < urb->actual_length - 4; i += 4) { | 1503 | for (i = 0; i < urb->actual_length - 3; i += 4) { |
1504 | opcode = buf[i]; | 1504 | opcode = buf[i]; |
1505 | line = buf[i + 1]; | 1505 | line = buf[i + 1]; |
1506 | status = buf[i + 2]; | 1506 | status = buf[i + 2]; |
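Editor's note on the digi_acceleport hunk above: each out-of-band command is 4 bytes, so the loop must run while at least 4 bytes remain. The old bound (i < actual_length - 4) silently skipped a final complete command; the new bound (i < actual_length - 3, equivalently i + 4 <= actual_length) processes it. A tiny standalone check of the two bounds:

/*
 * Count how many complete 4-byte records each loop bound visits in an
 * 8-byte buffer that holds exactly two records.
 */
#include <stdio.h>

static int count_records(int len, int slack)
{
	int i, n = 0;

	for (i = 0; i < len - slack; i += 4)
		n++;
	return n;
}

int main(void)
{
	int len = 8;	/* exactly two 4-byte commands */

	printf("old bound (len - 4): %d records\n", count_records(len, 4));	/* 1 */
	printf("new bound (len - 3): %d records\n", count_records(len, 3));	/* 2 */
	return 0;
}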
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index ceaeebaa6f90..a76b95d32157 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb) | |||
1674 | function = TIUMP_GET_FUNC_FROM_CODE(data[0]); | 1674 | function = TIUMP_GET_FUNC_FROM_CODE(data[0]); |
1675 | dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, | 1675 | dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, |
1676 | port_number, function, data[1]); | 1676 | port_number, function, data[1]); |
1677 | |||
1678 | if (port_number >= edge_serial->serial->num_ports) { | ||
1679 | dev_err(dev, "bad port number %d\n", port_number); | ||
1680 | goto exit; | ||
1681 | } | ||
1682 | |||
1677 | port = edge_serial->serial->port[port_number]; | 1683 | port = edge_serial->serial->port[port_number]; |
1678 | edge_port = usb_get_serial_port_data(port); | 1684 | edge_port = usb_get_serial_port_data(port); |
1679 | if (!edge_port) { | 1685 | if (!edge_port) { |
@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb) | |||
1755 | 1761 | ||
1756 | port_number = edge_port->port->port_number; | 1762 | port_number = edge_port->port->port_number; |
1757 | 1763 | ||
1758 | if (edge_port->lsr_event) { | 1764 | if (urb->actual_length > 0 && edge_port->lsr_event) { |
1759 | edge_port->lsr_event = 0; | 1765 | edge_port->lsr_event = 0; |
1760 | dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", | 1766 | dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", |
1761 | __func__, port_number, edge_port->lsr_mask, *data); | 1767 | __func__, port_number, edge_port->lsr_mask, *data); |
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index a180b17d2432..dd706953b466 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #define BT_IGNITIONPRO_ID 0x2000 | 31 | #define BT_IGNITIONPRO_ID 0x2000 |
32 | 32 | ||
33 | /* function prototypes */ | 33 | /* function prototypes */ |
34 | static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port); | ||
35 | static void omninet_process_read_urb(struct urb *urb); | 34 | static void omninet_process_read_urb(struct urb *urb); |
36 | static void omninet_write_bulk_callback(struct urb *urb); | 35 | static void omninet_write_bulk_callback(struct urb *urb); |
37 | static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, | 36 | static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, |
@@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = { | |||
60 | .attach = omninet_attach, | 59 | .attach = omninet_attach, |
61 | .port_probe = omninet_port_probe, | 60 | .port_probe = omninet_port_probe, |
62 | .port_remove = omninet_port_remove, | 61 | .port_remove = omninet_port_remove, |
63 | .open = omninet_open, | ||
64 | .write = omninet_write, | 62 | .write = omninet_write, |
65 | .write_room = omninet_write_room, | 63 | .write_room = omninet_write_room, |
66 | .write_bulk_callback = omninet_write_bulk_callback, | 64 | .write_bulk_callback = omninet_write_bulk_callback, |
@@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port) | |||
140 | return 0; | 138 | return 0; |
141 | } | 139 | } |
142 | 140 | ||
143 | static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port) | ||
144 | { | ||
145 | struct usb_serial *serial = port->serial; | ||
146 | struct usb_serial_port *wport; | ||
147 | |||
148 | wport = serial->port[1]; | ||
149 | tty_port_tty_set(&wport->port, tty); | ||
150 | |||
151 | return usb_serial_generic_open(tty, port); | ||
152 | } | ||
153 | |||
154 | #define OMNINET_HEADERLEN 4 | 141 | #define OMNINET_HEADERLEN 4 |
155 | #define OMNINET_BULKOUTSIZE 64 | 142 | #define OMNINET_BULKOUTSIZE 64 |
156 | #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) | 143 | #define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) |
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 93c6c9b08daa..8a069aa154ed 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c | |||
@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb) | |||
200 | if (!safe) | 200 | if (!safe) |
201 | goto out; | 201 | goto out; |
202 | 202 | ||
203 | if (length < 2) { | ||
204 | dev_err(&port->dev, "malformed packet\n"); | ||
205 | return; | ||
206 | } | ||
207 | |||
203 | fcs = fcs_compute10(data, length, CRC10_INITFCS); | 208 | fcs = fcs_compute10(data, length, CRC10_INITFCS); |
204 | if (fcs) { | 209 | if (fcs) { |
205 | dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); | 210 | dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 16cc18369111..9129f6cb8230 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, | |||
2071 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2071 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2072 | US_FL_IGNORE_RESIDUE ), | 2072 | US_FL_IGNORE_RESIDUE ), |
2073 | 2073 | ||
2074 | /* | ||
2075 | * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de> | ||
2076 | * The INIC-3619 bridge is used in the StarTech SLSODDU33B | ||
2077 | * SATA-USB enclosure for slimline optical drives. | ||
2078 | * | ||
2079 | * The quirk enables MakeMKV to properly exchange keys with | ||
2080 | * an installed BD drive. | ||
2081 | */ | ||
2082 | UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209, | ||
2083 | "Initio Corporation", | ||
2084 | "INIC-3619", | ||
2085 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2086 | US_FL_IGNORE_RESIDUE ), | ||
2087 | |||
2074 | /* Reported by Qinglin Ye <yestyle@gmail.com> */ | 2088 | /* Reported by Qinglin Ye <yestyle@gmail.com> */ |
2075 | UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, | 2089 | UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, |
2076 | "Kingston", | 2090 | "Kingston", |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index f8afc6dcc29f..e8cef1ad0fe3 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) | |||
681 | return 0; | 681 | return 0; |
682 | } | 682 | } |
683 | EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); | 683 | EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); |
684 | |||
685 | /* | ||
686 | * Create userspace mapping for the DMA-coherent memory. | ||
687 | * This function should be called with the pages from the current domain only, | ||
688 | * passing pages mapped from other domains would lead to memory corruption. | ||
689 | */ | ||
690 | int | ||
691 | xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
692 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
693 | unsigned long attrs) | ||
694 | { | ||
695 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
696 | if (__generic_dma_ops(dev)->mmap) | ||
697 | return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr, | ||
698 | dma_addr, size, attrs); | ||
699 | #endif | ||
700 | return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
701 | } | ||
702 | EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap); | ||
703 | |||
704 | /* | ||
705 | * This function should be called with the pages from the current domain only, | ||
706 | * passing pages mapped from other domains would lead to memory corruption. | ||
707 | */ | ||
708 | int | ||
709 | xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
710 | void *cpu_addr, dma_addr_t handle, size_t size, | ||
711 | unsigned long attrs) | ||
712 | { | ||
713 | #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) | ||
714 | if (__generic_dma_ops(dev)->get_sgtable) { | ||
715 | #if 0 | ||
716 | /* | ||
717 | * This check verifies that the page belongs to the current domain and | ||
718 | * is not one mapped from another domain. | ||
719 | * This check is for debug only, and should not go to production build | ||
720 | */ | ||
721 | unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle)); | ||
722 | BUG_ON (!page_is_ram(bfn)); | ||
723 | #endif | ||
724 | return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr, | ||
725 | handle, size, attrs); | ||
726 | } | ||
727 | #endif | ||
728 | return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); | ||
729 | } | ||
730 | EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable); | ||
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 4d343eed08f5..1f4733b80c87 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -55,7 +55,6 @@ | |||
55 | #include <linux/string.h> | 55 | #include <linux/string.h> |
56 | #include <linux/slab.h> | 56 | #include <linux/slab.h> |
57 | #include <linux/miscdevice.h> | 57 | #include <linux/miscdevice.h> |
58 | #include <linux/init.h> | ||
59 | 58 | ||
60 | #include <xen/xenbus.h> | 59 | #include <xen/xenbus.h> |
61 | #include <xen/xen.h> | 60 | #include <xen/xen.h> |