Diffstat (limited to 'drivers')
65 files changed, 719 insertions, 381 deletions
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 5d4b72e21161..569a4a662dcd 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
 	.qc_issue = ftide010_qc_issue,
 };
 
-static struct ata_port_info ftide010_port_info[] = {
-	{
-		.flags = ATA_FLAG_SLAVE_POSS,
-		.mwdma_mask = ATA_MWDMA2,
-		.udma_mask = ATA_UDMA6,
-		.pio_mask = ATA_PIO4,
-		.port_ops = &pata_ftide010_port_ops,
-	},
+static struct ata_port_info ftide010_port_info = {
+	.flags = ATA_FLAG_SLAVE_POSS,
+	.mwdma_mask = ATA_MWDMA2,
+	.udma_mask = ATA_UDMA6,
+	.pio_mask = ATA_PIO4,
+	.port_ops = &pata_ftide010_port_ops,
 };
 
 #if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
 }
 
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 
 	/* Flag port as SATA-capable */
 	if (gemini_sata_bridge_enabled(sg, is_ata1))
-		ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+		pi->flags |= ATA_FLAG_SATA;
+
+	/* This device has broken DMA, only PIO works */
+	if (of_machine_is_compatible("itian,sq201")) {
+		pi->mwdma_mask = 0;
+		pi->udma_mask = 0;
+	}
 
 	/*
 	 * We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 }
 #else
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	const struct ata_port_info pi = ftide010_port_info[0];
+	struct ata_port_info pi = ftide010_port_info;
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct ftide010 *ftide;
 	struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 	 * are ATA0. This will also set up the cable types.
 	 */
 	ret = pata_ftide010_gemini_init(ftide,
+					&pi,
 					(res->start == 0x63400000));
 	if (ret)
 		goto err_dis_clk;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e4757adcb..5a42ae4078c2 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
 int of_pm_clk_add_clks(struct device *dev)
 {
 	struct clk **clks;
-	unsigned int i, count;
+	int i, count;
 	int ret;
 
 	if (!dev || !dev->of_node)
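A note on this one-liner: of_pm_clk_add_clks() assigns the return value of of_count_phandle_with_args() to count, and that helper reports failure as a negative errno. Stored in an unsigned int, a negative return wraps to a huge positive number, so a later "count <= 0" error check can never fire. A minimal, compilable userspace sketch of the pitfall (the helper name and values are invented for illustration):

    #include <stdio.h>

    /* Stand-in for a kernel helper that returns a count on success or a
     * negative errno on failure. */
    static int count_phandles(void)
    {
            return -22;                     /* pretend -EINVAL */
    }

    int main(void)
    {
            unsigned int ucount = count_phandles(); /* wraps to 4294967274 */
            int scount = count_phandles();

            if (ucount <= 0)                /* never true for the wrapped value */
                    printf("unsigned: error caught\n");
            else
                    printf("unsigned: proceeding with %u entries!\n", ucount);

            if (scount <= 0)                /* catches the error as intended */
                    printf("signed: error caught\n");
            return 0;
    }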
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b55b245e8052..fd1e19f1a49f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
 /*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds, 0 means indefinitely long.
+ */
+
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+		   uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+		 "Time in seconds an unused persistent grant is allowed to "
+		 "remain allocated. Default is 60, 0 means unlimited.");
+
+/*
  * Maximum number of rings/queues blkback supports, allow as many queues as there
  * are CPUs if user has not specified a value.
  */
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
 /* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+	return xen_blkif_pgrant_timeout &&
+	       (jiffies - persistent_gnt->last_used >=
+		HZ * xen_blkif_pgrant_timeout);
+}
+
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
 {
 	unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
 		}
 	}
 
-	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
-	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->active = true;
 	/* Add new node and rebalance tree. */
 	rb_link_node(&(persistent_gnt->node), parent, new);
 	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 		else if (gref > data->gnt)
 			node = node->rb_right;
 		else {
-			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+			if (data->active) {
 				pr_alert_ratelimited("requesting a grant already in use\n");
 				return NULL;
 			}
-			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+			data->active = true;
 			atomic_inc(&ring->persistent_gnt_in_use);
 			return data;
 		}
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 static void put_persistent_gnt(struct xen_blkif_ring *ring,
 			       struct persistent_gnt *persistent_gnt)
 {
-	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+	if (!persistent_gnt->active)
 		pr_alert_ratelimited("freeing a grant already unused\n");
-	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
-	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->last_used = jiffies;
+	persistent_gnt->active = false;
 	atomic_dec(&ring->persistent_gnt_in_use);
 }
 
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 	struct persistent_gnt *persistent_gnt;
 	struct rb_node *n;
 	unsigned int num_clean, total;
-	bool scan_used = false, clean_used = false;
+	bool scan_used = false;
 	struct rb_root *root;
 
-	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
-	    !ring->blkif->vbd.overflow_max_grants)) {
-		goto out;
-	}
-
 	if (work_busy(&ring->persistent_purge_work)) {
 		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
 		goto out;
 	}
 
-	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-	num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
-	num_clean = min(ring->persistent_gnt_c, num_clean);
-	if ((num_clean == 0) ||
-	    (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
-		goto out;
+	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+	     !ring->blkif->vbd.overflow_max_grants)) {
+		num_clean = 0;
+	} else {
+		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+			    num_clean;
+		num_clean = min(ring->persistent_gnt_c, num_clean);
+		pr_debug("Going to purge at least %u persistent grants\n",
+			 num_clean);
+	}
 
 	/*
 	 * At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 	 * number of grants.
 	 */
 
-	total = num_clean;
-
-	pr_debug("Going to purge %u persistent grants\n", num_clean);
+	total = 0;
 
 	BUG_ON(!list_empty(&ring->persistent_purge_list));
 	root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
 		BUG_ON(persistent_gnt->handle ==
 			BLKBACK_INVALID_HANDLE);
 
-		if (clean_used) {
-			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+		if (persistent_gnt->active)
 			continue;
-		}
-
-		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
 			continue;
-		if (!scan_used &&
-		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+		if (scan_used && total >= num_clean)
 			continue;
 
 		rb_erase(&persistent_gnt->node, root);
 		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
-		if (--num_clean == 0)
-			goto finished;
+		total++;
 	}
 	/*
-	 * If we get here it means we also need to start cleaning
+	 * Check whether we also need to start cleaning
 	 * grants that were used since last purge in order to cope
 	 * with the requested num
 	 */
-	if (!scan_used && !clean_used) {
-		pr_debug("Still missing %u purged frames\n", num_clean);
+	if (!scan_used && total < num_clean) {
+		pr_debug("Still missing %u purged frames\n", num_clean - total);
 		scan_used = true;
 		goto purge_list;
 	}
-finished:
-	if (!clean_used) {
-		pr_debug("Finished scanning for grants to clean, removing used flag\n");
-		clean_used = true;
-		goto purge_list;
-	}
 
-	ring->persistent_gnt_c -= (total - num_clean);
-	ring->blkif->vbd.overflow_max_grants = 0;
+	if (total) {
+		ring->persistent_gnt_c -= total;
+		ring->blkif->vbd.overflow_max_grants = 0;
 
-	/* We can defer this work */
-	schedule_work(&ring->persistent_purge_work);
-	pr_debug("Purged %u/%u\n", (total - num_clean), total);
+		/* We can defer this work */
+		schedule_work(&ring->persistent_purge_work);
+		pr_debug("Purged %u/%u\n", num_clean, total);
+	}
 
 out:
 	return;
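The new persistent_gnt_timeout() helper above leans on a classic kernel idiom: last_used is a jiffies snapshot, and expiry is tested with unsigned subtraction, which stays correct even after the jiffies counter wraps. A small compilable sketch of why that works (the values are invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned long last_used = (unsigned long)-50;   /* just before wrap */
            unsigned long now = 200;                        /* just after wrap */
            unsigned long timeout = 100;

            /* now < last_used numerically, yet the unsigned difference is
             * the true elapsed tick count (250), so the test still holds. */
            if (now - last_used >= timeout)
                    printf("expired after %lu ticks\n", now - last_used);
            return 0;
    }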
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe8ca8d..1d3002d773f7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
 
 struct backend_info;
 
-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE	2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE	0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE	1
-
 /* Number of requests that we can fit in a ring */
 #define XEN_BLKIF_REQS_PER_PAGE 32
 
@@ -250,7 +240,8 @@ struct persistent_gnt {
 	struct page *page;
 	grant_ref_t gnt;
 	grant_handle_t handle;
-	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+	unsigned long last_used;
+	bool active;
 	struct rb_node node;
 	struct list_head remove_node;
 };
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
 	wait_queue_head_t pending_free_wq;
 
 	/* Tree to store persistent grants. */
-	spinlock_t pers_gnts_lock;
 	struct rb_root persistent_gnts;
 	unsigned int persistent_gnt_c;
 	atomic_t persistent_gnt_in_use;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8986adab9bf5..a71d817e900d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <linux/list.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
 
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);
 
 /*
  * Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
 	/* Save uncomplete reqs and bios for migration. */
 	struct list_head requests;
 	struct bio_list bio_list;
+	struct list_head info_list;
 };
 
 static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
 	return err;
 }
 
+static void free_info(struct blkfront_info *info)
+{
+	list_del(&info->info_list);
+	kfree(info);
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
 			   struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
 destroy_blkring:
 	blkif_free(info, 0);
 
-	kfree(info);
+	mutex_lock(&blkfront_mutex);
+	free_info(info);
+	mutex_unlock(&blkfront_mutex);
+
 	dev_set_drvdata(&dev->dev, NULL);
 
 	return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
 	dev_set_drvdata(&dev->dev, info);
 
+	mutex_lock(&blkfront_mutex);
+	list_add(&info->info_list, &info_list);
+	mutex_unlock(&blkfront_mutex);
+
 	return 0;
 }
 
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		indirect_segments = 0;
 	info->max_indirect_segments = indirect_segments;
+
+	if (info->feature_persistent) {
+		mutex_lock(&blkfront_mutex);
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+		mutex_unlock(&blkfront_mutex);
+	}
 }
 
 /*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 	mutex_unlock(&info->mutex);
 
 	if (!bdev) {
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
 		return 0;
 	}
 
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 	if (info && !bdev->bd_openers) {
 		xlvbd_release_gendisk(info);
 		disk->private_data = NULL;
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
 	}
 
 	mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
 		xlvbd_release_gendisk(info);
 		disk->private_data = NULL;
-		kfree(info);
+		free_info(info);
 	}
 
 out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
 	.is_ready = blkfront_is_ready,
 };
 
+static void purge_persistent_grants(struct blkfront_info *info)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+		struct grant *gnt_list_entry, *tmp;
+
+		spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+		if (rinfo->persistent_gnts_c == 0) {
+			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+			continue;
+		}
+
+		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+					 node) {
+			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+			    gnttab_query_foreign_access(gnt_list_entry->gref))
+				continue;
+
+			list_del(&gnt_list_entry->node);
+			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+			rinfo->persistent_gnts_c--;
+			__free_page(gnt_list_entry->page);
+			kfree(gnt_list_entry);
+		}
+
+		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+	}
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+	struct blkfront_info *info;
+	bool need_schedule_work = false;
+
+	mutex_lock(&blkfront_mutex);
+
+	list_for_each_entry(info, &info_list, info_list) {
+		if (info->feature_persistent) {
+			need_schedule_work = true;
+			mutex_lock(&info->mutex);
+			purge_persistent_grants(info);
+			mutex_unlock(&info->mutex);
+		}
+	}
+
+	if (need_schedule_work)
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+
+	mutex_unlock(&blkfront_mutex);
+}
+
 static int __init xlblk_init(void)
 {
 	int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (!xen_has_pv_disk_devices())
+		return -ENODEV;
+
+	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+		pr_warn("xen_blk: can't get major %d with name %s\n",
+			XENVBD_MAJOR, DEV_NAME);
+		return -ENODEV;
+	}
+
 	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
 		xen_blkif_max_queues = nr_cpus;
 	}
 
-	if (!xen_has_pv_disk_devices())
-		return -ENODEV;
-
-	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
-		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
-		       XENVBD_MAJOR, DEV_NAME);
-		return -ENODEV;
-	}
+	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
 
 	ret = xenbus_register_frontend(&blkfront_driver);
 	if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
 
 static void __exit xlblk_exit(void)
 {
+	cancel_delayed_work_sync(&blkfront_work);
+
 	xenbus_unregister_driver(&blkfront_driver);
 	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
 	kfree(minors);
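The blkfront changes above use a self-arming delayed-work pattern: the handler re-queues itself while any device still advertises persistent grants, and module exit tears it down with cancel_delayed_work_sync(), which is documented to be safe even for work that re-queues itself. A minimal sketch of the pattern as a stand-alone module, with hypothetical names (poll_work, poll_fn):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct delayed_work poll_work;

    static void poll_fn(struct work_struct *work)
    {
            /* periodic cleanup would go here ... */

            /* re-arm only while there is still something to poll */
            schedule_delayed_work(&poll_work, HZ * 10);
    }

    static int __init poll_init(void)
    {
            INIT_DELAYED_WORK(&poll_work, poll_fn);
            schedule_delayed_work(&poll_work, HZ * 10);
            return 0;
    }

    static void __exit poll_exit(void)
    {
            /* waits for a running instance and defeats self-requeueing */
            cancel_delayed_work_sync(&poll_work);
    }

    module_init(poll_init);
    module_exit(poll_exit);
    MODULE_LICENSE("GPL");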
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 113fc6edb2b0..a5d5a96479bf 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_SELECT_DISC) ||
 	    (arg == CDSL_CURRENT || arg == CDSL_NONE))
 		return cdi->ops->drive_status(cdi, CDSL_CURRENT);
-	if (((int)arg >= cdi->capacity))
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 	return cdrom_slot_status(cdi, arg);
 }
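Dropping the (int) cast matters here: arg is the ioctl's user-controlled unsigned long, and truncating it to int before the bounds check lets values with high bits set become negative or small and slip past "arg >= capacity". A compilable illustration with made-up numbers (behavior of the narrowing conversion assumes a common two's-complement target):

    #include <stdio.h>

    int main(void)
    {
            unsigned long arg = 0xfffffffful;       /* user-controlled slot */
            int capacity = 4;

            if ((int)arg >= capacity)               /* (int)arg is -1: check fails */
                    printf("old check: rejected\n");
            else
                    printf("old check: out-of-range slot accepted\n");

            if (arg >= (unsigned long)capacity)     /* full-width compare */
                    printf("new check: rejected\n");
            return 0;
    }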
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 740af90a9508..c5edf8f2fd19 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
 	if (!clk_base)
 		goto npcm7xx_init_error;
 
-	npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
-				   NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+	npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+					       NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
 	if (!npcm7xx_clk_data)
 		goto npcm7xx_init_np_err;
 
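The old size expression added sizeof(npcm7xx_clk_data), the size of the pointer variable, where the size of the pointed-to header structure was intended (the two only happen to coincide on some 64-bit configurations). struct_size(p, member, n) computes header plus n trailing flexible-array elements, and the kernel macro additionally saturates on arithmetic overflow. A compilable sketch of what the two expressions evaluate to, using a stand-in structure:

    #include <stdio.h>

    struct onecell_data {
            unsigned int num;
            void *hws[];                    /* flexible array member */
    };

    int main(void)
    {
            struct onecell_data *d = NULL;
            size_t n = 100;

            /* what struct_size(d, hws, n) evaluates to: header + n elements */
            size_t good = sizeof(*d) + sizeof(d->hws[0]) * n;

            /* shape of the old expression: n elements + sizeof(a pointer),
             * so the header is never properly accounted for */
            size_t bad = sizeof(d->hws[0]) * n + sizeof(d);

            printf("correct: %zu bytes, old math: %zu bytes\n", good, bad);
            return 0;
    }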
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index fb62f3938008..3a0996f2d556 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
 		clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
 		0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
 
-	clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+	clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
 
 	hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
 		0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 110483f0e3fb..e26a40971b26 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		if (idx == -1)
 			idx = i; /* first enabled state */
 		if (s->target_residency > data->predicted_us) {
-			if (!tick_nohz_tick_stopped())
+			if (data->predicted_us < TICK_USEC)
 				break;
 
+			if (!tick_nohz_tick_stopped()) {
+				/*
+				 * If the state selected so far is shallow,
+				 * waking up early won't hurt, so retain the
+				 * tick in that case and let the governor run
+				 * again in the next iteration of the loop.
+				 */
+				expected_interval = drv->states[idx].target_residency;
+				break;
+			}
+
 			/*
 			 * If the state selected so far is shallow and this
 			 * state's target residency matches the time till the
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	int ret = 0;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(ablkcipher,
-					    CRYPTO_TFM_RES_BAD_KEY_LEN);
 		dev_err(jrdev, "key size mismatch\n");
-		return -EINVAL;
+		goto badkey;
 	}
 
 	ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	return ret;
 badkey:
 	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return 0;
+	return -EINVAL;
 }
 
 /*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 		goto unmap_p;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_q;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 		goto unmap_dq;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_qinv;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
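The direction change here is the whole fix: tmp1 and tmp2 are scratch buffers that the accelerator writes during the RSA CRT computation, and mapping them DMA_TO_DEVICE told the DMA API the device would only ever read them, so on non-cache-coherent systems the CPU could keep seeing stale data after the job completes. A minimal sketch of the rule that the direction must match the actual data flow, with a hypothetical helper name:

    #include <linux/dma-mapping.h>

    /* Hypothetical helper: map a scratch buffer the device both reads and
     * writes. Roughly, DMA_TO_DEVICE only cleans the CPU cache at map time;
     * DMA_BIDIRECTIONAL also invalidates at unmap time so the CPU observes
     * the device's writes on non-coherent systems. */
    static int map_scratch(struct device *dev, void *buf, size_t len,
                           dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM;
            return 0;
    }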
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
 		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
 		/* Unmap just-run descriptor so we can post-process */
-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
 				 jrp->entinfo[sw_idx].desc_size,
 				 DMA_TO_DEVICE);
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
 	/* requests in backlog queues */
 	atomic_t backlog_count;
 
+	int write_idx;
 	/* command size 32B/64B */
 	u8 instr_size;
 	u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
 	struct bh_data *slc;
 };
 
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED	0
 #define NITROX_READY		1
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
 	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
 	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
 	cmdq->qsize = (qsize + PKT_IN_ALIGN);
+	cmdq->write_idx = 0;
 
 	spin_lock_init(&cmdq->response_lock);
 	spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
  * Invalid flag options in AES-CCM IV.
  */
 
+static inline int incr_index(int index, int count, int max)
+{
+	if ((index + count) >= max)
+		index = index + count - max;
+	else
+		index += count;
+
+	return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
 			  struct nitrox_cmdq *cmdq)
 {
 	struct nitrox_device *ndev = sr->ndev;
-	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-	u64 offset;
+	int idx;
 	u8 *ent;
 
 	spin_lock_bh(&cmdq->cmdq_lock);
 
-	/* get the next write offset */
-	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+	idx = cmdq->write_idx;
 	/* copy the instruction */
-	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+	ent = cmdq->head + (idx * cmdq->instr_size);
 	memcpy(ent, &sr->instr, cmdq->instr_size);
-	/* flush the command queue updates */
-	dma_wmb();
 
-	sr->tstamp = jiffies;
 	atomic_set(&sr->status, REQ_POSTED);
 	response_list_add(sr, cmdq);
+	sr->tstamp = jiffies;
+	/* flush the command queue updates */
+	dma_wmb();
 
 	/* Ring doorbell with count 1 */
 	writeq(1, cmdq->dbell_csr_addr);
 	/* orders the doorbell rings */
 	mmiowb();
 
+	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
 	spin_unlock_bh(&cmdq->cmdq_lock);
 }
 
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 	struct nitrox_softreq *sr, *tmp;
 	int ret = 0;
 
+	if (!atomic_read(&cmdq->backlog_count))
+		return 0;
+
 	spin_lock_bh(&cmdq->backlog_lock);
 
 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 
 		/* submit until space available */
 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-			ret = -EBUSY;
+			ret = -ENOSPC;
 			break;
 		}
 		/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
 	struct nitrox_cmdq *cmdq = sr->cmdq;
 	struct nitrox_device *ndev = sr->ndev;
-	int ret = -EBUSY;
+
+	/* try to post backlog requests */
+	post_backlog_cmds(cmdq);
 
 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
 		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -EAGAIN;
-
+			return -ENOSPC;
+		/* add to backlog list */
 		backlog_list_add(sr, cmdq);
-	} else {
-		ret = post_backlog_cmds(cmdq);
-		if (ret) {
-			backlog_list_add(sr, cmdq);
-			return ret;
-		}
-		post_se_instr(sr, cmdq);
-		ret = -EINPROGRESS;
+		return -EBUSY;
 	}
-	return ret;
+	post_se_instr(sr, cmdq);
+
+	return -EINPROGRESS;
 }
 
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
 	 */
 	sr->instr.fdata[0] = *((u64 *)&req->gph);
 	sr->instr.fdata[1] = 0;
-	/* flush the soft_req changes before posting the cmd */
-	wmb();
 
 	ret = nitrox_enqueue_request(sr);
-	if (ret == -EAGAIN)
+	if (ret == -ENOSPC)
 		goto send_fail;
 
 	return ret;
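The core of the nitrox rework: the driver now tracks the command queue's write position in software (cmdq->write_idx) instead of reading the base-address offset back from the doorbell CSR on every submission, and incr_index() advances that position around the ring without a modulo. A compilable sketch showing how the index wraps (queue length invented for illustration):

    #include <stdio.h>

    /* Mirror of the driver's incr_index(): advance 'index' by 'count'
     * slots in a ring of 'max' entries (valid while count <= max). */
    static int incr_index(int index, int count, int max)
    {
            if ((index + count) >= max)
                    return index + count - max;
            return index + count;
    }

    int main(void)
    {
            int qlen = 8;
            int idx = 6;
            int i;

            for (i = 0; i < 4; i++) {
                    printf("write slot %d\n", idx);  /* 6, 7, 0, 1 */
                    idx = incr_index(idx, 1, qlen);
            }
            return 0;
    }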
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
 	CSK_CONN_INLINE,	/* Connection on HW */
 };
 
+enum chtls_cdev_state {
+	CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
 	struct sock *lsk;
 	struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
 	unsigned int send_page_order;
 	int max_host_sndbuf;
 	struct key_map kmap;
+	unsigned int cdev_state;
 };
 
 struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
 	tlsdev->hash = chtls_create_hash;
 	tlsdev->unhash = chtls_destroy_hash;
 	tls_register_device(&cdev->tlsdev);
+	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
 	struct chtls_dev *cdev, *tmp;
 
 	mutex_lock(&cdev_mutex);
-	list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-		chtls_free_uld(cdev);
+	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+			chtls_free_uld(cdev);
+	}
 	mutex_unlock(&cdev_mutex);
 }
 
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_encrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
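The motivation for this reshuffle, here and in the XTS variant below, is that blkcipher_walk_done() may fault in pages and therefore sleep, which is forbidden while preemption and page faults are disabled. The fix narrows the VSX enable/disable pair to wrap only the assembly routine in each loop iteration. A pseudocode-shaped sketch of the resulting pattern (vsx_cipher_step() is a hypothetical stand-in for the aes_p8_* call; this fragment is not compilable on its own):

    /* Only the code that actually touches VSX registers runs with
     * preemption and page faults disabled; the walk step, which may
     * sleep, stays outside the critical section. */
    while ((nbytes = walk.nbytes)) {
            preempt_disable();
            pagefault_disable();
            enable_kernel_vsx();

            vsx_cipher_step(&walk, nbytes & AES_BLOCK_MASK);

            disable_kernel_vsx();
            pagefault_enable();
            preempt_enable();

            nbytes &= AES_BLOCK_SIZE - 1;
            ret = blkcipher_walk_done(desc, &walk, nbytes); /* may sleep */
    }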
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 		ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_vsx();
 
-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
 		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			if (enc)
 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						   nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
 			else
 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						   nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
 
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
| 1012 | if (r) | 1012 | if (r) |
| 1013 | return r; | 1013 | return r; |
| 1014 | 1014 | ||
| 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { | 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
| 1016 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; | 1016 | parser->job->preamble_status |= |
| 1017 | if (!parser->ctx->preamble_presented) { | 1017 | AMDGPU_PREAMBLE_IB_PRESENT; |
| 1018 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1019 | parser->ctx->preamble_presented = true; | ||
| 1020 | } | ||
| 1021 | } | ||
| 1022 | 1018 | ||
| 1023 | if (parser->ring && parser->ring != ring) | 1019 | if (parser->ring && parser->ring != ring) |
| 1024 | return -EINVAL; | 1020 | return -EINVAL; |
| @@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1207 | 1203 | ||
| 1208 | int r; | 1204 | int r; |
| 1209 | 1205 | ||
| 1206 | job = p->job; | ||
| 1207 | p->job = NULL; | ||
| 1208 | |||
| 1209 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1210 | if (r) | ||
| 1211 | goto error_unlock; | ||
| 1212 | |||
| 1213 | /* No memory allocation is allowed while holding the mn lock */ | ||
| 1210 | amdgpu_mn_lock(p->mn); | 1214 | amdgpu_mn_lock(p->mn); |
| 1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1215 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
| 1212 | struct amdgpu_bo *bo = e->robj; | 1216 | struct amdgpu_bo *bo = e->robj; |
| 1213 | 1217 | ||
| 1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | 1218 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
| 1215 | amdgpu_mn_unlock(p->mn); | 1219 | r = -ERESTARTSYS; |
| 1216 | return -ERESTARTSYS; | 1220 | goto error_abort; |
| 1217 | } | 1221 | } |
| 1218 | } | 1222 | } |
| 1219 | 1223 | ||
| 1220 | job = p->job; | ||
| 1221 | p->job = NULL; | ||
| 1222 | |||
| 1223 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1224 | if (r) { | ||
| 1225 | amdgpu_job_free(job); | ||
| 1226 | amdgpu_mn_unlock(p->mn); | ||
| 1227 | return r; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | job->owner = p->filp; | 1224 | job->owner = p->filp; |
| 1231 | p->fence = dma_fence_get(&job->base.s_fence->finished); | 1225 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
| 1232 | 1226 | ||
| @@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1241 | 1235 | ||
| 1242 | amdgpu_cs_post_dependencies(p); | 1236 | amdgpu_cs_post_dependencies(p); |
| 1243 | 1237 | ||
| 1238 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && | ||
| 1239 | !p->ctx->preamble_presented) { | ||
| 1240 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1241 | p->ctx->preamble_presented = true; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | cs->out.handle = seq; | 1244 | cs->out.handle = seq; |
| 1245 | job->uf_sequence = seq; | 1245 | job->uf_sequence = seq; |
| 1246 | 1246 | ||
| @@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1258 | amdgpu_mn_unlock(p->mn); | 1258 | amdgpu_mn_unlock(p->mn); |
| 1259 | 1259 | ||
| 1260 | return 0; | 1260 | return 0; |
| 1261 | |||
| 1262 | error_abort: | ||
| 1263 | dma_fence_put(&job->base.s_fence->finished); | ||
| 1264 | job->base.s_fence = NULL; | ||
| 1265 | |||
| 1266 | error_unlock: | ||
| 1267 | amdgpu_job_free(job); | ||
| 1268 | amdgpu_mn_unlock(p->mn); | ||
| 1269 | return r; | ||
| 1261 | } | 1270 | } |
| 1262 | 1271 | ||
| 1263 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 1272 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
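Two related changes in amdgpu_cs_submit() above. First, drm_sched_job_init() allocates memory, so it is moved ahead of amdgpu_mn_lock() and the no-allocation rule is spelled out in a comment; the new error_abort/error_unlock labels tear down the scheduler fence and free the job in the right order. Second, the "first preamble IB" bookkeeping moves here from amdgpu_cs_ib_fill(), so preamble_presented is only latched for submissions that actually go through. A condensed sketch of the new flow, middle elided:

    job = p->job;
    p->job = NULL;

    r = drm_sched_job_init(&job->base, entity, p->filp);  /* may allocate */
    if (r)
            goto error_unlock;

    amdgpu_mn_lock(p->mn);  /* no memory allocation while this is held */

    /* ... userptr re-check, fence setup, push the job ... */

    error_abort:
            dma_fence_put(&job->base.s_fence->finished);
            job->base.s_fence = NULL;       /* undo what job_init created */
    error_unlock:
            amdgpu_job_free(job);
            amdgpu_mn_unlock(p->mn);
            return r;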
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 164 | return r; | 164 | return r; |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 167 | if (ring->funcs->emit_pipeline_sync && job && | 168 | if (ring->funcs->emit_pipeline_sync && job && |
| 168 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || | 169 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || |
| 170 | (amdgpu_sriov_vf(adev) && need_ctx_switch) || | ||
| 169 | amdgpu_vm_need_pipeline_sync(ring, job))) { | 171 | amdgpu_vm_need_pipeline_sync(ring, job))) { |
| 170 | need_pipe_sync = true; | 172 | need_pipe_sync = true; |
| 171 | dma_fence_put(tmp); | 173 | dma_fence_put(tmp); |
| @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 196 | } | 198 | } |
| 197 | 199 | ||
| 198 | skip_preamble = ring->current_ctx == fence_ctx; | 200 | skip_preamble = ring->current_ctx == fence_ctx; |
| 199 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 200 | if (job && ring->funcs->emit_cntxcntl) { | 201 | if (job && ring->funcs->emit_cntxcntl) { |
| 201 | if (need_ctx_switch) | 202 | if (need_ctx_switch) |
| 202 | status |= AMDGPU_HAVE_CTX_SWITCH; | 203 | status |= AMDGPU_HAVE_CTX_SWITCH; |
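The amdgpu_ib_schedule() hunk hoists need_ctx_switch above the pipeline-sync decision, because under SR-IOV a context switch now also forces a pipeline sync; the later duplicate assignment is dropped. The reordered test:

    need_ctx_switch = ring->current_ctx != fence_ctx;   /* computed before use */

    if (ring->funcs->emit_pipeline_sync && job &&
        ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
         (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
         amdgpu_vm_need_pipeline_sync(ring, job))) {
            need_pipe_sync = true;
            dma_fence_put(tmp);
    }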
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
| 1932 | amdgpu_fence_wait_empty(ring); | 1932 | amdgpu_fence_wait_empty(ring); |
| 1933 | } | 1933 | } |
| 1934 | 1934 | ||
| 1935 | mutex_lock(&adev->pm.mutex); | ||
| 1936 | /* update battery/ac status */ | ||
| 1937 | if (power_supply_is_system_supplied() > 0) | ||
| 1938 | adev->pm.ac_power = true; | ||
| 1939 | else | ||
| 1940 | adev->pm.ac_power = false; | ||
| 1941 | mutex_unlock(&adev->pm.mutex); | ||
| 1942 | |||
| 1943 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 1935 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
| 1944 | if (!amdgpu_device_has_dc_support(adev)) { | 1936 | if (!amdgpu_device_has_dc_support(adev)) { |
| 1945 | mutex_lock(&adev->pm.mutex); | 1937 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
| 172 | * is validated on next vm use to avoid fault. | 172 | * is validated on next vm use to avoid fault. |
| 173 | * */ | 173 | * */ |
| 174 | list_move_tail(&base->vm_status, &vm->evicted); | 174 | list_move_tail(&base->vm_status, &vm->evicted); |
| 175 | base->moved = true; | ||
| 175 | } | 176 | } |
| 176 | 177 | ||
| 177 | /** | 178 | /** |
| @@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 369 | uint64_t addr; | 370 | uint64_t addr; |
| 370 | int r; | 371 | int r; |
| 371 | 372 | ||
| 372 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 373 | entries = amdgpu_bo_size(bo) / 8; | 373 | entries = amdgpu_bo_size(bo) / 8; |
| 374 | 374 | ||
| 375 | if (pte_support_ats) { | 375 | if (pte_support_ats) { |
| @@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 401 | if (r) | 401 | if (r) |
| 402 | goto error; | 402 | goto error; |
| 403 | 403 | ||
| 404 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 404 | if (ats_entries) { | 405 | if (ats_entries) { |
| 405 | uint64_t ats_value; | 406 | uint64_t ats_value; |
| 406 | 407 | ||
| @@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) | |||
| 2483 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | 2484 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
| 2484 | * | 2485 | * |
| 2485 | * @adev: amdgpu_device pointer | 2486 | * @adev: amdgpu_device pointer |
| 2486 | * @vm_size: the default vm size if it's set auto | 2487 | * @min_vm_size: the minimum vm size in GB if it's set auto |
| 2487 | * @fragment_size_default: Default PTE fragment size | 2488 | * @fragment_size_default: Default PTE fragment size |
| 2488 | * @max_level: max VMPT level | 2489 | * @max_level: max VMPT level |
| 2489 | * @max_bits: max address space size in bits | 2490 | * @max_bits: max address space size in bits |
| 2490 | * | 2491 | * |
| 2491 | */ | 2492 | */ |
| 2492 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 2493 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 2493 | uint32_t fragment_size_default, unsigned max_level, | 2494 | uint32_t fragment_size_default, unsigned max_level, |
| 2494 | unsigned max_bits) | 2495 | unsigned max_bits) |
| 2495 | { | 2496 | { |
| 2497 | unsigned int max_size = 1 << (max_bits - 30); | ||
| 2498 | unsigned int vm_size; | ||
| 2496 | uint64_t tmp; | 2499 | uint64_t tmp; |
| 2497 | 2500 | ||
| 2498 | /* adjust vm size first */ | 2501 | /* adjust vm size first */ |
| 2499 | if (amdgpu_vm_size != -1) { | 2502 | if (amdgpu_vm_size != -1) { |
| 2500 | unsigned max_size = 1 << (max_bits - 30); | ||
| 2501 | |||
| 2502 | vm_size = amdgpu_vm_size; | 2503 | vm_size = amdgpu_vm_size; |
| 2503 | if (vm_size > max_size) { | 2504 | if (vm_size > max_size) { |
| 2504 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | 2505 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", |
| 2505 | amdgpu_vm_size, max_size); | 2506 | amdgpu_vm_size, max_size); |
| 2506 | vm_size = max_size; | 2507 | vm_size = max_size; |
| 2507 | } | 2508 | } |
| 2509 | } else { | ||
| 2510 | struct sysinfo si; | ||
| 2511 | unsigned int phys_ram_gb; | ||
| 2512 | |||
| 2513 | /* Optimal VM size depends on the amount of physical | ||
| 2514 | * RAM available. Underlying requirements and | ||
| 2515 | * assumptions: | ||
| 2516 | * | ||
| 2517 | * - Need to map system memory and VRAM from all GPUs | ||
| 2518 | * - VRAM from other GPUs not known here | ||
| 2519 | * - Assume VRAM <= system memory | ||
| 2520 | * - On GFX8 and older, VM space can be segmented for | ||
| 2521 | * different MTYPEs | ||
| 2522 | * - Need to allow room for fragmentation, guard pages etc. | ||
| 2523 | * | ||
| 2524 | * This adds up to a rough guess of system memory x3. | ||
| 2525 | * Round up to power of two to maximize the available | ||
| 2526 | * VM size with the given page table size. | ||
| 2527 | */ | ||
| 2528 | si_meminfo(&si); | ||
| 2529 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | ||
| 2530 | (1 << 30) - 1) >> 30; | ||
| 2531 | vm_size = roundup_pow_of_two( | ||
| 2532 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | ||
| 2508 | } | 2533 | } |
| 2509 | 2534 | ||
| 2510 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | 2535 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
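When amdgpu_vm_size is left at auto (-1), the VM size is now derived from installed RAM: roughly three times system memory in GB, clamped between the per-ASIC minimum and the address-space maximum, then rounded up to a power of two. A standalone sketch of the arithmetic; all inputs are illustrative, not driver defaults:

    #include <stdint.h>
    #include <stdio.h>

    /* next power of two for v >= 1 */
    static uint32_t roundup_pow_of_two_u32(uint32_t v)
    {
            v--;
            v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
            v |= v >> 8;  v |= v >> 16;
            return v + 1;
    }

    int main(void)
    {
            uint32_t phys_ram_gb = 16;               /* 16 GB of system RAM */
            uint32_t min_vm_size = 64;               /* hypothetical ASIC minimum, GB */
            uint32_t max_size    = 1u << (40 - 30);  /* 40 address bits -> 1024 GB */
            uint32_t vm_size     = phys_ram_gb * 3;  /* rough guess: 48 GB */

            if (vm_size < min_vm_size)
                    vm_size = min_vm_size;           /* clamp up to 64 GB */
            if (vm_size > max_size)
                    vm_size = max_size;
            vm_size = roundup_pow_of_two_u32(vm_size);  /* 64 is already a power of two */

            printf("vm_size = %u GB\n", vm_size);
            return 0;
    }

The same file also picks up two small fixes visible above: a BO base moved onto the evicted list is flagged moved, and amdgpu_bo_gpu_offset() is read only after the BO has been validated.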
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
| @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |||
| 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); | 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
| 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, |
| 323 | struct amdgpu_bo_va *bo_va); | 323 | struct amdgpu_bo_va *bo_va); |
| 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 325 | uint32_t fragment_size_default, unsigned max_level, | 325 | uint32_t fragment_size_default, unsigned max_level, |
| 326 | unsigned max_bits); | 326 | unsigned max_bits); |
| 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5664 | if (amdgpu_sriov_vf(adev)) | 5664 | if (amdgpu_sriov_vf(adev)) |
| 5665 | return 0; | 5665 | return 0; |
| 5666 | 5666 | ||
| 5667 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | ||
| 5668 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5669 | AMD_PG_SUPPORT_CP | | ||
| 5670 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5671 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | ||
| 5667 | switch (adev->asic_type) { | 5672 | switch (adev->asic_type) { |
| 5668 | case CHIP_CARRIZO: | 5673 | case CHIP_CARRIZO: |
| 5669 | case CHIP_STONEY: | 5674 | case CHIP_STONEY: |
| @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5713 | default: | 5718 | default: |
| 5714 | break; | 5719 | break; |
| 5715 | } | 5720 | } |
| 5716 | 5721 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | |
| 5722 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5723 | AMD_PG_SUPPORT_CP | | ||
| 5724 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5725 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | ||
| 5717 | return 0; | 5726 | return 0; |
| 5718 | } | 5727 | } |
| 5719 | 5728 | ||
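The powergating setup above is now bracketed by RLC safe mode whenever any of the affected features is enabled; entry and exit test the same flag mask so the pair stays balanced across the switch. A hypothetical helper capturing the pairing:

    static void gfx_v8_0_with_rlc_safe_mode(struct amdgpu_device *adev,
                                            void (*body)(struct amdgpu_device *))
    {
            const u32 mask = AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_RLC_SMU_HS |
                             AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GFX_DMG;

            if (adev->pg_flags & mask)
                    adev->gfx.rlc.funcs->enter_safe_mode(adev);
            body(adev);                     /* reprogram powergating here */
            if (adev->pg_flags & mask)
                    adev->gfx.rlc.funcs->exit_safe_mode(adev);
    }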
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
| @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) | |||
| 632 | amdgpu_gart_table_vram_unpin(adev); | 632 | amdgpu_gart_table_vram_unpin(adev); |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) | ||
| 636 | { | ||
| 637 | amdgpu_gart_table_vram_free(adev); | ||
| 638 | amdgpu_gart_fini(adev); | ||
| 639 | } | ||
| 640 | |||
| 641 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, | 635 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, |
| 642 | u32 status, u32 addr, u32 mc_client) | 636 | u32 status, u32 addr, u32 mc_client) |
| 643 | { | 637 | { |
| @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) | |||
| 935 | 929 | ||
| 936 | amdgpu_gem_force_release(adev); | 930 | amdgpu_gem_force_release(adev); |
| 937 | amdgpu_vm_manager_fini(adev); | 931 | amdgpu_vm_manager_fini(adev); |
| 938 | gmc_v6_0_gart_fini(adev); | 932 | amdgpu_gart_table_vram_free(adev); |
| 939 | amdgpu_bo_fini(adev); | 933 | amdgpu_bo_fini(adev); |
| 934 | amdgpu_gart_fini(adev); | ||
| 940 | release_firmware(adev->gmc.fw); | 935 | release_firmware(adev->gmc.fw); |
| 941 | adev->gmc.fw = NULL; | 936 | adev->gmc.fw = NULL; |
| 942 | 937 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | |||
| 747 | } | 747 | } |
| 748 | 748 | ||
| 749 | /** | 749 | /** |
| 750 | * gmc_v7_0_gart_fini - vm fini callback | ||
| 751 | * | ||
| 752 | * @adev: amdgpu_device pointer | ||
| 753 | * | ||
| 754 | * Tears down the driver GART/VM setup (CIK). | ||
| 755 | */ | ||
| 756 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
| 757 | { | ||
| 758 | amdgpu_gart_table_vram_free(adev); | ||
| 759 | amdgpu_gart_fini(adev); | ||
| 760 | } | ||
| 761 | |||
| 762 | /** | ||
| 763 | * gmc_v7_0_vm_decode_fault - print human readable fault info | 750 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
| 764 | * | 751 | * |
| 765 | * @adev: amdgpu_device pointer | 752 | * @adev: amdgpu_device pointer |
| @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) | |||
| 1095 | amdgpu_gem_force_release(adev); | 1082 | amdgpu_gem_force_release(adev); |
| 1096 | amdgpu_vm_manager_fini(adev); | 1083 | amdgpu_vm_manager_fini(adev); |
| 1097 | kfree(adev->gmc.vm_fault_info); | 1084 | kfree(adev->gmc.vm_fault_info); |
| 1098 | gmc_v7_0_gart_fini(adev); | 1085 | amdgpu_gart_table_vram_free(adev); |
| 1099 | amdgpu_bo_fini(adev); | 1086 | amdgpu_bo_fini(adev); |
| 1087 | amdgpu_gart_fini(adev); | ||
| 1100 | release_firmware(adev->gmc.fw); | 1088 | release_firmware(adev->gmc.fw); |
| 1101 | adev->gmc.fw = NULL; | 1089 | adev->gmc.fw = NULL; |
| 1102 | 1090 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |||
| 969 | } | 969 | } |
| 970 | 970 | ||
| 971 | /** | 971 | /** |
| 972 | * gmc_v8_0_gart_fini - vm fini callback | ||
| 973 | * | ||
| 974 | * @adev: amdgpu_device pointer | ||
| 975 | * | ||
| 976 | * Tears down the driver GART/VM setup (CIK). | ||
| 977 | */ | ||
| 978 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
| 979 | { | ||
| 980 | amdgpu_gart_table_vram_free(adev); | ||
| 981 | amdgpu_gart_fini(adev); | ||
| 982 | } | ||
| 983 | |||
| 984 | /** | ||
| 985 | * gmc_v8_0_vm_decode_fault - print human readable fault info | 972 | * gmc_v8_0_vm_decode_fault - print human readable fault info |
| 986 | * | 973 | * |
| 987 | * @adev: amdgpu_device pointer | 974 | * @adev: amdgpu_device pointer |
| @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) | |||
| 1199 | amdgpu_gem_force_release(adev); | 1186 | amdgpu_gem_force_release(adev); |
| 1200 | amdgpu_vm_manager_fini(adev); | 1187 | amdgpu_vm_manager_fini(adev); |
| 1201 | kfree(adev->gmc.vm_fault_info); | 1188 | kfree(adev->gmc.vm_fault_info); |
| 1202 | gmc_v8_0_gart_fini(adev); | 1189 | amdgpu_gart_table_vram_free(adev); |
| 1203 | amdgpu_bo_fini(adev); | 1190 | amdgpu_bo_fini(adev); |
| 1191 | amdgpu_gart_fini(adev); | ||
| 1204 | release_firmware(adev->gmc.fw); | 1192 | release_firmware(adev->gmc.fw); |
| 1205 | adev->gmc.fw = NULL; | 1193 | adev->gmc.fw = NULL; |
| 1206 | 1194 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
| @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) | |||
| 942 | return 0; | 942 | return 0; |
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | /** | ||
| 946 | * gmc_v9_0_gart_fini - vm fini callback | ||
| 947 | * | ||
| 948 | * @adev: amdgpu_device pointer | ||
| 949 | * | ||
| 950 | * Tears down the driver GART/VM setup (CIK). | ||
| 951 | */ | ||
| 952 | static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) | ||
| 953 | { | ||
| 954 | amdgpu_gart_table_vram_free(adev); | ||
| 955 | amdgpu_gart_fini(adev); | ||
| 956 | } | ||
| 957 | |||
| 958 | static int gmc_v9_0_sw_fini(void *handle) | 945 | static int gmc_v9_0_sw_fini(void *handle) |
| 959 | { | 946 | { |
| 960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 947 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 961 | 948 | ||
| 962 | amdgpu_gem_force_release(adev); | 949 | amdgpu_gem_force_release(adev); |
| 963 | amdgpu_vm_manager_fini(adev); | 950 | amdgpu_vm_manager_fini(adev); |
| 964 | gmc_v9_0_gart_fini(adev); | ||
| 965 | 951 | ||
| 966 | /* | 952 | /* |
| 967 | * TODO: | 953 | * TODO: |
| @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) | |||
| 974 | */ | 960 | */ |
| 975 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); | 961 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
| 976 | 962 | ||
| 963 | amdgpu_gart_table_vram_free(adev); | ||
| 977 | amdgpu_bo_fini(adev); | 964 | amdgpu_bo_fini(adev); |
| 965 | amdgpu_gart_fini(adev); | ||
| 978 | 966 | ||
| 979 | return 0; | 967 | return 0; |
| 980 | } | 968 | } |
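The same teardown reordering lands in gmc v6, v7, v8 and v9: the per-ASIC gart_fini() wrappers go away and the two halves are split around amdgpu_bo_fini(). The GART table lives in a buffer object, so it presumably must be freed while the BO layer is still up, whereas amdgpu_gart_fini() only releases bookkeeping and is safe to run last. The resulting order in every sw_fini path:

    amdgpu_gart_table_vram_free(adev);  /* frees a BO: BO layer must be alive */
    amdgpu_bo_fini(adev);               /* tear down BO/TTM management */
    amdgpu_gart_fini(adev);             /* bookkeeping only, safe afterwards */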
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, | |||
| 65 | int min_temp, int max_temp); | 65 | int min_temp, int max_temp); |
| 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); | 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
| 67 | 67 | ||
| 68 | static void kv_dpm_powergate_uvd(void *handle, bool gate); | ||
| 69 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); | ||
| 70 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); | 68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
| 71 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); | 69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
| 72 | 70 | ||
| @@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1354 | return ret; | 1352 | return ret; |
| 1355 | } | 1353 | } |
| 1356 | 1354 | ||
| 1357 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
| 1358 | |||
| 1359 | if (adev->irq.installed && | 1355 | if (adev->irq.installed && |
| 1360 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { | 1356 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
| 1361 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); | 1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
| @@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1374 | 1370 | ||
| 1375 | static void kv_dpm_disable(struct amdgpu_device *adev) | 1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
| 1376 | { | 1372 | { |
| 1373 | struct kv_power_info *pi = kv_get_pi(adev); | ||
| 1374 | |||
| 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1375 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| 1378 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); | 1376 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
| 1379 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| @@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) | |||
| 1387 | /* powerup blocks */ | 1385 | /* powerup blocks */ |
| 1388 | kv_dpm_powergate_acp(adev, false); | 1386 | kv_dpm_powergate_acp(adev, false); |
| 1389 | kv_dpm_powergate_samu(adev, false); | 1387 | kv_dpm_powergate_samu(adev, false); |
| 1390 | kv_dpm_powergate_vce(adev, false); | 1388 | if (pi->caps_vce_pg) /* power on the VCE block */ |
| 1391 | kv_dpm_powergate_uvd(adev, false); | 1389 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
| 1390 | if (pi->caps_uvd_pg) /* power on the UVD block */ | ||
| 1391 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | ||
| 1392 | 1392 | ||
| 1393 | kv_enable_smc_cac(adev, false); | 1393 | kv_enable_smc_cac(adev, false); |
| 1394 | kv_enable_didt(adev, false); | 1394 | kv_enable_didt(adev, false); |
| @@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1551 | int ret; | 1551 | int ret; |
| 1552 | 1552 | ||
| 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
| 1554 | kv_dpm_powergate_vce(adev, false); | ||
| 1555 | if (pi->caps_stable_p_state) | 1554 | if (pi->caps_stable_p_state) |
| 1556 | pi->vce_boot_level = table->count - 1; | 1555 | pi->vce_boot_level = table->count - 1; |
| 1557 | else | 1556 | else |
| @@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1573 | kv_enable_vce_dpm(adev, true); | 1572 | kv_enable_vce_dpm(adev, true); |
| 1574 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1573 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
| 1575 | kv_enable_vce_dpm(adev, false); | 1574 | kv_enable_vce_dpm(adev, false); |
| 1576 | kv_dpm_powergate_vce(adev, true); | ||
| 1577 | } | 1575 | } |
| 1578 | 1576 | ||
| 1579 | return 0; | 1577 | return 0; |
| @@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) | |||
| 1702 | } | 1700 | } |
| 1703 | } | 1701 | } |
| 1704 | 1702 | ||
| 1705 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1703 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
| 1706 | { | 1704 | { |
| 1705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 1707 | struct kv_power_info *pi = kv_get_pi(adev); | 1706 | struct kv_power_info *pi = kv_get_pi(adev); |
| 1708 | 1707 | int ret; | |
| 1709 | if (pi->vce_power_gated == gate) | ||
| 1710 | return; | ||
| 1711 | 1708 | ||
| 1712 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
| 1713 | 1710 | ||
| 1714 | if (!pi->caps_vce_pg) | 1711 | if (gate) { |
| 1715 | return; | 1712 | /* stop the VCE block */ |
| 1716 | 1713 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | |
| 1717 | if (gate) | 1714 | AMD_PG_STATE_GATE); |
| 1718 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | 1715 | kv_enable_vce_dpm(adev, false); |
| 1719 | else | 1716 | if (pi->caps_vce_pg) /* power off the VCE block */ |
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
| 1718 | } else { | ||
| 1719 | if (pi->caps_vce_pg) /* power on the VCE block */ | ||
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
| 1721 | kv_enable_vce_dpm(adev, true); | ||
| 1722 | /* re-init the VCE block */ | ||
| 1723 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1724 | AMD_PG_STATE_UNGATE); | ||
| 1725 | } | ||
| 1721 | } | 1726 | } |
| 1722 | 1727 | ||
| 1728 | |||
| 1723 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1729 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
| 1724 | { | 1730 | { |
| 1725 | struct kv_power_info *pi = kv_get_pi(adev); | 1731 | struct kv_power_info *pi = kv_get_pi(adev); |
| @@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) | |||
| 3061 | else | 3067 | else |
| 3062 | adev->pm.dpm_enabled = true; | 3068 | adev->pm.dpm_enabled = true; |
| 3063 | mutex_unlock(&adev->pm.mutex); | 3069 | mutex_unlock(&adev->pm.mutex); |
| 3064 | 3070 | amdgpu_pm_compute_clocks(adev); | |
| 3065 | return ret; | 3071 | return ret; |
| 3066 | } | 3072 | } |
| 3067 | 3073 | ||
| @@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, | |||
| 3313 | case AMD_IP_BLOCK_TYPE_UVD: | 3319 | case AMD_IP_BLOCK_TYPE_UVD: |
| 3314 | kv_dpm_powergate_uvd(handle, gate); | 3320 | kv_dpm_powergate_uvd(handle, gate); |
| 3315 | break; | 3321 | break; |
| 3322 | case AMD_IP_BLOCK_TYPE_VCE: | ||
| 3323 | kv_dpm_powergate_vce(handle, gate); | ||
| 3324 | break; | ||
| 3316 | default: | 3325 | default: |
| 3317 | break; | 3326 | break; |
| 3318 | } | 3327 | } |
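kv_dpm_powergate_vce() now takes a void *handle so it can sit behind kv_set_powergating_by_smu(), mirroring the UVD path: gating stops the VCE IP block, disables VCE DPM, then asks the SMU to cut power, and ungating reverses the order. The dpm enable/disable and VCE-DPM update paths stop toggling the gate themselves and only send the raw SMU power messages. Assuming the generic DPM helper of this kernel generation, a caller gates VCE like this:

    /* sketch: amdgpu_dpm_set_powergating_by_smu() dispatches to
     * kv_set_powergating_by_smu() on these parts */
    amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, true);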
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
| @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
| 6887 | 6887 | ||
| 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 6889 | si_thermal_start_thermal_controller(adev); | 6889 | si_thermal_start_thermal_controller(adev); |
| 6890 | ni_update_current_ps(adev, boot_ps); | ||
| 6891 | 6890 | ||
| 6892 | return 0; | 6891 | return 0; |
| 6893 | } | 6892 | } |
| @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) | |||
| 7763 | else | 7762 | else |
| 7764 | adev->pm.dpm_enabled = true; | 7763 | adev->pm.dpm_enabled = true; |
| 7765 | mutex_unlock(&adev->pm.mutex); | 7764 | mutex_unlock(&adev->pm.mutex); |
| 7766 | 7765 | amdgpu_pm_compute_clocks(adev); | |
| 7767 | return ret; | 7766 | return ret; |
| 7768 | } | 7767 | } |
| 7769 | 7768 | ||
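Both kv_dpm_hw_init() and si_dpm_hw_init() now call amdgpu_pm_compute_clocks() straight after flagging DPM as enabled, presumably so a power state is evaluated immediately rather than on the first display or load event; the kv_update_current_ps()/ni_update_current_ps() calls in the enable paths are dropped in tandem. The shared tail:

    adev->pm.dpm_enabled = true;
    mutex_unlock(&adev->pm.mutex);
    amdgpu_pm_compute_clocks(adev);     /* evaluate a power state now */
    return ret;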
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
| @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
| 480 | { | 480 | { |
| 481 | struct dc_context *ctx = pp->ctx; | 481 | struct dc_context *ctx = pp->ctx; |
| 482 | struct amdgpu_device *adev = ctx->driver_context; | 482 | struct amdgpu_device *adev = ctx->driver_context; |
| 483 | void *pp_handle = adev->powerplay.pp_handle; | ||
| 483 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | 484 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
| 485 | struct pp_display_clock_request clock = {0}; | ||
| 484 | 486 | ||
| 485 | if (!pp_funcs || !pp_funcs->display_configuration_changed) | 487 | if (!pp_funcs || !pp_funcs->display_clock_voltage_request) |
| 486 | return; | 488 | return; |
| 487 | 489 | ||
| 488 | amdgpu_dpm_display_configuration_changed(adev); | 490 | clock.clock_type = amd_pp_dcf_clock; |
| 491 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | ||
| 492 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 493 | |||
| 494 | clock.clock_type = amd_pp_f_clock; | ||
| 495 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | ||
| 496 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 489 | } | 497 | } |
| 490 | 498 | ||
| 491 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | 499 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
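pp_rv_set_display_requirement() stops funneling through the generic display_configuration_changed() notification and instead issues explicit hard-minimum requests for the DCEF and fabric clocks. A hypothetical wrapper showing the shape of the two calls:

    static void dm_request_hard_min_clocks(void *pp_handle,
                                           const struct amd_pm_funcs *pp_funcs,
                                           uint32_t dcefclk_khz, uint32_t fclk_khz)
    {
            struct pp_display_clock_request clock = {0};

            clock.clock_type = amd_pp_dcf_clock;        /* DCEF clock */
            clock.clock_freq_in_khz = dcefclk_khz;
            pp_funcs->display_clock_voltage_request(pp_handle, &clock);

            clock.clock_type = amd_pp_f_clock;          /* fabric clock */
            clock.clock_freq_in_khz = fclk_khz;
            pp_funcs->display_clock_voltage_request(pp_handle, &clock);
    }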
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
| 754 | * fail-safe mode | 754 | * fail-safe mode |
| 755 | */ | 755 | */ |
| 756 | if (dc_is_hdmi_signal(link->connector_signal) || | 756 | if (dc_is_hdmi_signal(link->connector_signal) || |
| 757 | dc_is_dvi_signal(link->connector_signal)) | 757 | dc_is_dvi_signal(link->connector_signal)) { |
| 758 | if (prev_sink != NULL) | ||
| 759 | dc_sink_release(prev_sink); | ||
| 760 | |||
| 758 | return false; | 761 | return false; |
| 762 | } | ||
| 759 | default: | 763 | default: |
| 760 | break; | 764 | break; |
| 761 | } | 765 | } |
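The fail-safe early return for HDMI/DVI links above used to skip the dc_sink_release() that every other exit from dc_link_detect() performs, leaking a sink reference on each re-detection. The corrected exit:

    if (dc_is_hdmi_signal(link->connector_signal) ||
        dc_is_dvi_signal(link->connector_signal)) {
            if (prev_sink != NULL)
                    dc_sink_release(prev_sink);  /* drop the ref taken earlier */
            return false;
    }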
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
| @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
| 199 | vma->flags |= I915_VMA_GGTT; | 199 | vma->flags |= I915_VMA_GGTT; |
| 200 | list_add(&vma->obj_link, &obj->vma_list); | 200 | list_add(&vma->obj_link, &obj->vma_list); |
| 201 | } else { | 201 | } else { |
| 202 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | ||
| 203 | list_add_tail(&vma->obj_link, &obj->vma_list); | 202 | list_add_tail(&vma->obj_link, &obj->vma_list); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| @@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
| 807 | if (vma->obj) | 806 | if (vma->obj) |
| 808 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); | 807 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); |
| 809 | 808 | ||
| 810 | if (!i915_vma_is_ggtt(vma)) | ||
| 811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | ||
| 812 | |||
| 813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | 809 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { |
| 814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | 810 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); |
| 815 | kfree(iter); | 811 | kfree(iter); |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
| @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
| 962 | { | 962 | { |
| 963 | int ret; | 963 | int ret; |
| 964 | 964 | ||
| 965 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
| 966 | return; | ||
| 967 | |||
| 968 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 965 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
| 969 | if (ret < 0) { | 966 | if (ret < 0) { |
| 970 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 967 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; | 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; |
| 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; | 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; |
| 2990 | int dst_x = plane_state->base.dst.x1; | 2990 | int dst_x = plane_state->base.dst.x1; |
| 2991 | int dst_w = drm_rect_width(&plane_state->base.dst); | ||
| 2991 | int pipe_src_w = crtc_state->pipe_src_w; | 2992 | int pipe_src_w = crtc_state->pipe_src_w; |
| 2992 | int max_width = skl_max_plane_width(fb, 0, rotation); | 2993 | int max_width = skl_max_plane_width(fb, 0, rotation); |
| 2993 | int max_height = 4096; | 2994 | int max_height = 4096; |
| @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 3009 | * screen may cause FIFO underflow and display corruption. | 3010 | * screen may cause FIFO underflow and display corruption. |
| 3010 | */ | 3011 | */ |
| 3011 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 3012 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
| 3012 | (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { | 3013 | (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { |
| 3013 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", | 3014 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", |
| 3014 | dst_x + w < 4 ? "end" : "start", | 3015 | dst_x + dst_w < 4 ? "end" : "start", |
| 3015 | dst_x + w < 4 ? dst_x + w : dst_x, | 3016 | dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, |
| 3016 | 4, pipe_src_w - 4); | 3017 | 4, pipe_src_w - 4); |
| 3017 | return -ERANGE; | 3018 | return -ERANGE; |
| 3018 | } | 3019 | } |
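The Geminilake/Cannonlake FIFO-underflow check compared the plane's destination X position against its source width, mixing coordinate spaces; with plane scaling the two differ. Taking the width from the destination rectangle keeps the whole test in screen coordinates:

    int dst_x = plane_state->base.dst.x1;
    int dst_w = drm_rect_width(&plane_state->base.dst);

    if (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)
            return -ERANGE;     /* plane edge lands too close to the screen edge */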
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, | |||
| 943 | 943 | ||
| 944 | ret = i2c_transfer(adapter, &msg, 1); | 944 | ret = i2c_transfer(adapter, &msg, 1); |
| 945 | if (ret == 1) | 945 | if (ret == 1) |
| 946 | return 0; | 946 | ret = 0; |
| 947 | return ret >= 0 ? -EIO : ret; | 947 | else if (ret >= 0) |
| 948 | ret = -EIO; | ||
| 949 | |||
| 950 | kfree(write_buf); | ||
| 951 | return ret; | ||
| 948 | } | 952 | } |
| 949 | 953 | ||
| 950 | static | 954 | static |
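intel_hdmi_hdcp_write() returned straight out of the i2c_transfer() result check, leaking write_buf on every call. Folding the result into ret and falling through to a single exit frees the buffer on all paths:

    ret = i2c_transfer(adapter, &msg, 1);
    if (ret == 1)
            ret = 0;            /* one message transferred: success */
    else if (ret >= 0)
            ret = -EIO;         /* short transfer: report an I/O error */

    kfree(write_buf);           /* now reached on every path */
    return ret;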
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | |||
| 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
| 75 | lspcon_mode_name(mode)); | 75 | lspcon_mode_name(mode)); |
| 76 | 76 | ||
| 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); | 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
| 78 | if (current_mode != mode) | 78 | if (current_mode != mode) |
| 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); | 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); |
| 80 | 80 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
| @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
| 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); | 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) | ||
| 136 | { | ||
| 137 | return 4; | ||
| 138 | } | ||
| 139 | |||
| 135 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) | 140 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) |
| 136 | { | 141 | { |
| 137 | unsigned int reg; | 142 | unsigned int reg; |
| @@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) | |||
| 157 | 162 | ||
| 158 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) | 163 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) |
| 159 | { | 164 | { |
| 165 | /* The MEM_MODE_INPUT_FORMAT_XXX value returned from this switch | ||
| 166 | * is defined in the MediaTek hardware data sheet. | ||
| 167 | * The channel order in the XXX suffix bears no relation to the | ||
| 168 | * data arrangement in memory. | ||
| 169 | */ | ||
| 160 | switch (fmt) { | 170 | switch (fmt) { |
| 161 | default: | 171 | default: |
| 162 | case DRM_FORMAT_RGB565: | 172 | case DRM_FORMAT_RGB565: |
| @@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { | |||
| 221 | .stop = mtk_ovl_stop, | 231 | .stop = mtk_ovl_stop, |
| 222 | .enable_vblank = mtk_ovl_enable_vblank, | 232 | .enable_vblank = mtk_ovl_enable_vblank, |
| 223 | .disable_vblank = mtk_ovl_disable_vblank, | 233 | .disable_vblank = mtk_ovl_disable_vblank, |
| 234 | .layer_nr = mtk_ovl_layer_nr, | ||
| 224 | .layer_on = mtk_ovl_layer_on, | 235 | .layer_on = mtk_ovl_layer_on, |
| 225 | .layer_off = mtk_ovl_layer_off, | 236 | .layer_off = mtk_ovl_layer_off, |
| 226 | .layer_config = mtk_ovl_layer_config, | 237 | .layer_config = mtk_ovl_layer_config, |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c | |||
| @@ -31,14 +31,31 @@ | |||
| 31 | #define RDMA_REG_UPDATE_INT BIT(0) | 31 | #define RDMA_REG_UPDATE_INT BIT(0) |
| 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 | 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 |
| 33 | #define RDMA_ENGINE_EN BIT(0) | 33 | #define RDMA_ENGINE_EN BIT(0) |
| 34 | #define RDMA_MODE_MEMORY BIT(1) | ||
| 34 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 | 35 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 |
| 36 | #define RDMA_MATRIX_ENABLE BIT(17) | ||
| 37 | #define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) | ||
| 38 | #define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) | ||
| 35 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 | 39 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 |
| 36 | #define DISP_REG_RDMA_TARGET_LINE 0x001c | 40 | #define DISP_REG_RDMA_TARGET_LINE 0x001c |
| 41 | #define DISP_RDMA_MEM_CON 0x0024 | ||
| 42 | #define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) | ||
| 43 | #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) | ||
| 44 | #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) | ||
| 45 | #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) | ||
| 46 | #define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) | ||
| 47 | #define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) | ||
| 48 | #define MEM_MODE_INPUT_SWAP BIT(8) | ||
| 49 | #define DISP_RDMA_MEM_SRC_PITCH 0x002c | ||
| 50 | #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 | ||
| 37 | #define DISP_REG_RDMA_FIFO_CON 0x0040 | 51 | #define DISP_REG_RDMA_FIFO_CON 0x0040 |
| 38 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) | 52 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) |
| 39 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) | 53 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) |
| 40 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) | 54 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) |
| 41 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) | 55 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) |
| 56 | #define DISP_RDMA_MEM_START_ADDR 0x0f00 | ||
| 57 | |||
| 58 | #define RDMA_MEM_GMC 0x40402020 | ||
| 42 | 59 | ||
| 43 | struct mtk_disp_rdma_data { | 60 | struct mtk_disp_rdma_data { |
| 44 | unsigned int fifo_size; | 61 | unsigned int fifo_size; |
| @@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, | |||
| 138 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); | 155 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); |
| 139 | } | 156 | } |
| 140 | 157 | ||
| 158 | static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, | ||
| 159 | unsigned int fmt) | ||
| 160 | { | ||
| 161 | /* The MEM_MODE_INPUT_FORMAT_XXX value returned from this switch | ||
| 162 | * is defined in the MediaTek hardware data sheet. | ||
| 163 | * The channel order in the XXX suffix bears no relation to the | ||
| 164 | * data arrangement in memory. | ||
| 165 | */ | ||
| 166 | switch (fmt) { | ||
| 167 | default: | ||
| 168 | case DRM_FORMAT_RGB565: | ||
| 169 | return MEM_MODE_INPUT_FORMAT_RGB565; | ||
| 170 | case DRM_FORMAT_BGR565: | ||
| 171 | return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; | ||
| 172 | case DRM_FORMAT_RGB888: | ||
| 173 | return MEM_MODE_INPUT_FORMAT_RGB888; | ||
| 174 | case DRM_FORMAT_BGR888: | ||
| 175 | return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; | ||
| 176 | case DRM_FORMAT_RGBX8888: | ||
| 177 | case DRM_FORMAT_RGBA8888: | ||
| 178 | return MEM_MODE_INPUT_FORMAT_ARGB8888; | ||
| 179 | case DRM_FORMAT_BGRX8888: | ||
| 180 | case DRM_FORMAT_BGRA8888: | ||
| 181 | return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; | ||
| 182 | case DRM_FORMAT_XRGB8888: | ||
| 183 | case DRM_FORMAT_ARGB8888: | ||
| 184 | return MEM_MODE_INPUT_FORMAT_RGBA8888; | ||
| 185 | case DRM_FORMAT_XBGR8888: | ||
| 186 | case DRM_FORMAT_ABGR8888: | ||
| 187 | return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; | ||
| 188 | case DRM_FORMAT_UYVY: | ||
| 189 | return MEM_MODE_INPUT_FORMAT_UYVY; | ||
| 190 | case DRM_FORMAT_YUYV: | ||
| 191 | return MEM_MODE_INPUT_FORMAT_YUYV; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 195 | static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) | ||
| 196 | { | ||
| 197 | return 1; | ||
| 198 | } | ||
| 199 | |||
| 200 | static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, | ||
| 201 | struct mtk_plane_state *state) | ||
| 202 | { | ||
| 203 | struct mtk_disp_rdma *rdma = comp_to_rdma(comp); | ||
| 204 | struct mtk_plane_pending_state *pending = &state->pending; | ||
| 205 | unsigned int addr = pending->addr; | ||
| 206 | unsigned int pitch = pending->pitch & 0xffff; | ||
| 207 | unsigned int fmt = pending->format; | ||
| 208 | unsigned int con; | ||
| 209 | |||
| 210 | con = rdma_fmt_convert(rdma, fmt); | ||
| 211 | writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); | ||
| 212 | |||
| 213 | if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { | ||
| 214 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 215 | RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); | ||
| 216 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 217 | RDMA_MATRIX_INT_MTX_SEL, | ||
| 218 | RDMA_MATRIX_INT_MTX_BT601_to_RGB); | ||
| 219 | } else { | ||
| 220 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 221 | RDMA_MATRIX_ENABLE, 0); | ||
| 222 | } | ||
| 223 | |||
| 224 | writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); | ||
| 225 | writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); | ||
| 226 | writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); | ||
| 227 | rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, | ||
| 228 | RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); | ||
| 229 | } | ||
| 230 | |||
| 141 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { | 231 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { |
| 142 | .config = mtk_rdma_config, | 232 | .config = mtk_rdma_config, |
| 143 | .start = mtk_rdma_start, | 233 | .start = mtk_rdma_start, |
| 144 | .stop = mtk_rdma_stop, | 234 | .stop = mtk_rdma_stop, |
| 145 | .enable_vblank = mtk_rdma_enable_vblank, | 235 | .enable_vblank = mtk_rdma_enable_vblank, |
| 146 | .disable_vblank = mtk_rdma_disable_vblank, | 236 | .disable_vblank = mtk_rdma_disable_vblank, |
| 237 | .layer_nr = mtk_rdma_layer_nr, | ||
| 238 | .layer_config = mtk_rdma_layer_config, | ||
| 147 | }; | 239 | }; |
| 148 | 240 | ||
| 149 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, | 241 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, |
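RDMA gains a direct memory (scan-out) mode: the new layer_config hook programs input format, base address, pitch and GMC setting, then flips RDMA_MODE_MEMORY, and packed-YUV sources are routed through the internal BT.601-to-RGB matrix. RDMA also reports a single layer through the new layer_nr hook, versus OVL's four, which the CRTC code below uses to size its plane array. The YUV special case in isolation:

    if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
            /* packed YUV input: enable the BT.601 -> RGB matrix */
            rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
                             RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
            rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
                             RDMA_MATRIX_INT_MTX_SEL,
                             RDMA_MATRIX_INT_MTX_BT601_to_RGB);
    } else {
            rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
                             RDMA_MATRIX_ENABLE, 0);
    }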
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
| @@ -45,7 +45,8 @@ struct mtk_drm_crtc { | |||
| 45 | bool pending_needs_vblank; | 45 | bool pending_needs_vblank; |
| 46 | struct drm_pending_vblank_event *event; | 46 | struct drm_pending_vblank_event *event; |
| 47 | 47 | ||
| 48 | struct drm_plane planes[OVL_LAYER_NR]; | 48 | struct drm_plane *planes; |
| 49 | unsigned int layer_nr; | ||
| 49 | bool pending_planes; | 50 | bool pending_planes; |
| 50 | 51 | ||
| 51 | void __iomem *config_regs; | 52 | void __iomem *config_regs; |
| @@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 171 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 172 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
| 172 | { | 173 | { |
| 173 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 174 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 174 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 175 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 175 | 176 | ||
| 176 | mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); | 177 | mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); |
| 177 | 178 | ||
| 178 | return 0; | 179 | return 0; |
| 179 | } | 180 | } |
| @@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | |||
| 181 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 182 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
| 182 | { | 183 | { |
| 183 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 184 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 184 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 185 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 185 | 186 | ||
| 186 | mtk_ddp_comp_disable_vblank(ovl); | 187 | mtk_ddp_comp_disable_vblank(comp); |
| 187 | } | 188 | } |
| 188 | 189 | ||
| 189 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) | 190 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) |
| @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) | |||
| 286 | } | 287 | } |
| 287 | 288 | ||
| 288 | /* Initially configure all planes */ | 289 | /* Initially configure all planes */ |
| 289 | for (i = 0; i < OVL_LAYER_NR; i++) { | 290 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 290 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 291 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 291 | struct mtk_plane_state *plane_state; | 292 | struct mtk_plane_state *plane_state; |
| 292 | 293 | ||
| @@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 334 | { | 335 | { |
| 335 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 336 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 336 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); | 337 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); |
| 337 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 338 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 338 | unsigned int i; | 339 | unsigned int i; |
| 339 | 340 | ||
| 340 | /* | 341 | /* |
| @@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 343 | * queue update module registers on vblank. | 344 | * queue update module registers on vblank. |
| 344 | */ | 345 | */ |
| 345 | if (state->pending_config) { | 346 | if (state->pending_config) { |
| 346 | mtk_ddp_comp_config(ovl, state->pending_width, | 347 | mtk_ddp_comp_config(comp, state->pending_width, |
| 347 | state->pending_height, | 348 | state->pending_height, |
| 348 | state->pending_vrefresh, 0); | 349 | state->pending_vrefresh, 0); |
| 349 | 350 | ||
| @@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | if (mtk_crtc->pending_planes) { | 354 | if (mtk_crtc->pending_planes) { |
| 354 | for (i = 0; i < OVL_LAYER_NR; i++) { | 355 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 355 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 356 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 356 | struct mtk_plane_state *plane_state; | 357 | struct mtk_plane_state *plane_state; |
| 357 | 358 | ||
| 358 | plane_state = to_mtk_plane_state(plane->state); | 359 | plane_state = to_mtk_plane_state(plane->state); |
| 359 | 360 | ||
| 360 | if (plane_state->pending.config) { | 361 | if (plane_state->pending.config) { |
| 361 | mtk_ddp_comp_layer_config(ovl, i, plane_state); | 362 | mtk_ddp_comp_layer_config(comp, i, plane_state); |
| 362 | plane_state->pending.config = false; | 363 | plane_state->pending.config = false; |
| 363 | } | 364 | } |
| 364 | } | 365 | } |
| @@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 370 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
| 371 | { | 372 | { |
| 372 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 373 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 373 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 374 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 374 | int ret; | 375 | int ret; |
| 375 | 376 | ||
| 376 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 377 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| 377 | 378 | ||
| 378 | ret = mtk_smi_larb_get(ovl->larb_dev); | 379 | ret = mtk_smi_larb_get(comp->larb_dev); |
| 379 | if (ret) { | 380 | if (ret) { |
| 380 | DRM_ERROR("Failed to get larb: %d\n", ret); | 381 | DRM_ERROR("Failed to get larb: %d\n", ret); |
| 381 | return; | 382 | return; |
| @@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 383 | 384 | ||
| 384 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); | 385 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); |
| 385 | if (ret) { | 386 | if (ret) { |
| 386 | mtk_smi_larb_put(ovl->larb_dev); | 387 | mtk_smi_larb_put(comp->larb_dev); |
| 387 | return; | 388 | return; |
| 388 | } | 389 | } |
| 389 | 390 | ||
| @@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 395 | struct drm_crtc_state *old_state) | 396 | struct drm_crtc_state *old_state) |
| 396 | { | 397 | { |
| 397 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 398 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 398 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 399 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 399 | int i; | 400 | int i; |
| 400 | 401 | ||
| 401 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 402 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 403 | return; | 404 | return; |
| 404 | 405 | ||
| 405 | /* Set all pending plane state to disabled */ | 406 | /* Set all pending plane state to disabled */ |
| 406 | for (i = 0; i < OVL_LAYER_NR; i++) { | 407 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 407 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 408 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 408 | struct mtk_plane_state *plane_state; | 409 | struct mtk_plane_state *plane_state; |
| 409 | 410 | ||
| @@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 418 | 419 | ||
| 419 | drm_crtc_vblank_off(crtc); | 420 | drm_crtc_vblank_off(crtc); |
| 420 | mtk_crtc_ddp_hw_fini(mtk_crtc); | 421 | mtk_crtc_ddp_hw_fini(mtk_crtc); |
| 421 | mtk_smi_larb_put(ovl->larb_dev); | 422 | mtk_smi_larb_put(comp->larb_dev); |
| 422 | 423 | ||
| 423 | mtk_crtc->enabled = false; | 424 | mtk_crtc->enabled = false; |
| 424 | } | 425 | } |
| @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 450 | 451 | ||
| 451 | if (mtk_crtc->event) | 452 | if (mtk_crtc->event) |
| 452 | mtk_crtc->pending_needs_vblank = true; | 453 | mtk_crtc->pending_needs_vblank = true; |
| 453 | for (i = 0; i < OVL_LAYER_NR; i++) { | 454 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 454 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 455 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 455 | struct mtk_plane_state *plane_state; | 456 | struct mtk_plane_state *plane_state; |
| 456 | 457 | ||
| @@ -516,7 +517,7 @@ err_cleanup_crtc: | |||
| 516 | return ret; | 517 | return ret; |
| 517 | } | 518 | } |
| 518 | 519 | ||
| 519 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) | 520 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) |
| 520 | { | 521 | { |
| 521 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 522 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 522 | struct mtk_drm_private *priv = crtc->dev->dev_private; | 523 | struct mtk_drm_private *priv = crtc->dev->dev_private; |
| @@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 598 | mtk_crtc->ddp_comp[i] = comp; | 599 | mtk_crtc->ddp_comp[i] = comp; |
| 599 | } | 600 | } |
| 600 | 601 | ||
| 601 | for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
| 603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | ||
| 604 | sizeof(struct drm_plane), | ||
| 605 | GFP_KERNEL); | ||
| 606 | |||
| 607 | for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { | ||
| 602 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : | 608 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : |
| 603 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : | 609 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : |
| 604 | DRM_PLANE_TYPE_OVERLAY; | 610 | DRM_PLANE_TYPE_OVERLAY; |
| @@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 609 | } | 615 | } |
| 610 | 616 | ||
| 611 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], | 617 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], |
| 612 | &mtk_crtc->planes[1], pipe); | 618 | mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : |
| 619 | NULL, pipe); | ||
| 613 | if (ret < 0) | 620 | if (ret < 0) |
| 614 | goto unprepare; | 621 | goto unprepare; |
| 615 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); | 622 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); |
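With layer counts supplied by the leading component (four for an OVL-headed pipeline, one for RDMA), the CRTC allocates its plane array at runtime and only wires up a cursor plane when more than one layer exists. A sketch of the allocation step that also adds the failure check the hunk above does not show:

    mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
    mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
                                    sizeof(struct drm_plane), GFP_KERNEL);
    if (!mtk_crtc->planes)
            return -ENOMEM;     /* allocation failure is unchecked in the hunk above */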
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h | |||
| @@ -18,13 +18,12 @@ | |||
| 18 | #include "mtk_drm_ddp_comp.h" | 18 | #include "mtk_drm_ddp_comp.h" |
| 19 | #include "mtk_drm_plane.h" | 19 | #include "mtk_drm_plane.h" |
| 20 | 20 | ||
| 21 | #define OVL_LAYER_NR 4 | ||
| 22 | #define MTK_LUT_SIZE 512 | 21 | #define MTK_LUT_SIZE 512 |
| 23 | #define MTK_MAX_BPC 10 | 22 | #define MTK_MAX_BPC 10 |
| 24 | #define MTK_MIN_BPC 3 | 23 | #define MTK_MIN_BPC 3 |
| 25 | 24 | ||
| 26 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); | 25 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); |
| 27 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); | 26 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); |
| 28 | int mtk_drm_crtc_create(struct drm_device *drm_dev, | 27 | int mtk_drm_crtc_create(struct drm_device *drm_dev, |
| 29 | const enum mtk_ddp_comp_id *path, | 28 | const enum mtk_ddp_comp_id *path, |
| 30 | unsigned int path_len); | 29 | unsigned int path_len); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c | |||
| @@ -106,6 +106,8 @@ | |||
| 106 | #define OVL1_MOUT_EN_COLOR1 0x1 | 106 | #define OVL1_MOUT_EN_COLOR1 0x1 |
| 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 | 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 |
| 108 | #define RDMA0_SOUT_DPI0 0x2 | 108 | #define RDMA0_SOUT_DPI0 0x2 |
| 109 | #define RDMA0_SOUT_DPI1 0x3 | ||
| 110 | #define RDMA0_SOUT_DSI1 0x1 | ||
| 109 | #define RDMA0_SOUT_DSI2 0x4 | 111 | #define RDMA0_SOUT_DSI2 0x4 |
| 110 | #define RDMA0_SOUT_DSI3 0x5 | 112 | #define RDMA0_SOUT_DSI3 0x5 |
| 111 | #define RDMA1_SOUT_DPI0 0x2 | 113 | #define RDMA1_SOUT_DPI0 0x2 |
| @@ -122,6 +124,8 @@ | |||
| 122 | #define DPI0_SEL_IN_RDMA2 0x3 | 124 | #define DPI0_SEL_IN_RDMA2 0x3 |
| 123 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) | 125 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) |
| 124 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) | 126 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) |
| 127 | #define DSI0_SEL_IN_RDMA1 0x1 | ||
| 128 | #define DSI0_SEL_IN_RDMA2 0x4 | ||
| 125 | #define DSI1_SEL_IN_RDMA1 0x1 | 129 | #define DSI1_SEL_IN_RDMA1 0x1 |
| 126 | #define DSI1_SEL_IN_RDMA2 0x4 | 130 | #define DSI1_SEL_IN_RDMA2 0x4 |
| 127 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) | 131 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) |
| @@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, | |||
| 224 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { | 228 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { |
| 225 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 229 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 226 | value = RDMA0_SOUT_DPI0; | 230 | value = RDMA0_SOUT_DPI0; |
| 231 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { | ||
| 232 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 233 | value = RDMA0_SOUT_DPI1; | ||
| 234 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { | ||
| 235 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 236 | value = RDMA0_SOUT_DSI1; | ||
| 227 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { | 237 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { |
| 228 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 238 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 229 | value = RDMA0_SOUT_DSI2; | 239 | value = RDMA0_SOUT_DSI2; |
| @@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 282 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { | 292 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { |
| 283 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 293 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 284 | value = DPI1_SEL_IN_RDMA1; | 294 | value = DPI1_SEL_IN_RDMA1; |
| 295 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { | ||
| 296 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | ||
| 297 | value = DSI0_SEL_IN_RDMA1; | ||
| 285 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { | 298 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { |
| 286 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | 299 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; |
| 287 | value = DSI1_SEL_IN_RDMA1; | 300 | value = DSI1_SEL_IN_RDMA1; |
| @@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 297 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { | 310 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { |
| 298 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 311 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 299 | value = DPI1_SEL_IN_RDMA2; | 312 | value = DPI1_SEL_IN_RDMA2; |
| 300 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | 313 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { |
| 301 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 314 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
| 315 | value = DSI0_SEL_IN_RDMA2; | ||
| 316 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | ||
| 317 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | ||
| 302 | value = DSI1_SEL_IN_RDMA2; | 318 | value = DSI1_SEL_IN_RDMA2; |
| 303 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { | 319 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { |
| 304 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 320 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | |||
| @@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { | |||
| 78 | void (*stop)(struct mtk_ddp_comp *comp); | 78 | void (*stop)(struct mtk_ddp_comp *comp); |
| 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); | 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); |
| 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); | 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); |
| 81 | unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); | ||
| 81 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); | 82 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 82 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); | 83 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 83 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, | 84 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, |
| @@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) | |||
| 128 | comp->funcs->disable_vblank(comp); | 129 | comp->funcs->disable_vblank(comp); |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 132 | static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) | ||
| 133 | { | ||
| 134 | if (comp->funcs && comp->funcs->layer_nr) | ||
| 135 | return comp->funcs->layer_nr(comp); | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 131 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, | 140 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, |
| 132 | unsigned int idx) | 141 | unsigned int idx) |
| 133 | { | 142 | { |
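mtk_ddp_comp_layer_nr() is the usual optional-callback wrapper: if a component's ops table provides ->layer_nr it is consulted, otherwise the component contributes zero layers. A hypothetical provider, for illustration only (the constant 4 is an assumption, not taken from this diff):

    static unsigned int example_ovl_layer_nr(struct mtk_ddp_comp *comp)
    {
            return 4;       /* this overlay engine exposes four layers */
    }

    static const struct mtk_ddp_comp_funcs example_ovl_funcs = {
            .layer_nr = example_ovl_layer_nr,
            /* ...start, stop, layer_config, ... */
    };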
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
| @@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) | |||
| 381 | err_deinit: | 381 | err_deinit: |
| 382 | mtk_drm_kms_deinit(drm); | 382 | mtk_drm_kms_deinit(drm); |
| 383 | err_free: | 383 | err_free: |
| 384 | drm_dev_unref(drm); | 384 | drm_dev_put(drm); |
| 385 | return ret; | 385 | return ret; |
| 386 | } | 386 | } |
| 387 | 387 | ||
| @@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) | |||
| 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 391 | 391 | ||
| 392 | drm_dev_unregister(private->drm); | 392 | drm_dev_unregister(private->drm); |
| 393 | drm_dev_unref(private->drm); | 393 | drm_dev_put(private->drm); |
| 394 | private->drm = NULL; | 394 | private->drm = NULL; |
| 395 | } | 395 | } |
| 396 | 396 | ||
| @@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) | |||
| 564 | 564 | ||
| 565 | drm_dev_unregister(drm); | 565 | drm_dev_unregister(drm); |
| 566 | mtk_drm_kms_deinit(drm); | 566 | mtk_drm_kms_deinit(drm); |
| 567 | drm_dev_unref(drm); | 567 | drm_dev_put(drm); |
| 568 | 568 | ||
| 569 | component_master_del(&pdev->dev, &mtk_drm_ops); | 569 | component_master_del(&pdev->dev, &mtk_drm_ops); |
| 570 | pm_runtime_disable(&pdev->dev); | 570 | pm_runtime_disable(&pdev->dev); |
| @@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) | |||
| 580 | { | 580 | { |
| 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 582 | struct drm_device *drm = private->drm; | 582 | struct drm_device *drm = private->drm; |
| 583 | int ret; | ||
| 583 | 584 | ||
| 584 | drm_kms_helper_poll_disable(drm); | 585 | ret = drm_mode_config_helper_suspend(drm); |
| 585 | |||
| 586 | private->suspend_state = drm_atomic_helper_suspend(drm); | ||
| 587 | if (IS_ERR(private->suspend_state)) { | ||
| 588 | drm_kms_helper_poll_enable(drm); | ||
| 589 | return PTR_ERR(private->suspend_state); | ||
| 590 | } | ||
| 591 | |||
| 592 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); | 586 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); |
| 593 | return 0; | 587 | |
| 588 | return ret; | ||
| 594 | } | 589 | } |
| 595 | 590 | ||
| 596 | static int mtk_drm_sys_resume(struct device *dev) | 591 | static int mtk_drm_sys_resume(struct device *dev) |
| 597 | { | 592 | { |
| 598 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 593 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 599 | struct drm_device *drm = private->drm; | 594 | struct drm_device *drm = private->drm; |
| 595 | int ret; | ||
| 600 | 596 | ||
| 601 | drm_atomic_helper_resume(drm, private->suspend_state); | 597 | ret = drm_mode_config_helper_resume(drm); |
| 602 | drm_kms_helper_poll_enable(drm); | ||
| 603 | |||
| 604 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); | 598 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); |
| 605 | return 0; | 599 | |
| 600 | return ret; | ||
| 606 | } | 601 | } |
| 607 | #endif | 602 | #endif |
| 608 | 603 | ||
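Both PM callbacks shrink to single helper calls: drm_mode_config_helper_suspend() disables output polling, suspends the atomic state, and parks it inside the drm_device, and drm_mode_config_helper_resume() restores it, so the driver no longer needs its own suspend_state bookkeeping. Condensed shape of the pattern (a sketch; the example_ names are placeholders):

    static int example_sys_suspend(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);

            /* poll-disable + drm_atomic_helper_suspend() in one call */
            return drm_mode_config_helper_suspend(drm);
    }

    static int example_sys_resume(struct device *dev)
    {
            struct drm_device *drm = dev_get_drvdata(dev);

            return drm_mode_config_helper_resume(drm);
    }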
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c | |||
| @@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) | |||
| 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); | 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static u16 adt7475_read_word(struct i2c_client *client, int reg) | 305 | static int adt7475_read_word(struct i2c_client *client, int reg) |
| 306 | { | 306 | { |
| 307 | u16 val; | 307 | int val1, val2; |
| 308 | 308 | ||
| 309 | val = i2c_smbus_read_byte_data(client, reg); | 309 | val1 = i2c_smbus_read_byte_data(client, reg); |
| 310 | val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); | 310 | if (val1 < 0) |
| 311 | return val1; | ||
| 312 | val2 = i2c_smbus_read_byte_data(client, reg + 1); | ||
| 313 | if (val2 < 0) | ||
| 314 | return val2; | ||
| 311 | 315 | ||
| 312 | return val; | 316 | return val1 | (val2 << 8); |
| 313 | } | 317 | } |
| 314 | 318 | ||
| 315 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) | 319 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) |
| @@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, | |||
| 962 | { | 966 | { |
| 963 | struct adt7475_data *data = adt7475_update_device(dev); | 967 | struct adt7475_data *data = adt7475_update_device(dev); |
| 964 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 968 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
| 965 | int i = clamp_val(data->range[sattr->index] & 0xf, 0, | 969 | int idx; |
| 966 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 967 | 970 | ||
| 968 | if (IS_ERR(data)) | 971 | if (IS_ERR(data)) |
| 969 | return PTR_ERR(data); | 972 | return PTR_ERR(data); |
| 973 | idx = clamp_val(data->range[sattr->index] & 0xf, 0, | ||
| 974 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 970 | 975 | ||
| 971 | return sprintf(buf, "%d\n", pwmfreq_table[i]); | 976 | return sprintf(buf, "%d\n", pwmfreq_table[idx]); |
| 972 | } | 977 | } |
| 973 | 978 | ||
| 974 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, | 979 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, |
| @@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, | |||
| 1004 | char *buf) | 1009 | char *buf) |
| 1005 | { | 1010 | { |
| 1006 | struct adt7475_data *data = adt7475_update_device(dev); | 1011 | struct adt7475_data *data = adt7475_update_device(dev); |
| 1012 | |||
| 1013 | if (IS_ERR(data)) | ||
| 1014 | return PTR_ERR(data); | ||
| 1015 | |||
| 1007 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); | 1016 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); |
| 1008 | } | 1017 | } |
| 1009 | 1018 | ||
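Returning int from adt7475_read_word() lets a failed SMBus byte read surface as a negative errno instead of being folded into a bogus 16-bit value; the show routines are likewise reordered so data is validated with IS_ERR() before its fields are touched. Caller-side pattern, sketched with a hypothetical register name:

    int val = adt7475_read_word(client, EXAMPLE_REG);   /* hypothetical reg */
    if (val < 0)
            return val;             /* propagate -EIO etc. unchanged */
    data->example = val & 0xffff;   /* otherwise the low 16 bits hold the word */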
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * Bi-directional Current/Power Monitor with I2C Interface | 17 | * Bi-directional Current/Power Monitor with I2C Interface |
| 18 | * Datasheet: http://www.ti.com/product/ina230 | 18 | * Datasheet: http://www.ti.com/product/ina230 |
| 19 | * | 19 | * |
| 20 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 20 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
| 21 | * Thanks to Jan Volkering | 21 | * Thanks to Jan Volkering |
| 22 | * | 22 | * |
| 23 | * This program is free software; you can redistribute it and/or modify | 23 | * This program is free software; you can redistribute it and/or modify |
| @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) | |||
| 329 | return 0; | 329 | return 0; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | static ssize_t ina2xx_show_shunt(struct device *dev, | ||
| 333 | struct device_attribute *da, | ||
| 334 | char *buf) | ||
| 335 | { | ||
| 336 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
| 337 | |||
| 338 | return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); | ||
| 339 | } | ||
| 340 | |||
| 332 | static ssize_t ina2xx_store_shunt(struct device *dev, | 341 | static ssize_t ina2xx_store_shunt(struct device *dev, |
| 333 | struct device_attribute *da, | 342 | struct device_attribute *da, |
| 334 | const char *buf, size_t count) | 343 | const char *buf, size_t count) |
| @@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
| 403 | 412 | ||
| 404 | /* shunt resistance */ | 413 | /* shunt resistance */ |
| 405 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | 414 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
| 406 | ina2xx_show_value, ina2xx_store_shunt, | 415 | ina2xx_show_shunt, ina2xx_store_shunt, |
| 407 | INA2XX_CALIBRATION); | 416 | INA2XX_CALIBRATION); |
| 408 | 417 | ||
| 409 | /* update interval (ina226 only) */ | 418 | /* update interval (ina226 only) */ |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #include <linux/bitops.h> | 63 | #include <linux/bitops.h> |
| 64 | #include <linux/dmi.h> | 64 | #include <linux/dmi.h> |
| 65 | #include <linux/io.h> | 65 | #include <linux/io.h> |
| 66 | #include <linux/nospec.h> | ||
| 66 | #include "lm75.h" | 67 | #include "lm75.h" |
| 67 | 68 | ||
| 68 | #define USE_ALTERNATE | 69 | #define USE_ALTERNATE |
| @@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, | |||
| 2689 | return err; | 2690 | return err; |
| 2690 | if (val > NUM_TEMP) | 2691 | if (val > NUM_TEMP) |
| 2691 | return -EINVAL; | 2692 | return -EINVAL; |
| 2693 | val = array_index_nospec(val, NUM_TEMP + 1); | ||
| 2692 | if (val && (!(data->have_temp & BIT(val - 1)) || | 2694 | if (val && (!(data->have_temp & BIT(val - 1)) || |
| 2693 | !data->temp_src[val - 1])) | 2695 | !data->temp_src[val - 1])) |
| 2694 | return -EINVAL; | 2696 | return -EINVAL; |
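The nct6775 hunk is Spectre-v1 hardening: the architectural bounds check alone does not stop a speculatively executed out-of-bounds load, so array_index_nospec() also clamps the user-supplied index on the speculative path. The canonical pattern from <linux/nospec.h>:

    if (val > NUM_TEMP)
            return -EINVAL;                         /* architectural check */
    val = array_index_nospec(val, NUM_TEMP + 1);    /* speculative clamp */
    /* val is now safe to use against a table of NUM_TEMP + 1 entries */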
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
| @@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
| 110 | } | 110 | } |
| 111 | #ifdef DEBUG | 111 | #ifdef DEBUG |
| 112 | if (jiffies != start && i2c_debug >= 3) | 112 | if (jiffies != start && i2c_debug >= 3) |
| 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " | 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", |
| 114 | "high\n", jiffies - start); | 114 | jiffies - start); |
| 115 | #endif | 115 | #endif |
| 116 | 116 | ||
| 117 | done: | 117 | done: |
| @@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 171 | setsda(adap, sb); | 171 | setsda(adap, sb); |
| 172 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
| 173 | if (sclhi(adap) < 0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
| 174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, |
| 175 | "timeout at bit #%d\n", (int)c, i); | 175 | "i2c_outb: 0x%02x, timeout at bit #%d\n", |
| 176 | (int)c, i); | ||
| 176 | return -ETIMEDOUT; | 177 | return -ETIMEDOUT; |
| 177 | } | 178 | } |
| 178 | /* FIXME do arbitration here: | 179 | /* FIXME do arbitration here: |
| @@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 185 | } | 186 | } |
| 186 | sdahi(adap); | 187 | sdahi(adap); |
| 187 | if (sclhi(adap) < 0) { /* timeout */ | 188 | if (sclhi(adap) < 0) { /* timeout */ |
| 188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 189 | bit_dbg(1, &i2c_adap->dev, |
| 189 | "timeout at ack\n", (int)c); | 190 | "i2c_outb: 0x%02x, timeout at ack\n", (int)c); |
| 190 | return -ETIMEDOUT; | 191 | return -ETIMEDOUT; |
| 191 | } | 192 | } |
| 192 | 193 | ||
| @@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
| 215 | sdahi(adap); | 216 | sdahi(adap); |
| 216 | for (i = 0; i < 8; i++) { | 217 | for (i = 0; i < 8; i++) { |
| 217 | if (sclhi(adap) < 0) { /* timeout */ | 218 | if (sclhi(adap) < 0) { /* timeout */ |
| 218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 219 | bit_dbg(1, &i2c_adap->dev, |
| 219 | "#%d\n", 7 - i); | 220 | "i2c_inb: timeout at bit #%d\n", |
| 221 | 7 - i); | ||
| 220 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
| 221 | } | 223 | } |
| 222 | indata *= 2; | 224 | indata *= 2; |
| @@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 265 | goto bailout; | 267 | goto bailout; |
| 266 | } | 268 | } |
| 267 | if (!scl) { | 269 | if (!scl) { |
| 268 | printk(KERN_WARNING "%s: SCL unexpected low " | 270 | printk(KERN_WARNING |
| 269 | "while pulling SDA low!\n", name); | 271 | "%s: SCL unexpected low while pulling SDA low!\n", |
| 272 | name); | ||
| 270 | goto bailout; | 273 | goto bailout; |
| 271 | } | 274 | } |
| 272 | 275 | ||
| @@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 278 | goto bailout; | 281 | goto bailout; |
| 279 | } | 282 | } |
| 280 | if (!scl) { | 283 | if (!scl) { |
| 281 | printk(KERN_WARNING "%s: SCL unexpected low " | 284 | printk(KERN_WARNING |
| 282 | "while pulling SDA high!\n", name); | 285 | "%s: SCL unexpected low while pulling SDA high!\n", |
| 286 | name); | ||
| 283 | goto bailout; | 287 | goto bailout; |
| 284 | } | 288 | } |
| 285 | 289 | ||
| @@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 291 | goto bailout; | 295 | goto bailout; |
| 292 | } | 296 | } |
| 293 | if (!sda) { | 297 | if (!sda) { |
| 294 | printk(KERN_WARNING "%s: SDA unexpected low " | 298 | printk(KERN_WARNING |
| 295 | "while pulling SCL low!\n", name); | 299 | "%s: SDA unexpected low while pulling SCL low!\n", |
| 300 | name); | ||
| 296 | goto bailout; | 301 | goto bailout; |
| 297 | } | 302 | } |
| 298 | 303 | ||
| @@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 304 | goto bailout; | 309 | goto bailout; |
| 305 | } | 310 | } |
| 306 | if (!sda) { | 311 | if (!sda) { |
| 307 | printk(KERN_WARNING "%s: SDA unexpected low " | 312 | printk(KERN_WARNING |
| 308 | "while pulling SCL high!\n", name); | 313 | "%s: SDA unexpected low while pulling SCL high!\n", |
| 314 | name); | ||
| 309 | goto bailout; | 315 | goto bailout; |
| 310 | } | 316 | } |
| 311 | 317 | ||
| @@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
| 352 | i2c_start(adap); | 358 | i2c_start(adap); |
| 353 | } | 359 | } |
| 354 | if (i && ret) | 360 | if (i && ret) |
| 355 | bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " | 361 | bit_dbg(1, &i2c_adap->dev, |
| 356 | "0x%02x: %s\n", i + 1, | 362 | "Used %d tries to %s client at 0x%02x: %s\n", i + 1, |
| 357 | addr & 1 ? "read from" : "write to", addr >> 1, | 363 | addr & 1 ? "read from" : "write to", addr >> 1, |
| 358 | ret == 1 ? "success" : "failed, timeout?"); | 364 | ret == 1 ? "success" : "failed, timeout?"); |
| 359 | return ret; | 365 | return ret; |
| @@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 442 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { | 448 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { |
| 443 | if (!(flags & I2C_M_NO_RD_ACK)) | 449 | if (!(flags & I2C_M_NO_RD_ACK)) |
| 444 | acknak(i2c_adap, 0); | 450 | acknak(i2c_adap, 0); |
| 445 | dev_err(&i2c_adap->dev, "readbytes: invalid " | 451 | dev_err(&i2c_adap->dev, |
| 446 | "block length (%d)\n", inval); | 452 | "readbytes: invalid block length (%d)\n", |
| 453 | inval); | ||
| 447 | return -EPROTO; | 454 | return -EPROTO; |
| 448 | } | 455 | } |
| 449 | /* The original count value accounts for the extra | 456 | /* The original count value accounts for the extra |
| @@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 506 | return -ENXIO; | 513 | return -ENXIO; |
| 507 | } | 514 | } |
| 508 | if (flags & I2C_M_RD) { | 515 | if (flags & I2C_M_RD) { |
| 509 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 516 | bit_dbg(3, &i2c_adap->dev, |
| 510 | "start condition\n"); | 517 | "emitting repeated start condition\n"); |
| 511 | i2c_repstart(adap); | 518 | i2c_repstart(adap); |
| 512 | /* okay, now switch into reading mode */ | 519 | /* okay, now switch into reading mode */ |
| 513 | addr |= 0x01; | 520 | addr |= 0x01; |
| @@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
| 564 | } | 571 | } |
| 565 | ret = bit_doAddress(i2c_adap, pmsg); | 572 | ret = bit_doAddress(i2c_adap, pmsg); |
| 566 | if ((ret != 0) && !nak_ok) { | 573 | if ((ret != 0) && !nak_ok) { |
| 567 | bit_dbg(1, &i2c_adap->dev, "NAK from " | 574 | bit_dbg(1, &i2c_adap->dev, |
| 568 | "device addr 0x%02x msg #%d\n", | 575 | "NAK from device addr 0x%02x msg #%d\n", |
| 569 | msgs[i].addr, i); | 576 | msgs[i].addr, i); |
| 570 | goto bailout; | 577 | goto bailout; |
| 571 | } | 578 | } |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
| @@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) | |||
| 708 | i2c_set_adapdata(adap, dev); | 708 | i2c_set_adapdata(adap, dev); |
| 709 | 709 | ||
| 710 | if (dev->pm_disabled) { | 710 | if (dev->pm_disabled) { |
| 711 | dev_pm_syscore_device(dev->dev, true); | ||
| 712 | irq_flags = IRQF_NO_SUSPEND; | 711 | irq_flags = IRQF_NO_SUSPEND; |
| 713 | } else { | 712 | } else { |
| 714 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; | 713 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) | |||
| 434 | { | 434 | { |
| 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 436 | 436 | ||
| 437 | if (i_dev->pm_disabled) | ||
| 438 | return 0; | ||
| 439 | |||
| 437 | i_dev->disable(i_dev); | 440 | i_dev->disable(i_dev); |
| 438 | i2c_dw_prepare_clk(i_dev, false); | 441 | i2c_dw_prepare_clk(i_dev, false); |
| 439 | 442 | ||
| @@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
| 444 | { | 447 | { |
| 445 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 448 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 446 | 449 | ||
| 447 | i2c_dw_prepare_clk(i_dev, true); | 450 | if (!i_dev->pm_disabled) |
| 451 | i2c_dw_prepare_clk(i_dev, true); | ||
| 452 | |||
| 448 | i_dev->init(i_dev); | 453 | i_dev->init(i_dev); |
| 449 | 454 | ||
| 450 | return 0; | 455 | return 0; |
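The two designware hunks belong together: instead of marking the device syscore, a bus with pm_disabled set simply skips the suspend teardown, and resume only re-enables the clock when it was actually gated. The resulting pair, condensed:

    static int dw_i2c_plat_suspend(struct device *dev)
    {
            struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);

            if (i_dev->pm_disabled)         /* must stay live across suspend */
                    return 0;

            i_dev->disable(i_dev);
            i2c_dw_prepare_clk(i_dev, false);
            return 0;
    }

    static int dw_i2c_plat_resume(struct device *dev)
    {
            struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);

            if (!i_dev->pm_disabled)        /* clock was gated on suspend */
                    i2c_dw_prepare_clk(i_dev, true);

            i_dev->init(i_dev);             /* reprogram timings either way */
            return 0;
    }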
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..04b60a349d7e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1415 | } | 1415 | } |
| 1416 | 1416 | ||
| 1417 | #ifdef CONFIG_ACPI | 1417 | #ifdef CONFIG_ACPI |
| 1418 | static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, | ||
| 1419 | acpi_physical_address address) | ||
| 1420 | { | ||
| 1421 | return address >= priv->smba && | ||
| 1422 | address <= pci_resource_end(priv->pci_dev, SMBBAR); | ||
| 1423 | } | ||
| 1424 | |||
| 1418 | static acpi_status | 1425 | static acpi_status |
| 1419 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | 1426 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, |
| 1420 | u64 *value, void *handler_context, void *region_context) | 1427 | u64 *value, void *handler_context, void *region_context) |
| @@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | |||
| 1430 | */ | 1437 | */ |
| 1431 | mutex_lock(&priv->acpi_lock); | 1438 | mutex_lock(&priv->acpi_lock); |
| 1432 | 1439 | ||
| 1433 | if (!priv->acpi_reserved) { | 1440 | if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { |
| 1434 | priv->acpi_reserved = true; | 1441 | priv->acpi_reserved = true; |
| 1435 | 1442 | ||
| 1436 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | 1443 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); |
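i801_acpi_is_smbus_ioport() narrows the ACPI opregion trap: only AML I/O that actually lands inside the SMBus BAR window marks the bus acpi_reserved, so unrelated SystemIO accesses elsewhere in the handled region no longer disable the driver. Guarded handler, sketched:

    if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
            priv->acpi_reserved = true;     /* BIOS owns the SMBus from here */
            dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
    }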
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
| @@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
| 507 | pd->pos = pd->msg->len; | 507 | pd->pos = pd->msg->len; |
| 508 | pd->stop_after_dma = true; | 508 | pd->stop_after_dma = true; |
| 509 | 509 | ||
| 510 | i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); | ||
| 511 | |||
| 512 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 510 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
| 513 | } | 511 | } |
| 514 | 512 | ||
| @@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
| 602 | dma_async_issue_pending(chan); | 600 | dma_async_issue_pending(chan); |
| 603 | } | 601 | } |
| 604 | 602 | ||
| 605 | static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | 603 | static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, |
| 606 | bool do_init) | 604 | bool do_init) |
| 607 | { | 605 | { |
| 608 | if (do_init) { | 606 | if (do_init) { |
| 609 | /* Initialize channel registers */ | 607 | /* Initialize channel registers */ |
| @@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |||
| 627 | 625 | ||
| 628 | /* Enable all interrupts to begin with */ | 626 | /* Enable all interrupts to begin with */ |
| 629 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | 627 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); |
| 630 | return 0; | ||
| 631 | } | 628 | } |
| 632 | 629 | ||
| 633 | static int poll_dte(struct sh_mobile_i2c_data *pd) | 630 | static int poll_dte(struct sh_mobile_i2c_data *pd) |
| @@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 698 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 695 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
| 699 | pd->stop_after_dma = false; | 696 | pd->stop_after_dma = false; |
| 700 | 697 | ||
| 701 | err = start_ch(pd, msg, do_start); | 698 | start_ch(pd, msg, do_start); |
| 702 | if (err) | ||
| 703 | break; | ||
| 704 | 699 | ||
| 705 | if (do_start) | 700 | if (do_start) |
| 706 | i2c_op(pd, OP_START, 0); | 701 | i2c_op(pd, OP_START, 0); |
| @@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 709 | timeout = wait_event_timeout(pd->wait, | 704 | timeout = wait_event_timeout(pd->wait, |
| 710 | pd->sr & (ICSR_TACK | SW_DONE), | 705 | pd->sr & (ICSR_TACK | SW_DONE), |
| 711 | adapter->timeout); | 706 | adapter->timeout); |
| 707 | |||
| 708 | /* 'stop_after_dma' tells whether the DMA transfer completed */ | ||

| 709 | i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | ||
| 710 | |||
| 712 | if (!timeout) { | 711 | if (!timeout) { |
| 713 | dev_err(pd->dev, "Transfer request timed out\n"); | 712 | dev_err(pd->dev, "Transfer request timed out\n"); |
| 714 | if (pd->dma_direction != DMA_NONE) | 713 | if (pd->dma_direction != DMA_NONE) |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
| @@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) | |||
| 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); | 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); |
| 2294 | 2294 | ||
| 2295 | /** | 2295 | /** |
| 2296 | * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg | 2296 | * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg |
| 2297 | * @msg: the message to be synced with | ||
| 2298 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. | 2297 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. |
| 2298 | * @msg: the message which the buffer corresponds to | ||
| 2299 | * @xferred: bool saying if the message was transferred | ||
| 2299 | */ | 2300 | */ |
| 2300 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) | 2301 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) |
| 2301 | { | 2302 | { |
| 2302 | if (!buf || buf == msg->buf) | 2303 | if (!buf || buf == msg->buf) |
| 2303 | return; | 2304 | return; |
| 2304 | 2305 | ||
| 2305 | if (msg->flags & I2C_M_RD) | 2306 | if (xferred && msg->flags & I2C_M_RD) |
| 2306 | memcpy(msg->buf, buf, msg->len); | 2307 | memcpy(msg->buf, buf, msg->len); |
| 2307 | 2308 | ||
| 2308 | kfree(buf); | 2309 | kfree(buf); |
| 2309 | } | 2310 | } |
| 2310 | EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); | 2311 | EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); |
| 2311 | 2312 | ||
| 2312 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 2313 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
| 2313 | MODULE_DESCRIPTION("I2C-Bus main module"); | 2314 | MODULE_DESCRIPTION("I2C-Bus main module"); |
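The renamed i2c_put_dma_safe_msg_buf() takes an explicit xferred flag so the bounce buffer is copied back into a read message only when the transfer really finished; sh_mobile accordingly moves the call out of the DMA callback into the xfer path, where the outcome is known. Typical driver usage (a sketch; the 8-byte threshold and the transfer_completed flag are placeholders):

    u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
    if (dma_buf) {
            /* ...map dma_buf, run the transfer, wait for completion... */
            /* copies back into msg->buf only for completed reads */
            i2c_put_dma_safe_msg_buf(dma_buf, msg, transfer_completed);
    }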
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
| @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, | |||
| 238 | mmc_exit_request(mq->queue, req); | 238 | mmc_exit_request(mq->queue, req); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | /* | ||
| 242 | * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests | ||
| 243 | * will not be dispatched in parallel. | ||
| 244 | */ | ||
| 245 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | 241 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
| 246 | const struct blk_mq_queue_data *bd) | 242 | const struct blk_mq_queue_data *bd) |
| 247 | { | 243 | { |
| @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 264 | 260 | ||
| 265 | spin_lock_irq(q->queue_lock); | 261 | spin_lock_irq(q->queue_lock); |
| 266 | 262 | ||
| 267 | if (mq->recovery_needed) { | 263 | if (mq->recovery_needed || mq->busy) { |
| 268 | spin_unlock_irq(q->queue_lock); | 264 | spin_unlock_irq(q->queue_lock); |
| 269 | return BLK_STS_RESOURCE; | 265 | return BLK_STS_RESOURCE; |
| 270 | } | 266 | } |
| @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 291 | break; | 287 | break; |
| 292 | } | 288 | } |
| 293 | 289 | ||
| 290 | /* Parallel dispatch of requests is not supported at the moment */ | ||
| 291 | mq->busy = true; | ||
| 292 | |||
| 294 | mq->in_flight[issue_type] += 1; | 293 | mq->in_flight[issue_type] += 1; |
| 295 | get_card = (mmc_tot_in_flight(mq) == 1); | 294 | get_card = (mmc_tot_in_flight(mq) == 1); |
| 296 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); | 295 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
| @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 333 | mq->in_flight[issue_type] -= 1; | 332 | mq->in_flight[issue_type] -= 1; |
| 334 | if (mmc_tot_in_flight(mq) == 0) | 333 | if (mmc_tot_in_flight(mq) == 0) |
| 335 | put_card = true; | 334 | put_card = true; |
| 335 | mq->busy = false; | ||
| 336 | spin_unlock_irq(q->queue_lock); | 336 | spin_unlock_irq(q->queue_lock); |
| 337 | if (put_card) | 337 | if (put_card) |
| 338 | mmc_put_card(card, &mq->ctx); | 338 | mmc_put_card(card, &mq->ctx); |
| 339 | } else { | ||
| 340 | WRITE_ONCE(mq->busy, false); | ||
| 339 | } | 341 | } |
| 340 | 342 | ||
| 341 | return ret; | 343 | return ret; |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
| @@ -81,6 +81,7 @@ struct mmc_queue { | |||
| 81 | unsigned int cqe_busy; | 81 | unsigned int cqe_busy; |
| 82 | #define MMC_CQE_DCMD_BUSY BIT(0) | 82 | #define MMC_CQE_DCMD_BUSY BIT(0) |
| 83 | #define MMC_CQE_QUEUE_FULL BIT(1) | 83 | #define MMC_CQE_QUEUE_FULL BIT(1) |
| 84 | bool busy; | ||
| 84 | bool use_cqe; | 85 | bool use_cqe; |
| 85 | bool recovery_needed; | 86 | bool recovery_needed; |
| 86 | bool in_recovery; | 87 | bool in_recovery; |
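With a single blocking hardware queue the old code relied on blk-mq never dispatching in parallel; the new mq->busy flag enforces that explicitly. queue_rq bounces concurrent callers with BLK_STS_RESOURCE and clears the flag once the request has left the submission path. Core of the pattern:

    spin_lock_irq(q->queue_lock);
    if (mq->recovery_needed || mq->busy) {
            spin_unlock_irq(q->queue_lock);
            return BLK_STS_RESOURCE;        /* blk-mq retries the request */
    }
    mq->busy = true;        /* one request in the submission path at a time */
    spin_unlock_irq(q->queue_lock);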
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c | |||
| @@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | |||
| 217 | * We don't really have DMA, so we need | 217 | * We don't really have DMA, so we need |
| 218 | * to copy from our platform driver buffer | 218 | * to copy from our platform driver buffer |
| 219 | */ | 219 | */ |
| 220 | sg_copy_to_buffer(data->sg, 1, host->virt_base, | 220 | sg_copy_from_buffer(data->sg, 1, host->virt_base, |
| 221 | data->sg->length); | 221 | data->sg->length); |
| 222 | } | 222 | } |
| 223 | host->data->bytes_xfered += data->sg->length; | 223 | host->data->bytes_xfered += data->sg->length; |
| @@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | |||
| 393 | * We don't really have DMA, so we need to copy to our | 393 | * We don't really have DMA, so we need to copy to our |
| 394 | * platform driver buffer | 394 | * platform driver buffer |
| 395 | */ | 395 | */ |
| 396 | sg_copy_from_buffer(data->sg, 1, host->virt_base, | 396 | sg_copy_to_buffer(data->sg, 1, host->virt_base, |
| 397 | data->sg->length); | 397 | data->sg->length); |
| 398 | } | 398 | } |
| 399 | } | 399 | } |
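Both MMC fixes are direction swaps in the scatterlist copy helpers: sg_copy_from_buffer() fills the scatterlist from a linear buffer, while sg_copy_to_buffer() drains the scatterlist into one (the names are from the scatterlist's point of view). For a PIO-style host that bounces through host->virt_base:

    /* read completion: device filled host->virt_base, push it into the sg */
    sg_copy_from_buffer(data->sg, 1, host->virt_base, data->sg->length);

    /* write preparation: pull the sg into host->virt_base for the device */
    sg_copy_to_buffer(data->sg, 1, host->virt_base, data->sg->length);

The sg_pcopy_{from,to}_buffer() calls in atmel-mci below follow the same convention, just with an extra byte offset.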
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
| @@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1976 | do { | 1976 | do { |
| 1977 | value = atmci_readl(host, ATMCI_RDR); | 1977 | value = atmci_readl(host, ATMCI_RDR); |
| 1978 | if (likely(offset + 4 <= sg->length)) { | 1978 | if (likely(offset + 4 <= sg->length)) { |
| 1979 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); | 1979 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); |
| 1980 | 1980 | ||
| 1981 | offset += 4; | 1981 | offset += 4; |
| 1982 | nbytes += 4; | 1982 | nbytes += 4; |
| @@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1993 | } else { | 1993 | } else { |
| 1994 | unsigned int remaining = sg->length - offset; | 1994 | unsigned int remaining = sg->length - offset; |
| 1995 | 1995 | ||
| 1996 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); | 1996 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); |
| 1997 | nbytes += remaining; | 1997 | nbytes += remaining; |
| 1998 | 1998 | ||
| 1999 | flush_dcache_page(sg_page(sg)); | 1999 | flush_dcache_page(sg_page(sg)); |
| @@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 2003 | goto done; | 2003 | goto done; |
| 2004 | 2004 | ||
| 2005 | offset = 4 - remaining; | 2005 | offset = 4 - remaining; |
| 2006 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, | 2006 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, |
| 2007 | offset, 0); | 2007 | offset, 0); |
| 2008 | nbytes += offset; | 2008 | nbytes += offset; |
| 2009 | } | 2009 | } |
| @@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2042 | 2042 | ||
| 2043 | do { | 2043 | do { |
| 2044 | if (likely(offset + 4 <= sg->length)) { | 2044 | if (likely(offset + 4 <= sg->length)) { |
| 2045 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); | 2045 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); |
| 2046 | atmci_writel(host, ATMCI_TDR, value); | 2046 | atmci_writel(host, ATMCI_TDR, value); |
| 2047 | 2047 | ||
| 2048 | offset += 4; | 2048 | offset += 4; |
| @@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2059 | unsigned int remaining = sg->length - offset; | 2059 | unsigned int remaining = sg->length - offset; |
| 2060 | 2060 | ||
| 2061 | value = 0; | 2061 | value = 0; |
| 2062 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); | 2062 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); |
| 2063 | nbytes += remaining; | 2063 | nbytes += remaining; |
| 2064 | 2064 | ||
| 2065 | host->sg = sg = sg_next(sg); | 2065 | host->sg = sg = sg_next(sg); |
| @@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2070 | } | 2070 | } |
| 2071 | 2071 | ||
| 2072 | offset = 4 - remaining; | 2072 | offset = 4 - remaining; |
| 2073 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, | 2073 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, |
| 2074 | offset, 0); | 2074 | offset, 0); |
| 2075 | atmci_writel(host, ATMCI_TDR, value); | 2075 | atmci_writel(host, ATMCI_TDR, value); |
| 2076 | nbytes += offset; | 2076 | nbytes += offset; |
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
| @@ -45,14 +45,16 @@ | |||
| 45 | /* DM_CM_RST */ | 45 | /* DM_CM_RST */ |
| 46 | #define RST_DTRANRST1 BIT(9) | 46 | #define RST_DTRANRST1 BIT(9) |
| 47 | #define RST_DTRANRST0 BIT(8) | 47 | #define RST_DTRANRST0 BIT(8) |
| 48 | #define RST_RESERVED_BITS GENMASK_ULL(32, 0) | 48 | #define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
| 49 | 49 | ||
| 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ | 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
| 51 | #define INFO1_CLEAR 0 | 51 | #define INFO1_CLEAR 0 |
| 52 | #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 52 | #define INFO1_DTRANEND1 BIT(17) | 53 | #define INFO1_DTRANEND1 BIT(17) |
| 53 | #define INFO1_DTRANEND0 BIT(16) | 54 | #define INFO1_DTRANEND0 BIT(16) |
| 54 | 55 | ||
| 55 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ | 56 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
| 57 | #define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 56 | #define INFO2_DTRANERR1 BIT(17) | 58 | #define INFO2_DTRANERR1 BIT(17) |
| 57 | #define INFO2_DTRANERR0 BIT(16) | 59 | #define INFO2_DTRANERR0 BIT(16) |
| 58 | 60 | ||
| @@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, | |||
| 252 | { | 254 | { |
| 253 | struct renesas_sdhi *priv = host_to_priv(host); | 255 | struct renesas_sdhi *priv = host_to_priv(host); |
| 254 | 256 | ||
| 257 | /* Disable DMAC interrupts, we don't use them */ | ||
| 258 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, | ||
| 259 | INFO1_MASK_CLEAR); | ||
| 260 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, | ||
| 261 | INFO2_MASK_CLEAR); | ||
| 262 | |||
| 255 | /* Each value is set to non-zero to assume "enabling" each DMA */ | 263 | /* Each value is set to non-zero to assume "enabling" each DMA */ |
| 256 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; | 264 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
| 257 | 265 | ||
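The one-line RST_RESERVED_BITS change is a GENMASK off-by-one: GENMASK_ULL(h, l) sets bits h through l inclusive, so GENMASK_ULL(32, 0) spans 33 bits. For reference:

    u64 ok  = GENMASK_ULL(31, 0);   /* 0x00000000ffffffffULL, bits 31..0 */
    u64 bug = GENMASK_ULL(32, 0);   /* 0x00000001ffffffffULL, 33 bits   */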
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
| @@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) | |||
| 1338 | 1338 | ||
| 1339 | denali_enable_irq(denali); | 1339 | denali_enable_irq(denali); |
| 1340 | denali_reset_banks(denali); | 1340 | denali_reset_banks(denali); |
| 1341 | if (!denali->max_banks) { | ||
| 1342 | /* Error out earlier if no chip is found for some reason. */ | ||
| 1343 | ret = -ENODEV; | ||
| 1344 | goto disable_irq; | ||
| 1345 | } | ||
| 1341 | 1346 | ||
| 1342 | denali->active_bank = DENALI_INVALID_BANK; | 1347 | denali->active_bank = DENALI_INVALID_BANK; |
| 1343 | 1348 | ||
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c | |||
| @@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) | |||
| 1218 | return 0; | 1218 | return 0; |
| 1219 | } | 1219 | } |
| 1220 | 1220 | ||
| 1221 | static void __init init_mtd_structs(struct mtd_info *mtd) | 1221 | static void init_mtd_structs(struct mtd_info *mtd) |
| 1222 | { | 1222 | { |
| 1223 | /* initialize mtd and nand data structures */ | 1223 | /* initialize mtd and nand data structures */ |
| 1224 | 1224 | ||
| @@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) | |||
| 1290 | 1290 | ||
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int __init read_id_reg(struct mtd_info *mtd) | 1293 | static int read_id_reg(struct mtd_info *mtd) |
| 1294 | { | 1294 | { |
| 1295 | struct nand_chip *nand = mtd_to_nand(mtd); | 1295 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); | 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); |
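Dropping __init from init_mtd_structs() and read_id_reg() is a section-mismatch fix: both are reachable from docg4_resume(), which can run long after .init.text has been discarded, so the annotation would have the resume path jump into freed memory. Rule of thumb:

    /* wrong: reachable from the resume path after init memory is freed */
    static int __init read_id_reg(struct mtd_info *mtd);

    /* right: no section annotation for anything callable at runtime */
    static int read_id_reg(struct mtd_info *mtd);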
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, | |||
| 316 | old_value = *dbbuf_db; | 316 | old_value = *dbbuf_db; |
| 317 | *dbbuf_db = value; | 317 | *dbbuf_db = value; |
| 318 | 318 | ||
| 319 | /* | ||
| 320 | * Ensure that the doorbell is updated before reading the event | ||
| 321 | * index from memory. The controller needs to provide similar | ||
| 322 | * ordering to ensure the event index is updated before reading | ||
| 323 | * the doorbell. | ||
| 324 | */ | ||
| 325 | mb(); | ||
| 326 | |||
| 319 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) | 327 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) |
| 320 | return false; | 328 | return false; |
| 321 | } | 329 | } |
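The mb() closes a store/load ordering hole in the shadow-doorbell protocol: the driver's doorbell store must be globally visible before it loads the controller-maintained event index, mirroring the barrier the controller applies on its side. Annotated shape:

    old_value = *dbbuf_db;
    *dbbuf_db = value;      /* publish the new shadow-doorbell value */

    mb();                   /* order the store against the load below */

    if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
            return false;   /* no MMIO doorbell write needed */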
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) | |||
| 1210 | 1210 | ||
| 1211 | error = nvmet_init_discovery(); | 1211 | error = nvmet_init_discovery(); |
| 1212 | if (error) | 1212 | if (error) |
| 1213 | goto out; | 1213 | goto out_free_work_queue; |
| 1214 | 1214 | ||
| 1215 | error = nvmet_init_configfs(); | 1215 | error = nvmet_init_configfs(); |
| 1216 | if (error) | 1216 | if (error) |
| @@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) | |||
| 1219 | 1219 | ||
| 1220 | out_exit_discovery: | 1220 | out_exit_discovery: |
| 1221 | nvmet_exit_discovery(); | 1221 | nvmet_exit_discovery(); |
| 1222 | out_free_work_queue: | ||
| 1223 | destroy_workqueue(buffered_io_wq); | ||
| 1222 | out: | 1224 | out: |
| 1223 | return error; | 1225 | return error; |
| 1224 | } | 1226 | } |
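The nvmet fix adds the missing rung to the goto-unwind ladder so buffered_io_wq is destroyed when discovery init fails. The canonical shape, with hypothetical helpers standing in for the nvmet calls:

    static struct workqueue_struct *wq;

    static int __init example_init(void)
    {
            int error;

            wq = alloc_workqueue("example", 0, 0);
            if (!wq)
                    return -ENOMEM;

            error = init_discovery();               /* hypothetical */
            if (error)
                    goto out_free_work_queue;

            error = init_configfs();                /* hypothetical */
            if (error)
                    goto out_exit_discovery;
            return 0;

    out_exit_discovery:
            exit_discovery();
    out_free_work_queue:
            destroy_workqueue(wq);                  /* previously leaked */
            return error;
    }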
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
| @@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) | |||
| 311 | struct fcloop_tport *tport = tls_req->tport; | 311 | struct fcloop_tport *tport = tls_req->tport; |
| 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; | 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; |
| 313 | 313 | ||
| 314 | if (tport->remoteport) | 314 | if (!tport || tport->remoteport) |
| 315 | lsreq->done(lsreq, tls_req->status); | 315 | lsreq->done(lsreq, tls_req->status); |
| 316 | } | 316 | } |
| 317 | 317 | ||
| @@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, | |||
| 329 | 329 | ||
| 330 | if (!rport->targetport) { | 330 | if (!rport->targetport) { |
| 331 | tls_req->status = -ECONNREFUSED; | 331 | tls_req->status = -ECONNREFUSED; |
| 332 | tls_req->tport = NULL; | ||
| 332 | schedule_work(&tls_req->work); | 333 | schedule_work(&tls_req->work); |
| 333 | return ret; | 334 | return ret; |
| 334 | } | 335 | } |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
| @@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, | |||
| 402 | } | 402 | } |
| 403 | static DEVICE_ATTR_RO(modalias); | 403 | static DEVICE_ATTR_RO(modalias); |
| 404 | 404 | ||
| 405 | static ssize_t state_show(struct device *dev, | ||
| 406 | struct device_attribute *attr, char *buf) | ||
| 407 | { | ||
| 408 | return sprintf(buf, "%s\n", | ||
| 409 | xenbus_strstate(to_xenbus_device(dev)->state)); | ||
| 410 | } | ||
| 411 | static DEVICE_ATTR_RO(state); | ||
| 412 | |||
| 405 | static struct attribute *xenbus_dev_attrs[] = { | 413 | static struct attribute *xenbus_dev_attrs[] = { |
| 406 | &dev_attr_nodename.attr, | 414 | &dev_attr_nodename.attr, |
| 407 | &dev_attr_devtype.attr, | 415 | &dev_attr_devtype.attr, |
| 408 | &dev_attr_modalias.attr, | 416 | &dev_attr_modalias.attr, |
| 417 | &dev_attr_state.attr, | ||
| 409 | NULL, | 418 | NULL, |
| 410 | }; | 419 | }; |
| 411 | 420 | ||
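The new xenbus state attribute is the stock read-only sysfs pattern: a <name>_show() callback plus DEVICE_ATTR_RO(<name>), then a pointer in the bus's attribute array. For reference, the macro expands to roughly:

    /* DEVICE_ATTR_RO(state) is approximately equivalent to: */
    static struct device_attribute dev_attr_state =
            __ATTR(state, 0444, state_show, NULL);

Once registered, reading the attribute from userspace (path illustrative) returns strings such as "Connected" via xenbus_strstate().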
