diff options
Diffstat (limited to 'include/linux')
42 files changed, 351 insertions, 246 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -217,6 +217,7 @@ struct pci_dev; | |||
| 217 | 217 | ||
| 218 | int acpi_pci_irq_enable (struct pci_dev *dev); | 218 | int acpi_pci_irq_enable (struct pci_dev *dev); |
| 219 | void acpi_penalize_isa_irq(int irq, int active); | 219 | void acpi_penalize_isa_irq(int irq, int active); |
| 220 | bool acpi_isa_irq_available(int irq); | ||
| 220 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); | 221 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
| 221 | void acpi_pci_irq_disable (struct pci_dev *dev); | 222 | void acpi_pci_irq_disable (struct pci_dev *dev); |
| 222 | 223 | ||
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -116,6 +116,8 @@ struct bdi_writeback { | |||
| 116 | struct list_head work_list; | 116 | struct list_head work_list; |
| 117 | struct delayed_work dwork; /* work item used for writeback */ | 117 | struct delayed_work dwork; /* work item used for writeback */ |
| 118 | 118 | ||
| 119 | struct list_head bdi_node; /* anchored at bdi->wb_list */ | ||
| 120 | |||
| 119 | #ifdef CONFIG_CGROUP_WRITEBACK | 121 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 120 | struct percpu_ref refcnt; /* used only for !root wb's */ | 122 | struct percpu_ref refcnt; /* used only for !root wb's */ |
| 121 | struct fprop_local_percpu memcg_completions; | 123 | struct fprop_local_percpu memcg_completions; |
| @@ -150,6 +152,7 @@ struct backing_dev_info { | |||
| 150 | atomic_long_t tot_write_bandwidth; | 152 | atomic_long_t tot_write_bandwidth; |
| 151 | 153 | ||
| 152 | struct bdi_writeback wb; /* the root writeback info for this bdi */ | 154 | struct bdi_writeback wb; /* the root writeback info for this bdi */ |
| 155 | struct list_head wb_list; /* list of all wbs */ | ||
| 153 | #ifdef CONFIG_CGROUP_WRITEBACK | 156 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 154 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | 157 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
| 155 | struct rb_root cgwb_congested_tree; /* their congested states */ | 158 | struct rb_root cgwb_congested_tree; /* their congested states */ |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5a5d79ee256f..c85f74946a8b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -13,18 +13,23 @@ | |||
| 13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
| 14 | #include <linux/blkdev.h> | 14 | #include <linux/blkdev.h> |
| 15 | #include <linux/writeback.h> | 15 | #include <linux/writeback.h> |
| 16 | #include <linux/memcontrol.h> | ||
| 16 | #include <linux/blk-cgroup.h> | 17 | #include <linux/blk-cgroup.h> |
| 17 | #include <linux/backing-dev-defs.h> | 18 | #include <linux/backing-dev-defs.h> |
| 18 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 19 | 20 | ||
| 20 | int __must_check bdi_init(struct backing_dev_info *bdi); | 21 | int __must_check bdi_init(struct backing_dev_info *bdi); |
| 21 | void bdi_destroy(struct backing_dev_info *bdi); | 22 | void bdi_exit(struct backing_dev_info *bdi); |
| 22 | 23 | ||
| 23 | __printf(3, 4) | 24 | __printf(3, 4) |
| 24 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, | 25 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, |
| 25 | const char *fmt, ...); | 26 | const char *fmt, ...); |
| 26 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); | 27 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
| 28 | void bdi_unregister(struct backing_dev_info *bdi); | ||
| 29 | |||
| 27 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); | 30 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); |
| 31 | void bdi_destroy(struct backing_dev_info *bdi); | ||
| 32 | |||
| 28 | void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, | 33 | void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, |
| 29 | bool range_cyclic, enum wb_reason reason); | 34 | bool range_cyclic, enum wb_reason reason); |
| 30 | void wb_start_background_writeback(struct bdi_writeback *wb); | 35 | void wb_start_background_writeback(struct bdi_writeback *wb); |
| @@ -252,13 +257,19 @@ int inode_congested(struct inode *inode, int cong_bits); | |||
| 252 | * @inode: inode of interest | 257 | * @inode: inode of interest |
| 253 | * | 258 | * |
| 254 | * cgroup writeback requires support from both the bdi and filesystem. | 259 | * cgroup writeback requires support from both the bdi and filesystem. |
| 255 | * Test whether @inode has both. | 260 | * Also, both memcg and iocg have to be on the default hierarchy. Test |
| 261 | * whether all conditions are met. | ||
| 262 | * | ||
| 263 | * Note that the test result may change dynamically on the same inode | ||
| 264 | * depending on how memcg and iocg are configured. | ||
| 256 | */ | 265 | */ |
| 257 | static inline bool inode_cgwb_enabled(struct inode *inode) | 266 | static inline bool inode_cgwb_enabled(struct inode *inode) |
| 258 | { | 267 | { |
| 259 | struct backing_dev_info *bdi = inode_to_bdi(inode); | 268 | struct backing_dev_info *bdi = inode_to_bdi(inode); |
| 260 | 269 | ||
| 261 | return bdi_cap_account_dirty(bdi) && | 270 | return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && |
| 271 | cgroup_on_dfl(blkcg_root_css->cgroup) && | ||
| 272 | bdi_cap_account_dirty(bdi) && | ||
| 262 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && | 273 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && |
| 263 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); | 274 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); |
| 264 | } | 275 | } |
| @@ -401,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | |||
| 401 | rcu_read_unlock(); | 412 | rcu_read_unlock(); |
| 402 | } | 413 | } |
| 403 | 414 | ||
| 404 | struct wb_iter { | ||
| 405 | int start_memcg_id; | ||
| 406 | struct radix_tree_iter tree_iter; | ||
| 407 | void **slot; | ||
| 408 | }; | ||
| 409 | |||
| 410 | static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, | ||
| 411 | struct backing_dev_info *bdi) | ||
| 412 | { | ||
| 413 | struct radix_tree_iter *titer = &iter->tree_iter; | ||
| 414 | |||
| 415 | WARN_ON_ONCE(!rcu_read_lock_held()); | ||
| 416 | |||
| 417 | if (iter->start_memcg_id >= 0) { | ||
| 418 | iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); | ||
| 419 | iter->start_memcg_id = -1; | ||
| 420 | } else { | ||
| 421 | iter->slot = radix_tree_next_slot(iter->slot, titer, 0); | ||
| 422 | } | ||
| 423 | |||
| 424 | if (!iter->slot) | ||
| 425 | iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); | ||
| 426 | if (iter->slot) | ||
| 427 | return *iter->slot; | ||
| 428 | return NULL; | ||
| 429 | } | ||
| 430 | |||
| 431 | static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, | ||
| 432 | struct backing_dev_info *bdi, | ||
| 433 | int start_memcg_id) | ||
| 434 | { | ||
| 435 | iter->start_memcg_id = start_memcg_id; | ||
| 436 | |||
| 437 | if (start_memcg_id) | ||
| 438 | return __wb_iter_next(iter, bdi); | ||
| 439 | else | ||
| 440 | return &bdi->wb; | ||
| 441 | } | ||
| 442 | |||
| 443 | /** | ||
| 444 | * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order | ||
| 445 | * @wb_cur: cursor struct bdi_writeback pointer | ||
| 446 | * @bdi: bdi to walk wb's of | ||
| 447 | * @iter: pointer to struct wb_iter to be used as iteration buffer | ||
| 448 | * @start_memcg_id: memcg ID to start iteration from | ||
| 449 | * | ||
| 450 | * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending | ||
| 451 | * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter | ||
| 452 | * to be used as temp storage during iteration. rcu_read_lock() must be | ||
| 453 | * held throughout iteration. | ||
| 454 | */ | ||
| 455 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ | ||
| 456 | for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ | ||
| 457 | (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) | ||
| 458 | |||
| 459 | #else /* CONFIG_CGROUP_WRITEBACK */ | 415 | #else /* CONFIG_CGROUP_WRITEBACK */ |
| 460 | 416 | ||
| 461 | static inline bool inode_cgwb_enabled(struct inode *inode) | 417 | static inline bool inode_cgwb_enabled(struct inode *inode) |
| @@ -515,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) | |||
| 515 | { | 471 | { |
| 516 | } | 472 | } |
| 517 | 473 | ||
| 518 | struct wb_iter { | ||
| 519 | int next_id; | ||
| 520 | }; | ||
| 521 | |||
| 522 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ | ||
| 523 | for ((iter)->next_id = (start_blkcg_id); \ | ||
| 524 | ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); ) | ||
| 525 | |||
| 526 | static inline int inode_congested(struct inode *inode, int cong_bits) | 474 | static inline int inode_congested(struct inode *inode, int cong_bits) |
| 527 | { | 475 | { |
| 528 | return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); | 476 | return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); |
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 0a5cc7a1109b..c02e669945e9 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
| @@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, | |||
| 713 | 713 | ||
| 714 | if (!throtl) { | 714 | if (!throtl) { |
| 715 | blkg = blkg ?: q->root_blkg; | 715 | blkg = blkg ?: q->root_blkg; |
| 716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, | 716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, |
| 717 | bio->bi_iter.bi_size); | 717 | bio->bi_iter.bi_size); |
| 718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); | 718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); |
| 719 | } | 719 | } |
| 720 | 720 | ||
| 721 | rcu_read_unlock(); | 721 | rcu_read_unlock(); |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
| @@ -145,7 +145,6 @@ enum { | |||
| 145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | 145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
| 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
| 147 | BLK_MQ_F_SG_MERGE = 1 << 2, | 147 | BLK_MQ_F_SG_MERGE = 1 << 2, |
| 148 | BLK_MQ_F_SYSFS_UP = 1 << 3, | ||
| 149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | 148 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, |
| 150 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, | 149 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
| 151 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, | 150 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, |
| @@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | |||
| 215 | void blk_mq_cancel_requeue_work(struct request_queue *q); | 214 | void blk_mq_cancel_requeue_work(struct request_queue *q); |
| 216 | void blk_mq_kick_requeue_list(struct request_queue *q); | 215 | void blk_mq_kick_requeue_list(struct request_queue *q); |
| 217 | void blk_mq_abort_requeue_list(struct request_queue *q); | 216 | void blk_mq_abort_requeue_list(struct request_queue *q); |
| 218 | void blk_mq_complete_request(struct request *rq); | 217 | void blk_mq_complete_request(struct request *rq, int error); |
| 219 | 218 | ||
| 220 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 219 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
| 221 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 220 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
| @@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); | |||
| 224 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | 223 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
| 225 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); | 224 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
| 226 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 225 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
| 227 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | ||
| 228 | void *priv); | ||
| 229 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | 226 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, |
| 230 | void *priv); | 227 | void *priv); |
| 231 | void blk_mq_freeze_queue(struct request_queue *q); | 228 | void blk_mq_freeze_queue(struct request_queue *q); |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 38a5ff772a37..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -456,6 +456,8 @@ struct request_queue { | |||
| 456 | struct blk_mq_tag_set *tag_set; | 456 | struct blk_mq_tag_set *tag_set; |
| 457 | struct list_head tag_set_list; | 457 | struct list_head tag_set_list; |
| 458 | struct bio_set *bio_split; | 458 | struct bio_set *bio_split; |
| 459 | |||
| 460 | bool mq_sysfs_init_done; | ||
| 459 | }; | 461 | }; |
| 460 | 462 | ||
| 461 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 463 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
| @@ -1368,6 +1370,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, | |||
| 1368 | ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); | 1370 | ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); |
| 1369 | } | 1371 | } |
| 1370 | 1372 | ||
| 1373 | static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, | ||
| 1374 | struct bio *next) | ||
| 1375 | { | ||
| 1376 | if (!bio_has_data(prev)) | ||
| 1377 | return false; | ||
| 1378 | |||
| 1379 | return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], | ||
| 1380 | next->bi_io_vec[0].bv_offset); | ||
| 1381 | } | ||
| 1382 | |||
| 1383 | static inline bool req_gap_back_merge(struct request *req, struct bio *bio) | ||
| 1384 | { | ||
| 1385 | return bio_will_gap(req->q, req->biotail, bio); | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static inline bool req_gap_front_merge(struct request *req, struct bio *bio) | ||
| 1389 | { | ||
| 1390 | return bio_will_gap(req->q, bio, req->bio); | ||
| 1391 | } | ||
| 1392 | |||
| 1371 | struct work_struct; | 1393 | struct work_struct; |
| 1372 | int kblockd_schedule_work(struct work_struct *work); | 1394 | int kblockd_schedule_work(struct work_struct *work); |
| 1373 | int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); | 1395 | int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); |
| @@ -1494,6 +1516,26 @@ queue_max_integrity_segments(struct request_queue *q) | |||
| 1494 | return q->limits.max_integrity_segments; | 1516 | return q->limits.max_integrity_segments; |
| 1495 | } | 1517 | } |
| 1496 | 1518 | ||
| 1519 | static inline bool integrity_req_gap_back_merge(struct request *req, | ||
| 1520 | struct bio *next) | ||
| 1521 | { | ||
| 1522 | struct bio_integrity_payload *bip = bio_integrity(req->bio); | ||
| 1523 | struct bio_integrity_payload *bip_next = bio_integrity(next); | ||
| 1524 | |||
| 1525 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], | ||
| 1526 | bip_next->bip_vec[0].bv_offset); | ||
| 1527 | } | ||
| 1528 | |||
| 1529 | static inline bool integrity_req_gap_front_merge(struct request *req, | ||
| 1530 | struct bio *bio) | ||
| 1531 | { | ||
| 1532 | struct bio_integrity_payload *bip = bio_integrity(bio); | ||
| 1533 | struct bio_integrity_payload *bip_next = bio_integrity(req->bio); | ||
| 1534 | |||
| 1535 | return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], | ||
| 1536 | bip_next->bip_vec[0].bv_offset); | ||
| 1537 | } | ||
| 1538 | |||
| 1497 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1539 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
| 1498 | 1540 | ||
| 1499 | struct bio; | 1541 | struct bio; |
| @@ -1560,6 +1602,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) | |||
| 1560 | { | 1602 | { |
| 1561 | return 0; | 1603 | return 0; |
| 1562 | } | 1604 | } |
| 1605 | static inline bool integrity_req_gap_back_merge(struct request *req, | ||
| 1606 | struct bio *next) | ||
| 1607 | { | ||
| 1608 | return false; | ||
| 1609 | } | ||
| 1610 | static inline bool integrity_req_gap_front_merge(struct request *req, | ||
| 1611 | struct bio *bio) | ||
| 1612 | { | ||
| 1613 | return false; | ||
| 1614 | } | ||
| 1563 | 1615 | ||
| 1564 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1616 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
| 1565 | 1617 | ||
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 4763ad64e832..f89b31d45cc8 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h | |||
| @@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features) | |||
| 107 | CEPH_FEATURE_OSDMAP_ENC | \ | 107 | CEPH_FEATURE_OSDMAP_ENC | \ |
| 108 | CEPH_FEATURE_CRUSH_TUNABLES3 | \ | 108 | CEPH_FEATURE_CRUSH_TUNABLES3 | \ |
| 109 | CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ | 109 | CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ |
| 110 | CEPH_FEATURE_MSGR_KEEPALIVE2 | \ | ||
| 110 | CEPH_FEATURE_CRUSH_V4) | 111 | CEPH_FEATURE_CRUSH_V4) |
| 111 | 112 | ||
| 112 | #define CEPH_FEATURES_REQUIRED_DEFAULT \ | 113 | #define CEPH_FEATURES_REQUIRED_DEFAULT \ |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 7e1252e97a30..b2371d9b51fa 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -238,6 +238,8 @@ struct ceph_connection { | |||
| 238 | bool out_kvec_is_msg; /* kvec refers to out_msg */ | 238 | bool out_kvec_is_msg; /* kvec refers to out_msg */ |
| 239 | int out_more; /* there is more data after the kvecs */ | 239 | int out_more; /* there is more data after the kvecs */ |
| 240 | __le64 out_temp_ack; /* for writing an ack */ | 240 | __le64 out_temp_ack; /* for writing an ack */ |
| 241 | struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 | ||
| 242 | stamp */ | ||
| 241 | 243 | ||
| 242 | /* message in temps */ | 244 | /* message in temps */ |
| 243 | struct ceph_msg_header in_hdr; | 245 | struct ceph_msg_header in_hdr; |
| @@ -248,7 +250,7 @@ struct ceph_connection { | |||
| 248 | int in_base_pos; /* bytes read */ | 250 | int in_base_pos; /* bytes read */ |
| 249 | __le64 in_temp_ack; /* for reading an ack */ | 251 | __le64 in_temp_ack; /* for reading an ack */ |
| 250 | 252 | ||
| 251 | struct timespec last_keepalive_ack; | 253 | struct timespec last_keepalive_ack; /* keepalive2 ack stamp */ |
| 252 | 254 | ||
| 253 | struct delayed_work work; /* send|recv work */ | 255 | struct delayed_work work; /* send|recv work */ |
| 254 | unsigned long delay; /* current delay interval */ | 256 | unsigned long delay; /* current delay interval */ |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 4d8fcf2187dc..8492721b39be 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
| @@ -473,31 +473,8 @@ struct cgroup_subsys { | |||
| 473 | unsigned int depends_on; | 473 | unsigned int depends_on; |
| 474 | }; | 474 | }; |
| 475 | 475 | ||
| 476 | extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; | 476 | void cgroup_threadgroup_change_begin(struct task_struct *tsk); |
| 477 | 477 | void cgroup_threadgroup_change_end(struct task_struct *tsk); | |
| 478 | /** | ||
| 479 | * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups | ||
| 480 | * @tsk: target task | ||
| 481 | * | ||
| 482 | * Called from threadgroup_change_begin() and allows cgroup operations to | ||
| 483 | * synchronize against threadgroup changes using a percpu_rw_semaphore. | ||
| 484 | */ | ||
| 485 | static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) | ||
| 486 | { | ||
| 487 | percpu_down_read(&cgroup_threadgroup_rwsem); | ||
| 488 | } | ||
| 489 | |||
| 490 | /** | ||
| 491 | * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups | ||
| 492 | * @tsk: target task | ||
| 493 | * | ||
| 494 | * Called from threadgroup_change_end(). Counterpart of | ||
| 495 | * cgroup_threadcgroup_change_begin(). | ||
| 496 | */ | ||
| 497 | static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) | ||
| 498 | { | ||
| 499 | percpu_up_read(&cgroup_threadgroup_rwsem); | ||
| 500 | } | ||
| 501 | 478 | ||
| 502 | #else /* CONFIG_CGROUPS */ | 479 | #else /* CONFIG_CGROUPS */ |
| 503 | 480 | ||
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 31ce435981fe..bdcf358dfce2 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
| @@ -18,15 +18,6 @@ | |||
| 18 | struct clock_event_device; | 18 | struct clock_event_device; |
| 19 | struct module; | 19 | struct module; |
| 20 | 20 | ||
| 21 | /* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ | ||
| 22 | enum clock_event_mode { | ||
| 23 | CLOCK_EVT_MODE_UNUSED, | ||
| 24 | CLOCK_EVT_MODE_SHUTDOWN, | ||
| 25 | CLOCK_EVT_MODE_PERIODIC, | ||
| 26 | CLOCK_EVT_MODE_ONESHOT, | ||
| 27 | CLOCK_EVT_MODE_RESUME, | ||
| 28 | }; | ||
| 29 | |||
| 30 | /* | 21 | /* |
| 31 | * Possible states of a clock event device. | 22 | * Possible states of a clock event device. |
| 32 | * | 23 | * |
| @@ -86,16 +77,14 @@ enum clock_event_state { | |||
| 86 | * @min_delta_ns: minimum delta value in ns | 77 | * @min_delta_ns: minimum delta value in ns |
| 87 | * @mult: nanosecond to cycles multiplier | 78 | * @mult: nanosecond to cycles multiplier |
| 88 | * @shift: nanoseconds to cycles divisor (power of two) | 79 | * @shift: nanoseconds to cycles divisor (power of two) |
| 89 | * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE | ||
| 90 | * @state_use_accessors:current state of the device, assigned by the core code | 80 | * @state_use_accessors:current state of the device, assigned by the core code |
| 91 | * @features: features | 81 | * @features: features |
| 92 | * @retries: number of forced programming retries | 82 | * @retries: number of forced programming retries |
| 93 | * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. | 83 | * @set_state_periodic: switch state to periodic |
| 94 | * @set_state_periodic: switch state to periodic, if !set_mode | 84 | * @set_state_oneshot: switch state to oneshot |
| 95 | * @set_state_oneshot: switch state to oneshot, if !set_mode | 85 | * @set_state_oneshot_stopped: switch state to oneshot_stopped |
| 96 | * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode | 86 | * @set_state_shutdown: switch state to shutdown |
| 97 | * @set_state_shutdown: switch state to shutdown, if !set_mode | 87 | * @tick_resume: resume clkevt device |
| 98 | * @tick_resume: resume clkevt device, if !set_mode | ||
| 99 | * @broadcast: function to broadcast events | 88 | * @broadcast: function to broadcast events |
| 100 | * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration | 89 | * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration |
| 101 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration | 90 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration |
| @@ -116,18 +105,10 @@ struct clock_event_device { | |||
| 116 | u64 min_delta_ns; | 105 | u64 min_delta_ns; |
| 117 | u32 mult; | 106 | u32 mult; |
| 118 | u32 shift; | 107 | u32 shift; |
| 119 | enum clock_event_mode mode; | ||
| 120 | enum clock_event_state state_use_accessors; | 108 | enum clock_event_state state_use_accessors; |
| 121 | unsigned int features; | 109 | unsigned int features; |
| 122 | unsigned long retries; | 110 | unsigned long retries; |
| 123 | 111 | ||
| 124 | /* | ||
| 125 | * State transition callback(s): Only one of the two groups should be | ||
| 126 | * defined: | ||
| 127 | * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. | ||
| 128 | * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume(). | ||
| 129 | */ | ||
| 130 | void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); | ||
| 131 | int (*set_state_periodic)(struct clock_event_device *); | 112 | int (*set_state_periodic)(struct clock_event_device *); |
| 132 | int (*set_state_oneshot)(struct clock_event_device *); | 113 | int (*set_state_oneshot)(struct clock_event_device *); |
| 133 | int (*set_state_oneshot_stopped)(struct clock_event_device *); | 114 | int (*set_state_oneshot_stopped)(struct clock_event_device *); |
diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
| @@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, | |||
| 26 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, | 26 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, |
| 27 | unsigned int order_per_bit, | 27 | unsigned int order_per_bit, |
| 28 | struct cma **res_cma); | 28 | struct cma **res_cma); |
| 29 | extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); | 29 | extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); |
| 30 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); | 30 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); |
| 31 | #endif | 31 | #endif |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..8efb40e61d6e 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -237,12 +237,25 @@ | |||
| 237 | #define KASAN_ABI_VERSION 3 | 237 | #define KASAN_ABI_VERSION 3 |
| 238 | #endif | 238 | #endif |
| 239 | 239 | ||
| 240 | #if GCC_VERSION >= 40902 | ||
| 241 | /* | ||
| 242 | * Tell the compiler that address safety instrumentation (KASAN) | ||
| 243 | * should not be applied to that function. | ||
| 244 | * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 | ||
| 245 | */ | ||
| 246 | #define __no_sanitize_address __attribute__((no_sanitize_address)) | ||
| 247 | #endif | ||
| 248 | |||
| 240 | #endif /* gcc version >= 40000 specific checks */ | 249 | #endif /* gcc version >= 40000 specific checks */ |
| 241 | 250 | ||
| 242 | #if !defined(__noclone) | 251 | #if !defined(__noclone) |
| 243 | #define __noclone /* not needed */ | 252 | #define __noclone /* not needed */ |
| 244 | #endif | 253 | #endif |
| 245 | 254 | ||
| 255 | #if !defined(__no_sanitize_address) | ||
| 256 | #define __no_sanitize_address | ||
| 257 | #endif | ||
| 258 | |||
| 246 | /* | 259 | /* |
| 247 | * A trick to suppress uninitialized variable warning without generating any | 260 | * A trick to suppress uninitialized variable warning without generating any |
| 248 | * code | 261 | * code |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..3d7810341b57 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 198 | 198 | ||
| 199 | #include <uapi/linux/types.h> | 199 | #include <uapi/linux/types.h> |
| 200 | 200 | ||
| 201 | static __always_inline void __read_once_size(const volatile void *p, void *res, int size) | 201 | #define __READ_ONCE_SIZE \ |
| 202 | ({ \ | ||
| 203 | switch (size) { \ | ||
| 204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ | ||
| 205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ | ||
| 206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ | ||
| 207 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ | ||
| 208 | default: \ | ||
| 209 | barrier(); \ | ||
| 210 | __builtin_memcpy((void *)res, (const void *)p, size); \ | ||
| 211 | barrier(); \ | ||
| 212 | } \ | ||
| 213 | }) | ||
| 214 | |||
| 215 | static __always_inline | ||
| 216 | void __read_once_size(const volatile void *p, void *res, int size) | ||
| 202 | { | 217 | { |
| 203 | switch (size) { | 218 | __READ_ONCE_SIZE; |
| 204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | 219 | } |
| 205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | 220 | |
| 206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | 221 | #ifdef CONFIG_KASAN |
| 207 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | 222 | /* |
| 208 | default: | 223 | * This function is not 'inline' because __no_sanitize_address confilcts |
| 209 | barrier(); | 224 | * with inlining. Attempt to inline it may cause a build failure. |
| 210 | __builtin_memcpy((void *)res, (const void *)p, size); | 225 | * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 |
| 211 | barrier(); | 226 | * '__maybe_unused' allows us to avoid defined-but-not-used warnings. |
| 212 | } | 227 | */ |
| 228 | static __no_sanitize_address __maybe_unused | ||
| 229 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | ||
| 230 | { | ||
| 231 | __READ_ONCE_SIZE; | ||
| 232 | } | ||
| 233 | #else | ||
| 234 | static __always_inline | ||
| 235 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | ||
| 236 | { | ||
| 237 | __READ_ONCE_SIZE; | ||
| 213 | } | 238 | } |
| 239 | #endif | ||
| 214 | 240 | ||
| 215 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) | 241 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) |
| 216 | { | 242 | { |
| @@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 248 | * required ordering. | 274 | * required ordering. |
| 249 | */ | 275 | */ |
| 250 | 276 | ||
| 251 | #define READ_ONCE(x) \ | 277 | #define __READ_ONCE(x, check) \ |
| 252 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | 278 | ({ \ |
| 279 | union { typeof(x) __val; char __c[1]; } __u; \ | ||
| 280 | if (check) \ | ||
| 281 | __read_once_size(&(x), __u.__c, sizeof(x)); \ | ||
| 282 | else \ | ||
| 283 | __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ | ||
| 284 | __u.__val; \ | ||
| 285 | }) | ||
| 286 | #define READ_ONCE(x) __READ_ONCE(x, 1) | ||
| 287 | |||
| 288 | /* | ||
| 289 | * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need | ||
| 290 | * to hide memory access from KASAN. | ||
| 291 | */ | ||
| 292 | #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) | ||
| 253 | 293 | ||
| 254 | #define WRITE_ONCE(x, val) \ | 294 | #define WRITE_ONCE(x, val) \ |
| 255 | ({ \ | 295 | ({ \ |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 430efcbea48e..dca22de98d94 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -127,9 +127,14 @@ struct cpufreq_policy { | |||
| 127 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ | 127 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ |
| 128 | 128 | ||
| 129 | #ifdef CONFIG_CPU_FREQ | 129 | #ifdef CONFIG_CPU_FREQ |
| 130 | struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); | ||
| 130 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); | 131 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
| 131 | void cpufreq_cpu_put(struct cpufreq_policy *policy); | 132 | void cpufreq_cpu_put(struct cpufreq_policy *policy); |
| 132 | #else | 133 | #else |
| 134 | static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) | ||
| 135 | { | ||
| 136 | return NULL; | ||
| 137 | } | ||
| 133 | static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 138 | static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
| 134 | { | 139 | { |
| 135 | return NULL; | 140 | return NULL; |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index ce447f0f1bad..68030e22af35 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
| @@ -65,7 +65,10 @@ struct devfreq_dev_status { | |||
| 65 | * The "flags" parameter's possible values are | 65 | * The "flags" parameter's possible values are |
| 66 | * explained above with "DEVFREQ_FLAG_*" macros. | 66 | * explained above with "DEVFREQ_FLAG_*" macros. |
| 67 | * @get_dev_status: The device should provide the current performance | 67 | * @get_dev_status: The device should provide the current performance |
| 68 | * status to devfreq, which is used by governors. | 68 | * status to devfreq. Governors are recommended not to |
| 69 | * use this directly. Instead, governors are recommended | ||
| 70 | * to use devfreq_update_stats() along with | ||
| 71 | * devfreq.last_status. | ||
| 69 | * @get_cur_freq: The device should provide the current frequency | 72 | * @get_cur_freq: The device should provide the current frequency |
| 70 | * at which it is operating. | 73 | * at which it is operating. |
| 71 | * @exit: An optional callback that is called when devfreq | 74 | * @exit: An optional callback that is called when devfreq |
| @@ -161,6 +164,7 @@ struct devfreq { | |||
| 161 | struct delayed_work work; | 164 | struct delayed_work work; |
| 162 | 165 | ||
| 163 | unsigned long previous_freq; | 166 | unsigned long previous_freq; |
| 167 | struct devfreq_dev_status last_status; | ||
| 164 | 168 | ||
| 165 | void *data; /* private data for governors */ | 169 | void *data; /* private data for governors */ |
| 166 | 170 | ||
| @@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, | |||
| 204 | extern void devm_devfreq_unregister_opp_notifier(struct device *dev, | 208 | extern void devm_devfreq_unregister_opp_notifier(struct device *dev, |
| 205 | struct devfreq *devfreq); | 209 | struct devfreq *devfreq); |
| 206 | 210 | ||
| 211 | /** | ||
| 212 | * devfreq_update_stats() - update the last_status pointer in struct devfreq | ||
| 213 | * @df: the devfreq instance whose status needs updating | ||
| 214 | * | ||
| 215 | * Governors are recommended to use this function along with last_status, | ||
| 216 | * which allows other entities to reuse the last_status without affecting | ||
| 217 | * the values fetched later by governors. | ||
| 218 | */ | ||
| 219 | static inline int devfreq_update_stats(struct devfreq *df) | ||
| 220 | { | ||
| 221 | return df->profile->get_dev_status(df->dev.parent, &df->last_status); | ||
| 222 | } | ||
| 223 | |||
| 207 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) | 224 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
| 208 | /** | 225 | /** |
| 209 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq | 226 | * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq |
| @@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, | |||
| 289 | struct devfreq *devfreq) | 306 | struct devfreq *devfreq) |
| 290 | { | 307 | { |
| 291 | } | 308 | } |
| 309 | |||
| 310 | static inline int devfreq_update_stats(struct devfreq *df) | ||
| 311 | { | ||
| 312 | return -EINVAL; | ||
| 313 | } | ||
| 292 | #endif /* CONFIG_PM_DEVFREQ */ | 314 | #endif /* CONFIG_PM_DEVFREQ */ |
| 293 | 315 | ||
| 294 | #endif /* __LINUX_DEVFREQ_H__ */ | 316 | #endif /* __LINUX_DEVFREQ_H__ */ |
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h | |||
| @@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
| 111 | return ret; | 111 | return ret; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, | 114 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
| 115 | unsigned int order); | 115 | unsigned int order); |
| 116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, | 116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, |
| 117 | int count); | 117 | int count); |
| @@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static inline | 146 | static inline |
| 147 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, | 147 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
| 148 | unsigned int order) | 148 | unsigned int order) |
| 149 | { | 149 | { |
| 150 | return NULL; | 150 | return NULL; |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d0b380ee7d67..e38681f4912d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -25,6 +25,13 @@ | |||
| 25 | extern struct files_struct init_files; | 25 | extern struct files_struct init_files; |
| 26 | extern struct fs_struct init_fs; | 26 | extern struct fs_struct init_fs; |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_CGROUPS | ||
| 29 | #define INIT_GROUP_RWSEM(sig) \ | ||
| 30 | .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), | ||
| 31 | #else | ||
| 32 | #define INIT_GROUP_RWSEM(sig) | ||
| 33 | #endif | ||
| 34 | |||
| 28 | #ifdef CONFIG_CPUSETS | 35 | #ifdef CONFIG_CPUSETS |
| 29 | #define INIT_CPUSET_SEQ(tsk) \ | 36 | #define INIT_CPUSET_SEQ(tsk) \ |
| 30 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), | 37 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), |
| @@ -57,6 +64,7 @@ extern struct fs_struct init_fs; | |||
| 57 | INIT_PREV_CPUTIME(sig) \ | 64 | INIT_PREV_CPUTIME(sig) \ |
| 58 | .cred_guard_mutex = \ | 65 | .cred_guard_mutex = \ |
| 59 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 66 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
| 67 | INIT_GROUP_RWSEM(sig) \ | ||
| 60 | } | 68 | } |
| 61 | 69 | ||
| 62 | extern struct nsproxy init_nsproxy; | 70 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
| @@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | |||
| 68 | return iova >> iova_shift(iovad); | 68 | return iova >> iova_shift(iovad); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | int iommu_iova_cache_init(void); | 71 | int iova_cache_get(void); |
| 72 | void iommu_iova_cache_destroy(void); | 72 | void iova_cache_put(void); |
| 73 | 73 | ||
| 74 | struct iova *alloc_iova_mem(void); | 74 | struct iova *alloc_iova_mem(void); |
| 75 | void free_iova_mem(struct iova *iova); | 75 | void free_iova_mem(struct iova *iova); |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 6f8b34066442..11bf09288ddb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -110,8 +110,8 @@ enum { | |||
| 110 | /* | 110 | /* |
| 111 | * Return value for chip->irq_set_affinity() | 111 | * Return value for chip->irq_set_affinity() |
| 112 | * | 112 | * |
| 113 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 113 | * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity |
| 114 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity | 114 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity |
| 115 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to | 115 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to |
| 116 | * support stacked irqchips, which indicates skipping | 116 | * support stacked irqchips, which indicates skipping |
| 117 | * all descendent irqchips. | 117 | * all descendent irqchips. |
| @@ -129,9 +129,19 @@ struct irq_domain; | |||
| 129 | * struct irq_common_data - per irq data shared by all irqchips | 129 | * struct irq_common_data - per irq data shared by all irqchips |
| 130 | * @state_use_accessors: status information for irq chip functions. | 130 | * @state_use_accessors: status information for irq chip functions. |
| 131 | * Use accessor functions to deal with it | 131 | * Use accessor functions to deal with it |
| 132 | * @node: node index useful for balancing | ||
| 133 | * @handler_data: per-IRQ data for the irq_chip methods | ||
| 134 | * @affinity: IRQ affinity on SMP | ||
| 135 | * @msi_desc: MSI descriptor | ||
| 132 | */ | 136 | */ |
| 133 | struct irq_common_data { | 137 | struct irq_common_data { |
| 134 | unsigned int state_use_accessors; | 138 | unsigned int state_use_accessors; |
| 139 | #ifdef CONFIG_NUMA | ||
| 140 | unsigned int node; | ||
| 141 | #endif | ||
| 142 | void *handler_data; | ||
| 143 | struct msi_desc *msi_desc; | ||
| 144 | cpumask_var_t affinity; | ||
| 135 | }; | 145 | }; |
| 136 | 146 | ||
| 137 | /** | 147 | /** |
| @@ -139,38 +149,26 @@ struct irq_common_data { | |||
| 139 | * @mask: precomputed bitmask for accessing the chip registers | 149 | * @mask: precomputed bitmask for accessing the chip registers |
| 140 | * @irq: interrupt number | 150 | * @irq: interrupt number |
| 141 | * @hwirq: hardware interrupt number, local to the interrupt domain | 151 | * @hwirq: hardware interrupt number, local to the interrupt domain |
| 142 | * @node: node index useful for balancing | ||
| 143 | * @common: point to data shared by all irqchips | 152 | * @common: point to data shared by all irqchips |
| 144 | * @chip: low level interrupt hardware access | 153 | * @chip: low level interrupt hardware access |
| 145 | * @domain: Interrupt translation domain; responsible for mapping | 154 | * @domain: Interrupt translation domain; responsible for mapping |
| 146 | * between hwirq number and linux irq number. | 155 | * between hwirq number and linux irq number. |
| 147 | * @parent_data: pointer to parent struct irq_data to support hierarchy | 156 | * @parent_data: pointer to parent struct irq_data to support hierarchy |
| 148 | * irq_domain | 157 | * irq_domain |
| 149 | * @handler_data: per-IRQ data for the irq_chip methods | ||
| 150 | * @chip_data: platform-specific per-chip private data for the chip | 158 | * @chip_data: platform-specific per-chip private data for the chip |
| 151 | * methods, to allow shared chip implementations | 159 | * methods, to allow shared chip implementations |
| 152 | * @msi_desc: MSI descriptor | ||
| 153 | * @affinity: IRQ affinity on SMP | ||
| 154 | * | ||
| 155 | * The fields here need to overlay the ones in irq_desc until we | ||
| 156 | * cleaned up the direct references and switched everything over to | ||
| 157 | * irq_data. | ||
| 158 | */ | 160 | */ |
| 159 | struct irq_data { | 161 | struct irq_data { |
| 160 | u32 mask; | 162 | u32 mask; |
| 161 | unsigned int irq; | 163 | unsigned int irq; |
| 162 | unsigned long hwirq; | 164 | unsigned long hwirq; |
| 163 | unsigned int node; | ||
| 164 | struct irq_common_data *common; | 165 | struct irq_common_data *common; |
| 165 | struct irq_chip *chip; | 166 | struct irq_chip *chip; |
| 166 | struct irq_domain *domain; | 167 | struct irq_domain *domain; |
| 167 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 168 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
| 168 | struct irq_data *parent_data; | 169 | struct irq_data *parent_data; |
| 169 | #endif | 170 | #endif |
| 170 | void *handler_data; | ||
| 171 | void *chip_data; | 171 | void *chip_data; |
| 172 | struct msi_desc *msi_desc; | ||
| 173 | cpumask_var_t affinity; | ||
| 174 | }; | 172 | }; |
| 175 | 173 | ||
| 176 | /* | 174 | /* |
| @@ -190,6 +188,7 @@ struct irq_data { | |||
| 190 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 188 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
| 191 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 189 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
| 192 | * IRQD_WAKEUP_ARMED - Wakeup mode armed | 190 | * IRQD_WAKEUP_ARMED - Wakeup mode armed |
| 191 | * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU | ||
| 193 | */ | 192 | */ |
| 194 | enum { | 193 | enum { |
| 195 | IRQD_TRIGGER_MASK = 0xf, | 194 | IRQD_TRIGGER_MASK = 0xf, |
| @@ -204,6 +203,7 @@ enum { | |||
| 204 | IRQD_IRQ_MASKED = (1 << 17), | 203 | IRQD_IRQ_MASKED = (1 << 17), |
| 205 | IRQD_IRQ_INPROGRESS = (1 << 18), | 204 | IRQD_IRQ_INPROGRESS = (1 << 18), |
| 206 | IRQD_WAKEUP_ARMED = (1 << 19), | 205 | IRQD_WAKEUP_ARMED = (1 << 19), |
| 206 | IRQD_FORWARDED_TO_VCPU = (1 << 20), | ||
| 207 | }; | 207 | }; |
| 208 | 208 | ||
| 209 | #define __irqd_to_state(d) ((d)->common->state_use_accessors) | 209 | #define __irqd_to_state(d) ((d)->common->state_use_accessors) |
| @@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d) | |||
| 282 | return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; | 282 | return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) | ||
| 286 | { | ||
| 287 | return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) | ||
| 291 | { | ||
| 292 | __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; | ||
| 293 | } | ||
| 294 | |||
| 295 | static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) | ||
| 296 | { | ||
| 297 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; | ||
| 298 | } | ||
| 285 | 299 | ||
| 286 | /* | 300 | /* |
| 287 | * Functions for chained handlers which can be enabled/disabled by the | 301 | * Functions for chained handlers which can be enabled/disabled by the |
| @@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq) | |||
| 461 | * Built-in IRQ handlers for various IRQ types, | 475 | * Built-in IRQ handlers for various IRQ types, |
| 462 | * callable via desc->handle_irq() | 476 | * callable via desc->handle_irq() |
| 463 | */ | 477 | */ |
| 464 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 478 | extern void handle_level_irq(struct irq_desc *desc); |
| 465 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 479 | extern void handle_fasteoi_irq(struct irq_desc *desc); |
| 466 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 480 | extern void handle_edge_irq(struct irq_desc *desc); |
| 467 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | 481 | extern void handle_edge_eoi_irq(struct irq_desc *desc); |
| 468 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 482 | extern void handle_simple_irq(struct irq_desc *desc); |
| 469 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 483 | extern void handle_percpu_irq(struct irq_desc *desc); |
| 470 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); | 484 | extern void handle_percpu_devid_irq(struct irq_desc *desc); |
| 471 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 485 | extern void handle_bad_irq(struct irq_desc *desc); |
| 472 | extern void handle_nested_irq(unsigned int irq); | 486 | extern void handle_nested_irq(unsigned int irq); |
| 473 | 487 | ||
| 474 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); | 488 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); |
| @@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | |||
| 627 | static inline void *irq_get_handler_data(unsigned int irq) | 641 | static inline void *irq_get_handler_data(unsigned int irq) |
| 628 | { | 642 | { |
| 629 | struct irq_data *d = irq_get_irq_data(irq); | 643 | struct irq_data *d = irq_get_irq_data(irq); |
| 630 | return d ? d->handler_data : NULL; | 644 | return d ? d->common->handler_data : NULL; |
| 631 | } | 645 | } |
| 632 | 646 | ||
| 633 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) | 647 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
| 634 | { | 648 | { |
| 635 | return d->handler_data; | 649 | return d->common->handler_data; |
| 636 | } | 650 | } |
| 637 | 651 | ||
| 638 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) | 652 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
| 639 | { | 653 | { |
| 640 | struct irq_data *d = irq_get_irq_data(irq); | 654 | struct irq_data *d = irq_get_irq_data(irq); |
| 641 | return d ? d->msi_desc : NULL; | 655 | return d ? d->common->msi_desc : NULL; |
| 642 | } | 656 | } |
| 643 | 657 | ||
| 644 | static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) | 658 | static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) |
| 645 | { | 659 | { |
| 646 | return d->msi_desc; | 660 | return d->common->msi_desc; |
| 647 | } | 661 | } |
| 648 | 662 | ||
| 649 | static inline u32 irq_get_trigger_type(unsigned int irq) | 663 | static inline u32 irq_get_trigger_type(unsigned int irq) |
| @@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq) | |||
| 652 | return d ? irqd_get_trigger_type(d) : 0; | 666 | return d ? irqd_get_trigger_type(d) : 0; |
| 653 | } | 667 | } |
| 654 | 668 | ||
| 655 | static inline int irq_data_get_node(struct irq_data *d) | 669 | static inline int irq_common_data_get_node(struct irq_common_data *d) |
| 656 | { | 670 | { |
| 671 | #ifdef CONFIG_NUMA | ||
| 657 | return d->node; | 672 | return d->node; |
| 673 | #else | ||
| 674 | return 0; | ||
| 675 | #endif | ||
| 676 | } | ||
| 677 | |||
| 678 | static inline int irq_data_get_node(struct irq_data *d) | ||
| 679 | { | ||
| 680 | return irq_common_data_get_node(d->common); | ||
| 658 | } | 681 | } |
| 659 | 682 | ||
| 660 | static inline struct cpumask *irq_get_affinity_mask(int irq) | 683 | static inline struct cpumask *irq_get_affinity_mask(int irq) |
| 661 | { | 684 | { |
| 662 | struct irq_data *d = irq_get_irq_data(irq); | 685 | struct irq_data *d = irq_get_irq_data(irq); |
| 663 | 686 | ||
| 664 | return d ? d->affinity : NULL; | 687 | return d ? d->common->affinity : NULL; |
| 665 | } | 688 | } |
| 666 | 689 | ||
| 667 | static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) | 690 | static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) |
| 668 | { | 691 | { |
| 669 | return d->affinity; | 692 | return d->common->affinity; |
| 670 | } | 693 | } |
| 671 | 694 | ||
| 672 | unsigned int arch_dynirq_lower_bound(unsigned int from); | 695 | unsigned int arch_dynirq_lower_bound(unsigned int from); |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5acfa26602e1..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; | |||
| 98 | 98 | ||
| 99 | static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) | 99 | static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) |
| 100 | { | 100 | { |
| 101 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 101 | return container_of(data->common, struct irq_desc, irq_common_data); |
| 102 | return irq_to_desc(data->irq); | ||
| 103 | #else | ||
| 104 | return container_of(data, struct irq_desc, irq_data); | ||
| 105 | #endif | ||
| 106 | } | 102 | } |
| 107 | 103 | ||
| 108 | static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) | 104 | static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) |
| @@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | |||
| 127 | 123 | ||
| 128 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | 124 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) |
| 129 | { | 125 | { |
| 130 | return desc->irq_data.handler_data; | 126 | return desc->irq_common_data.handler_data; |
| 131 | } | 127 | } |
| 132 | 128 | ||
| 133 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | 129 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) |
| 134 | { | 130 | { |
| 135 | return desc->irq_data.msi_desc; | 131 | return desc->irq_common_data.msi_desc; |
| 136 | } | 132 | } |
| 137 | 133 | ||
| 138 | /* | 134 | /* |
| 139 | * Architectures call this to let the generic IRQ layer | 135 | * Architectures call this to let the generic IRQ layer |
| 140 | * handle an interrupt. If the descriptor is attached to an | 136 | * handle an interrupt. |
| 141 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
| 142 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
| 143 | */ | 137 | */ |
| 144 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | 138 | static inline void generic_handle_irq_desc(struct irq_desc *desc) |
| 145 | { | 139 | { |
| 146 | desc->handle_irq(irq, desc); | 140 | desc->handle_irq(desc); |
| 147 | } | 141 | } |
| 148 | 142 | ||
| 149 | int generic_handle_irq(unsigned int irq); | 143 | int generic_handle_irq(unsigned int irq); |
| @@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq) | |||
| 176 | return irq_desc_has_action(irq_to_desc(irq)); | 170 | return irq_desc_has_action(irq_to_desc(irq)); |
| 177 | } | 171 | } |
| 178 | 172 | ||
| 179 | /* caller has locked the irq_desc and both params are valid */ | ||
| 180 | static inline void __irq_set_handler_locked(unsigned int irq, | ||
| 181 | irq_flow_handler_t handler) | ||
| 182 | { | ||
| 183 | struct irq_desc *desc; | ||
| 184 | |||
| 185 | desc = irq_to_desc(irq); | ||
| 186 | desc->handle_irq = handler; | ||
| 187 | } | ||
| 188 | |||
| 189 | /* caller has locked the irq_desc and both params are valid */ | ||
| 190 | static inline void | ||
| 191 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | ||
| 192 | irq_flow_handler_t handler, const char *name) | ||
| 193 | { | ||
| 194 | struct irq_desc *desc; | ||
| 195 | |||
| 196 | desc = irq_to_desc(irq); | ||
| 197 | irq_desc_get_irq_data(desc)->chip = chip; | ||
| 198 | desc->handle_irq = handler; | ||
| 199 | desc->name = name; | ||
| 200 | } | ||
| 201 | |||
| 202 | /** | 173 | /** |
| 203 | * irq_set_handler_locked - Set irq handler from a locked region | 174 | * irq_set_handler_locked - Set irq handler from a locked region |
| 204 | * @data: Pointer to the irq_data structure which identifies the irq | 175 | * @data: Pointer to the irq_data structure which identifies the irq |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..f644fdb06dd6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -161,6 +161,11 @@ enum { | |||
| 161 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), | 161 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), |
| 162 | }; | 162 | }; |
| 163 | 163 | ||
| 164 | static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) | ||
| 165 | { | ||
| 166 | return d->of_node; | ||
| 167 | } | ||
| 168 | |||
| 164 | #ifdef CONFIG_IRQ_DOMAIN | 169 | #ifdef CONFIG_IRQ_DOMAIN |
| 165 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 170 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, |
| 166 | irq_hw_number_t hwirq_max, int direct_max, | 171 | irq_hw_number_t hwirq_max, int direct_max, |
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | struct irq_desc; | 9 | struct irq_desc; |
| 10 | struct irq_data; | 10 | struct irq_data; |
| 11 | typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); | 11 | typedef void (*irq_flow_handler_t)(struct irq_desc *desc); |
| 12 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | 12 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); |
| 13 | 13 | ||
| 14 | #endif | 14 | #endif |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7f653e8f6690..f1094238ab2a 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
| @@ -21,8 +21,8 @@ | |||
| 21 | * | 21 | * |
| 22 | * DEFINE_STATIC_KEY_TRUE(key); | 22 | * DEFINE_STATIC_KEY_TRUE(key); |
| 23 | * DEFINE_STATIC_KEY_FALSE(key); | 23 | * DEFINE_STATIC_KEY_FALSE(key); |
| 24 | * static_key_likely() | 24 | * static_branch_likely() |
| 25 | * statick_key_unlikely() | 25 | * static_branch_unlikely() |
| 26 | * | 26 | * |
| 27 | * Jump labels provide an interface to generate dynamic branches using | 27 | * Jump labels provide an interface to generate dynamic branches using |
| 28 | * self-modifying code. Assuming toolchain and architecture support, if we | 28 | * self-modifying code. Assuming toolchain and architecture support, if we |
| @@ -45,12 +45,10 @@ | |||
| 45 | * statement, setting the key to true requires us to patch in a jump | 45 | * statement, setting the key to true requires us to patch in a jump |
| 46 | * to the out-of-line of true branch. | 46 | * to the out-of-line of true branch. |
| 47 | * | 47 | * |
| 48 | * In addtion to static_branch_{enable,disable}, we can also reference count | 48 | * In addition to static_branch_{enable,disable}, we can also reference count |
| 49 | * the key or branch direction via static_branch_{inc,dec}. Thus, | 49 | * the key or branch direction via static_branch_{inc,dec}. Thus, |
| 50 | * static_branch_inc() can be thought of as a 'make more true' and | 50 | * static_branch_inc() can be thought of as a 'make more true' and |
| 51 | * static_branch_dec() as a 'make more false'. The inc()/dec() | 51 | * static_branch_dec() as a 'make more false'. |
| 52 | * interface is meant to be used exclusively from the inc()/dec() for a given | ||
| 53 | * key. | ||
| 54 | * | 52 | * |
| 55 | * Since this relies on modifying code, the branch modifying functions | 53 | * Since this relies on modifying code, the branch modifying functions |
| 56 | * must be considered absolute slow paths (machine wide synchronization etc.). | 54 | * must be considered absolute slow paths (machine wide synchronization etc.). |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..3e3318ddfc0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -242,7 +242,6 @@ struct mem_cgroup { | |||
| 242 | * percpu counter. | 242 | * percpu counter. |
| 243 | */ | 243 | */ |
| 244 | struct mem_cgroup_stat_cpu __percpu *stat; | 244 | struct mem_cgroup_stat_cpu __percpu *stat; |
| 245 | spinlock_t pcp_counter_lock; | ||
| 246 | 245 | ||
| 247 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | 246 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) |
| 248 | struct cg_proto tcp_mem; | 247 | struct cg_proto tcp_mem; |
| @@ -677,8 +676,9 @@ enum { | |||
| 677 | 676 | ||
| 678 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); | 677 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); |
| 679 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); | 678 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); |
| 680 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, | 679 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, |
| 681 | unsigned long *pdirty, unsigned long *pwriteback); | 680 | unsigned long *pheadroom, unsigned long *pdirty, |
| 681 | unsigned long *pwriteback); | ||
| 682 | 682 | ||
| 683 | #else /* CONFIG_CGROUP_WRITEBACK */ | 683 | #else /* CONFIG_CGROUP_WRITEBACK */ |
| 684 | 684 | ||
| @@ -688,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | |||
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, | 690 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, |
| 691 | unsigned long *pavail, | 691 | unsigned long *pfilepages, |
| 692 | unsigned long *pheadroom, | ||
| 692 | unsigned long *pdirty, | 693 | unsigned long *pdirty, |
| 693 | unsigned long *pwriteback) | 694 | unsigned long *pwriteback) |
| 694 | { | 695 | { |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
| 402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
| 403 | }; | 403 | }; |
| 404 | 404 | ||
| 405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
| 406 | struct mlx5_inbox_hdr hdr; | ||
| 407 | u8 rsvd[8]; | ||
| 408 | }; | ||
| 409 | |||
| 410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
| 411 | struct mlx5_outbox_hdr hdr; | ||
| 412 | __be32 dump_fill_mkey; | ||
| 413 | __be32 resd_lkey; | ||
| 414 | }; | ||
| 415 | |||
| 416 | struct mlx5_cmd_layout { | 405 | struct mlx5_cmd_layout { |
| 417 | u8 type; | 406 | u8 type; |
| 418 | u8 rsvd0[3]; | 407 | u8 rsvd0[3]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
| 845 | int mlx5_register_interface(struct mlx5_interface *intf); | 845 | int mlx5_register_interface(struct mlx5_interface *intf); |
| 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
| 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
| 848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
| 849 | 848 | ||
| 850 | struct mlx5_profile { | 849 | struct mlx5_profile { |
| 851 | u64 mask; | 850 | u64 mask; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
| 905 | #endif | 905 | #endif |
| 906 | } | 906 | } |
| 907 | 907 | ||
| 908 | #ifdef CONFIG_MEMCG | ||
| 909 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
| 910 | { | ||
| 911 | return page->mem_cgroup; | ||
| 912 | } | ||
| 913 | |||
| 914 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
| 915 | { | ||
| 916 | page->mem_cgroup = memcg; | ||
| 917 | } | ||
| 918 | #else | ||
| 919 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
| 920 | { | ||
| 921 | return NULL; | ||
| 922 | } | ||
| 923 | |||
| 924 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
| 925 | { | ||
| 926 | } | ||
| 927 | #endif | ||
| 928 | |||
| 908 | /* | 929 | /* |
| 909 | * Some inline functions in vmstat.h depend on page_zone() | 930 | * Some inline functions in vmstat.h depend on page_zone() |
| 910 | */ | 931 | */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88a00694eda5..210d11a75e4f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n) | |||
| 507 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | 507 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); |
| 508 | smp_mb__before_atomic(); | 508 | smp_mb__before_atomic(); |
| 509 | clear_bit(NAPI_STATE_SCHED, &n->state); | 509 | clear_bit(NAPI_STATE_SCHED, &n->state); |
| 510 | clear_bit(NAPI_STATE_NPSVC, &n->state); | ||
| 510 | } | 511 | } |
| 511 | 512 | ||
| 512 | #ifdef CONFIG_SMP | 513 | #ifdef CONFIG_SMP |
| @@ -1053,6 +1054,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
| 1053 | * This function is used to pass protocol port error state information | 1054 | * This function is used to pass protocol port error state information |
| 1054 | * to the switch driver. The switch driver can react to the proto_down | 1055 | * to the switch driver. The switch driver can react to the proto_down |
| 1055 | * by doing a phys down on the associated switch port. | 1056 | * by doing a phys down on the associated switch port. |
| 1057 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); | ||
| 1058 | * This function is used to get egress tunnel information for given skb. | ||
| 1059 | * This is useful for retrieving outer tunnel header parameters while | ||
| 1060 | * sampling packet. | ||
| 1056 | * | 1061 | * |
| 1057 | */ | 1062 | */ |
| 1058 | struct net_device_ops { | 1063 | struct net_device_ops { |
| @@ -1226,6 +1231,8 @@ struct net_device_ops { | |||
| 1226 | int (*ndo_get_iflink)(const struct net_device *dev); | 1231 | int (*ndo_get_iflink)(const struct net_device *dev); |
| 1227 | int (*ndo_change_proto_down)(struct net_device *dev, | 1232 | int (*ndo_change_proto_down)(struct net_device *dev, |
| 1228 | bool proto_down); | 1233 | bool proto_down); |
| 1234 | int (*ndo_fill_metadata_dst)(struct net_device *dev, | ||
| 1235 | struct sk_buff *skb); | ||
| 1229 | }; | 1236 | }; |
| 1230 | 1237 | ||
| 1231 | /** | 1238 | /** |
| @@ -2202,6 +2209,7 @@ void dev_add_offload(struct packet_offload *po); | |||
| 2202 | void dev_remove_offload(struct packet_offload *po); | 2209 | void dev_remove_offload(struct packet_offload *po); |
| 2203 | 2210 | ||
| 2204 | int dev_get_iflink(const struct net_device *dev); | 2211 | int dev_get_iflink(const struct net_device *dev); |
| 2212 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); | ||
| 2205 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, | 2213 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
| 2206 | unsigned short mask); | 2214 | unsigned short mask); |
| 2207 | struct net_device *dev_get_by_name(struct net *net, const char *name); | 2215 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index e5a70132a240..88fa8af2b937 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | 17 | ||
| 18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
| 19 | 19 | ||
| 20 | #define INT_DMA_LCD 25 | 20 | #define INT_DMA_LCD (NR_IRQS_LEGACY + 25) |
| 21 | 21 | ||
| 22 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) | 22 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) |
| 23 | #define OMAP_DMA_DROP_IRQ (1 << 1) | 23 | #define OMAP_DMA_DROP_IRQ (1 << 1) |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 962387a192f1..4a4e3a092337 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
| 20 | #include <linux/ethtool.h> | 20 | #include <linux/ethtool.h> |
| 21 | #include <linux/mii.h> | 21 | #include <linux/mii.h> |
| 22 | #include <linux/module.h> | ||
| 22 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
| 23 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
| 24 | #include <linux/mod_devicetable.h> | 25 | #include <linux/mod_devicetable.h> |
| @@ -153,6 +154,7 @@ struct sk_buff; | |||
| 153 | * PHYs should register using this structure | 154 | * PHYs should register using this structure |
| 154 | */ | 155 | */ |
| 155 | struct mii_bus { | 156 | struct mii_bus { |
| 157 | struct module *owner; | ||
| 156 | const char *name; | 158 | const char *name; |
| 157 | char id[MII_BUS_ID_SIZE]; | 159 | char id[MII_BUS_ID_SIZE]; |
| 158 | void *priv; | 160 | void *priv; |
| @@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void) | |||
| 198 | return mdiobus_alloc_size(0); | 200 | return mdiobus_alloc_size(0); |
| 199 | } | 201 | } |
| 200 | 202 | ||
| 201 | int mdiobus_register(struct mii_bus *bus); | 203 | int __mdiobus_register(struct mii_bus *bus, struct module *owner); |
| 204 | #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) | ||
| 202 | void mdiobus_unregister(struct mii_bus *bus); | 205 | void mdiobus_unregister(struct mii_bus *bus); |
| 203 | void mdiobus_free(struct mii_bus *bus); | 206 | void mdiobus_free(struct mii_bus *bus); |
| 204 | struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); | 207 | struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); |
| @@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, | |||
| 742 | struct phy_c45_device_ids *c45_ids); | 745 | struct phy_c45_device_ids *c45_ids); |
| 743 | struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); | 746 | struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); |
| 744 | int phy_device_register(struct phy_device *phy); | 747 | int phy_device_register(struct phy_device *phy); |
| 748 | void phy_device_remove(struct phy_device *phydev); | ||
| 745 | int phy_init_hw(struct phy_device *phydev); | 749 | int phy_init_hw(struct phy_device *phydev); |
| 746 | int phy_suspend(struct phy_device *phydev); | 750 | int phy_suspend(struct phy_device *phydev); |
| 747 | int phy_resume(struct phy_device *phydev); | 751 | int phy_resume(struct phy_device *phydev); |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
| 230 | struct rcu_synchronize *rs_array); | 230 | struct rcu_synchronize *rs_array); |
| 231 | 231 | ||
| 232 | #define _wait_rcu_gp(checktiny, ...) \ | 232 | #define _wait_rcu_gp(checktiny, ...) \ |
| 233 | do { \ | 233 | do { \ |
| 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ | 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ |
| 235 | const int __n = ARRAY_SIZE(__crcu_array); \ | 235 | struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ |
| 236 | struct rcu_synchronize __rs_array[__n]; \ | 236 | __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ |
| 237 | \ | 237 | __crcu_array, __rs_array); \ |
| 238 | __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ | ||
| 239 | } while (0) | 238 | } while (0) |
| 240 | 239 | ||
| 241 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 240 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 45932228cbf5..9c2903e58adb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
| @@ -245,6 +245,7 @@ enum regulator_type { | |||
| 245 | * @linear_min_sel: Minimal selector for starting linear mapping | 245 | * @linear_min_sel: Minimal selector for starting linear mapping |
| 246 | * @fixed_uV: Fixed voltage of rails. | 246 | * @fixed_uV: Fixed voltage of rails. |
| 247 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) | 247 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) |
| 248 | * @min_dropout_uV: The minimum dropout voltage this regulator can handle | ||
| 248 | * @linear_ranges: A constant table of possible voltage ranges. | 249 | * @linear_ranges: A constant table of possible voltage ranges. |
| 249 | * @n_linear_ranges: Number of entries in the @linear_ranges table. | 250 | * @n_linear_ranges: Number of entries in the @linear_ranges table. |
| 250 | * @volt_table: Voltage mapping table (if table based mapping) | 251 | * @volt_table: Voltage mapping table (if table based mapping) |
| @@ -292,6 +293,7 @@ struct regulator_desc { | |||
| 292 | unsigned int linear_min_sel; | 293 | unsigned int linear_min_sel; |
| 293 | int fixed_uV; | 294 | int fixed_uV; |
| 294 | unsigned int ramp_delay; | 295 | unsigned int ramp_delay; |
| 296 | int min_dropout_uV; | ||
| 295 | 297 | ||
| 296 | const struct regulator_linear_range *linear_ranges; | 298 | const struct regulator_linear_range *linear_ranges; |
| 297 | int n_linear_ranges; | 299 | int n_linear_ranges; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index a4ab9daa387c..b7b9501b41af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -762,6 +762,18 @@ struct signal_struct { | |||
| 762 | unsigned audit_tty_log_passwd; | 762 | unsigned audit_tty_log_passwd; |
| 763 | struct tty_audit_buf *tty_audit_buf; | 763 | struct tty_audit_buf *tty_audit_buf; |
| 764 | #endif | 764 | #endif |
| 765 | #ifdef CONFIG_CGROUPS | ||
| 766 | /* | ||
| 767 | * group_rwsem prevents new tasks from entering the threadgroup and | ||
| 768 | * member tasks from exiting, more specifically, setting of | ||
| 769 | * PF_EXITING. fork and exit paths are protected with this rwsem | ||
| 770 | * using threadgroup_change_begin/end(). Users which require | ||
| 771 | * threadgroup to remain stable should use threadgroup_[un]lock() | ||
| 772 | * which also takes care of exec path. Currently, cgroup is the | ||
| 773 | * only user. | ||
| 774 | */ | ||
| 775 | struct rw_semaphore group_rwsem; | ||
| 776 | #endif | ||
| 765 | 777 | ||
| 766 | oom_flags_t oom_flags; | 778 | oom_flags_t oom_flags; |
| 767 | short oom_score_adj; /* OOM kill score adjustment */ | 779 | short oom_score_adj; /* OOM kill score adjustment */ |
diff --git a/include/linux/security.h b/include/linux/security.h index 79d85ddf8093..2f4c1f7aa7db 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, | |||
| 946 | unsigned long arg4, | 946 | unsigned long arg4, |
| 947 | unsigned long arg5) | 947 | unsigned long arg5) |
| 948 | { | 948 | { |
| 949 | return cap_task_prctl(option, arg2, arg3, arg3, arg5); | 949 | return cap_task_prctl(option, arg2, arg3, arg4, arg5); |
| 950 | } | 950 | } |
| 951 | 951 | ||
| 952 | static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) | 952 | static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2738d355cdf9..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -179,6 +179,9 @@ struct nf_bridge_info { | |||
| 179 | u8 bridged_dnat:1; | 179 | u8 bridged_dnat:1; |
| 180 | __u16 frag_max_size; | 180 | __u16 frag_max_size; |
| 181 | struct net_device *physindev; | 181 | struct net_device *physindev; |
| 182 | |||
| 183 | /* always valid & non-NULL from FORWARD on, for physdev match */ | ||
| 184 | struct net_device *physoutdev; | ||
| 182 | union { | 185 | union { |
| 183 | /* prerouting: detect dnat in orig/reply direction */ | 186 | /* prerouting: detect dnat in orig/reply direction */ |
| 184 | __be32 ipv4_daddr; | 187 | __be32 ipv4_daddr; |
| @@ -189,9 +192,6 @@ struct nf_bridge_info { | |||
| 189 | * skb is out in neigh layer. | 192 | * skb is out in neigh layer. |
| 190 | */ | 193 | */ |
| 191 | char neigh_header[8]; | 194 | char neigh_header[8]; |
| 192 | |||
| 193 | /* always valid & non-NULL from FORWARD on, for physdev match */ | ||
| 194 | struct net_device *physoutdev; | ||
| 195 | }; | 195 | }; |
| 196 | }; | 196 | }; |
| 197 | #endif | 197 | #endif |
| @@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, | |||
| 2707 | { | 2707 | { |
| 2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
| 2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | 2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); |
| 2710 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
| 2711 | skb_checksum_start_offset(skb) < 0) | ||
| 2712 | skb->ip_summed = CHECKSUM_NONE; | ||
| 2710 | } | 2713 | } |
| 2711 | 2714 | ||
| 2712 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); | 2715 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 269e8afd3e2a..6b00f18f5e6b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type; | |||
| 34 | 34 | ||
| 35 | /** | 35 | /** |
| 36 | * struct spi_statistics - statistics for spi transfers | 36 | * struct spi_statistics - statistics for spi transfers |
| 37 | * @clock: lock protecting this structure | 37 | * @lock: lock protecting this structure |
| 38 | * | 38 | * |
| 39 | * @messages: number of spi-messages handled | 39 | * @messages: number of spi-messages handled |
| 40 | * @transfers: number of spi_transfers handled | 40 | * @transfers: number of spi_transfers handled |
diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); | |||
| 25 | #ifndef __HAVE_ARCH_STRLCPY | 25 | #ifndef __HAVE_ARCH_STRLCPY |
| 26 | size_t strlcpy(char *, const char *, size_t); | 26 | size_t strlcpy(char *, const char *, size_t); |
| 27 | #endif | 27 | #endif |
| 28 | #ifndef __HAVE_ARCH_STRSCPY | ||
| 29 | ssize_t __must_check strscpy(char *, const char *, size_t); | ||
| 30 | #endif | ||
| 28 | #ifndef __HAVE_ARCH_STRCAT | 31 | #ifndef __HAVE_ARCH_STRCAT |
| 29 | extern char * strcat(char *, const char *); | 32 | extern char * strcat(char *, const char *); |
| 30 | #endif | 33 | #endif |
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7591788e9fbf..357e44c1a46b 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
| @@ -42,6 +42,7 @@ struct sock_xprt { | |||
| 42 | /* | 42 | /* |
| 43 | * Connection of transports | 43 | * Connection of transports |
| 44 | */ | 44 | */ |
| 45 | unsigned long sock_state; | ||
| 45 | struct delayed_work connect_worker; | 46 | struct delayed_work connect_worker; |
| 46 | struct sockaddr_storage srcaddr; | 47 | struct sockaddr_storage srcaddr; |
| 47 | unsigned short srcport; | 48 | unsigned short srcport; |
| @@ -76,6 +77,8 @@ struct sock_xprt { | |||
| 76 | */ | 77 | */ |
| 77 | #define TCP_RPC_REPLY (1UL << 6) | 78 | #define TCP_RPC_REPLY (1UL << 6) |
| 78 | 79 | ||
| 80 | #define XPRT_SOCK_CONNECTING 1U | ||
| 81 | |||
| 79 | #endif /* __KERNEL__ */ | 82 | #endif /* __KERNEL__ */ |
| 80 | 83 | ||
| 81 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ | 84 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 17292fee8686..157d366e761b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -360,7 +360,7 @@ static inline struct thermal_zone_device * | |||
| 360 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, | 360 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, |
| 361 | const struct thermal_zone_of_device_ops *ops) | 361 | const struct thermal_zone_of_device_ops *ops) |
| 362 | { | 362 | { |
| 363 | return NULL; | 363 | return ERR_PTR(-ENODEV); |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | static inline | 366 | static inline |
| @@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | |||
| 380 | 380 | ||
| 381 | int power_actor_get_max_power(struct thermal_cooling_device *, | 381 | int power_actor_get_max_power(struct thermal_cooling_device *, |
| 382 | struct thermal_zone_device *tz, u32 *max_power); | 382 | struct thermal_zone_device *tz, u32 *max_power); |
| 383 | int power_actor_get_min_power(struct thermal_cooling_device *, | ||
| 384 | struct thermal_zone_device *tz, u32 *min_power); | ||
| 383 | int power_actor_set_power(struct thermal_cooling_device *, | 385 | int power_actor_set_power(struct thermal_cooling_device *, |
| 384 | struct thermal_instance *, u32); | 386 | struct thermal_instance *, u32); |
| 385 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, | 387 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, |
| @@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | |||
| 415 | static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, | 417 | static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, |
| 416 | struct thermal_zone_device *tz, u32 *max_power) | 418 | struct thermal_zone_device *tz, u32 *max_power) |
| 417 | { return 0; } | 419 | { return 0; } |
| 420 | static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, | ||
| 421 | struct thermal_zone_device *tz, | ||
| 422 | u32 *min_power) | ||
| 423 | { return -ENODEV; } | ||
| 418 | static inline int power_actor_set_power(struct thermal_cooling_device *cdev, | 424 | static inline int power_actor_set_power(struct thermal_cooling_device *cdev, |
| 419 | struct thermal_instance *tz, u32 power) | 425 | struct thermal_instance *tz, u32 power) |
| 420 | { return 0; } | 426 | { return 0; } |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 48d901f83f92..e312219ff823 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) | |||
| 147 | cpumask_or(mask, mask, tick_nohz_full_mask); | 147 | cpumask_or(mask, mask, tick_nohz_full_mask); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | static inline int housekeeping_any_cpu(void) | ||
| 151 | { | ||
| 152 | return cpumask_any_and(housekeeping_mask, cpu_online_mask); | ||
| 153 | } | ||
| 154 | |||
| 150 | extern void tick_nohz_full_kick(void); | 155 | extern void tick_nohz_full_kick(void); |
| 151 | extern void tick_nohz_full_kick_cpu(int cpu); | 156 | extern void tick_nohz_full_kick_cpu(int cpu); |
| 152 | extern void tick_nohz_full_kick_all(void); | 157 | extern void tick_nohz_full_kick_all(void); |
| 153 | extern void __tick_nohz_task_switch(void); | 158 | extern void __tick_nohz_task_switch(void); |
| 154 | #else | 159 | #else |
| 160 | static inline int housekeeping_any_cpu(void) | ||
| 161 | { | ||
| 162 | return smp_processor_id(); | ||
| 163 | } | ||
| 155 | static inline bool tick_nohz_full_enabled(void) { return false; } | 164 | static inline bool tick_nohz_full_enabled(void) { return false; } |
| 156 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } | 165 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } |
| 157 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } | 166 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } |
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
| @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { | |||
| 157 | */ | 157 | */ |
| 158 | int pio_dma_border; /* default is 64byte */ | 158 | int pio_dma_border; /* default is 64byte */ |
| 159 | 159 | ||
| 160 | u32 type; | 160 | uintptr_t type; |
| 161 | u32 enable_gpio; | 161 | u32 enable_gpio; |
| 162 | 162 | ||
| 163 | /* | 163 | /* |
diff --git a/include/linux/wait.h b/include/linux/wait.h index d3d077228d4c..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) | |||
| 147 | 147 | ||
| 148 | typedef int wait_bit_action_f(struct wait_bit_key *); | 148 | typedef int wait_bit_action_f(struct wait_bit_key *); |
| 149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 149 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| 150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, | 150 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
| 151 | void *key); | ||
| 152 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 151 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| 153 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); | 152 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); |
| 154 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | 153 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); |
| @@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); | |||
| 180 | #define wake_up_poll(x, m) \ | 179 | #define wake_up_poll(x, m) \ |
| 181 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) | 180 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) |
| 182 | #define wake_up_locked_poll(x, m) \ | 181 | #define wake_up_locked_poll(x, m) \ |
| 183 | __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) | 182 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) |
| 184 | #define wake_up_interruptible_poll(x, m) \ | 183 | #define wake_up_interruptible_poll(x, m) \ |
| 185 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) | 184 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) |
| 186 | #define wake_up_interruptible_sync_poll(x, m) \ | 185 | #define wake_up_interruptible_sync_poll(x, m) \ |
