diff options
| author | Tejun Heo <tj@kernel.org> | 2015-11-03 17:29:03 -0500 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2015-11-03 17:29:03 -0500 |
| commit | 159b5bb46492e4dcef2070b12861030bc360402b (patch) | |
| tree | 93de7d6e94a059aade50ee5437de6a50ccd1cf7b /include/linux | |
| parent | 56e74338a535cbcc2f2da08b1ea1a92920194364 (diff) | |
| parent | 469eabb3aec03d9defed3462df743a223a5c8f54 (diff) | |
Merge branch 'for-4.3-fixes' into for-4.4
Diffstat (limited to 'include/linux')
81 files changed, 1908 insertions, 475 deletions
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 6a0a89ed7f81..0ddb5c02ad8b 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h | |||
| @@ -33,14 +33,12 @@ | |||
| 33 | #define UART01x_DR 0x00 /* Data read or written from the interface. */ | 33 | #define UART01x_DR 0x00 /* Data read or written from the interface. */ |
| 34 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ | 34 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ |
| 35 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ | 35 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ |
| 36 | #define ZX_UART01x_DR 0x04 /* Data read or written from the interface. */ | ||
| 37 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ | 36 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ |
| 38 | #define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ | 37 | #define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ |
| 39 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ | 38 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ |
| 40 | #define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ | 39 | #define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ |
| 41 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ | 40 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ |
| 42 | #define UART010_CR 0x14 /* Control register. */ | 41 | #define UART010_CR 0x14 /* Control register. */ |
| 43 | #define ZX_UART01x_FR 0x14 /* Flag register (Read only). */ | ||
| 44 | #define UART01x_FR 0x18 /* Flag register (Read only). */ | 42 | #define UART01x_FR 0x18 /* Flag register (Read only). */ |
| 45 | #define UART010_IIR 0x1C /* Interrupt identification register (Read). */ | 43 | #define UART010_IIR 0x1C /* Interrupt identification register (Read). */ |
| 46 | #define UART010_ICR 0x1C /* Interrupt clear register (Write). */ | 44 | #define UART010_ICR 0x1C /* Interrupt clear register (Write). */ |
| @@ -51,21 +49,13 @@ | |||
| 51 | #define UART011_LCRH 0x2c /* Line control register. */ | 49 | #define UART011_LCRH 0x2c /* Line control register. */ |
| 52 | #define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ | 50 | #define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ |
| 53 | #define UART011_CR 0x30 /* Control register. */ | 51 | #define UART011_CR 0x30 /* Control register. */ |
| 54 | #define ZX_UART011_LCRH_TX 0x30 /* Tx Line control register. */ | ||
| 55 | #define UART011_IFLS 0x34 /* Interrupt fifo level select. */ | 52 | #define UART011_IFLS 0x34 /* Interrupt fifo level select. */ |
| 56 | #define ZX_UART011_CR 0x34 /* Control register. */ | ||
| 57 | #define ZX_UART011_IFLS 0x38 /* Interrupt fifo level select. */ | ||
| 58 | #define UART011_IMSC 0x38 /* Interrupt mask. */ | 53 | #define UART011_IMSC 0x38 /* Interrupt mask. */ |
| 59 | #define UART011_RIS 0x3c /* Raw interrupt status. */ | 54 | #define UART011_RIS 0x3c /* Raw interrupt status. */ |
| 60 | #define UART011_MIS 0x40 /* Masked interrupt status. */ | 55 | #define UART011_MIS 0x40 /* Masked interrupt status. */ |
| 61 | #define ZX_UART011_IMSC 0x40 /* Interrupt mask. */ | ||
| 62 | #define UART011_ICR 0x44 /* Interrupt clear register. */ | 56 | #define UART011_ICR 0x44 /* Interrupt clear register. */ |
| 63 | #define ZX_UART011_RIS 0x44 /* Raw interrupt status. */ | ||
| 64 | #define UART011_DMACR 0x48 /* DMA control register. */ | 57 | #define UART011_DMACR 0x48 /* DMA control register. */ |
| 65 | #define ZX_UART011_MIS 0x48 /* Masked interrupt status. */ | ||
| 66 | #define ZX_UART011_ICR 0x4c /* Interrupt clear register. */ | ||
| 67 | #define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ | 58 | #define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ |
| 68 | #define ZX_UART011_DMACR 0x50 /* DMA control register. */ | ||
| 69 | #define ST_UART011_XON1 0x54 /* XON1 register. */ | 59 | #define ST_UART011_XON1 0x54 /* XON1 register. */ |
| 70 | #define ST_UART011_XON2 0x58 /* XON2 register. */ | 60 | #define ST_UART011_XON2 0x58 /* XON2 register. */ |
| 71 | #define ST_UART011_XOFF1 0x5C /* XON1 register. */ | 61 | #define ST_UART011_XOFF1 0x5C /* XON1 register. */ |
| @@ -85,19 +75,15 @@ | |||
| 85 | #define UART01x_RSR_PE 0x02 | 75 | #define UART01x_RSR_PE 0x02 |
| 86 | #define UART01x_RSR_FE 0x01 | 76 | #define UART01x_RSR_FE 0x01 |
| 87 | 77 | ||
| 88 | #define ZX_UART01x_FR_BUSY 0x300 | ||
| 89 | #define UART011_FR_RI 0x100 | 78 | #define UART011_FR_RI 0x100 |
| 90 | #define UART011_FR_TXFE 0x080 | 79 | #define UART011_FR_TXFE 0x080 |
| 91 | #define UART011_FR_RXFF 0x040 | 80 | #define UART011_FR_RXFF 0x040 |
| 92 | #define UART01x_FR_TXFF 0x020 | 81 | #define UART01x_FR_TXFF 0x020 |
| 93 | #define UART01x_FR_RXFE 0x010 | 82 | #define UART01x_FR_RXFE 0x010 |
| 94 | #define UART01x_FR_BUSY 0x008 | 83 | #define UART01x_FR_BUSY 0x008 |
| 95 | #define ZX_UART01x_FR_DSR 0x008 | ||
| 96 | #define UART01x_FR_DCD 0x004 | 84 | #define UART01x_FR_DCD 0x004 |
| 97 | #define UART01x_FR_DSR 0x002 | 85 | #define UART01x_FR_DSR 0x002 |
| 98 | #define ZX_UART01x_FR_CTS 0x002 | ||
| 99 | #define UART01x_FR_CTS 0x001 | 86 | #define UART01x_FR_CTS 0x001 |
| 100 | #define ZX_UART011_FR_RI 0x001 | ||
| 101 | #define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) | 87 | #define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY) |
| 102 | 88 | ||
| 103 | #define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ | 89 | #define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */ |
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h index 945d44ae529c..ab3a6c002f7b 100644 --- a/include/linux/asn1_ber_bytecode.h +++ b/include/linux/asn1_ber_bytecode.h | |||
| @@ -45,23 +45,27 @@ enum asn1_opcode { | |||
| 45 | ASN1_OP_MATCH_JUMP = 0x04, | 45 | ASN1_OP_MATCH_JUMP = 0x04, |
| 46 | ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, | 46 | ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, |
| 47 | ASN1_OP_MATCH_ANY = 0x08, | 47 | ASN1_OP_MATCH_ANY = 0x08, |
| 48 | ASN1_OP_MATCH_ANY_OR_SKIP = 0x09, | ||
| 48 | ASN1_OP_MATCH_ANY_ACT = 0x0a, | 49 | ASN1_OP_MATCH_ANY_ACT = 0x0a, |
| 50 | ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b, | ||
| 49 | /* Everything before here matches unconditionally */ | 51 | /* Everything before here matches unconditionally */ |
| 50 | 52 | ||
| 51 | ASN1_OP_COND_MATCH_OR_SKIP = 0x11, | 53 | ASN1_OP_COND_MATCH_OR_SKIP = 0x11, |
| 52 | ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, | 54 | ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, |
| 53 | ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, | 55 | ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, |
| 54 | ASN1_OP_COND_MATCH_ANY = 0x18, | 56 | ASN1_OP_COND_MATCH_ANY = 0x18, |
| 57 | ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19, | ||
| 55 | ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, | 58 | ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, |
| 59 | ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b, | ||
| 56 | 60 | ||
| 57 | /* Everything before here will want a tag from the data */ | 61 | /* Everything before here will want a tag from the data */ |
| 58 | #define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT | 62 | #define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP |
| 59 | 63 | ||
| 60 | /* These are here to help fill up space */ | 64 | /* These are here to help fill up space */ |
| 61 | ASN1_OP_COND_FAIL = 0x1b, | 65 | ASN1_OP_COND_FAIL = 0x1c, |
| 62 | ASN1_OP_COMPLETE = 0x1c, | 66 | ASN1_OP_COMPLETE = 0x1d, |
| 63 | ASN1_OP_ACT = 0x1d, | 67 | ASN1_OP_ACT = 0x1e, |
| 64 | ASN1_OP_RETURN = 0x1e, | 68 | ASN1_OP_MAYBE_ACT = 0x1f, |
| 65 | 69 | ||
| 66 | /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ | 70 | /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ |
| 67 | ASN1_OP_END_SEQ = 0x20, | 71 | ASN1_OP_END_SEQ = 0x20, |
| @@ -76,6 +80,8 @@ enum asn1_opcode { | |||
| 76 | #define ASN1_OP_END__OF 0x02 | 80 | #define ASN1_OP_END__OF 0x02 |
| 77 | #define ASN1_OP_END__ACT 0x04 | 81 | #define ASN1_OP_END__ACT 0x04 |
| 78 | 82 | ||
| 83 | ASN1_OP_RETURN = 0x28, | ||
| 84 | |||
| 79 | ASN1_OP__NR | 85 | ASN1_OP__NR |
| 80 | }; | 86 | }; |
| 81 | 87 | ||
diff --git a/include/linux/audit.h b/include/linux/audit.h index c2e7e3a83965..b2abc996c25d 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -27,6 +27,9 @@ | |||
| 27 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
| 28 | #include <uapi/linux/audit.h> | 28 | #include <uapi/linux/audit.h> |
| 29 | 29 | ||
| 30 | #define AUDIT_INO_UNSET ((unsigned long)-1) | ||
| 31 | #define AUDIT_DEV_UNSET ((dev_t)-1) | ||
| 32 | |||
| 30 | struct audit_sig_info { | 33 | struct audit_sig_info { |
| 31 | uid_t uid; | 34 | uid_t uid; |
| 32 | pid_t pid; | 35 | pid_t pid; |
| @@ -59,6 +62,7 @@ struct audit_krule { | |||
| 59 | struct audit_field *inode_f; /* quick access to an inode field */ | 62 | struct audit_field *inode_f; /* quick access to an inode field */ |
| 60 | struct audit_watch *watch; /* associated watch */ | 63 | struct audit_watch *watch; /* associated watch */ |
| 61 | struct audit_tree *tree; /* associated watched tree */ | 64 | struct audit_tree *tree; /* associated watched tree */ |
| 65 | struct audit_fsnotify_mark *exe; | ||
| 62 | struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ | 66 | struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ |
| 63 | struct list_head list; /* for AUDIT_LIST* purposes only */ | 67 | struct list_head list; /* for AUDIT_LIST* purposes only */ |
| 64 | u64 prio; | 68 | u64 prio; |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 0fe9df983ab7..5a5d79ee256f 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -286,7 +286,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi | |||
| 286 | * %current's blkcg equals the effective blkcg of its memcg. No | 286 | * %current's blkcg equals the effective blkcg of its memcg. No |
| 287 | * need to use the relatively expensive cgroup_get_e_css(). | 287 | * need to use the relatively expensive cgroup_get_e_css(). |
| 288 | */ | 288 | */ |
| 289 | if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id))) | 289 | if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) |
| 290 | return wb; | 290 | return wb; |
| 291 | return NULL; | 291 | return NULL; |
| 292 | } | 292 | } |
| @@ -402,7 +402,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | |||
| 402 | } | 402 | } |
| 403 | 403 | ||
| 404 | struct wb_iter { | 404 | struct wb_iter { |
| 405 | int start_blkcg_id; | 405 | int start_memcg_id; |
| 406 | struct radix_tree_iter tree_iter; | 406 | struct radix_tree_iter tree_iter; |
| 407 | void **slot; | 407 | void **slot; |
| 408 | }; | 408 | }; |
| @@ -414,9 +414,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, | |||
| 414 | 414 | ||
| 415 | WARN_ON_ONCE(!rcu_read_lock_held()); | 415 | WARN_ON_ONCE(!rcu_read_lock_held()); |
| 416 | 416 | ||
| 417 | if (iter->start_blkcg_id >= 0) { | 417 | if (iter->start_memcg_id >= 0) { |
| 418 | iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id); | 418 | iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); |
| 419 | iter->start_blkcg_id = -1; | 419 | iter->start_memcg_id = -1; |
| 420 | } else { | 420 | } else { |
| 421 | iter->slot = radix_tree_next_slot(iter->slot, titer, 0); | 421 | iter->slot = radix_tree_next_slot(iter->slot, titer, 0); |
| 422 | } | 422 | } |
| @@ -430,30 +430,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, | |||
| 430 | 430 | ||
| 431 | static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, | 431 | static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, |
| 432 | struct backing_dev_info *bdi, | 432 | struct backing_dev_info *bdi, |
| 433 | int start_blkcg_id) | 433 | int start_memcg_id) |
| 434 | { | 434 | { |
| 435 | iter->start_blkcg_id = start_blkcg_id; | 435 | iter->start_memcg_id = start_memcg_id; |
| 436 | 436 | ||
| 437 | if (start_blkcg_id) | 437 | if (start_memcg_id) |
| 438 | return __wb_iter_next(iter, bdi); | 438 | return __wb_iter_next(iter, bdi); |
| 439 | else | 439 | else |
| 440 | return &bdi->wb; | 440 | return &bdi->wb; |
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | /** | 443 | /** |
| 444 | * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order | 444 | * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order |
| 445 | * @wb_cur: cursor struct bdi_writeback pointer | 445 | * @wb_cur: cursor struct bdi_writeback pointer |
| 446 | * @bdi: bdi to walk wb's of | 446 | * @bdi: bdi to walk wb's of |
| 447 | * @iter: pointer to struct wb_iter to be used as iteration buffer | 447 | * @iter: pointer to struct wb_iter to be used as iteration buffer |
| 448 | * @start_blkcg_id: blkcg ID to start iteration from | 448 | * @start_memcg_id: memcg ID to start iteration from |
| 449 | * | 449 | * |
| 450 | * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending | 450 | * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending |
| 451 | * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter | 451 | * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter |
| 452 | * to be used as temp storage during iteration. rcu_read_lock() must be | 452 | * to be used as temp storage during iteration. rcu_read_lock() must be |
| 453 | * held throughout iteration. | 453 | * held throughout iteration. |
| 454 | */ | 454 | */ |
| 455 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ | 455 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ |
| 456 | for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \ | 456 | for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ |
| 457 | (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) | 457 | (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) |
| 458 | 458 | ||
| 459 | #else /* CONFIG_CGROUP_WRITEBACK */ | 459 | #else /* CONFIG_CGROUP_WRITEBACK */ |
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index a4cd1641e9e2..0a5cc7a1109b 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
| @@ -14,12 +14,15 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include <linux/cgroup.h> | 16 | #include <linux/cgroup.h> |
| 17 | #include <linux/u64_stats_sync.h> | 17 | #include <linux/percpu_counter.h> |
| 18 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
| 19 | #include <linux/radix-tree.h> | 19 | #include <linux/radix-tree.h> |
| 20 | #include <linux/blkdev.h> | 20 | #include <linux/blkdev.h> |
| 21 | #include <linux/atomic.h> | 21 | #include <linux/atomic.h> |
| 22 | 22 | ||
| 23 | /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ | ||
| 24 | #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) | ||
| 25 | |||
| 23 | /* Max limits for throttle policy */ | 26 | /* Max limits for throttle policy */ |
| 24 | #define THROTL_IOPS_MAX UINT_MAX | 27 | #define THROTL_IOPS_MAX UINT_MAX |
| 25 | 28 | ||
| @@ -45,7 +48,7 @@ struct blkcg { | |||
| 45 | struct blkcg_gq *blkg_hint; | 48 | struct blkcg_gq *blkg_hint; |
| 46 | struct hlist_head blkg_list; | 49 | struct hlist_head blkg_list; |
| 47 | 50 | ||
| 48 | struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; | 51 | struct blkcg_policy_data *cpd[BLKCG_MAX_POLS]; |
| 49 | 52 | ||
| 50 | struct list_head all_blkcgs_node; | 53 | struct list_head all_blkcgs_node; |
| 51 | #ifdef CONFIG_CGROUP_WRITEBACK | 54 | #ifdef CONFIG_CGROUP_WRITEBACK |
| @@ -53,14 +56,19 @@ struct blkcg { | |||
| 53 | #endif | 56 | #endif |
| 54 | }; | 57 | }; |
| 55 | 58 | ||
| 59 | /* | ||
| 60 | * blkg_[rw]stat->aux_cnt is excluded for local stats but included for | ||
| 61 | * recursive. Used to carry stats of dead children, and, for blkg_rwstat, | ||
| 62 | * to carry result values from read and sum operations. | ||
| 63 | */ | ||
| 56 | struct blkg_stat { | 64 | struct blkg_stat { |
| 57 | struct u64_stats_sync syncp; | 65 | struct percpu_counter cpu_cnt; |
| 58 | uint64_t cnt; | 66 | atomic64_t aux_cnt; |
| 59 | }; | 67 | }; |
| 60 | 68 | ||
| 61 | struct blkg_rwstat { | 69 | struct blkg_rwstat { |
| 62 | struct u64_stats_sync syncp; | 70 | struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; |
| 63 | uint64_t cnt[BLKG_RWSTAT_NR]; | 71 | atomic64_t aux_cnt[BLKG_RWSTAT_NR]; |
| 64 | }; | 72 | }; |
| 65 | 73 | ||
| 66 | /* | 74 | /* |
| @@ -68,32 +76,28 @@ struct blkg_rwstat { | |||
| 68 | * request_queue (q). This is used by blkcg policies which need to track | 76 | * request_queue (q). This is used by blkcg policies which need to track |
| 69 | * information per blkcg - q pair. | 77 | * information per blkcg - q pair. |
| 70 | * | 78 | * |
| 71 | * There can be multiple active blkcg policies and each has its private | 79 | * There can be multiple active blkcg policies and each blkg:policy pair is |
| 72 | * data on each blkg, the size of which is determined by | 80 | * represented by a blkg_policy_data which is allocated and freed by each |
| 73 | * blkcg_policy->pd_size. blkcg core allocates and frees such areas | 81 | * policy's pd_alloc/free_fn() methods. A policy can allocate private data |
| 74 | * together with blkg and invokes pd_init/exit_fn() methods. | 82 | * area by allocating larger data structure which embeds blkg_policy_data |
| 75 | * | 83 | * at the beginning. |
| 76 | * Such private data must embed struct blkg_policy_data (pd) at the | ||
| 77 | * beginning and pd_size can't be smaller than pd. | ||
| 78 | */ | 84 | */ |
| 79 | struct blkg_policy_data { | 85 | struct blkg_policy_data { |
| 80 | /* the blkg and policy id this per-policy data belongs to */ | 86 | /* the blkg and policy id this per-policy data belongs to */ |
| 81 | struct blkcg_gq *blkg; | 87 | struct blkcg_gq *blkg; |
| 82 | int plid; | 88 | int plid; |
| 83 | |||
| 84 | /* used during policy activation */ | ||
| 85 | struct list_head alloc_node; | ||
| 86 | }; | 89 | }; |
| 87 | 90 | ||
| 88 | /* | 91 | /* |
| 89 | * Policies that need to keep per-blkcg data which is independent | 92 | * Policies that need to keep per-blkcg data which is independent from any |
| 90 | * from any request_queue associated to it must specify its size | 93 | * request_queue associated to it should implement cpd_alloc/free_fn() |
| 91 | * with the cpd_size field of the blkcg_policy structure and | 94 | * methods. A policy can allocate private data area by allocating larger |
| 92 | * embed a blkcg_policy_data in it. cpd_init() is invoked to let | 95 | * data structure which embeds blkcg_policy_data at the beginning. |
| 93 | * each policy handle per-blkcg data. | 96 | * cpd_init() is invoked to let each policy handle per-blkcg data. |
| 94 | */ | 97 | */ |
| 95 | struct blkcg_policy_data { | 98 | struct blkcg_policy_data { |
| 96 | /* the policy id this per-policy data belongs to */ | 99 | /* the blkcg and policy id this per-policy data belongs to */ |
| 100 | struct blkcg *blkcg; | ||
| 97 | int plid; | 101 | int plid; |
| 98 | }; | 102 | }; |
| 99 | 103 | ||
| @@ -123,40 +127,50 @@ struct blkcg_gq { | |||
| 123 | /* is this blkg online? protected by both blkcg and q locks */ | 127 | /* is this blkg online? protected by both blkcg and q locks */ |
| 124 | bool online; | 128 | bool online; |
| 125 | 129 | ||
| 130 | struct blkg_rwstat stat_bytes; | ||
| 131 | struct blkg_rwstat stat_ios; | ||
| 132 | |||
| 126 | struct blkg_policy_data *pd[BLKCG_MAX_POLS]; | 133 | struct blkg_policy_data *pd[BLKCG_MAX_POLS]; |
| 127 | 134 | ||
| 128 | struct rcu_head rcu_head; | 135 | struct rcu_head rcu_head; |
| 129 | }; | 136 | }; |
| 130 | 137 | ||
| 131 | typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg); | 138 | typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); |
| 132 | typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg); | 139 | typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); |
| 133 | typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg); | 140 | typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); |
| 134 | typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg); | 141 | typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); |
| 135 | typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg); | 142 | typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node); |
| 136 | typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg); | 143 | typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); |
| 144 | typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); | ||
| 145 | typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); | ||
| 146 | typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); | ||
| 147 | typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); | ||
| 137 | 148 | ||
| 138 | struct blkcg_policy { | 149 | struct blkcg_policy { |
| 139 | int plid; | 150 | int plid; |
| 140 | /* policy specific private data size */ | ||
| 141 | size_t pd_size; | ||
| 142 | /* policy specific per-blkcg data size */ | ||
| 143 | size_t cpd_size; | ||
| 144 | /* cgroup files for the policy */ | 151 | /* cgroup files for the policy */ |
| 145 | struct cftype *cftypes; | 152 | struct cftype *dfl_cftypes; |
| 153 | struct cftype *legacy_cftypes; | ||
| 146 | 154 | ||
| 147 | /* operations */ | 155 | /* operations */ |
| 156 | blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; | ||
| 148 | blkcg_pol_init_cpd_fn *cpd_init_fn; | 157 | blkcg_pol_init_cpd_fn *cpd_init_fn; |
| 158 | blkcg_pol_free_cpd_fn *cpd_free_fn; | ||
| 159 | blkcg_pol_bind_cpd_fn *cpd_bind_fn; | ||
| 160 | |||
| 161 | blkcg_pol_alloc_pd_fn *pd_alloc_fn; | ||
| 149 | blkcg_pol_init_pd_fn *pd_init_fn; | 162 | blkcg_pol_init_pd_fn *pd_init_fn; |
| 150 | blkcg_pol_online_pd_fn *pd_online_fn; | 163 | blkcg_pol_online_pd_fn *pd_online_fn; |
| 151 | blkcg_pol_offline_pd_fn *pd_offline_fn; | 164 | blkcg_pol_offline_pd_fn *pd_offline_fn; |
| 152 | blkcg_pol_exit_pd_fn *pd_exit_fn; | 165 | blkcg_pol_free_pd_fn *pd_free_fn; |
| 153 | blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; | 166 | blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; |
| 154 | }; | 167 | }; |
| 155 | 168 | ||
| 156 | extern struct blkcg blkcg_root; | 169 | extern struct blkcg blkcg_root; |
| 157 | extern struct cgroup_subsys_state * const blkcg_root_css; | 170 | extern struct cgroup_subsys_state * const blkcg_root_css; |
| 158 | 171 | ||
| 159 | struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); | 172 | struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, |
| 173 | struct request_queue *q, bool update_hint); | ||
| 160 | struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, | 174 | struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, |
| 161 | struct request_queue *q); | 175 | struct request_queue *q); |
| 162 | int blkcg_init_queue(struct request_queue *q); | 176 | int blkcg_init_queue(struct request_queue *q); |
| @@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q, | |||
| 171 | void blkcg_deactivate_policy(struct request_queue *q, | 185 | void blkcg_deactivate_policy(struct request_queue *q, |
| 172 | const struct blkcg_policy *pol); | 186 | const struct blkcg_policy *pol); |
| 173 | 187 | ||
| 188 | const char *blkg_dev_name(struct blkcg_gq *blkg); | ||
| 174 | void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, | 189 | void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, |
| 175 | u64 (*prfill)(struct seq_file *, | 190 | u64 (*prfill)(struct seq_file *, |
| 176 | struct blkg_policy_data *, int), | 191 | struct blkg_policy_data *, int), |
| @@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | |||
| 182 | u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); | 197 | u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); |
| 183 | u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | 198 | u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, |
| 184 | int off); | 199 | int off); |
| 200 | int blkg_print_stat_bytes(struct seq_file *sf, void *v); | ||
| 201 | int blkg_print_stat_ios(struct seq_file *sf, void *v); | ||
| 202 | int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); | ||
| 203 | int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); | ||
| 185 | 204 | ||
| 186 | u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off); | 205 | u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, |
| 187 | struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, | 206 | struct blkcg_policy *pol, int off); |
| 188 | int off); | 207 | struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, |
| 208 | struct blkcg_policy *pol, int off); | ||
| 189 | 209 | ||
| 190 | struct blkg_conf_ctx { | 210 | struct blkg_conf_ctx { |
| 191 | struct gendisk *disk; | 211 | struct gendisk *disk; |
| 192 | struct blkcg_gq *blkg; | 212 | struct blkcg_gq *blkg; |
| 193 | u64 v; | 213 | char *body; |
| 194 | }; | 214 | }; |
| 195 | 215 | ||
| 196 | int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | 216 | int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, |
| 197 | const char *input, struct blkg_conf_ctx *ctx); | 217 | char *input, struct blkg_conf_ctx *ctx); |
| 198 | void blkg_conf_finish(struct blkg_conf_ctx *ctx); | 218 | void blkg_conf_finish(struct blkg_conf_ctx *ctx); |
| 199 | 219 | ||
| 200 | 220 | ||
| @@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) | |||
| 205 | 225 | ||
| 206 | static inline struct blkcg *task_blkcg(struct task_struct *tsk) | 226 | static inline struct blkcg *task_blkcg(struct task_struct *tsk) |
| 207 | { | 227 | { |
| 208 | return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); | 228 | return css_to_blkcg(task_css(tsk, io_cgrp_id)); |
| 209 | } | 229 | } |
| 210 | 230 | ||
| 211 | static inline struct blkcg *bio_blkcg(struct bio *bio) | 231 | static inline struct blkcg *bio_blkcg(struct bio *bio) |
| @@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio) | |||
| 218 | static inline struct cgroup_subsys_state * | 238 | static inline struct cgroup_subsys_state * |
| 219 | task_get_blkcg_css(struct task_struct *task) | 239 | task_get_blkcg_css(struct task_struct *task) |
| 220 | { | 240 | { |
| 221 | return task_get_css(task, blkio_cgrp_id); | 241 | return task_get_css(task, io_cgrp_id); |
| 222 | } | 242 | } |
| 223 | 243 | ||
| 224 | /** | 244 | /** |
| @@ -233,6 +253,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) | |||
| 233 | } | 253 | } |
| 234 | 254 | ||
| 235 | /** | 255 | /** |
| 256 | * __blkg_lookup - internal version of blkg_lookup() | ||
| 257 | * @blkcg: blkcg of interest | ||
| 258 | * @q: request_queue of interest | ||
| 259 | * @update_hint: whether to update lookup hint with the result or not | ||
| 260 | * | ||
| 261 | * This is internal version and shouldn't be used by policy | ||
| 262 | * implementations. Looks up blkgs for the @blkcg - @q pair regardless of | ||
| 263 | * @q's bypass state. If @update_hint is %true, the caller should be | ||
| 264 | * holding @q->queue_lock and lookup hint is updated on success. | ||
| 265 | */ | ||
| 266 | static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, | ||
| 267 | struct request_queue *q, | ||
| 268 | bool update_hint) | ||
| 269 | { | ||
| 270 | struct blkcg_gq *blkg; | ||
| 271 | |||
| 272 | if (blkcg == &blkcg_root) | ||
| 273 | return q->root_blkg; | ||
| 274 | |||
| 275 | blkg = rcu_dereference(blkcg->blkg_hint); | ||
| 276 | if (blkg && blkg->q == q) | ||
| 277 | return blkg; | ||
| 278 | |||
| 279 | return blkg_lookup_slowpath(blkcg, q, update_hint); | ||
| 280 | } | ||
| 281 | |||
| 282 | /** | ||
| 283 | * blkg_lookup - lookup blkg for the specified blkcg - q pair | ||
| 284 | * @blkcg: blkcg of interest | ||
| 285 | * @q: request_queue of interest | ||
| 286 | * | ||
| 287 | * Lookup blkg for the @blkcg - @q pair. This function should be called | ||
| 288 | * under RCU read lock and is guaranteed to return %NULL if @q is bypassing | ||
| 289 | * - see blk_queue_bypass_start() for details. | ||
| 290 | */ | ||
| 291 | static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, | ||
| 292 | struct request_queue *q) | ||
| 293 | { | ||
| 294 | WARN_ON_ONCE(!rcu_read_lock_held()); | ||
| 295 | |||
| 296 | if (unlikely(blk_queue_bypass(q))) | ||
| 297 | return NULL; | ||
| 298 | return __blkg_lookup(blkcg, q, false); | ||
| 299 | } | ||
| 300 | |||
| 301 | /** | ||
| 236 | * blkg_to_pdata - get policy private data | 302 | * blkg_to_pdata - get policy private data |
| 237 | * @blkg: blkg of interest | 303 | * @blkg: blkg of interest |
| 238 | * @pol: policy of interest | 304 | * @pol: policy of interest |
| @@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, | |||
| 248 | static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, | 314 | static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, |
| 249 | struct blkcg_policy *pol) | 315 | struct blkcg_policy *pol) |
| 250 | { | 316 | { |
| 251 | return blkcg ? blkcg->pd[pol->plid] : NULL; | 317 | return blkcg ? blkcg->cpd[pol->plid] : NULL; |
| 252 | } | 318 | } |
| 253 | 319 | ||
| 254 | /** | 320 | /** |
| @@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) | |||
| 262 | return pd ? pd->blkg : NULL; | 328 | return pd ? pd->blkg : NULL; |
| 263 | } | 329 | } |
| 264 | 330 | ||
| 331 | static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) | ||
| 332 | { | ||
| 333 | return cpd ? cpd->blkcg : NULL; | ||
| 334 | } | ||
| 335 | |||
| 265 | /** | 336 | /** |
| 266 | * blkg_path - format cgroup path of blkg | 337 | * blkg_path - format cgroup path of blkg |
| 267 | * @blkg: blkg of interest | 338 | * @blkg: blkg of interest |
| @@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) | |||
| 309 | call_rcu(&blkg->rcu_head, __blkg_release_rcu); | 380 | call_rcu(&blkg->rcu_head, __blkg_release_rcu); |
| 310 | } | 381 | } |
| 311 | 382 | ||
| 312 | struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, | ||
| 313 | bool update_hint); | ||
| 314 | |||
| 315 | /** | 383 | /** |
| 316 | * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants | 384 | * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants |
| 317 | * @d_blkg: loop cursor pointing to the current descendant | 385 | * @d_blkg: loop cursor pointing to the current descendant |
| @@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, | |||
| 373 | * or if either the blkcg or queue is going away. Fall back to | 441 | * or if either the blkcg or queue is going away. Fall back to |
| 374 | * root_rl in such cases. | 442 | * root_rl in such cases. |
| 375 | */ | 443 | */ |
| 376 | blkg = blkg_lookup_create(blkcg, q); | 444 | blkg = blkg_lookup(blkcg, q); |
| 377 | if (IS_ERR(blkg)) | 445 | if (unlikely(!blkg)) |
| 378 | goto root_rl; | 446 | goto root_rl; |
| 379 | 447 | ||
| 380 | blkg_get(blkg); | 448 | blkg_get(blkg); |
| @@ -394,8 +462,7 @@ root_rl: | |||
| 394 | */ | 462 | */ |
| 395 | static inline void blk_put_rl(struct request_list *rl) | 463 | static inline void blk_put_rl(struct request_list *rl) |
| 396 | { | 464 | { |
| 397 | /* root_rl may not have blkg set */ | 465 | if (rl->blkg->blkcg != &blkcg_root) |
| 398 | if (rl->blkg && rl->blkg->blkcg != &blkcg_root) | ||
| 399 | blkg_put(rl->blkg); | 466 | blkg_put(rl->blkg); |
| 400 | } | 467 | } |
| 401 | 468 | ||
| @@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl, | |||
| 433 | #define blk_queue_for_each_rl(rl, q) \ | 500 | #define blk_queue_for_each_rl(rl, q) \ |
| 434 | for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) | 501 | for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) |
| 435 | 502 | ||
| 436 | static inline void blkg_stat_init(struct blkg_stat *stat) | 503 | static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) |
| 437 | { | 504 | { |
| 438 | u64_stats_init(&stat->syncp); | 505 | int ret; |
| 506 | |||
| 507 | ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); | ||
| 508 | if (ret) | ||
| 509 | return ret; | ||
| 510 | |||
| 511 | atomic64_set(&stat->aux_cnt, 0); | ||
| 512 | return 0; | ||
| 513 | } | ||
| 514 | |||
| 515 | static inline void blkg_stat_exit(struct blkg_stat *stat) | ||
| 516 | { | ||
| 517 | percpu_counter_destroy(&stat->cpu_cnt); | ||
| 439 | } | 518 | } |
| 440 | 519 | ||
| 441 | /** | 520 | /** |
| @@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat) | |||
| 443 | * @stat: target blkg_stat | 522 | * @stat: target blkg_stat |
| 444 | * @val: value to add | 523 | * @val: value to add |
| 445 | * | 524 | * |
| 446 | * Add @val to @stat. The caller is responsible for synchronizing calls to | 525 | * Add @val to @stat. The caller must ensure that IRQ on the same CPU |
| 447 | * this function. | 526 | * don't re-enter this function for the same counter. |
| 448 | */ | 527 | */ |
| 449 | static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) | 528 | static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) |
| 450 | { | 529 | { |
| 451 | u64_stats_update_begin(&stat->syncp); | 530 | __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); |
| 452 | stat->cnt += val; | ||
| 453 | u64_stats_update_end(&stat->syncp); | ||
| 454 | } | 531 | } |
| 455 | 532 | ||
| 456 | /** | 533 | /** |
| 457 | * blkg_stat_read - read the current value of a blkg_stat | 534 | * blkg_stat_read - read the current value of a blkg_stat |
| 458 | * @stat: blkg_stat to read | 535 | * @stat: blkg_stat to read |
| 459 | * | ||
| 460 | * Read the current value of @stat. This function can be called without | ||
| 461 | * synchroniztion and takes care of u64 atomicity. | ||
| 462 | */ | 536 | */ |
| 463 | static inline uint64_t blkg_stat_read(struct blkg_stat *stat) | 537 | static inline uint64_t blkg_stat_read(struct blkg_stat *stat) |
| 464 | { | 538 | { |
| 465 | unsigned int start; | 539 | return percpu_counter_sum_positive(&stat->cpu_cnt); |
| 466 | uint64_t v; | ||
| 467 | |||
| 468 | do { | ||
| 469 | start = u64_stats_fetch_begin_irq(&stat->syncp); | ||
| 470 | v = stat->cnt; | ||
| 471 | } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); | ||
| 472 | |||
| 473 | return v; | ||
| 474 | } | 540 | } |
| 475 | 541 | ||
| 476 | /** | 542 | /** |
| @@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat) | |||
| 479 | */ | 545 | */ |
| 480 | static inline void blkg_stat_reset(struct blkg_stat *stat) | 546 | static inline void blkg_stat_reset(struct blkg_stat *stat) |
| 481 | { | 547 | { |
| 482 | stat->cnt = 0; | 548 | percpu_counter_set(&stat->cpu_cnt, 0); |
| 549 | atomic64_set(&stat->aux_cnt, 0); | ||
| 483 | } | 550 | } |
| 484 | 551 | ||
| 485 | /** | 552 | /** |
| 486 | * blkg_stat_merge - merge a blkg_stat into another | 553 | * blkg_stat_add_aux - add a blkg_stat into another's aux count |
| 487 | * @to: the destination blkg_stat | 554 | * @to: the destination blkg_stat |
| 488 | * @from: the source | 555 | * @from: the source |
| 489 | * | 556 | * |
| 490 | * Add @from's count to @to. | 557 | * Add @from's count including the aux one to @to's aux count. |
| 491 | */ | 558 | */ |
| 492 | static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) | 559 | static inline void blkg_stat_add_aux(struct blkg_stat *to, |
| 560 | struct blkg_stat *from) | ||
| 493 | { | 561 | { |
| 494 | blkg_stat_add(to, blkg_stat_read(from)); | 562 | atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt), |
| 563 | &to->aux_cnt); | ||
| 495 | } | 564 | } |
| 496 | 565 | ||
| 497 | static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) | 566 | static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) |
| 498 | { | 567 | { |
| 499 | u64_stats_init(&rwstat->syncp); | 568 | int i, ret; |
| 569 | |||
| 570 | for (i = 0; i < BLKG_RWSTAT_NR; i++) { | ||
| 571 | ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp); | ||
| 572 | if (ret) { | ||
| 573 | while (--i >= 0) | ||
| 574 | percpu_counter_destroy(&rwstat->cpu_cnt[i]); | ||
| 575 | return ret; | ||
| 576 | } | ||
| 577 | atomic64_set(&rwstat->aux_cnt[i], 0); | ||
| 578 | } | ||
| 579 | return 0; | ||
| 580 | } | ||
| 581 | |||
| 582 | static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) | ||
| 583 | { | ||
| 584 | int i; | ||
| 585 | |||
| 586 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | ||
| 587 | percpu_counter_destroy(&rwstat->cpu_cnt[i]); | ||
| 500 | } | 588 | } |
| 501 | 589 | ||
| 502 | /** | 590 | /** |
| @@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) | |||
| 511 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, | 599 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, |
| 512 | int rw, uint64_t val) | 600 | int rw, uint64_t val) |
| 513 | { | 601 | { |
| 514 | u64_stats_update_begin(&rwstat->syncp); | 602 | struct percpu_counter *cnt; |
| 515 | 603 | ||
| 516 | if (rw & REQ_WRITE) | 604 | if (rw & REQ_WRITE) |
| 517 | rwstat->cnt[BLKG_RWSTAT_WRITE] += val; | 605 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; |
| 518 | else | 606 | else |
| 519 | rwstat->cnt[BLKG_RWSTAT_READ] += val; | 607 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; |
| 608 | |||
| 609 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); | ||
| 610 | |||
| 520 | if (rw & REQ_SYNC) | 611 | if (rw & REQ_SYNC) |
| 521 | rwstat->cnt[BLKG_RWSTAT_SYNC] += val; | 612 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; |
| 522 | else | 613 | else |
| 523 | rwstat->cnt[BLKG_RWSTAT_ASYNC] += val; | 614 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; |
| 524 | 615 | ||
| 525 | u64_stats_update_end(&rwstat->syncp); | 616 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); |
| 526 | } | 617 | } |
| 527 | 618 | ||
| 528 | /** | 619 | /** |
| 529 | * blkg_rwstat_read - read the current values of a blkg_rwstat | 620 | * blkg_rwstat_read - read the current values of a blkg_rwstat |
| 530 | * @rwstat: blkg_rwstat to read | 621 | * @rwstat: blkg_rwstat to read |
| 531 | * | 622 | * |
| 532 | * Read the current snapshot of @rwstat and return it as the return value. | 623 | * Read the current snapshot of @rwstat and return it in the aux counts. |
| 533 | * This function can be called without synchronization and takes care of | ||
| 534 | * u64 atomicity. | ||
| 535 | */ | 624 | */ |
| 536 | static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) | 625 | static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) |
| 537 | { | 626 | { |
| 538 | unsigned int start; | 627 | struct blkg_rwstat result; |
| 539 | struct blkg_rwstat tmp; | 628 | int i; |
| 540 | |||
| 541 | do { | ||
| 542 | start = u64_stats_fetch_begin_irq(&rwstat->syncp); | ||
| 543 | tmp = *rwstat; | ||
| 544 | } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); | ||
| 545 | 629 | ||
| 546 | return tmp; | 630 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
| 631 | atomic64_set(&result.aux_cnt[i], | ||
| 632 | percpu_counter_sum_positive(&rwstat->cpu_cnt[i])); | ||
| 633 | return result; | ||
| 547 | } | 634 | } |
| 548 | 635 | ||
| 549 | /** | 636 | /** |
| @@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) | |||
| 558 | { | 645 | { |
| 559 | struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); | 646 | struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); |
| 560 | 647 | ||
| 561 | return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; | 648 | return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + |
| 649 | atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); | ||
| 562 | } | 650 | } |
| 563 | 651 | ||
| 564 | /** | 652 | /** |
| @@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) | |||
| 567 | */ | 655 | */ |
| 568 | static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) | 656 | static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) |
| 569 | { | 657 | { |
| 570 | memset(rwstat->cnt, 0, sizeof(rwstat->cnt)); | 658 | int i; |
| 659 | |||
| 660 | for (i = 0; i < BLKG_RWSTAT_NR; i++) { | ||
| 661 | percpu_counter_set(&rwstat->cpu_cnt[i], 0); | ||
| 662 | atomic64_set(&rwstat->aux_cnt[i], 0); | ||
| 663 | } | ||
| 571 | } | 664 | } |
| 572 | 665 | ||
| 573 | /** | 666 | /** |
| 574 | * blkg_rwstat_merge - merge a blkg_rwstat into another | 667 | * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count |
| 575 | * @to: the destination blkg_rwstat | 668 | * @to: the destination blkg_rwstat |
| 576 | * @from: the source | 669 | * @from: the source |
| 577 | * | 670 | * |
| 578 | * Add @from's counts to @to. | 671 | * Add @from's count including the aux one to @to's aux count. |
| 579 | */ | 672 | */ |
| 580 | static inline void blkg_rwstat_merge(struct blkg_rwstat *to, | 673 | static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, |
| 581 | struct blkg_rwstat *from) | 674 | struct blkg_rwstat *from) |
| 582 | { | 675 | { |
| 583 | struct blkg_rwstat v = blkg_rwstat_read(from); | 676 | struct blkg_rwstat v = blkg_rwstat_read(from); |
| 584 | int i; | 677 | int i; |
| 585 | 678 | ||
| 586 | u64_stats_update_begin(&to->syncp); | ||
| 587 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | 679 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
| 588 | to->cnt[i] += v.cnt[i]; | 680 | atomic64_add(atomic64_read(&v.aux_cnt[i]) + |
| 589 | u64_stats_update_end(&to->syncp); | 681 | atomic64_read(&from->aux_cnt[i]), |
| 682 | &to->aux_cnt[i]); | ||
| 683 | } | ||
| 684 | |||
| 685 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
| 686 | extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, | ||
| 687 | struct bio *bio); | ||
| 688 | #else | ||
| 689 | static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, | ||
| 690 | struct bio *bio) { return false; } | ||
| 691 | #endif | ||
| 692 | |||
| 693 | static inline bool blkcg_bio_issue_check(struct request_queue *q, | ||
| 694 | struct bio *bio) | ||
| 695 | { | ||
| 696 | struct blkcg *blkcg; | ||
| 697 | struct blkcg_gq *blkg; | ||
| 698 | bool throtl = false; | ||
| 699 | |||
| 700 | rcu_read_lock(); | ||
| 701 | blkcg = bio_blkcg(bio); | ||
| 702 | |||
| 703 | blkg = blkg_lookup(blkcg, q); | ||
| 704 | if (unlikely(!blkg)) { | ||
| 705 | spin_lock_irq(q->queue_lock); | ||
| 706 | blkg = blkg_lookup_create(blkcg, q); | ||
| 707 | if (IS_ERR(blkg)) | ||
| 708 | blkg = NULL; | ||
| 709 | spin_unlock_irq(q->queue_lock); | ||
| 710 | } | ||
| 711 | |||
| 712 | throtl = blk_throtl_bio(q, blkg, bio); | ||
| 713 | |||
| 714 | if (!throtl) { | ||
| 715 | blkg = blkg ?: q->root_blkg; | ||
| 716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, | ||
| 717 | bio->bi_iter.bi_size); | ||
| 718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); | ||
| 719 | } | ||
| 720 | |||
| 721 | rcu_read_unlock(); | ||
| 722 | return !throtl; | ||
| 590 | } | 723 | } |
| 591 | 724 | ||
| 592 | #else /* CONFIG_BLK_CGROUP */ | 725 | #else /* CONFIG_BLK_CGROUP */ |
| @@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { } | |||
| 642 | static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } | 775 | static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } |
| 643 | static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } | 776 | static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } |
| 644 | 777 | ||
| 778 | static inline bool blkcg_bio_issue_check(struct request_queue *q, | ||
| 779 | struct bio *bio) { return true; } | ||
| 780 | |||
| 645 | #define blk_queue_for_each_rl(rl, q) \ | 781 | #define blk_queue_for_each_rl(rl, q) \ |
| 646 | for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) | 782 | for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) |
| 647 | 783 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a622f270f09e..38a5ff772a37 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -584,7 +584,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
| 584 | 584 | ||
| 585 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) | 585 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
| 586 | 586 | ||
| 587 | #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0) | 587 | #define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) |
| 588 | 588 | ||
| 589 | /* | 589 | /* |
| 590 | * Driver can handle struct request, if it either has an old style | 590 | * Driver can handle struct request, if it either has an old style |
| @@ -1569,8 +1569,8 @@ struct block_device_operations { | |||
| 1569 | int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); | 1569 | int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); |
| 1570 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1570 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1571 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1571 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1572 | long (*direct_access)(struct block_device *, sector_t, | 1572 | long (*direct_access)(struct block_device *, sector_t, void __pmem **, |
| 1573 | void **, unsigned long *pfn, long size); | 1573 | unsigned long *pfn); |
| 1574 | unsigned int (*check_events) (struct gendisk *disk, | 1574 | unsigned int (*check_events) (struct gendisk *disk, |
| 1575 | unsigned int clearing); | 1575 | unsigned int clearing); |
| 1576 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ | 1576 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ |
| @@ -1588,8 +1588,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | |||
| 1588 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); | 1588 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); |
| 1589 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, | 1589 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, |
| 1590 | struct writeback_control *); | 1590 | struct writeback_control *); |
| 1591 | extern long bdev_direct_access(struct block_device *, sector_t, void **addr, | 1591 | extern long bdev_direct_access(struct block_device *, sector_t, |
| 1592 | unsigned long *pfn, long size); | 1592 | void __pmem **addr, unsigned long *pfn, long size); |
| 1593 | #else /* CONFIG_BLOCK */ | 1593 | #else /* CONFIG_BLOCK */ |
| 1594 | 1594 | ||
| 1595 | struct block_device; | 1595 | struct block_device; |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 9ebee53d3bf5..397c5cd09794 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
| @@ -46,6 +46,7 @@ struct ceph_options { | |||
| 46 | unsigned long mount_timeout; /* jiffies */ | 46 | unsigned long mount_timeout; /* jiffies */ |
| 47 | unsigned long osd_idle_ttl; /* jiffies */ | 47 | unsigned long osd_idle_ttl; /* jiffies */ |
| 48 | unsigned long osd_keepalive_timeout; /* jiffies */ | 48 | unsigned long osd_keepalive_timeout; /* jiffies */ |
| 49 | unsigned long monc_ping_timeout; /* jiffies */ | ||
| 49 | 50 | ||
| 50 | /* | 51 | /* |
| 51 | * any type that can't be simply compared or doesn't need need | 52 | * any type that can't be simply compared or doesn't need need |
| @@ -66,6 +67,7 @@ struct ceph_options { | |||
| 66 | #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) | 67 | #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) |
| 67 | #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) | 68 | #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) |
| 68 | #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) | 69 | #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) |
| 70 | #define CEPH_MONC_PING_TIMEOUT_DEFAULT msecs_to_jiffies(30 * 1000) | ||
| 69 | 71 | ||
| 70 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | 72 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) |
| 71 | #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) | 73 | #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 37753278987a..7e1252e97a30 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -248,6 +248,8 @@ struct ceph_connection { | |||
| 248 | int in_base_pos; /* bytes read */ | 248 | int in_base_pos; /* bytes read */ |
| 249 | __le64 in_temp_ack; /* for reading an ack */ | 249 | __le64 in_temp_ack; /* for reading an ack */ |
| 250 | 250 | ||
| 251 | struct timespec last_keepalive_ack; | ||
| 252 | |||
| 251 | struct delayed_work work; /* send|recv work */ | 253 | struct delayed_work work; /* send|recv work */ |
| 252 | unsigned long delay; /* current delay interval */ | 254 | unsigned long delay; /* current delay interval */ |
| 253 | }; | 255 | }; |
| @@ -285,6 +287,8 @@ extern void ceph_msg_revoke(struct ceph_msg *msg); | |||
| 285 | extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); | 287 | extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); |
| 286 | 288 | ||
| 287 | extern void ceph_con_keepalive(struct ceph_connection *con); | 289 | extern void ceph_con_keepalive(struct ceph_connection *con); |
| 290 | extern bool ceph_con_keepalive_expired(struct ceph_connection *con, | ||
| 291 | unsigned long interval); | ||
| 288 | 292 | ||
| 289 | extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, | 293 | extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, |
| 290 | size_t length, size_t alignment); | 294 | size_t length, size_t alignment); |
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 1c1887206ffa..0fe2656ac415 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h | |||
| @@ -84,10 +84,12 @@ struct ceph_entity_inst { | |||
| 84 | #define CEPH_MSGR_TAG_MSG 7 /* message */ | 84 | #define CEPH_MSGR_TAG_MSG 7 /* message */ |
| 85 | #define CEPH_MSGR_TAG_ACK 8 /* message ack */ | 85 | #define CEPH_MSGR_TAG_ACK 8 /* message ack */ |
| 86 | #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ | 86 | #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ |
| 87 | #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ | 87 | #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ |
| 88 | #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ | 88 | #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ |
| 89 | #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ | 89 | #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ |
| 90 | #define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ | 90 | #define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ |
| 91 | #define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */ | ||
| 92 | #define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */ | ||
| 91 | 93 | ||
| 92 | 94 | ||
| 93 | /* | 95 | /* |
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1f36945fd23d..1a96fdaa33d5 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h | |||
| @@ -27,7 +27,7 @@ SUBSYS(cpuacct) | |||
| 27 | #endif | 27 | #endif |
| 28 | 28 | ||
| 29 | #if IS_ENABLED(CONFIG_BLK_CGROUP) | 29 | #if IS_ENABLED(CONFIG_BLK_CGROUP) |
| 30 | SUBSYS(blkio) | 30 | SUBSYS(io) |
| 31 | #endif | 31 | #endif |
| 32 | 32 | ||
| 33 | #if IS_ENABLED(CONFIG_MEMCG) | 33 | #if IS_ENABLED(CONFIG_MEMCG) |
diff --git a/include/linux/dax.h b/include/linux/dax.h new file mode 100644 index 000000000000..b415e521528d --- /dev/null +++ b/include/linux/dax.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | #ifndef _LINUX_DAX_H | ||
| 2 | #define _LINUX_DAX_H | ||
| 3 | |||
| 4 | #include <linux/fs.h> | ||
| 5 | #include <linux/mm.h> | ||
| 6 | #include <asm/pgtable.h> | ||
| 7 | |||
| 8 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, | ||
| 9 | get_block_t, dio_iodone_t, int flags); | ||
| 10 | int dax_clear_blocks(struct inode *, sector_t block, long size); | ||
| 11 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | ||
| 12 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | ||
| 13 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, | ||
| 14 | dax_iodone_t); | ||
| 15 | int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, | ||
| 16 | dax_iodone_t); | ||
| 17 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 18 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | ||
| 19 | unsigned int flags, get_block_t, dax_iodone_t); | ||
| 20 | int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | ||
| 21 | unsigned int flags, get_block_t, dax_iodone_t); | ||
| 22 | #else | ||
| 23 | static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, | ||
| 24 | pmd_t *pmd, unsigned int flags, get_block_t gb, | ||
| 25 | dax_iodone_t di) | ||
| 26 | { | ||
| 27 | return VM_FAULT_FALLBACK; | ||
| 28 | } | ||
| 29 | #define __dax_pmd_fault dax_pmd_fault | ||
| 30 | #endif | ||
| 31 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | ||
| 32 | #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) | ||
| 33 | #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) | ||
| 34 | |||
| 35 | static inline bool vma_is_dax(struct vm_area_struct *vma) | ||
| 36 | { | ||
| 37 | return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); | ||
| 38 | } | ||
| 39 | #endif | ||
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 420311bcee38..9beb636b97eb 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
| @@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, | |||
| 116 | 116 | ||
| 117 | bool debugfs_initialized(void); | 117 | bool debugfs_initialized(void); |
| 118 | 118 | ||
| 119 | ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, | ||
| 120 | size_t count, loff_t *ppos); | ||
| 121 | |||
| 122 | ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, | ||
| 123 | size_t count, loff_t *ppos); | ||
| 124 | |||
| 119 | #else | 125 | #else |
| 120 | 126 | ||
| 121 | #include <linux/err.h> | 127 | #include <linux/err.h> |
| @@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, | |||
| 282 | return ERR_PTR(-ENODEV); | 288 | return ERR_PTR(-ENODEV); |
| 283 | } | 289 | } |
| 284 | 290 | ||
| 291 | static inline ssize_t debugfs_read_file_bool(struct file *file, | ||
| 292 | char __user *user_buf, | ||
| 293 | size_t count, loff_t *ppos) | ||
| 294 | { | ||
| 295 | return -ENODEV; | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline ssize_t debugfs_write_file_bool(struct file *file, | ||
| 299 | const char __user *user_buf, | ||
| 300 | size_t count, loff_t *ppos) | ||
| 301 | { | ||
| 302 | return -ENODEV; | ||
| 303 | } | ||
| 304 | |||
| 285 | #endif | 305 | #endif |
| 286 | 306 | ||
| 287 | #endif | 307 | #endif |
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index e1043f79122f..53ba737505df 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
| @@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool); | |||
| 24 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, | 24 | void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
| 25 | dma_addr_t *handle); | 25 | dma_addr_t *handle); |
| 26 | 26 | ||
| 27 | static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, | ||
| 28 | dma_addr_t *handle) | ||
| 29 | { | ||
| 30 | return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); | ||
| 31 | } | ||
| 32 | |||
| 27 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); | 33 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); |
| 28 | 34 | ||
| 29 | /* | 35 | /* |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 043f3283b71c..bc9afa74ee11 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -788,7 +788,7 @@ struct dmt_videomode { | |||
| 788 | 788 | ||
| 789 | extern const char *fb_mode_option; | 789 | extern const char *fb_mode_option; |
| 790 | extern const struct fb_videomode vesa_modes[]; | 790 | extern const struct fb_videomode vesa_modes[]; |
| 791 | extern const struct fb_videomode cea_modes[64]; | 791 | extern const struct fb_videomode cea_modes[65]; |
| 792 | extern const struct dmt_videomode dmt_modes[]; | 792 | extern const struct dmt_videomode dmt_modes[]; |
| 793 | 793 | ||
| 794 | struct fb_modelist { | 794 | struct fb_modelist { |
diff --git a/include/linux/fs.h b/include/linux/fs.h index b2f9b9c25e41..72d8a844c692 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -52,7 +52,6 @@ struct swap_info_struct; | |||
| 52 | struct seq_file; | 52 | struct seq_file; |
| 53 | struct workqueue_struct; | 53 | struct workqueue_struct; |
| 54 | struct iov_iter; | 54 | struct iov_iter; |
| 55 | struct vm_fault; | ||
| 56 | 55 | ||
| 57 | extern void __init inode_init(void); | 56 | extern void __init inode_init(void); |
| 58 | extern void __init inode_init_early(void); | 57 | extern void __init inode_init_early(void); |
| @@ -2678,19 +2677,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, | |||
| 2678 | extern int generic_file_open(struct inode * inode, struct file * filp); | 2677 | extern int generic_file_open(struct inode * inode, struct file * filp); |
| 2679 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 2678 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
| 2680 | 2679 | ||
| 2681 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, | ||
| 2682 | get_block_t, dio_iodone_t, int flags); | ||
| 2683 | int dax_clear_blocks(struct inode *, sector_t block, long size); | ||
| 2684 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | ||
| 2685 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | ||
| 2686 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, | ||
| 2687 | dax_iodone_t); | ||
| 2688 | int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, | ||
| 2689 | dax_iodone_t); | ||
| 2690 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | ||
| 2691 | #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) | ||
| 2692 | #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) | ||
| 2693 | |||
| 2694 | #ifdef CONFIG_BLOCK | 2680 | #ifdef CONFIG_BLOCK |
| 2695 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, | 2681 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, |
| 2696 | loff_t file_offset); | 2682 | loff_t file_offset); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index ad35f300b9a4..f92cbd2f4450 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -63,7 +63,10 @@ struct vm_area_struct; | |||
| 63 | * but it is definitely preferable to use the flag rather than opencode endless | 63 | * but it is definitely preferable to use the flag rather than opencode endless |
| 64 | * loop around allocator. | 64 | * loop around allocator. |
| 65 | * | 65 | * |
| 66 | * __GFP_NORETRY: The VM implementation must not retry indefinitely. | 66 | * __GFP_NORETRY: The VM implementation must not retry indefinitely and will |
| 67 | * return NULL when direct reclaim and memory compaction have failed to allow | ||
| 68 | * the allocation to succeed. The OOM killer is not called with the current | ||
| 69 | * implementation. | ||
| 67 | * | 70 | * |
| 68 | * __GFP_MOVABLE: Flag that this page will be movable by the page migration | 71 | * __GFP_MOVABLE: Flag that this page will be movable by the page migration |
| 69 | * mechanism or reclaimed | 72 | * mechanism or reclaimed |
| @@ -300,22 +303,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, | |||
| 300 | return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); | 303 | return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); |
| 301 | } | 304 | } |
| 302 | 305 | ||
| 303 | static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, | 306 | /* |
| 304 | unsigned int order) | 307 | * Allocate pages, preferring the node given as nid. The node must be valid and |
| 308 | * online. For more general interface, see alloc_pages_node(). | ||
| 309 | */ | ||
| 310 | static inline struct page * | ||
| 311 | __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) | ||
| 305 | { | 312 | { |
| 306 | /* Unknown node is current node */ | 313 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); |
| 307 | if (nid < 0) | 314 | VM_WARN_ON(!node_online(nid)); |
| 308 | nid = numa_node_id(); | ||
| 309 | 315 | ||
| 310 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); | 316 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); |
| 311 | } | 317 | } |
| 312 | 318 | ||
| 313 | static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, | 319 | /* |
| 320 | * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, | ||
| 321 | * prefer the current CPU's closest node. Otherwise node must be valid and | ||
| 322 | * online. | ||
| 323 | */ | ||
| 324 | static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, | ||
| 314 | unsigned int order) | 325 | unsigned int order) |
| 315 | { | 326 | { |
| 316 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); | 327 | if (nid == NUMA_NO_NODE) |
| 328 | nid = numa_mem_id(); | ||
| 317 | 329 | ||
| 318 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); | 330 | return __alloc_pages_node(nid, gfp_mask, order); |
| 319 | } | 331 | } |
| 320 | 332 | ||
| 321 | #ifdef CONFIG_NUMA | 333 | #ifdef CONFIG_NUMA |
| @@ -354,7 +366,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |||
| 354 | 366 | ||
| 355 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); | 367 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); |
| 356 | void free_pages_exact(void *virt, size_t size); | 368 | void free_pages_exact(void *virt, size_t size); |
| 357 | /* This is different from alloc_pages_exact_node !!! */ | ||
| 358 | void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); | 369 | void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); |
| 359 | 370 | ||
| 360 | #define __get_free_page(gfp_mask) \ | 371 | #define __get_free_page(gfp_mask) \ |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f10b20f05159..ecb080d6ff42 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
| @@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma, | |||
| 33 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 33 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
| 34 | unsigned long addr, pgprot_t newprot, | 34 | unsigned long addr, pgprot_t newprot, |
| 35 | int prot_numa); | 35 | int prot_numa); |
| 36 | int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, | ||
| 37 | unsigned long pfn, bool write); | ||
| 36 | 38 | ||
| 37 | enum transparent_hugepage_flag { | 39 | enum transparent_hugepage_flag { |
| 38 | TRANSPARENT_HUGEPAGE_FLAG, | 40 | TRANSPARENT_HUGEPAGE_FLAG, |
| @@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, | |||
| 122 | #endif | 124 | #endif |
| 123 | extern int hugepage_madvise(struct vm_area_struct *vma, | 125 | extern int hugepage_madvise(struct vm_area_struct *vma, |
| 124 | unsigned long *vm_flags, int advice); | 126 | unsigned long *vm_flags, int advice); |
| 125 | extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, | 127 | extern void vma_adjust_trans_huge(struct vm_area_struct *vma, |
| 126 | unsigned long start, | 128 | unsigned long start, |
| 127 | unsigned long end, | 129 | unsigned long end, |
| 128 | long adjust_next); | 130 | long adjust_next); |
| @@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, | |||
| 138 | else | 140 | else |
| 139 | return 0; | 141 | return 0; |
| 140 | } | 142 | } |
| 141 | static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, | ||
| 142 | unsigned long start, | ||
| 143 | unsigned long end, | ||
| 144 | long adjust_next) | ||
| 145 | { | ||
| 146 | if (!vma->anon_vma || vma->vm_ops) | ||
| 147 | return; | ||
| 148 | __vma_adjust_trans_huge(vma, start, end, adjust_next); | ||
| 149 | } | ||
| 150 | static inline int hpage_nr_pages(struct page *page) | 143 | static inline int hpage_nr_pages(struct page *page) |
| 151 | { | 144 | { |
| 152 | if (unlikely(PageTransHuge(page))) | 145 | if (unlikely(PageTransHuge(page))) |
| @@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page) | |||
| 164 | return ACCESS_ONCE(huge_zero_page) == page; | 157 | return ACCESS_ONCE(huge_zero_page) == page; |
| 165 | } | 158 | } |
| 166 | 159 | ||
| 160 | static inline bool is_huge_zero_pmd(pmd_t pmd) | ||
| 161 | { | ||
| 162 | return is_huge_zero_page(pmd_page(pmd)); | ||
| 163 | } | ||
| 164 | |||
| 165 | struct page *get_huge_zero_page(void); | ||
| 166 | |||
| 167 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | 167 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 168 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) | 168 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) |
| 169 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) | 169 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d891f949466a..5e35379f58a5 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -35,6 +35,9 @@ struct resv_map { | |||
| 35 | struct kref refs; | 35 | struct kref refs; |
| 36 | spinlock_t lock; | 36 | spinlock_t lock; |
| 37 | struct list_head regions; | 37 | struct list_head regions; |
| 38 | long adds_in_progress; | ||
| 39 | struct list_head region_cache; | ||
| 40 | long region_cache_count; | ||
| 38 | }; | 41 | }; |
| 39 | extern struct resv_map *resv_map_alloc(void); | 42 | extern struct resv_map *resv_map_alloc(void); |
| 40 | void resv_map_release(struct kref *ref); | 43 | void resv_map_release(struct kref *ref); |
| @@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 80 | int hugetlb_reserve_pages(struct inode *inode, long from, long to, | 83 | int hugetlb_reserve_pages(struct inode *inode, long from, long to, |
| 81 | struct vm_area_struct *vma, | 84 | struct vm_area_struct *vma, |
| 82 | vm_flags_t vm_flags); | 85 | vm_flags_t vm_flags); |
| 83 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); | 86 | long hugetlb_unreserve_pages(struct inode *inode, long start, long end, |
| 87 | long freed); | ||
| 84 | int dequeue_hwpoisoned_huge_page(struct page *page); | 88 | int dequeue_hwpoisoned_huge_page(struct page *page); |
| 85 | bool isolate_huge_page(struct page *page, struct list_head *list); | 89 | bool isolate_huge_page(struct page *page, struct list_head *list); |
| 86 | void putback_active_hugepage(struct page *page); | 90 | void putback_active_hugepage(struct page *page); |
| 87 | void free_huge_page(struct page *page); | 91 | void free_huge_page(struct page *page); |
| 92 | void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve); | ||
| 93 | extern struct mutex *hugetlb_fault_mutex_table; | ||
| 94 | u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, | ||
| 95 | struct vm_area_struct *vma, | ||
| 96 | struct address_space *mapping, | ||
| 97 | pgoff_t idx, unsigned long address); | ||
| 88 | 98 | ||
| 89 | #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE | 99 | #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE |
| 90 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); | 100 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); |
| @@ -320,9 +330,13 @@ struct huge_bootmem_page { | |||
| 320 | #endif | 330 | #endif |
| 321 | }; | 331 | }; |
| 322 | 332 | ||
| 333 | struct page *alloc_huge_page(struct vm_area_struct *vma, | ||
| 334 | unsigned long addr, int avoid_reserve); | ||
| 323 | struct page *alloc_huge_page_node(struct hstate *h, int nid); | 335 | struct page *alloc_huge_page_node(struct hstate *h, int nid); |
| 324 | struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, | 336 | struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, |
| 325 | unsigned long addr, int avoid_reserve); | 337 | unsigned long addr, int avoid_reserve); |
| 338 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, | ||
| 339 | pgoff_t idx); | ||
| 326 | 340 | ||
| 327 | /* arch callback */ | 341 | /* arch callback */ |
| 328 | int __init alloc_bootmem_huge_page(struct hstate *h); | 342 | int __init alloc_bootmem_huge_page(struct hstate *h); |
| @@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | |||
| 471 | 485 | ||
| 472 | #else /* CONFIG_HUGETLB_PAGE */ | 486 | #else /* CONFIG_HUGETLB_PAGE */ |
| 473 | struct hstate {}; | 487 | struct hstate {}; |
| 488 | #define alloc_huge_page(v, a, r) NULL | ||
| 474 | #define alloc_huge_page_node(h, nid) NULL | 489 | #define alloc_huge_page_node(h, nid) NULL |
| 475 | #define alloc_huge_page_noerr(v, a, r) NULL | 490 | #define alloc_huge_page_noerr(v, a, r) NULL |
| 476 | #define alloc_bootmem_huge_page(h) NULL | 491 | #define alloc_bootmem_huge_page(h) NULL |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e83a738a3b87..768063baafbf 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, | |||
| 121 | extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, | 121 | extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, |
| 122 | u8 command, u8 length, | 122 | u8 command, u8 length, |
| 123 | const u8 *values); | 123 | const u8 *values); |
| 124 | extern s32 | ||
| 125 | i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, | ||
| 126 | u8 command, u8 length, u8 *values); | ||
| 124 | #endif /* I2C */ | 127 | #endif /* I2C */ |
| 125 | 128 | ||
| 126 | /** | 129 | /** |
| @@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *); | |||
| 550 | void i2c_unlock_adapter(struct i2c_adapter *); | 553 | void i2c_unlock_adapter(struct i2c_adapter *); |
| 551 | 554 | ||
| 552 | /*flags for the client struct: */ | 555 | /*flags for the client struct: */ |
| 553 | #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ | 556 | #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ |
| 554 | #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ | 557 | #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ |
| 555 | /* Must equal I2C_M_TEN below */ | 558 | /* Must equal I2C_M_TEN below */ |
| 556 | #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ | 559 | #define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ |
| 557 | #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ | 560 | #define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ |
| 561 | #define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ | ||
| 558 | /* Must match I2C_M_STOP|IGNORE_NAK */ | 562 | /* Must match I2C_M_STOP|IGNORE_NAK */ |
| 559 | 563 | ||
| 560 | /* i2c adapter classes (bitmask) */ | 564 | /* i2c adapter classes (bitmask) */ |
| @@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); | |||
| 638 | /* must call put_device() when done with returned i2c_adapter device */ | 642 | /* must call put_device() when done with returned i2c_adapter device */ |
| 639 | extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); | 643 | extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); |
| 640 | 644 | ||
| 645 | /* must call i2c_put_adapter() when done with returned i2c_adapter device */ | ||
| 646 | struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); | ||
| 641 | #else | 647 | #else |
| 642 | 648 | ||
| 643 | static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) | 649 | static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) |
| @@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node | |||
| 649 | { | 655 | { |
| 650 | return NULL; | 656 | return NULL; |
| 651 | } | 657 | } |
| 658 | |||
| 659 | static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node) | ||
| 660 | { | ||
| 661 | return NULL; | ||
| 662 | } | ||
| 652 | #endif /* CONFIG_OF */ | 663 | #endif /* CONFIG_OF */ |
| 653 | 664 | ||
| 654 | #endif /* _LINUX_I2C_H */ | 665 | #endif /* _LINUX_I2C_H */ |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d9a366d24e3b..6240063bdcac 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -344,7 +344,7 @@ struct intel_iommu { | |||
| 344 | 344 | ||
| 345 | #ifdef CONFIG_INTEL_IOMMU | 345 | #ifdef CONFIG_INTEL_IOMMU |
| 346 | unsigned long *domain_ids; /* bitmap of domains */ | 346 | unsigned long *domain_ids; /* bitmap of domains */ |
| 347 | struct dmar_domain **domains; /* ptr to domains */ | 347 | struct dmar_domain ***domains; /* ptr to domains */ |
| 348 | spinlock_t lock; /* protect context, domain ids */ | 348 | spinlock_t lock; /* protect context, domain ids */ |
| 349 | struct root_entry *root_entry; /* virtual address */ | 349 | struct root_entry *root_entry; /* virtual address */ |
| 350 | 350 | ||
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index c27dde7215b5..e399029b68c5 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | #include <linux/bug.h> | 23 | #include <linux/bug.h> |
| 24 | #include <asm/io.h> | 24 | #include <linux/io.h> |
| 25 | #include <asm/page.h> | 25 | #include <asm/page.h> |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
diff --git a/include/linux/io.h b/include/linux/io.h index fb5a99800e77..de64c1e53612 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
| @@ -20,10 +20,13 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
| 23 | #include <linux/bug.h> | ||
| 24 | #include <linux/err.h> | ||
| 23 | #include <asm/io.h> | 25 | #include <asm/io.h> |
| 24 | #include <asm/page.h> | 26 | #include <asm/page.h> |
| 25 | 27 | ||
| 26 | struct device; | 28 | struct device; |
| 29 | struct resource; | ||
| 27 | 30 | ||
| 28 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); | 31 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); |
| 29 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); | 32 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); |
| @@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr, | |||
| 80 | const unsigned char *signature, int length); | 83 | const unsigned char *signature, int length); |
| 81 | void devm_ioremap_release(struct device *dev, void *res); | 84 | void devm_ioremap_release(struct device *dev, void *res); |
| 82 | 85 | ||
| 86 | void *devm_memremap(struct device *dev, resource_size_t offset, | ||
| 87 | size_t size, unsigned long flags); | ||
| 88 | void devm_memunmap(struct device *dev, void *addr); | ||
| 89 | |||
| 90 | void *__devm_memremap_pages(struct device *dev, struct resource *res); | ||
| 91 | |||
| 92 | #ifdef CONFIG_ZONE_DEVICE | ||
| 93 | void *devm_memremap_pages(struct device *dev, struct resource *res); | ||
| 94 | #else | ||
| 95 | static inline void *devm_memremap_pages(struct device *dev, struct resource *res) | ||
| 96 | { | ||
| 97 | /* | ||
| 98 | * Fail attempts to call devm_memremap_pages() without | ||
| 99 | * ZONE_DEVICE support enabled, this requires callers to fall | ||
| 100 | * back to plain devm_memremap() based on config | ||
| 101 | */ | ||
| 102 | WARN_ON_ONCE(1); | ||
| 103 | return ERR_PTR(-ENXIO); | ||
| 104 | } | ||
| 105 | #endif | ||
| 106 | |||
| 83 | /* | 107 | /* |
| 84 | * Some systems do not have legacy ISA devices. | 108 | * Some systems do not have legacy ISA devices. |
| 85 | * /dev/port is not a valid interface on these systems. | 109 | * /dev/port is not a valid interface on these systems. |
| @@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle) | |||
| 121 | #endif | 145 | #endif |
| 122 | #endif | 146 | #endif |
| 123 | 147 | ||
| 148 | enum { | ||
| 149 | /* See memremap() kernel-doc for usage description... */ | ||
| 150 | MEMREMAP_WB = 1 << 0, | ||
| 151 | MEMREMAP_WT = 1 << 1, | ||
| 152 | }; | ||
| 153 | |||
| 154 | void *memremap(resource_size_t offset, size_t size, unsigned long flags); | ||
| 155 | void memunmap(void *addr); | ||
| 156 | |||
| 124 | #endif /* _LINUX_IO_H */ | 157 | #endif /* _LINUX_IO_H */ |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 0b1e569f5ff5..f8cea14485dd 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
| @@ -115,6 +115,11 @@ struct ipmi_smi_handlers { | |||
| 115 | implement it. */ | 115 | implement it. */ |
| 116 | void (*set_need_watch)(void *send_info, bool enable); | 116 | void (*set_need_watch)(void *send_info, bool enable); |
| 117 | 117 | ||
| 118 | /* | ||
| 119 | * Called when flushing all pending messages. | ||
| 120 | */ | ||
| 121 | void (*flush_messages)(void *send_info); | ||
| 122 | |||
| 118 | /* Called when the interface should go into "run to | 123 | /* Called when the interface should go into "run to |
| 119 | completion" mode. If this call sets the value to true, the | 124 | completion" mode. If this call sets the value to true, the |
| 120 | interface should make sure that all messages are flushed | 125 | interface should make sure that all messages are flushed |
| @@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data, | |||
| 207 | upper layer until the start_processing() function in the handlers | 212 | upper layer until the start_processing() function in the handlers |
| 208 | is called, and the lower layer must get the interface from that | 213 | is called, and the lower layer must get the interface from that |
| 209 | call. */ | 214 | call. */ |
| 210 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 215 | int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, |
| 211 | void *send_info, | 216 | void *send_info, |
| 212 | struct ipmi_device_id *device_id, | 217 | struct ipmi_device_id *device_id, |
| 213 | struct device *dev, | 218 | struct device *dev, |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index bf982e021fbd..9eeeb9589acf 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -104,6 +104,8 @@ | |||
| 104 | #define GICR_SYNCR 0x00C0 | 104 | #define GICR_SYNCR 0x00C0 |
| 105 | #define GICR_MOVLPIR 0x0100 | 105 | #define GICR_MOVLPIR 0x0100 |
| 106 | #define GICR_MOVALLR 0x0110 | 106 | #define GICR_MOVALLR 0x0110 |
| 107 | #define GICR_ISACTIVER GICD_ISACTIVER | ||
| 108 | #define GICR_ICACTIVER GICD_ICACTIVER | ||
| 107 | #define GICR_IDREGS GICD_IDREGS | 109 | #define GICR_IDREGS GICD_IDREGS |
| 108 | #define GICR_PIDR2 GICD_PIDR2 | 110 | #define GICR_PIDR2 GICD_PIDR2 |
| 109 | 111 | ||
| @@ -268,9 +270,12 @@ | |||
| 268 | 270 | ||
| 269 | #define ICH_LR_EOI (1UL << 41) | 271 | #define ICH_LR_EOI (1UL << 41) |
| 270 | #define ICH_LR_GROUP (1UL << 60) | 272 | #define ICH_LR_GROUP (1UL << 60) |
| 273 | #define ICH_LR_HW (1UL << 61) | ||
| 271 | #define ICH_LR_STATE (3UL << 62) | 274 | #define ICH_LR_STATE (3UL << 62) |
| 272 | #define ICH_LR_PENDING_BIT (1UL << 62) | 275 | #define ICH_LR_PENDING_BIT (1UL << 62) |
| 273 | #define ICH_LR_ACTIVE_BIT (1UL << 63) | 276 | #define ICH_LR_ACTIVE_BIT (1UL << 63) |
| 277 | #define ICH_LR_PHYS_ID_SHIFT 32 | ||
| 278 | #define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) | ||
| 274 | 279 | ||
| 275 | #define ICH_MISR_EOI (1 << 0) | 280 | #define ICH_MISR_EOI (1 << 0) |
| 276 | #define ICH_MISR_U (1 << 1) | 281 | #define ICH_MISR_U (1 << 1) |
| @@ -288,6 +293,7 @@ | |||
| 288 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) | 293 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) |
| 289 | 294 | ||
| 290 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) | 295 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) |
| 296 | #define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) | ||
| 291 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) | 297 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) |
| 292 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) | 298 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) |
| 293 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) | 299 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) |
| @@ -385,6 +391,12 @@ static inline void gic_write_eoir(u64 irq) | |||
| 385 | isb(); | 391 | isb(); |
| 386 | } | 392 | } |
| 387 | 393 | ||
| 394 | static inline void gic_write_dir(u64 irq) | ||
| 395 | { | ||
| 396 | asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); | ||
| 397 | isb(); | ||
| 398 | } | ||
| 399 | |||
| 388 | struct irq_domain; | 400 | struct irq_domain; |
| 389 | int its_cpu_init(void); | 401 | int its_cpu_init(void); |
| 390 | int its_init(struct device_node *node, struct rdists *rdists, | 402 | int its_init(struct device_node *node, struct rdists *rdists, |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 65da435d01c1..b8901dfd9e95 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
| @@ -20,9 +20,13 @@ | |||
| 20 | #define GIC_CPU_ALIAS_BINPOINT 0x1c | 20 | #define GIC_CPU_ALIAS_BINPOINT 0x1c |
| 21 | #define GIC_CPU_ACTIVEPRIO 0xd0 | 21 | #define GIC_CPU_ACTIVEPRIO 0xd0 |
| 22 | #define GIC_CPU_IDENT 0xfc | 22 | #define GIC_CPU_IDENT 0xfc |
| 23 | #define GIC_CPU_DEACTIVATE 0x1000 | ||
| 23 | 24 | ||
| 24 | #define GICC_ENABLE 0x1 | 25 | #define GICC_ENABLE 0x1 |
| 25 | #define GICC_INT_PRI_THRESHOLD 0xf0 | 26 | #define GICC_INT_PRI_THRESHOLD 0xf0 |
| 27 | |||
| 28 | #define GIC_CPU_CTRL_EOImodeNS (1 << 9) | ||
| 29 | |||
| 26 | #define GICC_IAR_INT_ID_MASK 0x3ff | 30 | #define GICC_IAR_INT_ID_MASK 0x3ff |
| 27 | #define GICC_INT_SPURIOUS 1023 | 31 | #define GICC_INT_SPURIOUS 1023 |
| 28 | #define GICC_DIS_BYPASS_MASK 0x1e0 | 32 | #define GICC_DIS_BYPASS_MASK 0x1e0 |
| @@ -71,11 +75,12 @@ | |||
| 71 | 75 | ||
| 72 | #define GICH_LR_VIRTUALID (0x3ff << 0) | 76 | #define GICH_LR_VIRTUALID (0x3ff << 0) |
| 73 | #define GICH_LR_PHYSID_CPUID_SHIFT (10) | 77 | #define GICH_LR_PHYSID_CPUID_SHIFT (10) |
| 74 | #define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT) | 78 | #define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT) |
| 75 | #define GICH_LR_STATE (3 << 28) | 79 | #define GICH_LR_STATE (3 << 28) |
| 76 | #define GICH_LR_PENDING_BIT (1 << 28) | 80 | #define GICH_LR_PENDING_BIT (1 << 28) |
| 77 | #define GICH_LR_ACTIVE_BIT (1 << 29) | 81 | #define GICH_LR_ACTIVE_BIT (1 << 29) |
| 78 | #define GICH_LR_EOI (1 << 19) | 82 | #define GICH_LR_EOI (1 << 19) |
| 83 | #define GICH_LR_HW (1 << 31) | ||
| 79 | 84 | ||
| 80 | #define GICH_VMCR_CTRL_SHIFT 0 | 85 | #define GICH_VMCR_CTRL_SHIFT 0 |
| 81 | #define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) | 86 | #define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) |
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 123be25ea15a..5d4e9c4b821d 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
| @@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn) | |||
| 266 | } | 266 | } |
| 267 | 267 | ||
| 268 | int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); | 268 | int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); |
| 269 | size_t kernfs_path_len(struct kernfs_node *kn); | ||
| 269 | char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, | 270 | char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, |
| 270 | size_t buflen); | 271 | size_t buflen); |
| 271 | void pr_cont_kernfs_name(struct kernfs_node *kn); | 272 | void pr_cont_kernfs_name(struct kernfs_node *kn); |
| @@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn) | |||
| 332 | static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) | 333 | static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) |
| 333 | { return -ENOSYS; } | 334 | { return -ENOSYS; } |
| 334 | 335 | ||
| 336 | static inline size_t kernfs_path_len(struct kernfs_node *kn) | ||
| 337 | { return 0; } | ||
| 338 | |||
| 335 | static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, | 339 | static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, |
| 336 | size_t buflen) | 340 | size_t buflen) |
| 337 | { return NULL; } | 341 | { return NULL; } |
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index b63218f68c4b..d140b1e9faa7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | #include <uapi/linux/kexec.h> | 17 | #include <uapi/linux/kexec.h> |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_KEXEC | 19 | #ifdef CONFIG_KEXEC_CORE |
| 20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
| 21 | #include <linux/linkage.h> | 21 | #include <linux/linkage.h> |
| 22 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
| @@ -318,13 +318,24 @@ int crash_shrink_memory(unsigned long new_size); | |||
| 318 | size_t crash_get_memory_size(void); | 318 | size_t crash_get_memory_size(void); |
| 319 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); | 319 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); |
| 320 | 320 | ||
| 321 | #else /* !CONFIG_KEXEC */ | 321 | int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, |
| 322 | unsigned long buf_len); | ||
| 323 | void * __weak arch_kexec_kernel_image_load(struct kimage *image); | ||
| 324 | int __weak arch_kimage_file_post_load_cleanup(struct kimage *image); | ||
| 325 | int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, | ||
| 326 | unsigned long buf_len); | ||
| 327 | int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, | ||
| 328 | Elf_Shdr *sechdrs, unsigned int relsec); | ||
| 329 | int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | ||
| 330 | unsigned int relsec); | ||
| 331 | |||
| 332 | #else /* !CONFIG_KEXEC_CORE */ | ||
| 322 | struct pt_regs; | 333 | struct pt_regs; |
| 323 | struct task_struct; | 334 | struct task_struct; |
| 324 | static inline void crash_kexec(struct pt_regs *regs) { } | 335 | static inline void crash_kexec(struct pt_regs *regs) { } |
| 325 | static inline int kexec_should_crash(struct task_struct *p) { return 0; } | 336 | static inline int kexec_should_crash(struct task_struct *p) { return 0; } |
| 326 | #define kexec_in_progress false | 337 | #define kexec_in_progress false |
| 327 | #endif /* CONFIG_KEXEC */ | 338 | #endif /* CONFIG_KEXEC_CORE */ |
| 328 | 339 | ||
| 329 | #endif /* !defined(__ASSEBMLY__) */ | 340 | #endif /* !defined(__ASSEBMLY__) */ |
| 330 | 341 | ||
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 0555cc66a15b..fcfd2bf14d3f 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
| @@ -85,8 +85,6 @@ enum umh_disable_depth { | |||
| 85 | UMH_DISABLED, | 85 | UMH_DISABLED, |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | extern void usermodehelper_init(void); | ||
| 89 | |||
| 90 | extern int __usermodehelper_disable(enum umh_disable_depth depth); | 88 | extern int __usermodehelper_disable(enum umh_disable_depth depth); |
| 91 | extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); | 89 | extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); |
| 92 | 90 | ||
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 81089cf1f0c1..1bef9e21e725 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -242,6 +242,7 @@ struct kvm_vcpu { | |||
| 242 | int sigset_active; | 242 | int sigset_active; |
| 243 | sigset_t sigset; | 243 | sigset_t sigset; |
| 244 | struct kvm_vcpu_stat stat; | 244 | struct kvm_vcpu_stat stat; |
| 245 | unsigned int halt_poll_ns; | ||
| 245 | 246 | ||
| 246 | #ifdef CONFIG_HAS_IOMEM | 247 | #ifdef CONFIG_HAS_IOMEM |
| 247 | int mmio_needed; | 248 | int mmio_needed; |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 75e3af01ee32..3f021dc5da8c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
| @@ -31,6 +31,9 @@ enum { | |||
| 31 | ND_CMD_ARS_STATUS_MAX = SZ_4K, | 31 | ND_CMD_ARS_STATUS_MAX = SZ_4K, |
| 32 | ND_MAX_MAPPINGS = 32, | 32 | ND_MAX_MAPPINGS = 32, |
| 33 | 33 | ||
| 34 | /* region flag indicating to direct-map persistent memory by default */ | ||
| 35 | ND_REGION_PAGEMAP = 0, | ||
| 36 | |||
| 34 | /* mark newly adjusted resources as requiring a label update */ | 37 | /* mark newly adjusted resources as requiring a label update */ |
| 35 | DPA_RESOURCE_ADJUSTED = 1 << 0, | 38 | DPA_RESOURCE_ADJUSTED = 1 << 0, |
| 36 | }; | 39 | }; |
| @@ -91,6 +94,7 @@ struct nd_region_desc { | |||
| 91 | void *provider_data; | 94 | void *provider_data; |
| 92 | int num_lanes; | 95 | int num_lanes; |
| 93 | int numa_node; | 96 | int numa_node; |
| 97 | unsigned long flags; | ||
| 94 | }; | 98 | }; |
| 95 | 99 | ||
| 96 | struct nvdimm_bus; | 100 | struct nvdimm_bus; |
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 1cc89e9df480..ffb9c9da4f39 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h | |||
| @@ -40,6 +40,11 @@ struct lsm_network_audit { | |||
| 40 | } fam; | 40 | } fam; |
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | struct lsm_ioctlop_audit { | ||
| 44 | struct path path; | ||
| 45 | u16 cmd; | ||
| 46 | }; | ||
| 47 | |||
| 43 | /* Auxiliary data to use in generating the audit record. */ | 48 | /* Auxiliary data to use in generating the audit record. */ |
| 44 | struct common_audit_data { | 49 | struct common_audit_data { |
| 45 | char type; | 50 | char type; |
| @@ -53,6 +58,7 @@ struct common_audit_data { | |||
| 53 | #define LSM_AUDIT_DATA_KMOD 8 | 58 | #define LSM_AUDIT_DATA_KMOD 8 |
| 54 | #define LSM_AUDIT_DATA_INODE 9 | 59 | #define LSM_AUDIT_DATA_INODE 9 |
| 55 | #define LSM_AUDIT_DATA_DENTRY 10 | 60 | #define LSM_AUDIT_DATA_DENTRY 10 |
| 61 | #define LSM_AUDIT_DATA_IOCTL_OP 11 | ||
| 56 | union { | 62 | union { |
| 57 | struct path path; | 63 | struct path path; |
| 58 | struct dentry *dentry; | 64 | struct dentry *dentry; |
| @@ -68,6 +74,7 @@ struct common_audit_data { | |||
| 68 | } key_struct; | 74 | } key_struct; |
| 69 | #endif | 75 | #endif |
| 70 | char *kmod_name; | 76 | char *kmod_name; |
| 77 | struct lsm_ioctlop_audit *op; | ||
| 71 | } u; | 78 | } u; |
| 72 | /* this union contains LSM specific data */ | 79 | /* this union contains LSM specific data */ |
| 73 | union { | 80 | union { |
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 9429f054c323..ec3a6bab29de 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h | |||
| @@ -1881,8 +1881,10 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, | |||
| 1881 | 1881 | ||
| 1882 | extern int __init security_module_enable(const char *module); | 1882 | extern int __init security_module_enable(const char *module); |
| 1883 | extern void __init capability_add_hooks(void); | 1883 | extern void __init capability_add_hooks(void); |
| 1884 | #ifdef CONFIG_SECURITY_YAMA_STACKED | 1884 | #ifdef CONFIG_SECURITY_YAMA |
| 1885 | void __init yama_add_hooks(void); | 1885 | extern void __init yama_add_hooks(void); |
| 1886 | #else | ||
| 1887 | static inline void __init yama_add_hooks(void) { } | ||
| 1886 | #endif | 1888 | #endif |
| 1887 | 1889 | ||
| 1888 | #endif /* ! __LINUX_LSM_HOOKS_H */ | 1890 | #endif /* ! __LINUX_LSM_HOOKS_H */ |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cc4b01972060..c518eb589260 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size); | |||
| 77 | int memblock_free(phys_addr_t base, phys_addr_t size); | 77 | int memblock_free(phys_addr_t base, phys_addr_t size); |
| 78 | int memblock_reserve(phys_addr_t base, phys_addr_t size); | 78 | int memblock_reserve(phys_addr_t base, phys_addr_t size); |
| 79 | void memblock_trim_memory(phys_addr_t align); | 79 | void memblock_trim_memory(phys_addr_t align); |
| 80 | bool memblock_overlaps_region(struct memblock_type *type, | ||
| 81 | phys_addr_t base, phys_addr_t size); | ||
| 80 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); | 82 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); |
| 81 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); | 83 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); |
| 82 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); | 84 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); |
| @@ -323,7 +325,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit); | |||
| 323 | int memblock_is_memory(phys_addr_t addr); | 325 | int memblock_is_memory(phys_addr_t addr); |
| 324 | int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | 326 | int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
| 325 | int memblock_is_reserved(phys_addr_t addr); | 327 | int memblock_is_reserved(phys_addr_t addr); |
| 326 | int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | 328 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
| 327 | 329 | ||
| 328 | extern void __memblock_dump_all(void); | 330 | extern void __memblock_dump_all(void); |
| 329 | 331 | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 73b02b0a8f60..ad800e62cb7a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -23,6 +23,11 @@ | |||
| 23 | #include <linux/vm_event_item.h> | 23 | #include <linux/vm_event_item.h> |
| 24 | #include <linux/hardirq.h> | 24 | #include <linux/hardirq.h> |
| 25 | #include <linux/jump_label.h> | 25 | #include <linux/jump_label.h> |
| 26 | #include <linux/page_counter.h> | ||
| 27 | #include <linux/vmpressure.h> | ||
| 28 | #include <linux/eventfd.h> | ||
| 29 | #include <linux/mmzone.h> | ||
| 30 | #include <linux/writeback.h> | ||
| 26 | 31 | ||
| 27 | struct mem_cgroup; | 32 | struct mem_cgroup; |
| 28 | struct page; | 33 | struct page; |
| @@ -67,12 +72,221 @@ enum mem_cgroup_events_index { | |||
| 67 | MEMCG_NR_EVENTS, | 72 | MEMCG_NR_EVENTS, |
| 68 | }; | 73 | }; |
| 69 | 74 | ||
| 75 | /* | ||
| 76 | * Per memcg event counter is incremented at every pagein/pageout. With THP, | ||
| 77 | * it will be incremented by the number of pages. This counter is used for | ||
| 78 | * triggering some periodic events. This is straightforward and better | ||
| 79 | * than using jiffies etc. to handle periodic memcg event. | ||
| 80 | */ | ||
| 81 | enum mem_cgroup_events_target { | ||
| 82 | MEM_CGROUP_TARGET_THRESH, | ||
| 83 | MEM_CGROUP_TARGET_SOFTLIMIT, | ||
| 84 | MEM_CGROUP_TARGET_NUMAINFO, | ||
| 85 | MEM_CGROUP_NTARGETS, | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Bits in struct cg_proto.flags | ||
| 90 | */ | ||
| 91 | enum cg_proto_flags { | ||
| 92 | /* Currently active and new sockets should be assigned to cgroups */ | ||
| 93 | MEMCG_SOCK_ACTIVE, | ||
| 94 | /* It was ever activated; we must disarm static keys on destruction */ | ||
| 95 | MEMCG_SOCK_ACTIVATED, | ||
| 96 | }; | ||
| 97 | |||
| 98 | struct cg_proto { | ||
| 99 | struct page_counter memory_allocated; /* Current allocated memory. */ | ||
| 100 | struct percpu_counter sockets_allocated; /* Current number of sockets. */ | ||
| 101 | int memory_pressure; | ||
| 102 | long sysctl_mem[3]; | ||
| 103 | unsigned long flags; | ||
| 104 | /* | ||
| 105 | * memcg field is used to find which memcg we belong directly | ||
| 106 | * Each memcg struct can hold more than one cg_proto, so container_of | ||
| 107 | * won't really cut. | ||
| 108 | * | ||
| 109 | * The elegant solution would be having an inverse function to | ||
| 110 | * proto_cgroup in struct proto, but that means polluting the structure | ||
| 111 | * for everybody, instead of just for memcg users. | ||
| 112 | */ | ||
| 113 | struct mem_cgroup *memcg; | ||
| 114 | }; | ||
| 115 | |||
| 70 | #ifdef CONFIG_MEMCG | 116 | #ifdef CONFIG_MEMCG |
| 117 | struct mem_cgroup_stat_cpu { | ||
| 118 | long count[MEM_CGROUP_STAT_NSTATS]; | ||
| 119 | unsigned long events[MEMCG_NR_EVENTS]; | ||
| 120 | unsigned long nr_page_events; | ||
| 121 | unsigned long targets[MEM_CGROUP_NTARGETS]; | ||
| 122 | }; | ||
| 123 | |||
| 124 | struct mem_cgroup_reclaim_iter { | ||
| 125 | struct mem_cgroup *position; | ||
| 126 | /* scan generation, increased every round-trip */ | ||
| 127 | unsigned int generation; | ||
| 128 | }; | ||
| 129 | |||
| 130 | /* | ||
| 131 | * per-zone information in memory controller. | ||
| 132 | */ | ||
| 133 | struct mem_cgroup_per_zone { | ||
| 134 | struct lruvec lruvec; | ||
| 135 | unsigned long lru_size[NR_LRU_LISTS]; | ||
| 136 | |||
| 137 | struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; | ||
| 138 | |||
| 139 | struct rb_node tree_node; /* RB tree node */ | ||
| 140 | unsigned long usage_in_excess;/* Set to the value by which */ | ||
| 141 | /* the soft limit is exceeded*/ | ||
| 142 | bool on_tree; | ||
| 143 | struct mem_cgroup *memcg; /* Back pointer, we cannot */ | ||
| 144 | /* use container_of */ | ||
| 145 | }; | ||
| 146 | |||
| 147 | struct mem_cgroup_per_node { | ||
| 148 | struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; | ||
| 149 | }; | ||
| 150 | |||
| 151 | struct mem_cgroup_threshold { | ||
| 152 | struct eventfd_ctx *eventfd; | ||
| 153 | unsigned long threshold; | ||
| 154 | }; | ||
| 155 | |||
| 156 | /* For threshold */ | ||
| 157 | struct mem_cgroup_threshold_ary { | ||
| 158 | /* An array index points to threshold just below or equal to usage. */ | ||
| 159 | int current_threshold; | ||
| 160 | /* Size of entries[] */ | ||
| 161 | unsigned int size; | ||
| 162 | /* Array of thresholds */ | ||
| 163 | struct mem_cgroup_threshold entries[0]; | ||
| 164 | }; | ||
| 165 | |||
| 166 | struct mem_cgroup_thresholds { | ||
| 167 | /* Primary thresholds array */ | ||
| 168 | struct mem_cgroup_threshold_ary *primary; | ||
| 169 | /* | ||
| 170 | * Spare threshold array. | ||
| 171 | * This is needed to make mem_cgroup_unregister_event() "never fail". | ||
| 172 | * It must be able to store at least primary->size - 1 entries. | ||
| 173 | */ | ||
| 174 | struct mem_cgroup_threshold_ary *spare; | ||
| 175 | }; | ||
| 176 | |||
| 177 | /* | ||
| 178 | * The memory controller data structure. The memory controller controls both | ||
| 179 | * page cache and RSS per cgroup. We would eventually like to provide | ||
| 180 | * statistics based on the statistics developed by Rik Van Riel for clock-pro, | ||
| 181 | * to help the administrator determine what knobs to tune. | ||
| 182 | */ | ||
| 183 | struct mem_cgroup { | ||
| 184 | struct cgroup_subsys_state css; | ||
| 185 | |||
| 186 | /* Accounted resources */ | ||
| 187 | struct page_counter memory; | ||
| 188 | struct page_counter memsw; | ||
| 189 | struct page_counter kmem; | ||
| 190 | |||
| 191 | /* Normal memory consumption range */ | ||
| 192 | unsigned long low; | ||
| 193 | unsigned long high; | ||
| 194 | |||
| 195 | unsigned long soft_limit; | ||
| 196 | |||
| 197 | /* vmpressure notifications */ | ||
| 198 | struct vmpressure vmpressure; | ||
| 199 | |||
| 200 | /* css_online() has been completed */ | ||
| 201 | int initialized; | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Should the accounting and control be hierarchical, per subtree? | ||
| 205 | */ | ||
| 206 | bool use_hierarchy; | ||
| 207 | |||
| 208 | /* protected by memcg_oom_lock */ | ||
| 209 | bool oom_lock; | ||
| 210 | int under_oom; | ||
| 211 | |||
| 212 | int swappiness; | ||
| 213 | /* OOM-Killer disable */ | ||
| 214 | int oom_kill_disable; | ||
| 215 | |||
| 216 | /* protect arrays of thresholds */ | ||
| 217 | struct mutex thresholds_lock; | ||
| 218 | |||
| 219 | /* thresholds for memory usage. RCU-protected */ | ||
| 220 | struct mem_cgroup_thresholds thresholds; | ||
| 221 | |||
| 222 | /* thresholds for mem+swap usage. RCU-protected */ | ||
| 223 | struct mem_cgroup_thresholds memsw_thresholds; | ||
| 224 | |||
| 225 | /* For oom notifier event fd */ | ||
| 226 | struct list_head oom_notify; | ||
| 227 | |||
| 228 | /* | ||
| 229 | * Should we move charges of a task when a task is moved into this | ||
| 230 | * mem_cgroup ? And what type of charges should we move ? | ||
| 231 | */ | ||
| 232 | unsigned long move_charge_at_immigrate; | ||
| 233 | /* | ||
| 234 | * set > 0 if pages under this cgroup are moving to other cgroup. | ||
| 235 | */ | ||
| 236 | atomic_t moving_account; | ||
| 237 | /* taken only while moving_account > 0 */ | ||
| 238 | spinlock_t move_lock; | ||
| 239 | struct task_struct *move_lock_task; | ||
| 240 | unsigned long move_lock_flags; | ||
| 241 | /* | ||
| 242 | * percpu counter. | ||
| 243 | */ | ||
| 244 | struct mem_cgroup_stat_cpu __percpu *stat; | ||
| 245 | spinlock_t pcp_counter_lock; | ||
| 246 | |||
| 247 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | ||
| 248 | struct cg_proto tcp_mem; | ||
| 249 | #endif | ||
| 250 | #if defined(CONFIG_MEMCG_KMEM) | ||
| 251 | /* Index in the kmem_cache->memcg_params.memcg_caches array */ | ||
| 252 | int kmemcg_id; | ||
| 253 | bool kmem_acct_activated; | ||
| 254 | bool kmem_acct_active; | ||
| 255 | #endif | ||
| 256 | |||
| 257 | int last_scanned_node; | ||
| 258 | #if MAX_NUMNODES > 1 | ||
| 259 | nodemask_t scan_nodes; | ||
| 260 | atomic_t numainfo_events; | ||
| 261 | atomic_t numainfo_updating; | ||
| 262 | #endif | ||
| 263 | |||
| 264 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 265 | struct list_head cgwb_list; | ||
| 266 | struct wb_domain cgwb_domain; | ||
| 267 | #endif | ||
| 268 | |||
| 269 | /* List of events which userspace want to receive */ | ||
| 270 | struct list_head event_list; | ||
| 271 | spinlock_t event_list_lock; | ||
| 272 | |||
| 273 | struct mem_cgroup_per_node *nodeinfo[0]; | ||
| 274 | /* WARNING: nodeinfo must be the last member here */ | ||
| 275 | }; | ||
| 71 | extern struct cgroup_subsys_state *mem_cgroup_root_css; | 276 | extern struct cgroup_subsys_state *mem_cgroup_root_css; |
| 72 | 277 | ||
| 73 | void mem_cgroup_events(struct mem_cgroup *memcg, | 278 | /** |
| 279 | * mem_cgroup_events - count memory events against a cgroup | ||
| 280 | * @memcg: the memory cgroup | ||
| 281 | * @idx: the event index | ||
| 282 | * @nr: the number of events to account for | ||
| 283 | */ | ||
| 284 | static inline void mem_cgroup_events(struct mem_cgroup *memcg, | ||
| 74 | enum mem_cgroup_events_index idx, | 285 | enum mem_cgroup_events_index idx, |
| 75 | unsigned int nr); | 286 | unsigned int nr) |
| 287 | { | ||
| 288 | this_cpu_add(memcg->stat->events[idx], nr); | ||
| 289 | } | ||
| 76 | 290 | ||
| 77 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); | 291 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); |
| 78 | 292 | ||
| @@ -90,15 +304,29 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, | |||
| 90 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); | 304 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); |
| 91 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); | 305 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); |
| 92 | 306 | ||
| 93 | bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, | ||
| 94 | struct mem_cgroup *root); | ||
| 95 | bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); | 307 | bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); |
| 308 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | ||
| 309 | struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | ||
| 96 | 310 | ||
| 97 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | 311 | static inline |
| 98 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 312 | struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ |
| 313 | return css ? container_of(css, struct mem_cgroup, css) : NULL; | ||
| 314 | } | ||
| 99 | 315 | ||
| 100 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | 316 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, |
| 101 | extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); | 317 | struct mem_cgroup *, |
| 318 | struct mem_cgroup_reclaim_cookie *); | ||
| 319 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); | ||
| 320 | |||
| 321 | static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, | ||
| 322 | struct mem_cgroup *root) | ||
| 323 | { | ||
| 324 | if (root == memcg) | ||
| 325 | return true; | ||
| 326 | if (!root->use_hierarchy) | ||
| 327 | return false; | ||
| 328 | return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); | ||
| 329 | } | ||
| 102 | 330 | ||
| 103 | static inline bool mm_match_cgroup(struct mm_struct *mm, | 331 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
| 104 | struct mem_cgroup *memcg) | 332 | struct mem_cgroup *memcg) |
| @@ -114,24 +342,68 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, | |||
| 114 | return match; | 342 | return match; |
| 115 | } | 343 | } |
| 116 | 344 | ||
| 117 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); | 345 | struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); |
| 118 | extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); | 346 | ino_t page_cgroup_ino(struct page *page); |
| 119 | 347 | ||
| 120 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, | 348 | static inline bool mem_cgroup_disabled(void) |
| 121 | struct mem_cgroup *, | 349 | { |
| 122 | struct mem_cgroup_reclaim_cookie *); | 350 | if (memory_cgrp_subsys.disabled) |
| 123 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); | 351 | return true; |
| 352 | return false; | ||
| 353 | } | ||
| 124 | 354 | ||
| 125 | /* | 355 | /* |
| 126 | * For memory reclaim. | 356 | * For memory reclaim. |
| 127 | */ | 357 | */ |
| 128 | int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec); | ||
| 129 | bool mem_cgroup_lruvec_online(struct lruvec *lruvec); | ||
| 130 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); | 358 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); |
| 131 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); | 359 | |
| 132 | void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); | 360 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, |
| 133 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | 361 | int nr_pages); |
| 134 | struct task_struct *p); | 362 | |
| 363 | static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) | ||
| 364 | { | ||
| 365 | struct mem_cgroup_per_zone *mz; | ||
| 366 | struct mem_cgroup *memcg; | ||
| 367 | |||
| 368 | if (mem_cgroup_disabled()) | ||
| 369 | return true; | ||
| 370 | |||
| 371 | mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); | ||
| 372 | memcg = mz->memcg; | ||
| 373 | |||
| 374 | return !!(memcg->css.flags & CSS_ONLINE); | ||
| 375 | } | ||
| 376 | |||
| 377 | static inline | ||
| 378 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) | ||
| 379 | { | ||
| 380 | struct mem_cgroup_per_zone *mz; | ||
| 381 | |||
| 382 | mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); | ||
| 383 | return mz->lru_size[lru]; | ||
| 384 | } | ||
| 385 | |||
| 386 | static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | ||
| 387 | { | ||
| 388 | unsigned long inactive_ratio; | ||
| 389 | unsigned long inactive; | ||
| 390 | unsigned long active; | ||
| 391 | unsigned long gb; | ||
| 392 | |||
| 393 | inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); | ||
| 394 | active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); | ||
| 395 | |||
| 396 | gb = (inactive + active) >> (30 - PAGE_SHIFT); | ||
| 397 | if (gb) | ||
| 398 | inactive_ratio = int_sqrt(10 * gb); | ||
| 399 | else | ||
| 400 | inactive_ratio = 1; | ||
| 401 | |||
| 402 | return inactive * inactive_ratio < active; | ||
| 403 | } | ||
| 404 | |||
| 405 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | ||
| 406 | struct task_struct *p); | ||
| 135 | 407 | ||
| 136 | static inline void mem_cgroup_oom_enable(void) | 408 | static inline void mem_cgroup_oom_enable(void) |
| 137 | { | 409 | { |
| @@ -156,18 +428,26 @@ bool mem_cgroup_oom_synchronize(bool wait); | |||
| 156 | extern int do_swap_account; | 428 | extern int do_swap_account; |
| 157 | #endif | 429 | #endif |
| 158 | 430 | ||
| 159 | static inline bool mem_cgroup_disabled(void) | ||
| 160 | { | ||
| 161 | if (memory_cgrp_subsys.disabled) | ||
| 162 | return true; | ||
| 163 | return false; | ||
| 164 | } | ||
| 165 | |||
| 166 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); | 431 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); |
| 167 | void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, | ||
| 168 | enum mem_cgroup_stat_index idx, int val); | ||
| 169 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); | 432 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); |
| 170 | 433 | ||
| 434 | /** | ||
| 435 | * mem_cgroup_update_page_stat - update page state statistics | ||
| 436 | * @memcg: memcg to account against | ||
| 437 | * @idx: page state item to account | ||
| 438 | * @val: number of pages (positive or negative) | ||
| 439 | * | ||
| 440 | * See mem_cgroup_begin_page_stat() for locking requirements. | ||
| 441 | */ | ||
| 442 | static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, | ||
| 443 | enum mem_cgroup_stat_index idx, int val) | ||
| 444 | { | ||
| 445 | VM_BUG_ON(!rcu_read_lock_held()); | ||
| 446 | |||
| 447 | if (memcg) | ||
| 448 | this_cpu_add(memcg->stat->count[idx], val); | ||
| 449 | } | ||
| 450 | |||
| 171 | static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, | 451 | static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, |
| 172 | enum mem_cgroup_stat_index idx) | 452 | enum mem_cgroup_stat_index idx) |
| 173 | { | 453 | { |
| @@ -184,13 +464,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
| 184 | gfp_t gfp_mask, | 464 | gfp_t gfp_mask, |
| 185 | unsigned long *total_scanned); | 465 | unsigned long *total_scanned); |
| 186 | 466 | ||
| 187 | void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); | ||
| 188 | static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, | 467 | static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, |
| 189 | enum vm_event_item idx) | 468 | enum vm_event_item idx) |
| 190 | { | 469 | { |
| 470 | struct mem_cgroup *memcg; | ||
| 471 | |||
| 191 | if (mem_cgroup_disabled()) | 472 | if (mem_cgroup_disabled()) |
| 192 | return; | 473 | return; |
| 193 | __mem_cgroup_count_vm_event(mm, idx); | 474 | |
| 475 | rcu_read_lock(); | ||
| 476 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | ||
| 477 | if (unlikely(!memcg)) | ||
| 478 | goto out; | ||
| 479 | |||
| 480 | switch (idx) { | ||
| 481 | case PGFAULT: | ||
| 482 | this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); | ||
| 483 | break; | ||
| 484 | case PGMAJFAULT: | ||
| 485 | this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); | ||
| 486 | break; | ||
| 487 | default: | ||
| 488 | BUG(); | ||
| 489 | } | ||
| 490 | out: | ||
| 491 | rcu_read_unlock(); | ||
| 194 | } | 492 | } |
| 195 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 493 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 196 | void mem_cgroup_split_huge_fixup(struct page *head); | 494 | void mem_cgroup_split_huge_fixup(struct page *head); |
| @@ -199,8 +497,6 @@ void mem_cgroup_split_huge_fixup(struct page *head); | |||
| 199 | #else /* CONFIG_MEMCG */ | 497 | #else /* CONFIG_MEMCG */ |
| 200 | struct mem_cgroup; | 498 | struct mem_cgroup; |
| 201 | 499 | ||
| 202 | #define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) | ||
| 203 | |||
| 204 | static inline void mem_cgroup_events(struct mem_cgroup *memcg, | 500 | static inline void mem_cgroup_events(struct mem_cgroup *memcg, |
| 205 | enum mem_cgroup_events_index idx, | 501 | enum mem_cgroup_events_index idx, |
| 206 | unsigned int nr) | 502 | unsigned int nr) |
| @@ -258,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, | |||
| 258 | return &zone->lruvec; | 554 | return &zone->lruvec; |
| 259 | } | 555 | } |
| 260 | 556 | ||
| 261 | static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | ||
| 262 | { | ||
| 263 | return NULL; | ||
| 264 | } | ||
| 265 | |||
| 266 | static inline bool mm_match_cgroup(struct mm_struct *mm, | 557 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
| 267 | struct mem_cgroup *memcg) | 558 | struct mem_cgroup *memcg) |
| 268 | { | 559 | { |
| @@ -275,12 +566,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task, | |||
| 275 | return true; | 566 | return true; |
| 276 | } | 567 | } |
| 277 | 568 | ||
| 278 | static inline struct cgroup_subsys_state | ||
| 279 | *mem_cgroup_css(struct mem_cgroup *memcg) | ||
| 280 | { | ||
| 281 | return NULL; | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline struct mem_cgroup * | 569 | static inline struct mem_cgroup * |
| 285 | mem_cgroup_iter(struct mem_cgroup *root, | 570 | mem_cgroup_iter(struct mem_cgroup *root, |
| 286 | struct mem_cgroup *prev, | 571 | struct mem_cgroup *prev, |
| @@ -428,8 +713,8 @@ static inline void sock_release_memcg(struct sock *sk) | |||
| 428 | extern struct static_key memcg_kmem_enabled_key; | 713 | extern struct static_key memcg_kmem_enabled_key; |
| 429 | 714 | ||
| 430 | extern int memcg_nr_cache_ids; | 715 | extern int memcg_nr_cache_ids; |
| 431 | extern void memcg_get_cache_ids(void); | 716 | void memcg_get_cache_ids(void); |
| 432 | extern void memcg_put_cache_ids(void); | 717 | void memcg_put_cache_ids(void); |
| 433 | 718 | ||
| 434 | /* | 719 | /* |
| 435 | * Helper macro to loop through all memcg-specific caches. Callers must still | 720 | * Helper macro to loop through all memcg-specific caches. Callers must still |
| @@ -444,7 +729,10 @@ static inline bool memcg_kmem_enabled(void) | |||
| 444 | return static_key_false(&memcg_kmem_enabled_key); | 729 | return static_key_false(&memcg_kmem_enabled_key); |
| 445 | } | 730 | } |
| 446 | 731 | ||
| 447 | bool memcg_kmem_is_active(struct mem_cgroup *memcg); | 732 | static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) |
| 733 | { | ||
| 734 | return memcg->kmem_acct_active; | ||
| 735 | } | ||
| 448 | 736 | ||
| 449 | /* | 737 | /* |
| 450 | * In general, we'll do everything in our power to not incur in any overhead | 738 | * In general, we'll do everything in our power to not incur in any overhead |
| @@ -463,7 +751,15 @@ void __memcg_kmem_commit_charge(struct page *page, | |||
| 463 | struct mem_cgroup *memcg, int order); | 751 | struct mem_cgroup *memcg, int order); |
| 464 | void __memcg_kmem_uncharge_pages(struct page *page, int order); | 752 | void __memcg_kmem_uncharge_pages(struct page *page, int order); |
| 465 | 753 | ||
| 466 | int memcg_cache_id(struct mem_cgroup *memcg); | 754 | /* |
| 755 | * helper for accessing a memcg's index. It will be used as an index in the | ||
| 756 | * child cache array in kmem_cache, and also to derive its name. This function | ||
| 757 | * will return -1 when this is not a kmem-limited memcg. | ||
| 758 | */ | ||
| 759 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | ||
| 760 | { | ||
| 761 | return memcg ? memcg->kmemcg_id : -1; | ||
| 762 | } | ||
| 467 | 763 | ||
| 468 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); | 764 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); |
| 469 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); | 765 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 6ffa0ac7f7d6..8f60e899b33c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
| @@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} | |||
| 266 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | 266 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, |
| 267 | void *arg, int (*func)(struct memory_block *, void *)); | 267 | void *arg, int (*func)(struct memory_block *, void *)); |
| 268 | extern int add_memory(int nid, u64 start, u64 size); | 268 | extern int add_memory(int nid, u64 start, u64 size); |
| 269 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default); | 269 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, |
| 270 | extern int arch_add_memory(int nid, u64 start, u64 size); | 270 | bool for_device); |
| 271 | extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); | ||
| 271 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); | 272 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); |
| 272 | extern bool is_memblock_offlined(struct memory_block *mem); | 273 | extern bool is_memblock_offlined(struct memory_block *mem); |
| 273 | extern void remove_memory(int nid, u64 start, u64 size); | 274 | extern void remove_memory(int nid, u64 start, u64 size); |
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h new file mode 100644 index 000000000000..eb492d47f717 --- /dev/null +++ b/include/linux/microchipphy.h | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015 Microchip Technology | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU General Public License | ||
| 6 | * as published by the Free Software Foundation; either version 2 | ||
| 7 | * of the License, or (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef _MICROCHIPPHY_H | ||
| 19 | #define _MICROCHIPPHY_H | ||
| 20 | |||
| 21 | #define LAN88XX_INT_MASK (0x19) | ||
| 22 | #define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000) | ||
| 23 | #define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000) | ||
| 24 | #define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000) | ||
| 25 | #define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000) | ||
| 26 | #define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800) | ||
| 27 | #define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400) | ||
| 28 | #define LAN88XX_INT_MASK_POE_DETECT_ (0x0200) | ||
| 29 | #define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100) | ||
| 30 | #define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080) | ||
| 31 | #define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040) | ||
| 32 | #define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020) | ||
| 33 | #define LAN88XX_INT_MASK_RESERVED_ (0x0010) | ||
| 34 | #define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008) | ||
| 35 | #define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004) | ||
| 36 | #define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002) | ||
| 37 | #define LAN88XX_INT_MASK_RX__ER_ (0x0001) | ||
| 38 | |||
| 39 | #define LAN88XX_INT_STS (0x1A) | ||
| 40 | #define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000) | ||
| 41 | #define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000) | ||
| 42 | #define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000) | ||
| 43 | #define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000) | ||
| 44 | #define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800) | ||
| 45 | #define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400) | ||
| 46 | #define LAN88XX_INT_STS_POE_DETECT_ (0x0200) | ||
| 47 | #define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100) | ||
| 48 | #define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080) | ||
| 49 | #define LAN88XX_INT_STS_WOL_EVENT_ (0x0040) | ||
| 50 | #define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020) | ||
| 51 | #define LAN88XX_INT_STS_RESERVED_ (0x0010) | ||
| 52 | #define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008) | ||
| 53 | #define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004) | ||
| 54 | #define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002) | ||
| 55 | #define LAN88XX_INT_STS_RX_ER_ (0x0001) | ||
| 56 | |||
| 57 | #define LAN88XX_EXT_PAGE_ACCESS (0x1F) | ||
| 58 | #define LAN88XX_EXT_PAGE_SPACE_0 (0x0000) | ||
| 59 | #define LAN88XX_EXT_PAGE_SPACE_1 (0x0001) | ||
| 60 | #define LAN88XX_EXT_PAGE_SPACE_2 (0x0002) | ||
| 61 | |||
| 62 | /* Extended Register Page 1 space */ | ||
| 63 | #define LAN88XX_EXT_MODE_CTRL (0x13) | ||
| 64 | #define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C) | ||
| 65 | #define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000) | ||
| 66 | #define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008) | ||
| 67 | #define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C) | ||
| 68 | |||
| 69 | /* MMD 3 Registers */ | ||
| 70 | #define LAN88XX_MMD3_CHIP_ID (32877) | ||
| 71 | #define LAN88XX_MMD3_CHIP_REV (32878) | ||
| 72 | |||
| 73 | #endif /* _MICROCHIPPHY_H */ | ||
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index bcbf8c72a77b..baad4cb8e9b0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -79,7 +79,8 @@ enum { | |||
| 79 | 79 | ||
| 80 | enum { | 80 | enum { |
| 81 | MLX4_MAX_PORTS = 2, | 81 | MLX4_MAX_PORTS = 2, |
| 82 | MLX4_MAX_PORT_PKEYS = 128 | 82 | MLX4_MAX_PORT_PKEYS = 128, |
| 83 | MLX4_MAX_PORT_GIDS = 128 | ||
| 83 | }; | 84 | }; |
| 84 | 85 | ||
| 85 | /* base qkey for use in sriov tunnel-qp/proxy-qp communication. | 86 | /* base qkey for use in sriov tunnel-qp/proxy-qp communication. |
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 9553a73d2049..5a06d969338e 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h | |||
| @@ -59,6 +59,7 @@ struct mlx4_interface { | |||
| 59 | void (*event) (struct mlx4_dev *dev, void *context, | 59 | void (*event) (struct mlx4_dev *dev, void *context, |
| 60 | enum mlx4_dev_event event, unsigned long param); | 60 | enum mlx4_dev_event event, unsigned long param); |
| 61 | void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); | 61 | void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); |
| 62 | void (*activate)(struct mlx4_dev *dev, void *context); | ||
| 62 | struct list_head list; | 63 | struct list_head list; |
| 63 | enum mlx4_protocol protocol; | 64 | enum mlx4_protocol protocol; |
| 64 | int flags; | 65 | int flags; |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 250b1ff8b48d..8eb3b19af2a4 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -402,6 +402,17 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
| 402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
| 403 | }; | 403 | }; |
| 404 | 404 | ||
| 405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
| 406 | struct mlx5_inbox_hdr hdr; | ||
| 407 | u8 rsvd[8]; | ||
| 408 | }; | ||
| 409 | |||
| 410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
| 411 | struct mlx5_outbox_hdr hdr; | ||
| 412 | __be32 dump_fill_mkey; | ||
| 413 | __be32 resd_lkey; | ||
| 414 | }; | ||
| 415 | |||
| 405 | struct mlx5_cmd_layout { | 416 | struct mlx5_cmd_layout { |
| 406 | u8 type; | 417 | u8 type; |
| 407 | u8 rsvd0[3]; | 418 | u8 rsvd0[3]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8b6d6f2154a4..27b53f9a24ad 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -845,6 +845,7 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
| 845 | int mlx5_register_interface(struct mlx5_interface *intf); | 845 | int mlx5_register_interface(struct mlx5_interface *intf); |
| 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
| 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
| 848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
| 848 | 849 | ||
| 849 | struct mlx5_profile { | 850 | struct mlx5_profile { |
| 850 | u64 mask; | 851 | u64 mask; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 8b257c43855b..91c08f6f0dc9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/shrinker.h> | 20 | #include <linux/shrinker.h> |
| 21 | #include <linux/resource.h> | 21 | #include <linux/resource.h> |
| 22 | #include <linux/page_ext.h> | 22 | #include <linux/page_ext.h> |
| 23 | #include <linux/err.h> | ||
| 23 | 24 | ||
| 24 | struct mempolicy; | 25 | struct mempolicy; |
| 25 | struct anon_vma; | 26 | struct anon_vma; |
| @@ -249,6 +250,8 @@ struct vm_operations_struct { | |||
| 249 | void (*close)(struct vm_area_struct * area); | 250 | void (*close)(struct vm_area_struct * area); |
| 250 | int (*mremap)(struct vm_area_struct * area); | 251 | int (*mremap)(struct vm_area_struct * area); |
| 251 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | 252 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 253 | int (*pmd_fault)(struct vm_area_struct *, unsigned long address, | ||
| 254 | pmd_t *, unsigned int flags); | ||
| 252 | void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); | 255 | void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 253 | 256 | ||
| 254 | /* notification that a previously read-only page is about to become | 257 | /* notification that a previously read-only page is about to become |
| @@ -307,18 +310,6 @@ struct inode; | |||
| 307 | #define page_private(page) ((page)->private) | 310 | #define page_private(page) ((page)->private) |
| 308 | #define set_page_private(page, v) ((page)->private = (v)) | 311 | #define set_page_private(page, v) ((page)->private = (v)) |
| 309 | 312 | ||
| 310 | /* It's valid only if the page is free path or free_list */ | ||
| 311 | static inline void set_freepage_migratetype(struct page *page, int migratetype) | ||
| 312 | { | ||
| 313 | page->index = migratetype; | ||
| 314 | } | ||
| 315 | |||
| 316 | /* It's valid only if the page is free path or free_list */ | ||
| 317 | static inline int get_freepage_migratetype(struct page *page) | ||
| 318 | { | ||
| 319 | return page->index; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* | 313 | /* |
| 323 | * FIXME: take this include out, include page-flags.h in | 314 | * FIXME: take this include out, include page-flags.h in |
| 324 | * files which need it (119 of them) | 315 | * files which need it (119 of them) |
| @@ -359,20 +350,15 @@ static inline int get_page_unless_zero(struct page *page) | |||
| 359 | return atomic_inc_not_zero(&page->_count); | 350 | return atomic_inc_not_zero(&page->_count); |
| 360 | } | 351 | } |
| 361 | 352 | ||
| 362 | /* | ||
| 363 | * Try to drop a ref unless the page has a refcount of one, return false if | ||
| 364 | * that is the case. | ||
| 365 | * This is to make sure that the refcount won't become zero after this drop. | ||
| 366 | * This can be called when MMU is off so it must not access | ||
| 367 | * any of the virtual mappings. | ||
| 368 | */ | ||
| 369 | static inline int put_page_unless_one(struct page *page) | ||
| 370 | { | ||
| 371 | return atomic_add_unless(&page->_count, -1, 1); | ||
| 372 | } | ||
| 373 | |||
| 374 | extern int page_is_ram(unsigned long pfn); | 353 | extern int page_is_ram(unsigned long pfn); |
| 375 | extern int region_is_ram(resource_size_t phys_addr, unsigned long size); | 354 | |
| 355 | enum { | ||
| 356 | REGION_INTERSECTS, | ||
| 357 | REGION_DISJOINT, | ||
| 358 | REGION_MIXED, | ||
| 359 | }; | ||
| 360 | |||
| 361 | int region_intersects(resource_size_t offset, size_t size, const char *type); | ||
| 376 | 362 | ||
| 377 | /* Support for virtually mapped pages */ | 363 | /* Support for virtually mapped pages */ |
| 378 | struct page *vmalloc_to_page(const void *addr); | 364 | struct page *vmalloc_to_page(const void *addr); |
| @@ -1229,6 +1215,49 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1229 | int write, int force, struct page **pages); | 1215 | int write, int force, struct page **pages); |
| 1230 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 1216 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
| 1231 | struct page **pages); | 1217 | struct page **pages); |
| 1218 | |||
| 1219 | /* Container for pinned pfns / pages */ | ||
| 1220 | struct frame_vector { | ||
| 1221 | unsigned int nr_allocated; /* Number of frames we have space for */ | ||
| 1222 | unsigned int nr_frames; /* Number of frames stored in ptrs array */ | ||
| 1223 | bool got_ref; /* Did we pin pages by getting page ref? */ | ||
| 1224 | bool is_pfns; /* Does array contain pages or pfns? */ | ||
| 1225 | void *ptrs[0]; /* Array of pinned pfns / pages. Use | ||
| 1226 | * pfns_vector_pages() or pfns_vector_pfns() | ||
| 1227 | * for access */ | ||
| 1228 | }; | ||
| 1229 | |||
| 1230 | struct frame_vector *frame_vector_create(unsigned int nr_frames); | ||
| 1231 | void frame_vector_destroy(struct frame_vector *vec); | ||
| 1232 | int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, | ||
| 1233 | bool write, bool force, struct frame_vector *vec); | ||
| 1234 | void put_vaddr_frames(struct frame_vector *vec); | ||
| 1235 | int frame_vector_to_pages(struct frame_vector *vec); | ||
| 1236 | void frame_vector_to_pfns(struct frame_vector *vec); | ||
| 1237 | |||
| 1238 | static inline unsigned int frame_vector_count(struct frame_vector *vec) | ||
| 1239 | { | ||
| 1240 | return vec->nr_frames; | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | static inline struct page **frame_vector_pages(struct frame_vector *vec) | ||
| 1244 | { | ||
| 1245 | if (vec->is_pfns) { | ||
| 1246 | int err = frame_vector_to_pages(vec); | ||
| 1247 | |||
| 1248 | if (err) | ||
| 1249 | return ERR_PTR(err); | ||
| 1250 | } | ||
| 1251 | return (struct page **)(vec->ptrs); | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | static inline unsigned long *frame_vector_pfns(struct frame_vector *vec) | ||
| 1255 | { | ||
| 1256 | if (!vec->is_pfns) | ||
| 1257 | frame_vector_to_pfns(vec); | ||
| 1258 | return (unsigned long *)(vec->ptrs); | ||
| 1259 | } | ||
| 1260 | |||
| 1232 | struct kvec; | 1261 | struct kvec; |
| 1233 | int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, | 1262 | int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, |
| 1234 | struct page **pages); | 1263 | struct page **pages); |
| @@ -1260,6 +1289,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) | |||
| 1260 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | 1289 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); |
| 1261 | } | 1290 | } |
| 1262 | 1291 | ||
| 1292 | static inline bool vma_is_anonymous(struct vm_area_struct *vma) | ||
| 1293 | { | ||
| 1294 | return !vma->vm_ops; | ||
| 1295 | } | ||
| 1296 | |||
| 1263 | static inline int stack_guard_page_start(struct vm_area_struct *vma, | 1297 | static inline int stack_guard_page_start(struct vm_area_struct *vma, |
| 1264 | unsigned long addr) | 1298 | unsigned long addr) |
| 1265 | { | 1299 | { |
| @@ -1883,11 +1917,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo | |||
| 1883 | 1917 | ||
| 1884 | extern unsigned long mmap_region(struct file *file, unsigned long addr, | 1918 | extern unsigned long mmap_region(struct file *file, unsigned long addr, |
| 1885 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); | 1919 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); |
| 1886 | extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | 1920 | extern unsigned long do_mmap(struct file *file, unsigned long addr, |
| 1887 | unsigned long len, unsigned long prot, unsigned long flags, | 1921 | unsigned long len, unsigned long prot, unsigned long flags, |
| 1888 | unsigned long pgoff, unsigned long *populate); | 1922 | vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); |
| 1889 | extern int do_munmap(struct mm_struct *, unsigned long, size_t); | 1923 | extern int do_munmap(struct mm_struct *, unsigned long, size_t); |
| 1890 | 1924 | ||
| 1925 | static inline unsigned long | ||
| 1926 | do_mmap_pgoff(struct file *file, unsigned long addr, | ||
| 1927 | unsigned long len, unsigned long prot, unsigned long flags, | ||
| 1928 | unsigned long pgoff, unsigned long *populate) | ||
| 1929 | { | ||
| 1930 | return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); | ||
| 1931 | } | ||
| 1932 | |||
| 1891 | #ifdef CONFIG_MMU | 1933 | #ifdef CONFIG_MMU |
| 1892 | extern int __mm_populate(unsigned long addr, unsigned long len, | 1934 | extern int __mm_populate(unsigned long addr, unsigned long len, |
| 1893 | int ignore_errors); | 1935 | int ignore_errors); |
| @@ -2186,6 +2228,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags); | |||
| 2186 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); | 2228 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); |
| 2187 | extern int unpoison_memory(unsigned long pfn); | 2229 | extern int unpoison_memory(unsigned long pfn); |
| 2188 | extern int get_hwpoison_page(struct page *page); | 2230 | extern int get_hwpoison_page(struct page *page); |
| 2231 | extern void put_hwpoison_page(struct page *page); | ||
| 2189 | extern int sysctl_memory_failure_early_kill; | 2232 | extern int sysctl_memory_failure_early_kill; |
| 2190 | extern int sysctl_memory_failure_recovery; | 2233 | extern int sysctl_memory_failure_recovery; |
| 2191 | extern void shake_page(struct page *p, int access); | 2234 | extern void shake_page(struct page *p, int access); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c8d0a73d64c4..3d6baa7d4534 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -235,7 +235,7 @@ struct page_frag_cache { | |||
| 235 | bool pfmemalloc; | 235 | bool pfmemalloc; |
| 236 | }; | 236 | }; |
| 237 | 237 | ||
| 238 | typedef unsigned long __nocast vm_flags_t; | 238 | typedef unsigned long vm_flags_t; |
| 239 | 239 | ||
| 240 | /* | 240 | /* |
| 241 | * A region containing a mapping of a non-memory backed file under NOMMU | 241 | * A region containing a mapping of a non-memory backed file under NOMMU |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 4d3776d25925..fdd0779ccdfa 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -279,10 +279,13 @@ struct mmc_card { | |||
| 279 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ | 279 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ |
| 280 | #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ | 280 | #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ |
| 281 | #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ | 281 | #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ |
| 282 | #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ | ||
| 283 | |||
| 282 | 284 | ||
| 283 | unsigned int erase_size; /* erase size in sectors */ | 285 | unsigned int erase_size; /* erase size in sectors */ |
| 284 | unsigned int erase_shift; /* if erase unit is power 2 */ | 286 | unsigned int erase_shift; /* if erase unit is power 2 */ |
| 285 | unsigned int pref_erase; /* in sectors */ | 287 | unsigned int pref_erase; /* in sectors */ |
| 288 | unsigned int eg_boundary; /* don't cross erase-group boundaries */ | ||
| 286 | u8 erased_byte; /* value of erased bytes */ | 289 | u8 erased_byte; /* value of erased bytes */ |
| 287 | 290 | ||
| 288 | u32 raw_cid[4]; /* raw card CID */ | 291 | u32 raw_cid[4]; /* raw card CID */ |
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 5be97676f1fa..134c57422740 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
| @@ -98,6 +98,7 @@ struct mmc_data; | |||
| 98 | * @irq_flags: The flags to be passed to request_irq. | 98 | * @irq_flags: The flags to be passed to request_irq. |
| 99 | * @irq: The irq value to be passed to request_irq. | 99 | * @irq: The irq value to be passed to request_irq. |
| 100 | * @sdio_id0: Number of slot0 in the SDIO interrupt registers. | 100 | * @sdio_id0: Number of slot0 in the SDIO interrupt registers. |
| 101 | * @dto_timer: Timer for broken data transfer over scheme. | ||
| 101 | * | 102 | * |
| 102 | * Locking | 103 | * Locking |
| 103 | * ======= | 104 | * ======= |
| @@ -153,11 +154,7 @@ struct dw_mci { | |||
| 153 | dma_addr_t sg_dma; | 154 | dma_addr_t sg_dma; |
| 154 | void *sg_cpu; | 155 | void *sg_cpu; |
| 155 | const struct dw_mci_dma_ops *dma_ops; | 156 | const struct dw_mci_dma_ops *dma_ops; |
| 156 | #ifdef CONFIG_MMC_DW_IDMAC | ||
| 157 | unsigned int ring_size; | 157 | unsigned int ring_size; |
| 158 | #else | ||
| 159 | struct dw_mci_dma_data *dma_data; | ||
| 160 | #endif | ||
| 161 | u32 cmd_status; | 158 | u32 cmd_status; |
| 162 | u32 data_status; | 159 | u32 data_status; |
| 163 | u32 stop_cmdr; | 160 | u32 stop_cmdr; |
| @@ -204,6 +201,7 @@ struct dw_mci { | |||
| 204 | int sdio_id0; | 201 | int sdio_id0; |
| 205 | 202 | ||
| 206 | struct timer_list cmd11_timer; | 203 | struct timer_list cmd11_timer; |
| 204 | struct timer_list dto_timer; | ||
| 207 | }; | 205 | }; |
| 208 | 206 | ||
| 209 | /* DMA ops for Internal/External DMAC interface */ | 207 | /* DMA ops for Internal/External DMAC interface */ |
| @@ -226,6 +224,8 @@ struct dw_mci_dma_ops { | |||
| 226 | #define DW_MCI_QUIRK_HIGHSPEED BIT(2) | 224 | #define DW_MCI_QUIRK_HIGHSPEED BIT(2) |
| 227 | /* Unreliable card detection */ | 225 | /* Unreliable card detection */ |
| 228 | #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) | 226 | #define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) |
| 227 | /* Timer for broken data transfer over scheme */ | ||
| 228 | #define DW_MCI_QUIRK_BROKEN_DTO BIT(4) | ||
| 229 | 229 | ||
| 230 | struct dma_pdata; | 230 | struct dma_pdata; |
| 231 | 231 | ||
| @@ -259,7 +259,6 @@ struct dw_mci_board { | |||
| 259 | 259 | ||
| 260 | struct dw_mci_dma_ops *dma_ops; | 260 | struct dw_mci_dma_ops *dma_ops; |
| 261 | struct dma_pdata *data; | 261 | struct dma_pdata *data; |
| 262 | struct block_settings *blk_settings; | ||
| 263 | }; | 262 | }; |
| 264 | 263 | ||
| 265 | #endif /* LINUX_MMC_DW_MMC_H */ | 264 | #endif /* LINUX_MMC_DW_MMC_H */ |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 1369e54faeb7..83b81fd865f3 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -412,7 +412,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) | |||
| 412 | { | 412 | { |
| 413 | host->ops->enable_sdio_irq(host, 0); | 413 | host->ops->enable_sdio_irq(host, 0); |
| 414 | host->sdio_irq_pending = true; | 414 | host->sdio_irq_pending = true; |
| 415 | wake_up_process(host->sdio_irq_thread); | 415 | if (host->sdio_irq_thread) |
| 416 | wake_up_process(host->sdio_irq_thread); | ||
| 416 | } | 417 | } |
| 417 | 418 | ||
| 418 | void sdio_run_irqs(struct mmc_host *host); | 419 | void sdio_run_irqs(struct mmc_host *host); |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 61cd67f4d788..a1a210d59961 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
| @@ -66,6 +66,16 @@ struct mmu_notifier_ops { | |||
| 66 | unsigned long end); | 66 | unsigned long end); |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * clear_young is a lightweight version of clear_flush_young. Like the | ||
| 70 | * latter, it is supposed to test-and-clear the young/accessed bitflag | ||
| 71 | * in the secondary pte, but it may omit flushing the secondary tlb. | ||
| 72 | */ | ||
| 73 | int (*clear_young)(struct mmu_notifier *mn, | ||
| 74 | struct mm_struct *mm, | ||
| 75 | unsigned long start, | ||
| 76 | unsigned long end); | ||
| 77 | |||
| 78 | /* | ||
| 69 | * test_young is called to check the young/accessed bitflag in | 79 | * test_young is called to check the young/accessed bitflag in |
| 70 | * the secondary pte. This is used to know if the page is | 80 | * the secondary pte. This is used to know if the page is |
| 71 | * frequently used without actually clearing the flag or tearing | 81 | * frequently used without actually clearing the flag or tearing |
| @@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm); | |||
| 203 | extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, | 213 | extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, |
| 204 | unsigned long start, | 214 | unsigned long start, |
| 205 | unsigned long end); | 215 | unsigned long end); |
| 216 | extern int __mmu_notifier_clear_young(struct mm_struct *mm, | ||
| 217 | unsigned long start, | ||
| 218 | unsigned long end); | ||
| 206 | extern int __mmu_notifier_test_young(struct mm_struct *mm, | 219 | extern int __mmu_notifier_test_young(struct mm_struct *mm, |
| 207 | unsigned long address); | 220 | unsigned long address); |
| 208 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, | 221 | extern void __mmu_notifier_change_pte(struct mm_struct *mm, |
| @@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, | |||
| 231 | return 0; | 244 | return 0; |
| 232 | } | 245 | } |
| 233 | 246 | ||
| 247 | static inline int mmu_notifier_clear_young(struct mm_struct *mm, | ||
| 248 | unsigned long start, | ||
| 249 | unsigned long end) | ||
| 250 | { | ||
| 251 | if (mm_has_notifiers(mm)) | ||
| 252 | return __mmu_notifier_clear_young(mm, start, end); | ||
| 253 | return 0; | ||
| 254 | } | ||
| 255 | |||
| 234 | static inline int mmu_notifier_test_young(struct mm_struct *mm, | 256 | static inline int mmu_notifier_test_young(struct mm_struct *mm, |
| 235 | unsigned long address) | 257 | unsigned long address) |
| 236 | { | 258 | { |
| @@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 311 | __young; \ | 333 | __young; \ |
| 312 | }) | 334 | }) |
| 313 | 335 | ||
| 336 | #define ptep_clear_young_notify(__vma, __address, __ptep) \ | ||
| 337 | ({ \ | ||
| 338 | int __young; \ | ||
| 339 | struct vm_area_struct *___vma = __vma; \ | ||
| 340 | unsigned long ___address = __address; \ | ||
| 341 | __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\ | ||
| 342 | __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \ | ||
| 343 | ___address + PAGE_SIZE); \ | ||
| 344 | __young; \ | ||
| 345 | }) | ||
| 346 | |||
| 347 | #define pmdp_clear_young_notify(__vma, __address, __pmdp) \ | ||
| 348 | ({ \ | ||
| 349 | int __young; \ | ||
| 350 | struct vm_area_struct *___vma = __vma; \ | ||
| 351 | unsigned long ___address = __address; \ | ||
| 352 | __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\ | ||
| 353 | __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \ | ||
| 354 | ___address + PMD_SIZE); \ | ||
| 355 | __young; \ | ||
| 356 | }) | ||
| 357 | |||
| 314 | #define ptep_clear_flush_notify(__vma, __address, __ptep) \ | 358 | #define ptep_clear_flush_notify(__vma, __address, __ptep) \ |
| 315 | ({ \ | 359 | ({ \ |
| 316 | unsigned long ___addr = __address & PAGE_MASK; \ | 360 | unsigned long ___addr = __address & PAGE_MASK; \ |
| @@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 427 | 471 | ||
| 428 | #define ptep_clear_flush_young_notify ptep_clear_flush_young | 472 | #define ptep_clear_flush_young_notify ptep_clear_flush_young |
| 429 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young | 473 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young |
| 474 | #define ptep_clear_young_notify ptep_test_and_clear_young | ||
| 475 | #define pmdp_clear_young_notify pmdp_test_and_clear_young | ||
| 430 | #define ptep_clear_flush_notify ptep_clear_flush | 476 | #define ptep_clear_flush_notify ptep_clear_flush |
| 431 | #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush | 477 | #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush |
| 432 | #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear | 478 | #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ac00e2050943..d94347737292 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -319,7 +319,11 @@ enum zone_type { | |||
| 319 | ZONE_HIGHMEM, | 319 | ZONE_HIGHMEM, |
| 320 | #endif | 320 | #endif |
| 321 | ZONE_MOVABLE, | 321 | ZONE_MOVABLE, |
| 322 | #ifdef CONFIG_ZONE_DEVICE | ||
| 323 | ZONE_DEVICE, | ||
| 324 | #endif | ||
| 322 | __MAX_NR_ZONES | 325 | __MAX_NR_ZONES |
| 326 | |||
| 323 | }; | 327 | }; |
| 324 | 328 | ||
| 325 | #ifndef __GENERATING_BOUNDS_H | 329 | #ifndef __GENERATING_BOUNDS_H |
| @@ -786,6 +790,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) | |||
| 786 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; | 790 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; |
| 787 | } | 791 | } |
| 788 | 792 | ||
| 793 | static inline int zone_id(const struct zone *zone) | ||
| 794 | { | ||
| 795 | struct pglist_data *pgdat = zone->zone_pgdat; | ||
| 796 | |||
| 797 | return zone - pgdat->node_zones; | ||
| 798 | } | ||
| 799 | |||
| 800 | #ifdef CONFIG_ZONE_DEVICE | ||
| 801 | static inline bool is_dev_zone(const struct zone *zone) | ||
| 802 | { | ||
| 803 | return zone_id(zone) == ZONE_DEVICE; | ||
| 804 | } | ||
| 805 | #else | ||
| 806 | static inline bool is_dev_zone(const struct zone *zone) | ||
| 807 | { | ||
| 808 | return false; | ||
| 809 | } | ||
| 810 | #endif | ||
| 811 | |||
| 789 | #include <linux/memory_hotplug.h> | 812 | #include <linux/memory_hotplug.h> |
| 790 | 813 | ||
| 791 | extern struct mutex zonelists_mutex; | 814 | extern struct mutex zonelists_mutex; |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 29975c73a953..366cf77953b5 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
| @@ -27,9 +27,9 @@ | |||
| 27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 28 | #include <linux/bug.h> | 28 | #include <linux/bug.h> |
| 29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
| 30 | #include <linux/io.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
| 32 | #include <asm/io.h> | ||
| 33 | #include <asm/barrier.h> | 33 | #include <asm/barrier.h> |
| 34 | 34 | ||
| 35 | #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 | 35 | #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 9120edb650a0..639e9b8b0e4d 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
| @@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); | |||
| 68 | extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); | 68 | extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); |
| 69 | extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); | 69 | extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); |
| 70 | extern int netlink_has_listeners(struct sock *sk, unsigned int group); | 70 | extern int netlink_has_listeners(struct sock *sk, unsigned int group); |
| 71 | extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size, | 71 | |
| 72 | u32 dst_portid, gfp_t gfp_mask); | 72 | extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size, |
| 73 | unsigned int ldiff, u32 dst_portid, | ||
| 74 | gfp_t gfp_mask); | ||
| 75 | static inline struct sk_buff * | ||
| 76 | netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid, | ||
| 77 | gfp_t gfp_mask) | ||
| 78 | { | ||
| 79 | return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask); | ||
| 80 | } | ||
| 81 | |||
| 73 | extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); | 82 | extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); |
| 74 | extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, | 83 | extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, |
| 75 | __u32 group, gfp_t allocation); | 84 | __u32 group, gfp_t allocation); |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a91adf6e02f2..78488e099ce7 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -47,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void) | |||
| 47 | arch_trigger_all_cpu_backtrace(false); | 47 | arch_trigger_all_cpu_backtrace(false); |
| 48 | return true; | 48 | return true; |
| 49 | } | 49 | } |
| 50 | |||
| 51 | /* generic implementation */ | ||
| 52 | void nmi_trigger_all_cpu_backtrace(bool include_self, | ||
| 53 | void (*raise)(cpumask_t *mask)); | ||
| 54 | bool nmi_cpu_backtrace(struct pt_regs *regs); | ||
| 55 | |||
| 50 | #else | 56 | #else |
| 51 | static inline bool trigger_all_cpu_backtrace(void) | 57 | static inline bool trigger_all_cpu_backtrace(void) |
| 52 | { | 58 | { |
diff --git a/include/linux/ntb.h b/include/linux/ntb.h index b02f72bb8e32..f798e2afba88 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h | |||
| @@ -522,10 +522,9 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx) | |||
| 522 | * @speed: OUT - The link speed expressed as PCIe generation number. | 522 | * @speed: OUT - The link speed expressed as PCIe generation number. |
| 523 | * @width: OUT - The link width expressed as the number of PCIe lanes. | 523 | * @width: OUT - The link width expressed as the number of PCIe lanes. |
| 524 | * | 524 | * |
| 525 | * Set the translation of a memory window. The peer may access local memory | 525 | * Get the current state of the ntb link. It is recommended to query the link |
| 526 | * through the window starting at the address, up to the size. The address | 526 | * state once after every link event. It is safe to query the link state in |
| 527 | * must be aligned to the alignment specified by ntb_mw_get_range(). The size | 527 | * the context of the link event callback. |
| 528 | * must be aligned to the size alignment specified by ntb_mw_get_range(). | ||
| 529 | * | 528 | * |
| 530 | * Return: One if the link is up, zero if the link is down, otherwise a | 529 | * Return: One if the link is up, zero if the link is down, otherwise a |
| 531 | * negative value indicating the error number. | 530 | * negative value indicating the error number. |
| @@ -795,7 +794,7 @@ static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) | |||
| 795 | } | 794 | } |
| 796 | 795 | ||
| 797 | /** | 796 | /** |
| 798 | * ntb_peer_db_clear() - clear bits in the local doorbell register | 797 | * ntb_peer_db_clear() - clear bits in the peer doorbell register |
| 799 | * @ntb: NTB device context. | 798 | * @ntb: NTB device context. |
| 800 | * @db_bits: Doorbell bits to clear. | 799 | * @db_bits: Doorbell bits to clear. |
| 801 | * | 800 | * |
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h index 2862861366a5..7243eb98a722 100644 --- a/include/linux/ntb_transport.h +++ b/include/linux/ntb_transport.h | |||
| @@ -83,3 +83,4 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); | |||
| 83 | void ntb_transport_link_up(struct ntb_transport_qp *qp); | 83 | void ntb_transport_link_up(struct ntb_transport_qp *qp); |
| 84 | void ntb_transport_link_down(struct ntb_transport_qp *qp); | 84 | void ntb_transport_link_down(struct ntb_transport_qp *qp); |
| 85 | bool ntb_transport_link_query(struct ntb_transport_qp *qp); | 85 | bool ntb_transport_link_query(struct ntb_transport_qp *qp); |
| 86 | unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp); | ||
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index c2bbf672b84e..d2fa9ca42e9a 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h | |||
| @@ -41,7 +41,7 @@ enum OID { | |||
| 41 | OID_signed_data, /* 1.2.840.113549.1.7.2 */ | 41 | OID_signed_data, /* 1.2.840.113549.1.7.2 */ |
| 42 | /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ | 42 | /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ |
| 43 | OID_email_address, /* 1.2.840.113549.1.9.1 */ | 43 | OID_email_address, /* 1.2.840.113549.1.9.1 */ |
| 44 | OID_content_type, /* 1.2.840.113549.1.9.3 */ | 44 | OID_contentType, /* 1.2.840.113549.1.9.3 */ |
| 45 | OID_messageDigest, /* 1.2.840.113549.1.9.4 */ | 45 | OID_messageDigest, /* 1.2.840.113549.1.9.4 */ |
| 46 | OID_signingTime, /* 1.2.840.113549.1.9.5 */ | 46 | OID_signingTime, /* 1.2.840.113549.1.9.5 */ |
| 47 | OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ | 47 | OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ |
| @@ -54,6 +54,8 @@ enum OID { | |||
| 54 | 54 | ||
| 55 | /* Microsoft Authenticode & Software Publishing */ | 55 | /* Microsoft Authenticode & Software Publishing */ |
| 56 | OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ | 56 | OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ |
| 57 | OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */ | ||
| 58 | OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */ | ||
| 57 | OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ | 59 | OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ |
| 58 | OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ | 60 | OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ |
| 59 | OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ | 61 | OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ |
| @@ -61,6 +63,9 @@ enum OID { | |||
| 61 | OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ | 63 | OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ |
| 62 | OID_sha1, /* 1.3.14.3.2.26 */ | 64 | OID_sha1, /* 1.3.14.3.2.26 */ |
| 63 | OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ | 65 | OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ |
| 66 | OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ | ||
| 67 | OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ | ||
| 68 | OID_sha224, /* 2.16.840.1.101.3.4.2.4 */ | ||
| 64 | 69 | ||
| 65 | /* Distinguished Name attribute IDs [RFC 2256] */ | 70 | /* Distinguished Name attribute IDs [RFC 2256] */ |
| 66 | OID_commonName, /* 2.5.4.3 */ | 71 | OID_commonName, /* 2.5.4.3 */ |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 7deecb7bca5e..03e6257321f0 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -13,6 +13,27 @@ struct mem_cgroup; | |||
| 13 | struct task_struct; | 13 | struct task_struct; |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * Details of the page allocation that triggered the oom killer that are used to | ||
| 17 | * determine what should be killed. | ||
| 18 | */ | ||
| 19 | struct oom_control { | ||
| 20 | /* Used to determine cpuset */ | ||
| 21 | struct zonelist *zonelist; | ||
| 22 | |||
| 23 | /* Used to determine mempolicy */ | ||
| 24 | nodemask_t *nodemask; | ||
| 25 | |||
| 26 | /* Used to determine cpuset and node locality requirement */ | ||
| 27 | const gfp_t gfp_mask; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * order == -1 means the oom kill is required by sysrq, otherwise only | ||
| 31 | * for display purposes. | ||
| 32 | */ | ||
| 33 | const int order; | ||
| 34 | }; | ||
| 35 | |||
| 36 | /* | ||
| 16 | * Types of limitations to the nodes from which allocations may occur | 37 | * Types of limitations to the nodes from which allocations may occur |
| 17 | */ | 38 | */ |
| 18 | enum oom_constraint { | 39 | enum oom_constraint { |
| @@ -57,21 +78,18 @@ extern unsigned long oom_badness(struct task_struct *p, | |||
| 57 | 78 | ||
| 58 | extern int oom_kills_count(void); | 79 | extern int oom_kills_count(void); |
| 59 | extern void note_oom_kill(void); | 80 | extern void note_oom_kill(void); |
| 60 | extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | 81 | extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, |
| 61 | unsigned int points, unsigned long totalpages, | 82 | unsigned int points, unsigned long totalpages, |
| 62 | struct mem_cgroup *memcg, nodemask_t *nodemask, | 83 | struct mem_cgroup *memcg, const char *message); |
| 63 | const char *message); | ||
| 64 | 84 | ||
| 65 | extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | 85 | extern void check_panic_on_oom(struct oom_control *oc, |
| 66 | int order, const nodemask_t *nodemask, | 86 | enum oom_constraint constraint, |
| 67 | struct mem_cgroup *memcg); | 87 | struct mem_cgroup *memcg); |
| 68 | 88 | ||
| 69 | extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, | 89 | extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, |
| 70 | unsigned long totalpages, const nodemask_t *nodemask, | 90 | struct task_struct *task, unsigned long totalpages); |
| 71 | bool force_kill); | ||
| 72 | 91 | ||
| 73 | extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 92 | extern bool out_of_memory(struct oom_control *oc); |
| 74 | int order, nodemask_t *mask, bool force_kill); | ||
| 75 | 93 | ||
| 76 | extern void exit_oom_victim(void); | 94 | extern void exit_oom_victim(void); |
| 77 | 95 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 41c93844fb1d..416509e26d6d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -109,6 +109,10 @@ enum pageflags { | |||
| 109 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 109 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 110 | PG_compound_lock, | 110 | PG_compound_lock, |
| 111 | #endif | 111 | #endif |
| 112 | #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) | ||
| 113 | PG_young, | ||
| 114 | PG_idle, | ||
| 115 | #endif | ||
| 112 | __NR_PAGEFLAGS, | 116 | __NR_PAGEFLAGS, |
| 113 | 117 | ||
| 114 | /* Filesystems */ | 118 | /* Filesystems */ |
| @@ -289,6 +293,13 @@ PAGEFLAG_FALSE(HWPoison) | |||
| 289 | #define __PG_HWPOISON 0 | 293 | #define __PG_HWPOISON 0 |
| 290 | #endif | 294 | #endif |
| 291 | 295 | ||
| 296 | #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) | ||
| 297 | TESTPAGEFLAG(Young, young) | ||
| 298 | SETPAGEFLAG(Young, young) | ||
| 299 | TESTCLEARFLAG(Young, young) | ||
| 300 | PAGEFLAG(Idle, idle) | ||
| 301 | #endif | ||
| 302 | |||
| 292 | /* | 303 | /* |
| 293 | * On an anonymous page mapped into a user virtual memory area, | 304 | * On an anonymous page mapped into a user virtual memory area, |
| 294 | * page->mapping points to its anon_vma, not to a struct address_space; | 305 | * page->mapping points to its anon_vma, not to a struct address_space; |
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 2dc1e1697b45..047d64706f2a 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h | |||
| @@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
| 65 | int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, | 65 | int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, |
| 66 | bool skip_hwpoisoned_pages); | 66 | bool skip_hwpoisoned_pages); |
| 67 | 67 | ||
| 68 | /* | ||
| 69 | * Internal functions. Changes pageblock's migrate type. | ||
| 70 | */ | ||
| 71 | int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages); | ||
| 72 | void unset_migratetype_isolate(struct page *page, unsigned migratetype); | ||
| 73 | struct page *alloc_migrate_target(struct page *page, unsigned long private, | 68 | struct page *alloc_migrate_target(struct page *page, unsigned long private, |
| 74 | int **resultp); | 69 | int **resultp); |
| 75 | 70 | ||
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index c42981cd99aa..17f118a82854 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h | |||
| @@ -26,6 +26,10 @@ enum page_ext_flags { | |||
| 26 | PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ | 26 | PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ |
| 27 | PAGE_EXT_DEBUG_GUARD, | 27 | PAGE_EXT_DEBUG_GUARD, |
| 28 | PAGE_EXT_OWNER, | 28 | PAGE_EXT_OWNER, |
| 29 | #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) | ||
| 30 | PAGE_EXT_YOUNG, | ||
| 31 | PAGE_EXT_IDLE, | ||
| 32 | #endif | ||
| 29 | }; | 33 | }; |
| 30 | 34 | ||
| 31 | /* | 35 | /* |
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h new file mode 100644 index 000000000000..bf268fa92c5b --- /dev/null +++ b/include/linux/page_idle.h | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | #ifndef _LINUX_MM_PAGE_IDLE_H | ||
| 2 | #define _LINUX_MM_PAGE_IDLE_H | ||
| 3 | |||
| 4 | #include <linux/bitops.h> | ||
| 5 | #include <linux/page-flags.h> | ||
| 6 | #include <linux/page_ext.h> | ||
| 7 | |||
| 8 | #ifdef CONFIG_IDLE_PAGE_TRACKING | ||
| 9 | |||
| 10 | #ifdef CONFIG_64BIT | ||
| 11 | static inline bool page_is_young(struct page *page) | ||
| 12 | { | ||
| 13 | return PageYoung(page); | ||
| 14 | } | ||
| 15 | |||
| 16 | static inline void set_page_young(struct page *page) | ||
| 17 | { | ||
| 18 | SetPageYoung(page); | ||
| 19 | } | ||
| 20 | |||
| 21 | static inline bool test_and_clear_page_young(struct page *page) | ||
| 22 | { | ||
| 23 | return TestClearPageYoung(page); | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline bool page_is_idle(struct page *page) | ||
| 27 | { | ||
| 28 | return PageIdle(page); | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void set_page_idle(struct page *page) | ||
| 32 | { | ||
| 33 | SetPageIdle(page); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline void clear_page_idle(struct page *page) | ||
| 37 | { | ||
| 38 | ClearPageIdle(page); | ||
| 39 | } | ||
| 40 | #else /* !CONFIG_64BIT */ | ||
| 41 | /* | ||
| 42 | * If there is not enough space to store Idle and Young bits in page flags, use | ||
| 43 | * page ext flags instead. | ||
| 44 | */ | ||
| 45 | extern struct page_ext_operations page_idle_ops; | ||
| 46 | |||
| 47 | static inline bool page_is_young(struct page *page) | ||
| 48 | { | ||
| 49 | return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void set_page_young(struct page *page) | ||
| 53 | { | ||
| 54 | set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline bool test_and_clear_page_young(struct page *page) | ||
| 58 | { | ||
| 59 | return test_and_clear_bit(PAGE_EXT_YOUNG, | ||
| 60 | &lookup_page_ext(page)->flags); | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline bool page_is_idle(struct page *page) | ||
| 64 | { | ||
| 65 | return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline void set_page_idle(struct page *page) | ||
| 69 | { | ||
| 70 | set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline void clear_page_idle(struct page *page) | ||
| 74 | { | ||
| 75 | clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | ||
| 76 | } | ||
| 77 | #endif /* CONFIG_64BIT */ | ||
| 78 | |||
| 79 | #else /* !CONFIG_IDLE_PAGE_TRACKING */ | ||
| 80 | |||
| 81 | static inline bool page_is_young(struct page *page) | ||
| 82 | { | ||
| 83 | return false; | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void set_page_young(struct page *page) | ||
| 87 | { | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline bool test_and_clear_page_young(struct page *page) | ||
| 91 | { | ||
| 92 | return false; | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline bool page_is_idle(struct page *page) | ||
| 96 | { | ||
| 97 | return false; | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline void set_page_idle(struct page *page) | ||
| 101 | { | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline void clear_page_idle(struct page *page) | ||
| 105 | { | ||
| 106 | } | ||
| 107 | |||
| 108 | #endif /* CONFIG_IDLE_PAGE_TRACKING */ | ||
| 109 | |||
| 110 | #endif /* _LINUX_MM_PAGE_IDLE_H */ | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index 1a64733c48c7..e90eb22de628 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1227,6 +1227,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, | |||
| 1227 | dma_pool_create(name, &pdev->dev, size, align, allocation) | 1227 | dma_pool_create(name, &pdev->dev, size, align, allocation) |
| 1228 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) | 1228 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) |
| 1229 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) | 1229 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) |
| 1230 | #define pci_pool_zalloc(pool, flags, handle) \ | ||
| 1231 | dma_pool_zalloc(pool, flags, handle) | ||
| 1230 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) | 1232 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) |
| 1231 | 1233 | ||
| 1232 | struct msix_entry { | 1234 | struct msix_entry { |
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h new file mode 100644 index 000000000000..c68712aadf43 --- /dev/null +++ b/include/linux/platform_data/i2c-mux-reg.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | /* | ||
| 2 | * I2C multiplexer using a single register | ||
| 3 | * | ||
| 4 | * Copyright 2015 Freescale Semiconductor | ||
| 5 | * York Sun <yorksun@freescale.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the | ||
| 9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 10 | * option) any later version. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H | ||
| 14 | #define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H | ||
| 15 | |||
| 16 | /** | ||
| 17 | * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg | ||
| 18 | * @parent: Parent I2C bus adapter number | ||
| 19 | * @base_nr: Base I2C bus number to number adapters from or zero for dynamic | ||
| 20 | * @values: Array of value for each channel | ||
| 21 | * @n_values: Number of multiplexer channels | ||
| 22 | * @little_endian: Indicating if the register is in little endian | ||
| 23 | * @write_only: Reading the register is not allowed by hardware | ||
| 24 | * @classes: Optional I2C auto-detection classes | ||
| 25 | * @idle: Value to write to mux when idle | ||
| 26 | * @idle_in_use: indicate if idle value is in use | ||
| 27 | * @reg: Virtual address of the register to switch channel | ||
| 28 | * @reg_size: register size in bytes | ||
| 29 | */ | ||
| 30 | struct i2c_mux_reg_platform_data { | ||
| 31 | int parent; | ||
| 32 | int base_nr; | ||
| 33 | const unsigned int *values; | ||
| 34 | int n_values; | ||
| 35 | bool little_endian; | ||
| 36 | bool write_only; | ||
| 37 | const unsigned int *classes; | ||
| 38 | u32 idle; | ||
| 39 | bool idle_in_use; | ||
| 40 | void __iomem *reg; | ||
| 41 | resource_size_t reg_size; | ||
| 42 | }; | ||
| 43 | |||
| 44 | #endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */ | ||
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index e1571efa3f2b..95ccab3f454a 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h | |||
| @@ -45,5 +45,6 @@ struct esdhc_platform_data { | |||
| 45 | int max_bus_width; | 45 | int max_bus_width; |
| 46 | bool support_vsel; | 46 | bool support_vsel; |
| 47 | unsigned int delay_line; | 47 | unsigned int delay_line; |
| 48 | unsigned int tuning_step; /* The delay cell steps in tuning procedure */ | ||
| 48 | }; | 49 | }; |
| 49 | #endif /* __ASM_ARCH_IMX_ESDHC_H */ | 50 | #endif /* __ASM_ARCH_IMX_ESDHC_H */ |
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cab7ba55bedb..e817722ee3f0 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
| @@ -34,6 +34,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); | |||
| 34 | 34 | ||
| 35 | int dev_pm_opp_get_opp_count(struct device *dev); | 35 | int dev_pm_opp_get_opp_count(struct device *dev); |
| 36 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); | 36 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); |
| 37 | struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); | ||
| 37 | 38 | ||
| 38 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | 39 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
| 39 | unsigned long freq, | 40 | unsigned long freq, |
| @@ -80,6 +81,11 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) | |||
| 80 | return 0; | 81 | return 0; |
| 81 | } | 82 | } |
| 82 | 83 | ||
| 84 | static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) | ||
| 85 | { | ||
| 86 | return NULL; | ||
| 87 | } | ||
| 88 | |||
| 83 | static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | 89 | static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
| 84 | unsigned long freq, bool available) | 90 | unsigned long freq, bool available) |
| 85 | { | 91 | { |
diff --git a/include/linux/pmem.h b/include/linux/pmem.h index d2114045a6c4..85f810b33917 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h | |||
| @@ -14,28 +14,42 @@ | |||
| 14 | #define __PMEM_H__ | 14 | #define __PMEM_H__ |
| 15 | 15 | ||
| 16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
| 17 | #include <linux/uio.h> | ||
| 17 | 18 | ||
| 18 | #ifdef CONFIG_ARCH_HAS_PMEM_API | 19 | #ifdef CONFIG_ARCH_HAS_PMEM_API |
| 19 | #include <asm/cacheflush.h> | 20 | #define ARCH_MEMREMAP_PMEM MEMREMAP_WB |
| 21 | #include <asm/pmem.h> | ||
| 20 | #else | 22 | #else |
| 23 | #define ARCH_MEMREMAP_PMEM MEMREMAP_WT | ||
| 24 | /* | ||
| 25 | * These are simply here to enable compilation, all call sites gate | ||
| 26 | * calling these symbols with arch_has_pmem_api() and redirect to the | ||
| 27 | * implementation in asm/pmem.h. | ||
| 28 | */ | ||
| 29 | static inline bool __arch_has_wmb_pmem(void) | ||
| 30 | { | ||
| 31 | return false; | ||
| 32 | } | ||
| 33 | |||
| 21 | static inline void arch_wmb_pmem(void) | 34 | static inline void arch_wmb_pmem(void) |
| 22 | { | 35 | { |
| 23 | BUG(); | 36 | BUG(); |
| 24 | } | 37 | } |
| 25 | 38 | ||
| 26 | static inline bool __arch_has_wmb_pmem(void) | 39 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, |
| 40 | size_t n) | ||
| 27 | { | 41 | { |
| 28 | return false; | 42 | BUG(); |
| 29 | } | 43 | } |
| 30 | 44 | ||
| 31 | static inline void __pmem *arch_memremap_pmem(resource_size_t offset, | 45 | static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, |
| 32 | unsigned long size) | 46 | struct iov_iter *i) |
| 33 | { | 47 | { |
| 34 | return NULL; | 48 | BUG(); |
| 49 | return 0; | ||
| 35 | } | 50 | } |
| 36 | 51 | ||
| 37 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | 52 | static inline void arch_clear_pmem(void __pmem *addr, size_t size) |
| 38 | size_t n) | ||
| 39 | { | 53 | { |
| 40 | BUG(); | 54 | BUG(); |
| 41 | } | 55 | } |
| @@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | |||
| 43 | 57 | ||
| 44 | /* | 58 | /* |
| 45 | * Architectures that define ARCH_HAS_PMEM_API must provide | 59 | * Architectures that define ARCH_HAS_PMEM_API must provide |
| 46 | * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), | 60 | * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), |
| 47 | * arch_wmb_pmem(), and __arch_has_wmb_pmem(). | 61 | * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem(). |
| 48 | */ | 62 | */ |
| 49 | |||
| 50 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) | 63 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) |
| 51 | { | 64 | { |
| 52 | memcpy(dst, (void __force const *) src, size); | 65 | memcpy(dst, (void __force const *) src, size); |
| 53 | } | 66 | } |
| 54 | 67 | ||
| 55 | static inline void memunmap_pmem(void __pmem *addr) | 68 | static inline void memunmap_pmem(struct device *dev, void __pmem *addr) |
| 69 | { | ||
| 70 | devm_memunmap(dev, (void __force *) addr); | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline bool arch_has_pmem_api(void) | ||
| 56 | { | 74 | { |
| 57 | iounmap((void __force __iomem *) addr); | 75 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); |
| 58 | } | 76 | } |
| 59 | 77 | ||
| 60 | /** | 78 | /** |
| @@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr) | |||
| 68 | */ | 86 | */ |
| 69 | static inline bool arch_has_wmb_pmem(void) | 87 | static inline bool arch_has_wmb_pmem(void) |
| 70 | { | 88 | { |
| 71 | if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) | 89 | return arch_has_pmem_api() && __arch_has_wmb_pmem(); |
| 72 | return __arch_has_wmb_pmem(); | ||
| 73 | return false; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline bool arch_has_pmem_api(void) | ||
| 77 | { | ||
| 78 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem(); | ||
| 79 | } | 90 | } |
| 80 | 91 | ||
| 81 | /* | 92 | /* |
| @@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void) | |||
| 85 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for | 96 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for |
| 86 | * making data durable relative to i/o completion. | 97 | * making data durable relative to i/o completion. |
| 87 | */ | 98 | */ |
| 88 | static void default_memcpy_to_pmem(void __pmem *dst, const void *src, | 99 | static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, |
| 89 | size_t size) | 100 | size_t size) |
| 90 | { | 101 | { |
| 91 | memcpy((void __force *) dst, src, size); | 102 | memcpy((void __force *) dst, src, size); |
| 92 | } | 103 | } |
| 93 | 104 | ||
| 94 | static void __pmem *default_memremap_pmem(resource_size_t offset, | 105 | static inline size_t default_copy_from_iter_pmem(void __pmem *addr, |
| 95 | unsigned long size) | 106 | size_t bytes, struct iov_iter *i) |
| 107 | { | ||
| 108 | return copy_from_iter_nocache((void __force *)addr, bytes, i); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void default_clear_pmem(void __pmem *addr, size_t size) | ||
| 96 | { | 112 | { |
| 97 | return (void __pmem __force *)ioremap_wt(offset, size); | 113 | if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0) |
| 114 | clear_page((void __force *)addr); | ||
| 115 | else | ||
| 116 | memset((void __force *)addr, 0, size); | ||
| 98 | } | 117 | } |
| 99 | 118 | ||
| 100 | /** | 119 | /** |
| @@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset, | |||
| 109 | * wmb_pmem() arrange for the data to be written through the | 128 | * wmb_pmem() arrange for the data to be written through the |
| 110 | * cache to persistent media. | 129 | * cache to persistent media. |
| 111 | */ | 130 | */ |
| 112 | static inline void __pmem *memremap_pmem(resource_size_t offset, | 131 | static inline void __pmem *memremap_pmem(struct device *dev, |
| 113 | unsigned long size) | 132 | resource_size_t offset, unsigned long size) |
| 114 | { | 133 | { |
| 115 | if (arch_has_pmem_api()) | 134 | return (void __pmem *) devm_memremap(dev, offset, size, |
| 116 | return arch_memremap_pmem(offset, size); | 135 | ARCH_MEMREMAP_PMEM); |
| 117 | return default_memremap_pmem(offset, size); | ||
| 118 | } | 136 | } |
| 119 | 137 | ||
| 120 | /** | 138 | /** |
| @@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) | |||
| 146 | */ | 164 | */ |
| 147 | static inline void wmb_pmem(void) | 165 | static inline void wmb_pmem(void) |
| 148 | { | 166 | { |
| 149 | if (arch_has_pmem_api()) | 167 | if (arch_has_wmb_pmem()) |
| 150 | arch_wmb_pmem(); | 168 | arch_wmb_pmem(); |
| 169 | else | ||
| 170 | wmb(); | ||
| 171 | } | ||
| 172 | |||
| 173 | /** | ||
| 174 | * copy_from_iter_pmem - copy data from an iterator to PMEM | ||
| 175 | * @addr: PMEM destination address | ||
| 176 | * @bytes: number of bytes to copy | ||
| 177 | * @i: iterator with source data | ||
| 178 | * | ||
| 179 | * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. | ||
| 180 | * This function requires explicit ordering with a wmb_pmem() call. | ||
| 181 | */ | ||
| 182 | static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, | ||
| 183 | struct iov_iter *i) | ||
| 184 | { | ||
| 185 | if (arch_has_pmem_api()) | ||
| 186 | return arch_copy_from_iter_pmem(addr, bytes, i); | ||
| 187 | return default_copy_from_iter_pmem(addr, bytes, i); | ||
| 188 | } | ||
| 189 | |||
| 190 | /** | ||
| 191 | * clear_pmem - zero a PMEM memory range | ||
| 192 | * @addr: virtual start address | ||
| 193 | * @size: number of bytes to zero | ||
| 194 | * | ||
| 195 | * Write zeros into the memory range starting at 'addr' for 'size' bytes. | ||
| 196 | * This function requires explicit ordering with a wmb_pmem() call. | ||
| 197 | */ | ||
| 198 | static inline void clear_pmem(void __pmem *addr, size_t size) | ||
| 199 | { | ||
| 200 | if (arch_has_pmem_api()) | ||
| 201 | arch_clear_pmem(addr, size); | ||
| 202 | else | ||
| 203 | default_clear_pmem(addr, size); | ||
| 151 | } | 204 | } |
| 152 | #endif /* __PMEM_H__ */ | 205 | #endif /* __PMEM_H__ */ |
diff --git a/include/linux/poison.h b/include/linux/poison.h index 2110a81c5e2a..317e16de09e5 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
| @@ -19,8 +19,8 @@ | |||
| 19 | * under normal circumstances, used to verify that nobody uses | 19 | * under normal circumstances, used to verify that nobody uses |
| 20 | * non-initialized list entries. | 20 | * non-initialized list entries. |
| 21 | */ | 21 | */ |
| 22 | #define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) | 22 | #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) |
| 23 | #define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) | 23 | #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) |
| 24 | 24 | ||
| 25 | /********** include/linux/timer.h **********/ | 25 | /********** include/linux/timer.h **********/ |
| 26 | /* | 26 | /* |
| @@ -69,10 +69,6 @@ | |||
| 69 | #define ATM_POISON_FREE 0x12 | 69 | #define ATM_POISON_FREE 0x12 |
| 70 | #define ATM_POISON 0xdeadbeef | 70 | #define ATM_POISON 0xdeadbeef |
| 71 | 71 | ||
| 72 | /********** net/ **********/ | ||
| 73 | #define NEIGHBOR_DEAD 0xdeadbeef | ||
| 74 | #define NETFILTER_LINK_POISON 0xdead57ac | ||
| 75 | |||
| 76 | /********** kernel/mutexes **********/ | 72 | /********** kernel/mutexes **********/ |
| 77 | #define MUTEX_DEBUG_INIT 0x11 | 73 | #define MUTEX_DEBUG_INIT 0x11 |
| 78 | #define MUTEX_DEBUG_FREE 0x22 | 74 | #define MUTEX_DEBUG_FREE 0x22 |
| @@ -83,7 +79,4 @@ | |||
| 83 | /********** security/ **********/ | 79 | /********** security/ **********/ |
| 84 | #define KEY_DESTROY 0xbd | 80 | #define KEY_DESTROY 0xbd |
| 85 | 81 | ||
| 86 | /********** sound/oss/ **********/ | ||
| 87 | #define OSS_POISON_FREE 0xAB | ||
| 88 | |||
| 89 | #endif | 82 | #endif |
diff --git a/include/linux/printk.h b/include/linux/printk.h index a6298b27ac99..9729565c25ff 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -404,10 +404,10 @@ do { \ | |||
| 404 | static DEFINE_RATELIMIT_STATE(_rs, \ | 404 | static DEFINE_RATELIMIT_STATE(_rs, \ |
| 405 | DEFAULT_RATELIMIT_INTERVAL, \ | 405 | DEFAULT_RATELIMIT_INTERVAL, \ |
| 406 | DEFAULT_RATELIMIT_BURST); \ | 406 | DEFAULT_RATELIMIT_BURST); \ |
| 407 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 407 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ |
| 408 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ | 408 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ |
| 409 | __ratelimit(&_rs)) \ | 409 | __ratelimit(&_rs)) \ |
| 410 | __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ | 410 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ |
| 411 | } while (0) | 411 | } while (0) |
| 412 | #elif defined(DEBUG) | 412 | #elif defined(DEBUG) |
| 413 | #define pr_debug_ratelimited(fmt, ...) \ | 413 | #define pr_debug_ratelimited(fmt, ...) \ |
| @@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | |||
| 456 | groupsize, buf, len, ascii) \ | 456 | groupsize, buf, len, ascii) \ |
| 457 | dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ | 457 | dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ |
| 458 | groupsize, buf, len, ascii) | 458 | groupsize, buf, len, ascii) |
| 459 | #else | 459 | #elif defined(DEBUG) |
| 460 | #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ | 460 | #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ |
| 461 | groupsize, buf, len, ascii) \ | 461 | groupsize, buf, len, ascii) \ |
| 462 | print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ | 462 | print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ |
| 463 | groupsize, buf, len, ascii) | 463 | groupsize, buf, len, ascii) |
| 464 | #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ | 464 | #else |
| 465 | static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, | ||
| 466 | int rowsize, int groupsize, | ||
| 467 | const void *buf, size_t len, bool ascii) | ||
| 468 | { | ||
| 469 | } | ||
| 470 | #endif | ||
| 465 | 471 | ||
| 466 | #endif | 472 | #endif |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 987a73a40ef8..061265f92876 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) | 34 | #define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) |
| 35 | 35 | ||
| 36 | #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) | 36 | #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) |
| 37 | #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) | ||
| 37 | 38 | ||
| 38 | /* single stepping state bits (used on ARM and PA-RISC) */ | 39 | /* single stepping state bits (used on ARM and PA-RISC) */ |
| 39 | #define PT_SINGLESTEP_BIT 31 | 40 | #define PT_SINGLESTEP_BIT 31 |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 36262d08a9da..d681f6875aef 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
| @@ -79,26 +79,43 @@ enum { | |||
| 79 | PWMF_EXPORTED = 1 << 2, | 79 | PWMF_EXPORTED = 1 << 2, |
| 80 | }; | 80 | }; |
| 81 | 81 | ||
| 82 | /** | ||
| 83 | * struct pwm_device - PWM channel object | ||
| 84 | * @label: name of the PWM device | ||
| 85 | * @flags: flags associated with the PWM device | ||
| 86 | * @hwpwm: per-chip relative index of the PWM device | ||
| 87 | * @pwm: global index of the PWM device | ||
| 88 | * @chip: PWM chip providing this PWM device | ||
| 89 | * @chip_data: chip-private data associated with the PWM device | ||
| 90 | * @period: period of the PWM signal (in nanoseconds) | ||
| 91 | * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) | ||
| 92 | * @polarity: polarity of the PWM signal | ||
| 93 | */ | ||
| 82 | struct pwm_device { | 94 | struct pwm_device { |
| 83 | const char *label; | 95 | const char *label; |
| 84 | unsigned long flags; | 96 | unsigned long flags; |
| 85 | unsigned int hwpwm; | 97 | unsigned int hwpwm; |
| 86 | unsigned int pwm; | 98 | unsigned int pwm; |
| 87 | struct pwm_chip *chip; | 99 | struct pwm_chip *chip; |
| 88 | void *chip_data; | 100 | void *chip_data; |
| 89 | 101 | ||
| 90 | unsigned int period; /* in nanoseconds */ | 102 | unsigned int period; |
| 91 | unsigned int duty_cycle; /* in nanoseconds */ | 103 | unsigned int duty_cycle; |
| 92 | enum pwm_polarity polarity; | 104 | enum pwm_polarity polarity; |
| 93 | }; | 105 | }; |
| 94 | 106 | ||
| 107 | static inline bool pwm_is_enabled(const struct pwm_device *pwm) | ||
| 108 | { | ||
| 109 | return test_bit(PWMF_ENABLED, &pwm->flags); | ||
| 110 | } | ||
| 111 | |||
| 95 | static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) | 112 | static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) |
| 96 | { | 113 | { |
| 97 | if (pwm) | 114 | if (pwm) |
| 98 | pwm->period = period; | 115 | pwm->period = period; |
| 99 | } | 116 | } |
| 100 | 117 | ||
| 101 | static inline unsigned int pwm_get_period(struct pwm_device *pwm) | 118 | static inline unsigned int pwm_get_period(const struct pwm_device *pwm) |
| 102 | { | 119 | { |
| 103 | return pwm ? pwm->period : 0; | 120 | return pwm ? pwm->period : 0; |
| 104 | } | 121 | } |
| @@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) | |||
| 109 | pwm->duty_cycle = duty; | 126 | pwm->duty_cycle = duty; |
| 110 | } | 127 | } |
| 111 | 128 | ||
| 112 | static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm) | 129 | static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) |
| 113 | { | 130 | { |
| 114 | return pwm ? pwm->duty_cycle : 0; | 131 | return pwm ? pwm->duty_cycle : 0; |
| 115 | } | 132 | } |
| @@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm) | |||
| 119 | */ | 136 | */ |
| 120 | int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); | 137 | int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); |
| 121 | 138 | ||
| 139 | static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) | ||
| 140 | { | ||
| 141 | return pwm ? pwm->polarity : PWM_POLARITY_NORMAL; | ||
| 142 | } | ||
| 143 | |||
| 122 | /** | 144 | /** |
| 123 | * struct pwm_ops - PWM controller operations | 145 | * struct pwm_ops - PWM controller operations |
| 124 | * @request: optional hook for requesting a PWM | 146 | * @request: optional hook for requesting a PWM |
| @@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); | |||
| 131 | * @owner: helps prevent removal of modules exporting active PWMs | 153 | * @owner: helps prevent removal of modules exporting active PWMs |
| 132 | */ | 154 | */ |
| 133 | struct pwm_ops { | 155 | struct pwm_ops { |
| 134 | int (*request)(struct pwm_chip *chip, | 156 | int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 135 | struct pwm_device *pwm); | 157 | void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 136 | void (*free)(struct pwm_chip *chip, | 158 | int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, |
| 137 | struct pwm_device *pwm); | 159 | int duty_ns, int period_ns); |
| 138 | int (*config)(struct pwm_chip *chip, | 160 | int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, |
| 139 | struct pwm_device *pwm, | 161 | enum pwm_polarity polarity); |
| 140 | int duty_ns, int period_ns); | 162 | int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 141 | int (*set_polarity)(struct pwm_chip *chip, | 163 | void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 142 | struct pwm_device *pwm, | ||
| 143 | enum pwm_polarity polarity); | ||
| 144 | int (*enable)(struct pwm_chip *chip, | ||
| 145 | struct pwm_device *pwm); | ||
| 146 | void (*disable)(struct pwm_chip *chip, | ||
| 147 | struct pwm_device *pwm); | ||
| 148 | #ifdef CONFIG_DEBUG_FS | 164 | #ifdef CONFIG_DEBUG_FS |
| 149 | void (*dbg_show)(struct pwm_chip *chip, | 165 | void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); |
| 150 | struct seq_file *s); | ||
| 151 | #endif | 166 | #endif |
| 152 | struct module *owner; | 167 | struct module *owner; |
| 153 | }; | 168 | }; |
| 154 | 169 | ||
| 155 | /** | 170 | /** |
| @@ -160,22 +175,24 @@ struct pwm_ops { | |||
| 160 | * @base: number of first PWM controlled by this chip | 175 | * @base: number of first PWM controlled by this chip |
| 161 | * @npwm: number of PWMs controlled by this chip | 176 | * @npwm: number of PWMs controlled by this chip |
| 162 | * @pwms: array of PWM devices allocated by the framework | 177 | * @pwms: array of PWM devices allocated by the framework |
| 178 | * @of_xlate: request a PWM device given a device tree PWM specifier | ||
| 179 | * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier | ||
| 163 | * @can_sleep: must be true if the .config(), .enable() or .disable() | 180 | * @can_sleep: must be true if the .config(), .enable() or .disable() |
| 164 | * operations may sleep | 181 | * operations may sleep |
| 165 | */ | 182 | */ |
| 166 | struct pwm_chip { | 183 | struct pwm_chip { |
| 167 | struct device *dev; | 184 | struct device *dev; |
| 168 | struct list_head list; | 185 | struct list_head list; |
| 169 | const struct pwm_ops *ops; | 186 | const struct pwm_ops *ops; |
| 170 | int base; | 187 | int base; |
| 171 | unsigned int npwm; | 188 | unsigned int npwm; |
| 172 | 189 | ||
| 173 | struct pwm_device *pwms; | 190 | struct pwm_device *pwms; |
| 174 | 191 | ||
| 175 | struct pwm_device * (*of_xlate)(struct pwm_chip *pc, | 192 | struct pwm_device * (*of_xlate)(struct pwm_chip *pc, |
| 176 | const struct of_phandle_args *args); | 193 | const struct of_phandle_args *args); |
| 177 | unsigned int of_pwm_n_cells; | 194 | unsigned int of_pwm_n_cells; |
| 178 | bool can_sleep; | 195 | bool can_sleep; |
| 179 | }; | 196 | }; |
| 180 | 197 | ||
| 181 | #if IS_ENABLED(CONFIG_PWM) | 198 | #if IS_ENABLED(CONFIG_PWM) |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 4a6759098769..8fc0bfd8edc4 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/rbtree.h> | 17 | #include <linux/rbtree.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
| 20 | #include <linux/lockdep.h> | ||
| 20 | 21 | ||
| 21 | struct module; | 22 | struct module; |
| 22 | struct device; | 23 | struct device; |
| @@ -51,14 +52,17 @@ struct reg_default { | |||
| 51 | }; | 52 | }; |
| 52 | 53 | ||
| 53 | /** | 54 | /** |
| 54 | * Register/value pairs for sequences of writes | 55 | * Register/value pairs for sequences of writes with an optional delay in |
| 56 | * microseconds to be applied after each write. | ||
| 55 | * | 57 | * |
| 56 | * @reg: Register address. | 58 | * @reg: Register address. |
| 57 | * @def: Register value. | 59 | * @def: Register value. |
| 60 | * @delay_us: Delay to be applied after the register write in microseconds | ||
| 58 | */ | 61 | */ |
| 59 | struct reg_sequence { | 62 | struct reg_sequence { |
| 60 | unsigned int reg; | 63 | unsigned int reg; |
| 61 | unsigned int def; | 64 | unsigned int def; |
| 65 | unsigned int delay_us; | ||
| 62 | }; | 66 | }; |
| 63 | 67 | ||
| 64 | #ifdef CONFIG_REGMAP | 68 | #ifdef CONFIG_REGMAP |
| @@ -307,8 +311,12 @@ typedef void (*regmap_hw_free_context)(void *context); | |||
| 307 | * if not implemented on a given device. | 311 | * if not implemented on a given device. |
| 308 | * @async_write: Write operation which completes asynchronously, optional and | 312 | * @async_write: Write operation which completes asynchronously, optional and |
| 309 | * must serialise with respect to non-async I/O. | 313 | * must serialise with respect to non-async I/O. |
| 314 | * @reg_write: Write a single register value to the given register address. This | ||
| 315 | * write operation has to complete when returning from the function. | ||
| 310 | * @read: Read operation. Data is returned in the buffer used to transmit | 316 | * @read: Read operation. Data is returned in the buffer used to transmit |
| 311 | * data. | 317 | * data. |
| 318 | * @reg_read: Read a single register value from a given register address. | ||
| 319 | * @free_context: Free context. | ||
| 312 | * @async_alloc: Allocate a regmap_async() structure. | 320 | * @async_alloc: Allocate a regmap_async() structure. |
| 313 | * @read_flag_mask: Mask to be set in the top byte of the register when doing | 321 | * @read_flag_mask: Mask to be set in the top byte of the register when doing |
| 314 | * a read. | 322 | * a read. |
| @@ -318,7 +326,8 @@ typedef void (*regmap_hw_free_context)(void *context); | |||
| 318 | * @val_format_endian_default: Default endianness for formatted register | 326 | * @val_format_endian_default: Default endianness for formatted register |
| 319 | * values. Used when the regmap_config specifies DEFAULT. If this is | 327 | * values. Used when the regmap_config specifies DEFAULT. If this is |
| 320 | * DEFAULT, BIG is assumed. | 328 | * DEFAULT, BIG is assumed. |
| 321 | * @async_size: Size of struct used for async work. | 329 | * @max_raw_read: Max raw read size that can be used on the bus. |
| 330 | * @max_raw_write: Max raw write size that can be used on the bus. | ||
| 322 | */ | 331 | */ |
| 323 | struct regmap_bus { | 332 | struct regmap_bus { |
| 324 | bool fast_io; | 333 | bool fast_io; |
| @@ -333,47 +342,186 @@ struct regmap_bus { | |||
| 333 | u8 read_flag_mask; | 342 | u8 read_flag_mask; |
| 334 | enum regmap_endian reg_format_endian_default; | 343 | enum regmap_endian reg_format_endian_default; |
| 335 | enum regmap_endian val_format_endian_default; | 344 | enum regmap_endian val_format_endian_default; |
| 345 | size_t max_raw_read; | ||
| 346 | size_t max_raw_write; | ||
| 336 | }; | 347 | }; |
| 337 | 348 | ||
| 338 | struct regmap *regmap_init(struct device *dev, | 349 | /* |
| 339 | const struct regmap_bus *bus, | 350 | * __regmap_init functions. |
| 340 | void *bus_context, | 351 | * |
| 341 | const struct regmap_config *config); | 352 | * These functions take a lock key and name parameter, and should not be called |
| 353 | * directly. Instead, use the regmap_init macros that generate a key and name | ||
| 354 | * for each call. | ||
| 355 | */ | ||
| 356 | struct regmap *__regmap_init(struct device *dev, | ||
| 357 | const struct regmap_bus *bus, | ||
| 358 | void *bus_context, | ||
| 359 | const struct regmap_config *config, | ||
| 360 | struct lock_class_key *lock_key, | ||
| 361 | const char *lock_name); | ||
| 362 | struct regmap *__regmap_init_i2c(struct i2c_client *i2c, | ||
| 363 | const struct regmap_config *config, | ||
| 364 | struct lock_class_key *lock_key, | ||
| 365 | const char *lock_name); | ||
| 366 | struct regmap *__regmap_init_spi(struct spi_device *dev, | ||
| 367 | const struct regmap_config *config, | ||
| 368 | struct lock_class_key *lock_key, | ||
| 369 | const char *lock_name); | ||
| 370 | struct regmap *__regmap_init_spmi_base(struct spmi_device *dev, | ||
| 371 | const struct regmap_config *config, | ||
| 372 | struct lock_class_key *lock_key, | ||
| 373 | const char *lock_name); | ||
| 374 | struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev, | ||
| 375 | const struct regmap_config *config, | ||
| 376 | struct lock_class_key *lock_key, | ||
| 377 | const char *lock_name); | ||
| 378 | struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, | ||
| 379 | void __iomem *regs, | ||
| 380 | const struct regmap_config *config, | ||
| 381 | struct lock_class_key *lock_key, | ||
| 382 | const char *lock_name); | ||
| 383 | struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, | ||
| 384 | const struct regmap_config *config, | ||
| 385 | struct lock_class_key *lock_key, | ||
| 386 | const char *lock_name); | ||
| 387 | |||
| 388 | struct regmap *__devm_regmap_init(struct device *dev, | ||
| 389 | const struct regmap_bus *bus, | ||
| 390 | void *bus_context, | ||
| 391 | const struct regmap_config *config, | ||
| 392 | struct lock_class_key *lock_key, | ||
| 393 | const char *lock_name); | ||
| 394 | struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, | ||
| 395 | const struct regmap_config *config, | ||
| 396 | struct lock_class_key *lock_key, | ||
| 397 | const char *lock_name); | ||
| 398 | struct regmap *__devm_regmap_init_spi(struct spi_device *dev, | ||
| 399 | const struct regmap_config *config, | ||
| 400 | struct lock_class_key *lock_key, | ||
| 401 | const char *lock_name); | ||
| 402 | struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev, | ||
| 403 | const struct regmap_config *config, | ||
| 404 | struct lock_class_key *lock_key, | ||
| 405 | const char *lock_name); | ||
| 406 | struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev, | ||
| 407 | const struct regmap_config *config, | ||
| 408 | struct lock_class_key *lock_key, | ||
| 409 | const char *lock_name); | ||
| 410 | struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, | ||
| 411 | const char *clk_id, | ||
| 412 | void __iomem *regs, | ||
| 413 | const struct regmap_config *config, | ||
| 414 | struct lock_class_key *lock_key, | ||
| 415 | const char *lock_name); | ||
| 416 | struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, | ||
| 417 | const struct regmap_config *config, | ||
| 418 | struct lock_class_key *lock_key, | ||
| 419 | const char *lock_name); | ||
| 420 | |||
| 421 | /* | ||
| 422 | * Wrapper for regmap_init macros to include a unique lockdep key and name | ||
| 423 | * for each call. No-op if CONFIG_LOCKDEP is not set. | ||
| 424 | * | ||
| 425 | * @fn: Real function to call (in the form __[*_]regmap_init[_*]) | ||
| 426 | * @name: Config variable name (#config in the calling macro) | ||
| 427 | **/ | ||
| 428 | #ifdef CONFIG_LOCKDEP | ||
| 429 | #define __regmap_lockdep_wrapper(fn, name, ...) \ | ||
| 430 | ( \ | ||
| 431 | ({ \ | ||
| 432 | static struct lock_class_key _key; \ | ||
| 433 | fn(__VA_ARGS__, &_key, \ | ||
| 434 | KBUILD_BASENAME ":" \ | ||
| 435 | __stringify(__LINE__) ":" \ | ||
| 436 | "(" name ")->lock"); \ | ||
| 437 | }) \ | ||
| 438 | ) | ||
| 439 | #else | ||
| 440 | #define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL) | ||
| 441 | #endif | ||
| 442 | |||
| 443 | /** | ||
| 444 | * regmap_init(): Initialise register map | ||
| 445 | * | ||
| 446 | * @dev: Device that will be interacted with | ||
| 447 | * @bus: Bus-specific callbacks to use with device | ||
| 448 | * @bus_context: Data passed to bus-specific callbacks | ||
| 449 | * @config: Configuration for register map | ||
| 450 | * | ||
| 451 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 452 | * a struct regmap. This function should generally not be called | ||
| 453 | * directly, it should be called by bus-specific init functions. | ||
| 454 | */ | ||
| 455 | #define regmap_init(dev, bus, bus_context, config) \ | ||
| 456 | __regmap_lockdep_wrapper(__regmap_init, #config, \ | ||
| 457 | dev, bus, bus_context, config) | ||
| 342 | int regmap_attach_dev(struct device *dev, struct regmap *map, | 458 | int regmap_attach_dev(struct device *dev, struct regmap *map, |
| 343 | const struct regmap_config *config); | 459 | const struct regmap_config *config); |
| 344 | struct regmap *regmap_init_i2c(struct i2c_client *i2c, | ||
| 345 | const struct regmap_config *config); | ||
| 346 | struct regmap *regmap_init_spi(struct spi_device *dev, | ||
| 347 | const struct regmap_config *config); | ||
| 348 | struct regmap *regmap_init_spmi_base(struct spmi_device *dev, | ||
| 349 | const struct regmap_config *config); | ||
| 350 | struct regmap *regmap_init_spmi_ext(struct spmi_device *dev, | ||
| 351 | const struct regmap_config *config); | ||
| 352 | struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, | ||
| 353 | void __iomem *regs, | ||
| 354 | const struct regmap_config *config); | ||
| 355 | struct regmap *regmap_init_ac97(struct snd_ac97 *ac97, | ||
| 356 | const struct regmap_config *config); | ||
| 357 | |||
| 358 | struct regmap *devm_regmap_init(struct device *dev, | ||
| 359 | const struct regmap_bus *bus, | ||
| 360 | void *bus_context, | ||
| 361 | const struct regmap_config *config); | ||
| 362 | struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, | ||
| 363 | const struct regmap_config *config); | ||
| 364 | struct regmap *devm_regmap_init_spi(struct spi_device *dev, | ||
| 365 | const struct regmap_config *config); | ||
| 366 | struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev, | ||
| 367 | const struct regmap_config *config); | ||
| 368 | struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev, | ||
| 369 | const struct regmap_config *config); | ||
| 370 | struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, | ||
| 371 | void __iomem *regs, | ||
| 372 | const struct regmap_config *config); | ||
| 373 | struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97, | ||
| 374 | const struct regmap_config *config); | ||
| 375 | 460 | ||
| 376 | bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | 461 | /** |
| 462 | * regmap_init_i2c(): Initialise register map | ||
| 463 | * | ||
| 464 | * @i2c: Device that will be interacted with | ||
| 465 | * @config: Configuration for register map | ||
| 466 | * | ||
| 467 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 468 | * a struct regmap. | ||
| 469 | */ | ||
| 470 | #define regmap_init_i2c(i2c, config) \ | ||
| 471 | __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \ | ||
| 472 | i2c, config) | ||
| 473 | |||
| 474 | /** | ||
| 475 | * regmap_init_spi(): Initialise register map | ||
| 476 | * | ||
| 477 | * @spi: Device that will be interacted with | ||
| 478 | * @config: Configuration for register map | ||
| 479 | * | ||
| 480 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 481 | * a struct regmap. | ||
| 482 | */ | ||
| 483 | #define regmap_init_spi(dev, config) \ | ||
| 484 | __regmap_lockdep_wrapper(__regmap_init_spi, #config, \ | ||
| 485 | dev, config) | ||
| 486 | |||
| 487 | /** | ||
| 488 | * regmap_init_spmi_base(): Create regmap for the Base register space | ||
| 489 | * @sdev: SPMI device that will be interacted with | ||
| 490 | * @config: Configuration for register map | ||
| 491 | * | ||
| 492 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 493 | * a struct regmap. | ||
| 494 | */ | ||
| 495 | #define regmap_init_spmi_base(dev, config) \ | ||
| 496 | __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \ | ||
| 497 | dev, config) | ||
| 498 | |||
| 499 | /** | ||
| 500 | * regmap_init_spmi_ext(): Create regmap for Ext register space | ||
| 501 | * @sdev: Device that will be interacted with | ||
| 502 | * @config: Configuration for register map | ||
| 503 | * | ||
| 504 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 505 | * a struct regmap. | ||
| 506 | */ | ||
| 507 | #define regmap_init_spmi_ext(dev, config) \ | ||
| 508 | __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \ | ||
| 509 | dev, config) | ||
| 510 | |||
| 511 | /** | ||
| 512 | * regmap_init_mmio_clk(): Initialise register map with register clock | ||
| 513 | * | ||
| 514 | * @dev: Device that will be interacted with | ||
| 515 | * @clk_id: register clock consumer ID | ||
| 516 | * @regs: Pointer to memory-mapped IO region | ||
| 517 | * @config: Configuration for register map | ||
| 518 | * | ||
| 519 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 520 | * a struct regmap. | ||
| 521 | */ | ||
| 522 | #define regmap_init_mmio_clk(dev, clk_id, regs, config) \ | ||
| 523 | __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \ | ||
| 524 | dev, clk_id, regs, config) | ||
| 377 | 525 | ||
| 378 | /** | 526 | /** |
| 379 | * regmap_init_mmio(): Initialise register map | 527 | * regmap_init_mmio(): Initialise register map |
| @@ -385,12 +533,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | |||
| 385 | * The return value will be an ERR_PTR() on error or a valid pointer to | 533 | * The return value will be an ERR_PTR() on error or a valid pointer to |
| 386 | * a struct regmap. | 534 | * a struct regmap. |
| 387 | */ | 535 | */ |
| 388 | static inline struct regmap *regmap_init_mmio(struct device *dev, | 536 | #define regmap_init_mmio(dev, regs, config) \ |
| 389 | void __iomem *regs, | 537 | regmap_init_mmio_clk(dev, NULL, regs, config) |
| 390 | const struct regmap_config *config) | 538 | |
| 391 | { | 539 | /** |
| 392 | return regmap_init_mmio_clk(dev, NULL, regs, config); | 540 | * regmap_init_ac97(): Initialise AC'97 register map |
| 393 | } | 541 | * |
| 542 | * @ac97: Device that will be interacted with | ||
| 543 | * @config: Configuration for register map | ||
| 544 | * | ||
| 545 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
| 546 | * a struct regmap. | ||
| 547 | */ | ||
| 548 | #define regmap_init_ac97(ac97, config) \ | ||
| 549 | __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \ | ||
| 550 | ac97, config) | ||
| 551 | bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | ||
| 552 | |||
| 553 | /** | ||
| 554 | * devm_regmap_init(): Initialise managed register map | ||
| 555 | * | ||
| 556 | * @dev: Device that will be interacted with | ||
| 557 | * @bus: Bus-specific callbacks to use with device | ||
| 558 | * @bus_context: Data passed to bus-specific callbacks | ||
| 559 | * @config: Configuration for register map | ||
| 560 | * | ||
| 561 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 562 | * to a struct regmap. This function should generally not be called | ||
| 563 | * directly, it should be called by bus-specific init functions. The | ||
| 564 | * map will be automatically freed by the device management code. | ||
| 565 | */ | ||
| 566 | #define devm_regmap_init(dev, bus, bus_context, config) \ | ||
| 567 | __regmap_lockdep_wrapper(__devm_regmap_init, #config, \ | ||
| 568 | dev, bus, bus_context, config) | ||
| 569 | |||
| 570 | /** | ||
| 571 | * devm_regmap_init_i2c(): Initialise managed register map | ||
| 572 | * | ||
| 573 | * @i2c: Device that will be interacted with | ||
| 574 | * @config: Configuration for register map | ||
| 575 | * | ||
| 576 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 577 | * to a struct regmap. The regmap will be automatically freed by the | ||
| 578 | * device management code. | ||
| 579 | */ | ||
| 580 | #define devm_regmap_init_i2c(i2c, config) \ | ||
| 581 | __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \ | ||
| 582 | i2c, config) | ||
| 583 | |||
| 584 | /** | ||
| 585 | * devm_regmap_init_spi(): Initialise register map | ||
| 586 | * | ||
| 587 | * @spi: Device that will be interacted with | ||
| 588 | * @config: Configuration for register map | ||
| 589 | * | ||
| 590 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 591 | * to a struct regmap. The map will be automatically freed by the | ||
| 592 | * device management code. | ||
| 593 | */ | ||
| 594 | #define devm_regmap_init_spi(dev, config) \ | ||
| 595 | __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \ | ||
| 596 | dev, config) | ||
| 597 | |||
| 598 | /** | ||
| 599 | * devm_regmap_init_spmi_base(): Create managed regmap for Base register space | ||
| 600 | * @sdev: SPMI device that will be interacted with | ||
| 601 | * @config: Configuration for register map | ||
| 602 | * | ||
| 603 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 604 | * to a struct regmap. The regmap will be automatically freed by the | ||
| 605 | * device management code. | ||
| 606 | */ | ||
| 607 | #define devm_regmap_init_spmi_base(dev, config) \ | ||
| 608 | __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \ | ||
| 609 | dev, config) | ||
| 610 | |||
| 611 | /** | ||
| 612 | * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space | ||
| 613 | * @sdev: SPMI device that will be interacted with | ||
| 614 | * @config: Configuration for register map | ||
| 615 | * | ||
| 616 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 617 | * to a struct regmap. The regmap will be automatically freed by the | ||
| 618 | * device management code. | ||
| 619 | */ | ||
| 620 | #define devm_regmap_init_spmi_ext(dev, config) \ | ||
| 621 | __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \ | ||
| 622 | dev, config) | ||
| 623 | |||
| 624 | /** | ||
| 625 | * devm_regmap_init_mmio_clk(): Initialise managed register map with clock | ||
| 626 | * | ||
| 627 | * @dev: Device that will be interacted with | ||
| 628 | * @clk_id: register clock consumer ID | ||
| 629 | * @regs: Pointer to memory-mapped IO region | ||
| 630 | * @config: Configuration for register map | ||
| 631 | * | ||
| 632 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 633 | * to a struct regmap. The regmap will be automatically freed by the | ||
| 634 | * device management code. | ||
| 635 | */ | ||
| 636 | #define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \ | ||
| 637 | __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \ | ||
| 638 | dev, clk_id, regs, config) | ||
| 394 | 639 | ||
| 395 | /** | 640 | /** |
| 396 | * devm_regmap_init_mmio(): Initialise managed register map | 641 | * devm_regmap_init_mmio(): Initialise managed register map |
| @@ -403,12 +648,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev, | |||
| 403 | * to a struct regmap. The regmap will be automatically freed by the | 648 | * to a struct regmap. The regmap will be automatically freed by the |
| 404 | * device management code. | 649 | * device management code. |
| 405 | */ | 650 | */ |
| 406 | static inline struct regmap *devm_regmap_init_mmio(struct device *dev, | 651 | #define devm_regmap_init_mmio(dev, regs, config) \ |
| 407 | void __iomem *regs, | 652 | devm_regmap_init_mmio_clk(dev, NULL, regs, config) |
| 408 | const struct regmap_config *config) | 653 | |
| 409 | { | 654 | /** |
| 410 | return devm_regmap_init_mmio_clk(dev, NULL, regs, config); | 655 | * devm_regmap_init_ac97(): Initialise AC'97 register map |
| 411 | } | 656 | * |
| 657 | * @ac97: Device that will be interacted with | ||
| 658 | * @config: Configuration for register map | ||
| 659 | * | ||
| 660 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
| 661 | * to a struct regmap. The regmap will be automatically freed by the | ||
| 662 | * device management code. | ||
| 663 | */ | ||
| 664 | #define devm_regmap_init_ac97(ac97, config) \ | ||
| 665 | __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ | ||
| 666 | ac97, config) | ||
| 412 | 667 | ||
| 413 | void regmap_exit(struct regmap *map); | 668 | void regmap_exit(struct regmap *map); |
| 414 | int regmap_reinit_cache(struct regmap *map, | 669 | int regmap_reinit_cache(struct regmap *map, |
| @@ -450,6 +705,8 @@ int regmap_get_max_register(struct regmap *map); | |||
| 450 | int regmap_get_reg_stride(struct regmap *map); | 705 | int regmap_get_reg_stride(struct regmap *map); |
| 451 | int regmap_async_complete(struct regmap *map); | 706 | int regmap_async_complete(struct regmap *map); |
| 452 | bool regmap_can_raw_write(struct regmap *map); | 707 | bool regmap_can_raw_write(struct regmap *map); |
| 708 | size_t regmap_get_raw_read_max(struct regmap *map); | ||
| 709 | size_t regmap_get_raw_write_max(struct regmap *map); | ||
| 453 | 710 | ||
| 454 | int regcache_sync(struct regmap *map); | 711 | int regcache_sync(struct regmap *map); |
| 455 | int regcache_sync_region(struct regmap *map, unsigned int min, | 712 | int regcache_sync_region(struct regmap *map, unsigned int min, |
diff --git a/include/linux/reset.h b/include/linux/reset.h index da5602bd77d7..7f65f9cff951 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h | |||
| @@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev) | |||
| 74 | return -ENOSYS; | 74 | return -ENOSYS; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static inline struct reset_control *__must_check reset_control_get( | ||
| 78 | struct device *dev, const char *id) | ||
| 79 | { | ||
| 80 | WARN_ON(1); | ||
| 81 | return ERR_PTR(-EINVAL); | ||
| 82 | } | ||
| 83 | |||
| 84 | static inline struct reset_control *__must_check devm_reset_control_get( | ||
| 85 | struct device *dev, const char *id) | ||
| 86 | { | ||
| 87 | WARN_ON(1); | ||
| 88 | return ERR_PTR(-EINVAL); | ||
| 89 | } | ||
| 90 | |||
| 77 | static inline struct reset_control *reset_control_get_optional( | 91 | static inline struct reset_control *reset_control_get_optional( |
| 78 | struct device *dev, const char *id) | 92 | struct device *dev, const char *id) |
| 79 | { | 93 | { |
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index a19ddacdac30..f4265039a94c 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
| @@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3) | |||
| 78 | 78 | ||
| 79 | static inline int seccomp_mode(struct seccomp *s) | 79 | static inline int seccomp_mode(struct seccomp *s) |
| 80 | { | 80 | { |
| 81 | return 0; | 81 | return SECCOMP_MODE_DISABLED; |
| 82 | } | 82 | } |
| 83 | #endif /* CONFIG_SECCOMP */ | 83 | #endif /* CONFIG_SECCOMP */ |
| 84 | 84 | ||
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index d4c7271382cb..dde00defbaa5 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
| @@ -114,13 +114,22 @@ int seq_open(struct file *, const struct seq_operations *); | |||
| 114 | ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); | 114 | ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); |
| 115 | loff_t seq_lseek(struct file *, loff_t, int); | 115 | loff_t seq_lseek(struct file *, loff_t, int); |
| 116 | int seq_release(struct inode *, struct file *); | 116 | int seq_release(struct inode *, struct file *); |
| 117 | int seq_escape(struct seq_file *, const char *, const char *); | ||
| 118 | int seq_putc(struct seq_file *m, char c); | ||
| 119 | int seq_puts(struct seq_file *m, const char *s); | ||
| 120 | int seq_write(struct seq_file *seq, const void *data, size_t len); | 117 | int seq_write(struct seq_file *seq, const void *data, size_t len); |
| 121 | 118 | ||
| 122 | __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); | 119 | __printf(2, 0) |
| 123 | __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); | 120 | void seq_vprintf(struct seq_file *m, const char *fmt, va_list args); |
| 121 | __printf(2, 3) | ||
| 122 | void seq_printf(struct seq_file *m, const char *fmt, ...); | ||
| 123 | void seq_putc(struct seq_file *m, char c); | ||
| 124 | void seq_puts(struct seq_file *m, const char *s); | ||
| 125 | void seq_put_decimal_ull(struct seq_file *m, char delimiter, | ||
| 126 | unsigned long long num); | ||
| 127 | void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num); | ||
| 128 | void seq_escape(struct seq_file *m, const char *s, const char *esc); | ||
| 129 | |||
| 130 | void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, | ||
| 131 | int rowsize, int groupsize, const void *buf, size_t len, | ||
| 132 | bool ascii); | ||
| 124 | 133 | ||
| 125 | int seq_path(struct seq_file *, const struct path *, const char *); | 134 | int seq_path(struct seq_file *, const struct path *, const char *); |
| 126 | int seq_file_path(struct seq_file *, struct file *, const char *); | 135 | int seq_file_path(struct seq_file *, struct file *, const char *); |
| @@ -134,10 +143,6 @@ int single_release(struct inode *, struct file *); | |||
| 134 | void *__seq_open_private(struct file *, const struct seq_operations *, int); | 143 | void *__seq_open_private(struct file *, const struct seq_operations *, int); |
| 135 | int seq_open_private(struct file *, const struct seq_operations *, int); | 144 | int seq_open_private(struct file *, const struct seq_operations *, int); |
| 136 | int seq_release_private(struct inode *, struct file *); | 145 | int seq_release_private(struct inode *, struct file *); |
| 137 | int seq_put_decimal_ull(struct seq_file *m, char delimiter, | ||
| 138 | unsigned long long num); | ||
| 139 | int seq_put_decimal_ll(struct seq_file *m, char delimiter, | ||
| 140 | long long num); | ||
| 141 | 146 | ||
| 142 | static inline struct user_namespace *seq_user_ns(struct seq_file *seq) | 147 | static inline struct user_namespace *seq_user_ns(struct seq_file *seq) |
| 143 | { | 148 | { |
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 71f711db4500..dabe643eb5fa 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h | |||
| @@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf) | |||
| 48 | #define ESCAPE_HEX 0x20 | 48 | #define ESCAPE_HEX 0x20 |
| 49 | 49 | ||
| 50 | int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, | 50 | int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, |
| 51 | unsigned int flags, const char *esc); | 51 | unsigned int flags, const char *only); |
| 52 | 52 | ||
| 53 | static inline int string_escape_mem_any_np(const char *src, size_t isz, | 53 | static inline int string_escape_mem_any_np(const char *src, size_t isz, |
| 54 | char *dst, size_t osz, const char *esc) | 54 | char *dst, size_t osz, const char *only) |
| 55 | { | 55 | { |
| 56 | return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc); | 56 | return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static inline int string_escape_str(const char *src, char *dst, size_t sz, | 59 | static inline int string_escape_str(const char *src, char *dst, size_t sz, |
| 60 | unsigned int flags, const char *esc) | 60 | unsigned int flags, const char *only) |
| 61 | { | 61 | { |
| 62 | return string_escape_mem(src, strlen(src), dst, sz, flags, esc); | 62 | return string_escape_mem(src, strlen(src), dst, sz, flags, only); |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | static inline int string_escape_str_any_np(const char *src, char *dst, | 65 | static inline int string_escape_str_any_np(const char *src, char *dst, |
| 66 | size_t sz, const char *esc) | 66 | size_t sz, const char *only) |
| 67 | { | 67 | { |
| 68 | return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc); | 68 | return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | #endif | 71 | #endif |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index d5ee6d8b7c58..7ccc961f33e9 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
| @@ -132,6 +132,7 @@ struct svcxprt_rdma { | |||
| 132 | struct list_head sc_accept_q; /* Conn. waiting accept */ | 132 | struct list_head sc_accept_q; /* Conn. waiting accept */ |
| 133 | int sc_ord; /* RDMA read limit */ | 133 | int sc_ord; /* RDMA read limit */ |
| 134 | int sc_max_sge; | 134 | int sc_max_sge; |
| 135 | int sc_max_sge_rd; /* max sge for read target */ | ||
| 135 | 136 | ||
| 136 | int sc_sq_depth; /* Depth of SQ */ | 137 | int sc_sq_depth; /* Depth of SQ */ |
| 137 | atomic_t sc_sq_count; /* Number of SQ WR on queue */ | 138 | atomic_t sc_sq_count; /* Number of SQ WR on queue */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 31496d201fdc..7ba7dccaf0e7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -351,7 +351,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages); | |||
| 351 | extern int kswapd_run(int nid); | 351 | extern int kswapd_run(int nid); |
| 352 | extern void kswapd_stop(int nid); | 352 | extern void kswapd_stop(int nid); |
| 353 | #ifdef CONFIG_MEMCG | 353 | #ifdef CONFIG_MEMCG |
| 354 | extern int mem_cgroup_swappiness(struct mem_cgroup *mem); | 354 | static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) |
| 355 | { | ||
| 356 | /* root ? */ | ||
| 357 | if (mem_cgroup_disabled() || !memcg->css.parent) | ||
| 358 | return vm_swappiness; | ||
| 359 | |||
| 360 | return memcg->swappiness; | ||
| 361 | } | ||
| 362 | |||
| 355 | #else | 363 | #else |
| 356 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) | 364 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) |
| 357 | { | 365 | { |
| @@ -398,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int); | |||
| 398 | extern struct page *lookup_swap_cache(swp_entry_t); | 406 | extern struct page *lookup_swap_cache(swp_entry_t); |
| 399 | extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, | 407 | extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, |
| 400 | struct vm_area_struct *vma, unsigned long addr); | 408 | struct vm_area_struct *vma, unsigned long addr); |
| 409 | extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, | ||
| 410 | struct vm_area_struct *vma, unsigned long addr, | ||
| 411 | bool *new_page_allocated); | ||
| 401 | extern struct page *swapin_readahead(swp_entry_t, gfp_t, | 412 | extern struct page *swapin_readahead(swp_entry_t, gfp_t, |
| 402 | struct vm_area_struct *vma, unsigned long addr); | 413 | struct vm_area_struct *vma, unsigned long addr); |
| 403 | 414 | ||
| @@ -431,6 +442,7 @@ extern unsigned int count_swap_pages(int, int); | |||
| 431 | extern sector_t map_swap_page(struct page *, struct block_device **); | 442 | extern sector_t map_swap_page(struct page *, struct block_device **); |
| 432 | extern sector_t swapdev_block(int, pgoff_t); | 443 | extern sector_t swapdev_block(int, pgoff_t); |
| 433 | extern int page_swapcount(struct page *); | 444 | extern int page_swapcount(struct page *); |
| 445 | extern int swp_swapcount(swp_entry_t entry); | ||
| 434 | extern struct swap_info_struct *page_swap_info(struct page *); | 446 | extern struct swap_info_struct *page_swap_info(struct page *); |
| 435 | extern int reuse_swap_page(struct page *); | 447 | extern int reuse_swap_page(struct page *); |
| 436 | extern int try_to_free_swap(struct page *); | 448 | extern int try_to_free_swap(struct page *); |
| @@ -522,6 +534,11 @@ static inline int page_swapcount(struct page *page) | |||
| 522 | return 0; | 534 | return 0; |
| 523 | } | 535 | } |
| 524 | 536 | ||
| 537 | static inline int swp_swapcount(swp_entry_t entry) | ||
| 538 | { | ||
| 539 | return 0; | ||
| 540 | } | ||
| 541 | |||
| 525 | #define reuse_swap_page(page) (page_mapcount(page) == 1) | 542 | #define reuse_swap_page(page) (page_mapcount(page) == 1) |
| 526 | 543 | ||
| 527 | static inline int try_to_free_swap(struct page *page) | 544 | static inline int try_to_free_swap(struct page *page) |
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index cedf3d3c373f..5c3a5f3e7eec 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
| @@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry) | |||
| 164 | #endif | 164 | #endif |
| 165 | 165 | ||
| 166 | #ifdef CONFIG_MEMORY_FAILURE | 166 | #ifdef CONFIG_MEMORY_FAILURE |
| 167 | |||
| 168 | extern atomic_long_t num_poisoned_pages __read_mostly; | ||
| 169 | |||
| 167 | /* | 170 | /* |
| 168 | * Support for hardware poisoned pages | 171 | * Support for hardware poisoned pages |
| 169 | */ | 172 | */ |
| @@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry) | |||
| 177 | { | 180 | { |
| 178 | return swp_type(entry) == SWP_HWPOISON; | 181 | return swp_type(entry) == SWP_HWPOISON; |
| 179 | } | 182 | } |
| 183 | |||
| 184 | static inline bool test_set_page_hwpoison(struct page *page) | ||
| 185 | { | ||
| 186 | return TestSetPageHWPoison(page); | ||
| 187 | } | ||
| 188 | |||
| 189 | static inline void num_poisoned_pages_inc(void) | ||
| 190 | { | ||
| 191 | atomic_long_inc(&num_poisoned_pages); | ||
| 192 | } | ||
| 193 | |||
| 194 | static inline void num_poisoned_pages_dec(void) | ||
| 195 | { | ||
| 196 | atomic_long_dec(&num_poisoned_pages); | ||
| 197 | } | ||
| 198 | |||
| 199 | static inline void num_poisoned_pages_add(long num) | ||
| 200 | { | ||
| 201 | atomic_long_add(num, &num_poisoned_pages); | ||
| 202 | } | ||
| 203 | |||
| 204 | static inline void num_poisoned_pages_sub(long num) | ||
| 205 | { | ||
| 206 | atomic_long_sub(num, &num_poisoned_pages); | ||
| 207 | } | ||
| 180 | #else | 208 | #else |
| 181 | 209 | ||
| 182 | static inline swp_entry_t make_hwpoison_entry(struct page *page) | 210 | static inline swp_entry_t make_hwpoison_entry(struct page *page) |
| @@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp) | |||
| 188 | { | 216 | { |
| 189 | return 0; | 217 | return 0; |
| 190 | } | 218 | } |
| 219 | |||
| 220 | static inline bool test_set_page_hwpoison(struct page *page) | ||
| 221 | { | ||
| 222 | return false; | ||
| 223 | } | ||
| 224 | |||
| 225 | static inline void num_poisoned_pages_inc(void) | ||
| 226 | { | ||
| 227 | } | ||
| 191 | #endif | 228 | #endif |
| 192 | 229 | ||
| 193 | #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) | 230 | #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 08001317aee7..a460e2ef2843 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -885,4 +885,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, | |||
| 885 | const char __user *const __user *argv, | 885 | const char __user *const __user *argv, |
| 886 | const char __user *const __user *envp, int flags); | 886 | const char __user *const __user *envp, int flags); |
| 887 | 887 | ||
| 888 | asmlinkage long sys_membarrier(int cmd, int flags); | ||
| 889 | |||
| 888 | #endif | 890 | #endif |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 037e9df2f610..17292fee8686 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -92,23 +92,19 @@ struct thermal_zone_device_ops { | |||
| 92 | struct thermal_cooling_device *); | 92 | struct thermal_cooling_device *); |
| 93 | int (*unbind) (struct thermal_zone_device *, | 93 | int (*unbind) (struct thermal_zone_device *, |
| 94 | struct thermal_cooling_device *); | 94 | struct thermal_cooling_device *); |
| 95 | int (*get_temp) (struct thermal_zone_device *, unsigned long *); | 95 | int (*get_temp) (struct thermal_zone_device *, int *); |
| 96 | int (*get_mode) (struct thermal_zone_device *, | 96 | int (*get_mode) (struct thermal_zone_device *, |
| 97 | enum thermal_device_mode *); | 97 | enum thermal_device_mode *); |
| 98 | int (*set_mode) (struct thermal_zone_device *, | 98 | int (*set_mode) (struct thermal_zone_device *, |
| 99 | enum thermal_device_mode); | 99 | enum thermal_device_mode); |
| 100 | int (*get_trip_type) (struct thermal_zone_device *, int, | 100 | int (*get_trip_type) (struct thermal_zone_device *, int, |
| 101 | enum thermal_trip_type *); | 101 | enum thermal_trip_type *); |
| 102 | int (*get_trip_temp) (struct thermal_zone_device *, int, | 102 | int (*get_trip_temp) (struct thermal_zone_device *, int, int *); |
| 103 | unsigned long *); | 103 | int (*set_trip_temp) (struct thermal_zone_device *, int, int); |
| 104 | int (*set_trip_temp) (struct thermal_zone_device *, int, | 104 | int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); |
| 105 | unsigned long); | 105 | int (*set_trip_hyst) (struct thermal_zone_device *, int, int); |
| 106 | int (*get_trip_hyst) (struct thermal_zone_device *, int, | 106 | int (*get_crit_temp) (struct thermal_zone_device *, int *); |
| 107 | unsigned long *); | 107 | int (*set_emul_temp) (struct thermal_zone_device *, int); |
| 108 | int (*set_trip_hyst) (struct thermal_zone_device *, int, | ||
| 109 | unsigned long); | ||
| 110 | int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); | ||
| 111 | int (*set_emul_temp) (struct thermal_zone_device *, unsigned long); | ||
| 112 | int (*get_trend) (struct thermal_zone_device *, int, | 108 | int (*get_trend) (struct thermal_zone_device *, int, |
| 113 | enum thermal_trend *); | 109 | enum thermal_trend *); |
| 114 | int (*notify) (struct thermal_zone_device *, int, | 110 | int (*notify) (struct thermal_zone_device *, int, |
| @@ -332,9 +328,9 @@ struct thermal_genl_event { | |||
| 332 | * temperature. | 328 | * temperature. |
| 333 | */ | 329 | */ |
| 334 | struct thermal_zone_of_device_ops { | 330 | struct thermal_zone_of_device_ops { |
| 335 | int (*get_temp)(void *, long *); | 331 | int (*get_temp)(void *, int *); |
| 336 | int (*get_trend)(void *, long *); | 332 | int (*get_trend)(void *, long *); |
| 337 | int (*set_emul_temp)(void *, unsigned long); | 333 | int (*set_emul_temp)(void *, int); |
| 338 | }; | 334 | }; |
| 339 | 335 | ||
| 340 | /** | 336 | /** |
| @@ -406,7 +402,7 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *, | |||
| 406 | const struct thermal_cooling_device_ops *); | 402 | const struct thermal_cooling_device_ops *); |
| 407 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); | 403 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); |
| 408 | struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); | 404 | struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); |
| 409 | int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp); | 405 | int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); |
| 410 | 406 | ||
| 411 | int get_tz_trend(struct thermal_zone_device *, int); | 407 | int get_tz_trend(struct thermal_zone_device *, int); |
| 412 | struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, | 408 | struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, |
| @@ -457,7 +453,7 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name( | |||
| 457 | const char *name) | 453 | const char *name) |
| 458 | { return ERR_PTR(-ENODEV); } | 454 | { return ERR_PTR(-ENODEV); } |
| 459 | static inline int thermal_zone_get_temp( | 455 | static inline int thermal_zone_get_temp( |
| 460 | struct thermal_zone_device *tz, unsigned long *temp) | 456 | struct thermal_zone_device *tz, int *temp) |
| 461 | { return -ENODEV; } | 457 | { return -ENODEV; } |
| 462 | static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) | 458 | static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) |
| 463 | { return -ENODEV; } | 459 | { return -ENODEV; } |
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h index ac34819214f9..da2049b5161c 100644 --- a/include/linux/verify_pefile.h +++ b/include/linux/verify_pefile.h | |||
| @@ -12,7 +12,11 @@ | |||
| 12 | #ifndef _LINUX_VERIFY_PEFILE_H | 12 | #ifndef _LINUX_VERIFY_PEFILE_H |
| 13 | #define _LINUX_VERIFY_PEFILE_H | 13 | #define _LINUX_VERIFY_PEFILE_H |
| 14 | 14 | ||
| 15 | #include <crypto/public_key.h> | ||
| 16 | |||
| 15 | extern int verify_pefile_signature(const void *pebuf, unsigned pelen, | 17 | extern int verify_pefile_signature(const void *pebuf, unsigned pelen, |
| 16 | struct key *trusted_keyring, bool *_trusted); | 18 | struct key *trusted_keyring, |
| 19 | enum key_being_used_for usage, | ||
| 20 | bool *_trusted); | ||
| 17 | 21 | ||
| 18 | #endif /* _LINUX_VERIFY_PEFILE_H */ | 22 | #endif /* _LINUX_VERIFY_PEFILE_H */ |
diff --git a/include/linux/zbud.h b/include/linux/zbud.h index f9d41a6e361f..e183a0a65ac1 100644 --- a/include/linux/zbud.h +++ b/include/linux/zbud.h | |||
| @@ -9,7 +9,7 @@ struct zbud_ops { | |||
| 9 | int (*evict)(struct zbud_pool *pool, unsigned long handle); | 9 | int (*evict)(struct zbud_pool *pool, unsigned long handle); |
| 10 | }; | 10 | }; |
| 11 | 11 | ||
| 12 | struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); | 12 | struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops); |
| 13 | void zbud_destroy_pool(struct zbud_pool *pool); | 13 | void zbud_destroy_pool(struct zbud_pool *pool); |
| 14 | int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, | 14 | int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, |
| 15 | unsigned long *handle); | 15 | unsigned long *handle); |
diff --git a/include/linux/zpool.h b/include/linux/zpool.h index d30eff3d84d5..42f8ec992452 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h | |||
| @@ -36,8 +36,10 @@ enum zpool_mapmode { | |||
| 36 | ZPOOL_MM_DEFAULT = ZPOOL_MM_RW | 36 | ZPOOL_MM_DEFAULT = ZPOOL_MM_RW |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | bool zpool_has_pool(char *type); | ||
| 40 | |||
| 39 | struct zpool *zpool_create_pool(char *type, char *name, | 41 | struct zpool *zpool_create_pool(char *type, char *name, |
| 40 | gfp_t gfp, struct zpool_ops *ops); | 42 | gfp_t gfp, const struct zpool_ops *ops); |
| 41 | 43 | ||
| 42 | char *zpool_get_type(struct zpool *pool); | 44 | char *zpool_get_type(struct zpool *pool); |
| 43 | 45 | ||
| @@ -81,7 +83,7 @@ struct zpool_driver { | |||
| 81 | atomic_t refcount; | 83 | atomic_t refcount; |
| 82 | struct list_head list; | 84 | struct list_head list; |
| 83 | 85 | ||
| 84 | void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops, | 86 | void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops, |
| 85 | struct zpool *zpool); | 87 | struct zpool *zpool); |
| 86 | void (*destroy)(void *pool); | 88 | void (*destroy)(void *pool); |
| 87 | 89 | ||
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 1338190b5478..6398dfae53f1 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h | |||
| @@ -34,6 +34,11 @@ enum zs_mapmode { | |||
| 34 | */ | 34 | */ |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | struct zs_pool_stats { | ||
| 38 | /* How many pages were migrated (freed) */ | ||
| 39 | unsigned long pages_compacted; | ||
| 40 | }; | ||
| 41 | |||
| 37 | struct zs_pool; | 42 | struct zs_pool; |
| 38 | 43 | ||
| 39 | struct zs_pool *zs_create_pool(char *name, gfp_t flags); | 44 | struct zs_pool *zs_create_pool(char *name, gfp_t flags); |
| @@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle); | |||
| 49 | unsigned long zs_get_total_pages(struct zs_pool *pool); | 54 | unsigned long zs_get_total_pages(struct zs_pool *pool); |
| 50 | unsigned long zs_compact(struct zs_pool *pool); | 55 | unsigned long zs_compact(struct zs_pool *pool); |
| 51 | 56 | ||
| 57 | void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); | ||
| 52 | #endif | 58 | #endif |
