Diffstat (limited to 'include')
70 files changed, 1637 insertions, 344 deletions
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 02549017212a..2a5f64a11b77 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@ | |||
21 | #include <linux/dmaengine.h> | 21 | #include <linux/dmaengine.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | 23 | ||
24 | struct pl08x_lli; | ||
25 | struct pl08x_driver_data; | 24 | struct pl08x_driver_data; |
25 | struct pl08x_phy_chan; | ||
26 | struct pl08x_txd; | ||
26 | 27 | ||
27 | /* Bitmasks for selecting AHB ports for DMA transfers */ | 28 | /* Bitmasks for selecting AHB ports for DMA transfers */ |
28 | enum { | 29 | enum { |
@@ -46,170 +47,29 @@ enum { | |||
46 | * devices with static assignments | 47 | * devices with static assignments |
47 | * @muxval: a number usually used to poke into some mux regiser to | 48 | * @muxval: a number usually used to poke into some mux regiser to |
48 | * mux in the signal to this channel | 49 | * mux in the signal to this channel |
49 | * @cctl_opt: default options for the channel control register | 50 | * @cctl_memcpy: options for the channel control register for memcpy |
51 | * *** not used for slave channels *** | ||
50 | * @addr: source/target address in physical memory for this DMA channel, | 52 | * @addr: source/target address in physical memory for this DMA channel, |
51 | * can be the address of a FIFO register for burst requests for example. | 53 | * can be the address of a FIFO register for burst requests for example. |
52 | * This can be left undefined if the PrimeCell API is used for configuring | 54 | * This can be left undefined if the PrimeCell API is used for configuring |
53 | * this. | 55 | * this. |
54 | * @circular_buffer: whether the buffer passed in is circular and | ||
55 | * shall simply be looped round round (like a record baby round | ||
56 | * round round round) | ||
57 | * @single: the device connected to this channel will request single DMA | 56 | * @single: the device connected to this channel will request single DMA |
58 | * transfers, not bursts. (Bursts are default.) | 57 | * transfers, not bursts. (Bursts are default.) |
59 | * @periph_buses: the device connected to this channel is accessible via | 58 | * @periph_buses: the device connected to this channel is accessible via |
60 | * these buses (use PL08X_AHB1 | PL08X_AHB2). | 59 | * these buses (use PL08X_AHB1 | PL08X_AHB2). |
61 | */ | 60 | */ |
62 | struct pl08x_channel_data { | 61 | struct pl08x_channel_data { |
63 | char *bus_id; | 62 | const char *bus_id; |
64 | int min_signal; | 63 | int min_signal; |
65 | int max_signal; | 64 | int max_signal; |
66 | u32 muxval; | 65 | u32 muxval; |
67 | u32 cctl; | 66 | u32 cctl_memcpy; |
68 | dma_addr_t addr; | 67 | dma_addr_t addr; |
69 | bool circular_buffer; | ||
70 | bool single; | 68 | bool single; |
71 | u8 periph_buses; | 69 | u8 periph_buses; |
72 | }; | 70 | }; |
73 | 71 | ||
74 | /** | 72 | /** |
75 | * Struct pl08x_bus_data - information of source or destination | ||
76 | * busses for a transfer | ||
77 | * @addr: current address | ||
78 | * @maxwidth: the maximum width of a transfer on this bus | ||
79 | * @buswidth: the width of this bus in bytes: 1, 2 or 4 | ||
80 | */ | ||
81 | struct pl08x_bus_data { | ||
82 | dma_addr_t addr; | ||
83 | u8 maxwidth; | ||
84 | u8 buswidth; | ||
85 | }; | ||
86 | |||
87 | /** | ||
88 | * struct pl08x_phy_chan - holder for the physical channels | ||
89 | * @id: physical index to this channel | ||
90 | * @lock: a lock to use when altering an instance of this struct | ||
91 | * @signal: the physical signal (aka channel) serving this physical channel | ||
92 | * right now | ||
93 | * @serving: the virtual channel currently being served by this physical | ||
94 | * channel | ||
95 | * @locked: channel unavailable for the system, e.g. dedicated to secure | ||
96 | * world | ||
97 | */ | ||
98 | struct pl08x_phy_chan { | ||
99 | unsigned int id; | ||
100 | void __iomem *base; | ||
101 | spinlock_t lock; | ||
102 | int signal; | ||
103 | struct pl08x_dma_chan *serving; | ||
104 | bool locked; | ||
105 | }; | ||
106 | |||
107 | /** | ||
108 | * struct pl08x_sg - structure containing data per sg | ||
109 | * @src_addr: src address of sg | ||
110 | * @dst_addr: dst address of sg | ||
111 | * @len: transfer len in bytes | ||
112 | * @node: node for txd's dsg_list | ||
113 | */ | ||
114 | struct pl08x_sg { | ||
115 | dma_addr_t src_addr; | ||
116 | dma_addr_t dst_addr; | ||
117 | size_t len; | ||
118 | struct list_head node; | ||
119 | }; | ||
120 | |||
121 | /** | ||
122 | * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor | ||
123 | * @tx: async tx descriptor | ||
124 | * @node: node for txd list for channels | ||
125 | * @dsg_list: list of children sg's | ||
126 | * @direction: direction of transfer | ||
127 | * @llis_bus: DMA memory address (physical) start for the LLIs | ||
128 | * @llis_va: virtual memory address start for the LLIs | ||
129 | * @cctl: control reg values for current txd | ||
130 | * @ccfg: config reg values for current txd | ||
131 | */ | ||
132 | struct pl08x_txd { | ||
133 | struct dma_async_tx_descriptor tx; | ||
134 | struct list_head node; | ||
135 | struct list_head dsg_list; | ||
136 | enum dma_transfer_direction direction; | ||
137 | dma_addr_t llis_bus; | ||
138 | struct pl08x_lli *llis_va; | ||
139 | /* Default cctl value for LLIs */ | ||
140 | u32 cctl; | ||
141 | /* | ||
142 | * Settings to be put into the physical channel when we | ||
143 | * trigger this txd. Other registers are in llis_va[0]. | ||
144 | */ | ||
145 | u32 ccfg; | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel | ||
150 | * states | ||
151 | * @PL08X_CHAN_IDLE: the channel is idle | ||
152 | * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport | ||
153 | * channel and is running a transfer on it | ||
154 | * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport | ||
155 | * channel, but the transfer is currently paused | ||
156 | * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport | ||
157 | * channel to become available (only pertains to memcpy channels) | ||
158 | */ | ||
159 | enum pl08x_dma_chan_state { | ||
160 | PL08X_CHAN_IDLE, | ||
161 | PL08X_CHAN_RUNNING, | ||
162 | PL08X_CHAN_PAUSED, | ||
163 | PL08X_CHAN_WAITING, | ||
164 | }; | ||
165 | |||
166 | /** | ||
167 | * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel | ||
168 | * @chan: wrappped abstract channel | ||
169 | * @phychan: the physical channel utilized by this channel, if there is one | ||
170 | * @phychan_hold: if non-zero, hold on to the physical channel even if we | ||
171 | * have no pending entries | ||
172 | * @tasklet: tasklet scheduled by the IRQ to handle actual work etc | ||
173 | * @name: name of channel | ||
174 | * @cd: channel platform data | ||
175 | * @runtime_addr: address for RX/TX according to the runtime config | ||
176 | * @runtime_direction: current direction of this channel according to | ||
177 | * runtime config | ||
178 | * @pend_list: queued transactions pending on this channel | ||
179 | * @at: active transaction on this channel | ||
180 | * @lock: a lock for this channel data | ||
181 | * @host: a pointer to the host (internal use) | ||
182 | * @state: whether the channel is idle, paused, running etc | ||
183 | * @slave: whether this channel is a device (slave) or for memcpy | ||
184 | * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave | ||
185 | * channels. Fill with 'true' if peripheral should be flow controller. Direction | ||
186 | * will be selected at Runtime. | ||
187 | * @waiting: a TX descriptor on this channel which is waiting for a physical | ||
188 | * channel to become available | ||
189 | */ | ||
190 | struct pl08x_dma_chan { | ||
191 | struct dma_chan chan; | ||
192 | struct pl08x_phy_chan *phychan; | ||
193 | int phychan_hold; | ||
194 | struct tasklet_struct tasklet; | ||
195 | char *name; | ||
196 | const struct pl08x_channel_data *cd; | ||
197 | dma_addr_t src_addr; | ||
198 | dma_addr_t dst_addr; | ||
199 | u32 src_cctl; | ||
200 | u32 dst_cctl; | ||
201 | enum dma_transfer_direction runtime_direction; | ||
202 | struct list_head pend_list; | ||
203 | struct pl08x_txd *at; | ||
204 | spinlock_t lock; | ||
205 | struct pl08x_driver_data *host; | ||
206 | enum pl08x_dma_chan_state state; | ||
207 | bool slave; | ||
208 | bool device_fc; | ||
209 | struct pl08x_txd *waiting; | ||
210 | }; | ||
211 | |||
212 | /** | ||
213 | * struct pl08x_platform_data - the platform configuration for the PL08x | 73 | * struct pl08x_platform_data - the platform configuration for the PL08x |
214 | * PrimeCells. | 74 | * PrimeCells. |
215 | * @slave_channels: the channels defined for the different devices on the | 75 | * @slave_channels: the channels defined for the different devices on the |
@@ -229,8 +89,8 @@ struct pl08x_platform_data { | |||
229 | const struct pl08x_channel_data *slave_channels; | 89 | const struct pl08x_channel_data *slave_channels; |
230 | unsigned int num_slave_channels; | 90 | unsigned int num_slave_channels; |
231 | struct pl08x_channel_data memcpy_channel; | 91 | struct pl08x_channel_data memcpy_channel; |
232 | int (*get_signal)(struct pl08x_dma_chan *); | 92 | int (*get_signal)(const struct pl08x_channel_data *); |
233 | void (*put_signal)(struct pl08x_dma_chan *); | 93 | void (*put_signal)(const struct pl08x_channel_data *, int); |
234 | u8 lli_buses; | 94 | u8 lli_buses; |
235 | u8 mem_buses; | 95 | u8 mem_buses; |
236 | }; | 96 | }; |
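A minimal board-file sketch of the reworked platform data, using only the fields and callback signatures visible in this hunk; the channel values, names and helpers are illustrative, not taken from any real board:

static int board_get_signal(const struct pl08x_channel_data *cd)
{
	return cd->min_signal;	/* static request-line assignment */
}

static void board_put_signal(const struct pl08x_channel_data *cd, int signal)
{
	/* nothing to undo for a fixed mux setting */
}

static const struct pl08x_channel_data board_slave_channels[] = {
	{
		.bus_id		= "uart0_tx",
		.min_signal	= 0,
		.max_signal	= 0,
		.periph_buses	= PL08X_AHB2,
	},
};

static struct pl08x_platform_data board_pl08x_pdata = {
	.slave_channels		= board_slave_channels,
	.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
	.get_signal		= board_get_signal,
	.put_signal		= board_put_signal,
	.lli_buses		= PL08X_AHB1,
	.mem_buses		= PL08X_AHB1,
};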
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 22f292a917a3..36abf2aa7e68 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -130,6 +130,7 @@ | |||
130 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 | 130 | #define AUDIT_LAST_KERN_ANOM_MSG 1799 |
131 | #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ | 131 | #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ |
132 | #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ | 132 | #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ |
133 | #define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */ | ||
133 | #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ | 134 | #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ |
134 | #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ | 135 | #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ |
135 | #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ | 136 | #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ |
@@ -687,6 +688,8 @@ extern void audit_log_d_path(struct audit_buffer *ab, | |||
687 | const struct path *path); | 688 | const struct path *path); |
688 | extern void audit_log_key(struct audit_buffer *ab, | 689 | extern void audit_log_key(struct audit_buffer *ab, |
689 | char *key); | 690 | char *key); |
691 | extern void audit_log_link_denied(const char *operation, | ||
692 | struct path *link); | ||
690 | extern void audit_log_lost(const char *message); | 693 | extern void audit_log_lost(const char *message); |
691 | #ifdef CONFIG_SECURITY | 694 | #ifdef CONFIG_SECURITY |
692 | extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); | 695 | extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); |
@@ -716,6 +719,7 @@ extern int audit_enabled; | |||
716 | #define audit_log_untrustedstring(a,s) do { ; } while (0) | 719 | #define audit_log_untrustedstring(a,s) do { ; } while (0) |
717 | #define audit_log_d_path(b, p, d) do { ; } while (0) | 720 | #define audit_log_d_path(b, p, d) do { ; } while (0) |
718 | #define audit_log_key(b, k) do { ; } while (0) | 721 | #define audit_log_key(b, k) do { ; } while (0) |
722 | #define audit_log_link_denied(o, l) do { ; } while (0) | ||
719 | #define audit_log_secctx(b,s) do { ; } while (0) | 723 | #define audit_log_secctx(b,s) do { ; } while (0) |
720 | #define audit_enabled 0 | 724 | #define audit_enabled 0 |
721 | #endif | 725 | #endif |
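A hedged sketch of how the new audit_log_link_denied() declaration might be called from a link-permission check; the policy test itself is hypothetical:

static int may_follow_link_sketch(struct path *link)
{
	if (!link_ok_by_policy(link)) {		/* hypothetical policy check */
		audit_log_link_denied("follow_link", link);
		return -EACCES;
	}
	return 0;
}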
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 489de625cd25..c97c6b9cd38e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
18 | #include <linux/writeback.h> | 18 | #include <linux/writeback.h> |
19 | #include <linux/atomic.h> | 19 | #include <linux/atomic.h> |
20 | #include <linux/sysctl.h> | ||
20 | 21 | ||
21 | struct page; | 22 | struct page; |
22 | struct device; | 23 | struct device; |
@@ -304,6 +305,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync); | |||
304 | void set_bdi_congested(struct backing_dev_info *bdi, int sync); | 305 | void set_bdi_congested(struct backing_dev_info *bdi, int sync); |
305 | long congestion_wait(int sync, long timeout); | 306 | long congestion_wait(int sync, long timeout); |
306 | long wait_iff_congested(struct zone *zone, int sync, long timeout); | 307 | long wait_iff_congested(struct zone *zone, int sync, long timeout); |
308 | int pdflush_proc_obsolete(struct ctl_table *table, int write, | ||
309 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
307 | 310 | ||
308 | static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) | 311 | static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) |
309 | { | 312 | { |
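A sketch of the kind of sysctl table entry pdflush_proc_obsolete() is declared to back: a legacy knob that stays readable but is routed through the compatibility handler. Table name and mode here are illustrative:

static struct ctl_table legacy_vm_table_sketch[] = {
	{
		.procname	= "nr_pdflush_threads",
		.mode		= 0444,
		.proc_handler	= pdflush_proc_obsolete,
	},
	{ }
};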
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0edb65dd8edd..7b7ac9ccec7a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -160,6 +160,7 @@ enum rq_flag_bits { | |||
160 | __REQ_FLUSH_SEQ, /* request for flush sequence */ | 160 | __REQ_FLUSH_SEQ, /* request for flush sequence */ |
161 | __REQ_IO_STAT, /* account I/O stat */ | 161 | __REQ_IO_STAT, /* account I/O stat */ |
162 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | 162 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ |
163 | __REQ_KERNEL, /* direct IO to kernel pages */ | ||
163 | __REQ_NR_BITS, /* stops here */ | 164 | __REQ_NR_BITS, /* stops here */ |
164 | }; | 165 | }; |
165 | 166 | ||
@@ -201,5 +202,6 @@ enum rq_flag_bits { | |||
201 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | 202 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) |
202 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | 203 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) |
203 | #define REQ_SECURE (1 << __REQ_SECURE) | 204 | #define REQ_SECURE (1 << __REQ_SECURE) |
205 | #define REQ_KERNEL (1 << __REQ_KERNEL) | ||
204 | 206 | ||
205 | #endif /* __LINUX_BLK_TYPES_H */ | 207 | #endif /* __LINUX_BLK_TYPES_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 07954b05b86c..4e72a9d48232 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,16 +46,23 @@ struct blkcg_gq; | |||
46 | struct request; | 46 | struct request; |
47 | typedef void (rq_end_io_fn)(struct request *, int); | 47 | typedef void (rq_end_io_fn)(struct request *, int); |
48 | 48 | ||
49 | #define BLK_RL_SYNCFULL (1U << 0) | ||
50 | #define BLK_RL_ASYNCFULL (1U << 1) | ||
51 | |||
49 | struct request_list { | 52 | struct request_list { |
53 | struct request_queue *q; /* the queue this rl belongs to */ | ||
54 | #ifdef CONFIG_BLK_CGROUP | ||
55 | struct blkcg_gq *blkg; /* blkg this request pool belongs to */ | ||
56 | #endif | ||
50 | /* | 57 | /* |
51 | * count[], starved[], and wait[] are indexed by | 58 | * count[], starved[], and wait[] are indexed by |
52 | * BLK_RW_SYNC/BLK_RW_ASYNC | 59 | * BLK_RW_SYNC/BLK_RW_ASYNC |
53 | */ | 60 | */ |
54 | int count[2]; | 61 | int count[2]; |
55 | int starved[2]; | 62 | int starved[2]; |
56 | int elvpriv; | 63 | mempool_t *rq_pool; |
57 | mempool_t *rq_pool; | 64 | wait_queue_head_t wait[2]; |
58 | wait_queue_head_t wait[2]; | 65 | unsigned int flags; |
59 | }; | 66 | }; |
60 | 67 | ||
61 | /* | 68 | /* |
@@ -138,6 +145,7 @@ struct request { | |||
138 | struct hd_struct *part; | 145 | struct hd_struct *part; |
139 | unsigned long start_time; | 146 | unsigned long start_time; |
140 | #ifdef CONFIG_BLK_CGROUP | 147 | #ifdef CONFIG_BLK_CGROUP |
148 | struct request_list *rl; /* rl this rq is alloced from */ | ||
141 | unsigned long long start_time_ns; | 149 | unsigned long long start_time_ns; |
142 | unsigned long long io_start_time_ns; /* when passed to hardware */ | 150 | unsigned long long io_start_time_ns; /* when passed to hardware */ |
143 | #endif | 151 | #endif |
@@ -282,11 +290,16 @@ struct request_queue { | |||
282 | struct list_head queue_head; | 290 | struct list_head queue_head; |
283 | struct request *last_merge; | 291 | struct request *last_merge; |
284 | struct elevator_queue *elevator; | 292 | struct elevator_queue *elevator; |
293 | int nr_rqs[2]; /* # allocated [a]sync rqs */ | ||
294 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ | ||
285 | 295 | ||
286 | /* | 296 | /* |
287 | * the queue request freelist, one for reads and one for writes | 297 | * If blkcg is not used, @q->root_rl serves all requests. If blkcg |
298 | * is used, root blkg allocates from @q->root_rl and all other | ||
299 | * blkgs from their own blkg->rl. Which one to use should be | ||
300 | * determined using bio_request_list(). | ||
288 | */ | 301 | */ |
289 | struct request_list rq; | 302 | struct request_list root_rl; |
290 | 303 | ||
291 | request_fn_proc *request_fn; | 304 | request_fn_proc *request_fn; |
292 | make_request_fn *make_request_fn; | 305 | make_request_fn *make_request_fn; |
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq) | |||
561 | return rw_is_sync(rq->cmd_flags); | 574 | return rw_is_sync(rq->cmd_flags); |
562 | } | 575 | } |
563 | 576 | ||
564 | static inline int blk_queue_full(struct request_queue *q, int sync) | 577 | static inline bool blk_rl_full(struct request_list *rl, bool sync) |
565 | { | 578 | { |
566 | if (sync) | 579 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
567 | return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); | 580 | |
568 | return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); | 581 | return rl->flags & flag; |
569 | } | 582 | } |
570 | 583 | ||
571 | static inline void blk_set_queue_full(struct request_queue *q, int sync) | 584 | static inline void blk_set_rl_full(struct request_list *rl, bool sync) |
572 | { | 585 | { |
573 | if (sync) | 586 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
574 | queue_flag_set(QUEUE_FLAG_SYNCFULL, q); | 587 | |
575 | else | 588 | rl->flags |= flag; |
576 | queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); | ||
577 | } | 589 | } |
578 | 590 | ||
579 | static inline void blk_clear_queue_full(struct request_queue *q, int sync) | 591 | static inline void blk_clear_rl_full(struct request_list *rl, bool sync) |
580 | { | 592 | { |
581 | if (sync) | 593 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
582 | queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); | 594 | |
583 | else | 595 | rl->flags &= ~flag; |
584 | queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); | ||
585 | } | 596 | } |
586 | 597 | ||
587 | 598 | ||
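A small sketch of how the per-request_list full flags replace the old per-queue QUEUE_FLAG_*FULL logic; the surrounding allocator arithmetic is paraphrased, not copied from the patch:

static void update_rl_full_sketch(struct request_queue *q,
				  struct request_list *rl, bool sync)
{
	if (rl->count[sync] + 1 >= q->nr_requests) {
		if (!blk_rl_full(rl, sync))
			blk_set_rl_full(rl, sync);
	} else if (blk_rl_full(rl, sync)) {
		blk_clear_rl_full(rl, sync);
	}
}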
@@ -911,11 +922,15 @@ struct blk_plug { | |||
911 | }; | 922 | }; |
912 | #define BLK_MAX_REQUEST_COUNT 16 | 923 | #define BLK_MAX_REQUEST_COUNT 16 |
913 | 924 | ||
925 | struct blk_plug_cb; | ||
926 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); | ||
914 | struct blk_plug_cb { | 927 | struct blk_plug_cb { |
915 | struct list_head list; | 928 | struct list_head list; |
916 | void (*callback)(struct blk_plug_cb *); | 929 | blk_plug_cb_fn callback; |
930 | void *data; | ||
917 | }; | 931 | }; |
918 | 932 | extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, | |
933 | void *data, int size); | ||
919 | extern void blk_start_plug(struct blk_plug *); | 934 | extern void blk_start_plug(struct blk_plug *); |
920 | extern void blk_finish_plug(struct blk_plug *); | 935 | extern void blk_finish_plug(struct blk_plug *); |
921 | extern void blk_flush_plug_list(struct blk_plug *, bool); | 936 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
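A hedged sketch of the pattern blk_check_plugged() enables: a driver embeds struct blk_plug_cb in its own structure, queues work while the task is plugged, and finishes it in the callback when the plug is flushed. All names below are illustrative:

struct my_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* submit everything gathered on mcb->pending, then release the cb */
	kfree(mcb);
}

static bool queue_if_plugged(void *dev_cookie, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct my_plug_cb *mcb;

	cb = blk_check_plugged(my_unplug, dev_cookie, sizeof(*mcb));
	if (!cb)
		return false;		/* not plugged: caller submits immediately */

	mcb = container_of(cb, struct my_plug_cb, cb);
	bio_list_add(&mcb->pending, bio);
	return true;
}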
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
index faf8a45af210..a8519446c111 100644
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -40,6 +40,7 @@ struct blkpg_ioctl_arg { | |||
40 | /* The subfunctions (for the op field) */ | 40 | /* The subfunctions (for the op field) */ |
41 | #define BLKPG_ADD_PARTITION 1 | 41 | #define BLKPG_ADD_PARTITION 1 |
42 | #define BLKPG_DEL_PARTITION 2 | 42 | #define BLKPG_DEL_PARTITION 2 |
43 | #define BLKPG_RESIZE_PARTITION 3 | ||
43 | 44 | ||
44 | /* Sizes of name fields. Unused at present. */ | 45 | /* Sizes of name fields. Unused at present. */ |
45 | #define BLKPG_DEVNAMELTH 64 | 46 | #define BLKPG_DEVNAMELTH 64 |
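A user-space sketch of the new subfunction; as with BLKPG_ADD_PARTITION, start and length are byte values and the partition number selects the target (values illustrative):

#include <linux/blkpg.h>
#include <sys/ioctl.h>

static int resize_partition(int fd, long long start, long long new_length)
{
	struct blkpg_partition part = {
		.pno	= 1,
		.start	= start,
		.length	= new_length,
	};
	struct blkpg_ioctl_arg arg = {
		.op	= BLKPG_RESIZE_PARTITION,
		.datalen = sizeof(part),
		.data	= &part,
	};

	return ioctl(fd, BLKPG, &arg);
}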
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index f55ab8cdc106..4d0fb3df2f4a 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result, | |||
67 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, | 67 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, |
68 | bsg_job_fn *job_fn, int dd_job_size); | 68 | bsg_job_fn *job_fn, int dd_job_size); |
69 | void bsg_request_fn(struct request_queue *q); | 69 | void bsg_request_fn(struct request_queue *q); |
70 | void bsg_remove_queue(struct request_queue *q); | ||
71 | void bsg_goose_queue(struct request_queue *q); | 70 | void bsg_goose_queue(struct request_queue *q); |
72 | 71 | ||
73 | #endif | 72 | #endif |
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 0bd390ce98b2..dfae957398c3 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -31,7 +31,7 @@ SUBSYS(cpuacct) | |||
31 | 31 | ||
32 | /* */ | 32 | /* */ |
33 | 33 | ||
34 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 34 | #ifdef CONFIG_MEMCG |
35 | SUBSYS(mem_cgroup) | 35 | SUBSYS(mem_cgroup) |
36 | #endif | 36 | #endif |
37 | 37 | ||
@@ -72,3 +72,9 @@ SUBSYS(net_prio) | |||
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | /* */ | 74 | /* */ |
75 | |||
76 | #ifdef CONFIG_CGROUP_HUGETLB | ||
77 | SUBSYS(hugetlb) | ||
78 | #endif | ||
79 | |||
80 | /* */ | ||
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d60..133ddcf83397 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -58,7 +58,7 @@ static inline bool compaction_deferred(struct zone *zone, int order) | |||
58 | if (++zone->compact_considered > defer_limit) | 58 | if (++zone->compact_considered > defer_limit) |
59 | zone->compact_considered = defer_limit; | 59 | zone->compact_considered = defer_limit; |
60 | 60 | ||
61 | return zone->compact_considered < (1UL << zone->compact_defer_shift); | 61 | return zone->compact_considered < defer_limit; |
62 | } | 62 | } |
63 | 63 | ||
64 | #else | 64 | #else |
@@ -85,7 +85,7 @@ static inline void defer_compaction(struct zone *zone, int order) | |||
85 | 85 | ||
86 | static inline bool compaction_deferred(struct zone *zone, int order) | 86 | static inline bool compaction_deferred(struct zone *zone, int order) |
87 | { | 87 | { |
88 | return 1; | 88 | return true; |
89 | } | 89 | } |
90 | 90 | ||
91 | #endif /* CONFIG_COMPACTION */ | 91 | #endif /* CONFIG_COMPACTION */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b178f9e91e23..38dba16c4176 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -165,6 +165,8 @@ struct inodes_stat_t { | |||
165 | #define READ 0 | 165 | #define READ 0 |
166 | #define WRITE RW_MASK | 166 | #define WRITE RW_MASK |
167 | #define READA RWA_MASK | 167 | #define READA RWA_MASK |
168 | #define KERNEL_READ (READ|REQ_KERNEL) | ||
169 | #define KERNEL_WRITE (WRITE|REQ_KERNEL) | ||
168 | 170 | ||
169 | #define READ_SYNC (READ | REQ_SYNC) | 171 | #define READ_SYNC (READ | REQ_SYNC) |
170 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) | 172 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) |
@@ -412,6 +414,7 @@ struct inodes_stat_t { | |||
412 | #include <linux/shrinker.h> | 414 | #include <linux/shrinker.h> |
413 | #include <linux/migrate_mode.h> | 415 | #include <linux/migrate_mode.h> |
414 | #include <linux/uidgid.h> | 416 | #include <linux/uidgid.h> |
417 | #include <linux/lockdep.h> | ||
415 | 418 | ||
416 | #include <asm/byteorder.h> | 419 | #include <asm/byteorder.h> |
417 | 420 | ||
@@ -427,6 +430,7 @@ struct kstatfs; | |||
427 | struct vm_area_struct; | 430 | struct vm_area_struct; |
428 | struct vfsmount; | 431 | struct vfsmount; |
429 | struct cred; | 432 | struct cred; |
433 | struct swap_info_struct; | ||
430 | 434 | ||
431 | extern void __init inode_init(void); | 435 | extern void __init inode_init(void); |
432 | extern void __init inode_init_early(void); | 436 | extern void __init inode_init_early(void); |
@@ -437,6 +441,8 @@ extern unsigned long get_max_files(void); | |||
437 | extern int sysctl_nr_open; | 441 | extern int sysctl_nr_open; |
438 | extern struct inodes_stat_t inodes_stat; | 442 | extern struct inodes_stat_t inodes_stat; |
439 | extern int leases_enable, lease_break_time; | 443 | extern int leases_enable, lease_break_time; |
444 | extern int sysctl_protected_symlinks; | ||
445 | extern int sysctl_protected_hardlinks; | ||
440 | 446 | ||
441 | struct buffer_head; | 447 | struct buffer_head; |
442 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | 448 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, |
@@ -636,6 +642,11 @@ struct address_space_operations { | |||
636 | int (*is_partially_uptodate) (struct page *, read_descriptor_t *, | 642 | int (*is_partially_uptodate) (struct page *, read_descriptor_t *, |
637 | unsigned long); | 643 | unsigned long); |
638 | int (*error_remove_page)(struct address_space *, struct page *); | 644 | int (*error_remove_page)(struct address_space *, struct page *); |
645 | |||
646 | /* swapfile support */ | ||
647 | int (*swap_activate)(struct swap_info_struct *sis, struct file *file, | ||
648 | sector_t *span); | ||
649 | void (*swap_deactivate)(struct file *file); | ||
639 | }; | 650 | }; |
640 | 651 | ||
641 | extern const struct address_space_operations empty_aops; | 652 | extern const struct address_space_operations empty_aops; |
@@ -1154,7 +1165,6 @@ struct lock_manager_operations { | |||
1154 | int (*lm_compare_owner)(struct file_lock *, struct file_lock *); | 1165 | int (*lm_compare_owner)(struct file_lock *, struct file_lock *); |
1155 | void (*lm_notify)(struct file_lock *); /* unblock callback */ | 1166 | void (*lm_notify)(struct file_lock *); /* unblock callback */ |
1156 | int (*lm_grant)(struct file_lock *, struct file_lock *, int); | 1167 | int (*lm_grant)(struct file_lock *, struct file_lock *, int); |
1157 | void (*lm_release_private)(struct file_lock *); | ||
1158 | void (*lm_break)(struct file_lock *); | 1168 | void (*lm_break)(struct file_lock *); |
1159 | int (*lm_change)(struct file_lock **, int); | 1169 | int (*lm_change)(struct file_lock **, int); |
1160 | }; | 1170 | }; |
@@ -1438,6 +1448,8 @@ extern void f_delown(struct file *filp); | |||
1438 | extern pid_t f_getown(struct file *filp); | 1448 | extern pid_t f_getown(struct file *filp); |
1439 | extern int send_sigurg(struct fown_struct *fown); | 1449 | extern int send_sigurg(struct fown_struct *fown); |
1440 | 1450 | ||
1451 | struct mm_struct; | ||
1452 | |||
1441 | /* | 1453 | /* |
1442 | * Umount options | 1454 | * Umount options |
1443 | */ | 1455 | */ |
@@ -1451,6 +1463,31 @@ extern int send_sigurg(struct fown_struct *fown); | |||
1451 | extern struct list_head super_blocks; | 1463 | extern struct list_head super_blocks; |
1452 | extern spinlock_t sb_lock; | 1464 | extern spinlock_t sb_lock; |
1453 | 1465 | ||
1466 | /* Possible states of 'frozen' field */ | ||
1467 | enum { | ||
1468 | SB_UNFROZEN = 0, /* FS is unfrozen */ | ||
1469 | SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ | ||
1470 | SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ | ||
1471 | SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop | ||
1472 | * internal threads if needed) */ | ||
1473 | SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ | ||
1474 | }; | ||
1475 | |||
1476 | #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) | ||
1477 | |||
1478 | struct sb_writers { | ||
1479 | /* Counters for counting writers at each level */ | ||
1480 | struct percpu_counter counter[SB_FREEZE_LEVELS]; | ||
1481 | wait_queue_head_t wait; /* queue for waiting for | ||
1482 | writers / faults to finish */ | ||
1483 | int frozen; /* Is sb frozen? */ | ||
1484 | wait_queue_head_t wait_unfrozen; /* queue for waiting for | ||
1485 | sb to be thawed */ | ||
1486 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
1487 | struct lockdep_map lock_map[SB_FREEZE_LEVELS]; | ||
1488 | #endif | ||
1489 | }; | ||
1490 | |||
1454 | struct super_block { | 1491 | struct super_block { |
1455 | struct list_head s_list; /* Keep this first */ | 1492 | struct list_head s_list; /* Keep this first */ |
1456 | dev_t s_dev; /* search index; _not_ kdev_t */ | 1493 | dev_t s_dev; /* search index; _not_ kdev_t */ |
@@ -1498,8 +1535,7 @@ struct super_block { | |||
1498 | struct hlist_node s_instances; | 1535 | struct hlist_node s_instances; |
1499 | struct quota_info s_dquot; /* Diskquota specific options */ | 1536 | struct quota_info s_dquot; /* Diskquota specific options */ |
1500 | 1537 | ||
1501 | int s_frozen; | 1538 | struct sb_writers s_writers; |
1502 | wait_queue_head_t s_wait_unfrozen; | ||
1503 | 1539 | ||
1504 | char s_id[32]; /* Informational name */ | 1540 | char s_id[32]; /* Informational name */ |
1505 | u8 s_uuid[16]; /* UUID */ | 1541 | u8 s_uuid[16]; /* UUID */ |
@@ -1554,14 +1590,117 @@ extern struct timespec current_fs_time(struct super_block *sb); | |||
1554 | /* | 1590 | /* |
1555 | * Snapshotting support. | 1591 | * Snapshotting support. |
1556 | */ | 1592 | */ |
1557 | enum { | ||
1558 | SB_UNFROZEN = 0, | ||
1559 | SB_FREEZE_WRITE = 1, | ||
1560 | SB_FREEZE_TRANS = 2, | ||
1561 | }; | ||
1562 | 1593 | ||
1563 | #define vfs_check_frozen(sb, level) \ | 1594 | void __sb_end_write(struct super_block *sb, int level); |
1564 | wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) | 1595 | int __sb_start_write(struct super_block *sb, int level, bool wait); |
1596 | |||
1597 | /** | ||
1598 | * sb_end_write - drop write access to a superblock | ||
1599 | * @sb: the super we wrote to | ||
1600 | * | ||
1601 | * Decrement number of writers to the filesystem. Wake up possible waiters | ||
1602 | * wanting to freeze the filesystem. | ||
1603 | */ | ||
1604 | static inline void sb_end_write(struct super_block *sb) | ||
1605 | { | ||
1606 | __sb_end_write(sb, SB_FREEZE_WRITE); | ||
1607 | } | ||
1608 | |||
1609 | /** | ||
1610 | * sb_end_pagefault - drop write access to a superblock from a page fault | ||
1611 | * @sb: the super we wrote to | ||
1612 | * | ||
1613 | * Decrement number of processes handling write page fault to the filesystem. | ||
1614 | * Wake up possible waiters wanting to freeze the filesystem. | ||
1615 | */ | ||
1616 | static inline void sb_end_pagefault(struct super_block *sb) | ||
1617 | { | ||
1618 | __sb_end_write(sb, SB_FREEZE_PAGEFAULT); | ||
1619 | } | ||
1620 | |||
1621 | /** | ||
1622 | * sb_end_intwrite - drop write access to a superblock for internal fs purposes | ||
1623 | * @sb: the super we wrote to | ||
1624 | * | ||
1625 | * Decrement fs-internal number of writers to the filesystem. Wake up possible | ||
1626 | * waiters wanting to freeze the filesystem. | ||
1627 | */ | ||
1628 | static inline void sb_end_intwrite(struct super_block *sb) | ||
1629 | { | ||
1630 | __sb_end_write(sb, SB_FREEZE_FS); | ||
1631 | } | ||
1632 | |||
1633 | /** | ||
1634 | * sb_start_write - get write access to a superblock | ||
1635 | * @sb: the super we write to | ||
1636 | * | ||
1637 | * When a process wants to write data or metadata to a file system (i.e. dirty | ||
1638 | * a page or an inode), it should embed the operation in a sb_start_write() - | ||
1639 | * sb_end_write() pair to get exclusion against file system freezing. This | ||
1640 | * function increments number of writers preventing freezing. If the file | ||
1641 | * system is already frozen, the function waits until the file system is | ||
1642 | * thawed. | ||
1643 | * | ||
1644 | * Since freeze protection behaves as a lock, users have to preserve | ||
1645 | * ordering of freeze protection and other filesystem locks. Generally, | ||
1646 | * freeze protection should be the outermost lock. In particular, we have: | ||
1647 | * | ||
1648 | * sb_start_write | ||
1649 | * -> i_mutex (write path, truncate, directory ops, ...) | ||
1650 | * -> s_umount (freeze_super, thaw_super) | ||
1651 | */ | ||
1652 | static inline void sb_start_write(struct super_block *sb) | ||
1653 | { | ||
1654 | __sb_start_write(sb, SB_FREEZE_WRITE, true); | ||
1655 | } | ||
1656 | |||
1657 | static inline int sb_start_write_trylock(struct super_block *sb) | ||
1658 | { | ||
1659 | return __sb_start_write(sb, SB_FREEZE_WRITE, false); | ||
1660 | } | ||
1661 | |||
1662 | /** | ||
1663 | * sb_start_pagefault - get write access to a superblock from a page fault | ||
1664 | * @sb: the super we write to | ||
1665 | * | ||
1666 | * When a process starts handling write page fault, it should embed the | ||
1667 | * operation into sb_start_pagefault() - sb_end_pagefault() pair to get | ||
1668 | * exclusion against file system freezing. This is needed since the page fault | ||
1669 | * is going to dirty a page. This function increments number of running page | ||
1670 | * faults preventing freezing. If the file system is already frozen, the | ||
1671 | * function waits until the file system is thawed. | ||
1672 | * | ||
1673 | * Since page fault freeze protection behaves as a lock, users have to preserve | ||
1674 | * ordering of freeze protection and other filesystem locks. It is advised to | ||
1675 | * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault | ||
1676 | * handling code implies lock dependency: | ||
1677 | * | ||
1678 | * mmap_sem | ||
1679 | * -> sb_start_pagefault | ||
1680 | */ | ||
1681 | static inline void sb_start_pagefault(struct super_block *sb) | ||
1682 | { | ||
1683 | __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); | ||
1684 | } | ||
1685 | |||
1686 | /* | ||
1687 | * sb_start_intwrite - get write access to a superblock for internal fs purposes | ||
1688 | * @sb: the super we write to | ||
1689 | * | ||
1690 | * This is the third level of protection against filesystem freezing. It is | ||
1691 | * free for use by a filesystem. The only requirement is that it must rank | ||
1692 | * below sb_start_pagefault. | ||
1693 | * | ||
1694 | * For example filesystem can call sb_start_intwrite() when starting a | ||
1695 | * transaction which somewhat eases handling of freezing for internal sources | ||
1696 | * of filesystem changes (internal fs threads, discarding preallocation on file | ||
1697 | * close, etc.). | ||
1698 | */ | ||
1699 | static inline void sb_start_intwrite(struct super_block *sb) | ||
1700 | { | ||
1701 | __sb_start_write(sb, SB_FREEZE_FS, true); | ||
1702 | } | ||
1703 | |||
1565 | 1704 | ||
1566 | extern bool inode_owner_or_capable(const struct inode *inode); | 1705 | extern bool inode_owner_or_capable(const struct inode *inode); |
1567 | 1706 | ||
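A minimal sketch of the pairing the kernel-doc above describes: writes nest inside SB_FREEZE_WRITE protection, write page faults inside SB_FREEZE_PAGEFAULT. The wrapped helpers are hypothetical:

static ssize_t example_write(struct inode *inode, const char *buf, size_t len)
{
	ssize_t ret;

	sb_start_write(inode->i_sb);		/* blocks while the fs is frozen */
	ret = do_modify_inode(inode, buf, len);	/* hypothetical helper */
	sb_end_write(inode->i_sb);
	return ret;
}

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_mapping->host;
	int ret;

	sb_start_pagefault(inode->i_sb);
	ret = block_page_mkwrite(vma, vmf, example_get_block);	/* assumed get_block_t */
	sb_end_pagefault(inode->i_sb);
	return ret;
}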
@@ -1885,6 +2024,7 @@ struct file_system_type { | |||
1885 | struct lock_class_key s_lock_key; | 2024 | struct lock_class_key s_lock_key; |
1886 | struct lock_class_key s_umount_key; | 2025 | struct lock_class_key s_umount_key; |
1887 | struct lock_class_key s_vfs_rename_key; | 2026 | struct lock_class_key s_vfs_rename_key; |
2027 | struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; | ||
1888 | 2028 | ||
1889 | struct lock_class_key i_lock_key; | 2029 | struct lock_class_key i_lock_key; |
1890 | struct lock_class_key i_mutex_key; | 2030 | struct lock_class_key i_mutex_key; |
@@ -2327,9 +2467,6 @@ static inline void i_readcount_inc(struct inode *inode) | |||
2327 | } | 2467 | } |
2328 | #endif | 2468 | #endif |
2329 | extern int do_pipe_flags(int *, int); | 2469 | extern int do_pipe_flags(int *, int); |
2330 | extern struct file *create_read_pipe(struct file *f, int flags); | ||
2331 | extern struct file *create_write_pipe(int flags); | ||
2332 | extern void free_write_pipe(struct file *); | ||
2333 | 2470 | ||
2334 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); | 2471 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); |
2335 | extern struct file * open_exec(const char *); | 2472 | extern struct file * open_exec(const char *); |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ae0aaa9d42fa..4f440b3e89fe 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -97,7 +97,13 @@ struct partition_meta_info { | |||
97 | 97 | ||
98 | struct hd_struct { | 98 | struct hd_struct { |
99 | sector_t start_sect; | 99 | sector_t start_sect; |
100 | /* | ||
101 | * nr_sects is protected by sequence counter. One might extend a | ||
102 | * partition while IO is happening to it and update of nr_sects | ||
103 | * can be non-atomic on 32bit machines with 64bit sector_t. | ||
104 | */ | ||
100 | sector_t nr_sects; | 105 | sector_t nr_sects; |
106 | seqcount_t nr_sects_seq; | ||
101 | sector_t alignment_offset; | 107 | sector_t alignment_offset; |
102 | unsigned int discard_alignment; | 108 | unsigned int discard_alignment; |
103 | struct device __dev; | 109 | struct device __dev; |
@@ -647,6 +653,57 @@ static inline void hd_struct_put(struct hd_struct *part) | |||
647 | __delete_partition(part); | 653 | __delete_partition(part); |
648 | } | 654 | } |
649 | 655 | ||
656 | /* | ||
657 | * Any access of part->nr_sects which is not protected by partition | ||
658 | * bd_mutex or gendisk bdev bd_mutex, should be done using this | ||
659 | * accessor function. | ||
660 | * | ||
661 | * Code written along the lines of i_size_read() and i_size_write(). | ||
662 | * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption | ||
663 | * on. | ||
664 | */ | ||
665 | static inline sector_t part_nr_sects_read(struct hd_struct *part) | ||
666 | { | ||
667 | #if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) | ||
668 | sector_t nr_sects; | ||
669 | unsigned seq; | ||
670 | do { | ||
671 | seq = read_seqcount_begin(&part->nr_sects_seq); | ||
672 | nr_sects = part->nr_sects; | ||
673 | } while (read_seqcount_retry(&part->nr_sects_seq, seq)); | ||
674 | return nr_sects; | ||
675 | #elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) | ||
676 | sector_t nr_sects; | ||
677 | |||
678 | preempt_disable(); | ||
679 | nr_sects = part->nr_sects; | ||
680 | preempt_enable(); | ||
681 | return nr_sects; | ||
682 | #else | ||
683 | return part->nr_sects; | ||
684 | #endif | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * Should be called with mutex lock held (typically bd_mutex) of partition | ||
689 | * to provide mutual exlusion among writers otherwise seqcount might be | ||
690 | * left in wrong state leaving the readers spinning infinitely. | ||
691 | */ | ||
692 | static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) | ||
693 | { | ||
694 | #if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) | ||
695 | write_seqcount_begin(&part->nr_sects_seq); | ||
696 | part->nr_sects = size; | ||
697 | write_seqcount_end(&part->nr_sects_seq); | ||
698 | #elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) | ||
699 | preempt_disable(); | ||
700 | part->nr_sects = size; | ||
701 | preempt_enable(); | ||
702 | #else | ||
703 | part->nr_sects = size; | ||
704 | #endif | ||
705 | } | ||
706 | |||
650 | #else /* CONFIG_BLOCK */ | 707 | #else /* CONFIG_BLOCK */ |
651 | 708 | ||
652 | static inline void printk_all_partitions(void) { } | 709 | static inline void printk_all_partitions(void) { } |
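A short sketch of the locking contract spelled out above: writers update nr_sects under bd_mutex, while readers may use the lock-free accessor from any context:

static sector_t resize_and_read_sketch(struct block_device *bdev,
				       struct hd_struct *part,
				       sector_t new_nr_sects)
{
	mutex_lock(&bdev->bd_mutex);		/* serialize writers */
	part_nr_sects_write(part, new_nr_sects);
	mutex_unlock(&bdev->bd_mutex);

	return part_nr_sects_read(part);	/* readers need no lock */
}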
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1e49be49d324..4883f393f50a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -23,6 +23,7 @@ struct vm_area_struct; | |||
23 | #define ___GFP_REPEAT 0x400u | 23 | #define ___GFP_REPEAT 0x400u |
24 | #define ___GFP_NOFAIL 0x800u | 24 | #define ___GFP_NOFAIL 0x800u |
25 | #define ___GFP_NORETRY 0x1000u | 25 | #define ___GFP_NORETRY 0x1000u |
26 | #define ___GFP_MEMALLOC 0x2000u | ||
26 | #define ___GFP_COMP 0x4000u | 27 | #define ___GFP_COMP 0x4000u |
27 | #define ___GFP_ZERO 0x8000u | 28 | #define ___GFP_ZERO 0x8000u |
28 | #define ___GFP_NOMEMALLOC 0x10000u | 29 | #define ___GFP_NOMEMALLOC 0x10000u |
@@ -76,9 +77,14 @@ struct vm_area_struct; | |||
76 | #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ | 77 | #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ |
77 | #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ | 78 | #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ |
78 | #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ | 79 | #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ |
80 | #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ | ||
79 | #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ | 81 | #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ |
80 | #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ | 82 | #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ |
81 | #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ | 83 | #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. |
84 | * This takes precedence over the | ||
85 | * __GFP_MEMALLOC flag if both are | ||
86 | * set | ||
87 | */ | ||
82 | #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ | 88 | #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ |
83 | #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ | 89 | #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ |
84 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ | 90 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ |
@@ -129,7 +135,7 @@ struct vm_area_struct; | |||
129 | /* Control page allocator reclaim behavior */ | 135 | /* Control page allocator reclaim behavior */ |
130 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | 136 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ |
131 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 137 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
132 | __GFP_NORETRY|__GFP_NOMEMALLOC) | 138 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) |
133 | 139 | ||
134 | /* Control slab gfp mask during early boot */ | 140 | /* Control slab gfp mask during early boot */ |
135 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) | 141 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) |
@@ -379,6 +385,9 @@ void drain_local_pages(void *dummy); | |||
379 | */ | 385 | */ |
380 | extern gfp_t gfp_allowed_mask; | 386 | extern gfp_t gfp_allowed_mask; |
381 | 387 | ||
388 | /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ | ||
389 | bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); | ||
390 | |||
382 | extern void pm_restrict_gfp_mask(void); | 391 | extern void pm_restrict_gfp_mask(void); |
383 | extern void pm_restore_gfp_mask(void); | 392 | extern void pm_restore_gfp_mask(void); |
384 | 393 | ||
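A hedged illustration of the intended split: only allocations made in order to free memory (for example a swap-over-network receive path) should add __GFP_MEMALLOC, while callers that must never touch the reserves opt out with __GFP_NOMEMALLOC, which takes precedence if both are set:

/* reclaim-critical path: may dip into the emergency reserves */
struct page *page = alloc_page(GFP_ATOMIC | __GFP_MEMALLOC);

/* ordinary allocation that must never consume the reserves */
void *buf = kmalloc(256, GFP_KERNEL | __GFP_NOMEMALLOC);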
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 774fa47b3b5b..ef788b5b4a35 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages; | |||
39 | 39 | ||
40 | void kmap_flush_unused(void); | 40 | void kmap_flush_unused(void); |
41 | 41 | ||
42 | struct page *kmap_to_page(void *addr); | ||
43 | |||
42 | #else /* CONFIG_HIGHMEM */ | 44 | #else /* CONFIG_HIGHMEM */ |
43 | 45 | ||
44 | static inline unsigned int nr_free_highpages(void) { return 0; } | 46 | static inline unsigned int nr_free_highpages(void) { return 0; } |
45 | 47 | ||
48 | static inline struct page *kmap_to_page(void *addr) | ||
49 | { | ||
50 | return virt_to_page(addr); | ||
51 | } | ||
52 | |||
46 | #define totalhigh_pages 0UL | 53 | #define totalhigh_pages 0UL |
47 | 54 | ||
48 | #ifndef ARCH_HAS_KMAP | 55 | #ifndef ARCH_HAS_KMAP |
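A small sketch of the new helper: code that only kept the kmap()ed address can recover the underlying page, and with !CONFIG_HIGHMEM the stub above reduces this to virt_to_page():

static void kmap_to_page_sketch(struct page *page)
{
	void *vaddr = kmap(page);

	WARN_ON(kmap_to_page(vaddr) != page);
	kunmap(page);
}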
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5d6bbe2259e..225164842ab6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -4,9 +4,11 @@ | |||
4 | #include <linux/mm_types.h> | 4 | #include <linux/mm_types.h> |
5 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
6 | #include <linux/hugetlb_inline.h> | 6 | #include <linux/hugetlb_inline.h> |
7 | #include <linux/cgroup.h> | ||
7 | 8 | ||
8 | struct ctl_table; | 9 | struct ctl_table; |
9 | struct user_struct; | 10 | struct user_struct; |
11 | struct mmu_gather; | ||
10 | 12 | ||
11 | #ifdef CONFIG_HUGETLB_PAGE | 13 | #ifdef CONFIG_HUGETLB_PAGE |
12 | 14 | ||
@@ -20,6 +22,11 @@ struct hugepage_subpool { | |||
20 | long max_hpages, used_hpages; | 22 | long max_hpages, used_hpages; |
21 | }; | 23 | }; |
22 | 24 | ||
25 | extern spinlock_t hugetlb_lock; | ||
26 | extern int hugetlb_max_hstate __read_mostly; | ||
27 | #define for_each_hstate(h) \ | ||
28 | for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) | ||
29 | |||
23 | struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); | 30 | struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); |
24 | void hugepage_put_subpool(struct hugepage_subpool *spool); | 31 | void hugepage_put_subpool(struct hugepage_subpool *spool); |
25 | 32 | ||
@@ -40,9 +47,14 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, | |||
40 | struct page **, struct vm_area_struct **, | 47 | struct page **, struct vm_area_struct **, |
41 | unsigned long *, int *, int, unsigned int flags); | 48 | unsigned long *, int *, int, unsigned int flags); |
42 | void unmap_hugepage_range(struct vm_area_struct *, | 49 | void unmap_hugepage_range(struct vm_area_struct *, |
43 | unsigned long, unsigned long, struct page *); | 50 | unsigned long, unsigned long, struct page *); |
44 | void __unmap_hugepage_range(struct vm_area_struct *, | 51 | void __unmap_hugepage_range_final(struct mmu_gather *tlb, |
45 | unsigned long, unsigned long, struct page *); | 52 | struct vm_area_struct *vma, |
53 | unsigned long start, unsigned long end, | ||
54 | struct page *ref_page); | ||
55 | void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | ||
56 | unsigned long start, unsigned long end, | ||
57 | struct page *ref_page); | ||
46 | int hugetlb_prefault(struct address_space *, struct vm_area_struct *); | 58 | int hugetlb_prefault(struct address_space *, struct vm_area_struct *); |
47 | void hugetlb_report_meminfo(struct seq_file *); | 59 | void hugetlb_report_meminfo(struct seq_file *); |
48 | int hugetlb_report_node_meminfo(int, char *); | 60 | int hugetlb_report_node_meminfo(int, char *); |
@@ -98,7 +110,6 @@ static inline unsigned long hugetlb_total_pages(void) | |||
98 | #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) | 110 | #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) |
99 | #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) | 111 | #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) |
100 | #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) | 112 | #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) |
101 | #define unmap_hugepage_range(vma, start, end, page) BUG() | ||
102 | static inline void hugetlb_report_meminfo(struct seq_file *m) | 113 | static inline void hugetlb_report_meminfo(struct seq_file *m) |
103 | { | 114 | { |
104 | } | 115 | } |
@@ -112,13 +123,31 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) | |||
112 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) | 123 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) |
113 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) | 124 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) |
114 | #define huge_pte_offset(mm, address) 0 | 125 | #define huge_pte_offset(mm, address) 0 |
115 | #define dequeue_hwpoisoned_huge_page(page) 0 | 126 | static inline int dequeue_hwpoisoned_huge_page(struct page *page) |
127 | { | ||
128 | return 0; | ||
129 | } | ||
130 | |||
116 | static inline void copy_huge_page(struct page *dst, struct page *src) | 131 | static inline void copy_huge_page(struct page *dst, struct page *src) |
117 | { | 132 | { |
118 | } | 133 | } |
119 | 134 | ||
120 | #define hugetlb_change_protection(vma, address, end, newprot) | 135 | #define hugetlb_change_protection(vma, address, end, newprot) |
121 | 136 | ||
137 | static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, | ||
138 | struct vm_area_struct *vma, unsigned long start, | ||
139 | unsigned long end, struct page *ref_page) | ||
140 | { | ||
141 | BUG(); | ||
142 | } | ||
143 | |||
144 | static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | ||
145 | struct vm_area_struct *vma, unsigned long start, | ||
146 | unsigned long end, struct page *ref_page) | ||
147 | { | ||
148 | BUG(); | ||
149 | } | ||
150 | |||
122 | #endif /* !CONFIG_HUGETLB_PAGE */ | 151 | #endif /* !CONFIG_HUGETLB_PAGE */ |
123 | 152 | ||
124 | #define HUGETLB_ANON_FILE "anon_hugepage" | 153 | #define HUGETLB_ANON_FILE "anon_hugepage" |
@@ -199,10 +228,15 @@ struct hstate { | |||
199 | unsigned long resv_huge_pages; | 228 | unsigned long resv_huge_pages; |
200 | unsigned long surplus_huge_pages; | 229 | unsigned long surplus_huge_pages; |
201 | unsigned long nr_overcommit_huge_pages; | 230 | unsigned long nr_overcommit_huge_pages; |
231 | struct list_head hugepage_activelist; | ||
202 | struct list_head hugepage_freelists[MAX_NUMNODES]; | 232 | struct list_head hugepage_freelists[MAX_NUMNODES]; |
203 | unsigned int nr_huge_pages_node[MAX_NUMNODES]; | 233 | unsigned int nr_huge_pages_node[MAX_NUMNODES]; |
204 | unsigned int free_huge_pages_node[MAX_NUMNODES]; | 234 | unsigned int free_huge_pages_node[MAX_NUMNODES]; |
205 | unsigned int surplus_huge_pages_node[MAX_NUMNODES]; | 235 | unsigned int surplus_huge_pages_node[MAX_NUMNODES]; |
236 | #ifdef CONFIG_CGROUP_HUGETLB | ||
237 | /* cgroup control files */ | ||
238 | struct cftype cgroup_files[5]; | ||
239 | #endif | ||
206 | char name[HSTATE_NAME_LEN]; | 240 | char name[HSTATE_NAME_LEN]; |
207 | }; | 241 | }; |
208 | 242 | ||
@@ -302,6 +336,11 @@ static inline unsigned hstate_index_to_shift(unsigned index) | |||
302 | return hstates[index].order + PAGE_SHIFT; | 336 | return hstates[index].order + PAGE_SHIFT; |
303 | } | 337 | } |
304 | 338 | ||
339 | static inline int hstate_index(struct hstate *h) | ||
340 | { | ||
341 | return h - hstates; | ||
342 | } | ||
343 | |||
305 | #else | 344 | #else |
306 | struct hstate {}; | 345 | struct hstate {}; |
307 | #define alloc_huge_page_node(h, nid) NULL | 346 | #define alloc_huge_page_node(h, nid) NULL |
@@ -320,6 +359,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) | |||
320 | return 1; | 359 | return 1; |
321 | } | 360 | } |
322 | #define hstate_index_to_shift(index) 0 | 361 | #define hstate_index_to_shift(index) 0 |
362 | #define hstate_index(h) 0 | ||
323 | #endif | 363 | #endif |
324 | 364 | ||
325 | #endif /* _LINUX_HUGETLB_H */ | 365 | #endif /* _LINUX_HUGETLB_H */ |
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
new file mode 100644
index 000000000000..d73878c694b3
--- /dev/null
+++ b/include/linux/hugetlb_cgroup.h
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2012 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2.1 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef _LINUX_HUGETLB_CGROUP_H | ||
16 | #define _LINUX_HUGETLB_CGROUP_H | ||
17 | |||
18 | #include <linux/res_counter.h> | ||
19 | |||
20 | struct hugetlb_cgroup; | ||
21 | /* | ||
22 | * Minimum page order trackable by hugetlb cgroup. | ||
23 | * At least 3 pages are necessary for all the tracking information. | ||
24 | */ | ||
25 | #define HUGETLB_CGROUP_MIN_ORDER 2 | ||
26 | |||
27 | #ifdef CONFIG_CGROUP_HUGETLB | ||
28 | |||
29 | static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) | ||
30 | { | ||
31 | VM_BUG_ON(!PageHuge(page)); | ||
32 | |||
33 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | ||
34 | return NULL; | ||
35 | return (struct hugetlb_cgroup *)page[2].lru.next; | ||
36 | } | ||
37 | |||
38 | static inline | ||
39 | int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) | ||
40 | { | ||
41 | VM_BUG_ON(!PageHuge(page)); | ||
42 | |||
43 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | ||
44 | return -1; | ||
45 | page[2].lru.next = (void *)h_cg; | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static inline bool hugetlb_cgroup_disabled(void) | ||
50 | { | ||
51 | if (hugetlb_subsys.disabled) | ||
52 | return true; | ||
53 | return false; | ||
54 | } | ||
55 | |||
56 | extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, | ||
57 | struct hugetlb_cgroup **ptr); | ||
58 | extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, | ||
59 | struct hugetlb_cgroup *h_cg, | ||
60 | struct page *page); | ||
61 | extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, | ||
62 | struct page *page); | ||
63 | extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, | ||
64 | struct hugetlb_cgroup *h_cg); | ||
65 | extern int hugetlb_cgroup_file_init(int idx) __init; | ||
66 | extern void hugetlb_cgroup_migrate(struct page *oldhpage, | ||
67 | struct page *newhpage); | ||
68 | |||
69 | #else | ||
70 | static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) | ||
71 | { | ||
72 | return NULL; | ||
73 | } | ||
74 | |||
75 | static inline | ||
76 | int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static inline bool hugetlb_cgroup_disabled(void) | ||
82 | { | ||
83 | return true; | ||
84 | } | ||
85 | |||
86 | static inline int | ||
87 | hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, | ||
88 | struct hugetlb_cgroup **ptr) | ||
89 | { | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline void | ||
94 | hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, | ||
95 | struct hugetlb_cgroup *h_cg, | ||
96 | struct page *page) | ||
97 | { | ||
98 | return; | ||
99 | } | ||
100 | |||
101 | static inline void | ||
102 | hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) | ||
103 | { | ||
104 | return; | ||
105 | } | ||
106 | |||
107 | static inline void | ||
108 | hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, | ||
109 | struct hugetlb_cgroup *h_cg) | ||
110 | { | ||
111 | return; | ||
112 | } | ||
113 | |||
114 | static inline int __init hugetlb_cgroup_file_init(int idx) | ||
115 | { | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static inline void hugetlb_cgroup_migrate(struct page *oldhpage, | ||
120 | struct page *newhpage) | ||
121 | { | ||
122 | return; | ||
123 | } | ||
124 | |||
125 | #endif /* CONFIG_MEM_RES_CTLR_HUGETLB */ | ||
126 | #endif | ||
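A sketch of the charge/commit/uncharge cycle these hooks are declared for, loosely following how a huge page allocation path would use them together with hstate_index() and pages_per_huge_page() from hugetlb.h above; the dequeue helper is hypothetical:

static struct page *alloc_huge_page_sketch(struct hstate *h, int nid)
{
	struct hugetlb_cgroup *h_cg;
	int idx = hstate_index(h);
	unsigned long nr_pages = pages_per_huge_page(h);
	struct page *page;

	if (hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg))
		return ERR_PTR(-ENOSPC);

	page = dequeue_or_alloc_huge_page(h, nid);	/* hypothetical helper */
	if (page)
		hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
	else
		hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);

	return page;
}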
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 555382660bc4..7ea898c55a60 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -555,6 +555,8 @@ struct twl4030_clock_init_data { | |||
555 | struct twl4030_bci_platform_data { | 555 | struct twl4030_bci_platform_data { |
556 | int *battery_tmp_tbl; | 556 | int *battery_tmp_tbl; |
557 | unsigned int tblsize; | 557 | unsigned int tblsize; |
558 | int bb_uvolt; /* voltage to charge backup battery */ | ||
559 | int bb_uamp; /* current for backup battery charging */ | ||
558 | }; | 560 | }; |
559 | 561 | ||
560 | /* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ | 562 | /* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */ |
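The two new bb_* fields let board code ask the TWL4030 BCI driver to trickle-charge the backup battery. A hedged example of the platform data a board file might pass; the 3.2 V / 150 uA values are purely illustrative:

#include <linux/i2c/twl.h>

static struct twl4030_bci_platform_data demo_bci_data = {
	.battery_tmp_tbl = NULL,	/* no temperature table on this board */
	.tblsize	 = 0,
	.bb_uvolt	 = 3200000,	/* charge the backup battery to 3.2 V */
	.bb_uamp	 = 150,		/* ... with a 150 uA charge current */
};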
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index e68a8e53bb59..c5f856a040b9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -42,7 +42,6 @@ | |||
42 | * | 42 | * |
43 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. | 43 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. |
44 | * DEPRECATED. This flag is a NOOP and scheduled to be removed | 44 | * DEPRECATED. This flag is a NOOP and scheduled to be removed |
45 | * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator | ||
46 | * IRQF_SHARED - allow sharing the irq among several devices | 45 | * IRQF_SHARED - allow sharing the irq among several devices |
47 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur | 46 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur |
48 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt | 47 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt |
@@ -61,7 +60,6 @@ | |||
61 | * resume time. | 60 | * resume time. |
62 | */ | 61 | */ |
63 | #define IRQF_DISABLED 0x00000020 | 62 | #define IRQF_DISABLED 0x00000020 |
64 | #define IRQF_SAMPLE_RANDOM 0x00000040 | ||
65 | #define IRQF_SHARED 0x00000080 | 63 | #define IRQF_SHARED 0x00000080 |
66 | #define IRQF_PROBE_SHARED 0x00000100 | 64 | #define IRQF_PROBE_SHARED 0x00000100 |
67 | #define __IRQF_TIMER 0x00000200 | 65 | #define __IRQF_TIMER 0x00000200 |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index f1e2527006bd..9a323d12de1c 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -39,7 +39,6 @@ struct module; | |||
39 | */ | 39 | */ |
40 | struct irq_desc { | 40 | struct irq_desc { |
41 | struct irq_data irq_data; | 41 | struct irq_data irq_data; |
42 | struct timer_rand_state *timer_rand_state; | ||
43 | unsigned int __percpu *kstat_irqs; | 42 | unsigned int __percpu *kstat_irqs; |
44 | irq_flow_handler_t handle_irq; | 43 | irq_flow_handler_t handle_irq; |
45 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | 44 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 5abb533eb8eb..0d5b17bf5e51 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -112,6 +112,11 @@ struct irq_domain { | |||
112 | }; | 112 | }; |
113 | 113 | ||
114 | #ifdef CONFIG_IRQ_DOMAIN | 114 | #ifdef CONFIG_IRQ_DOMAIN |
115 | struct irq_domain *irq_domain_add_simple(struct device_node *of_node, | ||
116 | unsigned int size, | ||
117 | unsigned int first_irq, | ||
118 | const struct irq_domain_ops *ops, | ||
119 | void *host_data); | ||
115 | struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | 120 | struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, |
116 | unsigned int size, | 121 | unsigned int size, |
117 | unsigned int first_irq, | 122 | unsigned int first_irq, |
@@ -144,16 +149,31 @@ static inline struct irq_domain *irq_domain_add_legacy_isa( | |||
144 | 149 | ||
145 | extern void irq_domain_remove(struct irq_domain *host); | 150 | extern void irq_domain_remove(struct irq_domain *host); |
146 | 151 | ||
152 | extern int irq_domain_associate_many(struct irq_domain *domain, | ||
153 | unsigned int irq_base, | ||
154 | irq_hw_number_t hwirq_base, int count); | ||
155 | static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq, | ||
156 | irq_hw_number_t hwirq) | ||
157 | { | ||
158 | return irq_domain_associate_many(domain, irq, hwirq, 1); | ||
159 | } | ||
160 | |||
147 | extern unsigned int irq_create_mapping(struct irq_domain *host, | 161 | extern unsigned int irq_create_mapping(struct irq_domain *host, |
148 | irq_hw_number_t hwirq); | 162 | irq_hw_number_t hwirq); |
149 | extern void irq_dispose_mapping(unsigned int virq); | 163 | extern void irq_dispose_mapping(unsigned int virq); |
150 | extern unsigned int irq_find_mapping(struct irq_domain *host, | 164 | extern unsigned int irq_find_mapping(struct irq_domain *host, |
151 | irq_hw_number_t hwirq); | 165 | irq_hw_number_t hwirq); |
152 | extern unsigned int irq_create_direct_mapping(struct irq_domain *host); | 166 | extern unsigned int irq_create_direct_mapping(struct irq_domain *host); |
153 | extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq, | 167 | extern int irq_create_strict_mappings(struct irq_domain *domain, |
154 | irq_hw_number_t hwirq); | 168 | unsigned int irq_base, |
155 | extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host, | 169 | irq_hw_number_t hwirq_base, int count); |
156 | irq_hw_number_t hwirq); | 170 | |
171 | static inline int irq_create_identity_mapping(struct irq_domain *host, | ||
172 | irq_hw_number_t hwirq) | ||
173 | { | ||
174 | return irq_create_strict_mappings(host, hwirq, hwirq, 1); | ||
175 | } | ||
176 | |||
157 | extern unsigned int irq_linear_revmap(struct irq_domain *host, | 177 | extern unsigned int irq_linear_revmap(struct irq_domain *host, |
158 | irq_hw_number_t hwirq); | 178 | irq_hw_number_t hwirq); |
159 | 179 | ||
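irq_domain_add_simple() lets an interrupt controller driver defer the legacy-versus-linear decision to the core, and the new mapping helpers make virq == hwirq layouts explicit. A rough sketch of a hypothetical driver follows; demo_irq_domain_ops is assumed to be filled in elsewhere:

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/errno.h>

static const struct irq_domain_ops demo_irq_domain_ops;	/* hypothetical ops */

static int demo_intc_of_init(struct device_node *np)
{
	struct irq_domain *d;

	/* 32 interrupts; first_irq == 0 lets the core pick the descriptors. */
	d = irq_domain_add_simple(np, 32, 0, &demo_irq_domain_ops, NULL);
	if (!d)
		return -ENOMEM;

	/* Reserve virq == hwirq for lines 16..31, e.g. for legacy users. */
	return irq_create_strict_mappings(d, 16, 16, 16);
}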
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 83e7ba90d6e5..8d9489fdab2e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -38,7 +38,7 @@ struct mem_cgroup_reclaim_cookie { | |||
38 | unsigned int generation; | 38 | unsigned int generation; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 41 | #ifdef CONFIG_MEMCG |
42 | /* | 42 | /* |
43 | * All "charge" functions with gfp_mask should use GFP_KERNEL or | 43 | * All "charge" functions with gfp_mask should use GFP_KERNEL or |
44 | * (gfp_mask & GFP_RECLAIM_MASK). In current implementation, memcg doesn't | 44 | * (gfp_mask & GFP_RECLAIM_MASK). In current implementation, memcg doesn't |
@@ -72,8 +72,6 @@ extern void mem_cgroup_uncharge_end(void); | |||
72 | extern void mem_cgroup_uncharge_page(struct page *page); | 72 | extern void mem_cgroup_uncharge_page(struct page *page); |
73 | extern void mem_cgroup_uncharge_cache_page(struct page *page); | 73 | extern void mem_cgroup_uncharge_cache_page(struct page *page); |
74 | 74 | ||
75 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, | ||
76 | int order); | ||
77 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | 75 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, |
78 | struct mem_cgroup *memcg); | 76 | struct mem_cgroup *memcg); |
79 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); | 77 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); |
@@ -100,9 +98,9 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) | |||
100 | 98 | ||
101 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); | 99 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); |
102 | 100 | ||
103 | extern int | 101 | extern void |
104 | mem_cgroup_prepare_migration(struct page *page, | 102 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, |
105 | struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask); | 103 | struct mem_cgroup **memcgp); |
106 | extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, | 104 | extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, |
107 | struct page *oldpage, struct page *newpage, bool migration_ok); | 105 | struct page *oldpage, struct page *newpage, bool migration_ok); |
108 | 106 | ||
@@ -124,7 +122,7 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | |||
124 | extern void mem_cgroup_replace_page_cache(struct page *oldpage, | 122 | extern void mem_cgroup_replace_page_cache(struct page *oldpage, |
125 | struct page *newpage); | 123 | struct page *newpage); |
126 | 124 | ||
127 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 125 | #ifdef CONFIG_MEMCG_SWAP |
128 | extern int do_swap_account; | 126 | extern int do_swap_account; |
129 | #endif | 127 | #endif |
130 | 128 | ||
@@ -182,7 +180,6 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, | |||
182 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 180 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
183 | gfp_t gfp_mask, | 181 | gfp_t gfp_mask, |
184 | unsigned long *total_scanned); | 182 | unsigned long *total_scanned); |
185 | u64 mem_cgroup_get_limit(struct mem_cgroup *memcg); | ||
186 | 183 | ||
187 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); | 184 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); |
188 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 185 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -193,7 +190,7 @@ void mem_cgroup_split_huge_fixup(struct page *head); | |||
193 | bool mem_cgroup_bad_page_check(struct page *page); | 190 | bool mem_cgroup_bad_page_check(struct page *page); |
194 | void mem_cgroup_print_bad_page(struct page *page); | 191 | void mem_cgroup_print_bad_page(struct page *page); |
195 | #endif | 192 | #endif |
196 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 193 | #else /* CONFIG_MEMCG */ |
197 | struct mem_cgroup; | 194 | struct mem_cgroup; |
198 | 195 | ||
199 | static inline int mem_cgroup_newpage_charge(struct page *page, | 196 | static inline int mem_cgroup_newpage_charge(struct page *page, |
@@ -279,11 +276,10 @@ static inline struct cgroup_subsys_state | |||
279 | return NULL; | 276 | return NULL; |
280 | } | 277 | } |
281 | 278 | ||
282 | static inline int | 279 | static inline void |
283 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, | 280 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, |
284 | struct mem_cgroup **memcgp, gfp_t gfp_mask) | 281 | struct mem_cgroup **memcgp) |
285 | { | 282 | { |
286 | return 0; | ||
287 | } | 283 | } |
288 | 284 | ||
289 | static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg, | 285 | static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg, |
@@ -366,12 +362,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
366 | return 0; | 362 | return 0; |
367 | } | 363 | } |
368 | 364 | ||
369 | static inline | ||
370 | u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) | ||
371 | { | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static inline void mem_cgroup_split_huge_fixup(struct page *head) | 365 | static inline void mem_cgroup_split_huge_fixup(struct page *head) |
376 | { | 366 | { |
377 | } | 367 | } |
@@ -384,9 +374,9 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage, | |||
384 | struct page *newpage) | 374 | struct page *newpage) |
385 | { | 375 | { |
386 | } | 376 | } |
387 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR */ | 377 | #endif /* CONFIG_MEMCG */ |
388 | 378 | ||
389 | #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) | 379 | #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) |
390 | static inline bool | 380 | static inline bool |
391 | mem_cgroup_bad_page_check(struct page *page) | 381 | mem_cgroup_bad_page_check(struct page *page) |
392 | { | 382 | { |
@@ -406,7 +396,7 @@ enum { | |||
406 | }; | 396 | }; |
407 | 397 | ||
408 | struct sock; | 398 | struct sock; |
409 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM | 399 | #ifdef CONFIG_MEMCG_KMEM |
410 | void sock_update_memcg(struct sock *sk); | 400 | void sock_update_memcg(struct sock *sk); |
411 | void sock_release_memcg(struct sock *sk); | 401 | void sock_release_memcg(struct sock *sk); |
412 | #else | 402 | #else |
@@ -416,6 +406,6 @@ static inline void sock_update_memcg(struct sock *sk) | |||
416 | static inline void sock_release_memcg(struct sock *sk) | 406 | static inline void sock_release_memcg(struct sock *sk) |
417 | { | 407 | { |
418 | } | 408 | } |
419 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ | 409 | #endif /* CONFIG_MEMCG_KMEM */ |
420 | #endif /* _LINUX_MEMCONTROL_H */ | 410 | #endif /* _LINUX_MEMCONTROL_H */ |
421 | 411 | ||
diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 7c08052e3321..39ed62ab5b8a 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h | |||
@@ -26,7 +26,8 @@ typedef struct mempool_s { | |||
26 | extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, | 26 | extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, |
27 | mempool_free_t *free_fn, void *pool_data); | 27 | mempool_free_t *free_fn, void *pool_data); |
28 | extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, | 28 | extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, |
29 | mempool_free_t *free_fn, void *pool_data, int nid); | 29 | mempool_free_t *free_fn, void *pool_data, |
30 | gfp_t gfp_mask, int nid); | ||
30 | 31 | ||
31 | extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); | 32 | extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); |
32 | extern void mempool_destroy(mempool_t *pool); | 33 | extern void mempool_destroy(mempool_t *pool); |
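mempool_create_node() now takes the gfp mask used while the pool itself is being built, instead of hard-coding it internally. A hedged example of the updated call; the pool size and demo_cache are made up and assumed to be created elsewhere:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *demo_cache;	/* hypothetical slab cache */
static mempool_t *demo_pool;

static int demo_create_pool(int nid)
{
	demo_pool = mempool_create_node(16, mempool_alloc_slab,
					mempool_free_slab, demo_cache,
					GFP_KERNEL, nid);
	return demo_pool ? 0 : -ENOMEM;
}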
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 855c337b20c3..ce7e6671968b 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -15,7 +15,7 @@ extern int migrate_page(struct address_space *, | |||
15 | extern int migrate_pages(struct list_head *l, new_page_t x, | 15 | extern int migrate_pages(struct list_head *l, new_page_t x, |
16 | unsigned long private, bool offlining, | 16 | unsigned long private, bool offlining, |
17 | enum migrate_mode mode); | 17 | enum migrate_mode mode); |
18 | extern int migrate_huge_pages(struct list_head *l, new_page_t x, | 18 | extern int migrate_huge_page(struct page *, new_page_t x, |
19 | unsigned long private, bool offlining, | 19 | unsigned long private, bool offlining, |
20 | enum migrate_mode mode); | 20 | enum migrate_mode mode); |
21 | 21 | ||
@@ -36,7 +36,7 @@ static inline void putback_lru_pages(struct list_head *l) {} | |||
36 | static inline int migrate_pages(struct list_head *l, new_page_t x, | 36 | static inline int migrate_pages(struct list_head *l, new_page_t x, |
37 | unsigned long private, bool offlining, | 37 | unsigned long private, bool offlining, |
38 | enum migrate_mode mode) { return -ENOSYS; } | 38 | enum migrate_mode mode) { return -ENOSYS; } |
39 | static inline int migrate_huge_pages(struct list_head *l, new_page_t x, | 39 | static inline int migrate_huge_page(struct page *page, new_page_t x, |
40 | unsigned long private, bool offlining, | 40 | unsigned long private, bool offlining, |
41 | enum migrate_mode mode) { return -ENOSYS; } | 41 | enum migrate_mode mode) { return -ENOSYS; } |
42 | 42 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index f9f279cf5b1b..311be906b57d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -805,6 +805,17 @@ static inline void *page_rmapping(struct page *page) | |||
805 | return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); | 805 | return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); |
806 | } | 806 | } |
807 | 807 | ||
808 | extern struct address_space *__page_file_mapping(struct page *); | ||
809 | |||
810 | static inline | ||
811 | struct address_space *page_file_mapping(struct page *page) | ||
812 | { | ||
813 | if (unlikely(PageSwapCache(page))) | ||
814 | return __page_file_mapping(page); | ||
815 | |||
816 | return page->mapping; | ||
817 | } | ||
818 | |||
808 | static inline int PageAnon(struct page *page) | 819 | static inline int PageAnon(struct page *page) |
809 | { | 820 | { |
810 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; | 821 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; |
@@ -821,6 +832,20 @@ static inline pgoff_t page_index(struct page *page) | |||
821 | return page->index; | 832 | return page->index; |
822 | } | 833 | } |
823 | 834 | ||
835 | extern pgoff_t __page_file_index(struct page *page); | ||
836 | |||
837 | /* | ||
838 | * Return the file index of the page. Regular pagecache pages use ->index | ||
839 | * whereas swapcache pages use swp_offset(->private) | ||
840 | */ | ||
841 | static inline pgoff_t page_file_index(struct page *page) | ||
842 | { | ||
843 | if (unlikely(PageSwapCache(page))) | ||
844 | return __page_file_index(page); | ||
845 | |||
846 | return page->index; | ||
847 | } | ||
848 | |||
824 | /* | 849 | /* |
825 | * Return true if this page is mapped into pagetables. | 850 | * Return true if this page is mapped into pagetables. |
826 | */ | 851 | */ |
@@ -994,6 +1019,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
994 | struct page **pages, struct vm_area_struct **vmas); | 1019 | struct page **pages, struct vm_area_struct **vmas); |
995 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 1020 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
996 | struct page **pages); | 1021 | struct page **pages); |
1022 | struct kvec; | ||
1023 | int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, | ||
1024 | struct page **pages); | ||
1025 | int get_kernel_page(unsigned long start, int write, struct page **pages); | ||
997 | struct page *get_dump_page(unsigned long addr); | 1026 | struct page *get_dump_page(unsigned long addr); |
998 | 1027 | ||
999 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); | 1028 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); |
@@ -1331,6 +1360,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); | |||
1331 | extern void setup_per_cpu_pageset(void); | 1360 | extern void setup_per_cpu_pageset(void); |
1332 | 1361 | ||
1333 | extern void zone_pcp_update(struct zone *zone); | 1362 | extern void zone_pcp_update(struct zone *zone); |
1363 | extern void zone_pcp_reset(struct zone *zone); | ||
1334 | 1364 | ||
1335 | /* nommu.c */ | 1365 | /* nommu.c */ |
1336 | extern atomic_long_t mmap_pages_allocated; | 1366 | extern atomic_long_t mmap_pages_allocated; |
@@ -1411,6 +1441,7 @@ extern void truncate_inode_pages_range(struct address_space *, | |||
1411 | 1441 | ||
1412 | /* generic vm_area_ops exported for stackable file systems */ | 1442 | /* generic vm_area_ops exported for stackable file systems */ |
1413 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | 1443 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); |
1444 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
1414 | 1445 | ||
1415 | /* mm/page-writeback.c */ | 1446 | /* mm/page-writeback.c */ |
1416 | int write_one_page(struct page *page, int wait); | 1447 | int write_one_page(struct page *page, int wait); |
@@ -1528,6 +1559,7 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); | |||
1528 | static inline void vm_stat_account(struct mm_struct *mm, | 1559 | static inline void vm_stat_account(struct mm_struct *mm, |
1529 | unsigned long flags, struct file *file, long pages) | 1560 | unsigned long flags, struct file *file, long pages) |
1530 | { | 1561 | { |
1562 | mm->total_vm += pages; | ||
1531 | } | 1563 | } |
1532 | #endif /* CONFIG_PROC_FS */ | 1564 | #endif /* CONFIG_PROC_FS */ |
1533 | 1565 | ||
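page_file_mapping()/page_file_index() give filesystems a swapcache-aware view of a page, and get_kernel_page()/get_kernel_pages() resolve the struct pages behind kernel buffers; both are groundwork for swap over network filesystems. A minimal hypothetical helper using the single-page variant:

#include <linux/mm.h>

/* Hypothetical helper: look up the page backing one kernel (lowmem) address. */
static struct page *demo_kernel_addr_to_page(void *buf)
{
	struct page *page;

	/* get_kernel_page() fills in one struct page * and returns the
	 * number of pages resolved, so anything but 1 is a failure here. */
	if (get_kernel_page((unsigned long)buf, 0, &page) != 1)
		return NULL;

	return page;
}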
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 074eb98fe15d..bf7867200b95 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -54,6 +54,15 @@ struct page { | |||
54 | union { | 54 | union { |
55 | pgoff_t index; /* Our offset within mapping. */ | 55 | pgoff_t index; /* Our offset within mapping. */ |
56 | void *freelist; /* slub/slob first free object */ | 56 | void *freelist; /* slub/slob first free object */ |
57 | bool pfmemalloc; /* If set by the page allocator, | ||
58 | * ALLOC_NO_WATERMARKS was set | ||
59 | * and the low watermark was not | ||
60 | * met implying that the system | ||
61 | * is under some pressure. The | ||
62 | * caller should try to ensure | ||
63 | * this page is only used to | ||
64 | * free other pages. | ||
65 | */ | ||
57 | }; | 66 | }; |
58 | 67 | ||
59 | union { | 68 | union { |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 458988bd55a1..2daa54f55db7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -201,7 +201,7 @@ struct zone_reclaim_stat { | |||
201 | struct lruvec { | 201 | struct lruvec { |
202 | struct list_head lists[NR_LRU_LISTS]; | 202 | struct list_head lists[NR_LRU_LISTS]; |
203 | struct zone_reclaim_stat reclaim_stat; | 203 | struct zone_reclaim_stat reclaim_stat; |
204 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 204 | #ifdef CONFIG_MEMCG |
205 | struct zone *zone; | 205 | struct zone *zone; |
206 | #endif | 206 | #endif |
207 | }; | 207 | }; |
@@ -209,7 +209,6 @@ struct lruvec { | |||
209 | /* Mask used at gathering information at once (see memcontrol.c) */ | 209 | /* Mask used at gathering information at once (see memcontrol.c) */ |
210 | #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) | 210 | #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) |
211 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) | 211 | #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) |
212 | #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON) | ||
213 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) | 212 | #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) |
214 | 213 | ||
215 | /* Isolate clean file */ | 214 | /* Isolate clean file */ |
@@ -369,6 +368,10 @@ struct zone { | |||
369 | */ | 368 | */ |
370 | spinlock_t lock; | 369 | spinlock_t lock; |
371 | int all_unreclaimable; /* All pages pinned */ | 370 | int all_unreclaimable; /* All pages pinned */ |
371 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | ||
372 | /* pfn where the last incremental compaction isolated free pages */ | ||
373 | unsigned long compact_cached_free_pfn; | ||
374 | #endif | ||
372 | #ifdef CONFIG_MEMORY_HOTPLUG | 375 | #ifdef CONFIG_MEMORY_HOTPLUG |
373 | /* see spanned/present_pages for more description */ | 376 | /* see spanned/present_pages for more description */ |
374 | seqlock_t span_seqlock; | 377 | seqlock_t span_seqlock; |
@@ -475,6 +478,14 @@ struct zone { | |||
475 | * rarely used fields: | 478 | * rarely used fields: |
476 | */ | 479 | */ |
477 | const char *name; | 480 | const char *name; |
481 | #ifdef CONFIG_MEMORY_ISOLATION | ||
482 | /* | ||
483 | * the number of MIGRATE_ISOLATE *pageblock*. | ||
484 | * We need this for free page counting. Look at zone_watermark_ok_safe. | ||
485 | * It's protected by zone->lock | ||
486 | */ | ||
487 | int nr_pageblock_isolate; | ||
488 | #endif | ||
478 | } ____cacheline_internodealigned_in_smp; | 489 | } ____cacheline_internodealigned_in_smp; |
479 | 490 | ||
480 | typedef enum { | 491 | typedef enum { |
@@ -671,7 +682,7 @@ typedef struct pglist_data { | |||
671 | int nr_zones; | 682 | int nr_zones; |
672 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ | 683 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ |
673 | struct page *node_mem_map; | 684 | struct page *node_mem_map; |
674 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 685 | #ifdef CONFIG_MEMCG |
675 | struct page_cgroup *node_page_cgroup; | 686 | struct page_cgroup *node_page_cgroup; |
676 | #endif | 687 | #endif |
677 | #endif | 688 | #endif |
@@ -694,6 +705,7 @@ typedef struct pglist_data { | |||
694 | range, including holes */ | 705 | range, including holes */ |
695 | int node_id; | 706 | int node_id; |
696 | wait_queue_head_t kswapd_wait; | 707 | wait_queue_head_t kswapd_wait; |
708 | wait_queue_head_t pfmemalloc_wait; | ||
697 | struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ | 709 | struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ |
698 | int kswapd_max_order; | 710 | int kswapd_max_order; |
699 | enum zone_type classzone_idx; | 711 | enum zone_type classzone_idx; |
@@ -718,7 +730,7 @@ typedef struct pglist_data { | |||
718 | #include <linux/memory_hotplug.h> | 730 | #include <linux/memory_hotplug.h> |
719 | 731 | ||
720 | extern struct mutex zonelists_mutex; | 732 | extern struct mutex zonelists_mutex; |
721 | void build_all_zonelists(void *data); | 733 | void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); |
722 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); | 734 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); |
723 | bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, | 735 | bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, |
724 | int classzone_idx, int alloc_flags); | 736 | int classzone_idx, int alloc_flags); |
@@ -736,7 +748,7 @@ extern void lruvec_init(struct lruvec *lruvec, struct zone *zone); | |||
736 | 748 | ||
737 | static inline struct zone *lruvec_zone(struct lruvec *lruvec) | 749 | static inline struct zone *lruvec_zone(struct lruvec *lruvec) |
738 | { | 750 | { |
739 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 751 | #ifdef CONFIG_MEMCG |
740 | return lruvec->zone; | 752 | return lruvec->zone; |
741 | #else | 753 | #else |
742 | return container_of(lruvec, struct zone, lruvec); | 754 | return container_of(lruvec, struct zone, lruvec); |
@@ -773,7 +785,7 @@ extern int movable_zone; | |||
773 | 785 | ||
774 | static inline int zone_movable_is_highmem(void) | 786 | static inline int zone_movable_is_highmem(void) |
775 | { | 787 | { |
776 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE) | 788 | #if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) |
777 | return movable_zone == ZONE_HIGHMEM; | 789 | return movable_zone == ZONE_HIGHMEM; |
778 | #else | 790 | #else |
779 | return 0; | 791 | return 0; |
@@ -1052,7 +1064,7 @@ struct mem_section { | |||
1052 | 1064 | ||
1053 | /* See declaration of similar field in struct zone */ | 1065 | /* See declaration of similar field in struct zone */ |
1054 | unsigned long *pageblock_flags; | 1066 | unsigned long *pageblock_flags; |
1055 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 1067 | #ifdef CONFIG_MEMCG |
1056 | /* | 1068 | /* |
1057 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use | 1069 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use |
1058 | * section. (see memcontrol.h/page_cgroup.h about this.) | 1070 | * section. (see memcontrol.h/page_cgroup.h about this.) |
diff --git a/include/linux/namei.h b/include/linux/namei.h index d2ef8b34b967..4bf19d8174ed 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -67,6 +67,7 @@ extern int kern_path(const char *, unsigned, struct path *); | |||
67 | 67 | ||
68 | extern struct dentry *kern_path_create(int, const char *, struct path *, int); | 68 | extern struct dentry *kern_path_create(int, const char *, struct path *, int); |
69 | extern struct dentry *user_path_create(int, const char __user *, struct path *, int); | 69 | extern struct dentry *user_path_create(int, const char __user *, struct path *, int); |
70 | extern void done_path_create(struct path *, struct dentry *); | ||
70 | extern struct dentry *kern_path_locked(const char *, struct path *); | 71 | extern struct dentry *kern_path_locked(const char *, struct path *); |
71 | extern int vfs_path_lookup(struct dentry *, struct vfsmount *, | 72 | extern int vfs_path_lookup(struct dentry *, struct vfsmount *, |
72 | const char *, unsigned int, struct path *); | 73 | const char *, unsigned int, struct path *); |
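done_path_create() is the counterpart to kern_path_create()/user_path_create(): it drops the directory lock plus the dentry and path references those helpers leave held. A hedged sketch of the pairing; the vfs_mknod() call and its arguments are illustrative:

#include <linux/namei.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical: create a device node at a kernel-supplied path. */
static int demo_create_node(const char *pathname, umode_t mode, dev_t dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev);

	/* Unlocks the parent and drops the references taken above. */
	done_path_create(&path, dentry);
	return err;
}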
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 4b6043c20f77..1f8fc7f9bcd8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -191,7 +191,7 @@ struct nfs_inode { | |||
191 | struct hlist_head silly_list; | 191 | struct hlist_head silly_list; |
192 | wait_queue_head_t waitqueue; | 192 | wait_queue_head_t waitqueue; |
193 | 193 | ||
194 | #ifdef CONFIG_NFS_V4 | 194 | #if IS_ENABLED(CONFIG_NFS_V4) |
195 | struct nfs4_cached_acl *nfs4_acl; | 195 | struct nfs4_cached_acl *nfs4_acl; |
196 | /* NFSv4 state */ | 196 | /* NFSv4 state */ |
197 | struct list_head open_states; | 197 | struct list_head open_states; |
@@ -428,7 +428,7 @@ extern __be32 root_nfs_parse_addr(char *name); /*__init*/ | |||
428 | * linux/fs/nfs/file.c | 428 | * linux/fs/nfs/file.c |
429 | */ | 429 | */ |
430 | extern const struct file_operations nfs_file_operations; | 430 | extern const struct file_operations nfs_file_operations; |
431 | #ifdef CONFIG_NFS_V4 | 431 | #if IS_ENABLED(CONFIG_NFS_V4) |
432 | extern const struct file_operations nfs4_file_operations; | 432 | extern const struct file_operations nfs4_file_operations; |
433 | #endif /* CONFIG_NFS_V4 */ | 433 | #endif /* CONFIG_NFS_V4 */ |
434 | extern const struct address_space_operations nfs_file_aops; | 434 | extern const struct address_space_operations nfs_file_aops; |
@@ -473,10 +473,10 @@ extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, | |||
473 | unsigned long); | 473 | unsigned long); |
474 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, | 474 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, |
475 | const struct iovec *iov, unsigned long nr_segs, | 475 | const struct iovec *iov, unsigned long nr_segs, |
476 | loff_t pos); | 476 | loff_t pos, bool uio); |
477 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, | 477 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, |
478 | const struct iovec *iov, unsigned long nr_segs, | 478 | const struct iovec *iov, unsigned long nr_segs, |
479 | loff_t pos); | 479 | loff_t pos, bool uio); |
480 | 480 | ||
481 | /* | 481 | /* |
482 | * linux/fs/nfs/dir.c | 482 | * linux/fs/nfs/dir.c |
@@ -538,7 +538,7 @@ extern void nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); | |||
538 | extern int nfs_wb_all(struct inode *inode); | 538 | extern int nfs_wb_all(struct inode *inode); |
539 | extern int nfs_wb_page(struct inode *inode, struct page* page); | 539 | extern int nfs_wb_page(struct inode *inode, struct page* page); |
540 | extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); | 540 | extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); |
541 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 541 | #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) |
542 | extern int nfs_commit_inode(struct inode *, int); | 542 | extern int nfs_commit_inode(struct inode *, int); |
543 | extern struct nfs_commit_data *nfs_commitdata_alloc(void); | 543 | extern struct nfs_commit_data *nfs_commitdata_alloc(void); |
544 | extern void nfs_commit_free(struct nfs_commit_data *data); | 544 | extern void nfs_commit_free(struct nfs_commit_data *data); |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 65327652c61a..310c63c8ab2c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -48,11 +48,12 @@ struct nfs_client { | |||
48 | struct rpc_clnt * cl_rpcclient; | 48 | struct rpc_clnt * cl_rpcclient; |
49 | const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ | 49 | const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ |
50 | int cl_proto; /* Network transport protocol */ | 50 | int cl_proto; /* Network transport protocol */ |
51 | struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */ | ||
51 | 52 | ||
52 | u32 cl_minorversion;/* NFSv4 minorversion */ | 53 | u32 cl_minorversion;/* NFSv4 minorversion */ |
53 | struct rpc_cred *cl_machine_cred; | 54 | struct rpc_cred *cl_machine_cred; |
54 | 55 | ||
55 | #ifdef CONFIG_NFS_V4 | 56 | #if IS_ENABLED(CONFIG_NFS_V4) |
56 | u64 cl_clientid; /* constant */ | 57 | u64 cl_clientid; /* constant */ |
57 | nfs4_verifier cl_confirm; /* Clientid verifier */ | 58 | nfs4_verifier cl_confirm; /* Clientid verifier */ |
58 | unsigned long cl_state; | 59 | unsigned long cl_state; |
@@ -137,7 +138,7 @@ struct nfs_server { | |||
137 | #endif | 138 | #endif |
138 | 139 | ||
139 | u32 pnfs_blksize; /* layout_blksize attr */ | 140 | u32 pnfs_blksize; /* layout_blksize attr */ |
140 | #ifdef CONFIG_NFS_V4 | 141 | #if IS_ENABLED(CONFIG_NFS_V4) |
141 | u32 attr_bitmask[3];/* V4 bitmask representing the set | 142 | u32 attr_bitmask[3];/* V4 bitmask representing the set |
142 | of attributes supported on this | 143 | of attributes supported on this |
143 | filesystem */ | 144 | filesystem */ |
@@ -200,7 +201,7 @@ struct nfs_server { | |||
200 | #define NFS4_MAX_SLOT_TABLE (256U) | 201 | #define NFS4_MAX_SLOT_TABLE (256U) |
201 | #define NFS4_NO_SLOT ((u32)-1) | 202 | #define NFS4_NO_SLOT ((u32)-1) |
202 | 203 | ||
203 | #if defined(CONFIG_NFS_V4) | 204 | #if IS_ENABLED(CONFIG_NFS_V4) |
204 | 205 | ||
205 | /* Sessions */ | 206 | /* Sessions */ |
206 | #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) | 207 | #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) |
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h index 7eed2012d288..ece91c57ad79 100644 --- a/include/linux/nfs_idmap.h +++ b/include/linux/nfs_idmap.h | |||
@@ -69,7 +69,7 @@ struct nfs_server; | |||
69 | struct nfs_fattr; | 69 | struct nfs_fattr; |
70 | struct nfs4_string; | 70 | struct nfs4_string; |
71 | 71 | ||
72 | #ifdef CONFIG_NFS_V4 | 72 | #if IS_ENABLED(CONFIG_NFS_V4) |
73 | int nfs_idmap_init(void); | 73 | int nfs_idmap_init(void); |
74 | void nfs_idmap_quit(void); | 74 | void nfs_idmap_quit(void); |
75 | #else | 75 | #else |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 0e181c2320b7..00485e084394 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -824,7 +824,7 @@ struct nfs3_getaclres { | |||
824 | struct posix_acl * acl_default; | 824 | struct posix_acl * acl_default; |
825 | }; | 825 | }; |
826 | 826 | ||
827 | #ifdef CONFIG_NFS_V4 | 827 | #if IS_ENABLED(CONFIG_NFS_V4) |
828 | 828 | ||
829 | typedef u64 clientid4; | 829 | typedef u64 clientid4; |
830 | 830 | ||
@@ -1353,6 +1353,8 @@ struct nfs_renamedata { | |||
1353 | struct nfs_access_entry; | 1353 | struct nfs_access_entry; |
1354 | struct nfs_client; | 1354 | struct nfs_client; |
1355 | struct rpc_timeout; | 1355 | struct rpc_timeout; |
1356 | struct nfs_subversion; | ||
1357 | struct nfs_mount_info; | ||
1356 | struct nfs_client_initdata; | 1358 | struct nfs_client_initdata; |
1357 | struct nfs_pageio_descriptor; | 1359 | struct nfs_pageio_descriptor; |
1358 | 1360 | ||
@@ -1370,6 +1372,8 @@ struct nfs_rpc_ops { | |||
1370 | struct nfs_fsinfo *); | 1372 | struct nfs_fsinfo *); |
1371 | struct vfsmount *(*submount) (struct nfs_server *, struct dentry *, | 1373 | struct vfsmount *(*submount) (struct nfs_server *, struct dentry *, |
1372 | struct nfs_fh *, struct nfs_fattr *); | 1374 | struct nfs_fh *, struct nfs_fattr *); |
1375 | struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *, | ||
1376 | struct nfs_subversion *); | ||
1373 | int (*getattr) (struct nfs_server *, struct nfs_fh *, | 1377 | int (*getattr) (struct nfs_server *, struct nfs_fh *, |
1374 | struct nfs_fattr *); | 1378 | struct nfs_fattr *); |
1375 | int (*setattr) (struct dentry *, struct nfs_fattr *, | 1379 | int (*setattr) (struct dentry *, struct nfs_fattr *, |
@@ -1435,6 +1439,9 @@ struct nfs_rpc_ops { | |||
1435 | (*init_client) (struct nfs_client *, const struct rpc_timeout *, | 1439 | (*init_client) (struct nfs_client *, const struct rpc_timeout *, |
1436 | const char *, rpc_authflavor_t); | 1440 | const char *, rpc_authflavor_t); |
1437 | void (*free_client) (struct nfs_client *); | 1441 | void (*free_client) (struct nfs_client *); |
1442 | struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *); | ||
1443 | struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, | ||
1444 | struct nfs_fattr *, rpc_authflavor_t); | ||
1438 | }; | 1445 | }; |
1439 | 1446 | ||
1440 | /* | 1447 | /* |
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index ce4743a26015..fa63048fecff 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h | |||
@@ -143,6 +143,7 @@ typedef struct svc_fh { | |||
143 | int fh_maxsize; /* max size for fh_handle */ | 143 | int fh_maxsize; /* max size for fh_handle */ |
144 | 144 | ||
145 | unsigned char fh_locked; /* inode locked by us */ | 145 | unsigned char fh_locked; /* inode locked by us */ |
146 | unsigned char fh_want_write; /* remount protection taken */ | ||
146 | 147 | ||
147 | #ifdef CONFIG_NFSD_V3 | 148 | #ifdef CONFIG_NFSD_V3 |
148 | unsigned char fh_post_saved; /* post-op attrs saved */ | 149 | unsigned char fh_post_saved; /* post-op attrs saved */ |
diff --git a/include/linux/of.h b/include/linux/of.h index 42c2a58328c1..5919ee33f2b7 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kref.h> | 21 | #include <linux/kref.h> |
22 | #include <linux/mod_devicetable.h> | 22 | #include <linux/mod_devicetable.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/topology.h> | ||
24 | 25 | ||
25 | #include <asm/byteorder.h> | 26 | #include <asm/byteorder.h> |
26 | #include <asm/errno.h> | 27 | #include <asm/errno.h> |
@@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
158 | 159 | ||
159 | #define OF_BAD_ADDR ((u64)-1) | 160 | #define OF_BAD_ADDR ((u64)-1) |
160 | 161 | ||
161 | #ifndef of_node_to_nid | ||
162 | static inline int of_node_to_nid(struct device_node *np) { return -1; } | ||
163 | #define of_node_to_nid of_node_to_nid | ||
164 | #endif | ||
165 | |||
166 | static inline const char* of_node_full_name(struct device_node *np) | 162 | static inline const char* of_node_full_name(struct device_node *np) |
167 | { | 163 | { |
168 | return np ? np->full_name : "<no-node>"; | 164 | return np ? np->full_name : "<no-node>"; |
@@ -427,6 +423,15 @@ static inline int of_machine_is_compatible(const char *compat) | |||
427 | while (0) | 423 | while (0) |
428 | #endif /* CONFIG_OF */ | 424 | #endif /* CONFIG_OF */ |
429 | 425 | ||
426 | #ifndef of_node_to_nid | ||
427 | static inline int of_node_to_nid(struct device_node *np) | ||
428 | { | ||
429 | return numa_node_id(); | ||
430 | } | ||
431 | |||
432 | #define of_node_to_nid of_node_to_nid | ||
433 | #endif | ||
434 | |||
430 | /** | 435 | /** |
431 | * of_property_read_bool - Find a property | 436 | * of_property_read_bool - Find a property |
432 | * @np: device node from which the property value is to be read. | 437 | * @np: device node from which the property value is to be read. |
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h new file mode 100644 index 000000000000..eb475a8ea25b --- /dev/null +++ b/include/linux/omap-dma.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * OMAP DMA Engine support | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef __LINUX_OMAP_DMA_H | ||
9 | #define __LINUX_OMAP_DMA_H | ||
10 | |||
11 | struct dma_chan; | ||
12 | |||
13 | #if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE) | ||
14 | bool omap_dma_filter_fn(struct dma_chan *, void *); | ||
15 | #else | ||
16 | static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) | ||
17 | { | ||
18 | return false; | ||
19 | } | ||
20 | #endif | ||
21 | |||
22 | #endif | ||
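The new header only exports the filter function a dmaengine client needs to pick up an OMAP channel. A rough sketch of a client driver; the request line number 42 is made up:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

static struct dma_chan *demo_request_omap_chan(void)
{
	dma_cap_mask_t mask;
	int req_line = 42;	/* hypothetical DMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter matches only channels served by the OMAP DMA engine;
	 * it runs synchronously, so passing a stack address is fine. */
	return dma_request_channel(mask, omap_dma_filter_fn, &req_line);
}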
diff --git a/include/linux/oom.h b/include/linux/oom.h index e4c29bc72e70..49a3031fda50 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -40,15 +40,36 @@ enum oom_constraint { | |||
40 | CONSTRAINT_MEMCG, | 40 | CONSTRAINT_MEMCG, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | enum oom_scan_t { | ||
44 | OOM_SCAN_OK, /* scan thread and find its badness */ | ||
45 | OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */ | ||
46 | OOM_SCAN_ABORT, /* abort the iteration and return */ | ||
47 | OOM_SCAN_SELECT, /* always select this thread first */ | ||
48 | }; | ||
49 | |||
43 | extern void compare_swap_oom_score_adj(int old_val, int new_val); | 50 | extern void compare_swap_oom_score_adj(int old_val, int new_val); |
44 | extern int test_set_oom_score_adj(int new_val); | 51 | extern int test_set_oom_score_adj(int new_val); |
45 | 52 | ||
46 | extern unsigned long oom_badness(struct task_struct *p, | 53 | extern unsigned long oom_badness(struct task_struct *p, |
47 | struct mem_cgroup *memcg, const nodemask_t *nodemask, | 54 | struct mem_cgroup *memcg, const nodemask_t *nodemask, |
48 | unsigned long totalpages); | 55 | unsigned long totalpages); |
56 | extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | ||
57 | unsigned int points, unsigned long totalpages, | ||
58 | struct mem_cgroup *memcg, nodemask_t *nodemask, | ||
59 | const char *message); | ||
60 | |||
49 | extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 61 | extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); |
50 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 62 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); |
51 | 63 | ||
64 | extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | ||
65 | int order, const nodemask_t *nodemask); | ||
66 | |||
67 | extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, | ||
68 | unsigned long totalpages, const nodemask_t *nodemask, | ||
69 | bool force_kill); | ||
70 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, | ||
71 | int order); | ||
72 | |||
52 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 73 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
53 | int order, nodemask_t *mask, bool force_kill); | 74 | int order, nodemask_t *mask, bool force_kill); |
54 | extern int register_oom_notifier(struct notifier_block *nb); | 75 | extern int register_oom_notifier(struct notifier_block *nb); |
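The oom_scan_t codes let the global and memcg OOM killers share one task-selection loop. The sketch below shows the intended pattern in simplified form; it is illustrative, not the exact mm/oom_kill.c implementation, and the victim bookkeeping is trimmed.

#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/nodemask.h>

static struct task_struct *demo_pick_victim(unsigned long totalpages,
					     const nodemask_t *nodemask)
{
	struct task_struct *p, *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process(p) {
		unsigned long points;

		switch (oom_scan_process_thread(p, totalpages, nodemask, false)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			continue;
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return NULL;
		case OOM_SCAN_OK:
			break;
		}

		points = oom_badness(p, NULL, nodemask, totalpages);
		if (points > chosen_points) {
			chosen = p;
			chosen_points = points;
		}
	}
	rcu_read_unlock();
	return chosen;
}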
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index c88d2a9451af..b5d13841604e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
10 | #include <linux/mmdebug.h> | ||
10 | #ifndef __GENERATING_BOUNDS_H | 11 | #ifndef __GENERATING_BOUNDS_H |
11 | #include <linux/mm_types.h> | 12 | #include <linux/mm_types.h> |
12 | #include <generated/bounds.h> | 13 | #include <generated/bounds.h> |
@@ -453,6 +454,34 @@ static inline int PageTransTail(struct page *page) | |||
453 | } | 454 | } |
454 | #endif | 455 | #endif |
455 | 456 | ||
457 | /* | ||
458 | * If network-based swap is enabled, sl*b must keep track of whether pages | ||
459 | * were allocated from pfmemalloc reserves. | ||
460 | */ | ||
461 | static inline int PageSlabPfmemalloc(struct page *page) | ||
462 | { | ||
463 | VM_BUG_ON(!PageSlab(page)); | ||
464 | return PageActive(page); | ||
465 | } | ||
466 | |||
467 | static inline void SetPageSlabPfmemalloc(struct page *page) | ||
468 | { | ||
469 | VM_BUG_ON(!PageSlab(page)); | ||
470 | SetPageActive(page); | ||
471 | } | ||
472 | |||
473 | static inline void __ClearPageSlabPfmemalloc(struct page *page) | ||
474 | { | ||
475 | VM_BUG_ON(!PageSlab(page)); | ||
476 | __ClearPageActive(page); | ||
477 | } | ||
478 | |||
479 | static inline void ClearPageSlabPfmemalloc(struct page *page) | ||
480 | { | ||
481 | VM_BUG_ON(!PageSlab(page)); | ||
482 | ClearPageActive(page); | ||
483 | } | ||
484 | |||
456 | #ifdef CONFIG_MMU | 485 | #ifdef CONFIG_MMU |
457 | #define __PG_MLOCKED (1 << PG_mlocked) | 486 | #define __PG_MLOCKED (1 << PG_mlocked) |
458 | #else | 487 | #else |
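The PageSlabPfmemalloc helpers overload PG_active on slab pages so sl*b can remember that a backing page came from the pfmemalloc reserves. A hedged sketch of how an allocator might propagate the hint that the page allocator leaves in struct page; the function name is invented:

#include <linux/page-flags.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

/* Hypothetical slab-side helper: allocate a backing page and record
 * whether it was served from the pfmemalloc reserves. */
static struct page *demo_alloc_slab_page(gfp_t flags, int order)
{
	struct page *page = alloc_pages(flags, order);

	if (!page)
		return NULL;

	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);

	return page;
}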
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 3bdcab30ca41..105077aa7685 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __LINUX_PAGEISOLATION_H | 1 | #ifndef __LINUX_PAGEISOLATION_H |
2 | #define __LINUX_PAGEISOLATION_H | 2 | #define __LINUX_PAGEISOLATION_H |
3 | 3 | ||
4 | |||
5 | bool has_unmovable_pages(struct zone *zone, struct page *page, int count); | ||
6 | void set_pageblock_migratetype(struct page *page, int migratetype); | ||
7 | int move_freepages_block(struct zone *zone, struct page *page, | ||
8 | int migratetype); | ||
4 | /* | 9 | /* |
5 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. | 10 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. |
6 | * If specified range includes migrate types other than MOVABLE or CMA, | 11 | * If specified range includes migrate types other than MOVABLE or CMA, |
@@ -10,7 +15,7 @@ | |||
10 | * free all pages in the range. test_page_isolated() can be used for | 15 | * free all pages in the range. test_page_isolated() can be used for |
11 | * test it. | 16 | * test it. |
12 | */ | 17 | */ |
13 | extern int | 18 | int |
14 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 19 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
15 | unsigned migratetype); | 20 | unsigned migratetype); |
16 | 21 | ||
@@ -18,7 +23,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | |||
18 | * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. | 23 | * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. |
19 | * target range is [start_pfn, end_pfn) | 24 | * target range is [start_pfn, end_pfn) |
20 | */ | 25 | */ |
21 | extern int | 26 | int |
22 | undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 27 | undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
23 | unsigned migratetype); | 28 | unsigned migratetype); |
24 | 29 | ||
@@ -30,8 +35,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn); | |||
30 | /* | 35 | /* |
31 | * Internal functions. Changes pageblock's migrate type. | 36 | * Internal functions. Changes pageblock's migrate type. |
32 | */ | 37 | */ |
33 | extern int set_migratetype_isolate(struct page *page); | 38 | int set_migratetype_isolate(struct page *page); |
34 | extern void unset_migratetype_isolate(struct page *page, unsigned migratetype); | 39 | void unset_migratetype_isolate(struct page *page, unsigned migratetype); |
35 | 40 | ||
36 | 41 | ||
37 | #endif | 42 | #endif |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index a88cdba27809..777a524716db 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -12,7 +12,7 @@ enum { | |||
12 | #ifndef __GENERATING_BOUNDS_H | 12 | #ifndef __GENERATING_BOUNDS_H |
13 | #include <generated/bounds.h> | 13 | #include <generated/bounds.h> |
14 | 14 | ||
15 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 15 | #ifdef CONFIG_MEMCG |
16 | #include <linux/bit_spinlock.h> | 16 | #include <linux/bit_spinlock.h> |
17 | 17 | ||
18 | /* | 18 | /* |
@@ -82,7 +82,7 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc) | |||
82 | bit_spin_unlock(PCG_LOCK, &pc->flags); | 82 | bit_spin_unlock(PCG_LOCK, &pc->flags); |
83 | } | 83 | } |
84 | 84 | ||
85 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 85 | #else /* CONFIG_MEMCG */ |
86 | struct page_cgroup; | 86 | struct page_cgroup; |
87 | 87 | ||
88 | static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) | 88 | static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) |
@@ -102,11 +102,11 @@ static inline void __init page_cgroup_init_flatmem(void) | |||
102 | { | 102 | { |
103 | } | 103 | } |
104 | 104 | ||
105 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR */ | 105 | #endif /* CONFIG_MEMCG */ |
106 | 106 | ||
107 | #include <linux/swap.h> | 107 | #include <linux/swap.h> |
108 | 108 | ||
109 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 109 | #ifdef CONFIG_MEMCG_SWAP |
110 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | 110 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, |
111 | unsigned short old, unsigned short new); | 111 | unsigned short old, unsigned short new); |
112 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); | 112 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); |
@@ -138,7 +138,7 @@ static inline void swap_cgroup_swapoff(int type) | |||
138 | return; | 138 | return; |
139 | } | 139 | } |
140 | 140 | ||
141 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */ | 141 | #endif /* CONFIG_MEMCG_SWAP */ |
142 | 142 | ||
143 | #endif /* !__GENERATING_BOUNDS_H */ | 143 | #endif /* !__GENERATING_BOUNDS_H */ |
144 | 144 | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7cfad3bbb0cc..e42c762f0dc7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -286,6 +286,11 @@ static inline loff_t page_offset(struct page *page) | |||
286 | return ((loff_t)page->index) << PAGE_CACHE_SHIFT; | 286 | return ((loff_t)page->index) << PAGE_CACHE_SHIFT; |
287 | } | 287 | } |
288 | 288 | ||
289 | static inline loff_t page_file_offset(struct page *page) | ||
290 | { | ||
291 | return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; | ||
292 | } | ||
293 | |||
289 | extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, | 294 | extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, |
290 | unsigned long address); | 295 | unsigned long address); |
291 | 296 | ||
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index e11d1c0fc60f..ad1a427b5267 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -160,4 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | |||
160 | long pipe_fcntl(struct file *, unsigned int, unsigned long arg); | 160 | long pipe_fcntl(struct file *, unsigned int, unsigned long arg); |
161 | struct pipe_inode_info *get_pipe_info(struct file *file); | 161 | struct pipe_inode_info *get_pipe_info(struct file *file); |
162 | 162 | ||
163 | int create_pipe_files(struct file **, int); | ||
164 | |||
163 | #endif | 165 | #endif |
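create_pipe_files() gives in-kernel users both struct file ends of a pipe without going through the pipe(2) path. A hedged sketch; the wrapper below is hypothetical:

#include <linux/pipe_fs_i.h>
#include <linux/fs.h>

static int demo_make_pipe(struct file **rd, struct file **wr)
{
	struct file *files[2];
	int err;

	err = create_pipe_files(files, 0);	/* 0: no O_NONBLOCK/O_DIRECT */
	if (err)
		return err;

	*rd = files[0];				/* read end */
	*wr = files[1];				/* write end */
	return 0;
}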
diff --git a/include/linux/lp8727.h b/include/linux/platform_data/lp8727.h index ea98c6133d32..ea98c6133d32 100644 --- a/include/linux/lp8727.h +++ b/include/linux/platform_data/lp8727.h | |||
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index 241065c9ce51..cd22029e32aa 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define _CHARGER_MANAGER_H | 16 | #define _CHARGER_MANAGER_H |
17 | 17 | ||
18 | #include <linux/power_supply.h> | 18 | #include <linux/power_supply.h> |
19 | #include <linux/extcon.h> | ||
19 | 20 | ||
20 | enum data_source { | 21 | enum data_source { |
21 | CM_BATTERY_PRESENT, | 22 | CM_BATTERY_PRESENT, |
@@ -65,6 +66,70 @@ struct charger_global_desc { | |||
65 | }; | 66 | }; |
66 | 67 | ||
67 | /** | 68 | /** |
69 | * struct charger_cable | ||
70 | * @extcon_name: the name of the extcon device. | ||
71 | * @name: the name of the charger cable (external connector). | ||
72 | * @extcon_dev: the extcon device. | ||
73 | * @wq: the workqueue to control the charger according to the state | ||
74 | * of the charger cable: the charger is enabled when the cable is | ||
75 | * attached and disabled when it is detached. | ||
76 | * @nb: the notifier block to receive changed state from EXTCON | ||
77 | * (External Connector) when charger cable is attached/detached. | ||
78 | * @attached: the state of charger cable. | ||
79 | * true: the charger cable is attached | ||
80 | * false: the charger cable is detached | ||
81 | * @charger: the instance of struct charger_regulator. | ||
82 | * @cm: the Charger Manager representing the battery. | ||
83 | */ | ||
84 | struct charger_cable { | ||
85 | const char *extcon_name; | ||
86 | const char *name; | ||
87 | |||
88 | /* The charger-manager uses the extcon framework */ | ||
89 | struct extcon_specific_cable_nb extcon_dev; | ||
90 | struct work_struct wq; | ||
91 | struct notifier_block nb; | ||
92 | |||
93 | /* The state of charger cable */ | ||
94 | bool attached; | ||
95 | |||
96 | struct charger_regulator *charger; | ||
97 | |||
98 | /* | ||
99 | * Set the regulator's min/max current to protect against over-current, | ||
100 | * depending on the kind of charger cable that is attached. | ||
101 | */ | ||
102 | int min_uA; | ||
103 | int max_uA; | ||
104 | |||
105 | struct charger_manager *cm; | ||
106 | }; | ||
107 | |||
108 | /** | ||
109 | * struct charger_regulator | ||
110 | * @regulator_name: the name of the regulator used for charging. | ||
111 | * @consumer: the regulator consumer for the charger. | ||
112 | * @cables: | ||
113 | * the array of charger cables used to enable/disable the charger | ||
114 | * and to set the current limit according to the constraint data | ||
115 | * of struct charger_cable when a cable in the array is | ||
116 | * attached/detached. | ||
117 | * @num_cables: the number of charger cables. | ||
118 | */ | ||
119 | struct charger_regulator { | ||
120 | /* The name of regulator for charging */ | ||
121 | const char *regulator_name; | ||
122 | struct regulator *consumer; | ||
123 | |||
124 | /* | ||
125 | * Store constraint information related to the current limit; | ||
126 | * each cable has different conditions for charging. | ||
127 | */ | ||
128 | struct charger_cable *cables; | ||
129 | int num_cables; | ||
130 | }; | ||
131 | |||
132 | /** | ||
68 | * struct charger_desc | 133 | * struct charger_desc |
69 | * @psy_name: the name of power-supply-class for charger manager | 134 | * @psy_name: the name of power-supply-class for charger manager |
70 | * @polling_mode: | 135 | * @polling_mode: |
@@ -109,7 +174,7 @@ struct charger_desc { | |||
109 | char **psy_charger_stat; | 174 | char **psy_charger_stat; |
110 | 175 | ||
111 | int num_charger_regulators; | 176 | int num_charger_regulators; |
112 | struct regulator_bulk_data *charger_regulators; | 177 | struct charger_regulator *charger_regulators; |
113 | 178 | ||
114 | char *psy_fuel_gauge; | 179 | char *psy_fuel_gauge; |
115 | 180 | ||
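With charger_regulator now carrying its own cable list, board code can tie extcon cable events to per-cable current limits. A hedged example of the platform data; the extcon device name, cable names, currents and regulator name are illustrative only:

#include <linux/power/charger-manager.h>
#include <linux/kernel.h>

static struct charger_cable demo_cables[] = {
	{
		.extcon_name	= "max8997-muic",	/* hypothetical extcon device */
		.name		= "USB",
		.min_uA		= 100000,
		.max_uA		= 500000,
	}, {
		.extcon_name	= "max8997-muic",
		.name		= "TA",
		.min_uA		= 100000,
		.max_uA		= 650000,
	},
};

static struct charger_regulator demo_regulators[] = {
	{
		.regulator_name	= "chg-reg",		/* hypothetical supply name */
		.cables		= demo_cables,
		.num_cables	= ARRAY_SIZE(demo_cables),
	},
};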
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 3b912bee28d1..0bafbb15f29c 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h | |||
@@ -109,6 +109,8 @@ enum power_supply_property { | |||
109 | POWER_SUPPLY_PROP_CHARGE_NOW, | 109 | POWER_SUPPLY_PROP_CHARGE_NOW, |
110 | POWER_SUPPLY_PROP_CHARGE_AVG, | 110 | POWER_SUPPLY_PROP_CHARGE_AVG, |
111 | POWER_SUPPLY_PROP_CHARGE_COUNTER, | 111 | POWER_SUPPLY_PROP_CHARGE_COUNTER, |
112 | POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, | ||
113 | POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, | ||
112 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, | 114 | POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, |
113 | POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, | 115 | POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, |
114 | POWER_SUPPLY_PROP_ENERGY_FULL, | 116 | POWER_SUPPLY_PROP_ENERGY_FULL, |
@@ -116,9 +118,15 @@ enum power_supply_property { | |||
116 | POWER_SUPPLY_PROP_ENERGY_NOW, | 118 | POWER_SUPPLY_PROP_ENERGY_NOW, |
117 | POWER_SUPPLY_PROP_ENERGY_AVG, | 119 | POWER_SUPPLY_PROP_ENERGY_AVG, |
118 | POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ | 120 | POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ |
121 | POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ | ||
122 | POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ | ||
119 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, | 123 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, |
120 | POWER_SUPPLY_PROP_TEMP, | 124 | POWER_SUPPLY_PROP_TEMP, |
125 | POWER_SUPPLY_PROP_TEMP_ALERT_MIN, | ||
126 | POWER_SUPPLY_PROP_TEMP_ALERT_MAX, | ||
121 | POWER_SUPPLY_PROP_TEMP_AMBIENT, | 127 | POWER_SUPPLY_PROP_TEMP_AMBIENT, |
128 | POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN, | ||
129 | POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX, | ||
122 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, | 130 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, |
123 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, | 131 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, |
124 | POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, | 132 | POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, |
@@ -173,6 +181,9 @@ struct power_supply { | |||
173 | /* private */ | 181 | /* private */ |
174 | struct device *dev; | 182 | struct device *dev; |
175 | struct work_struct changed_work; | 183 | struct work_struct changed_work; |
184 | #ifdef CONFIG_THERMAL | ||
185 | struct thermal_zone_device *tzd; | ||
186 | #endif | ||
176 | 187 | ||
177 | #ifdef CONFIG_LEDS_TRIGGERS | 188 | #ifdef CONFIG_LEDS_TRIGGERS |
178 | struct led_trigger *charging_full_trig; | 189 | struct led_trigger *charging_full_trig; |
@@ -236,6 +247,7 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp) | |||
236 | case POWER_SUPPLY_PROP_CHARGE_NOW: | 247 | case POWER_SUPPLY_PROP_CHARGE_NOW: |
237 | case POWER_SUPPLY_PROP_CHARGE_AVG: | 248 | case POWER_SUPPLY_PROP_CHARGE_AVG: |
238 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: | 249 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: |
250 | case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: | ||
239 | case POWER_SUPPLY_PROP_CURRENT_MAX: | 251 | case POWER_SUPPLY_PROP_CURRENT_MAX: |
240 | case POWER_SUPPLY_PROP_CURRENT_NOW: | 252 | case POWER_SUPPLY_PROP_CURRENT_NOW: |
241 | case POWER_SUPPLY_PROP_CURRENT_AVG: | 253 | case POWER_SUPPLY_PROP_CURRENT_AVG: |
@@ -263,6 +275,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) | |||
263 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | 275 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: |
264 | case POWER_SUPPLY_PROP_VOLTAGE_AVG: | 276 | case POWER_SUPPLY_PROP_VOLTAGE_AVG: |
265 | case POWER_SUPPLY_PROP_VOLTAGE_OCV: | 277 | case POWER_SUPPLY_PROP_VOLTAGE_OCV: |
278 | case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: | ||
266 | case POWER_SUPPLY_PROP_POWER_NOW: | 279 | case POWER_SUPPLY_PROP_POWER_NOW: |
267 | return 1; | 280 | return 1; |
268 | default: | 281 | default: |
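Drivers expose the new constant charge and alert properties through the usual get_property callback; a hedged sketch, where the chip helpers bq_read_cc_ua() and bq_read_cv_uv() are placeholders rather than a real API:

static int charger_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
		val->intval = bq_read_cc_ua();	/* microamps */
		return 0;
	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
		val->intval = bq_read_cv_uv();	/* microvolts */
		return 0;
	case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
		val->intval = 5;		/* percent */
		return 0;
	default:
		return -EINVAL;
	}
}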
diff --git a/include/linux/random.h b/include/linux/random.h index 8f74538c96db..ac621ce886ca 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -48,13 +48,13 @@ struct rnd_state { | |||
48 | 48 | ||
49 | #ifdef __KERNEL__ | 49 | #ifdef __KERNEL__ |
50 | 50 | ||
51 | extern void rand_initialize_irq(int irq); | 51 | extern void add_device_randomness(const void *, unsigned int); |
52 | |||
53 | extern void add_input_randomness(unsigned int type, unsigned int code, | 52 | extern void add_input_randomness(unsigned int type, unsigned int code, |
54 | unsigned int value); | 53 | unsigned int value); |
55 | extern void add_interrupt_randomness(int irq); | 54 | extern void add_interrupt_randomness(int irq, int irq_flags); |
56 | 55 | ||
57 | extern void get_random_bytes(void *buf, int nbytes); | 56 | extern void get_random_bytes(void *buf, int nbytes); |
57 | extern void get_random_bytes_arch(void *buf, int nbytes); | ||
58 | void generate_random_uuid(unsigned char uuid_out[16]); | 58 | void generate_random_uuid(unsigned char uuid_out[16]); |
59 | 59 | ||
60 | #ifndef MODULE | 60 | #ifndef MODULE |
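add_device_randomness() is meant for device-specific data that is not secret but differs between machines (MAC addresses, serial numbers); it stirs the pool without crediting entropy. A hedged example of a driver feeding it at probe time:

static void seed_pool_from_device(struct net_device *netdev,
				  const char *serial)
{
	/* neither call credits entropy; they only mix the data in */
	add_device_randomness(netdev->dev_addr, netdev->addr_len);
	add_device_randomness(serial, strlen(serial));
}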
diff --git a/include/linux/sched.h b/include/linux/sched.h index 68dcffaa62a0..c147e7024f11 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1584,7 +1584,7 @@ struct task_struct { | |||
1584 | /* bitmask and counter of trace recursion */ | 1584 | /* bitmask and counter of trace recursion */ |
1585 | unsigned long trace_recursion; | 1585 | unsigned long trace_recursion; |
1586 | #endif /* CONFIG_TRACING */ | 1586 | #endif /* CONFIG_TRACING */ |
1587 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 1587 | #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ |
1588 | struct memcg_batch_info { | 1588 | struct memcg_batch_info { |
1589 | int do_batch; /* incremented when batch uncharge started */ | 1589 | int do_batch; /* incremented when batch uncharge started */ |
1590 | struct mem_cgroup *memcg; /* target memcg of uncharge */ | 1590 | struct mem_cgroup *memcg; /* target memcg of uncharge */ |
@@ -1894,6 +1894,13 @@ static inline void rcu_copy_process(struct task_struct *p) | |||
1894 | 1894 | ||
1895 | #endif | 1895 | #endif |
1896 | 1896 | ||
1897 | static inline void tsk_restore_flags(struct task_struct *task, | ||
1898 | unsigned long orig_flags, unsigned long flags) | ||
1899 | { | ||
1900 | task->flags &= ~flags; | ||
1901 | task->flags |= orig_flags & flags; | ||
1902 | } | ||
1903 | |||
1897 | #ifdef CONFIG_SMP | 1904 | #ifdef CONFIG_SMP |
1898 | extern void do_set_cpus_allowed(struct task_struct *p, | 1905 | extern void do_set_cpus_allowed(struct task_struct *p, |
1899 | const struct cpumask *new_mask); | 1906 | const struct cpumask *new_mask); |
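tsk_restore_flags() restores only the selected bits of task->flags, so the usual save/set/restore pattern for a flag such as PF_MEMALLOC cannot clobber unrelated bits that changed in between:

static void reclaim_with_reserves(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;	/* may dip into emergency reserves */
	/* ... allocations that must not recurse into reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}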
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index 93f9821554b6..a3728bf66f0e 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h | |||
@@ -50,6 +50,7 @@ struct shdma_desc { | |||
50 | struct list_head node; | 50 | struct list_head node; |
51 | struct dma_async_tx_descriptor async_tx; | 51 | struct dma_async_tx_descriptor async_tx; |
52 | enum dma_transfer_direction direction; | 52 | enum dma_transfer_direction direction; |
53 | size_t partial; | ||
53 | dma_cookie_t cookie; | 54 | dma_cookie_t cookie; |
54 | int chunks; | 55 | int chunks; |
55 | int mark; | 56 | int mark; |
@@ -98,6 +99,7 @@ struct shdma_ops { | |||
98 | void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); | 99 | void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); |
99 | struct shdma_desc *(*embedded_desc)(void *, int); | 100 | struct shdma_desc *(*embedded_desc)(void *, int); |
100 | bool (*chan_irq)(struct shdma_chan *, int); | 101 | bool (*chan_irq)(struct shdma_chan *, int); |
102 | size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *); | ||
101 | }; | 103 | }; |
102 | 104 | ||
103 | struct shdma_dev { | 105 | struct shdma_dev { |
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 07ceb97d53fa..ac6b8ee07825 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h | |||
@@ -20,7 +20,6 @@ struct shrink_control { | |||
20 | * 'nr_to_scan' entries and attempt to free them up. It should return | 20 | * 'nr_to_scan' entries and attempt to free them up. It should return |
21 | * the number of objects which remain in the cache. If it returns -1, it means | 21 | * the number of objects which remain in the cache. If it returns -1, it means |
22 | * it cannot do any scanning at this time (eg. there is a risk of deadlock). | 22 | * it cannot do any scanning at this time (eg. there is a risk of deadlock). |
23 | * The callback must not return -1 if nr_to_scan is zero. | ||
24 | * | 23 | * |
25 | * The 'gfpmask' refers to the allocation we are currently trying to | 24 | * The 'gfpmask' refers to the allocation we are currently trying to |
26 | * fulfil. | 25 | * fulfil. |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d205c4be7f5b..7632c87da2c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -462,6 +462,7 @@ struct sk_buff { | |||
462 | #ifdef CONFIG_IPV6_NDISC_NODETYPE | 462 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
463 | __u8 ndisc_nodetype:2; | 463 | __u8 ndisc_nodetype:2; |
464 | #endif | 464 | #endif |
465 | __u8 pfmemalloc:1; | ||
465 | __u8 ooo_okay:1; | 466 | __u8 ooo_okay:1; |
466 | __u8 l4_rxhash:1; | 467 | __u8 l4_rxhash:1; |
467 | __u8 wifi_acked_valid:1; | 468 | __u8 wifi_acked_valid:1; |
@@ -502,6 +503,15 @@ struct sk_buff { | |||
502 | #include <linux/slab.h> | 503 | #include <linux/slab.h> |
503 | 504 | ||
504 | 505 | ||
506 | #define SKB_ALLOC_FCLONE 0x01 | ||
507 | #define SKB_ALLOC_RX 0x02 | ||
508 | |||
509 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ | ||
510 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) | ||
511 | { | ||
512 | return unlikely(skb->pfmemalloc); | ||
513 | } | ||
514 | |||
505 | /* | 515 | /* |
506 | * skb might have a dst pointer attached, refcounted or not. | 516 | * skb might have a dst pointer attached, refcounted or not. |
507 | * _skb_refdst low order bit is set if refcount was _not_ taken | 517 | * _skb_refdst low order bit is set if refcount was _not_ taken |
@@ -565,7 +575,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | |||
565 | bool *fragstolen, int *delta_truesize); | 575 | bool *fragstolen, int *delta_truesize); |
566 | 576 | ||
567 | extern struct sk_buff *__alloc_skb(unsigned int size, | 577 | extern struct sk_buff *__alloc_skb(unsigned int size, |
568 | gfp_t priority, int fclone, int node); | 578 | gfp_t priority, int flags, int node); |
569 | extern struct sk_buff *build_skb(void *data, unsigned int frag_size); | 579 | extern struct sk_buff *build_skb(void *data, unsigned int frag_size); |
570 | static inline struct sk_buff *alloc_skb(unsigned int size, | 580 | static inline struct sk_buff *alloc_skb(unsigned int size, |
571 | gfp_t priority) | 581 | gfp_t priority) |
@@ -576,7 +586,7 @@ static inline struct sk_buff *alloc_skb(unsigned int size, | |||
576 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, | 586 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
577 | gfp_t priority) | 587 | gfp_t priority) |
578 | { | 588 | { |
579 | return __alloc_skb(size, priority, 1, NUMA_NO_NODE); | 589 | return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); |
580 | } | 590 | } |
581 | 591 | ||
582 | extern void skb_recycle(struct sk_buff *skb); | 592 | extern void skb_recycle(struct sk_buff *skb); |
@@ -1237,6 +1247,17 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, | |||
1237 | { | 1247 | { |
1238 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1248 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1239 | 1249 | ||
1250 | /* | ||
1251 | * Propagate page->pfmemalloc to the skb if we can. The problem is | ||
1252 | * that not all callers have unique ownership of the page. If | ||
1253 | * pfmemalloc is set, we check the mapping as a mapping implies | ||
1254 | * page->index is set (index and pfmemalloc share space). | ||
1255 | * If it's a valid mapping, we cannot use page->pfmemalloc but we | ||
1256 | * do not lose pfmemalloc information as the pages would not be | ||
1257 | * allocated using __GFP_MEMALLOC. | ||
1258 | */ | ||
1259 | if (page->pfmemalloc && !page->mapping) | ||
1260 | skb->pfmemalloc = true; | ||
1240 | frag->page.p = page; | 1261 | frag->page.p = page; |
1241 | frag->page_offset = off; | 1262 | frag->page_offset = off; |
1242 | skb_frag_size_set(frag, size); | 1263 | skb_frag_size_set(frag, size); |
@@ -1753,6 +1774,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
1753 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); | 1774 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); |
1754 | } | 1775 | } |
1755 | 1776 | ||
1777 | /* | ||
1778 | * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data | ||
1779 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | ||
1780 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | ||
1781 | * @order: size of the allocation | ||
1782 | * | ||
1783 | * Allocate a new page. | ||
1784 | * | ||
1785 | * %NULL is returned if there is no free memory. | ||
1786 | */ | ||
1787 | static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, | ||
1788 | struct sk_buff *skb, | ||
1789 | unsigned int order) | ||
1790 | { | ||
1791 | struct page *page; | ||
1792 | |||
1793 | gfp_mask |= __GFP_COLD; | ||
1794 | |||
1795 | if (!(gfp_mask & __GFP_NOMEMALLOC)) | ||
1796 | gfp_mask |= __GFP_MEMALLOC; | ||
1797 | |||
1798 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); | ||
1799 | if (skb && page && page->pfmemalloc) | ||
1800 | skb->pfmemalloc = true; | ||
1801 | |||
1802 | return page; | ||
1803 | } | ||
1804 | |||
1805 | /** | ||
1806 | * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data | ||
1807 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | ||
1808 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | ||
1809 | * | ||
1810 | * Allocate a new page. | ||
1811 | * | ||
1812 | * %NULL is returned if there is no free memory. | ||
1813 | */ | ||
1814 | static inline struct page *__skb_alloc_page(gfp_t gfp_mask, | ||
1815 | struct sk_buff *skb) | ||
1816 | { | ||
1817 | return __skb_alloc_pages(gfp_mask, skb, 0); | ||
1818 | } | ||
1819 | |||
1820 | /** | ||
1821 | * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page | ||
1822 | * @page: The page that was allocated from skb_alloc_page | ||
1823 | * @skb: The skb that may need pfmemalloc set | ||
1824 | */ | ||
1825 | static inline void skb_propagate_pfmemalloc(struct page *page, | ||
1826 | struct sk_buff *skb) | ||
1827 | { | ||
1828 | if (page && page->pfmemalloc) | ||
1829 | skb->pfmemalloc = true; | ||
1830 | } | ||
1831 | |||
1756 | /** | 1832 | /** |
1757 | * skb_frag_page - retrieve the page referred to by a paged fragment | 1833 | * skb_frag_page - retrieve the page referred to by a paged fragment |
1758 | * @frag: the paged fragment | 1834 | * @frag: the paged fragment |
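The intended use of the new allocators is in driver receive paths: RX pages go through __skb_alloc_page()/__skb_alloc_pages() so that pfmemalloc state reaches the skb, and skb_propagate_pfmemalloc() covers the case where the page is allocated before the skb exists. A hedged refill sketch:

static struct page *rx_refill(struct sk_buff *skb)
{
	struct page *page;

	/* __GFP_MEMALLOC is added internally unless __GFP_NOMEMALLOC is
	 * passed; skb->pfmemalloc is set when reserves were used. */
	page = __skb_alloc_page(GFP_ATOMIC, skb);
	if (!page)
		return NULL;

	/* if the skb is only allocated later, propagate by hand:
	 * skb_propagate_pfmemalloc(page, skb); */
	return page;
}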
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 77d278defa70..cff40aa7db62 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -174,6 +174,8 @@ struct rpc_xprt { | |||
174 | unsigned long state; /* transport state */ | 174 | unsigned long state; /* transport state */ |
175 | unsigned char shutdown : 1, /* being shut down */ | 175 | unsigned char shutdown : 1, /* being shut down */ |
176 | resvport : 1; /* use a reserved port */ | 176 | resvport : 1; /* use a reserved port */ |
177 | unsigned int swapper; /* we're swapping over this | ||
178 | transport */ | ||
177 | unsigned int bind_index; /* bind function index */ | 179 | unsigned int bind_index; /* bind function index */ |
178 | 180 | ||
179 | /* | 181 | /* |
@@ -316,6 +318,7 @@ void xprt_release_rqst_cong(struct rpc_task *task); | |||
316 | void xprt_disconnect_done(struct rpc_xprt *xprt); | 318 | void xprt_disconnect_done(struct rpc_xprt *xprt); |
317 | void xprt_force_disconnect(struct rpc_xprt *xprt); | 319 | void xprt_force_disconnect(struct rpc_xprt *xprt); |
318 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); | 320 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); |
321 | int xs_swapper(struct rpc_xprt *xprt, int enable); | ||
319 | 322 | ||
320 | /* | 323 | /* |
321 | * Reserved bit positions in xprt->state | 324 | * Reserved bit positions in xprt->state |
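xs_swapper() flags a transport as carrying swap traffic; a swap-over-NFS activation path would enable it while the swapfile is in use and drop it again afterwards. Roughly:

static int xprt_swap_enable(struct rpc_xprt *xprt, bool on)
{
	/* bumps or clears xprt->swapper for this transport */
	return xs_swapper(xprt, on ? 1 : 0);
}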
diff --git a/include/linux/swap.h b/include/linux/swap.h index c84ec68eaec9..388e70601413 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -151,6 +151,7 @@ enum { | |||
151 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 151 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
152 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | 152 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ |
153 | SWP_BLKDEV = (1 << 6), /* its a block device */ | 153 | SWP_BLKDEV = (1 << 6), /* its a block device */ |
154 | SWP_FILE = (1 << 7), /* set after swap_activate success */ | ||
154 | /* add others here before... */ | 155 | /* add others here before... */ |
155 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ | 156 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ |
156 | }; | 157 | }; |
@@ -301,7 +302,7 @@ static inline void scan_unevictable_unregister_node(struct node *node) | |||
301 | 302 | ||
302 | extern int kswapd_run(int nid); | 303 | extern int kswapd_run(int nid); |
303 | extern void kswapd_stop(int nid); | 304 | extern void kswapd_stop(int nid); |
304 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 305 | #ifdef CONFIG_MEMCG |
305 | extern int mem_cgroup_swappiness(struct mem_cgroup *mem); | 306 | extern int mem_cgroup_swappiness(struct mem_cgroup *mem); |
306 | #else | 307 | #else |
307 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) | 308 | static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) |
@@ -309,7 +310,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) | |||
309 | return vm_swappiness; | 310 | return vm_swappiness; |
310 | } | 311 | } |
311 | #endif | 312 | #endif |
312 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 313 | #ifdef CONFIG_MEMCG_SWAP |
313 | extern void mem_cgroup_uncharge_swap(swp_entry_t ent); | 314 | extern void mem_cgroup_uncharge_swap(swp_entry_t ent); |
314 | #else | 315 | #else |
315 | static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) | 316 | static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) |
@@ -320,8 +321,14 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) | |||
320 | /* linux/mm/page_io.c */ | 321 | /* linux/mm/page_io.c */ |
321 | extern int swap_readpage(struct page *); | 322 | extern int swap_readpage(struct page *); |
322 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | 323 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); |
324 | extern int swap_set_page_dirty(struct page *page); | ||
323 | extern void end_swap_bio_read(struct bio *bio, int err); | 325 | extern void end_swap_bio_read(struct bio *bio, int err); |
324 | 326 | ||
327 | int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, | ||
328 | unsigned long nr_pages, sector_t start_block); | ||
329 | int generic_swapfile_activate(struct swap_info_struct *, struct file *, | ||
330 | sector_t *); | ||
331 | |||
325 | /* linux/mm/swap_state.c */ | 332 | /* linux/mm/swap_state.c */ |
326 | extern struct address_space swapper_space; | 333 | extern struct address_space swapper_space; |
327 | #define total_swapcache_pages swapper_space.nrpages | 334 | #define total_swapcache_pages swapper_space.nrpages |
@@ -356,11 +363,12 @@ extern unsigned int count_swap_pages(int, int); | |||
356 | extern sector_t map_swap_page(struct page *, struct block_device **); | 363 | extern sector_t map_swap_page(struct page *, struct block_device **); |
357 | extern sector_t swapdev_block(int, pgoff_t); | 364 | extern sector_t swapdev_block(int, pgoff_t); |
358 | extern int page_swapcount(struct page *); | 365 | extern int page_swapcount(struct page *); |
366 | extern struct swap_info_struct *page_swap_info(struct page *); | ||
359 | extern int reuse_swap_page(struct page *); | 367 | extern int reuse_swap_page(struct page *); |
360 | extern int try_to_free_swap(struct page *); | 368 | extern int try_to_free_swap(struct page *); |
361 | struct backing_dev_info; | 369 | struct backing_dev_info; |
362 | 370 | ||
363 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 371 | #ifdef CONFIG_MEMCG |
364 | extern void | 372 | extern void |
365 | mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); | 373 | mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); |
366 | #else | 374 | #else |
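generic_swapfile_activate() builds the swap extent list by walking the file's blocks, so a filesystem that only needs ordinary block-backed swapfiles can delegate to it from its swap_activate hook (the hook itself comes from the surrounding series, not from this hunk); SWP_FILE is set once activation succeeds. A sketch:

static int myfs_swap_activate(struct swap_info_struct *sis,
			      struct file *file, sector_t *span)
{
	/* fills sis with extents, using add_swap_extent() internally */
	return generic_swapfile_activate(sis, file, span);
}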
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index cfc8d908892e..4b94a61955df 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -151,7 +151,7 @@ enum { | |||
151 | }; | 151 | }; |
152 | #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) | 152 | #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) |
153 | 153 | ||
154 | struct thermal_zone_device *thermal_zone_device_register(char *, int, int, | 154 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, |
155 | void *, const struct thermal_zone_device_ops *, int tc1, | 155 | void *, const struct thermal_zone_device_ops *, int tc1, |
156 | int tc2, int passive_freq, int polling_freq); | 156 | int tc2, int passive_freq, int polling_freq); |
157 | void thermal_zone_device_unregister(struct thermal_zone_device *); | 157 | void thermal_zone_device_unregister(struct thermal_zone_device *); |
diff --git a/include/linux/vfio.h b/include/linux/vfio.h new file mode 100644 index 000000000000..0a4f180a11d8 --- /dev/null +++ b/include/linux/vfio.h | |||
@@ -0,0 +1,445 @@ | |||
1 | /* | ||
2 | * VFIO API definition | ||
3 | * | ||
4 | * Copyright (C) 2012 Red Hat, Inc. All rights reserved. | ||
5 | * Author: Alex Williamson <alex.williamson@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef VFIO_H | ||
12 | #define VFIO_H | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/ioctl.h> | ||
16 | |||
17 | #define VFIO_API_VERSION 0 | ||
18 | |||
19 | #ifdef __KERNEL__ /* Internal VFIO-core/bus driver API */ | ||
20 | |||
21 | #include <linux/iommu.h> | ||
22 | #include <linux/mm.h> | ||
23 | |||
24 | /** | ||
25 | * struct vfio_device_ops - VFIO bus driver device callbacks | ||
26 | * | ||
27 | * @open: Called when userspace creates new file descriptor for device | ||
28 | * @release: Called when userspace releases file descriptor for device | ||
29 | * @read: Perform read(2) on device file descriptor | ||
30 | * @write: Perform write(2) on device file descriptor | ||
31 | * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* | ||
32 | * operations documented below | ||
33 | * @mmap: Perform mmap(2) on a region of the device file descriptor | ||
34 | */ | ||
35 | struct vfio_device_ops { | ||
36 | char *name; | ||
37 | int (*open)(void *device_data); | ||
38 | void (*release)(void *device_data); | ||
39 | ssize_t (*read)(void *device_data, char __user *buf, | ||
40 | size_t count, loff_t *ppos); | ||
41 | ssize_t (*write)(void *device_data, const char __user *buf, | ||
42 | size_t count, loff_t *size); | ||
43 | long (*ioctl)(void *device_data, unsigned int cmd, | ||
44 | unsigned long arg); | ||
45 | int (*mmap)(void *device_data, struct vm_area_struct *vma); | ||
46 | }; | ||
47 | |||
48 | extern int vfio_add_group_dev(struct device *dev, | ||
49 | const struct vfio_device_ops *ops, | ||
50 | void *device_data); | ||
51 | |||
52 | extern void *vfio_del_group_dev(struct device *dev); | ||
53 | |||
54 | /** | ||
55 | * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks | ||
56 | */ | ||
57 | struct vfio_iommu_driver_ops { | ||
58 | char *name; | ||
59 | struct module *owner; | ||
60 | void *(*open)(unsigned long arg); | ||
61 | void (*release)(void *iommu_data); | ||
62 | ssize_t (*read)(void *iommu_data, char __user *buf, | ||
63 | size_t count, loff_t *ppos); | ||
64 | ssize_t (*write)(void *iommu_data, const char __user *buf, | ||
65 | size_t count, loff_t *size); | ||
66 | long (*ioctl)(void *iommu_data, unsigned int cmd, | ||
67 | unsigned long arg); | ||
68 | int (*mmap)(void *iommu_data, struct vm_area_struct *vma); | ||
69 | int (*attach_group)(void *iommu_data, | ||
70 | struct iommu_group *group); | ||
71 | void (*detach_group)(void *iommu_data, | ||
72 | struct iommu_group *group); | ||
73 | |||
74 | }; | ||
75 | |||
76 | extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); | ||
77 | |||
78 | extern void vfio_unregister_iommu_driver( | ||
79 | const struct vfio_iommu_driver_ops *ops); | ||
80 | |||
81 | /** | ||
82 | * offsetofend(TYPE, MEMBER) | ||
83 | * | ||
84 | * @TYPE: The type of the structure | ||
85 | * @MEMBER: The member within the structure to get the end offset of | ||
86 | * | ||
87 | * Simple helper macro for dealing with variable sized structures passed | ||
88 | * from user space. This allows us to easily determine if the provided | ||
89 | * structure is sized to include various fields. | ||
90 | */ | ||
91 | #define offsetofend(TYPE, MEMBER) ({ \ | ||
92 | TYPE tmp; \ | ||
93 | offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \ | ||
94 | |||
95 | #endif /* __KERNEL__ */ | ||
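offsetofend() is how the argsz convention below gets enforced on the kernel side: an ioctl handler copies in only the fields it understands and rejects callers whose structure is too small. A sketch, using struct vfio_region_info defined further down in this header:

static long check_region_info_arg(void __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;
	if (info.argsz < minsz)
		return -EINVAL;		/* caller's structure is too small */

	/* larger (newer) callers are fine; extra space is simply unused */
	return 0;
}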
96 | |||
97 | /* Kernel & User level defines for VFIO IOCTLs. */ | ||
98 | |||
99 | /* Extensions */ | ||
100 | |||
101 | #define VFIO_TYPE1_IOMMU 1 | ||
102 | |||
103 | /* | ||
104 | * The IOCTL interface is designed for extensibility by embedding the | ||
105 | * structure length (argsz) and flags into structures passed between | ||
106 | * kernel and userspace. We therefore use the _IO() macro for these | ||
107 | * defines to avoid implicitly embedding a size into the ioctl request. | ||
108 | * As structure fields are added, argsz will increase to match and flag | ||
109 | * bits will be defined to indicate additional fields with valid data. | ||
110 | * It's *always* the caller's responsibility to indicate the size of | ||
111 | * the structure passed by setting argsz appropriately. | ||
112 | */ | ||
113 | |||
114 | #define VFIO_TYPE (';') | ||
115 | #define VFIO_BASE 100 | ||
116 | |||
117 | /* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */ | ||
118 | |||
119 | /** | ||
120 | * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0) | ||
121 | * | ||
122 | * Report the version of the VFIO API. This allows us to bump the entire | ||
123 | * API version should we later need to add or change features in incompatible | ||
124 | * ways. | ||
125 | * Return: VFIO_API_VERSION | ||
126 | * Availability: Always | ||
127 | */ | ||
128 | #define VFIO_GET_API_VERSION _IO(VFIO_TYPE, VFIO_BASE + 0) | ||
129 | |||
130 | /** | ||
131 | * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32) | ||
132 | * | ||
133 | * Check whether an extension is supported. | ||
134 | * Return: 0 if not supported, 1 (or some other positive integer) if supported. | ||
135 | * Availability: Always | ||
136 | */ | ||
137 | #define VFIO_CHECK_EXTENSION _IO(VFIO_TYPE, VFIO_BASE + 1) | ||
138 | |||
139 | /** | ||
140 | * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32) | ||
141 | * | ||
142 | * Set the iommu to the given type. The type must be supported by an | ||
143 | * iommu driver as verified by calling CHECK_EXTENSION using the same | ||
144 | * type. A group must be set to this file descriptor before this | ||
145 | * ioctl is available. The IOMMU interfaces enabled by this call are | ||
146 | * specific to the value set. | ||
147 | * Return: 0 on success, -errno on failure | ||
148 | * Availability: When VFIO group attached | ||
149 | */ | ||
150 | #define VFIO_SET_IOMMU _IO(VFIO_TYPE, VFIO_BASE + 2) | ||
151 | |||
152 | /* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */ | ||
153 | |||
154 | /** | ||
155 | * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3, | ||
156 | * struct vfio_group_status) | ||
157 | * | ||
158 | * Retrieve information about the group. Fills in provided | ||
159 | * struct vfio_group_status. Caller sets argsz. | ||
160 | * Return: 0 on success, -errno on failure. | ||
161 | * Availability: Always | ||
162 | */ | ||
163 | struct vfio_group_status { | ||
164 | __u32 argsz; | ||
165 | __u32 flags; | ||
166 | #define VFIO_GROUP_FLAGS_VIABLE (1 << 0) | ||
167 | #define VFIO_GROUP_FLAGS_CONTAINER_SET (1 << 1) | ||
168 | }; | ||
169 | #define VFIO_GROUP_GET_STATUS _IO(VFIO_TYPE, VFIO_BASE + 3) | ||
170 | |||
171 | /** | ||
172 | * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32) | ||
173 | * | ||
174 | * Set the container for the VFIO group to the open VFIO file | ||
175 | * descriptor provided. Groups may only belong to a single | ||
176 | * container. Containers may, at their discretion, support multiple | ||
177 | * groups. Only when a container is set are all of the interfaces | ||
178 | * of the VFIO file descriptor and the VFIO group file descriptor | ||
179 | * available to the user. | ||
180 | * Return: 0 on success, -errno on failure. | ||
181 | * Availability: Always | ||
182 | */ | ||
183 | #define VFIO_GROUP_SET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 4) | ||
184 | |||
185 | /** | ||
186 | * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5) | ||
187 | * | ||
188 | * Remove the group from the attached container. This is the | ||
189 | * opposite of the SET_CONTAINER call and returns the group to | ||
190 | * an initial state. All device file descriptors must be released | ||
191 | * prior to calling this interface. When removing the last group | ||
192 | * from a container, the IOMMU will be disabled and all state lost, | ||
193 | * effectively also returning the VFIO file descriptor to an initial | ||
194 | * state. | ||
195 | * Return: 0 on success, -errno on failure. | ||
196 | * Availability: When attached to container | ||
197 | */ | ||
198 | #define VFIO_GROUP_UNSET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 5) | ||
199 | |||
200 | /** | ||
201 | * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char) | ||
202 | * | ||
203 | * Return a new file descriptor for the device object described by | ||
204 | * the provided string. The string should match a device listed in | ||
205 | * the devices subdirectory of the IOMMU group sysfs entry. The | ||
206 | * group containing the device must already be added to this context. | ||
207 | * Return: new file descriptor on success, -errno on failure. | ||
208 | * Availability: When attached to container | ||
209 | */ | ||
210 | #define VFIO_GROUP_GET_DEVICE_FD _IO(VFIO_TYPE, VFIO_BASE + 6) | ||
211 | |||
212 | /* --------------- IOCTLs for DEVICE file descriptors --------------- */ | ||
213 | |||
214 | /** | ||
215 | * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7, | ||
216 | * struct vfio_device_info) | ||
217 | * | ||
218 | * Retrieve information about the device. Fills in provided | ||
219 | * struct vfio_device_info. Caller sets argsz. | ||
220 | * Return: 0 on success, -errno on failure. | ||
221 | */ | ||
222 | struct vfio_device_info { | ||
223 | __u32 argsz; | ||
224 | __u32 flags; | ||
225 | #define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */ | ||
226 | #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */ | ||
227 | __u32 num_regions; /* Max region index + 1 */ | ||
228 | __u32 num_irqs; /* Max IRQ index + 1 */ | ||
229 | }; | ||
230 | #define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7) | ||
231 | |||
232 | /** | ||
233 | * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8, | ||
234 | * struct vfio_region_info) | ||
235 | * | ||
236 | * Retrieve information about a device region. Caller provides | ||
237 | * struct vfio_region_info with index value set. Caller sets argsz. | ||
238 | * Implementation of region mapping is bus driver specific. This is | ||
239 | * intended to describe MMIO, I/O port, as well as bus specific | ||
240 | * regions (ex. PCI config space). Zero sized regions may be used | ||
241 | * to describe unimplemented regions (ex. unimplemented PCI BARs). | ||
242 | * Return: 0 on success, -errno on failure. | ||
243 | */ | ||
244 | struct vfio_region_info { | ||
245 | __u32 argsz; | ||
246 | __u32 flags; | ||
247 | #define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */ | ||
248 | #define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */ | ||
249 | #define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */ | ||
250 | __u32 index; /* Region index */ | ||
251 | __u32 resv; /* Reserved for alignment */ | ||
252 | __u64 size; /* Region size (bytes) */ | ||
253 | __u64 offset; /* Region offset from start of device fd */ | ||
254 | }; | ||
255 | #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) | ||
256 | |||
257 | /** | ||
258 | * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9, | ||
259 | * struct vfio_irq_info) | ||
260 | * | ||
261 | * Retrieve information about a device IRQ. Caller provides | ||
262 | * struct vfio_irq_info with index value set. Caller sets argsz. | ||
263 | * Implementation of IRQ mapping is bus driver specific. Indexes | ||
264 | * using multiple IRQs are primarily intended to support MSI-like | ||
265 | * interrupt blocks. Zero count irq blocks may be used to describe | ||
266 | * unimplemented interrupt types. | ||
267 | * | ||
268 | * The EVENTFD flag indicates the interrupt index supports eventfd based | ||
269 | * signaling. | ||
270 | * | ||
271 | * The MASKABLE flag indicates the index supports MASK and UNMASK | ||
272 | * actions described below. | ||
273 | * | ||
274 | * AUTOMASKED indicates that after signaling, the interrupt line is | ||
275 | * automatically masked by VFIO and the user needs to unmask the line | ||
276 | * to receive new interrupts. This is primarily intended to distinguish | ||
277 | * level triggered interrupts. | ||
278 | * | ||
279 | * The NORESIZE flag indicates that the interrupt lines within the index | ||
280 | * are setup as a set and new subindexes cannot be enabled without first | ||
281 | * disabling the entire index. This is used for interrupts like PCI MSI | ||
282 | * and MSI-X where the driver may only use a subset of the available | ||
283 | * indexes, but VFIO needs to enable a specific number of vectors | ||
284 | * upfront. In the case of MSI-X, where the user can enable MSI-X and | ||
285 | * then add and unmask vectors, it's up to userspace to make the decision | ||
286 | * whether to allocate the maximum supported number of vectors or tear | ||
287 | * down setup and incrementally increase the vectors as each is enabled. | ||
288 | */ | ||
289 | struct vfio_irq_info { | ||
290 | __u32 argsz; | ||
291 | __u32 flags; | ||
292 | #define VFIO_IRQ_INFO_EVENTFD (1 << 0) | ||
293 | #define VFIO_IRQ_INFO_MASKABLE (1 << 1) | ||
294 | #define VFIO_IRQ_INFO_AUTOMASKED (1 << 2) | ||
295 | #define VFIO_IRQ_INFO_NORESIZE (1 << 3) | ||
296 | __u32 index; /* IRQ index */ | ||
297 | __u32 count; /* Number of IRQs within this index */ | ||
298 | }; | ||
299 | #define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9) | ||
300 | |||
301 | /** | ||
302 | * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set) | ||
303 | * | ||
304 | * Set signaling, masking, and unmasking of interrupts. Caller provides | ||
305 | * struct vfio_irq_set with all fields set. 'start' and 'count' indicate | ||
306 | * the range of subindexes being specified. | ||
307 | * | ||
308 | * The DATA flags specify the type of data provided. If DATA_NONE, the | ||
309 | * operation performs the specified action immediately on the specified | ||
310 | * interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]: | ||
311 | * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1. | ||
312 | * | ||
313 | * DATA_BOOL allows sparse support for the same on arrays of interrupts. | ||
314 | * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]): | ||
315 | * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3, | ||
316 | * data = {1,0,1} | ||
317 | * | ||
318 | * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd. | ||
319 | * A value of -1 can be used to either de-assign interrupts if already | ||
320 | * assigned or skip un-assigned interrupts. For example, to set an eventfd | ||
321 | * to be triggered for interrupts [0,0] and [0,2]: | ||
322 | * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3, | ||
323 | * data = {fd1, -1, fd2} | ||
324 | * If index [0,1] is previously set, two count = 1 ioctl calls would be | ||
325 | * required to set [0,0] and [0,2] without changing [0,1]. | ||
326 | * | ||
327 | * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used | ||
328 | * with ACTION_TRIGGER to perform kernel level interrupt loopback testing | ||
329 | * from userspace (ie. simulate hardware triggering). | ||
330 | * | ||
331 | * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER | ||
332 | * enables the interrupt index for the device. Individual subindex interrupts | ||
333 | * can be disabled using the -1 value for DATA_EVENTFD or the index can be | ||
334 | * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0. | ||
335 | * | ||
336 | * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while | ||
337 | * ACTION_TRIGGER specifies kernel->user signaling. | ||
338 | */ | ||
339 | struct vfio_irq_set { | ||
340 | __u32 argsz; | ||
341 | __u32 flags; | ||
342 | #define VFIO_IRQ_SET_DATA_NONE (1 << 0) /* Data not present */ | ||
343 | #define VFIO_IRQ_SET_DATA_BOOL (1 << 1) /* Data is bool (u8) */ | ||
344 | #define VFIO_IRQ_SET_DATA_EVENTFD (1 << 2) /* Data is eventfd (s32) */ | ||
345 | #define VFIO_IRQ_SET_ACTION_MASK (1 << 3) /* Mask interrupt */ | ||
346 | #define VFIO_IRQ_SET_ACTION_UNMASK (1 << 4) /* Unmask interrupt */ | ||
347 | #define VFIO_IRQ_SET_ACTION_TRIGGER (1 << 5) /* Trigger interrupt */ | ||
348 | __u32 index; | ||
349 | __u32 start; | ||
350 | __u32 count; | ||
351 | __u8 data[]; | ||
352 | }; | ||
353 | #define VFIO_DEVICE_SET_IRQS _IO(VFIO_TYPE, VFIO_BASE + 10) | ||
354 | |||
355 | #define VFIO_IRQ_SET_DATA_TYPE_MASK (VFIO_IRQ_SET_DATA_NONE | \ | ||
356 | VFIO_IRQ_SET_DATA_BOOL | \ | ||
357 | VFIO_IRQ_SET_DATA_EVENTFD) | ||
358 | #define VFIO_IRQ_SET_ACTION_TYPE_MASK (VFIO_IRQ_SET_ACTION_MASK | \ | ||
359 | VFIO_IRQ_SET_ACTION_UNMASK | \ | ||
360 | VFIO_IRQ_SET_ACTION_TRIGGER) | ||
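Tying this together from userspace: binding an eventfd to the PCI INTx index uses DATA_EVENTFD with ACTION_TRIGGER and a single __s32 payload. A hedged sketch; device_fd comes from VFIO_GROUP_GET_DEVICE_FD, and VFIO_PCI_INTX_IRQ_INDEX is defined further down:

static int bind_intx_eventfd(int device_fd, int32_t efd)
{
	size_t sz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
	struct vfio_irq_set *set = malloc(sz);
	int ret;

	if (!set)
		return -1;
	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_INTX_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));	/* one eventfd for subindex 0 */

	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}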
361 | /** | ||
362 | * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11) | ||
363 | * | ||
364 | * Reset a device. | ||
365 | */ | ||
366 | #define VFIO_DEVICE_RESET _IO(VFIO_TYPE, VFIO_BASE + 11) | ||
367 | |||
368 | /* | ||
369 | * The VFIO-PCI bus driver makes use of the following fixed region and | ||
370 | * IRQ index mapping. Unimplemented regions return a size of zero. | ||
371 | * Unimplemented IRQ types return a count of zero. | ||
372 | */ | ||
373 | |||
374 | enum { | ||
375 | VFIO_PCI_BAR0_REGION_INDEX, | ||
376 | VFIO_PCI_BAR1_REGION_INDEX, | ||
377 | VFIO_PCI_BAR2_REGION_INDEX, | ||
378 | VFIO_PCI_BAR3_REGION_INDEX, | ||
379 | VFIO_PCI_BAR4_REGION_INDEX, | ||
380 | VFIO_PCI_BAR5_REGION_INDEX, | ||
381 | VFIO_PCI_ROM_REGION_INDEX, | ||
382 | VFIO_PCI_CONFIG_REGION_INDEX, | ||
383 | VFIO_PCI_NUM_REGIONS | ||
384 | }; | ||
385 | |||
386 | enum { | ||
387 | VFIO_PCI_INTX_IRQ_INDEX, | ||
388 | VFIO_PCI_MSI_IRQ_INDEX, | ||
389 | VFIO_PCI_MSIX_IRQ_INDEX, | ||
390 | VFIO_PCI_NUM_IRQS | ||
391 | }; | ||
392 | |||
393 | /* -------- API for Type1 VFIO IOMMU -------- */ | ||
394 | |||
395 | /** | ||
396 | * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info) | ||
397 | * | ||
398 | * Retrieve information about the IOMMU object. Fills in provided | ||
399 | * struct vfio_iommu_info. Caller sets argsz. | ||
400 | * | ||
401 | * XXX Should we do these by CHECK_EXTENSION too? | ||
402 | */ | ||
403 | struct vfio_iommu_type1_info { | ||
404 | __u32 argsz; | ||
405 | __u32 flags; | ||
406 | #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ | ||
407 | __u64 iova_pgsizes; /* Bitmap of supported page sizes */ | ||
408 | }; | ||
409 | |||
410 | #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) | ||
411 | |||
412 | /** | ||
413 | * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map) | ||
414 | * | ||
415 | * Map process virtual addresses to IO virtual addresses using the | ||
416 | * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required. | ||
417 | */ | ||
418 | struct vfio_iommu_type1_dma_map { | ||
419 | __u32 argsz; | ||
420 | __u32 flags; | ||
421 | #define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */ | ||
422 | #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */ | ||
423 | __u64 vaddr; /* Process virtual address */ | ||
424 | __u64 iova; /* IO virtual address */ | ||
425 | __u64 size; /* Size of mapping (bytes) */ | ||
426 | }; | ||
427 | |||
428 | #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13) | ||
429 | |||
430 | /** | ||
431 | * VFIO_IOMMU_UNMAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 14, struct vfio_dma_unmap) | ||
432 | * | ||
433 | * Unmap IO virtual addresses using the provided struct vfio_dma_unmap. | ||
434 | * Caller sets argsz. | ||
435 | */ | ||
436 | struct vfio_iommu_type1_dma_unmap { | ||
437 | __u32 argsz; | ||
438 | __u32 flags; | ||
439 | __u64 iova; /* IO virtual address */ | ||
440 | __u64 size; /* Size of mapping (bytes) */ | ||
441 | }; | ||
442 | |||
443 | #define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14) | ||
444 | |||
445 | #endif /* VFIO_H */ | ||
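Read as a whole, the header implies the following userspace sequence; a hedged walkthrough in which the group number, device name and buffer are invented and error handling is mostly omitted:

static int vfio_bringup(void *buffer, size_t len)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);	/* made-up group */
	struct vfio_group_status status = { .argsz = sizeof(status) };
	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };

	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION ||
	    !ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
		return -1;				/* unsupported */

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;	/* not every device in the group is bound */

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (__u64)(uintptr_t)buffer;	/* page-aligned user memory */
	map.iova  = 0;
	map.size  = len;
	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);

	/* device name is invented; it comes from the group's sysfs dir */
	return ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
}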
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 5d78910f926c..7a147c8299ab 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -274,6 +274,10 @@ struct v4l2_capability { | |||
274 | #define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000 | 274 | #define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000 |
275 | /* Is a video output device that supports multiplanar formats */ | 275 | /* Is a video output device that supports multiplanar formats */ |
276 | #define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000 | 276 | #define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000 |
277 | /* Is a video mem-to-mem device that supports multiplanar formats */ | ||
278 | #define V4L2_CAP_VIDEO_M2M_MPLANE 0x00004000 | ||
279 | /* Is a video mem-to-mem device */ | ||
280 | #define V4L2_CAP_VIDEO_M2M 0x00008000 | ||
277 | 281 | ||
278 | #define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ | 282 | #define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ |
279 | #define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ | 283 | #define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ |
@@ -2028,6 +2032,8 @@ struct v4l2_modulator { | |||
2028 | #define V4L2_TUNER_CAP_RDS 0x0080 | 2032 | #define V4L2_TUNER_CAP_RDS 0x0080 |
2029 | #define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100 | 2033 | #define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100 |
2030 | #define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200 | 2034 | #define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200 |
2035 | #define V4L2_TUNER_CAP_FREQ_BANDS 0x0400 | ||
2036 | #define V4L2_TUNER_CAP_HWSEEK_PROG_LIM 0x0800 | ||
2031 | 2037 | ||
2032 | /* Flags for the 'rxsubchans' field */ | 2038 | /* Flags for the 'rxsubchans' field */ |
2033 | #define V4L2_TUNER_SUB_MONO 0x0001 | 2039 | #define V4L2_TUNER_SUB_MONO 0x0001 |
@@ -2046,19 +2052,36 @@ struct v4l2_modulator { | |||
2046 | #define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 | 2052 | #define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 |
2047 | 2053 | ||
2048 | struct v4l2_frequency { | 2054 | struct v4l2_frequency { |
2049 | __u32 tuner; | 2055 | __u32 tuner; |
2050 | __u32 type; /* enum v4l2_tuner_type */ | 2056 | __u32 type; /* enum v4l2_tuner_type */ |
2051 | __u32 frequency; | 2057 | __u32 frequency; |
2052 | __u32 reserved[8]; | 2058 | __u32 reserved[8]; |
2059 | }; | ||
2060 | |||
2061 | #define V4L2_BAND_MODULATION_VSB (1 << 1) | ||
2062 | #define V4L2_BAND_MODULATION_FM (1 << 2) | ||
2063 | #define V4L2_BAND_MODULATION_AM (1 << 3) | ||
2064 | |||
2065 | struct v4l2_frequency_band { | ||
2066 | __u32 tuner; | ||
2067 | __u32 type; /* enum v4l2_tuner_type */ | ||
2068 | __u32 index; | ||
2069 | __u32 capability; | ||
2070 | __u32 rangelow; | ||
2071 | __u32 rangehigh; | ||
2072 | __u32 modulation; | ||
2073 | __u32 reserved[9]; | ||
2053 | }; | 2074 | }; |
2054 | 2075 | ||
2055 | struct v4l2_hw_freq_seek { | 2076 | struct v4l2_hw_freq_seek { |
2056 | __u32 tuner; | 2077 | __u32 tuner; |
2057 | __u32 type; /* enum v4l2_tuner_type */ | 2078 | __u32 type; /* enum v4l2_tuner_type */ |
2058 | __u32 seek_upward; | 2079 | __u32 seek_upward; |
2059 | __u32 wrap_around; | 2080 | __u32 wrap_around; |
2060 | __u32 spacing; | 2081 | __u32 spacing; |
2061 | __u32 reserved[7]; | 2082 | __u32 rangelow; |
2083 | __u32 rangehigh; | ||
2084 | __u32 reserved[5]; | ||
2062 | }; | 2085 | }; |
2063 | 2086 | ||
2064 | /* | 2087 | /* |
@@ -2626,6 +2649,10 @@ struct v4l2_create_buffers { | |||
2626 | #define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings) | 2649 | #define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings) |
2627 | #define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap) | 2650 | #define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap) |
2628 | 2651 | ||
2652 | /* Experimental, this ioctl may change over the next couple of kernel | ||
2653 | versions. */ | ||
2654 | #define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band) | ||
2655 | |||
2629 | /* Reminder: when adding new ioctls please add support for them to | 2656 | /* Reminder: when adding new ioctls please add support for them to |
2630 | drivers/media/video/v4l2-compat-ioctl32.c as well! */ | 2657 | drivers/media/video/v4l2-compat-ioctl32.c as well! */ |
2631 | 2658 | ||
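VIDIOC_ENUM_FREQ_BANDS is enumerated like the other V4L2 enum ioctls: userspace increments index until the driver returns an error. A hedged sketch for a radio tuner node:

static void list_freq_bands(int fd)
{
	struct v4l2_frequency_band band;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&band, 0, sizeof(band));
		band.tuner = 0;
		band.type = V4L2_TUNER_RADIO;
		band.index = i;
		if (ioctl(fd, VIDIOC_ENUM_FREQ_BANDS, &band) < 0)
			break;	/* driver rejects the first unused index */
		printf("band %u: %u - %u (capability 0x%x)\n",
		       i, band.rangelow, band.rangehigh, band.capability);
	}
}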
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 06f8e3858251..57f7b1091511 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
@@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
30 | FOR_ALL_ZONES(PGSTEAL_DIRECT), | 30 | FOR_ALL_ZONES(PGSTEAL_DIRECT), |
31 | FOR_ALL_ZONES(PGSCAN_KSWAPD), | 31 | FOR_ALL_ZONES(PGSCAN_KSWAPD), |
32 | FOR_ALL_ZONES(PGSCAN_DIRECT), | 32 | FOR_ALL_ZONES(PGSCAN_DIRECT), |
33 | PGSCAN_DIRECT_THROTTLE, | ||
33 | #ifdef CONFIG_NUMA | 34 | #ifdef CONFIG_NUMA |
34 | PGSCAN_ZONE_RECLAIM_FAILED, | 35 | PGSCAN_ZONE_RECLAIM_FAILED, |
35 | #endif | 36 | #endif |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 65efb92da996..ad2cfd53dadc 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -179,11 +179,6 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); | |||
179 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) | 179 | #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) |
180 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) | 180 | #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) |
181 | 181 | ||
182 | static inline void zap_zone_vm_stats(struct zone *zone) | ||
183 | { | ||
184 | memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); | ||
185 | } | ||
186 | |||
187 | extern void inc_zone_state(struct zone *, enum zone_stat_item); | 182 | extern void inc_zone_state(struct zone *, enum zone_stat_item); |
188 | 183 | ||
189 | #ifdef CONFIG_SMP | 184 | #ifdef CONFIG_SMP |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 6d0a0fcd80e7..c66fe3332d83 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -189,9 +189,4 @@ void tag_pages_for_writeback(struct address_space *mapping, | |||
189 | 189 | ||
190 | void account_page_redirty(struct page *page); | 190 | void account_page_redirty(struct page *page); |
191 | 191 | ||
192 | /* pdflush.c */ | ||
193 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | ||
194 | read-only. */ | ||
195 | |||
196 | |||
197 | #endif /* WRITEBACK_H */ | 192 | #endif /* WRITEBACK_H */ |
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h index bd8217c2577c..d8f6ab1943e4 100644 --- a/include/media/davinci/vpif_types.h +++ b/include/media/davinci/vpif_types.h | |||
@@ -50,6 +50,8 @@ struct vpif_display_config { | |||
50 | const char **output; | 50 | const char **output; |
51 | int output_count; | 51 | int output_count; |
52 | const char *card_name; | 52 | const char *card_name; |
53 | bool ch2_clip_en; | ||
54 | bool ch3_clip_en; | ||
53 | }; | 55 | }; |
54 | 56 | ||
55 | struct vpif_input { | 57 | struct vpif_input { |
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 19e93523c2d8..e614c9c15e56 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h | |||
@@ -230,6 +230,8 @@ struct v4l2_ioctl_ops { | |||
230 | struct v4l2_frequency *a); | 230 | struct v4l2_frequency *a); |
231 | int (*vidioc_s_frequency) (struct file *file, void *fh, | 231 | int (*vidioc_s_frequency) (struct file *file, void *fh, |
232 | struct v4l2_frequency *a); | 232 | struct v4l2_frequency *a); |
233 | int (*vidioc_enum_freq_bands) (struct file *file, void *fh, | ||
234 | struct v4l2_frequency_band *band); | ||
233 | 235 | ||
234 | /* Sliced VBI cap */ | 236 | /* Sliced VBI cap */ |
235 | int (*vidioc_g_sliced_vbi_cap) (struct file *file, void *fh, | 237 | int (*vidioc_g_sliced_vbi_cap) (struct file *file, void *fh, |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 613cfa401672..83b567fe1941 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -249,4 +249,13 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) | |||
249 | return flags; | 249 | return flags; |
250 | } | 250 | } |
251 | 251 | ||
252 | static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | ||
253 | { | ||
254 | struct dst_entry *dst = skb_dst(skb); | ||
255 | |||
256 | dst_hold(dst); | ||
257 | sk->sk_rx_dst = dst; | ||
258 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | ||
259 | } | ||
260 | |||
252 | #endif /* _INET_SOCK_H */ | 261 | #endif /* _INET_SOCK_H */ |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index e69c3a47153d..926142ed8d7a 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/rcupdate.h> | 21 | #include <linux/rcupdate.h> |
22 | #include <net/fib_rules.h> | 22 | #include <net/fib_rules.h> |
23 | #include <net/inetpeer.h> | 23 | #include <net/inetpeer.h> |
24 | #include <linux/percpu.h> | ||
24 | 25 | ||
25 | struct fib_config { | 26 | struct fib_config { |
26 | u8 fc_dst_len; | 27 | u8 fc_dst_len; |
@@ -54,6 +55,7 @@ struct fib_nh_exception { | |||
54 | u32 fnhe_pmtu; | 55 | u32 fnhe_pmtu; |
55 | __be32 fnhe_gw; | 56 | __be32 fnhe_gw; |
56 | unsigned long fnhe_expires; | 57 | unsigned long fnhe_expires; |
58 | struct rtable __rcu *fnhe_rth; | ||
57 | unsigned long fnhe_stamp; | 59 | unsigned long fnhe_stamp; |
58 | }; | 60 | }; |
59 | 61 | ||
@@ -81,8 +83,8 @@ struct fib_nh { | |||
81 | __be32 nh_gw; | 83 | __be32 nh_gw; |
82 | __be32 nh_saddr; | 84 | __be32 nh_saddr; |
83 | int nh_saddr_genid; | 85 | int nh_saddr_genid; |
84 | struct rtable *nh_rth_output; | 86 | struct rtable __rcu * __percpu *nh_pcpu_rth_output; |
85 | struct rtable *nh_rth_input; | 87 | struct rtable __rcu *nh_rth_input; |
86 | struct fnhe_hash_bucket *nh_exceptions; | 88 | struct fnhe_hash_bucket *nh_exceptions; |
87 | }; | 89 | }; |
88 | 90 | ||
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 0ffb8e31f3cd..1474dd65c66f 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h | |||
@@ -61,8 +61,6 @@ struct netns_ipv4 { | |||
61 | int sysctl_icmp_ratelimit; | 61 | int sysctl_icmp_ratelimit; |
62 | int sysctl_icmp_ratemask; | 62 | int sysctl_icmp_ratemask; |
63 | int sysctl_icmp_errors_use_inbound_ifaddr; | 63 | int sysctl_icmp_errors_use_inbound_ifaddr; |
64 | int sysctl_rt_cache_rebuild_count; | ||
65 | int current_rt_cache_rebuild_count; | ||
66 | 64 | ||
67 | unsigned int sysctl_ping_group_range[2]; | 65 | unsigned int sysctl_ping_group_range[2]; |
68 | long sysctl_tcp_mem[3]; | 66 | long sysctl_tcp_mem[3]; |
diff --git a/include/net/route.h b/include/net/route.h index 8c52bc6f1c90..776a27f1ab78 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -57,6 +57,8 @@ struct rtable { | |||
57 | 57 | ||
58 | /* Miscellaneous cached information */ | 58 | /* Miscellaneous cached information */ |
59 | u32 rt_pmtu; | 59 | u32 rt_pmtu; |
60 | |||
61 | struct list_head rt_uncached; | ||
60 | }; | 62 | }; |
61 | 63 | ||
62 | static inline bool rt_is_input_route(const struct rtable *rt) | 64 | static inline bool rt_is_input_route(const struct rtable *rt) |
@@ -107,6 +109,7 @@ extern struct ip_rt_acct __percpu *ip_rt_acct; | |||
107 | struct in_device; | 109 | struct in_device; |
108 | extern int ip_rt_init(void); | 110 | extern int ip_rt_init(void); |
109 | extern void rt_cache_flush(struct net *net, int how); | 111 | extern void rt_cache_flush(struct net *net, int how); |
112 | extern void rt_flush_dev(struct net_device *dev); | ||
110 | extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); | 113 | extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); |
111 | extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, | 114 | extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, |
112 | struct sock *sk); | 115 | struct sock *sk); |
diff --git a/include/net/sock.h b/include/net/sock.h index e067f8c18f88..b3730239bf18 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -621,6 +621,7 @@ enum sock_flags { | |||
621 | SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ | 621 | SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ |
622 | SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ | 622 | SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ |
623 | SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ | 623 | SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ |
624 | SOCK_MEMALLOC, /* VM depends on this socket for swapping */ | ||
624 | SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */ | 625 | SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */ |
625 | SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */ | 626 | SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */ |
626 | SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */ | 627 | SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */ |
@@ -658,6 +659,26 @@ static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) | |||
658 | return test_bit(flag, &sk->sk_flags); | 659 | return test_bit(flag, &sk->sk_flags); |
659 | } | 660 | } |
660 | 661 | ||
662 | #ifdef CONFIG_NET | ||
663 | extern struct static_key memalloc_socks; | ||
664 | static inline int sk_memalloc_socks(void) | ||
665 | { | ||
666 | return static_key_false(&memalloc_socks); | ||
667 | } | ||
668 | #else | ||
669 | |||
670 | static inline int sk_memalloc_socks(void) | ||
671 | { | ||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | #endif | ||
676 | |||
677 | static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) | ||
678 | { | ||
679 | return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); | ||
680 | } | ||
681 | |||
661 | static inline void sk_acceptq_removed(struct sock *sk) | 682 | static inline void sk_acceptq_removed(struct sock *sk) |
662 | { | 683 | { |
663 | sk->sk_ack_backlog--; | 684 | sk->sk_ack_backlog--; |
@@ -733,8 +754,13 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s | |||
733 | return 0; | 754 | return 0; |
734 | } | 755 | } |
735 | 756 | ||
757 | extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); | ||
758 | |||
736 | static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 759 | static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
737 | { | 760 | { |
761 | if (sk_memalloc_socks() && skb_pfmemalloc(skb)) | ||
762 | return __sk_backlog_rcv(sk, skb); | ||
763 | |||
738 | return sk->sk_backlog_rcv(sk, skb); | 764 | return sk->sk_backlog_rcv(sk, skb); |
739 | } | 765 | } |
740 | 766 | ||
@@ -798,6 +824,8 @@ extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p); | |||
798 | extern void sk_stream_wait_close(struct sock *sk, long timeo_p); | 824 | extern void sk_stream_wait_close(struct sock *sk, long timeo_p); |
799 | extern int sk_stream_error(struct sock *sk, int flags, int err); | 825 | extern int sk_stream_error(struct sock *sk, int flags, int err); |
800 | extern void sk_stream_kill_queues(struct sock *sk); | 826 | extern void sk_stream_kill_queues(struct sock *sk); |
827 | extern void sk_set_memalloc(struct sock *sk); | ||
828 | extern void sk_clear_memalloc(struct sock *sk); | ||
801 | 829 | ||
802 | extern int sk_wait_data(struct sock *sk, long *timeo); | 830 | extern int sk_wait_data(struct sock *sk, long *timeo); |
803 | 831 | ||
@@ -913,7 +941,7 @@ struct proto { | |||
913 | #ifdef SOCK_REFCNT_DEBUG | 941 | #ifdef SOCK_REFCNT_DEBUG |
914 | atomic_t socks; | 942 | atomic_t socks; |
915 | #endif | 943 | #endif |
916 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM | 944 | #ifdef CONFIG_MEMCG_KMEM |
917 | /* | 945 | /* |
918 | * cgroup specific init/deinit functions. Called once for all | 946 | * cgroup specific init/deinit functions. Called once for all |
919 | * protocols that implement it, from cgroups populate function. | 947 | * protocols that implement it, from cgroups populate function. |
@@ -994,7 +1022,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk) | |||
994 | #define sk_refcnt_debug_release(sk) do { } while (0) | 1022 | #define sk_refcnt_debug_release(sk) do { } while (0) |
995 | #endif /* SOCK_REFCNT_DEBUG */ | 1023 | #endif /* SOCK_REFCNT_DEBUG */ |
996 | 1024 | ||
997 | #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET) | 1025 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET) |
998 | extern struct static_key memcg_socket_limit_enabled; | 1026 | extern struct static_key memcg_socket_limit_enabled; |
999 | static inline struct cg_proto *parent_cg_proto(struct proto *proto, | 1027 | static inline struct cg_proto *parent_cg_proto(struct proto *proto, |
1000 | struct cg_proto *cg_proto) | 1028 | struct cg_proto *cg_proto) |
@@ -1301,12 +1329,14 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size) | |||
1301 | __sk_mem_schedule(sk, size, SK_MEM_SEND); | 1329 | __sk_mem_schedule(sk, size, SK_MEM_SEND); |
1302 | } | 1330 | } |
1303 | 1331 | ||
1304 | static inline bool sk_rmem_schedule(struct sock *sk, int size) | 1332 | static inline bool |
1333 | sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) | ||
1305 | { | 1334 | { |
1306 | if (!sk_has_account(sk)) | 1335 | if (!sk_has_account(sk)) |
1307 | return true; | 1336 | return true; |
1308 | return size <= sk->sk_forward_alloc || | 1337 | return size <= sk->sk_forward_alloc || |
1309 | __sk_mem_schedule(sk, size, SK_MEM_RECV); | 1338 | __sk_mem_schedule(sk, size, SK_MEM_RECV) || |
1339 | skb_pfmemalloc(skb); | ||
1310 | } | 1340 | } |
1311 | 1341 | ||
1312 | static inline void sk_mem_reclaim(struct sock *sk) | 1342 | static inline void sk_mem_reclaim(struct sock *sk) |
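
sk_rmem_schedule() now takes the skb as well, so that an skb drawn from the pfmemalloc reserves is admitted even when normal accounting would refuse it. A sketch of the updated call pattern on the receive path; the wrapper function is illustrative:

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative receive-side charge: accept the skb if the socket can
 * account for its truesize, or if the skb itself is pfmemalloc-backed.
 */
static bool example_charge_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return false;		/* caller should drop the skb */

	skb_set_owner_r(skb, sk);	/* charge sk_rmem_alloc */
	return true;
}
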
diff --git a/include/sound/es1688.h b/include/sound/es1688.h index 3ec7ecbe2502..f752dd33dfaf 100644 --- a/include/sound/es1688.h +++ b/include/sound/es1688.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define ES1688_HW_AUTO 0x0000 | 29 | #define ES1688_HW_AUTO 0x0000 |
30 | #define ES1688_HW_688 0x0001 | 30 | #define ES1688_HW_688 0x0001 |
31 | #define ES1688_HW_1688 0x0002 | 31 | #define ES1688_HW_1688 0x0002 |
32 | #define ES1688_HW_UNDEF 0x0003 | ||
32 | 33 | ||
33 | struct snd_es1688 { | 34 | struct snd_es1688 { |
34 | unsigned long port; /* port of ESS chip */ | 35 | unsigned long port; /* port of ESS chip */ |
diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h index 0c3c2fb0f939..fe8590cac5c2 100644 --- a/include/sound/tea575x-tuner.h +++ b/include/sound/tea575x-tuner.h | |||
@@ -37,6 +37,10 @@ | |||
37 | struct snd_tea575x; | 37 | struct snd_tea575x; |
38 | 38 | ||
39 | struct snd_tea575x_ops { | 39 | struct snd_tea575x_ops { |
40 | /* Drivers using snd_tea575x must define either read_val and write_val, */ | ||
41 | void (*write_val)(struct snd_tea575x *tea, u32 val); | ||
42 | u32 (*read_val)(struct snd_tea575x *tea); | ||
43 | /* or the three pin functions below. */ | ||
40 | void (*set_pins)(struct snd_tea575x *tea, u8 pins); | 44 | void (*set_pins)(struct snd_tea575x *tea, u8 pins); |
41 | u8 (*get_pins)(struct snd_tea575x *tea); | 45 | u8 (*get_pins)(struct snd_tea575x *tea); |
42 | void (*set_direction)(struct snd_tea575x *tea, bool output); | 46 | void (*set_direction)(struct snd_tea575x *tea, bool output); |
@@ -49,6 +53,7 @@ struct snd_tea575x { | |||
49 | int radio_nr; /* radio_nr */ | 53 | int radio_nr; /* radio_nr */ |
50 | bool tea5759; /* 5759 chip is present */ | 54 | bool tea5759; /* 5759 chip is present */ |
51 | bool cannot_read_data; /* Device cannot read the data pin */ | 55 | bool cannot_read_data; /* Device cannot read the data pin */ |
56 | bool cannot_mute; /* Device cannot mute */ | ||
52 | bool mute; /* Device is muted? */ | 57 | bool mute; /* Device is muted? */ |
53 | bool stereo; /* receiving stereo */ | 58 | bool stereo; /* receiving stereo */ |
54 | bool tuned; /* tuned to a station */ | 59 | bool tuned; /* tuned to a station */ |
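
With the new hooks, a radio driver whose hardware exposes the TEA575x shift register through its own registers can implement write_val/read_val and leave the pin callbacks unset. A hedged sketch of such an ops table; the accessor functions are hypothetical:

#include <sound/tea575x-tuner.h>

/* Hypothetical register-based accessors; how the 25-bit shift register
 * is actually reached is card specific.
 */
static void example_tea575x_write_val(struct snd_tea575x *tea, u32 val)
{
	/* write val to the bridge's TEA575x data register */
}

static u32 example_tea575x_read_val(struct snd_tea575x *tea)
{
	return 0;	/* read it back the same way */
}

static struct snd_tea575x_ops example_tea575x_ops = {
	.write_val = example_tea575x_write_val,
	.read_val  = example_tea575x_read_val,
	/* set_pins/get_pins/set_direction intentionally left NULL */
};
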
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index 9fe3a36646e9..d6fd8e5b14b7 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h | |||
@@ -30,6 +30,7 @@ | |||
30 | {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ | 30 | {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ |
31 | {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ | 31 | {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ |
32 | {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ | 32 | {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ |
33 | {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \ | ||
33 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | 34 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ |
34 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | 35 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ |
35 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | 36 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ |
diff --git a/include/trace/events/random.h b/include/trace/events/random.h new file mode 100644 index 000000000000..422df19de732 --- /dev/null +++ b/include/trace/events/random.h | |||
@@ -0,0 +1,134 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM random | ||
3 | |||
4 | #if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_RANDOM_H | ||
6 | |||
7 | #include <linux/writeback.h> | ||
8 | #include <linux/tracepoint.h> | ||
9 | |||
10 | DECLARE_EVENT_CLASS(random__mix_pool_bytes, | ||
11 | TP_PROTO(const char *pool_name, int bytes, unsigned long IP), | ||
12 | |||
13 | TP_ARGS(pool_name, bytes, IP), | ||
14 | |||
15 | TP_STRUCT__entry( | ||
16 | __field( const char *, pool_name ) | ||
17 | __field( int, bytes ) | ||
18 | __field(unsigned long, IP ) | ||
19 | ), | ||
20 | |||
21 | TP_fast_assign( | ||
22 | __entry->pool_name = pool_name; | ||
23 | __entry->bytes = bytes; | ||
24 | __entry->IP = IP; | ||
25 | ), | ||
26 | |||
27 | TP_printk("%s pool: bytes %d caller %pF", | ||
28 | __entry->pool_name, __entry->bytes, (void *)__entry->IP) | ||
29 | ); | ||
30 | |||
31 | DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, | ||
32 | TP_PROTO(const char *pool_name, int bytes, unsigned long IP), | ||
33 | |||
34 | TP_ARGS(pool_name, bytes, IP) | ||
35 | ); | ||
36 | |||
37 | DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, | ||
38 | TP_PROTO(const char *pool_name, int bytes, unsigned long IP), | ||
39 | |||
40 | TP_ARGS(pool_name, bytes, IP) | ||
41 | ); | ||
42 | |||
43 | TRACE_EVENT(credit_entropy_bits, | ||
44 | TP_PROTO(const char *pool_name, int bits, int entropy_count, | ||
45 | int entropy_total, unsigned long IP), | ||
46 | |||
47 | TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), | ||
48 | |||
49 | TP_STRUCT__entry( | ||
50 | __field( const char *, pool_name ) | ||
51 | __field( int, bits ) | ||
52 | __field( int, entropy_count ) | ||
53 | __field( int, entropy_total ) | ||
54 | __field(unsigned long, IP ) | ||
55 | ), | ||
56 | |||
57 | TP_fast_assign( | ||
58 | __entry->pool_name = pool_name; | ||
59 | __entry->bits = bits; | ||
60 | __entry->entropy_count = entropy_count; | ||
61 | __entry->entropy_total = entropy_total; | ||
62 | __entry->IP = IP; | ||
63 | ), | ||
64 | |||
65 | TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " | ||
66 | "caller %pF", __entry->pool_name, __entry->bits, | ||
67 | __entry->entropy_count, __entry->entropy_total, | ||
68 | (void *)__entry->IP) | ||
69 | ); | ||
70 | |||
71 | TRACE_EVENT(get_random_bytes, | ||
72 | TP_PROTO(int nbytes, unsigned long IP), | ||
73 | |||
74 | TP_ARGS(nbytes, IP), | ||
75 | |||
76 | TP_STRUCT__entry( | ||
77 | __field( int, nbytes ) | ||
78 | __field(unsigned long, IP ) | ||
79 | ), | ||
80 | |||
81 | TP_fast_assign( | ||
82 | __entry->nbytes = nbytes; | ||
83 | __entry->IP = IP; | ||
84 | ), | ||
85 | |||
86 | TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP) | ||
87 | ); | ||
88 | |||
89 | DECLARE_EVENT_CLASS(random__extract_entropy, | ||
90 | TP_PROTO(const char *pool_name, int nbytes, int entropy_count, | ||
91 | unsigned long IP), | ||
92 | |||
93 | TP_ARGS(pool_name, nbytes, entropy_count, IP), | ||
94 | |||
95 | TP_STRUCT__entry( | ||
96 | __field( const char *, pool_name ) | ||
97 | __field( int, nbytes ) | ||
98 | __field( int, entropy_count ) | ||
99 | __field(unsigned long, IP ) | ||
100 | ), | ||
101 | |||
102 | TP_fast_assign( | ||
103 | __entry->pool_name = pool_name; | ||
104 | __entry->nbytes = nbytes; | ||
105 | __entry->entropy_count = entropy_count; | ||
106 | __entry->IP = IP; | ||
107 | ), | ||
108 | |||
109 | TP_printk("%s pool: nbytes %d entropy_count %d caller %pF", | ||
110 | __entry->pool_name, __entry->nbytes, __entry->entropy_count, | ||
111 | (void *)__entry->IP) | ||
112 | ); | ||
113 | |||
114 | |||
115 | DEFINE_EVENT(random__extract_entropy, extract_entropy, | ||
116 | TP_PROTO(const char *pool_name, int nbytes, int entropy_count, | ||
117 | unsigned long IP), | ||
118 | |||
119 | TP_ARGS(pool_name, nbytes, entropy_count, IP) | ||
120 | ); | ||
121 | |||
122 | DEFINE_EVENT(random__extract_entropy, extract_entropy_user, | ||
123 | TP_PROTO(const char *pool_name, int nbytes, int entropy_count, | ||
124 | unsigned long IP), | ||
125 | |||
126 | TP_ARGS(pool_name, nbytes, entropy_count, IP) | ||
127 | ); | ||
128 | |||
129 | |||
130 | |||
131 | #endif /* _TRACE_RANDOM_H */ | ||
132 | |||
133 | /* This part must be outside protection */ | ||
134 | #include <trace/define_trace.h> | ||
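
These tracepoints are intended to be fired from drivers/char/random.c: exactly one .c file defines CREATE_TRACE_POINTS before including the header, after which the generated trace_*() stubs can be called. A sketch of the call side; the wrapper function is illustrative:

#include <linux/kernel.h>	/* _RET_IP_ */

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* Illustrative: record that nbytes were mixed into a pool, tagging the
 * event with the caller's return address for the %pF printout.
 */
static void example_trace_mix(const char *pool_name, int nbytes)
{
	trace_mix_pool_bytes(pool_name, nbytes, _RET_IP_);
}
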
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index 89d43b3d4cb9..5a0e4f9efb53 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h | |||
@@ -82,6 +82,9 @@ struct lcd_ctrl_config { | |||
82 | 82 | ||
83 | /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */ | 83 | /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */ |
84 | unsigned char raster_order; | 84 | unsigned char raster_order; |
85 | |||
86 | /* DMA FIFO threshold */ | ||
87 | int fifo_th; | ||
85 | }; | 88 | }; |
86 | 89 | ||
87 | struct lcd_sync_arg { | 90 | struct lcd_sync_arg { |
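
Board code that supplies a struct lcd_ctrl_config can now also choose the LCDC DMA FIFO threshold. A hedged fragment of such a configuration; everything except the new fifo_th member is elided, and the value shown is only an example:

#include <video/da8xx-fb.h>

/* Illustrative board-level LCD controller configuration. */
static struct lcd_ctrl_config example_lcd_cfg = {
	/* ... panel, bias and sync members elided ... */
	.raster_order	= 0,	/* least-to-most significant */
	.fifo_th	= 6,	/* new: DMA FIFO threshold */
};
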
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index c8e59b4a3364..a6267a2d292b 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -48,6 +48,10 @@ | |||
48 | #define DISPC_IRQ_FRAMEDONEWB (1 << 23) | 48 | #define DISPC_IRQ_FRAMEDONEWB (1 << 23) |
49 | #define DISPC_IRQ_FRAMEDONETV (1 << 24) | 49 | #define DISPC_IRQ_FRAMEDONETV (1 << 24) |
50 | #define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) | 50 | #define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) |
51 | #define DISPC_IRQ_FRAMEDONE3 (1 << 26) | ||
52 | #define DISPC_IRQ_VSYNC3 (1 << 27) | ||
53 | #define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 28) | ||
54 | #define DISPC_IRQ_SYNC_LOST3 (1 << 29) | ||
51 | 55 | ||
52 | struct omap_dss_device; | 56 | struct omap_dss_device; |
53 | struct omap_overlay_manager; | 57 | struct omap_overlay_manager; |
@@ -75,6 +79,7 @@ enum omap_channel { | |||
75 | OMAP_DSS_CHANNEL_LCD = 0, | 79 | OMAP_DSS_CHANNEL_LCD = 0, |
76 | OMAP_DSS_CHANNEL_DIGIT = 1, | 80 | OMAP_DSS_CHANNEL_DIGIT = 1, |
77 | OMAP_DSS_CHANNEL_LCD2 = 2, | 81 | OMAP_DSS_CHANNEL_LCD2 = 2, |
82 | OMAP_DSS_CHANNEL_LCD3 = 3, | ||
78 | }; | 83 | }; |
79 | 84 | ||
80 | enum omap_color_mode { | 85 | enum omap_color_mode { |
@@ -99,11 +104,6 @@ enum omap_color_mode { | |||
99 | OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ | 104 | OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ |
100 | }; | 105 | }; |
101 | 106 | ||
102 | enum omap_lcd_display_type { | ||
103 | OMAP_DSS_LCD_DISPLAY_STN, | ||
104 | OMAP_DSS_LCD_DISPLAY_TFT, | ||
105 | }; | ||
106 | |||
107 | enum omap_dss_load_mode { | 107 | enum omap_dss_load_mode { |
108 | OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, | 108 | OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, |
109 | OMAP_DSS_LOAD_CLUT_ONLY = 1, | 109 | OMAP_DSS_LOAD_CLUT_ONLY = 1, |
@@ -121,15 +121,15 @@ enum omap_rfbi_te_mode { | |||
121 | OMAP_DSS_RFBI_TE_MODE_2 = 2, | 121 | OMAP_DSS_RFBI_TE_MODE_2 = 2, |
122 | }; | 122 | }; |
123 | 123 | ||
124 | enum omap_panel_config { | 124 | enum omap_dss_signal_level { |
125 | OMAP_DSS_LCD_IVS = 1<<0, | 125 | OMAPDSS_SIG_ACTIVE_HIGH = 0, |
126 | OMAP_DSS_LCD_IHS = 1<<1, | 126 | OMAPDSS_SIG_ACTIVE_LOW = 1, |
127 | OMAP_DSS_LCD_IPC = 1<<2, | 127 | }; |
128 | OMAP_DSS_LCD_IEO = 1<<3, | ||
129 | OMAP_DSS_LCD_RF = 1<<4, | ||
130 | OMAP_DSS_LCD_ONOFF = 1<<5, | ||
131 | 128 | ||
132 | OMAP_DSS_LCD_TFT = 1<<20, | 129 | enum omap_dss_signal_edge { |
130 | OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, | ||
131 | OMAPDSS_DRIVE_SIG_RISING_EDGE, | ||
132 | OMAPDSS_DRIVE_SIG_FALLING_EDGE, | ||
133 | }; | 133 | }; |
134 | 134 | ||
135 | enum omap_dss_venc_type { | 135 | enum omap_dss_venc_type { |
@@ -167,13 +167,6 @@ enum omap_dss_audio_state { | |||
167 | OMAP_DSS_AUDIO_PLAYING, | 167 | OMAP_DSS_AUDIO_PLAYING, |
168 | }; | 168 | }; |
169 | 169 | ||
170 | /* XXX perhaps this should be removed */ | ||
171 | enum omap_dss_overlay_managers { | ||
172 | OMAP_DSS_OVL_MGR_LCD, | ||
173 | OMAP_DSS_OVL_MGR_TV, | ||
174 | OMAP_DSS_OVL_MGR_LCD2, | ||
175 | }; | ||
176 | |||
177 | enum omap_dss_rotation_type { | 170 | enum omap_dss_rotation_type { |
178 | OMAP_DSS_ROT_DMA = 1 << 0, | 171 | OMAP_DSS_ROT_DMA = 1 << 0, |
179 | OMAP_DSS_ROT_VRFB = 1 << 1, | 172 | OMAP_DSS_ROT_VRFB = 1 << 1, |
@@ -268,9 +261,6 @@ struct omap_dss_dsi_videomode_data { | |||
268 | int hfp_blanking_mode; | 261 | int hfp_blanking_mode; |
269 | 262 | ||
270 | /* Video port sync events */ | 263 | /* Video port sync events */ |
271 | int vp_de_pol; | ||
272 | int vp_hsync_pol; | ||
273 | int vp_vsync_pol; | ||
274 | bool vp_vsync_end; | 264 | bool vp_vsync_end; |
275 | bool vp_hsync_end; | 265 | bool vp_hsync_end; |
276 | 266 | ||
@@ -346,6 +336,19 @@ struct omap_video_timings { | |||
346 | u16 vfp; /* Vertical front porch */ | 336 | u16 vfp; /* Vertical front porch */ |
347 | /* Unit: line clocks */ | 337 | /* Unit: line clocks */ |
348 | u16 vbp; /* Vertical back porch */ | 338 | u16 vbp; /* Vertical back porch */ |
339 | |||
340 | /* Vsync logic level */ | ||
341 | enum omap_dss_signal_level vsync_level; | ||
342 | /* Hsync logic level */ | ||
343 | enum omap_dss_signal_level hsync_level; | ||
344 | /* Interlaced or Progressive timings */ | ||
345 | bool interlace; | ||
346 | /* Pixel clock edge to drive LCD data */ | ||
347 | enum omap_dss_signal_edge data_pclk_edge; | ||
348 | /* Data enable logic level */ | ||
349 | enum omap_dss_signal_level de_level; | ||
350 | /* Pixel clock edges to drive HSYNC and VSYNC signals */ | ||
351 | enum omap_dss_signal_edge sync_pclk_edge; | ||
349 | }; | 352 | }; |
350 | 353 | ||
351 | #ifdef CONFIG_OMAP2_DSS_VENC | 354 | #ifdef CONFIG_OMAP2_DSS_VENC |
@@ -559,8 +562,6 @@ struct omap_dss_device { | |||
559 | /* Unit: line clocks */ | 562 | /* Unit: line clocks */ |
560 | int acb; /* ac-bias pin frequency */ | 563 | int acb; /* ac-bias pin frequency */ |
561 | 564 | ||
562 | enum omap_panel_config config; | ||
563 | |||
564 | enum omap_dss_dsi_pixel_format dsi_pix_fmt; | 565 | enum omap_dss_dsi_pixel_format dsi_pix_fmt; |
565 | enum omap_dss_dsi_mode dsi_mode; | 566 | enum omap_dss_dsi_mode dsi_mode; |
566 | struct omap_dss_dsi_videomode_data dsi_vm_data; | 567 | struct omap_dss_dsi_videomode_data dsi_vm_data; |
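
Sync polarities and clock edges now live directly in omap_video_timings instead of the removed omap_panel_config bitmask, so a panel driver describes them per-timing. A sketch of a timings definition using the new members; the elided numeric timings and the chosen levels/edges are placeholders for a real panel:

#include <video/omapdss.h>

/* Illustrative panel timings: only the new signal-level/edge members
 * are shown; resolution, pixel clock and porches are elided.
 */
static const struct omap_video_timings example_panel_timings = {
	/* ... resolution, pixel clock, h/v porches and sync widths ... */

	.vsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
	.hsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
	.interlace	= false,
	.data_pclk_edge	= OMAPDSS_DRIVE_SIG_RISING_EDGE,
	.de_level	= OMAPDSS_SIG_ACTIVE_HIGH,
	.sync_pclk_edge	= OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
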
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index 7571b27a0ba1..ff43ffc1aab2 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h | |||
@@ -166,6 +166,12 @@ struct sh_mobile_lcdc_bl_info { | |||
166 | int (*get_brightness)(void); | 166 | int (*get_brightness)(void); |
167 | }; | 167 | }; |
168 | 168 | ||
169 | struct sh_mobile_lcdc_overlay_cfg { | ||
170 | int fourcc; | ||
171 | unsigned int max_xres; | ||
172 | unsigned int max_yres; | ||
173 | }; | ||
174 | |||
169 | struct sh_mobile_lcdc_chan_cfg { | 175 | struct sh_mobile_lcdc_chan_cfg { |
170 | int chan; | 176 | int chan; |
171 | int fourcc; | 177 | int fourcc; |
@@ -186,6 +192,7 @@ struct sh_mobile_lcdc_chan_cfg { | |||
186 | struct sh_mobile_lcdc_info { | 192 | struct sh_mobile_lcdc_info { |
187 | int clock_source; | 193 | int clock_source; |
188 | struct sh_mobile_lcdc_chan_cfg ch[2]; | 194 | struct sh_mobile_lcdc_chan_cfg ch[2]; |
195 | struct sh_mobile_lcdc_overlay_cfg overlays[4]; | ||
189 | struct sh_mobile_meram_info *meram_dev; | 196 | struct sh_mobile_meram_info *meram_dev; |
190 | }; | 197 | }; |
191 | 198 | ||
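
Next to the two main channels, platform data can now describe up to four overlay planes. A hedged sketch of a board's sh_mobile_lcdc_info with a single overlay; the fourcc and maximum sizes are placeholders, and the channel/clock constants are the pre-existing ones from sh_mobile_lcdc.h rather than part of this patch:

#include <linux/videodev2.h>		/* V4L2_PIX_FMT_* fourcc codes */
#include <video/sh_mobile_lcdc.h>

/* Illustrative board data: one main channel plus one overlay plane. */
static struct sh_mobile_lcdc_info example_lcdc_info = {
	.clock_source	= LCDC_CLK_BUS,
	.ch[0] = {
		.chan	= LCDC_CHAN_MAINLCD,
		.fourcc	= V4L2_PIX_FMT_RGB565,
		/* ... modes, interface and panel config elided ... */
	},
	.overlays[0] = {
		.fourcc   = V4L2_PIX_FMT_NV12,
		.max_xres = 1280,
		.max_yres = 720,
	},
};
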
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h index 29b2fd3b147e..062e6e7f955c 100644 --- a/include/video/sh_mobile_meram.h +++ b/include/video/sh_mobile_meram.h | |||
@@ -15,7 +15,6 @@ enum { | |||
15 | 15 | ||
16 | 16 | ||
17 | struct sh_mobile_meram_priv; | 17 | struct sh_mobile_meram_priv; |
18 | struct sh_mobile_meram_ops; | ||
19 | 18 | ||
20 | /* | 19 | /* |
21 | * struct sh_mobile_meram_info - MERAM platform data | 20 | * struct sh_mobile_meram_info - MERAM platform data |
@@ -24,7 +23,6 @@ struct sh_mobile_meram_ops; | |||
24 | struct sh_mobile_meram_info { | 23 | struct sh_mobile_meram_info { |
25 | int addr_mode; | 24 | int addr_mode; |
26 | u32 reserved_icbs; | 25 | u32 reserved_icbs; |
27 | struct sh_mobile_meram_ops *ops; | ||
28 | struct sh_mobile_meram_priv *priv; | 26 | struct sh_mobile_meram_priv *priv; |
29 | struct platform_device *pdev; | 27 | struct platform_device *pdev; |
30 | }; | 28 | }; |
@@ -38,26 +36,59 @@ struct sh_mobile_meram_cfg { | |||
38 | struct sh_mobile_meram_icb_cfg icb[2]; | 36 | struct sh_mobile_meram_icb_cfg icb[2]; |
39 | }; | 37 | }; |
40 | 38 | ||
41 | struct module; | 39 | #if defined(CONFIG_FB_SH_MOBILE_MERAM) || \ |
42 | struct sh_mobile_meram_ops { | 40 | defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE) |
43 | struct module *module; | 41 | unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, |
44 | /* register usage of meram */ | 42 | size_t size); |
45 | void *(*meram_register)(struct sh_mobile_meram_info *meram_dev, | 43 | void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, |
46 | const struct sh_mobile_meram_cfg *cfg, | 44 | unsigned long mem, size_t size); |
47 | unsigned int xres, unsigned int yres, | 45 | void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, |
48 | unsigned int pixelformat, | 46 | const struct sh_mobile_meram_cfg *cfg, |
49 | unsigned int *pitch); | 47 | unsigned int xres, unsigned int yres, |
50 | 48 | unsigned int pixelformat, | |
51 | /* unregister usage of meram */ | 49 | unsigned int *pitch); |
52 | void (*meram_unregister)(struct sh_mobile_meram_info *meram_dev, | 50 | void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data); |
53 | void *data); | 51 | void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, |
54 | 52 | unsigned long base_addr_y, | |
55 | /* update meram settings */ | 53 | unsigned long base_addr_c, |
56 | void (*meram_update)(struct sh_mobile_meram_info *meram_dev, void *data, | 54 | unsigned long *icb_addr_y, |
55 | unsigned long *icb_addr_c); | ||
56 | #else | ||
57 | static inline unsigned long | ||
58 | sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static inline void | ||
64 | sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, | ||
65 | unsigned long mem, size_t size) | ||
66 | { | ||
67 | } | ||
68 | |||
69 | static inline void * | ||
70 | sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, | ||
71 | const struct sh_mobile_meram_cfg *cfg, | ||
72 | unsigned int xres, unsigned int yres, | ||
73 | unsigned int pixelformat, | ||
74 | unsigned int *pitch) | ||
75 | { | ||
76 | return ERR_PTR(-ENODEV); | ||
77 | } | ||
78 | |||
79 | static inline void | ||
80 | sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data) | ||
81 | { | ||
82 | } | ||
83 | |||
84 | static inline void | ||
85 | sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, | ||
57 | unsigned long base_addr_y, | 86 | unsigned long base_addr_y, |
58 | unsigned long base_addr_c, | 87 | unsigned long base_addr_c, |
59 | unsigned long *icb_addr_y, | 88 | unsigned long *icb_addr_y, |
60 | unsigned long *icb_addr_c); | 89 | unsigned long *icb_addr_c) |
61 | }; | 90 | { |
91 | } | ||
92 | #endif | ||
62 | 93 | ||
63 | #endif /* __VIDEO_SH_MOBILE_MERAM_H__ */ | 94 | #endif /* __VIDEO_SH_MOBILE_MERAM_H__ */ |
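
With the ops table gone, the LCDC side calls the MERAM cache helpers directly, and the static inline stubs in the #else branch keep it building when MERAM support is disabled. A sketch of the resulting allocate/free pattern; the wrapper functions are illustrative:

#include <linux/err.h>
#include <video/sh_mobile_meram.h>

/* Illustrative: route a plane's scanout through MERAM if possible,
 * returning NULL when the cache cannot be set up (e.g. -ENODEV from
 * the stub when MERAM is compiled out).
 */
static void *example_meram_enable(struct sh_mobile_meram_info *mdev,
				  const struct sh_mobile_meram_cfg *cfg,
				  unsigned int xres, unsigned int yres,
				  unsigned int pixelformat, unsigned int *pitch)
{
	void *cache;

	cache = sh_mobile_meram_cache_alloc(mdev, cfg, xres, yres,
					    pixelformat, pitch);
	return IS_ERR(cache) ? NULL : cache;
}

static void example_meram_disable(struct sh_mobile_meram_info *mdev,
				  void *cache)
{
	if (cache)
		sh_mobile_meram_cache_free(mdev, cache);
}
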