Diffstat (limited to 'include/linux')
52 files changed, 1059 insertions, 333 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fba8051fb297..6fce2fc2d124 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -131,22 +131,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
  */
 void acpi_unregister_gsi (u32 gsi);
 
-struct acpi_prt_entry {
-	struct list_head node;
-	struct acpi_pci_id id;
-	u8 pin;
-	struct {
-		acpi_handle handle;
-		u32 index;
-	} link;
-	u32 irq;
-};
-
-struct acpi_prt_list {
-	int count;
-	struct list_head entries;
-};
-
 struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
@@ -270,6 +254,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
+void __init acpi_s4_no_nvs(void);
 #endif /* CONFIG_PM_SLEEP */
 #else	/* CONFIG_ACPI */
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc4360..45f6297821bd 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,9 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-			  struct dma_chan *host_chan)
-{
-	do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2a2213eefd85..2f1f95737acb 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,7 +3,7 @@
 
 #define ATMEL_MCI_MAX_NR_SLOTS	2
 
-struct dma_slave;
+#include <linux/dw_dmac.h>
 
 /**
  * struct mci_slot_pdata - board-specific per-slot configuration
@@ -28,11 +28,11 @@ struct mci_slot_pdata {
 
 /**
  * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers, or NULL.
+ * @dma_slave: DMA slave interface to use in data transfers.
  * @slot: Per-slot configuration data.
  */
 struct mci_platform_data {
-	struct dma_slave	*dma_slave;
+	struct dw_dma_slave	dma_slave;
 	struct mci_slot_pdata	slot[ATMEL_MCI_MAX_NR_SLOTS];
 };
 
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index d7afa9dd6635..f3b5d4e3a2ac 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -23,16 +23,16 @@
 #define AT_PLATFORM 15  /* string identifying CPU for optimizations */
 #define AT_HWCAP  16    /* arch dependent hints at CPU capabilities */
 #define AT_CLKTCK 17	/* frequency at which times() increments */
-
+/* AT_* values 18 through 22 are reserved */
 #define AT_SECURE 23   /* secure mode boolean */
-
 #define AT_BASE_PLATFORM 24	/* string identifying real platform, may
 				 * differ from AT_PLATFORM. */
+#define AT_RANDOM 25	/* address of 16 random bytes */
 
 #define AT_EXECFN  31	/* filename of program */
 
 #ifdef __KERNEL__
-#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
 #endif
 
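For illustration only (not part of the patch): the new AT_RANDOM entry hands each process a pointer to 16 kernel-supplied random bytes, which userspace can read from the aux vector. A minimal sketch, assuming a libc new enough to provide getauxval():

#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
	/* AT_RANDOM points at 16 random bytes placed by the kernel at exec time */
	const unsigned char *rnd = (const unsigned char *)getauxval(AT_RANDOM);

	if (rnd)
		printf("first random byte: %02x\n", rnd[0]);
	return 0;
}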
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7035cec583b6..044467ef7b11 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -690,6 +690,8 @@ struct rq_map_data {
 	struct page **pages;
 	int page_order;
 	int nr_entries;
+	unsigned long offset;
+	int null_mapped;
 };
 
 struct req_iterator {
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index f50785ad4781..25085cbadcfc 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 
-#define CAN_VERSION "20081130"
+#define CAN_VERSION "20090105"
 
 /* increment this number each time you change some user-space interface */
 #define CAN_ABI_VERSION "8"
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 08b78c09b09a..e267e62827bb 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -52,9 +52,9 @@ struct cgroup_subsys_state {
 	 * hierarchy structure */
 	struct cgroup *cgroup;
 
-	/* State maintained by the cgroup system to allow
-	 * subsystems to be "busy". Should be accessed via css_get()
-	 * and css_put() */
+	/* State maintained by the cgroup system to allow subsystems
+	 * to be "busy". Should be accessed via css_get(),
+	 * css_tryget() and css_put(). */
 
 	atomic_t refcnt;
 
@@ -64,11 +64,14 @@ struct cgroup_subsys_state {
 /* bits in struct cgroup_subsys_state flags field */
 enum {
 	CSS_ROOT, /* This CSS is the root of the subsystem */
+	CSS_REMOVED, /* This CSS is dead */
 };
 
 /*
- * Call css_get() to hold a reference on the cgroup;
- *
+ * Call css_get() to hold a reference on the css; it can be used
+ * for a reference obtained via:
+ * - an existing ref-counted reference to the css
+ * - task->cgroups for a locked task
  */
 
 static inline void css_get(struct cgroup_subsys_state *css)
@@ -77,9 +80,32 @@ static inline void css_get(struct cgroup_subsys_state *css)
 	if (!test_bit(CSS_ROOT, &css->flags))
 		atomic_inc(&css->refcnt);
 }
+
+static inline bool css_is_removed(struct cgroup_subsys_state *css)
+{
+	return test_bit(CSS_REMOVED, &css->flags);
+}
+
+/*
+ * Call css_tryget() to take a reference on a css if your existing
+ * (known-valid) reference isn't already ref-counted. Returns false if
+ * the css has been destroyed.
+ */
+
+static inline bool css_tryget(struct cgroup_subsys_state *css)
+{
+	if (test_bit(CSS_ROOT, &css->flags))
+		return true;
+	while (!atomic_inc_not_zero(&css->refcnt)) {
+		if (test_bit(CSS_REMOVED, &css->flags))
+			return false;
+	}
+	return true;
+}
+
 /*
  * css_put() should be called to release a reference taken by
- * css_get()
+ * css_get() or css_tryget()
  */
 
 extern void __css_put(struct cgroup_subsys_state *css);
@@ -116,7 +142,7 @@ struct cgroup {
 	struct list_head children;	/* my children */
 
 	struct cgroup *parent;	/* my parent */
-	struct dentry *dentry;	/* cgroup fs entry */
+	struct dentry *dentry;	/* cgroup fs entry, RCU protected */
 
 	/* Private pointers for each registered subsystem */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -145,6 +171,9 @@ struct cgroup {
 	int pids_use_count;
 	/* Length of the current tasks_pids array */
 	int pids_length;
+
+	/* For RCU-protected deletion */
+	struct rcu_head rcu_head;
 };
 
 /* A css_set is a structure holding pointers to a set of
@@ -337,9 +366,23 @@ struct cgroup_subsys {
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
-	/* Protected by RCU */
-	struct cgroupfs_root *root;
+	/*
+	 * Protects sibling/children links of cgroups in this
+	 * hierarchy, plus protects which hierarchy (or none) the
+	 * subsystem is a part of (i.e. root/sibling). To avoid
+	 * potential deadlocks, the following operations should not be
+	 * undertaken while holding any hierarchy_mutex:
+	 *
+	 * - allocating memory
+	 * - initiating hotplug events
+	 */
+	struct mutex hierarchy_mutex;
 
+	/*
+	 * Link to parent, and list entry in parent's children.
+	 * Protected by this->hierarchy_mutex and cgroup_lock()
+	 */
+	struct cgroupfs_root *root;
 	struct list_head sibling;
 };
 
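As a usage sketch (illustrative, not from the patch): css_tryget() is meant for the case where a css was found under RCU and may be mid-destruction. task_subsys_state() is the existing cgroup helper; my_subsys_id stands in for a real subsystem id.

struct cgroup_subsys_state *css;

rcu_read_lock();
css = task_subsys_state(tsk, my_subsys_id);	/* my_subsys_id is a placeholder */
if (css && !css_tryget(css))
	css = NULL;				/* the cgroup is being destroyed */
rcu_read_unlock();

if (css) {
	/* ... safe to use the subsystem state here ... */
	css_put(css);				/* pairs with css_tryget() */
}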
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 51ea2bdea0f9..90c6074a36ca 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,9 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
-extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p,
+				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -86,12 +87,13 @@ static inline int cpuset_init_early(void) { return 0; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
-static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
+static inline void cpuset_cpus_allowed(struct task_struct *p,
+				       struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
 static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					cpumask_t *mask)
+					struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
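With the switch from on-stack cpumask_t values to struct cpumask pointers, callers are expected to pass a separately allocated mask. A hedged sketch of a caller (the reporting function itself is illustrative):

static int report_allowed_cpus(struct task_struct *p)
{
	cpumask_var_t mask;	/* assumes alloc_cpumask_var()/free_cpumask_var() */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpuset_cpus_allowed(p, mask);	/* CPUs p's cpuset allows */
	pr_info("%s: %d allowed cpus\n", p->comm, cpumask_weight(mask));
	free_cpumask_var(mask);
	return 0;
}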
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b084eb5a..64dea2ab326c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,32 +29,6 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-	DMA_RESOURCE_SUSPEND,
-	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_AVAILABLE,
-	DMA_RESOURCE_REMOVED,
-};
-
-/**
- * enum dma_state_client - state of the channel in the client
- * @DMA_ACK: client would like to use, or was using this channel
- * @DMA_DUP: client has already seen this channel, or is not using this channel
- * @DMA_NAK: client does not want to see any more channels
- */
-enum dma_state_client {
-	DMA_ACK,
-	DMA_DUP,
-	DMA_NAK,
-};
-
-/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -89,23 +63,13 @@ enum dma_transaction_type {
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
+	DMA_PRIVATE,
 	DMA_SLAVE,
 };
 
 /* last transaction type for creation of the capabilities mask */
 #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
 
-/**
- * enum dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dma_slave_width {
-	DMA_SLAVE_WIDTH_8BIT,
-	DMA_SLAVE_WIDTH_16BIT,
-	DMA_SLAVE_WIDTH_32BIT,
-};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -132,32 +96,6 @@ enum dma_ctrl_flags {
 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
- * struct dma_slave - Information about a DMA slave
- * @dev: device acting as DMA slave
- * @dma_dev: required DMA master device. If non-NULL, the client can not be
- *	bound to other masters than this.
- * @tx_reg: physical address of data register used for
- *	memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *	peripheral-to-memory transfers
- * @reg_width: peripheral register width
- *
- * If dma_dev is non-NULL, the client can not be bound to other DMA
- * masters than the one corresponding to this device. The DMA master
- * driver may use this to determine if there is controller-specific
- * data wrapped around this struct. Drivers of platform code that sets
- * the dma_dev field must therefore make sure to use an appropriate
- * controller-specific dma slave structure wrapping this struct.
- */
-struct dma_slave {
-	struct device		*dev;
-	struct device		*dma_dev;
-	dma_addr_t		tx_reg;
-	dma_addr_t		rx_reg;
-	enum dma_slave_width	reg_width;
-};
-
-/**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
@@ -165,7 +103,6 @@ struct dma_slave {
  */
 
 struct dma_chan_percpu {
-	local_t refcount;
 	/* stats */
 	unsigned long memcpy_count;
 	unsigned long bytes_transferred;
@@ -176,13 +113,14 @@ struct dma_chan_percpu {
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
- * @class_dev: class device for sysfs
+ * @dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
  */
 struct dma_chan {
 	struct dma_device *device;
@@ -190,73 +128,47 @@ struct dma_chan {
 
 	/* sysfs */
 	int chan_id;
-	struct device dev;
-
-	struct kref refcount;
-	int slow_ref;
-	struct rcu_head rcu;
+	struct dma_chan_dev *dev;
 
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 	int client_count;
+	int table_count;
 };
 
-#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
-
-void dma_chan_cleanup(struct kref *kref);
-
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_get(&chan->refcount);
-	else {
-		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan - driver channel device
+ * @device - sysfs device
+ * @dev_id - parent dma_device dev_id
+ * @idr_ref - reference count to gate release of dma_device dev_id
+ */
+struct dma_chan_dev {
+	struct dma_chan *chan;
+	struct device device;
+	int dev_id;
+	atomic_t *idr_ref;
+};
 
-static inline void dma_chan_put(struct dma_chan *chan)
+static inline const char *dma_chan_name(struct dma_chan *chan)
 {
-	if (unlikely(chan->slow_ref))
-		kref_put(&chan->refcount, dma_chan_cleanup);
-	else {
-		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
+	return dev_name(&chan->dev->device);
 }
 
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device. A client must not 'ack' an available channel more
- * than once. When a channel is removed all clients are notified. If a client
- * is using the channel it must 'ack' the removal. A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state);
+void dma_chan_cleanup(struct kref *kref);
 
 /**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *	a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *	DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned.  Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask.  It returns 'true' to indicate that the
+ * channel is suitable.
  */
-struct dma_client {
-	dma_event_callback	event_callback;
-	dma_cap_mask_t		cap_mask;
-	struct dma_slave	*slave;
-	struct list_head	global_node;
-};
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 /**
@@ -323,14 +235,10 @@ struct dma_device {
 	dma_cap_mask_t cap_mask;
 	int max_xor;
 
-	struct kref refcount;
-	struct completion done;
-
 	int dev_id;
 	struct device *dev;
 
-	int (*device_alloc_chan_resources)(struct dma_chan *chan,
-			struct dma_client *client);
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -362,9 +270,8 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client);
+void dmaengine_get(void);
+void dmaengine_put(void);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
 static inline int
 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
 
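The client model changes from dma_client registration plus event callbacks to direct channel allocation: build a capability mask, optionally supply a dma_filter_fn, and get an exclusive channel back (or NULL). A minimal usage sketch (illustrative, not from the patch); matching on a particular controller device is just one possible filter policy:

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* accept only channels provided by the controller we care about */
	return chan->device->dev == param;
}

static struct dma_chan *grab_memcpy_chan(struct device *wanted_ctrl)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, my_filter, wanted_ctrl);
	/* on success the channel is exclusively ours; hand it back later
	 * with dma_release_channel(chan) */
	return chan;
}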
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 04d217b442bf..d797dde247f7 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -22,14 +22,34 @@ struct dw_dma_platform_data {
 };
 
 /**
+ * enum dw_dma_slave_width - DMA slave register access width.
+ * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum dw_dma_slave_width {
+	DW_DMA_SLAVE_WIDTH_8BIT,
+	DW_DMA_SLAVE_WIDTH_16BIT,
+	DW_DMA_SLAVE_WIDTH_32BIT,
+};
+
+/**
  * struct dw_dma_slave - Controller-specific information about a slave
- * @slave: Generic information about the slave
- * @ctl_lo: Platform-specific initializer for the CTL_LO register
+ *
+ * @dma_dev: required DMA master device
+ * @tx_reg: physical address of data register used for
+ *	memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ *	peripheral-to-memory transfers
+ * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
  */
 struct dw_dma_slave {
-	struct dma_slave	slave;
+	struct device		*dma_dev;
+	dma_addr_t		tx_reg;
+	dma_addr_t		rx_reg;
+	enum dw_dma_slave_width	reg_width;
 	u32			cfg_hi;
 	u32			cfg_lo;
 };
@@ -54,9 +74,4 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
-static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
-{
-	return container_of(slave, struct dw_dma_slave, slave);
-}
-
 #endif /* DW_DMAC_H */
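With the generic struct dma_slave gone, board code embeds a dw_dma_slave directly in the consumer's platform data (see the atmel-mci.h change above). A hedged sketch; the device pointer and register addresses below are board-specific placeholders, not values from the patch:

static struct mci_platform_data mci0_data = {
	.dma_slave = {
		/* all values here are illustrative placeholders */
		.dma_dev   = &dw_dmac0_device.dev,	/* the dw_dmac controller */
		.tx_reg    = MCI0_BASE + 0x34,		/* MMC transmit data register */
		.rx_reg    = MCI0_BASE + 0x30,		/* MMC receive data register */
		.reg_width = DW_DMA_SLAVE_WIDTH_32BIT,
	},
};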
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 78c775a83f7c..121720d74e15 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -194,6 +194,30 @@ struct ext2_group_desc
 #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define EXT2_FL_USER_MODIFIABLE	FS_FL_USER_MODIFIABLE	/* User modifiable flags */
 
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+			   EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
+			   EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
+			   EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+			   EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+	if (S_ISDIR(mode))
+		return flags;
+	else if (S_ISREG(mode))
+		return flags & EXT2_REG_FLMASK;
+	else
+		return flags & EXT2_OTHER_FLMASK;
+}
+
 /*
  * ioctl commands
 */
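ext2_mask_flags() is intended for the inode-creation path, where a new inode inherits flags from its parent directory. A hedged sketch modeled on that path (EXT2_I() and i_flags live in ext2's private headers):

/* inside inode allocation, once the parent directory 'dir' is known */
struct ext2_inode_info *ei = EXT2_I(inode);

ei->i_flags = ext2_mask_flags(inode->i_mode,
			      EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);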
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index dc541f3653d1..1cdb66367c98 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -101,7 +101,7 @@ struct ext2_sb_info {
 	struct percpu_counter s_freeblocks_counter;
 	struct percpu_counter s_freeinodes_counter;
 	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock s_blockgroup_lock;
+	struct blockgroup_lock *s_blockgroup_lock;
 	/* root of the per fs reservation window tree */
 	spinlock_t s_rsv_window_lock;
 	struct rb_root s_rsv_window_root;
@@ -111,7 +111,7 @@ struct ext2_sb_info {
 static inline spinlock_t *
 sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
 {
-	return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
 }
 
 #endif	/* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index d14f02918483..dd495b8c3091 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -178,6 +178,30 @@ struct ext3_group_desc
 #define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
 #define EXT3_FL_USER_MODIFIABLE	0x000380FF /* User modifiable flags */
 
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+			   EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
+			   EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
+			   EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
+			   EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
+{
+	if (S_ISDIR(mode))
+		return flags;
+	else if (S_ISREG(mode))
+		return flags & EXT3_REG_FLMASK;
+	else
+		return flags & EXT3_OTHER_FLMASK;
+}
+
 /*
  * Inode dynamic state flags
  */
@@ -354,6 +378,13 @@ struct ext3_inode {
 #define	EXT3_ORPHAN_FS			0x0004	/* Orphans being recovered */
 
 /*
+ * Misc. filesystem flags
+ */
+#define EXT2_FLAGS_SIGNED_HASH		0x0001	/* Signed dirhash in use */
+#define EXT2_FLAGS_UNSIGNED_HASH	0x0002	/* Unsigned dirhash in use */
+#define EXT2_FLAGS_TEST_FILESYS		0x0004	/* to test development code */
+
+/*
  * Mount flags
  */
 #define EXT3_MOUNT_CHECK		0x00001	/* Do mount-time checks */
@@ -489,7 +520,23 @@ struct ext3_super_block {
 	__u16	s_reserved_word_pad;
 	__le32	s_default_mount_opts;
 	__le32	s_first_meta_bg;	/* First metablock block group */
-	__u32	s_reserved[190];	/* Padding to the end of the block */
+	__le32	s_mkfs_time;		/* When the filesystem was created */
+	__le32	s_jnl_blocks[17];	/* Backup of the journal inode */
+	/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+/*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
+	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
+	__le32	s_free_blocks_count_hi;	/* Free blocks count */
+	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
+	__le16	s_want_extra_isize;	/* New inodes should reserve # bytes */
+	__le32	s_flags;		/* Miscellaneous flags */
+	__le16	s_raid_stride;		/* RAID stride */
+	__le16	s_mmp_interval;		/* # seconds to wait in MMP checking */
+	__le64	s_mmp_block;		/* Block for multi-mount protection */
+	__le32	s_raid_stripe_width;	/* blocks on all data disks (N*stride)*/
+	__u8	s_log_groups_per_flex;	/* FLEX_BG group size */
+	__u8	s_reserved_char_pad2;
+	__le16	s_reserved_pad;
+	__u32	s_reserved[162];	/* Padding to the end of the block */
 };
 
 #ifdef __KERNEL__
@@ -694,6 +741,9 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len)
 #define DX_HASH_LEGACY		0
 #define DX_HASH_HALF_MD4	1
 #define DX_HASH_TEA		2
+#define DX_HASH_LEGACY_UNSIGNED		3
+#define DX_HASH_HALF_MD4_UNSIGNED	4
+#define DX_HASH_TEA_UNSIGNED		5
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index e024e38248ff..f07f34de2f0e 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -57,10 +57,11 @@ struct ext3_sb_info {
 	u32 s_next_generation;
 	u32 s_hash_seed[4];
 	int s_def_hash_version;
+	int s_hash_unsigned;	/* 3 if hash should be signed, 0 if not */
 	struct percpu_counter s_freeblocks_counter;
 	struct percpu_counter s_freeinodes_counter;
 	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock s_blockgroup_lock;
+	struct blockgroup_lock *s_blockgroup_lock;
 
 	/* root of the per fs reservation window tree */
 	spinlock_t s_rsv_window_lock;
@@ -86,7 +87,7 @@ struct ext3_sb_info {
 static inline spinlock_t *
 sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
 {
-	return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
 }
 
 #endif	/* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e38a64d71eff..0b87b29f4797 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -565,6 +565,7 @@ struct address_space {
 struct block_device {
 	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
 	struct inode *		bd_inode;	/* will die */
+	struct super_block *	bd_super;
 	int			bd_openers;
 	struct mutex		bd_mutex;	/* open/close mutex */
 	struct semaphore	bd_mount_sem;
@@ -1389,6 +1390,7 @@ struct super_operations {
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 #endif
+	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
 };
 
 /*
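The new ->bdev_try_to_free_page hook lets a filesystem assist (or veto) freeing of its block device's page-cache pages. A hedged sketch of wiring it up; a real implementation such as ext3/ext4's also checks journal state before releasing buffers:

static int myfs_bdev_try_to_free_page(struct super_block *sb,
				      struct page *page, gfp_t wait)
{
	/* only buffer-backed pages can be released this way */
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

static const struct super_operations myfs_sops = {
	/* ... other operations ... */
	.bdev_try_to_free_page	= myfs_bdev_try_to_free_page,
};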
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5cb0c3f6dcf..f8ff918c208f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -115,6 +115,11 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 			     u16 vlan_tci, int polling);
 extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
+extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+			    unsigned int vlan_tci, struct sk_buff *skb);
+extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+			  unsigned int vlan_tci,
+			  struct napi_gro_fraginfo *info);
 
 #else
 static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -140,6 +145,20 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
 	return 0;
 }
+
+static inline int vlan_gro_receive(struct napi_struct *napi,
+				   struct vlan_group *grp,
+				   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	return NET_RX_DROP;
+}
+
+static inline int vlan_gro_frags(struct napi_struct *napi,
+				 struct vlan_group *grp, unsigned int vlan_tci,
+				 struct napi_gro_fraginfo *info)
+{
+	return NET_RX_DROP;
+}
 #endif
 
 /**
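vlan_gro_receive() is the VLAN-aware counterpart of napi_gro_receive(); a driver's receive path would hand tagged frames to it roughly as below (struct my_adapter, its vlgrp/napi members and the tag extraction are driver-specific placeholders, not part of the patch):

static void my_receive_skb(struct my_adapter *adapter, struct sk_buff *skb,
			   bool tagged, u16 vlan_tci)
{
	if (tagged && adapter->vlgrp)
		vlan_gro_receive(&adapter->napi, adapter->vlgrp, vlan_tci, skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}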
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index f98a656b17e5..76dad4808847 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -86,4 +86,6 @@ static inline int task_nice_ioclass(struct task_struct *task)
  */
 extern int ioprio_best(unsigned short aprio, unsigned short bprio);
 
+extern int set_task_ioprio(struct task_struct *task, int ioprio);
+
 #endif
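set_task_ioprio() is now declared for callers outside the ioprio syscall code; a short sketch using the existing IOPRIO_* helpers from this header:

/* put a task into the best-effort I/O class at priority 4 */
static int make_task_be4(struct task_struct *task)
{
	return set_task_ioprio(task, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
}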
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 346e2b80be7d..6384b19efe64 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -543,6 +543,11 @@ struct transaction_s
 	unsigned long		t_expires;
 
 	/*
+	 * When this transaction started, in nanoseconds [no locking]
+	 */
+	ktime_t			t_start_time;
+
+	/*
 	 * How many handles used this transaction? [t_handle_lock]
 	 */
 	int t_handle_count;
@@ -798,9 +803,19 @@ struct journal_s
 	struct buffer_head	**j_wbuf;
 	int			j_wbufsize;
 
+	/*
+	 * this is the pid of the last person to run a synchronous operation
+	 * through the journal.
+	 */
 	pid_t			j_last_sync_writer;
 
 	/*
+	 * the average amount of time in nanoseconds it takes to commit a
+	 * transaction to the disk. [j_state_lock]
+	 */
+	u64			j_average_commit_time;
+
+	/*
 	 * An opaque pointer to fs-private information.  ext3 puts its
 	 * superblock pointer here
 	 */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 34456476e761..b45109c61fba 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -638,6 +638,11 @@ struct transaction_s
 	unsigned long		t_expires;
 
 	/*
+	 * When this transaction started, in nanoseconds [no locking]
+	 */
+	ktime_t			t_start_time;
+
+	/*
 	 * How many handles used this transaction? [t_handle_lock]
 	 */
 	int t_handle_count;
@@ -682,6 +687,8 @@ jbd2_time_diff(unsigned long start, unsigned long end)
 	return end + (MAX_JIFFY_OFFSET - start);
 }
 
+#define JBD2_NR_BATCH	64
+
 /**
  * struct journal_s - The journal_s type is the concrete type associated with
  * journal_t.
@@ -826,6 +833,14 @@ struct journal_s
 	struct mutex		j_checkpoint_mutex;
 
 	/*
+	 * List of buffer heads used by the checkpoint routine.  This
+	 * was moved from jbd2_log_do_checkpoint() to reduce stack
+	 * usage.  Access to this array is controlled by the
+	 * j_checkpoint_mutex.  [j_checkpoint_mutex]
+	 */
+	struct buffer_head	*j_chkpt_bhs[JBD2_NR_BATCH];
+
+	/*
 	 * Journal head: identifies the first unused block in the journal.
 	 * [j_state_lock]
 	 */
@@ -939,8 +954,26 @@ struct journal_s
 	struct buffer_head	**j_wbuf;
 	int			j_wbufsize;
 
+	/*
+	 * this is the pid of the last person to run a synchronous operation
+	 * through the journal
+	 */
 	pid_t			j_last_sync_writer;
 
+	/*
+	 * the average amount of time in nanoseconds it takes to commit a
+	 * transaction to disk. [j_state_lock]
+	 */
+	u64			j_average_commit_time;
+
+	/*
+	 * minimum and maximum times that we should wait for
+	 * additional filesystem operations to get batched into a
+	 * synchronous handle in microseconds
+	 */
+	u32			j_min_batch_time;
+	u32			j_max_batch_time;
+
 	/* This function is called when a transaction is closed */
 	void			(*j_commit_callback)(journal_t *,
 						     transaction_t *);
@@ -1102,7 +1135,6 @@ extern int	   jbd2_journal_set_features
 		   (journal_t *, unsigned long, unsigned long, unsigned long);
 extern void	   jbd2_journal_clear_features
 		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   jbd2_journal_create     (journal_t *);
 extern int	   jbd2_journal_load       (journal_t *journal);
 extern int	   jbd2_journal_destroy    (journal_t *);
 extern int	   jbd2_journal_recover    (journal_t *journal);
@@ -1177,8 +1209,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
 int jbd2_log_do_checkpoint(journal_t *journal);
 
 void __jbd2_log_wait_for_space(journal_t *journal);
-extern void	__jbd2_journal_drop_transaction(journal_t *, transaction_t *);
-extern int	jbd2_cleanup_journal_tail(journal_t *);
+extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int jbd2_cleanup_journal_tail(journal_t *);
 
 /* Debugging code only: */
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6b8e2027165e..343df9ef2412 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -476,6 +476,12 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 	__val = __val < __min ? __min: __val;	\
 	__val > __max ? __max: __val; })
 
+
+/*
+ * swap - swap value of @a and @b
+ */
+#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
+
 /**
  * container_of - cast a member of a structure out to the containing structure
  * @ptr:	the pointer to the member.
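The new swap() macro works on any two assignable lvalues of the same type, for example:

static void sort_pair(int *x, int *y)
{
	if (*x > *y)
		swap(*x, *y);	/* expands to a typeof-based three-step exchange */
}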
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 3449de597eff..4f7c8fb4d3fe 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1518,6 +1518,7 @@ extern void sata_pmp_error_handler(struct ata_port *ap);
 
 extern const struct ata_port_operations ata_sff_port_ops;
 extern const struct ata_port_operations ata_bmdma_port_ops;
+extern const struct ata_port_operations ata_bmdma32_port_ops;
 
 /* PIO only, sg_tablesize and dma_boundary limits can be removed */
 #define ATA_PIO_SHT(drv_name)					\
@@ -1545,6 +1546,8 @@ extern void ata_sff_exec_command(struct ata_port *ap,
 			const struct ata_taskfile *tf);
 extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw);
 extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
 extern u8 ata_sff_irq_on(struct ata_port *ap);
diff --git a/include/linux/magic.h b/include/linux/magic.h
index f7f3fdddbef0..439f6f3cb0c4 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,6 +13,7 @@
 #define EFS_SUPER_MAGIC		0x414A53
 #define EXT2_SUPER_MAGIC	0xEF53
 #define EXT3_SUPER_MAGIC	0xEF53
+#define XENFS_SUPER_MAGIC	0xabba1974
 #define EXT4_SUPER_MAGIC	0xEF53
 #define HPFS_SUPER_MAGIC	0xf995e849
 #define ISOFS_SUPER_MAGIC	0x9660
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d39521..326f45c86530 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -19,22 +19,45 @@
 
 #ifndef _LINUX_MEMCONTROL_H
 #define _LINUX_MEMCONTROL_H
-
+#include <linux/cgroup.h>
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In current implementation, memcg doesn't
+ * alloc memory but reclaims memory from all available zones. So, "where I want
+ * memory from" bits of gfp_mask has no meaning. So any bits of that field is
+ * available but adding a rule is better. charge functions' gfp_mask should
+ * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
+ * codes.
+ * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
+ */
 
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
+extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru(struct page *page);
+extern void mem_cgroup_move_lists(struct page *page,
+				  enum lru_list from, enum lru_list to);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
+extern int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
@@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-#define mm_match_cgroup(mm, cgroup)	\
-	((cgroup) == mem_cgroup_from_task((mm)->owner))
+static inline
+int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+{
+	struct mem_cgroup *mem;
+	rcu_read_lock();
+	mem = mem_cgroup_from_task((mm)->owner);
+	rcu_read_unlock();
+	return cgroup == mem;
+}
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage);
 
 /*
  * For memory reclaim.
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern int do_swap_account;
110 | #endif | ||
71 | 111 | ||
112 | static inline bool mem_cgroup_disabled(void) | ||
113 | { | ||
114 | if (mem_cgroup_subsys.disabled) | ||
115 | return true; | ||
116 | return false; | ||
117 | } | ||
118 | |||
119 | extern bool mem_cgroup_oom_called(struct task_struct *task); | ||
72 | 120 | ||
73 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 121 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
74 | static inline int mem_cgroup_charge(struct page *page, | 122 | struct mem_cgroup; |
123 | |||
124 | static inline int mem_cgroup_newpage_charge(struct page *page, | ||
75 | struct mm_struct *mm, gfp_t gfp_mask) | 125 | struct mm_struct *mm, gfp_t gfp_mask) |
76 | { | 126 | { |
77 | return 0; | 127 | return 0; |
@@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page, | |||
83 | return 0; | 133 | return 0; |
84 | } | 134 | } |
85 | 135 | ||
136 | static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | ||
137 | struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) | ||
138 | { | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static inline void mem_cgroup_commit_charge_swapin(struct page *page, | ||
143 | struct mem_cgroup *ptr) | ||
144 | { | ||
145 | } | ||
146 | |||
147 | static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) | ||
148 | { | ||
149 | } | ||
150 | |||
86 | static inline void mem_cgroup_uncharge_page(struct page *page) | 151 | static inline void mem_cgroup_uncharge_page(struct page *page) |
87 | { | 152 | { |
88 | } | 153 | } |
@@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) | |||
91 | { | 156 | { |
92 | } | 157 | } |
93 | 158 | ||
94 | static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) | 159 | static inline int mem_cgroup_shrink_usage(struct page *page, |
160 | struct mm_struct *mm, gfp_t gfp_mask) | ||
95 | { | 161 | { |
96 | return 0; | 162 | return 0; |
97 | } | 163 | } |
98 | 164 | ||
99 | static inline void mem_cgroup_move_lists(struct page *page, bool active) | 165 | static inline void mem_cgroup_add_lru_list(struct page *page, int lru) |
166 | { | ||
167 | } | ||
168 | |||
169 | static inline void mem_cgroup_del_lru_list(struct page *page, int lru) | ||
170 | { | ||
171 | return ; | ||
172 | } | ||
173 | |||
174 | static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) | ||
175 | { | ||
176 | return ; | ||
177 | } | ||
178 | |||
179 | static inline void mem_cgroup_del_lru(struct page *page) | ||
180 | { | ||
181 | return ; | ||
182 | } | ||
183 | |||
184 | static inline void | ||
185 | mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) | ||
100 | { | 186 | { |
101 | } | 187 | } |
102 | 188 | ||
@@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task, | |||
112 | } | 198 | } |
113 | 199 | ||
114 | static inline int | 200 | static inline int |
115 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage) | 201 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) |
116 | { | 202 | { |
117 | return 0; | 203 | return 0; |
118 | } | 204 | } |
119 | 205 | ||
120 | static inline void mem_cgroup_end_migration(struct page *page) | 206 | static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, |
207 | struct page *oldpage, | ||
208 | struct page *newpage) | ||
121 | { | 209 | { |
122 | } | 210 | } |
123 | 211 | ||
@@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, | |||
146 | { | 234 | { |
147 | } | 235 | } |
148 | 236 | ||
149 | static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, | 237 | static inline bool mem_cgroup_disabled(void) |
150 | struct zone *zone, int priority, | 238 | { |
151 | enum lru_list lru) | 239 | return true; |
240 | } | ||
241 | |||
242 | static inline bool mem_cgroup_oom_called(struct task_struct *task) | ||
243 | { | ||
244 | return false; | ||
245 | } | ||
246 | |||
247 | static inline int | ||
248 | mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) | ||
249 | { | ||
250 | return 1; | ||
251 | } | ||
252 | |||
253 | static inline unsigned long | ||
254 | mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, | ||
255 | enum lru_list lru) | ||
152 | { | 256 | { |
153 | return 0; | 257 | return 0; |
154 | } | 258 | } |
259 | |||
260 | |||
261 | static inline struct zone_reclaim_stat* | ||
262 | mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) | ||
263 | { | ||
264 | return NULL; | ||
265 | } | ||
266 | |||
267 | static inline struct zone_reclaim_stat* | ||
268 | mem_cgroup_get_reclaim_stat_from_page(struct page *page) | ||
269 | { | ||
270 | return NULL; | ||
271 | } | ||
272 | |||
155 | #endif /* CONFIG_CGROUP_MEM_CONT */ | 273 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
156 | 274 | ||
157 | #endif /* _LINUX_MEMCONTROL_H */ | 275 | #endif /* _LINUX_MEMCONTROL_H */ |
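The hunk above replaces the single-step swap-in charge with a try/commit/cancel triple. Below is a minimal sketch of the calling convention those prototypes imply; example_map_page() is a hypothetical placeholder for whatever work happens between reserving and committing the charge.

/* Hedged sketch of the two-phase swap-in charge protocol implied by the
 * prototypes above: reserve first, then commit on success or cancel on
 * failure.  example_map_page() is a made-up placeholder. */
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_map_page(struct mm_struct *mm, struct page *page)
{
	return 0;	/* placeholder for installing the PTE etc. */
}

static int example_swapin_charge(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	int ret;

	/* Reserve the charge against mm's cgroup; may reclaim and sleep. */
	ret = mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg);
	if (ret)
		return ret;

	if (example_map_page(mm, page) < 0) {
		/* Mapping failed: give the reserved charge back. */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -ENOMEM;
	}

	/* The page is now in use: make the charge permanent. */
	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}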
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c948350c378e..7fbb97267556 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h | |||
@@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) | |||
28 | { | 28 | { |
29 | list_add(&page->lru, &zone->lru[l].list); | 29 | list_add(&page->lru, &zone->lru[l].list); |
30 | __inc_zone_state(zone, NR_LRU_BASE + l); | 30 | __inc_zone_state(zone, NR_LRU_BASE + l); |
31 | mem_cgroup_add_lru_list(page, l); | ||
31 | } | 32 | } |
32 | 33 | ||
33 | static inline void | 34 | static inline void |
@@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) | |||
35 | { | 36 | { |
36 | list_del(&page->lru); | 37 | list_del(&page->lru); |
37 | __dec_zone_state(zone, NR_LRU_BASE + l); | 38 | __dec_zone_state(zone, NR_LRU_BASE + l); |
39 | mem_cgroup_del_lru_list(page, l); | ||
38 | } | 40 | } |
39 | 41 | ||
40 | static inline void | 42 | static inline void |
@@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page) | |||
54 | l += page_is_file_cache(page); | 56 | l += page_is_file_cache(page); |
55 | } | 57 | } |
56 | __dec_zone_state(zone, NR_LRU_BASE + l); | 58 | __dec_zone_state(zone, NR_LRU_BASE + l); |
59 | mem_cgroup_del_lru_list(page, l); | ||
57 | } | 60 | } |
58 | 61 | ||
59 | /** | 62 | /** |
@@ -78,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page) | |||
78 | return lru; | 81 | return lru; |
79 | } | 82 | } |
80 | 83 | ||
81 | /** | ||
82 | * inactive_anon_is_low - check if anonymous pages need to be deactivated | ||
83 | * @zone: zone to check | ||
84 | * | ||
85 | * Returns true if the zone does not have enough inactive anon pages, | ||
86 | * meaning some active anon pages need to be deactivated. | ||
87 | */ | ||
88 | static inline int inactive_anon_is_low(struct zone *zone) | ||
89 | { | ||
90 | unsigned long active, inactive; | ||
91 | |||
92 | active = zone_page_state(zone, NR_ACTIVE_ANON); | ||
93 | inactive = zone_page_state(zone, NR_INACTIVE_ANON); | ||
94 | |||
95 | if (inactive * zone->inactive_ratio < active) | ||
96 | return 1; | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | #endif | 84 | #endif |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 35a7b5e19465..09c14e213b63 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -263,6 +263,19 @@ enum zone_type { | |||
263 | #error ZONES_SHIFT -- too many zones configured adjust calculation | 263 | #error ZONES_SHIFT -- too many zones configured adjust calculation |
264 | #endif | 264 | #endif |
265 | 265 | ||
266 | struct zone_reclaim_stat { | ||
267 | /* | ||
268 | * The pageout code in vmscan.c keeps track of how many of the | ||
269 | * mem/swap backed and file backed pages are referenced. | ||
270 | * The higher the rotated/scanned ratio, the more valuable | ||
271 | * that cache is. | ||
272 | * | ||
273 | * The anon LRU stats live in [0], file LRU stats in [1] | ||
274 | */ | ||
275 | unsigned long recent_rotated[2]; | ||
276 | unsigned long recent_scanned[2]; | ||
277 | }; | ||
278 | |||
266 | struct zone { | 279 | struct zone { |
267 | /* Fields commonly accessed by the page allocator */ | 280 | /* Fields commonly accessed by the page allocator */ |
268 | unsigned long pages_min, pages_low, pages_high; | 281 | unsigned long pages_min, pages_low, pages_high; |
@@ -315,16 +328,7 @@ struct zone { | |||
315 | unsigned long nr_scan; | 328 | unsigned long nr_scan; |
316 | } lru[NR_LRU_LISTS]; | 329 | } lru[NR_LRU_LISTS]; |
317 | 330 | ||
318 | /* | 331 | struct zone_reclaim_stat reclaim_stat; |
319 | * The pageout code in vmscan.c keeps track of how many of the | ||
320 | * mem/swap backed and file backed pages are refeferenced. | ||
321 | * The higher the rotated/scanned ratio, the more valuable | ||
322 | * that cache is. | ||
323 | * | ||
324 | * The anon LRU stats live in [0], file LRU stats in [1] | ||
325 | */ | ||
326 | unsigned long recent_rotated[2]; | ||
327 | unsigned long recent_scanned[2]; | ||
328 | 332 | ||
329 | unsigned long pages_scanned; /* since last reclaim */ | 333 | unsigned long pages_scanned; /* since last reclaim */ |
330 | unsigned long flags; /* zone flags, see below */ | 334 | unsigned long flags; /* zone flags, see below */ |
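The new struct zone_reclaim_stat only moves the rotated/scanned counters into a shape memcg can reuse. As a rough illustration of the ratio the comment refers to, a helper like the following (an assumption, not part of the patch) would express how "valuable" a set of LRU lists currently looks.

/* Illustrative only: the rotated/scanned percentage described in the
 * comment above.  Higher means more recently referenced pages, i.e. a
 * more valuable cache. */
#include <linux/mmzone.h>

static unsigned long example_rotated_pct(struct zone_reclaim_stat *rs,
					 int file)	/* 0 = anon, 1 = file */
{
	unsigned long scanned = rs->recent_scanned[file];

	if (!scanned)
		return 0;
	return rs->recent_rotated[file] * 100 / scanned;
}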
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 00e2b575021f..88d3d8fbf9f2 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h | |||
@@ -520,6 +520,7 @@ struct cfi_fixup { | |||
520 | 520 | ||
521 | #define CFI_MFR_AMD 0x0001 | 521 | #define CFI_MFR_AMD 0x0001 |
522 | #define CFI_MFR_ATMEL 0x001F | 522 | #define CFI_MFR_ATMEL 0x001F |
523 | #define CFI_MFR_SAMSUNG 0x00EC | ||
523 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ | 524 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ |
524 | 525 | ||
525 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); | 526 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); |
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h index 0be442f881dd..0555f7a0b9ed 100644 --- a/include/linux/mtd/ftl.h +++ b/include/linux/mtd/ftl.h | |||
@@ -32,25 +32,25 @@ | |||
32 | #define _LINUX_FTL_H | 32 | #define _LINUX_FTL_H |
33 | 33 | ||
34 | typedef struct erase_unit_header_t { | 34 | typedef struct erase_unit_header_t { |
35 | u_int8_t LinkTargetTuple[5]; | 35 | uint8_t LinkTargetTuple[5]; |
36 | u_int8_t DataOrgTuple[10]; | 36 | uint8_t DataOrgTuple[10]; |
37 | u_int8_t NumTransferUnits; | 37 | uint8_t NumTransferUnits; |
38 | u_int32_t EraseCount; | 38 | uint32_t EraseCount; |
39 | u_int16_t LogicalEUN; | 39 | uint16_t LogicalEUN; |
40 | u_int8_t BlockSize; | 40 | uint8_t BlockSize; |
41 | u_int8_t EraseUnitSize; | 41 | uint8_t EraseUnitSize; |
42 | u_int16_t FirstPhysicalEUN; | 42 | uint16_t FirstPhysicalEUN; |
43 | u_int16_t NumEraseUnits; | 43 | uint16_t NumEraseUnits; |
44 | u_int32_t FormattedSize; | 44 | uint32_t FormattedSize; |
45 | u_int32_t FirstVMAddress; | 45 | uint32_t FirstVMAddress; |
46 | u_int16_t NumVMPages; | 46 | uint16_t NumVMPages; |
47 | u_int8_t Flags; | 47 | uint8_t Flags; |
48 | u_int8_t Code; | 48 | uint8_t Code; |
49 | u_int32_t SerialNumber; | 49 | uint32_t SerialNumber; |
50 | u_int32_t AltEUHOffset; | 50 | uint32_t AltEUHOffset; |
51 | u_int32_t BAMOffset; | 51 | uint32_t BAMOffset; |
52 | u_int8_t Reserved[12]; | 52 | uint8_t Reserved[12]; |
53 | u_int8_t EndTuple[2]; | 53 | uint8_t EndTuple[2]; |
54 | } erase_unit_header_t; | 54 | } erase_unit_header_t; |
55 | 55 | ||
56 | /* Flags in erase_unit_header_t */ | 56 | /* Flags in erase_unit_header_t */ |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index aa30244492c6..b981b8772217 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -223,6 +223,7 @@ struct map_info { | |||
223 | must leave it enabled. */ | 223 | must leave it enabled. */ |
224 | void (*set_vpp)(struct map_info *, int); | 224 | void (*set_vpp)(struct map_info *, int); |
225 | 225 | ||
226 | unsigned long pfow_base; | ||
226 | unsigned long map_priv_1; | 227 | unsigned long map_priv_1; |
227 | unsigned long map_priv_2; | 228 | unsigned long map_priv_2; |
228 | void *fldrv_priv; | 229 | void *fldrv_priv; |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 64433eb411d7..3aa5d77c2cdb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/mtd/compatmac.h> | 15 | #include <linux/mtd/compatmac.h> |
16 | #include <mtd/mtd-abi.h> | 16 | #include <mtd/mtd-abi.h> |
17 | 17 | ||
18 | #include <asm/div64.h> | ||
19 | |||
18 | #define MTD_CHAR_MAJOR 90 | 20 | #define MTD_CHAR_MAJOR 90 |
19 | #define MTD_BLOCK_MAJOR 31 | 21 | #define MTD_BLOCK_MAJOR 31 |
20 | #define MAX_MTD_DEVICES 32 | 22 | #define MAX_MTD_DEVICES 32 |
@@ -25,20 +27,20 @@ | |||
25 | #define MTD_ERASE_DONE 0x08 | 27 | #define MTD_ERASE_DONE 0x08 |
26 | #define MTD_ERASE_FAILED 0x10 | 28 | #define MTD_ERASE_FAILED 0x10 |
27 | 29 | ||
28 | #define MTD_FAIL_ADDR_UNKNOWN 0xffffffff | 30 | #define MTD_FAIL_ADDR_UNKNOWN -1LL |
29 | 31 | ||
30 | /* If the erase fails, fail_addr might indicate exactly which block failed. If | 32 | /* If the erase fails, fail_addr might indicate exactly which block failed. If |
31 | fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not | 33 | fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not |
32 | specific to any particular block. */ | 34 | specific to any particular block. */ |
33 | struct erase_info { | 35 | struct erase_info { |
34 | struct mtd_info *mtd; | 36 | struct mtd_info *mtd; |
35 | u_int32_t addr; | 37 | uint64_t addr; |
36 | u_int32_t len; | 38 | uint64_t len; |
37 | u_int32_t fail_addr; | 39 | uint64_t fail_addr; |
38 | u_long time; | 40 | u_long time; |
39 | u_long retries; | 41 | u_long retries; |
40 | u_int dev; | 42 | unsigned dev; |
41 | u_int cell; | 43 | unsigned cell; |
42 | void (*callback) (struct erase_info *self); | 44 | void (*callback) (struct erase_info *self); |
43 | u_long priv; | 45 | u_long priv; |
44 | u_char state; | 46 | u_char state; |
@@ -46,9 +48,9 @@ struct erase_info { | |||
46 | }; | 48 | }; |
47 | 49 | ||
48 | struct mtd_erase_region_info { | 50 | struct mtd_erase_region_info { |
49 | u_int32_t offset; /* At which this region starts, from the beginning of the MTD */ | 51 | uint64_t offset; /* At which this region starts, from the beginning of the MTD */ |
50 | u_int32_t erasesize; /* For this region */ | 52 | uint32_t erasesize; /* For this region */ |
51 | u_int32_t numblocks; /* Number of blocks of erasesize in this region */ | 53 | uint32_t numblocks; /* Number of blocks of erasesize in this region */ |
52 | unsigned long *lockmap; /* If keeping bitmap of locks */ | 54 | unsigned long *lockmap; /* If keeping bitmap of locks */ |
53 | }; | 55 | }; |
54 | 56 | ||
@@ -100,14 +102,14 @@ struct mtd_oob_ops { | |||
100 | 102 | ||
101 | struct mtd_info { | 103 | struct mtd_info { |
102 | u_char type; | 104 | u_char type; |
103 | u_int32_t flags; | 105 | uint32_t flags; |
104 | u_int32_t size; // Total size of the MTD | 106 | uint64_t size; // Total size of the MTD |
105 | 107 | ||
106 | /* "Major" erase size for the device. Naïve users may take this | 108 | /* "Major" erase size for the device. Naïve users may take this |
107 | * to be the only erase size available, or may use the more detailed | 109 | * to be the only erase size available, or may use the more detailed |
108 | * information below if they desire | 110 | * information below if they desire |
109 | */ | 111 | */ |
110 | u_int32_t erasesize; | 112 | uint32_t erasesize; |
111 | /* Minimal writable flash unit size. In case of NOR flash it is 1 (even | 113 | /* Minimal writable flash unit size. In case of NOR flash it is 1 (even |
112 | * though individual bits can be cleared), in case of NAND flash it is | 114 | * though individual bits can be cleared), in case of NAND flash it is |
113 | * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR | 115 | * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR |
@@ -115,10 +117,20 @@ struct mtd_info { | |||
115 | * Any driver registering a struct mtd_info must ensure a writesize of | 117 | * Any driver registering a struct mtd_info must ensure a writesize of |
116 | * 1 or larger. | 118 | * 1 or larger. |
117 | */ | 119 | */ |
118 | u_int32_t writesize; | 120 | uint32_t writesize; |
121 | |||
122 | uint32_t oobsize; // Amount of OOB data per block (e.g. 16) | ||
123 | uint32_t oobavail; // Available OOB bytes per block | ||
119 | 124 | ||
120 | u_int32_t oobsize; // Amount of OOB data per block (e.g. 16) | 125 | /* |
121 | u_int32_t oobavail; // Available OOB bytes per block | 126 | * If erasesize is a power of 2 then the shift is stored in |
127 | * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. | ||
128 | */ | ||
129 | unsigned int erasesize_shift; | ||
130 | unsigned int writesize_shift; | ||
131 | /* Masks based on erasesize_shift and writesize_shift */ | ||
132 | unsigned int erasesize_mask; | ||
133 | unsigned int writesize_mask; | ||
122 | 134 | ||
123 | // Kernel-only stuff starts here. | 135 | // Kernel-only stuff starts here. |
124 | const char *name; | 136 | const char *name; |
@@ -190,8 +202,8 @@ struct mtd_info { | |||
190 | void (*sync) (struct mtd_info *mtd); | 202 | void (*sync) (struct mtd_info *mtd); |
191 | 203 | ||
192 | /* Chip-supported device locking */ | 204 | /* Chip-supported device locking */ |
193 | int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len); | 205 | int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
194 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len); | 206 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
195 | 207 | ||
196 | /* Power Management functions */ | 208 | /* Power Management functions */ |
197 | int (*suspend) (struct mtd_info *mtd); | 209 | int (*suspend) (struct mtd_info *mtd); |
@@ -221,6 +233,35 @@ struct mtd_info { | |||
221 | void (*put_device) (struct mtd_info *mtd); | 233 | void (*put_device) (struct mtd_info *mtd); |
222 | }; | 234 | }; |
223 | 235 | ||
236 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | ||
237 | { | ||
238 | if (mtd->erasesize_shift) | ||
239 | return sz >> mtd->erasesize_shift; | ||
240 | do_div(sz, mtd->erasesize); | ||
241 | return sz; | ||
242 | } | ||
243 | |||
244 | static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) | ||
245 | { | ||
246 | if (mtd->erasesize_shift) | ||
247 | return sz & mtd->erasesize_mask; | ||
248 | return do_div(sz, mtd->erasesize); | ||
249 | } | ||
250 | |||
251 | static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) | ||
252 | { | ||
253 | if (mtd->writesize_shift) | ||
254 | return sz >> mtd->writesize_shift; | ||
255 | do_div(sz, mtd->writesize); | ||
256 | return sz; | ||
257 | } | ||
258 | |||
259 | static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) | ||
260 | { | ||
261 | if (mtd->writesize_shift) | ||
262 | return sz & mtd->writesize_mask; | ||
263 | return do_div(sz, mtd->writesize); | ||
264 | } | ||
224 | 265 | ||
225 | /* Kernel-side ioctl definitions */ | 266 | /* Kernel-side ioctl definitions */ |
226 | 267 | ||
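With MTD sizes and offsets widened to 64 bits, the new mtd_div_by_eb()/mtd_mod_by_eb() helpers hide the do_div() call and skip it entirely when erasesize is a power of two. A small, assumed usage sketch:

/* Hedged sketch: split a 64-bit flash offset into an eraseblock index and
 * the byte offset inside that block using the new helpers. */
#include <linux/mtd/mtd.h>

static void example_locate_block(struct mtd_info *mtd, uint64_t ofs,
				 uint32_t *block, uint32_t *in_block)
{
	*block = mtd_div_by_eb(ofs, mtd);	/* ofs / erasesize */
	*in_block = mtd_mod_by_eb(ofs, mtd);	/* ofs % erasesize */
}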
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 733d3f3b4eb8..db5b63da2a7e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -335,17 +335,12 @@ struct nand_buffers { | |||
335 | * @erase_cmd: [INTERN] erase command write function, selectable due to AND support | 335 | * @erase_cmd: [INTERN] erase command write function, selectable due to AND support |
336 | * @scan_bbt: [REPLACEABLE] function to scan bad block table | 336 | * @scan_bbt: [REPLACEABLE] function to scan bad block table |
337 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) | 337 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) |
338 | * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress | ||
339 | * @state: [INTERN] the current state of the NAND device | 338 | * @state: [INTERN] the current state of the NAND device |
340 | * @oob_poi: poison value buffer | 339 | * @oob_poi: poison value buffer |
341 | * @page_shift: [INTERN] number of address bits in a page (column address bits) | 340 | * @page_shift: [INTERN] number of address bits in a page (column address bits) |
342 | * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock | 341 | * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock |
343 | * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry | 342 | * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry |
344 | * @chip_shift: [INTERN] number of address bits in one chip | 343 | * @chip_shift: [INTERN] number of address bits in one chip |
345 | * @datbuf: [INTERN] internal buffer for one page + oob | ||
346 | * @oobbuf: [INTERN] oob buffer for one eraseblock | ||
347 | * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized | ||
348 | * @data_poi: [INTERN] pointer to a data buffer | ||
349 | * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about | 344 | * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about |
350 | * special functionality. See the defines for further explanation | 345 | * special functionality. See the defines for further explanation |
351 | * @badblockpos: [INTERN] position of the bad block marker in the oob area | 346 | * @badblockpos: [INTERN] position of the bad block marker in the oob area |
@@ -399,7 +394,7 @@ struct nand_chip { | |||
399 | int bbt_erase_shift; | 394 | int bbt_erase_shift; |
400 | int chip_shift; | 395 | int chip_shift; |
401 | int numchips; | 396 | int numchips; |
402 | unsigned long chipsize; | 397 | uint64_t chipsize; |
403 | int pagemask; | 398 | int pagemask; |
404 | int pagebuf; | 399 | int pagebuf; |
405 | int subpagesize; | 400 | int subpagesize; |
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index c92b4d439609..a45dd831b3f8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h | |||
@@ -36,9 +36,9 @@ | |||
36 | 36 | ||
37 | struct mtd_partition { | 37 | struct mtd_partition { |
38 | char *name; /* identifier string */ | 38 | char *name; /* identifier string */ |
39 | u_int32_t size; /* partition size */ | 39 | uint64_t size; /* partition size */ |
40 | u_int32_t offset; /* offset within the master MTD space */ | 40 | uint64_t offset; /* offset within the master MTD space */ |
41 | u_int32_t mask_flags; /* master MTD flags to mask out for this partition */ | 41 | uint32_t mask_flags; /* master MTD flags to mask out for this partition */ |
42 | struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ | 42 | struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ |
43 | struct mtd_info **mtdp; /* pointer to store the MTD object */ | 43 | struct mtd_info **mtdp; /* pointer to store the MTD object */ |
44 | }; | 44 | }; |
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h new file mode 100644 index 000000000000..b730d4f84655 --- /dev/null +++ b/include/linux/mtd/pfow.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* Primary function overlay window definitions | ||
2 | * and service functions used by LPDDR chips | ||
3 | */ | ||
4 | #ifndef __LINUX_MTD_PFOW_H | ||
5 | #define __LINUX_MTD_PFOW_H | ||
6 | |||
7 | #include <linux/mtd/qinfo.h> | ||
8 | |||
9 | /* PFOW registers addressing */ | ||
10 | /* Address of symbol "P" */ | ||
11 | #define PFOW_QUERY_STRING_P 0x0000 | ||
12 | /* Address of symbol "F" */ | ||
13 | #define PFOW_QUERY_STRING_F 0x0002 | ||
14 | /* Address of symbol "O" */ | ||
15 | #define PFOW_QUERY_STRING_O 0x0004 | ||
16 | /* Address of symbol "W" */ | ||
17 | #define PFOW_QUERY_STRING_W 0x0006 | ||
18 | /* Identification info for LPDDR chip */ | ||
19 | #define PFOW_MANUFACTURER_ID 0x0020 | ||
20 | #define PFOW_DEVICE_ID 0x0022 | ||
21 | /* Address in PFOW where the program buffer can be found */ | ||
22 | #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 | ||
23 | /* Size of program buffer in words */ | ||
24 | #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 | ||
25 | /* Address command code register */ | ||
26 | #define PFOW_COMMAND_CODE 0x0080 | ||
27 | /* command data register */ | ||
28 | #define PFOW_COMMAND_DATA 0x0084 | ||
29 | /* command address register lower address bits */ | ||
30 | #define PFOW_COMMAND_ADDRESS_L 0x0088 | ||
31 | /* command address register upper address bits */ | ||
32 | #define PFOW_COMMAND_ADDRESS_H 0x008a | ||
33 | /* number of bytes to be programmed, lower address bits */ | ||
34 | #define PFOW_DATA_COUNT_L 0x0090 | ||
35 | /* number of bytes to be programmed, upper address bits */ | ||
36 | #define PFOW_DATA_COUNT_H 0x0092 | ||
37 | /* command execution register, the only possible value is 0x01 */ | ||
38 | #define PFOW_COMMAND_EXECUTE 0x00c0 | ||
39 | /* 0x01 should be written at this address to clear buffer */ | ||
40 | #define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 | ||
41 | /* device program/erase suspend register */ | ||
42 | #define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 | ||
43 | /* device status register */ | ||
44 | #define PFOW_DSR 0x00cc | ||
45 | |||
46 | /* LPDDR memory device command codes */ | ||
47 | /* They are possible values of PFOW command code register */ | ||
48 | #define LPDDR_WORD_PROGRAM 0x0041 | ||
49 | #define LPDDR_BUFF_PROGRAM 0x00E9 | ||
50 | #define LPDDR_BLOCK_ERASE 0x0020 | ||
51 | #define LPDDR_LOCK_BLOCK 0x0061 | ||
52 | #define LPDDR_UNLOCK_BLOCK 0x0062 | ||
53 | #define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 | ||
54 | #define LPDDR_INFO_QUERY 0x0098 | ||
55 | #define LPDDR_READ_OTP 0x0097 | ||
56 | #define LPDDR_PROG_OTP 0x00C0 | ||
57 | #define LPDDR_RESUME 0x00D0 | ||
58 | |||
59 | /* Defines possible value of PFOW command execution register */ | ||
60 | #define LPDDR_START_EXECUTION 0x0001 | ||
61 | |||
62 | /* Defines possible value of PFOW program/erase suspend register */ | ||
63 | #define LPDDR_SUSPEND 0x0001 | ||
64 | |||
65 | /* Possible values of PFOW device status register */ | ||
66 | /* access R - read; RC read & clearable */ | ||
67 | #define DSR_DPS (1<<1) /* RC; device protect status | ||
68 | * 0 - not protected 1 - locked */ | ||
69 | #define DSR_PSS (1<<2) /* R; program suspend status; | ||
70 | * 0-prog in progress/completed, | ||
71 | * 1- prog suspended */ | ||
72 | #define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, 1-Vpp low */ | ||
73 | #define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ | ||
74 | #define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; | ||
75 | * 0-erase/blank check succeeded, | ||
76 | * 1-blank check error */ | ||
77 | #define DSR_ESS (1<<6) /* R; erase suspend status; | ||
78 | * 0-erase in progress/complete, | ||
79 | * 1 erase suspended */ | ||
80 | #define DSR_READY_STATUS (1<<7) /* R; Device status | ||
81 | * 0-busy, | ||
82 | * 1-ready */ | ||
83 | #define DSR_RPS (0x3<<8) /* RC; region program status | ||
84 | * 00 - Success, | ||
85 | * 01-re-program attempt in region with | ||
86 | * object mode data, | ||
87 | * 10-object mode program attempt in | ||
88 | * region with control mode data, | ||
89 | * 11-attempt to program invalid half | ||
90 | * with 0x41 command */ | ||
91 | #define DSR_AOS (1<<12) /* RC; 1- AO related failure */ | ||
92 | #define DSR_AVAILABLE (1<<15) /* R; Device availability | ||
93 | * 1 - Device available | ||
94 | * 0 - not available */ | ||
95 | |||
96 | /* The superset of all possible error bits in DSR */ | ||
97 | #define DSR_ERR 0x133A | ||
98 | |||
99 | static inline void send_pfow_command(struct map_info *map, | ||
100 | unsigned long cmd_code, unsigned long adr, | ||
101 | unsigned long len, map_word *datum) | ||
102 | { | ||
103 | int bits_per_chip = map_bankwidth(map) * 8; | ||
104 | int chipnum; | ||
105 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
106 | chipnum = adr >> lpddr->chipshift; | ||
107 | |||
108 | map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); | ||
109 | map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), | ||
110 | map->pfow_base + PFOW_COMMAND_ADDRESS_L); | ||
111 | map_write(map, CMD(adr>>bits_per_chip), | ||
112 | map->pfow_base + PFOW_COMMAND_ADDRESS_H); | ||
113 | if (len) { | ||
114 | map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), | ||
115 | map->pfow_base + PFOW_DATA_COUNT_L); | ||
116 | map_write(map, CMD(len>>bits_per_chip), | ||
117 | map->pfow_base + PFOW_DATA_COUNT_H); | ||
118 | } | ||
119 | if (datum) | ||
120 | map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); | ||
121 | |||
122 | /* Command execution start */ | ||
123 | map_write(map, CMD(LPDDR_START_EXECUTION), | ||
124 | map->pfow_base + PFOW_COMMAND_EXECUTE); | ||
125 | } | ||
126 | |||
127 | static inline void print_drs_error(unsigned dsr) | ||
128 | { | ||
129 | int prog_status = (dsr & DSR_RPS) >> 8; | ||
130 | |||
131 | if (!(dsr & DSR_AVAILABLE)) | ||
132 | printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); | ||
133 | if (prog_status & 0x03) | ||
134 | printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " | ||
135 | "half with 41h command\n"); | ||
136 | else if (prog_status & 0x02) | ||
137 | printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " | ||
138 | "in region with Control Mode data\n"); | ||
139 | else if (prog_status & 0x01) | ||
140 | printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " | ||
141 | "with Object Mode data\n"); | ||
142 | if (!(dsr & DSR_READY_STATUS)) | ||
143 | printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); | ||
144 | if (dsr & DSR_ESS) | ||
145 | printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); | ||
146 | if (dsr & DSR_ERASE_STATUS) | ||
147 | printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); | ||
148 | if (dsr & DSR_PROGRAM_STATUS) | ||
149 | printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); | ||
150 | if (dsr & DSR_VPPS) | ||
151 | printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " | ||
152 | "aborted\n"); | ||
153 | if (dsr & DSR_PSS) | ||
154 | printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); | ||
155 | if (dsr & DSR_DPS) | ||
156 | printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " | ||
157 | "on locked block\n"); | ||
158 | } | ||
159 | #endif /* __LINUX_MTD_PFOW_H */ | ||
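send_pfow_command() above only latches a command into the overlay window; completion is observed through the DSR bits. The sketch below strings the pieces together for a block erase. The busy-wait loop and error handling are illustrative assumptions; a real driver would also handle chip selection, locking and timeouts.

/* Hedged sketch: erase one LPDDR block through the PFOW window and report
 * errors via print_drs_error().  No timeout handling, illustration only. */
#include <linux/mtd/pfow.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int example_pfow_erase_block(struct map_info *map, unsigned long adr)
{
	unsigned dsr;

	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

	/* Poll the device status register until the chip reports ready. */
	do {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (!(dsr & DSR_READY_STATUS))
			udelay(100);
	} while (!(dsr & DSR_READY_STATUS));

	if (dsr & DSR_ERR) {
		print_drs_error(dsr);
		return -EIO;
	}
	return 0;
}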
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index c8e63a5ee72e..76f7cabf07d3 100644 --- a/include/linux/mtd/physmap.h +++ b/include/linux/mtd/physmap.h | |||
@@ -24,6 +24,7 @@ struct physmap_flash_data { | |||
24 | unsigned int width; | 24 | unsigned int width; |
25 | void (*set_vpp)(struct map_info *, int); | 25 | void (*set_vpp)(struct map_info *, int); |
26 | unsigned int nr_parts; | 26 | unsigned int nr_parts; |
27 | unsigned int pfow_base; | ||
27 | struct mtd_partition *parts; | 28 | struct mtd_partition *parts; |
28 | }; | 29 | }; |
29 | 30 | ||
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h new file mode 100644 index 000000000000..7b3d487d8b3f --- /dev/null +++ b/include/linux/mtd/qinfo.h | |||
@@ -0,0 +1,91 @@ | |||
1 | #ifndef __LINUX_MTD_QINFO_H | ||
2 | #define __LINUX_MTD_QINFO_H | ||
3 | |||
4 | #include <linux/mtd/map.h> | ||
5 | #include <linux/wait.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/delay.h> | ||
8 | #include <linux/mtd/mtd.h> | ||
9 | #include <linux/mtd/flashchip.h> | ||
10 | #include <linux/mtd/partitions.h> | ||
11 | |||
12 | /* lpddr_private describes lpddr flash chip in memory map | ||
13 | * @ManufactId - Chip Manufacture ID | ||
14 | * @DevId - Chip Device ID | ||
15 | * @qinfo - pointer to qinfo records describing the chip | ||
16 | * @numchips - number of chips, including virtual RWW partitions | ||
17 | * @chipshift - Chip/partition size is 2^chipshift | ||
18 | * @chips - per-chip data structure | ||
19 | */ | ||
20 | struct lpddr_private { | ||
21 | uint16_t ManufactId; | ||
22 | uint16_t DevId; | ||
23 | struct qinfo_chip *qinfo; | ||
24 | int numchips; | ||
25 | unsigned long chipshift; | ||
26 | struct flchip chips[0]; | ||
27 | }; | ||
28 | |||
29 | /* qinfo_query_info structure contains request information for | ||
30 | * each qinfo record | ||
31 | * @major - major number of qinfo record | ||
32 | * @minor - minor number of qinfo record | ||
33 | * @id_str - descriptive string to access the record | ||
34 | * @desc - detailed description for the qinfo record | ||
35 | */ | ||
36 | struct qinfo_query_info { | ||
37 | uint8_t major; | ||
38 | uint8_t minor; | ||
39 | char *id_str; | ||
40 | char *desc; | ||
41 | }; | ||
42 | |||
43 | /* | ||
44 | * qinfo_chip structure contains necessary qinfo records data | ||
45 | * @DevSizeShift - Device size 2^n bytes | ||
46 | * @BufSizeShift - Program buffer size 2^n bytes | ||
47 | * @TotalBlocksNum - Total number of blocks | ||
48 | * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes | ||
49 | * @HWPartsNum - Number of hardware partitions | ||
50 | * @SuspEraseSupp - Suspend erase supported | ||
51 | * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec | ||
52 | * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec | ||
53 | * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec | ||
54 | */ | ||
55 | struct qinfo_chip { | ||
56 | /* General device info */ | ||
57 | uint16_t DevSizeShift; | ||
58 | uint16_t BufSizeShift; | ||
59 | /* Erase block information */ | ||
60 | uint16_t TotalBlocksNum; | ||
61 | uint16_t UniformBlockSizeShift; | ||
62 | /* Partition information */ | ||
63 | uint16_t HWPartsNum; | ||
64 | /* Optional features */ | ||
65 | uint16_t SuspEraseSupp; | ||
66 | /* Operation typical time */ | ||
67 | uint16_t SingleWordProgTime; | ||
68 | uint16_t ProgBufferTime; | ||
69 | uint16_t BlockEraseTime; | ||
70 | }; | ||
71 | |||
72 | /* defines for fixup usage */ | ||
73 | #define LPDDR_MFR_ANY 0xffff | ||
74 | #define LPDDR_ID_ANY 0xffff | ||
75 | #define NUMONYX_MFGR_ID 0x0089 | ||
76 | #define R18_DEVICE_ID_1G 0x893c | ||
77 | |||
78 | static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) | ||
79 | { | ||
80 | map_word val = { {0} }; | ||
81 | val.x[0] = cmd; | ||
82 | return val; | ||
83 | } | ||
84 | |||
85 | #define CMD(x) lpddr_build_cmd(x, map) | ||
86 | #define CMDVAL(cmd) cmd.x[0] | ||
87 | |||
88 | struct mtd_info *lpddr_cmdset(struct map_info *); | ||
89 | |||
90 | #endif | ||
91 | |||
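Most qinfo fields are shift-encoded. A tiny, assumed helper showing how a driver might turn them into byte sizes:

/* Illustrative helper (not part of the header): decode the shift-encoded
 * qinfo fields into byte sizes for a detected chip. */
#include <linux/mtd/qinfo.h>

static void example_qinfo_geometry(const struct qinfo_chip *q,
				   uint64_t *dev_size, uint32_t *block_size)
{
	*dev_size = 1ULL << q->DevSizeShift;		/* device size, bytes */
	*block_size = 1U << q->UniformBlockSizeShift;	/* eraseblock, bytes */
}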
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h new file mode 100644 index 000000000000..25f4d2a845c1 --- /dev/null +++ b/include/linux/mtd/sharpsl.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * SharpSL NAND support | ||
3 | * | ||
4 | * Copyright (C) 2008 Dmitry Baryshkov | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/mtd/nand.h> | ||
12 | #include <linux/mtd/nand_ecc.h> | ||
13 | #include <linux/mtd/partitions.h> | ||
14 | |||
15 | struct sharpsl_nand_platform_data { | ||
16 | struct nand_bbt_descr *badblock_pattern; | ||
17 | struct nand_ecclayout *ecc_layout; | ||
18 | struct mtd_partition *partitions; | ||
19 | unsigned int nr_partitions; | ||
20 | }; | ||
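A hedged example of how a board file might populate the new platform data; the partition layout below is made up for illustration, and leaving badblock_pattern/ecc_layout NULL simply falls back to the driver defaults.

/* Hypothetical board-file usage of sharpsl_nand_platform_data. */
#include <linux/kernel.h>
#include <linux/mtd/sharpsl.h>

static struct mtd_partition example_nand_parts[] = {
	{ .name = "boot",   .offset = 0, .size = 4 * 1024 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
	  .size = MTDPART_SIZ_FULL },
};

static struct sharpsl_nand_platform_data example_nand_data = {
	.partitions	= example_nand_parts,
	.nr_partitions	= ARRAY_SIZE(example_nand_parts),
	/* .badblock_pattern and .ecc_layout left NULL => driver defaults */
};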
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c28bbba3c23d..f24556813375 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1125,9 +1125,6 @@ struct softnet_data | |||
1125 | struct sk_buff *completion_queue; | 1125 | struct sk_buff *completion_queue; |
1126 | 1126 | ||
1127 | struct napi_struct backlog; | 1127 | struct napi_struct backlog; |
1128 | #ifdef CONFIG_NET_DMA | ||
1129 | struct dma_chan *net_dma; | ||
1130 | #endif | ||
1131 | }; | 1128 | }; |
1132 | 1129 | ||
1133 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | 1130 | DECLARE_PER_CPU(struct softnet_data,softnet_data); |
@@ -1373,8 +1370,14 @@ extern int netif_rx_ni(struct sk_buff *skb); | |||
1373 | #define HAVE_NETIF_RECEIVE_SKB 1 | 1370 | #define HAVE_NETIF_RECEIVE_SKB 1 |
1374 | extern int netif_receive_skb(struct sk_buff *skb); | 1371 | extern int netif_receive_skb(struct sk_buff *skb); |
1375 | extern void napi_gro_flush(struct napi_struct *napi); | 1372 | extern void napi_gro_flush(struct napi_struct *napi); |
1373 | extern int dev_gro_receive(struct napi_struct *napi, | ||
1374 | struct sk_buff *skb); | ||
1376 | extern int napi_gro_receive(struct napi_struct *napi, | 1375 | extern int napi_gro_receive(struct napi_struct *napi, |
1377 | struct sk_buff *skb); | 1376 | struct sk_buff *skb); |
1377 | extern void napi_reuse_skb(struct napi_struct *napi, | ||
1378 | struct sk_buff *skb); | ||
1379 | extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, | ||
1380 | struct napi_gro_fraginfo *info); | ||
1378 | extern int napi_gro_frags(struct napi_struct *napi, | 1381 | extern int napi_gro_frags(struct napi_struct *napi, |
1379 | struct napi_gro_fraginfo *info); | 1382 | struct napi_gro_fraginfo *info); |
1380 | extern void netif_nit_deliver(struct sk_buff *skb); | 1383 | extern void netif_nit_deliver(struct sk_buff *skb); |
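The extra GRO entry points exported here give drivers that receive into page fragments (napi_fraginfo_skb()/napi_gro_frags()) or that need finer control (dev_gro_receive(), napi_reuse_skb()) access to the GRO engine; the common case remains napi_gro_receive() from the poll routine, sketched below with a made-up example_rx_skb() standing in for descriptor-ring handling.

/* Hedged sketch of a NAPI poll routine feeding frames to GRO.
 * example_rx_skb() is a hypothetical helper, not a kernel API. */
#include <linux/netdevice.h>

static struct sk_buff *example_rx_skb(struct napi_struct *napi)
{
	return NULL;	/* placeholder: pull the next completed frame */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_rx_skb(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
		work++;
	}

	if (work < budget)
		napi_complete(napi);
	return work;
}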
diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h new file mode 100644 index 000000000000..9acb21572eaf --- /dev/null +++ b/include/linux/nwpserial.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Serial Port driver for a NWP uart device | ||
3 | * | ||
4 | * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | #ifndef _NWPSERIAL_H | ||
13 | #define _NWPSERIAL_H | ||
14 | |||
15 | int nwpserial_register_port(struct uart_port *port); | ||
16 | void nwpserial_unregister_port(int line); | ||
17 | |||
18 | #endif /* _NWPSERIAL_H */ | ||
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1ce9fe572e51..1d9518bc4c58 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start, | |||
164 | unsigned long oprofile_get_cpu_buffer_size(void); | 164 | unsigned long oprofile_get_cpu_buffer_size(void); |
165 | void oprofile_cpu_buffer_inc_smpl_lost(void); | 165 | void oprofile_cpu_buffer_inc_smpl_lost(void); |
166 | 166 | ||
167 | /* cpu buffer functions */ | ||
168 | |||
169 | struct op_sample; | ||
170 | |||
171 | struct op_entry { | ||
172 | struct ring_buffer_event *event; | ||
173 | struct op_sample *sample; | ||
174 | unsigned long irq_flags; | ||
175 | unsigned long size; | ||
176 | unsigned long *data; | ||
177 | }; | ||
178 | |||
179 | void oprofile_write_reserve(struct op_entry *entry, | ||
180 | struct pt_regs * const regs, | ||
181 | unsigned long pc, int code, int size); | ||
182 | int oprofile_add_data(struct op_entry *entry, unsigned long val); | ||
183 | int oprofile_write_commit(struct op_entry *entry); | ||
184 | |||
167 | #endif /* OPROFILE_H */ | 185 | #endif /* OPROFILE_H */ |
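The new op_entry interface splits sample writing into reserve/add/commit steps so architecture code can attach a variable amount of data to one sample. A hedged sketch of the sequence those prototypes suggest (the values and the size of 2 are arbitrary):

/* Hedged sketch of the reserve/add/commit sequence.  Return values of
 * oprofile_add_data() (ignored here) indicate whether the data fit. */
#include <linux/oprofile.h>
#include <linux/ptrace.h>

static void example_log_sample(struct pt_regs * const regs, unsigned long pc,
			       int code, unsigned long val0, unsigned long val1)
{
	struct op_entry entry;

	oprofile_write_reserve(&entry, regs, pc, code, 2);	/* 2 data words */
	oprofile_add_data(&entry, val0);
	oprofile_add_data(&entry, val1);
	oprofile_write_commit(&entry);
}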
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 1e6d34bfa094..602cc1fdee90 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -26,10 +26,6 @@ enum { | |||
26 | PCG_LOCK, /* page cgroup is locked */ | 26 | PCG_LOCK, /* page cgroup is locked */ |
27 | PCG_CACHE, /* charged as cache */ | 27 | PCG_CACHE, /* charged as cache */ |
28 | PCG_USED, /* this object is in use. */ | 28 | PCG_USED, /* this object is in use. */ |
29 | /* flags for LRU placement */ | ||
30 | PCG_ACTIVE, /* page is active in this cgroup */ | ||
31 | PCG_FILE, /* page is file system backed */ | ||
32 | PCG_UNEVICTABLE, /* page is unevictableable */ | ||
33 | }; | 29 | }; |
34 | 30 | ||
35 | #define TESTPCGFLAG(uname, lname) \ | 31 | #define TESTPCGFLAG(uname, lname) \ |
@@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE) | |||
50 | TESTPCGFLAG(Used, USED) | 46 | TESTPCGFLAG(Used, USED) |
51 | CLEARPCGFLAG(Used, USED) | 47 | CLEARPCGFLAG(Used, USED) |
52 | 48 | ||
53 | /* LRU management flags (from global-lru definition) */ | ||
54 | TESTPCGFLAG(File, FILE) | ||
55 | SETPCGFLAG(File, FILE) | ||
56 | CLEARPCGFLAG(File, FILE) | ||
57 | |||
58 | TESTPCGFLAG(Active, ACTIVE) | ||
59 | SETPCGFLAG(Active, ACTIVE) | ||
60 | CLEARPCGFLAG(Active, ACTIVE) | ||
61 | |||
62 | TESTPCGFLAG(Unevictable, UNEVICTABLE) | ||
63 | SETPCGFLAG(Unevictable, UNEVICTABLE) | ||
64 | CLEARPCGFLAG(Unevictable, UNEVICTABLE) | ||
65 | |||
66 | static inline int page_cgroup_nid(struct page_cgroup *pc) | 49 | static inline int page_cgroup_nid(struct page_cgroup *pc) |
67 | { | 50 | { |
68 | return page_to_nid(pc->page); | 51 | return page_to_nid(pc->page); |
@@ -105,4 +88,39 @@ static inline void page_cgroup_init(void) | |||
105 | } | 88 | } |
106 | 89 | ||
107 | #endif | 90 | #endif |
91 | |||
92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | ||
93 | #include <linux/swap.h> | ||
94 | extern struct mem_cgroup * | ||
95 | swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); | ||
96 | extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); | ||
97 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | ||
98 | extern void swap_cgroup_swapoff(int type); | ||
99 | #else | ||
100 | #include <linux/swap.h> | ||
101 | |||
102 | static inline | ||
103 | struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) | ||
104 | { | ||
105 | return NULL; | ||
106 | } | ||
107 | |||
108 | static inline | ||
109 | struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) | ||
110 | { | ||
111 | return NULL; | ||
112 | } | ||
113 | |||
114 | static inline int | ||
115 | swap_cgroup_swapon(int type, unsigned long max_pages) | ||
116 | { | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static inline void swap_cgroup_swapoff(int type) | ||
121 | { | ||
122 | return; | ||
123 | } | ||
124 | |||
125 | #endif | ||
108 | #endif | 126 | #endif |
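The swap_cgroup helpers added above keep a per-swap-entry record of the owning memory cgroup. The order in which a caller would use them is sketched below; the trigger points are described in comments and the wrapper function itself is an assumption.

/* Illustrative only: lifecycle of the swap-cgroup records. */
#include <linux/page_cgroup.h>

static void example_swap_cgroup_flow(int swap_type, unsigned long max_pages,
				     swp_entry_t ent, struct mem_cgroup *memcg)
{
	struct mem_cgroup *owner;

	/* swapon: allocate the per-device record array. */
	swap_cgroup_swapon(swap_type, max_pages);

	/* swap-out: remember which cgroup owned the page, keyed by entry. */
	swap_cgroup_record(ent, memcg);

	/* swap-in / uncharge: look the owner back up. */
	owner = lookup_swap_cgroup(ent);
	(void)owner;

	/* swapoff: release the record array. */
	swap_cgroup_swapoff(swap_type);
}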
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index f7cc204fab07..20998746518e 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
@@ -223,7 +223,6 @@ struct hotplug_params { | |||
223 | #ifdef CONFIG_ACPI | 223 | #ifdef CONFIG_ACPI |
224 | #include <acpi/acpi.h> | 224 | #include <acpi/acpi.h> |
225 | #include <acpi/acpi_bus.h> | 225 | #include <acpi/acpi_bus.h> |
226 | #include <acpi/actypes.h> | ||
227 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, | 226 | extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, |
228 | struct hotplug_params *hpp); | 227 | struct hotplug_params *hpp); |
229 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); | 228 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); |
diff --git a/include/linux/pid.h b/include/linux/pid.h index bb206c56d1f0..49f1c2f66e95 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
@@ -123,6 +123,24 @@ extern struct pid *alloc_pid(struct pid_namespace *ns); | |||
123 | extern void free_pid(struct pid *pid); | 123 | extern void free_pid(struct pid *pid); |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * ns_of_pid() returns the pid namespace in which the specified pid was | ||
127 | * allocated. | ||
128 | * | ||
129 | * NOTE: | ||
130 | * ns_of_pid() is expected to be called for a process (task) that has | ||
131 | * an attached 'struct pid' (see attach_pid(), detach_pid()), i.e. @pid | ||
132 | * is expected to be non-NULL. If @pid is NULL, the caller should handle | ||
133 | * the resulting NULL pid-ns. | ||
134 | */ | ||
135 | static inline struct pid_namespace *ns_of_pid(struct pid *pid) | ||
136 | { | ||
137 | struct pid_namespace *ns = NULL; | ||
138 | if (pid) | ||
139 | ns = pid->numbers[pid->level].ns; | ||
140 | return ns; | ||
141 | } | ||
142 | |||
143 | /* | ||
126 | * the helpers to get the pid's id seen from different namespaces | 144 | * the helpers to get the pid's id seen from different namespaces |
127 | * | 145 | * |
128 | * pid_nr() : global id, i.e. the id seen from the init namespace; | 146 | * pid_nr() : global id, i.e. the id seen from the init namespace; |
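A small usage sketch for ns_of_pid(); the wrapper and its RCU usage are assumptions for illustration, and namespace lifetime handling is deliberately omitted.

/* Hedged sketch: resolve a task's pid namespace, tolerating a task whose
 * struct pid has already been detached (ns_of_pid() then sees NULL). */
#include <linux/pid.h>
#include <linux/sched.h>

static struct pid_namespace *example_ns_of_task(struct task_struct *tsk)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = ns_of_pid(task_pid(tsk));	/* NULL if no struct pid attached */
	rcu_read_unlock();

	return ns;	/* refcounting/lifetime ignored in this sketch */
}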
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index d82fe825d62f..38d10326246a 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h | |||
@@ -79,11 +79,7 @@ static inline void zap_pid_ns_processes(struct pid_namespace *ns) | |||
79 | } | 79 | } |
80 | #endif /* CONFIG_PID_NS */ | 80 | #endif /* CONFIG_PID_NS */ |
81 | 81 | ||
82 | static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) | 82 | extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); |
83 | { | ||
84 | return tsk->nsproxy->pid_ns; | ||
85 | } | ||
86 | |||
87 | void pidhash_init(void); | 83 | void pidhash_init(void); |
88 | void pidmap_init(void); | 84 | void pidmap_init(void); |
89 | 85 | ||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 8fc909ef6787..9743e4dbc918 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -137,6 +137,9 @@ struct mddev_s | |||
137 | struct gendisk *gendisk; | 137 | struct gendisk *gendisk; |
138 | 138 | ||
139 | struct kobject kobj; | 139 | struct kobject kobj; |
140 | int hold_active; | ||
141 | #define UNTIL_IOCTL 1 | ||
142 | #define UNTIL_STOP 2 | ||
140 | 143 | ||
141 | /* Superblock information */ | 144 | /* Superblock information */ |
142 | int major_version, | 145 | int major_version, |
@@ -215,6 +218,9 @@ struct mddev_s | |||
215 | #define MD_RECOVERY_FROZEN 9 | 218 | #define MD_RECOVERY_FROZEN 9 |
216 | 219 | ||
217 | unsigned long recovery; | 220 | unsigned long recovery; |
221 | int recovery_disabled; /* if we detect that recovery | ||
222 | * will always fail, set this | ||
223 | * so we don't loop trying */ | ||
218 | 224 | ||
219 | int in_sync; /* know to not need resync */ | 225 | int in_sync; /* know to not need resync */ |
220 | struct mutex reconfig_mutex; | 226 | struct mutex reconfig_mutex; |
@@ -244,6 +250,9 @@ struct mddev_s | |||
244 | struct sysfs_dirent *sysfs_state; /* handle for 'array_state' | 250 | struct sysfs_dirent *sysfs_state; /* handle for 'array_state' |
245 | * file in sysfs. | 251 | * file in sysfs. |
246 | */ | 252 | */ |
253 | struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ | ||
254 | |||
255 | struct work_struct del_work; /* used for delayed sysfs removal */ | ||
247 | 256 | ||
248 | spinlock_t write_lock; | 257 | spinlock_t write_lock; |
249 | wait_queue_head_t sb_wait; /* for waiting on superblock updates */ | 258 | wait_queue_head_t sb_wait; /* for waiting on superblock updates */ |
@@ -334,17 +343,14 @@ static inline char * mdname (mddev_t * mddev) | |||
334 | * iterates through some rdev ringlist. It's safe to remove the | 343 | * iterates through some rdev ringlist. It's safe to remove the |
335 | * current 'rdev'. Dont touch 'tmp' though. | 344 | * current 'rdev'. Dont touch 'tmp' though. |
336 | */ | 345 | */ |
337 | #define rdev_for_each_list(rdev, tmp, list) \ | 346 | #define rdev_for_each_list(rdev, tmp, head) \ |
338 | \ | 347 | list_for_each_entry_safe(rdev, tmp, head, same_set) |
339 | for ((tmp) = (list).next; \ | 348 | |
340 | (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ | ||
341 | (tmp) = (tmp)->next, (tmp)->prev != &(list) \ | ||
342 | ; ) | ||
343 | /* | 349 | /* |
344 | * iterates through the 'same array disks' ringlist | 350 | * iterates through the 'same array disks' ringlist |
345 | */ | 351 | */ |
346 | #define rdev_for_each(rdev, tmp, mddev) \ | 352 | #define rdev_for_each(rdev, tmp, mddev) \ |
347 | rdev_for_each_list(rdev, tmp, (mddev)->disks) | 353 | list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) |
348 | 354 | ||
349 | #define rdev_for_each_rcu(rdev, mddev) \ | 355 | #define rdev_for_each_rcu(rdev, mddev) \ |
350 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) | 356 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) |
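rdev_for_each_list() and rdev_for_each() are now thin wrappers around list_for_each_entry_safe(), so iteration reads like any other kernel list walk. A hedged sketch (the loop body is illustrative):

/* Hedged sketch: walk an array's component devices; safe against removing
 * the current rdev inside the loop, per the macro's contract. */
#include <linux/raid/md_k.h>

static void example_walk_rdevs(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		/* ... operate on the healthy rdev ... */
	}
}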
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 8b4de4a41ff1..9491026afe66 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h | |||
@@ -194,6 +194,8 @@ static inline __u64 md_event(mdp_super_t *sb) { | |||
194 | return (ev<<32)| sb->events_lo; | 194 | return (ev<<32)| sb->events_lo; |
195 | } | 195 | } |
196 | 196 | ||
197 | #define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1) | ||
198 | |||
197 | /* | 199 | /* |
198 | * The version-1 superblock : | 200 | * The version-1 superblock : |
199 | * All numeric fields are little-endian. | 201 | * All numeric fields are little-endian. |
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h index 1b2dda035f8e..fd42aa87c391 100644 --- a/include/linux/raid/raid0.h +++ b/include/linux/raid/raid0.h | |||
@@ -5,9 +5,9 @@ | |||
5 | 5 | ||
6 | struct strip_zone | 6 | struct strip_zone |
7 | { | 7 | { |
8 | sector_t zone_offset; /* Zone offset in md_dev */ | 8 | sector_t zone_start; /* Zone offset in md_dev (in sectors) */ |
9 | sector_t dev_offset; /* Zone offset in real dev */ | 9 | sector_t dev_start; /* Zone offset in real dev (in sectors) */ |
10 | sector_t size; /* Zone size */ | 10 | sector_t sectors; /* Zone size in sectors */ |
11 | int nb_dev; /* # of devices attached to the zone */ | 11 | int nb_dev; /* # of devices attached to the zone */ |
12 | mdk_rdev_t **dev; /* Devices attached to the zone */ | 12 | mdk_rdev_t **dev; /* Devices attached to the zone */ |
13 | }; | 13 | }; |
@@ -19,8 +19,8 @@ struct raid0_private_data | |||
19 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ | 19 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ |
20 | int nr_strip_zones; | 20 | int nr_strip_zones; |
21 | 21 | ||
22 | sector_t hash_spacing; | 22 | sector_t spacing; |
23 | int preshift; /* shift this before divide by hash_spacing */ | 23 | int sector_shift; /* shift this before divide by spacing */ |
24 | }; | 24 | }; |
25 | 25 | ||
26 | typedef struct raid0_private_data raid0_conf_t; | 26 | typedef struct raid0_private_data raid0_conf_t; |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index afdc4558bb94..801bf77ff4e2 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -104,10 +104,10 @@ struct regulator; | |||
104 | /** | 104 | /** |
105 | * struct regulator_bulk_data - Data used for bulk regulator operations. | 105 | * struct regulator_bulk_data - Data used for bulk regulator operations. |
106 | * | 106 | * |
107 | * @supply The name of the supply. Initialised by the user before | 107 | * @supply: The name of the supply. Initialised by the user before |
108 | * using the bulk regulator APIs. | 108 | * using the bulk regulator APIs. |
109 | * @consumer The regulator consumer for the supply. This will be managed | 109 | * @consumer: The regulator consumer for the supply. This will be managed |
110 | * by the bulk API. | 110 | * by the bulk API. |
111 | * | 111 | * |
112 | * The regulator APIs provide a series of regulator_bulk_() API calls as | 112 | * The regulator APIs provide a series of regulator_bulk_() API calls as |
113 | * a convenience to consumers which require multiple supplies. This | 113 | * a convenience to consumers which require multiple supplies. This |
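For context, the corrected kernel-doc above belongs to the bulk-consumer API. A hedged sketch of how a driver fills in the supply names and lets the core manage the consumer handles (the names and device pointer are illustrative):

/* Hedged sketch of the bulk-consumer pattern. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "AVDD" },
	{ .supply = "DVDD" },
};

static int example_power_up(struct device *dev)
{
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				 example_supplies);
	if (ret)
		return ret;

	/* The .consumer fields are now managed by the bulk API. */
	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}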
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index e37d80561985..2dae05705f13 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -24,7 +24,33 @@ struct regulator_init_data; | |||
24 | /** | 24 | /** |
25 | * struct regulator_ops - regulator operations. | 25 | * struct regulator_ops - regulator operations. |
26 | * | 26 | * |
27 | * This struct describes regulator operations. | 27 | * This struct describes regulator operations which can be implemented by |
28 | * regulator chip drivers. | ||
29 | * | ||
30 | * @enable: Enable the regulator. | ||
31 | * @disable: Disable the regulator. | ||
32 | * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. | ||
33 | * | ||
34 | * @set_voltage: Set the voltage for the regulator within the range specified. | ||
35 | * The driver should select the voltage closest to min_uV. | ||
36 | * @get_voltage: Return the currently configured voltage for the regulator. | ||
37 | * | ||
38 | * @set_current_limit: Configure a limit for a current-limited regulator. | ||
39 | * @get_current_limit: Get the limit for a current-limited regulator. | ||
40 | * | ||
41 | * @set_mode: Set the operating mode for the regulator. | ||
42 | * @get_mode: Get the current operating mode for the regulator. | ||
43 | * @get_optimum_mode: Get the most efficient operating mode for the regulator | ||
44 | * when running with the specified parameters. | ||
45 | * | ||
46 | * @set_suspend_voltage: Set the voltage for the regulator when the system | ||
47 | * is suspended. | ||
48 | * @set_suspend_enable: Mark the regulator as enabled when the system is | ||
49 | * suspended. | ||
50 | * @set_suspend_disable: Mark the regulator as disabled when the system is | ||
51 | * suspended. | ||
52 | * @set_suspend_mode: Set the operating mode for the regulator when the | ||
53 | * system is suspended. | ||
28 | */ | 54 | */ |
29 | struct regulator_ops { | 55 | struct regulator_ops { |
30 | 56 | ||
@@ -75,6 +101,15 @@ enum regulator_type { | |||
75 | /** | 101 | /** |
76 | * struct regulator_desc - Regulator descriptor | 102 | * struct regulator_desc - Regulator descriptor |
77 | * | 103 | * |
104 | * Each regulator registered with the core is described with a structure of | ||
105 | * this type. | ||
106 | * | ||
107 | * @name: Identifying name for the regulator. | ||
108 | * @id: Numerical identifier for the regulator. | ||
109 | * @ops: Regulator operations table. | ||
110 | * @irq: Interrupt number for the regulator. | ||
111 | * @type: Indicates if the regulator is a voltage or current regulator. | ||
112 | * @owner: Module providing the regulator, used for refcounting. | ||
78 | */ | 113 | */ |
79 | struct regulator_desc { | 114 | struct regulator_desc { |
80 | const char *name; | 115 | const char *name; |
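The newly documented fields are what a regulator chip driver fills in before registering with the core. A minimal sketch under those assumptions is shown below; the "my-ldo" name and the no-op callbacks are invented, and registration with the core is omitted.

#include <linux/module.h>
#include <linux/regulator/driver.h>

/* Hypothetical chip callbacks; a real driver would talk to hardware here. */
static int my_ldo_enable(struct regulator_dev *rdev)     { return 0; }
static int my_ldo_disable(struct regulator_dev *rdev)    { return 0; }
static int my_ldo_is_enabled(struct regulator_dev *rdev) { return 1; }

static struct regulator_ops my_ldo_ops = {
	.enable		= my_ldo_enable,
	.disable	= my_ldo_disable,
	.is_enabled	= my_ldo_is_enabled,
};

/* Descriptor matching the documented regulator_desc fields. */
static struct regulator_desc my_ldo_desc = {
	.name	= "my-ldo",
	.id	= 0,
	.ops	= &my_ldo_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};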
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index c6d69331a81e..3794773b23d2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -44,6 +44,10 @@ struct regulator; | |||
44 | * struct regulator_state - regulator state during low power system states | 44 | * struct regulator_state - regulator state during low power system states |
45 | * | 45 | * |
46 | * This describes a regulator's state during a system-wide low power state. | 46 | * This describes a regulator's state during a system-wide low power state. |
47 | * | ||
48 | * @uV: Operating voltage during suspend. | ||
49 | * @mode: Operating mode during suspend. | ||
50 | * @enabled: Enabled during suspend. | ||
47 | */ | 51 | */ |
48 | struct regulator_state { | 52 | struct regulator_state { |
49 | int uV; /* suspend voltage */ | 53 | int uV; /* suspend voltage */ |
@@ -55,6 +59,30 @@ struct regulator_state { | |||
55 | * struct regulation_constraints - regulator operating constraints. | 59 | * struct regulation_constraints - regulator operating constraints. |
56 | * | 60 | * |
57 | * This struct describes regulator and board/machine specific constraints. | 61 | * This struct describes regulator and board/machine specific constraints. |
62 | * | ||
63 | * @name: Descriptive name for the constraints, used for display purposes. | ||
64 | * | ||
65 | * @min_uV: Smallest voltage consumers may set. | ||
66 | * @max_uV: Largest voltage consumers may set. | ||
67 | * | ||
68 | * @min_uA: Smallest current consumers may set. | ||
69 | * @max_uA: Largest current consumers may set. | ||
70 | * | ||
71 | * @valid_modes_mask: Mask of modes which may be configured by consumers. | ||
72 | * @valid_ops_mask: Operations which may be performed by consumers. | ||
73 | * | ||
74 | * @always_on: Set if the regulator should never be disabled. | ||
75 | * @boot_on: Set if the regulator is enabled when the system is initially | ||
76 | * started. | ||
77 | * @apply_uV: Apply the voltage constraint when initialising. | ||
78 | * | ||
79 | * @input_uV: Input voltage for regulator when supplied by another regulator. | ||
80 | * | ||
81 | * @state_disk: State for regulator when system is suspended in disk mode. | ||
82 | * @state_mem: State for regulator when system is suspended in mem mode. | ||
83 | * @state_standby: State for regulator when system is suspended in standby | ||
84 | * mode. | ||
85 | * @initial_state: Suspend state to set by default. | ||
58 | */ | 86 | */ |
59 | struct regulation_constraints { | 87 | struct regulation_constraints { |
60 | 88 | ||
@@ -93,6 +121,9 @@ struct regulation_constraints { | |||
93 | * struct regulator_consumer_supply - supply -> device mapping | 121 | * struct regulator_consumer_supply - supply -> device mapping |
94 | * | 122 | * |
95 | * This maps a supply name to a device. | 123 | * This maps a supply name to a device. |
124 | * | ||
125 | * @dev: Device structure for the consumer. | ||
126 | * @supply: Name for the supply. | ||
96 | */ | 127 | */ |
97 | struct regulator_consumer_supply { | 128 | struct regulator_consumer_supply { |
98 | struct device *dev; /* consumer */ | 129 | struct device *dev; /* consumer */ |
@@ -103,6 +134,16 @@ struct regulator_consumer_supply { | |||
103 | * struct regulator_init_data - regulator platform initialisation data. | 134 | * struct regulator_init_data - regulator platform initialisation data. |
104 | * | 135 | * |
105 | * Initialisation constraints, our supply and consumer supplies. | 136 | * Initialisation constraints, our supply and consumer supplies. |
137 | * | ||
138 | * @supply_regulator_dev: Parent regulator (if any). | ||
139 | * | ||
140 | * @constraints: Constraints. These must be specified for the regulator to | ||
141 | * be usable. | ||
142 | * @num_consumer_supplies: Number of consumer device supplies. | ||
143 | * @consumer_supplies: Consumer device supply configuration. | ||
144 | * | ||
145 | * @regulator_init: Callback invoked when the regulator has been registered. | ||
146 | * @driver_data: Data passed to regulator_init. | ||
106 | */ | 147 | */ |
107 | struct regulator_init_data { | 148 | struct regulator_init_data { |
108 | struct device *supply_regulator_dev; /* or NULL for LINE */ | 149 | struct device *supply_regulator_dev; /* or NULL for LINE */ |
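The constraints and consumer-supply fields documented above are normally provided by board code. The sketch below shows one plausible board-file definition; the platform device, supply name "vmmc", constraint name and voltage range are all hypothetical.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>

/* Hypothetical consumer device defined elsewhere in the board file. */
extern struct platform_device my_mmc_device;

static struct regulator_consumer_supply my_ldo_consumers[] = {
	{
		.dev    = &my_mmc_device.dev,
		.supply = "vmmc",
	},
};

static struct regulator_init_data my_ldo_init_data = {
	.constraints = {
		.name		= "VMMC",
		.min_uV		= 1800000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
		.boot_on	= 1,
	},
	.num_consumer_supplies	= ARRAY_SIZE(my_ldo_consumers),
	.consumer_supplies	= my_ldo_consumers,
};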
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 271c1c2c9f6f..dede0a2cfc45 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h | |||
@@ -43,6 +43,10 @@ struct res_counter { | |||
43 | * the routines below consider this to be IRQ-safe | 43 | * the routines below consider this to be IRQ-safe |
44 | */ | 44 | */ |
45 | spinlock_t lock; | 45 | spinlock_t lock; |
46 | /* | ||
47 | * Parent counter, used for hierarchical resource accounting | ||
48 | */ | ||
49 | struct res_counter *parent; | ||
46 | }; | 50 | }; |
47 | 51 | ||
48 | /** | 52 | /** |
@@ -87,7 +91,7 @@ enum { | |||
87 | * helpers for accounting | 91 | * helpers for accounting |
88 | */ | 92 | */ |
89 | 93 | ||
90 | void res_counter_init(struct res_counter *counter); | 94 | void res_counter_init(struct res_counter *counter, struct res_counter *parent); |
91 | 95 | ||
92 | /* | 96 | /* |
93 | * charge - try to consume more resource. | 97 | * charge - try to consume more resource. |
@@ -103,7 +107,7 @@ void res_counter_init(struct res_counter *counter); | |||
103 | int __must_check res_counter_charge_locked(struct res_counter *counter, | 107 | int __must_check res_counter_charge_locked(struct res_counter *counter, |
104 | unsigned long val); | 108 | unsigned long val); |
105 | int __must_check res_counter_charge(struct res_counter *counter, | 109 | int __must_check res_counter_charge(struct res_counter *counter, |
106 | unsigned long val); | 110 | unsigned long val, struct res_counter **limit_fail_at); |
107 | 111 | ||
108 | /* | 112 | /* |
109 | * uncharge - tell that some portion of the resource is released | 113 | * uncharge - tell that some portion of the resource is released |
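With the new parent pointer, res_counter_init() takes the parent counter and res_counter_charge() reports, via limit_fail_at, which counter in the chain rejected the charge. A hedged sketch of the calling convention follows; the static counters and helper functions are illustrative, not the memory controller's actual code.

#include <linux/kernel.h>
#include <linux/res_counter.h>

/* Illustrative parent/child pair; in practice these live in cgroup state. */
static struct res_counter parent_cnt, child_cnt;

static void __init my_counters_init(void)
{
	res_counter_init(&parent_cnt, NULL);		/* root of the hierarchy */
	res_counter_init(&child_cnt, &parent_cnt);	/* charges propagate up  */
}

static int my_charge(unsigned long bytes)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(&child_cnt, bytes, &fail_at);
	if (ret)
		/* fail_at points at whichever counter in the chain hit its limit. */
		pr_debug("charge of %lu failed\n", bytes);

	return ret;
}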
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index b4199841f1fc..90bbbf0b1161 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -161,6 +161,9 @@ | |||
161 | 161 | ||
162 | #define PORT_S3C6400 84 | 162 | #define PORT_S3C6400 84 |
163 | 163 | ||
164 | /* NWPSERIAL */ | ||
165 | #define PORT_NWPSERIAL 85 | ||
166 | |||
164 | #ifdef __KERNEL__ | 167 | #ifdef __KERNEL__ |
165 | 168 | ||
166 | #include <linux/compiler.h> | 169 | #include <linux/compiler.h> |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2ce8207686e2..2b409c44db83 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); | |||
232 | 232 | ||
233 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); | 233 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); |
234 | extern int hibernate(void); | 234 | extern int hibernate(void); |
235 | extern int hibernate_nvs_register(unsigned long start, unsigned long size); | ||
236 | extern int hibernate_nvs_alloc(void); | ||
237 | extern void hibernate_nvs_free(void); | ||
238 | extern void hibernate_nvs_save(void); | ||
239 | extern void hibernate_nvs_restore(void); | ||
235 | #else /* CONFIG_HIBERNATION */ | 240 | #else /* CONFIG_HIBERNATION */ |
236 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 241 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
237 | static inline void swsusp_set_page_free(struct page *p) {} | 242 | static inline void swsusp_set_page_free(struct page *p) {} |
@@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {} | |||
239 | 244 | ||
240 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} | 245 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} |
241 | static inline int hibernate(void) { return -ENOSYS; } | 246 | static inline int hibernate(void) { return -ENOSYS; } |
247 | static inline int hibernate_nvs_register(unsigned long a, unsigned long b) | ||
248 | { | ||
249 | return 0; | ||
250 | } | ||
251 | static inline int hibernate_nvs_alloc(void) { return 0; } | ||
252 | static inline void hibernate_nvs_free(void) {} | ||
253 | static inline void hibernate_nvs_save(void) {} | ||
254 | static inline void hibernate_nvs_restore(void) {} | ||
242 | #endif /* CONFIG_HIBERNATION */ | 255 | #endif /* CONFIG_HIBERNATION */ |
243 | 256 | ||
244 | #ifdef CONFIG_PM_SLEEP | 257 | #ifdef CONFIG_PM_SLEEP |
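The hibernate_nvs_* calls let platform code preserve an NVS region across hibernation: register the region early, allocate backing storage and save before the snapshot, then restore and free after resume. The sketch below only illustrates that call ordering; the region addresses and hook names are made up, and the real caller is platform firmware support code rather than a standalone module.

#include <linux/init.h>
#include <linux/suspend.h>

/* Hypothetical platform NVS window; addresses are made up. */
#define MY_NVS_START	0x000f0000UL
#define MY_NVS_SIZE	0x00010000UL

static int __init my_platform_nvs_init(void)
{
	/* Tell the hibernation core which region must survive S4. */
	return hibernate_nvs_register(MY_NVS_START, MY_NVS_SIZE);
}

static int my_pre_snapshot(void)
{
	int error = hibernate_nvs_alloc();	/* backing pages for the copy */

	if (!error)
		hibernate_nvs_save();		/* copy NVS contents aside */
	return error;
}

static void my_post_restore(void)
{
	hibernate_nvs_restore();		/* put NVS contents back */
	hibernate_nvs_free();
}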
diff --git a/include/linux/swap.h b/include/linux/swap.h index 91dee50fe260..d30215578877 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -214,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page) | |||
214 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | 214 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
215 | gfp_t gfp_mask); | 215 | gfp_t gfp_mask); |
216 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, | 216 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, |
217 | gfp_t gfp_mask); | 217 | gfp_t gfp_mask, bool noswap, |
218 | unsigned int swappiness); | ||
218 | extern int __isolate_lru_page(struct page *page, int mode, int file); | 219 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
219 | extern unsigned long shrink_all_memory(unsigned long nr_pages); | 220 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
220 | extern int vm_swappiness; | 221 | extern int vm_swappiness; |
@@ -333,6 +334,22 @@ static inline void disable_swap_token(void) | |||
333 | put_swap_token(swap_token_mm); | 334 | put_swap_token(swap_token_mm); |
334 | } | 335 | } |
335 | 336 | ||
337 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | ||
338 | extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); | ||
339 | #else | ||
340 | static inline void | ||
341 | mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) | ||
342 | { | ||
343 | } | ||
344 | #endif | ||
345 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | ||
346 | extern void mem_cgroup_uncharge_swap(swp_entry_t ent); | ||
347 | #else | ||
348 | static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) | ||
349 | { | ||
350 | } | ||
351 | #endif | ||
352 | |||
336 | #else /* CONFIG_SWAP */ | 353 | #else /* CONFIG_SWAP */ |
337 | 354 | ||
338 | #define nr_swap_pages 0L | 355 | #define nr_swap_pages 0L |
@@ -409,6 +426,12 @@ static inline swp_entry_t get_swap_page(void) | |||
409 | #define has_swap_token(x) 0 | 426 | #define has_swap_token(x) 0 |
410 | #define disable_swap_token() do { } while(0) | 427 | #define disable_swap_token() do { } while(0) |
411 | 428 | ||
429 | static inline int mem_cgroup_cache_charge_swapin(struct page *page, | ||
430 | struct mm_struct *mm, gfp_t mask, bool locked) | ||
431 | { | ||
432 | return 0; | ||
433 | } | ||
434 | |||
412 | #endif /* CONFIG_SWAP */ | 435 | #endif /* CONFIG_SWAP */ |
413 | #endif /* __KERNEL__*/ | 436 | #endif /* __KERNEL__*/ |
414 | #endif /* _LINUX_SWAP_H */ | 437 | #endif /* _LINUX_SWAP_H */ |
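The extended try_to_free_mem_cgroup_pages() prototype adds a noswap flag and a per-call swappiness value. A hedged sketch of a caller is shown below; the wrapper name is invented, the mem_cgroup pointer is assumed to come from the memory controller, and 60 is simply the conventional default swappiness.

#include <linux/gfp.h>
#include <linux/memcontrol.h>
#include <linux/swap.h>

/* Shrink a memory cgroup, optionally without touching swap. */
static unsigned long my_shrink_group(struct mem_cgroup *mem, bool noswap)
{
	/* swappiness mirrors vm_swappiness semantics, range 0..100 */
	return try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, noswap, 60);
}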