Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/acpi.h              17
-rw-r--r--   include/linux/async_tx.h          17
-rw-r--r--   include/linux/atmel-mci.h          6
-rw-r--r--   include/linux/dmaengine.h        181
-rw-r--r--   include/linux/dw_dmac.h           31
-rw-r--r--   include/linux/mtd/cfi.h            1
-rw-r--r--   include/linux/mtd/ftl.h           38
-rw-r--r--   include/linux/mtd/map.h            1
-rw-r--r--   include/linux/mtd/mtd.h           75
-rw-r--r--   include/linux/mtd/nand.h           7
-rw-r--r--   include/linux/mtd/partitions.h     6
-rw-r--r--   include/linux/mtd/pfow.h         159
-rw-r--r--   include/linux/mtd/physmap.h        1
-rw-r--r--   include/linux/mtd/qinfo.h         91
-rw-r--r--   include/linux/mtd/sharpsl.h       20
-rw-r--r--   include/linux/netdevice.h          3
-rw-r--r--   include/linux/oprofile.h          18
-rw-r--r--   include/linux/pci_hotplug.h        1
-rw-r--r--   include/linux/suspend.h           13
19 files changed, 467 insertions, 219 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fba8051fb29..6fce2fc2d12 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -131,22 +131,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
  */
 void acpi_unregister_gsi (u32 gsi);
 
-struct acpi_prt_entry {
-	struct list_head	node;
-	struct acpi_pci_id	id;
-	u8			pin;
-	struct {
-		acpi_handle		handle;
-		u32			index;
-	} link;
-	u32			irq;
-};
-
-struct acpi_prt_list {
-	int			count;
-	struct list_head	entries;
-};
-
 struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
@@ -270,6 +254,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
+void __init acpi_s4_no_nvs(void);
 #endif /* CONFIG_PM_SLEEP */
 #else	/* CONFIG_ACPI */
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc436..45f6297821b 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,9 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *host_chan)
-{
-	do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2a2213eefd8..2f1f95737ac 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,7 +3,7 @@
 
 #define ATMEL_MCI_MAX_NR_SLOTS	2
 
-struct dma_slave;
+#include <linux/dw_dmac.h>
 
 /**
  * struct mci_slot_pdata - board-specific per-slot configuration
@@ -28,11 +28,11 @@ struct mci_slot_pdata {
 
 /**
  * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers, or NULL.
+ * @dma_slave: DMA slave interface to use in data transfers.
  * @slot: Per-slot configuration data.
  */
 struct mci_platform_data {
-	struct dma_slave	*dma_slave;
+	struct dw_dma_slave	dma_slave;
 	struct mci_slot_pdata	slot[ATMEL_MCI_MAX_NR_SLOTS];
 };
 
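
With the generic struct dma_slave gone, board code embeds the controller-specific struct dw_dma_slave directly in the MMC platform data. Below is a minimal sketch of what a board file might do; the platform device name dw_dmac0_device and the single-slot wiring are illustrative assumptions, not taken from this patch:

/* Hypothetical board setup (sketch): route atmel-mci data transfers
 * through the first DesignWare DMA controller. */
static struct mci_platform_data mci0_data = {
	.slot[0] = {
		.bus_width	= 4,
		.detect_pin	= -1,	/* no card-detect GPIO wired up */
		.wp_pin		= -1,	/* no write-protect GPIO */
	},
};

static void __init board_setup_mci(void)
{
	/* Bind the slave to its DMA master; the MMC driver fills in
	 * tx_reg/rx_reg/reg_width once it knows its own registers. */
	mci0_data.dma_slave.dma_dev = &dw_dmac0_device.dev;
}
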
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b084eb5..64dea2ab326 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,32 +29,6 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-	DMA_RESOURCE_SUSPEND,
-	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_AVAILABLE,
-	DMA_RESOURCE_REMOVED,
-};
-
-/**
- * enum dma_state_client - state of the channel in the client
- * @DMA_ACK: client would like to use, or was using this channel
- * @DMA_DUP: client has already seen this channel, or is not using this channel
- * @DMA_NAK: client does not want to see any more channels
- */
-enum dma_state_client {
-	DMA_ACK,
-	DMA_DUP,
-	DMA_NAK,
-};
-
-/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -89,23 +63,13 @@ enum dma_transaction_type {
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
+	DMA_PRIVATE,
 	DMA_SLAVE,
 };
 
 /* last transaction type for creation of the capabilities mask */
 #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
 
-/**
- * enum dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dma_slave_width {
-	DMA_SLAVE_WIDTH_8BIT,
-	DMA_SLAVE_WIDTH_16BIT,
-	DMA_SLAVE_WIDTH_32BIT,
-};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -132,32 +96,6 @@ enum dma_ctrl_flags {
 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
- * struct dma_slave - Information about a DMA slave
- * @dev: device acting as DMA slave
- * @dma_dev: required DMA master device. If non-NULL, the client can not be
- *	bound to other masters than this.
- * @tx_reg: physical address of data register used for
- *	memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *	peripheral-to-memory transfers
- * @reg_width: peripheral register width
- *
- * If dma_dev is non-NULL, the client can not be bound to other DMA
- * masters than the one corresponding to this device. The DMA master
- * driver may use this to determine if there is controller-specific
- * data wrapped around this struct. Drivers of platform code that sets
- * the dma_dev field must therefore make sure to use an appropriate
- * controller-specific dma slave structure wrapping this struct.
- */
-struct dma_slave {
-	struct device		*dev;
-	struct device		*dma_dev;
-	dma_addr_t		tx_reg;
-	dma_addr_t		rx_reg;
-	enum dma_slave_width	reg_width;
-};
-
-/**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
@@ -165,7 +103,6 @@ struct dma_slave {
  */
 
 struct dma_chan_percpu {
-	local_t refcount;
 	/* stats */
 	unsigned long memcpy_count;
 	unsigned long bytes_transferred;
@@ -176,13 +113,14 @@ struct dma_chan_percpu {
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
- * @class_dev: class device for sysfs
+ * @dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
  */
 struct dma_chan {
 	struct dma_device *device;
@@ -190,73 +128,47 @@ struct dma_chan {
 
 	/* sysfs */
 	int chan_id;
-	struct device dev;
-
-	struct kref refcount;
-	int slow_ref;
-	struct rcu_head rcu;
+	struct dma_chan_dev *dev;
 
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 	int client_count;
+	int table_count;
 };
 
-#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
-
-void dma_chan_cleanup(struct kref *kref);
-
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_get(&chan->refcount);
-	else {
-		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan - driver channel device
+ * @device - sysfs device
+ * @dev_id - parent dma_device dev_id
+ * @idr_ref - reference count to gate release of dma_device dev_id
+ */
+struct dma_chan_dev {
+	struct dma_chan *chan;
+	struct device device;
+	int dev_id;
+	atomic_t *idr_ref;
+};
 
-static inline void dma_chan_put(struct dma_chan *chan)
+static inline const char *dma_chan_name(struct dma_chan *chan)
 {
-	if (unlikely(chan->slow_ref))
-		kref_put(&chan->refcount, dma_chan_cleanup);
-	else {
-		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
+	return dev_name(&chan->dev->device);
 }
 
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device. A client must not 'ack' an available channel more
- * than once. When a channel is removed all clients are notified. If a client
- * is using the channel it must 'ack' the removal. A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state);
+void dma_chan_cleanup(struct kref *kref);
 
 /**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *	a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *	DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned.  Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask.  It returns 'true' to indicate that the
+ * channel is suitable.
  */
-struct dma_client {
-	dma_event_callback	event_callback;
-	dma_cap_mask_t		cap_mask;
-	struct dma_slave	*slave;
-	struct list_head	global_node;
-};
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 /**
@@ -323,14 +235,10 @@ struct dma_device {
 	dma_cap_mask_t cap_mask;
 	int max_xor;
 
-	struct kref refcount;
-	struct completion done;
-
 	int dev_id;
 	struct device *dev;
 
-	int (*device_alloc_chan_resources)(struct dma_chan *chan,
-			struct dma_client *client);
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -362,9 +270,8 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client);
+void dmaengine_get(void);
+void dmaengine_put(void);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
 static inline int
 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
 
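For reference, the new client interface replaces dma_client registration with: build a capability mask, optionally supply a dma_filter_fn, hold the returned channel exclusively, and give it back with dma_release_channel(). A minimal sketch of a memcpy client under this interface; the filter below only demonstrates the callback shape, and the preferred_dma_dev parameter is an invented way of steering the choice:

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* 'param' is whatever was passed to dma_request_channel();
	 * accept any channel, or only those on the requested device. */
	struct device *wanted = param;

	return !wanted || chan->device->dev == wanted;
}

static int my_client_init(struct device *preferred_dma_dev)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Exclusively grab a non-busy DMA_MEMCPY channel. */
	chan = dma_request_channel(mask, my_filter, preferred_dma_dev);
	if (!chan)
		return -ENODEV;

	/* ... prepare descriptors, dma_async_issue_pending(chan), ... */

	dma_release_channel(chan);
	return 0;
}
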
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 04d217b442b..d797dde247f 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -22,14 +22,34 @@ struct dw_dma_platform_data {
 };
 
 /**
+ * enum dw_dma_slave_width - DMA slave register access width.
+ * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum dw_dma_slave_width {
+	DW_DMA_SLAVE_WIDTH_8BIT,
+	DW_DMA_SLAVE_WIDTH_16BIT,
+	DW_DMA_SLAVE_WIDTH_32BIT,
+};
+
+/**
  * struct dw_dma_slave - Controller-specific information about a slave
- * @slave: Generic information about the slave
- * @ctl_lo: Platform-specific initializer for the CTL_LO register
+ *
+ * @dma_dev: required DMA master device
+ * @tx_reg: physical address of data register used for
+ *	memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ *	peripheral-to-memory transfers
+ * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
  */
 struct dw_dma_slave {
-	struct dma_slave	slave;
+	struct device		*dma_dev;
+	dma_addr_t		tx_reg;
+	dma_addr_t		rx_reg;
+	enum dw_dma_slave_width	reg_width;
 	u32			cfg_hi;
 	u32			cfg_lo;
 };
@@ -54,9 +74,4 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
-static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
-{
-	return container_of(slave, struct dw_dma_slave, slave);
-}
-
 #endif /* DW_DMAC_H */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 00e2b575021..88d3d8fbf9f 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -520,6 +520,7 @@ struct cfi_fixup {
 
 #define CFI_MFR_AMD		0x0001
 #define CFI_MFR_ATMEL		0x001F
+#define CFI_MFR_SAMSUNG		0x00EC
 #define CFI_MFR_ST		0x0020	/* STMicroelectronics */
 
 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h
index 0be442f881d..0555f7a0b9e 100644
--- a/include/linux/mtd/ftl.h
+++ b/include/linux/mtd/ftl.h
@@ -32,25 +32,25 @@
 #define _LINUX_FTL_H
 
 typedef struct erase_unit_header_t {
-    u_int8_t	LinkTargetTuple[5];
-    u_int8_t	DataOrgTuple[10];
-    u_int8_t	NumTransferUnits;
-    u_int32_t	EraseCount;
-    u_int16_t	LogicalEUN;
-    u_int8_t	BlockSize;
-    u_int8_t	EraseUnitSize;
-    u_int16_t	FirstPhysicalEUN;
-    u_int16_t	NumEraseUnits;
-    u_int32_t	FormattedSize;
-    u_int32_t	FirstVMAddress;
-    u_int16_t	NumVMPages;
-    u_int8_t	Flags;
-    u_int8_t	Code;
-    u_int32_t	SerialNumber;
-    u_int32_t	AltEUHOffset;
-    u_int32_t	BAMOffset;
-    u_int8_t	Reserved[12];
-    u_int8_t	EndTuple[2];
+    uint8_t	LinkTargetTuple[5];
+    uint8_t	DataOrgTuple[10];
+    uint8_t	NumTransferUnits;
+    uint32_t	EraseCount;
+    uint16_t	LogicalEUN;
+    uint8_t	BlockSize;
+    uint8_t	EraseUnitSize;
+    uint16_t	FirstPhysicalEUN;
+    uint16_t	NumEraseUnits;
+    uint32_t	FormattedSize;
+    uint32_t	FirstVMAddress;
+    uint16_t	NumVMPages;
+    uint8_t	Flags;
+    uint8_t	Code;
+    uint32_t	SerialNumber;
+    uint32_t	AltEUHOffset;
+    uint32_t	BAMOffset;
+    uint8_t	Reserved[12];
+    uint8_t	EndTuple[2];
 } erase_unit_header_t;
 
 /* Flags in erase_unit_header_t */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index aa30244492c..b981b877221 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -223,6 +223,7 @@ struct map_info {
 	   must leave it enabled. */
 	void (*set_vpp)(struct map_info *, int);
 
+	unsigned long pfow_base;
 	unsigned long map_priv_1;
 	unsigned long map_priv_2;
 	void *fldrv_priv;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 64433eb411d..3aa5d77c2cd 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -15,6 +15,8 @@
 #include <linux/mtd/compatmac.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/div64.h>
+
 #define MTD_CHAR_MAJOR 90
 #define MTD_BLOCK_MAJOR 31
 #define MAX_MTD_DEVICES 32
@@ -25,20 +27,20 @@
 #define MTD_ERASE_DONE		0x08
 #define MTD_ERASE_FAILED	0x10
 
-#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
+#define MTD_FAIL_ADDR_UNKNOWN -1LL
 
 /* If the erase fails, fail_addr might indicate exactly which block failed.  If
    fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
    specific to any particular block. */
 struct erase_info {
 	struct mtd_info *mtd;
-	u_int32_t addr;
-	u_int32_t len;
-	u_int32_t fail_addr;
+	uint64_t addr;
+	uint64_t len;
+	uint64_t fail_addr;
 	u_long time;
 	u_long retries;
-	u_int dev;
-	u_int cell;
+	unsigned dev;
+	unsigned cell;
 	void (*callback) (struct erase_info *self);
 	u_long priv;
 	u_char state;
@@ -46,9 +48,9 @@ struct erase_info {
 };
 
 struct mtd_erase_region_info {
-	u_int32_t offset;		/* At which this region starts, from the beginning of the MTD */
-	u_int32_t erasesize;		/* For this region */
-	u_int32_t numblocks;		/* Number of blocks of erasesize in this region */
+	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
+	uint32_t erasesize;		/* For this region */
+	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
 	unsigned long *lockmap;		/* If keeping bitmap of locks */
 };
 
@@ -100,14 +102,14 @@ struct mtd_oob_ops {
 
 struct mtd_info {
 	u_char type;
-	u_int32_t flags;
-	u_int32_t size;		// Total size of the MTD
+	uint32_t flags;
+	uint64_t size;		// Total size of the MTD
 
 	/* "Major" erase size for the device. Naïve users may take this
 	 * to be the only erase size available, or may use the more detailed
 	 * information below if they desire
 	 */
-	u_int32_t erasesize;
+	uint32_t erasesize;
 	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
 	 * though individual bits can be cleared), in case of NAND flash it is
 	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
@@ -115,10 +117,20 @@ struct mtd_info {
 	 * Any driver registering a struct mtd_info must ensure a writesize of
 	 * 1 or larger.
 	 */
-	u_int32_t writesize;
+	uint32_t writesize;
+
+	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
+	uint32_t oobavail;  // Available OOB bytes per block
 
-	u_int32_t oobsize;   // Amount of OOB data per block (e.g. 16)
-	u_int32_t oobavail;  // Available OOB bytes per block
+	/*
+	 * If erasesize is a power of 2 then the shift is stored in
+	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
+	 */
+	unsigned int erasesize_shift;
+	unsigned int writesize_shift;
+	/* Masks based on erasesize_shift and writesize_shift */
+	unsigned int erasesize_mask;
+	unsigned int writesize_mask;
 
 	// Kernel-only stuff starts here.
 	const char *name;
@@ -190,8 +202,8 @@ struct mtd_info {
 	void (*sync) (struct mtd_info *mtd);
 
 	/* Chip-supported device locking */
-	int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len);
-	int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len);
+	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 	/* Power Management functions */
 	int (*suspend) (struct mtd_info *mtd);
@@ -221,6 +233,35 @@ struct mtd_info {
 	void (*put_device) (struct mtd_info *mtd);
 };
 
+static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz >> mtd->erasesize_shift;
+	do_div(sz, mtd->erasesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz & mtd->erasesize_mask;
+	return do_div(sz, mtd->erasesize);
+}
+
+static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz >> mtd->writesize_shift;
+	do_div(sz, mtd->writesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz & mtd->writesize_mask;
+	return do_div(sz, mtd->writesize);
+}
 
 	/* Kernel-side ioctl definitions */
 
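The new mtd_div_by_eb()/mtd_mod_by_eb() helpers exist so callers never divide the now 64-bit sizes and offsets by erasesize directly (plain 64-bit division is not available on all 32-bit targets, hence do_div). A hedged sketch of the intended call pattern, e.g. validating an erase request; the function name and error handling are illustrative only:

/* Sketch: reject erases that are not aligned to an eraseblock. */
static int check_erase_args(struct mtd_info *mtd, uint64_t ofs, uint64_t len)
{
	if (mtd_mod_by_eb(ofs, mtd) || mtd_mod_by_eb(len, mtd))
		return -EINVAL;		/* not eraseblock aligned */

	if (ofs + len > mtd->size)
		return -EINVAL;		/* past the end of the device */

	/* Number of whole eraseblocks covered by the request. */
	return (int)mtd_div_by_eb(len, mtd);
}
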
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 733d3f3b4eb..db5b63da2a7 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -335,17 +335,12 @@ struct nand_buffers {
  * @erase_cmd:		[INTERN] erase command write function, selectable due to AND support
  * @scan_bbt:		[REPLACEABLE] function to scan bad block table
  * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
- * @wq:			[INTERN] wait queue to sleep on if a NAND operation is in progress
  * @state:		[INTERN] the current state of the NAND device
  * @oob_poi:		poison value buffer
  * @page_shift:		[INTERN] number of address bits in a page (column address bits)
  * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
  * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
  * @chip_shift:		[INTERN] number of address bits in one chip
- * @datbuf:		[INTERN] internal buffer for one page + oob
- * @oobbuf:		[INTERN] oob buffer for one eraseblock
- * @oobdirty:		[INTERN] indicates that oob_buf must be reinitialized
- * @data_poi:		[INTERN] pointer to a data buffer
  * @options:		[BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
  *			special functionality. See the defines for further explanation
  * @badblockpos:	[INTERN] position of the bad block marker in the oob area
@@ -399,7 +394,7 @@ struct nand_chip {
 	int		bbt_erase_shift;
 	int		chip_shift;
 	int		numchips;
-	unsigned long	chipsize;
+	uint64_t	chipsize;
 	int		pagemask;
 	int		pagebuf;
 	int		subpagesize;
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c92b4d43960..a45dd831b3f 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,9 +36,9 @@
 
 struct mtd_partition {
 	char *name;			/* identifier string */
-	u_int32_t size;			/* partition size */
-	u_int32_t offset;		/* offset within the master MTD space */
-	u_int32_t mask_flags;		/* master MTD flags to mask out for this partition */
+	uint64_t size;			/* partition size */
+	uint64_t offset;		/* offset within the master MTD space */
+	uint32_t mask_flags;		/* master MTD flags to mask out for this partition */
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only)*/
 	struct mtd_info **mtdp;		/* pointer to store the MTD object */
 };
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
new file mode 100644
index 00000000000..b730d4f8465
--- /dev/null
+++ b/include/linux/mtd/pfow.h
@@ -0,0 +1,159 @@
+/* Primary function overlay window definitions
+ * and service functions used by LPDDR chips
+ */
+#ifndef __LINUX_MTD_PFOW_H
+#define __LINUX_MTD_PFOW_H
+
+#include <linux/mtd/qinfo.h>
+
+/* PFOW registers addressing */
+/* Address of symbol "P" */
+#define PFOW_QUERY_STRING_P		0x0000
+/* Address of symbol "F" */
+#define PFOW_QUERY_STRING_F		0x0002
+/* Address of symbol "O" */
+#define PFOW_QUERY_STRING_O		0x0004
+/* Address of symbol "W" */
+#define PFOW_QUERY_STRING_W		0x0006
+/* Identification info for LPDDR chip */
+#define PFOW_MANUFACTURER_ID		0x0020
+#define PFOW_DEVICE_ID			0x0022
+/* Address in PFOW where prog buffer can be found */
+#define PFOW_PROGRAM_BUFFER_OFFSET	0x0040
+/* Size of program buffer in words */
+#define PFOW_PROGRAM_BUFFER_SIZE	0x0042
+/* Address command code register */
+#define PFOW_COMMAND_CODE		0x0080
+/* command data register */
+#define PFOW_COMMAND_DATA		0x0084
+/* command address register lower address bits */
+#define PFOW_COMMAND_ADDRESS_L		0x0088
+/* command address register upper address bits */
+#define PFOW_COMMAND_ADDRESS_H		0x008a
+/* number of bytes to be programmed lower address bits */
+#define PFOW_DATA_COUNT_L		0x0090
+/* number of bytes to be programmed higher address bits */
+#define PFOW_DATA_COUNT_H		0x0092
+/* command execution register, the only possible value is 0x01 */
+#define PFOW_COMMAND_EXECUTE		0x00c0
+/* 0x01 should be written at this address to clear buffer */
+#define PFOW_CLEAR_PROGRAM_BUFFER	0x00c4
+/* device program/erase suspend register */
+#define PFOW_PROGRAM_ERASE_SUSPEND	0x00c8
+/* device status register */
+#define PFOW_DSR			0x00cc
+
+/* LPDDR memory device command codes */
+/* They are possible values of PFOW command code register */
+#define LPDDR_WORD_PROGRAM		0x0041
+#define LPDDR_BUFF_PROGRAM		0x00E9
+#define LPDDR_BLOCK_ERASE		0x0020
+#define LPDDR_LOCK_BLOCK		0x0061
+#define LPDDR_UNLOCK_BLOCK		0x0062
+#define LPDDR_READ_BLOCK_LOCK_STATUS	0x0065
+#define LPDDR_INFO_QUERY		0x0098
+#define LPDDR_READ_OTP			0x0097
+#define LPDDR_PROG_OTP			0x00C0
+#define LPDDR_RESUME			0x00D0
+
+/* Defines possible value of PFOW command execution register */
+#define LPDDR_START_EXECUTION		0x0001
+
+/* Defines possible value of PFOW program/erase suspend register */
+#define LPDDR_SUSPEND			0x0001
+
+/* Possible values of PFOW device status register */
+/* access R - read; RC - read & clearable */
+#define DSR_DPS			(1<<1)	/* RC; device protect status
+					 * 0 - not protected, 1 - locked */
+#define DSR_PSS			(1<<2)	/* R; program suspend status;
+					 * 0 - prog in progress/completed,
+					 * 1 - prog suspended */
+#define DSR_VPPS		(1<<3)	/* RC; 0 - Vpp OK, 1 - Vpp low */
+#define DSR_PROGRAM_STATUS	(1<<4)	/* RC; 0 - successful, 1 - error */
+#define DSR_ERASE_STATUS	(1<<5)	/* RC; erase or blank check status;
+					 * 0 - success erase/blank check,
+					 * 1 - blank check error */
+#define DSR_ESS			(1<<6)	/* R; erase suspend status;
+					 * 0 - erase in progress/complete,
+					 * 1 - erase suspended */
+#define DSR_READY_STATUS	(1<<7)	/* R; device status
+					 * 0 - busy,
+					 * 1 - ready */
+#define DSR_RPS			(0x3<<8) /* RC; region program status
+					 * 00 - success,
+					 * 01 - re-program attempt in region with
+					 *      object mode data,
+					 * 10 - object mode program attempt in
+					 *      region with control mode data,
+					 * 11 - attempt to program invalid half
+					 *      with 0x41 command */
+#define DSR_AOS			(1<<12)	/* RC; 1 - AO related failure */
+#define DSR_AVAILABLE		(1<<15)	/* R; device availability
+					 * 1 - device available
+					 * 0 - not available */
+
+/* The superset of all possible error bits in DSR */
+#define DSR_ERR			0x133A
+
+static inline void send_pfow_command(struct map_info *map,
+				unsigned long cmd_code, unsigned long adr,
+				unsigned long len, map_word *datum)
+{
+	int bits_per_chip = map_bankwidth(map) * 8;
+	int chipnum;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	chipnum = adr >> lpddr->chipshift;
+
+	map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
+	map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
+		map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+	map_write(map, CMD(adr>>bits_per_chip),
+		map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+	if (len) {
+		map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
+			map->pfow_base + PFOW_DATA_COUNT_L);
+		map_write(map, CMD(len>>bits_per_chip),
+			map->pfow_base + PFOW_DATA_COUNT_H);
+	}
+	if (datum)
+		map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
+
+	/* Command execution start */
+	map_write(map, CMD(LPDDR_START_EXECUTION),
+		map->pfow_base + PFOW_COMMAND_EXECUTE);
+}
+
+static inline void print_drs_error(unsigned dsr)
+{
+	int prog_status = (dsr & DSR_RPS) >> 8;
+
+	if (!(dsr & DSR_AVAILABLE))
+		printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
+	if (prog_status & 0x03)
+		printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
+						"half with 41h command\n");
+	else if (prog_status & 0x02)
+		printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
+					"in region with Control Mode data\n");
+	else if (prog_status & 0x01)
+		printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
+						"with Object Mode data\n");
+	if (!(dsr & DSR_READY_STATUS))
+		printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
+	if (dsr & DSR_ESS)
+		printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
+	if (dsr & DSR_ERASE_STATUS)
+		printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
+	if (dsr & DSR_PROGRAM_STATUS)
+		printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
+	if (dsr & DSR_VPPS)
+		printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
+					"aborted\n");
+	if (dsr & DSR_PSS)
+		printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
+	if (dsr & DSR_DPS)
+		printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
+					"on locked block\n");
+}
+#endif	/* __LINUX_MTD_PFOW_H */
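send_pfow_command() only latches the command registers and kicks PFOW_COMMAND_EXECUTE; the caller is still responsible for waiting and for decoding the device status register. A rough sketch of how an LPDDR chip driver might erase one block with these helpers; the polling step and the wait_for_ready() name are placeholders, not part of this header:

/* Sketch only: issue a block erase through the overlay window and
 * report any error bits from the DSR. */
static int lpddr_erase_block_sketch(struct map_info *map, unsigned long adr)
{
	map_word dsr;

	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

	/* A real driver sleeps/polls here (wait_for_ready() is made up). */
	wait_for_ready(map);

	dsr = map_read(map, map->pfow_base + PFOW_DSR);
	if (CMDVAL(dsr) & DSR_ERR) {
		print_drs_error(CMDVAL(dsr));
		return -EIO;
	}
	return 0;
}
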
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index c8e63a5ee72..76f7cabf07d 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -24,6 +24,7 @@ struct physmap_flash_data {
 	unsigned int		width;
 	void			(*set_vpp)(struct map_info *, int);
 	unsigned int		nr_parts;
+	unsigned int		pfow_base;
 	struct mtd_partition	*parts;
 };
 
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
new file mode 100644
index 00000000000..7b3d487d8b3
--- /dev/null
+++ b/include/linux/mtd/qinfo.h
@@ -0,0 +1,91 @@
+#ifndef __LINUX_MTD_QINFO_H
+#define __LINUX_MTD_QINFO_H
+
+#include <linux/mtd/map.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/partitions.h>
+
+/* lpddr_private describes lpddr flash chip in memory map
+ * @ManufactId - Chip Manufacturer ID
+ * @DevId - Chip Device ID
+ * @qinfo - pointer to qinfo records describing the chip
+ * @numchips - number of chips including virtual RWW partitions
+ * @chipshift - Chip/partition size 2^chipshift
+ * @chips - per-chip data structure
+ */
+struct lpddr_private {
+	uint16_t ManufactId;
+	uint16_t DevId;
+	struct qinfo_chip *qinfo;
+	int numchips;
+	unsigned long chipshift;
+	struct flchip chips[0];
+};
+
+/* qinfo_query_info structure contains request information for
+ * each qinfo record
+ * @major - major number of qinfo record
+ * @minor - minor number of qinfo record
+ * @id_str - descriptive string to access the record
+ * @desc - detailed description for the qinfo record
+ */
+struct qinfo_query_info {
+	uint8_t	major;
+	uint8_t	minor;
+	char *id_str;
+	char *desc;
+};
+
+/*
+ * qinfo_chip structure contains necessary qinfo records data
+ * @DevSizeShift - Device size 2^n bytes
+ * @BufSizeShift - Program buffer size 2^n bytes
+ * @TotalBlocksNum - Total number of blocks
+ * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes
+ * @HWPartsNum - Number of hardware partitions
+ * @SuspEraseSupp - Suspend erase supported
+ * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec
+ * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec
+ * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec
+ */
+struct qinfo_chip {
+	/* General device info */
+	uint16_t DevSizeShift;
+	uint16_t BufSizeShift;
+	/* Erase block information */
+	uint16_t TotalBlocksNum;
+	uint16_t UniformBlockSizeShift;
+	/* Partition information */
+	uint16_t HWPartsNum;
+	/* Optional features */
+	uint16_t SuspEraseSupp;
+	/* Operation typical time */
+	uint16_t SingleWordProgTime;
+	uint16_t ProgBufferTime;
+	uint16_t BlockEraseTime;
+};
+
+/* defines for fixup usage */
+#define LPDDR_MFR_ANY		0xffff
+#define LPDDR_ID_ANY		0xffff
+#define NUMONYX_MFGR_ID		0x0089
+#define R18_DEVICE_ID_1G	0x893c
+
+static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
+{
+	map_word val = { {0} };
+	val.x[0] = cmd;
+	return val;
+}
+
+#define CMD(x) lpddr_build_cmd(x, map)
+#define CMDVAL(cmd) cmd.x[0]
+
+struct mtd_info *lpddr_cmdset(struct map_info *);
+
+#endif
+
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
new file mode 100644
index 00000000000..25f4d2a845c
--- /dev/null
+++ b/include/linux/mtd/sharpsl.h
@@ -0,0 +1,20 @@
+/*
+ * SharpSL NAND support
+ *
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+struct sharpsl_nand_platform_data {
+	struct nand_bbt_descr	*badblock_pattern;
+	struct nand_ecclayout	*ecc_layout;
+	struct mtd_partition	*partitions;
+	unsigned int		nr_partitions;
+};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 114091be887..f2455681337 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1125,9 +1125,6 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct napi_struct	backlog;
-#ifdef CONFIG_NET_DMA
-	struct dma_chan		*net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1ce9fe572e5..1d9518bc4c5 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start,
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
 
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
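The new op_entry API splits a sample into reserve/fill/commit steps so architecture code can attach a variable amount of extra data to a single event. A minimal sketch of the intended sequence; the zero code value and the two payload words are invented for illustration:

/* Sketch: log one sample with two extra data words. */
static void log_extended_sample(struct pt_regs * const regs,
				unsigned long pc,
				unsigned long d0, unsigned long d1)
{
	struct op_entry entry;

	/* Reserve room for the sample header plus 2 data words. */
	oprofile_write_reserve(&entry, regs, pc, 0 /* code: illustrative */, 2);
	oprofile_add_data(&entry, d0);
	oprofile_add_data(&entry, d1);
	oprofile_write_commit(&entry);
}
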
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index f7cc204fab0..20998746518 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -223,7 +223,6 @@ struct hotplug_params {
 #ifdef CONFIG_ACPI
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/actypes.h>
 extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 				struct hotplug_params *hpp);
 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2ce8207686e..2b409c44db8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 
 extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
 extern int hibernate(void);
+extern int hibernate_nvs_register(unsigned long start, unsigned long size);
+extern int hibernate_nvs_alloc(void);
+extern void hibernate_nvs_free(void);
+extern void hibernate_nvs_save(void);
+extern void hibernate_nvs_restore(void);
 #else /* CONFIG_HIBERNATION */
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
@@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {}
 
 static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
+static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+static inline int hibernate_nvs_alloc(void) { return 0; }
+static inline void hibernate_nvs_free(void) {}
+static inline void hibernate_nvs_save(void) {}
+static inline void hibernate_nvs_restore(void) {}
 #endif /* CONFIG_HIBERNATION */
 
 #ifdef CONFIG_PM_SLEEP