author     David Woodhouse <David.Woodhouse@intel.com>  2008-08-12 06:28:00 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2008-08-12 06:28:00 -0400
commit     742c52533b05d8ae83c794bd6811100675b85ce5 (patch)
tree       de89a81d88c19504d1dc4f023a4b480c9022b3b5 /include/linux
parent     36cd4fb5d277f34fe9e4db0deac2d4efd7dff735 (diff)
parent     10fec20ef5eec1c91913baec1225400f0d02df40 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
include/asm-arm/arch-omap/onenand.h
Diffstat (limited to 'include/linux')
41 files changed, 766 insertions(+), 392 deletions(-)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 4c4142c5aa6e..a26f565e8189 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -97,6 +97,7 @@ header-y += ioctl.h | |||
97 | header-y += ip6_tunnel.h | 97 | header-y += ip6_tunnel.h |
98 | header-y += ipmi_msgdefs.h | 98 | header-y += ipmi_msgdefs.h |
99 | header-y += ipsec.h | 99 | header-y += ipsec.h |
100 | header-y += ip_vs.h | ||
100 | header-y += ipx.h | 101 | header-y += ipx.h |
101 | header-y += irda.h | 102 | header-y += irda.h |
102 | header-y += iso_fs.h | 103 | header-y += iso_fs.h |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 88d68081a0f1..e61f22be4d0e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -655,6 +655,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | |||
655 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 655 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
656 | extern void blk_requeue_request(struct request_queue *, struct request *); | 656 | extern void blk_requeue_request(struct request_queue *, struct request *); |
657 | extern void blk_plug_device(struct request_queue *); | 657 | extern void blk_plug_device(struct request_queue *); |
658 | extern void blk_plug_device_unlocked(struct request_queue *); | ||
658 | extern int blk_remove_plug(struct request_queue *); | 659 | extern int blk_remove_plug(struct request_queue *); |
659 | extern void blk_recount_segments(struct request_queue *, struct bio *); | 660 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
660 | extern int scsi_cmd_ioctl(struct file *, struct request_queue *, | 661 | extern int scsi_cmd_ioctl(struct file *, struct request_queue *, |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 50cfe8ceb478..eadaab44015f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) | |||
115 | BUFFER_FNS(Dirty, dirty) | 115 | BUFFER_FNS(Dirty, dirty) |
116 | TAS_BUFFER_FNS(Dirty, dirty) | 116 | TAS_BUFFER_FNS(Dirty, dirty) |
117 | BUFFER_FNS(Lock, locked) | 117 | BUFFER_FNS(Lock, locked) |
118 | TAS_BUFFER_FNS(Lock, locked) | ||
119 | BUFFER_FNS(Req, req) | 118 | BUFFER_FNS(Req, req) |
120 | TAS_BUFFER_FNS(Req, req) | 119 | TAS_BUFFER_FNS(Req, req) |
121 | BUFFER_FNS(Mapped, mapped) | 120 | BUFFER_FNS(Mapped, mapped) |
@@ -321,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) | |||
321 | __wait_on_buffer(bh); | 320 | __wait_on_buffer(bh); |
322 | } | 321 | } |
323 | 322 | ||
323 | static inline int trylock_buffer(struct buffer_head *bh) | ||
324 | { | ||
325 | return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); | ||
326 | } | ||
327 | |||
324 | static inline void lock_buffer(struct buffer_head *bh) | 328 | static inline void lock_buffer(struct buffer_head *bh) |
325 | { | 329 | { |
326 | might_sleep(); | 330 | might_sleep(); |
327 | if (test_set_buffer_locked(bh)) | 331 | if (!trylock_buffer(bh)) |
328 | __lock_buffer(bh); | 332 | __lock_buffer(bh); |
329 | } | 333 | } |
330 | 334 | ||
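Note: trylock_buffer() above gives callers a non-blocking alternative to lock_buffer(). A minimal usage sketch (the calling function is hypothetical, not part of this patch):

static void example_process_bh(struct buffer_head *bh)
{
	if (!trylock_buffer(bh))
		return;			/* lock held elsewhere; skip for now */
	/* ... operate on the locked buffer ... */
	unlock_buffer(bh);
}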
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index d62c19ff041c..7f627775c947 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/list.h> | 40 | #include <linux/list.h> |
41 | #include <linux/kref.h> | 41 | #include <linux/kref.h> |
42 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> |
43 | #include <linux/err.h> | ||
43 | 44 | ||
44 | #include <asm/atomic.h> | 45 | #include <asm/atomic.h> |
45 | 46 | ||
@@ -129,8 +130,25 @@ struct configfs_attribute { | |||
129 | /* | 130 | /* |
130 | * Users often need to create attribute structures for their configurable | 131 | * Users often need to create attribute structures for their configurable |
131 | * attributes, containing a configfs_attribute member and function pointers | 132 | * attributes, containing a configfs_attribute member and function pointers |
132 | * for the show() and store() operations on that attribute. They can use | 133 | * for the show() and store() operations on that attribute. If they don't |
133 | * this macro (similar to sysfs' __ATTR) to make defining attributes easier. | 134 | * need anything else on the extended attribute structure, they can use |
135 | * this macro to define it The argument _item is the name of the | ||
136 | * config_item structure. | ||
137 | */ | ||
138 | #define CONFIGFS_ATTR_STRUCT(_item) \ | ||
139 | struct _item##_attribute { \ | ||
140 | struct configfs_attribute attr; \ | ||
141 | ssize_t (*show)(struct _item *, char *); \ | ||
142 | ssize_t (*store)(struct _item *, const char *, size_t); \ | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * With the extended attribute structure, users can use this macro | ||
147 | * (similar to sysfs' __ATTR) to make defining attributes easier. | ||
148 | * An example: | ||
149 | * #define MYITEM_ATTR(_name, _mode, _show, _store) \ | ||
150 | * struct myitem_attribute childless_attr_##_name = \ | ||
151 | * __CONFIGFS_ATTR(_name, _mode, _show, _store) | ||
134 | */ | 152 | */ |
135 | #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ | 153 | #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ |
136 | { \ | 154 | { \ |
@@ -142,6 +160,52 @@ struct configfs_attribute { | |||
142 | .show = _show, \ | 160 | .show = _show, \ |
143 | .store = _store, \ | 161 | .store = _store, \ |
144 | } | 162 | } |
163 | /* Here is a readonly version, only requiring a show() operation */ | ||
164 | #define __CONFIGFS_ATTR_RO(_name, _show) \ | ||
165 | { \ | ||
166 | .attr = { \ | ||
167 | .ca_name = __stringify(_name), \ | ||
168 | .ca_mode = 0444, \ | ||
169 | .ca_owner = THIS_MODULE, \ | ||
170 | }, \ | ||
171 | .show = _show, \ | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * With these extended attributes, the simple show_attribute() and | ||
176 | * store_attribute() operations need to call the show() and store() of the | ||
177 | * attributes. This is a common pattern, so we provide a macro to define | ||
178 | * them. The argument _item is the name of the config_item structure. | ||
179 | * This macro expects the attributes to be named "struct <name>_attribute" | ||
180 | * and the function to_<name>() to exist; | ||
181 | */ | ||
182 | #define CONFIGFS_ATTR_OPS(_item) \ | ||
183 | static ssize_t _item##_attr_show(struct config_item *item, \ | ||
184 | struct configfs_attribute *attr, \ | ||
185 | char *page) \ | ||
186 | { \ | ||
187 | struct _item *_item = to_##_item(item); \ | ||
188 | struct _item##_attribute *_item##_attr = \ | ||
189 | container_of(attr, struct _item##_attribute, attr); \ | ||
190 | ssize_t ret = 0; \ | ||
191 | \ | ||
192 | if (_item##_attr->show) \ | ||
193 | ret = _item##_attr->show(_item, page); \ | ||
194 | return ret; \ | ||
195 | } \ | ||
196 | static ssize_t _item##_attr_store(struct config_item *item, \ | ||
197 | struct configfs_attribute *attr, \ | ||
198 | const char *page, size_t count) \ | ||
199 | { \ | ||
200 | struct _item *_item = to_##_item(item); \ | ||
201 | struct _item##_attribute *_item##_attr = \ | ||
202 | container_of(attr, struct _item##_attribute, attr); \ | ||
203 | ssize_t ret = -EINVAL; \ | ||
204 | \ | ||
205 | if (_item##_attr->store) \ | ||
206 | ret = _item##_attr->store(_item, page, count); \ | ||
207 | return ret; \ | ||
208 | } | ||
145 | 209 | ||
146 | /* | 210 | /* |
147 | * If allow_link() exists, the item can symlink(2) out to other | 211 | * If allow_link() exists, the item can symlink(2) out to other |
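Note: taken together, the new configfs macros are intended to be used roughly as follows. This is a hedged sketch; the "widget" item type, its to_widget() helper and the value attribute are illustrative assumptions, not part of this patch:

struct widget {
	struct config_item item;
	int value;
};

static inline struct widget *to_widget(struct config_item *item)
{
	return container_of(item, struct widget, item);
}

CONFIGFS_ATTR_STRUCT(widget);	/* defines struct widget_attribute */
CONFIGFS_ATTR_OPS(widget);	/* defines widget_attr_show()/widget_attr_store() */

static ssize_t widget_value_show(struct widget *w, char *page)
{
	return sprintf(page, "%d\n", w->value);
}

/* read-only attribute wired to the show() routine above */
static struct widget_attribute widget_attr_value =
	__CONFIGFS_ATTR_RO(value, widget_value_show);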
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 98202c672fde..07aa198f19ed 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -230,6 +230,7 @@ extern void d_delete(struct dentry *); | |||
230 | extern struct dentry * d_alloc(struct dentry *, const struct qstr *); | 230 | extern struct dentry * d_alloc(struct dentry *, const struct qstr *); |
231 | extern struct dentry * d_alloc_anon(struct inode *); | 231 | extern struct dentry * d_alloc_anon(struct inode *); |
232 | extern struct dentry * d_splice_alias(struct inode *, struct dentry *); | 232 | extern struct dentry * d_splice_alias(struct inode *, struct dentry *); |
233 | extern struct dentry * d_add_ci(struct inode *, struct dentry *, struct qstr *); | ||
233 | extern void shrink_dcache_sb(struct super_block *); | 234 | extern void shrink_dcache_sb(struct super_block *); |
234 | extern void shrink_dcache_parent(struct dentry *); | 235 | extern void shrink_dcache_parent(struct dentry *); |
235 | extern void shrink_dcache_for_umount(struct super_block *); | 236 | extern void shrink_dcache_for_umount(struct super_block *); |
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index fc82446b6425..c30879cf93bc 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | struct dm9000_plat_data { | 28 | struct dm9000_plat_data { |
29 | unsigned int flags; | 29 | unsigned int flags; |
30 | unsigned char dev_addr[6]; | ||
30 | 31 | ||
31 | /* allow replacement IO routines */ | 32 | /* allow replacement IO routines */ |
32 | 33 | ||
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 8bb5e87df365..b4b038b89ee6 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -27,9 +27,24 @@ struct ethtool_cmd { | |||
27 | __u8 autoneg; /* Enable or disable autonegotiation */ | 27 | __u8 autoneg; /* Enable or disable autonegotiation */ |
28 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ | 28 | __u32 maxtxpkt; /* Tx pkts before generating tx int */ |
29 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ | 29 | __u32 maxrxpkt; /* Rx pkts before generating rx int */ |
30 | __u32 reserved[4]; | 30 | __u16 speed_hi; |
31 | __u16 reserved2; | ||
32 | __u32 reserved[3]; | ||
31 | }; | 33 | }; |
32 | 34 | ||
35 | static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, | ||
36 | __u32 speed) | ||
37 | { | ||
38 | |||
39 | ep->speed = (__u16)speed; | ||
40 | ep->speed_hi = (__u16)(speed >> 16); | ||
41 | } | ||
42 | |||
43 | static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) | ||
44 | { | ||
45 | return (ep->speed_hi << 16) | ep->speed; | ||
46 | } | ||
47 | |||
33 | #define ETHTOOL_BUSINFO_LEN 32 | 48 | #define ETHTOOL_BUSINFO_LEN 32 |
34 | /* these strings are set to whatever the driver author decides... */ | 49 | /* these strings are set to whatever the driver author decides... */ |
35 | struct ethtool_drvinfo { | 50 | struct ethtool_drvinfo { |
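Note: speed_hi extends the link-speed encoding beyond 16 bits while keeping the old field layout. A worked example (values are illustrative):

struct ethtool_cmd cmd = { 0 };

ethtool_cmd_speed_set(&cmd, 100000);	/* 100000 Mb/s == 0x186a0 */
/* cmd.speed    == 0x86a0 (low 16 bits)  */
/* cmd.speed_hi == 0x0001 (high 16 bits) */
/* ethtool_cmd_speed(&cmd) reassembles the full 100000 */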
diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h
deleted file mode 100644
index efef11db790f..000000000000
--- a/include/linux/harrier_defs.h
+++ /dev/null
@@ -1,212 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/harrier_defs.h | ||
3 | * | ||
4 | * Definitions for Motorola MCG Harrier North Bridge & Memory controller | ||
5 | * | ||
6 | * Author: Dale Farnsworth | ||
7 | * dale.farnsworth@mvista.com | ||
8 | * | ||
9 | * Extracted from asm-ppc/harrier.h by: | ||
10 | * Randy Vinson | ||
11 | * rvinson@mvista.com | ||
12 | * | ||
13 | * Copyright 2001-2002 MontaVista Software Inc. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify it | ||
16 | * under the terms of the GNU General Public License as published by the | ||
17 | * Free Software Foundation; either version 2 of the License, or (at your | ||
18 | * option) any later version. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ASMPPC_HARRIER_DEFS_H | ||
22 | #define __ASMPPC_HARRIER_DEFS_H | ||
23 | |||
24 | #define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 | ||
25 | |||
26 | #define HARRIER_VEND_DEV_ID 0x1057480b | ||
27 | |||
28 | #define HARRIER_VENI_OFF 0x00 | ||
29 | |||
30 | #define HARRIER_REVI_OFF 0x05 | ||
31 | #define HARRIER_UCTL_OFF 0xd0 | ||
32 | #define HARRIER_XTAL64_MASK 0x02 | ||
33 | |||
34 | #define HARRIER_MISC_CSR_OFF 0x1c | ||
35 | #define HARRIER_RSTOUT 0x01000000 | ||
36 | #define HARRIER_SYSCON 0x08000000 | ||
37 | #define HARRIER_EREADY 0x10000000 | ||
38 | #define HARRIER_ERDYS 0x20000000 | ||
39 | |||
40 | /* Function exception registers */ | ||
41 | #define HARRIER_FEEN_OFF 0x40 /* enable */ | ||
42 | #define HARRIER_FEST_OFF 0x44 /* status */ | ||
43 | #define HARRIER_FEMA_OFF 0x48 /* mask */ | ||
44 | #define HARRIER_FECL_OFF 0x4c /* clear */ | ||
45 | |||
46 | #define HARRIER_FE_DMA 0x80 | ||
47 | #define HARRIER_FE_MIDB 0x40 | ||
48 | #define HARRIER_FE_MIM0 0x20 | ||
49 | #define HARRIER_FE_MIM1 0x10 | ||
50 | #define HARRIER_FE_MIP 0x08 | ||
51 | #define HARRIER_FE_UA0 0x04 | ||
52 | #define HARRIER_FE_UA1 0x02 | ||
53 | #define HARRIER_FE_ABT 0x01 | ||
54 | |||
55 | #define HARRIER_SERIAL_0_OFF 0xc0 | ||
56 | |||
57 | #define HARRIER_MBAR_OFF 0xe0 | ||
58 | #define HARRIER_MBAR_MSK 0xfffc0000 | ||
59 | #define HARRIER_MPIC_CSR_OFF 0xe4 | ||
60 | #define HARRIER_MPIC_OPI_ENABLE 0x40 | ||
61 | #define HARRIER_MPIC_IFEVP_OFF 0x10200 | ||
62 | #define HARRIER_MPIC_IFEVP_VECT_MSK 0xff | ||
63 | #define HARRIER_MPIC_IFEDE_OFF 0x10210 | ||
64 | |||
65 | /* | ||
66 | * Define the Memory Controller register offsets. | ||
67 | */ | ||
68 | #define HARRIER_SDBA_OFF 0x110 | ||
69 | #define HARRIER_SDBB_OFF 0x114 | ||
70 | #define HARRIER_SDBC_OFF 0x118 | ||
71 | #define HARRIER_SDBD_OFF 0x11c | ||
72 | #define HARRIER_SDBE_OFF 0x120 | ||
73 | #define HARRIER_SDBF_OFF 0x124 | ||
74 | #define HARRIER_SDBG_OFF 0x128 | ||
75 | #define HARRIER_SDBH_OFF 0x12c | ||
76 | |||
77 | #define HARRIER_SDB_ENABLE 0x00000100 | ||
78 | #define HARRIER_SDB_SIZE_MASK 0xf | ||
79 | #define HARRIER_SDB_SIZE_SHIFT 16 | ||
80 | #define HARRIER_SDB_BASE_MASK 0xff | ||
81 | #define HARRIER_SDB_BASE_SHIFT 24 | ||
82 | |||
83 | /* | ||
84 | * Define outbound register offsets. | ||
85 | */ | ||
86 | #define HARRIER_OTAD0_OFF 0x220 | ||
87 | #define HARRIER_OTOF0_OFF 0x224 | ||
88 | #define HARRIER_OTAD1_OFF 0x228 | ||
89 | #define HARRIER_OTOF1_OFF 0x22c | ||
90 | #define HARRIER_OTAD2_OFF 0x230 | ||
91 | #define HARRIER_OTOF2_OFF 0x234 | ||
92 | #define HARRIER_OTAD3_OFF 0x238 | ||
93 | #define HARRIER_OTOF3_OFF 0x23c | ||
94 | |||
95 | #define HARRIER_OTADX_START_MSK 0xffff0000UL | ||
96 | #define HARRIER_OTADX_END_MSK 0x0000ffffUL | ||
97 | |||
98 | #define HARRIER_OTOFX_OFF_MSK 0xffff0000UL | ||
99 | #define HARRIER_OTOFX_ENA 0x80UL | ||
100 | #define HARRIER_OTOFX_WPE 0x10UL | ||
101 | #define HARRIER_OTOFX_SGE 0x08UL | ||
102 | #define HARRIER_OTOFX_RAE 0x04UL | ||
103 | #define HARRIER_OTOFX_MEM 0x02UL | ||
104 | #define HARRIER_OTOFX_IOM 0x01UL | ||
105 | |||
106 | /* | ||
107 | * Define generic message passing register offsets | ||
108 | */ | ||
109 | /* Mirrored registers (visible from both PowerPC and PCI space) */ | ||
110 | #define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ | ||
111 | #define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ | ||
112 | #define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ | ||
113 | #define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ | ||
114 | #define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ | ||
115 | |||
116 | #define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ | ||
117 | #define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ | ||
118 | #define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ | ||
119 | |||
120 | /* PowerPC-only registers */ | ||
121 | #define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ | ||
122 | |||
123 | /* PCI-only registers */ | ||
124 | #define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ | ||
125 | #define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ | ||
126 | #define HARRIER_MG_OMI0 (1<<4) | ||
127 | #define HARRIER_MG_OMI1 (1<<5) | ||
128 | |||
129 | #define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ | ||
130 | |||
131 | /* | ||
132 | * Define PCI configuration space register offsets | ||
133 | */ | ||
134 | #define HARRIER_XCSR_TO_PCFS_OFF 0x300 | ||
135 | |||
136 | /* | ||
137 | * Define message passing attribute register offset | ||
138 | */ | ||
139 | #define HARRIER_MPAT_OFF 0x44 | ||
140 | |||
141 | /* | ||
142 | * Define inbound attribute register offsets. | ||
143 | */ | ||
144 | #define HARRIER_ITSZ0_OFF 0x48 | ||
145 | #define HARRIER_ITAT0_OFF 0x4c | ||
146 | |||
147 | #define HARRIER_ITSZ1_OFF 0x50 | ||
148 | #define HARRIER_ITAT1_OFF 0x54 | ||
149 | |||
150 | #define HARRIER_ITSZ2_OFF 0x58 | ||
151 | #define HARRIER_ITAT2_OFF 0x5c | ||
152 | |||
153 | #define HARRIER_ITSZ3_OFF 0x60 | ||
154 | #define HARRIER_ITAT3_OFF 0x64 | ||
155 | |||
156 | /* inbound translation size constants */ | ||
157 | #define HARRIER_ITSZ_MSK 0xff | ||
158 | #define HARRIER_ITSZ_4KB 0x00 | ||
159 | #define HARRIER_ITSZ_8KB 0x01 | ||
160 | #define HARRIER_ITSZ_16KB 0x02 | ||
161 | #define HARRIER_ITSZ_32KB 0x03 | ||
162 | #define HARRIER_ITSZ_64KB 0x04 | ||
163 | #define HARRIER_ITSZ_128KB 0x05 | ||
164 | #define HARRIER_ITSZ_256KB 0x06 | ||
165 | #define HARRIER_ITSZ_512KB 0x07 | ||
166 | #define HARRIER_ITSZ_1MB 0x08 | ||
167 | #define HARRIER_ITSZ_2MB 0x09 | ||
168 | #define HARRIER_ITSZ_4MB 0x0A | ||
169 | #define HARRIER_ITSZ_8MB 0x0B | ||
170 | #define HARRIER_ITSZ_16MB 0x0C | ||
171 | #define HARRIER_ITSZ_32MB 0x0D | ||
172 | #define HARRIER_ITSZ_64MB 0x0E | ||
173 | #define HARRIER_ITSZ_128MB 0x0F | ||
174 | #define HARRIER_ITSZ_256MB 0x10 | ||
175 | #define HARRIER_ITSZ_512MB 0x11 | ||
176 | #define HARRIER_ITSZ_1GB 0x12 | ||
177 | #define HARRIER_ITSZ_2GB 0x13 | ||
178 | |||
179 | /* inbound translation offset */ | ||
180 | #define HARRIER_ITOF_SHIFT 0x10 | ||
181 | #define HARRIER_ITOF_MSK 0xffff | ||
182 | |||
183 | /* inbound translation atttributes */ | ||
184 | #define HARRIER_ITAT_PRE (1<<3) | ||
185 | #define HARRIER_ITAT_RAE (1<<4) | ||
186 | #define HARRIER_ITAT_WPE (1<<5) | ||
187 | #define HARRIER_ITAT_MEM (1<<6) | ||
188 | #define HARRIER_ITAT_ENA (1<<7) | ||
189 | #define HARRIER_ITAT_GBL (1<<16) | ||
190 | |||
191 | #define HARRIER_LBA_OFF 0x80 | ||
192 | #define HARRIER_LBA_MSK (1<<31) | ||
193 | |||
194 | #define HARRIER_XCSR_SIZE 1024 | ||
195 | |||
196 | /* macros to calculate message passing register offsets */ | ||
197 | #define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) | ||
198 | |||
199 | #define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) | ||
200 | |||
201 | /* | ||
202 | * Define PCI configuration space register offsets | ||
203 | */ | ||
204 | #define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 | ||
205 | #define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 | ||
206 | #define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 | ||
207 | #define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 | ||
208 | #define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 | ||
209 | |||
210 | #define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) | ||
211 | |||
212 | #endif /* __ASMPPC_HARRIER_DEFS_H */ | ||
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
index e6e9c814da61..f13255e06406 100644
--- a/include/linux/i2c-pnx.h
+++ b/include/linux/i2c-pnx.h
@@ -12,7 +12,9 @@ | |||
12 | #ifndef __I2C_PNX_H__ | 12 | #ifndef __I2C_PNX_H__ |
13 | #define __I2C_PNX_H__ | 13 | #define __I2C_PNX_H__ |
14 | 14 | ||
15 | #include <asm/arch/i2c.h> | 15 | #include <linux/pm.h> |
16 | |||
17 | struct platform_device; | ||
16 | 18 | ||
17 | struct i2c_pnx_mif { | 19 | struct i2c_pnx_mif { |
18 | int ret; /* Return value */ | 20 | int ret; /* Return value */ |
diff --git a/include/linux/ide.h b/include/linux/ide.h
index b846bc44a27e..87c12ed96954 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -219,18 +219,7 @@ static inline int __ide_default_irq(unsigned long base) | |||
219 | #include <asm-generic/ide_iops.h> | 219 | #include <asm-generic/ide_iops.h> |
220 | #endif | 220 | #endif |
221 | 221 | ||
222 | #ifndef MAX_HWIFS | 222 | #define MAX_HWIFS 10 |
223 | #if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA) | ||
224 | # define MAX_HWIFS 1 | ||
225 | #else | ||
226 | # define MAX_HWIFS 10 | ||
227 | #endif | ||
228 | #endif | ||
229 | |||
230 | #if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) | ||
231 | #undef MAX_HWIFS | ||
232 | #define MAX_HWIFS CONFIG_IDE_MAX_HWIFS | ||
233 | #endif | ||
234 | 223 | ||
235 | /* Currently only m68k, apus and m8xx need it */ | 224 | /* Currently only m68k, apus and m8xx need it */ |
236 | #ifndef IDE_ARCH_ACK_INTR | 225 | #ifndef IDE_ARCH_ACK_INTR |
@@ -509,24 +498,33 @@ struct ide_tp_ops { | |||
509 | 498 | ||
510 | extern const struct ide_tp_ops default_tp_ops; | 499 | extern const struct ide_tp_ops default_tp_ops; |
511 | 500 | ||
501 | /** | ||
502 | * struct ide_port_ops - IDE port operations | ||
503 | * | ||
504 | * @init_dev: host specific initialization of a device | ||
505 | * @set_pio_mode: routine to program host for PIO mode | ||
506 | * @set_dma_mode: routine to program host for DMA mode | ||
507 | * @selectproc: tweaks hardware to select drive | ||
508 | * @reset_poll: chipset polling based on hba specifics | ||
509 | * @pre_reset: chipset specific changes to default for device-hba resets | ||
510 | * @resetproc: routine to reset controller after a disk reset | ||
511 | * @maskproc: special host masking for drive selection | ||
512 | * @quirkproc: check host's drive quirk list | ||
513 | * | ||
514 | * @mdma_filter: filter MDMA modes | ||
515 | * @udma_filter: filter UDMA modes | ||
516 | * | ||
517 | * @cable_detect: detect cable type | ||
518 | */ | ||
512 | struct ide_port_ops { | 519 | struct ide_port_ops { |
513 | /* host specific initialization of a device */ | ||
514 | void (*init_dev)(ide_drive_t *); | 520 | void (*init_dev)(ide_drive_t *); |
515 | /* routine to program host for PIO mode */ | ||
516 | void (*set_pio_mode)(ide_drive_t *, const u8); | 521 | void (*set_pio_mode)(ide_drive_t *, const u8); |
517 | /* routine to program host for DMA mode */ | ||
518 | void (*set_dma_mode)(ide_drive_t *, const u8); | 522 | void (*set_dma_mode)(ide_drive_t *, const u8); |
519 | /* tweaks hardware to select drive */ | ||
520 | void (*selectproc)(ide_drive_t *); | 523 | void (*selectproc)(ide_drive_t *); |
521 | /* chipset polling based on hba specifics */ | ||
522 | int (*reset_poll)(ide_drive_t *); | 524 | int (*reset_poll)(ide_drive_t *); |
523 | /* chipset specific changes to default for device-hba resets */ | ||
524 | void (*pre_reset)(ide_drive_t *); | 525 | void (*pre_reset)(ide_drive_t *); |
525 | /* routine to reset controller after a disk reset */ | ||
526 | void (*resetproc)(ide_drive_t *); | 526 | void (*resetproc)(ide_drive_t *); |
527 | /* special host masking for drive selection */ | ||
528 | void (*maskproc)(ide_drive_t *, int); | 527 | void (*maskproc)(ide_drive_t *, int); |
529 | /* check host's drive quirk list */ | ||
530 | void (*quirkproc)(ide_drive_t *); | 528 | void (*quirkproc)(ide_drive_t *); |
531 | 529 | ||
532 | u8 (*mdma_filter)(ide_drive_t *); | 530 | u8 (*mdma_filter)(ide_drive_t *); |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a1630ba0b87c..7f4df7c7659d 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -506,6 +506,19 @@ struct ieee80211_channel_sw_ie { | |||
506 | u8 count; | 506 | u8 count; |
507 | } __attribute__ ((packed)); | 507 | } __attribute__ ((packed)); |
508 | 508 | ||
509 | /** | ||
510 | * struct ieee80211_tim | ||
511 | * | ||
512 | * This structure refers to "Traffic Indication Map information element" | ||
513 | */ | ||
514 | struct ieee80211_tim_ie { | ||
515 | u8 dtim_count; | ||
516 | u8 dtim_period; | ||
517 | u8 bitmap_ctrl; | ||
518 | /* variable size: 1 - 251 bytes */ | ||
519 | u8 virtual_map[0]; | ||
520 | } __attribute__ ((packed)); | ||
521 | |||
509 | struct ieee80211_mgmt { | 522 | struct ieee80211_mgmt { |
510 | __le16 frame_control; | 523 | __le16 frame_control; |
511 | __le16 duration; | 524 | __le16 duration; |
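Note: a hedged sketch of how a driver might consume the fixed part of a received TIM element; the function, its arguments and the surrounding parse path are assumptions, not part of this patch:

/* elem points at the element body (after the ID/length octets). */
static bool example_dtim_beacon(const u8 *elem, size_t elem_len)
{
	const struct ieee80211_tim_ie *tim;

	if (elem_len < sizeof(*tim))	/* need the three fixed octets */
		return false;
	tim = (const struct ieee80211_tim_ie *)elem;
	return tim->dtim_count == 0;	/* this beacon is a DTIM beacon */
}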
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
index 2baace2788a7..31d8629e75a1 100644
--- a/include/linux/ihex.h
+++ b/include/linux/ihex.h
@@ -18,7 +18,7 @@ struct ihex_binrec { | |||
18 | __be32 addr; | 18 | __be32 addr; |
19 | __be16 len; | 19 | __be16 len; |
20 | uint8_t data[0]; | 20 | uint8_t data[0]; |
21 | } __attribute__((aligned(4))); | 21 | } __attribute__((packed)); |
22 | 22 | ||
23 | /* Find the next record, taking into account the 4-byte alignment */ | 23 | /* Find the next record, taking into account the 4-byte alignment */ |
24 | static inline const struct ihex_binrec * | 24 | static inline const struct ihex_binrec * |
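Note: with the record header now packed, the 4-byte alignment mentioned in the comment is handled by the record walker. A hedged sketch of that arithmetic (example_next_rec() is illustrative, not the header's actual helper):

static inline const struct ihex_binrec *
example_next_rec(const struct ihex_binrec *rec)
{
	/* 6 fixed bytes (addr + len) plus the data, rounded up to 4 */
	size_t total = sizeof(*rec) + be16_to_cpu(rec->len);

	return (const struct ihex_binrec *)((const u8 *)rec + ALIGN(total, 4));
}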
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 62aa4f895abe..58ff4e74b2f3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -223,35 +223,6 @@ static inline int disable_irq_wake(unsigned int irq) | |||
223 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 223 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
224 | #endif | 224 | #endif |
225 | 225 | ||
226 | /* | ||
227 | * Temporary defines for UP kernels, until all code gets fixed. | ||
228 | */ | ||
229 | #ifndef CONFIG_SMP | ||
230 | static inline void __deprecated cli(void) | ||
231 | { | ||
232 | local_irq_disable(); | ||
233 | } | ||
234 | static inline void __deprecated sti(void) | ||
235 | { | ||
236 | local_irq_enable(); | ||
237 | } | ||
238 | static inline void __deprecated save_flags(unsigned long *x) | ||
239 | { | ||
240 | local_save_flags(*x); | ||
241 | } | ||
242 | #define save_flags(x) save_flags(&x) | ||
243 | static inline void __deprecated restore_flags(unsigned long x) | ||
244 | { | ||
245 | local_irq_restore(x); | ||
246 | } | ||
247 | |||
248 | static inline void __deprecated save_and_cli(unsigned long *x) | ||
249 | { | ||
250 | local_irq_save(*x); | ||
251 | } | ||
252 | #define save_and_cli(x) save_and_cli(&x) | ||
253 | #endif /* CONFIG_SMP */ | ||
254 | |||
255 | /* Some architectures might implement lazy enabling/disabling of | 226 | /* Some architectures might implement lazy enabling/disabling of |
256 | * interrupts. In some cases, such as stop_machine, we might want | 227 | * interrupts. In some cases, such as stop_machine, we might want |
257 | * to ensure that after a local_irq_disable(), interrupts have | 228 | * to ensure that after a local_irq_disable(), interrupts have |
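Note: the removed wrappers were thin aliases for the local_irq_*() primitives; any remaining users are expected to call those directly, e.g.:

unsigned long flags;

local_irq_save(flags);		/* was: save_and_cli(flags) */
/* ... critical section ... */
local_irq_restore(flags);	/* was: restore_flags(flags) */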
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
new file mode 100644
index 000000000000..ec6eb49af2d8
--- /dev/null
+++ b/include/linux/ip_vs.h
@@ -0,0 +1,245 @@ | |||
1 | /* | ||
2 | * IP Virtual Server | ||
3 | * data structure and functionality definitions | ||
4 | */ | ||
5 | |||
6 | #ifndef _IP_VS_H | ||
7 | #define _IP_VS_H | ||
8 | |||
9 | #include <linux/types.h> /* For __beXX types in userland */ | ||
10 | |||
11 | #define IP_VS_VERSION_CODE 0x010201 | ||
12 | #define NVERSION(version) \ | ||
13 | (version >> 16) & 0xFF, \ | ||
14 | (version >> 8) & 0xFF, \ | ||
15 | version & 0xFF | ||
16 | |||
17 | /* | ||
18 | * Virtual Service Flags | ||
19 | */ | ||
20 | #define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ | ||
21 | #define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ | ||
22 | |||
23 | /* | ||
24 | * Destination Server Flags | ||
25 | */ | ||
26 | #define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ | ||
27 | #define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ | ||
28 | |||
29 | /* | ||
30 | * IPVS sync daemon states | ||
31 | */ | ||
32 | #define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ | ||
33 | #define IP_VS_STATE_MASTER 0x0001 /* started as master */ | ||
34 | #define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ | ||
35 | |||
36 | /* | ||
37 | * IPVS socket options | ||
38 | */ | ||
39 | #define IP_VS_BASE_CTL (64+1024+64) /* base */ | ||
40 | |||
41 | #define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ | ||
42 | #define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) | ||
43 | #define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) | ||
44 | #define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) | ||
45 | #define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) | ||
46 | #define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) | ||
47 | #define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) | ||
48 | #define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) | ||
49 | #define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) | ||
50 | #define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) | ||
51 | #define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) | ||
52 | #define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) | ||
53 | #define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) | ||
54 | #define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) | ||
55 | #define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) | ||
56 | #define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) | ||
57 | #define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO | ||
58 | |||
59 | #define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL | ||
60 | #define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) | ||
61 | #define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) | ||
62 | #define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) | ||
63 | #define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) | ||
64 | #define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ | ||
65 | #define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) | ||
66 | #define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) | ||
67 | #define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON | ||
68 | |||
69 | |||
70 | /* | ||
71 | * IPVS Connection Flags | ||
72 | */ | ||
73 | #define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ | ||
74 | #define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ | ||
75 | #define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ | ||
76 | #define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ | ||
77 | #define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ | ||
78 | #define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ | ||
79 | #define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ | ||
80 | #define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ | ||
81 | #define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ | ||
82 | #define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ | ||
83 | #define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ | ||
84 | #define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ | ||
85 | #define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ | ||
86 | #define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ | ||
87 | #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ | ||
88 | |||
89 | #define IP_VS_SCHEDNAME_MAXLEN 16 | ||
90 | #define IP_VS_IFNAME_MAXLEN 16 | ||
91 | |||
92 | |||
93 | /* | ||
94 | * The struct ip_vs_service_user and struct ip_vs_dest_user are | ||
95 | * used to set IPVS rules through setsockopt. | ||
96 | */ | ||
97 | struct ip_vs_service_user { | ||
98 | /* virtual service addresses */ | ||
99 | u_int16_t protocol; | ||
100 | __be32 addr; /* virtual ip address */ | ||
101 | __be16 port; | ||
102 | u_int32_t fwmark; /* firwall mark of service */ | ||
103 | |||
104 | /* virtual service options */ | ||
105 | char sched_name[IP_VS_SCHEDNAME_MAXLEN]; | ||
106 | unsigned flags; /* virtual service flags */ | ||
107 | unsigned timeout; /* persistent timeout in sec */ | ||
108 | __be32 netmask; /* persistent netmask */ | ||
109 | }; | ||
110 | |||
111 | |||
112 | struct ip_vs_dest_user { | ||
113 | /* destination server address */ | ||
114 | __be32 addr; | ||
115 | __be16 port; | ||
116 | |||
117 | /* real server options */ | ||
118 | unsigned conn_flags; /* connection flags */ | ||
119 | int weight; /* destination weight */ | ||
120 | |||
121 | /* thresholds for active connections */ | ||
122 | u_int32_t u_threshold; /* upper threshold */ | ||
123 | u_int32_t l_threshold; /* lower threshold */ | ||
124 | }; | ||
125 | |||
126 | |||
127 | /* | ||
128 | * IPVS statistics object (for user space) | ||
129 | */ | ||
130 | struct ip_vs_stats_user | ||
131 | { | ||
132 | __u32 conns; /* connections scheduled */ | ||
133 | __u32 inpkts; /* incoming packets */ | ||
134 | __u32 outpkts; /* outgoing packets */ | ||
135 | __u64 inbytes; /* incoming bytes */ | ||
136 | __u64 outbytes; /* outgoing bytes */ | ||
137 | |||
138 | __u32 cps; /* current connection rate */ | ||
139 | __u32 inpps; /* current in packet rate */ | ||
140 | __u32 outpps; /* current out packet rate */ | ||
141 | __u32 inbps; /* current in byte rate */ | ||
142 | __u32 outbps; /* current out byte rate */ | ||
143 | }; | ||
144 | |||
145 | |||
146 | /* The argument to IP_VS_SO_GET_INFO */ | ||
147 | struct ip_vs_getinfo { | ||
148 | /* version number */ | ||
149 | unsigned int version; | ||
150 | |||
151 | /* size of connection hash table */ | ||
152 | unsigned int size; | ||
153 | |||
154 | /* number of virtual services */ | ||
155 | unsigned int num_services; | ||
156 | }; | ||
157 | |||
158 | |||
159 | /* The argument to IP_VS_SO_GET_SERVICE */ | ||
160 | struct ip_vs_service_entry { | ||
161 | /* which service: user fills in these */ | ||
162 | u_int16_t protocol; | ||
163 | __be32 addr; /* virtual address */ | ||
164 | __be16 port; | ||
165 | u_int32_t fwmark; /* firwall mark of service */ | ||
166 | |||
167 | /* service options */ | ||
168 | char sched_name[IP_VS_SCHEDNAME_MAXLEN]; | ||
169 | unsigned flags; /* virtual service flags */ | ||
170 | unsigned timeout; /* persistent timeout */ | ||
171 | __be32 netmask; /* persistent netmask */ | ||
172 | |||
173 | /* number of real servers */ | ||
174 | unsigned int num_dests; | ||
175 | |||
176 | /* statistics */ | ||
177 | struct ip_vs_stats_user stats; | ||
178 | }; | ||
179 | |||
180 | |||
181 | struct ip_vs_dest_entry { | ||
182 | __be32 addr; /* destination address */ | ||
183 | __be16 port; | ||
184 | unsigned conn_flags; /* connection flags */ | ||
185 | int weight; /* destination weight */ | ||
186 | |||
187 | u_int32_t u_threshold; /* upper threshold */ | ||
188 | u_int32_t l_threshold; /* lower threshold */ | ||
189 | |||
190 | u_int32_t activeconns; /* active connections */ | ||
191 | u_int32_t inactconns; /* inactive connections */ | ||
192 | u_int32_t persistconns; /* persistent connections */ | ||
193 | |||
194 | /* statistics */ | ||
195 | struct ip_vs_stats_user stats; | ||
196 | }; | ||
197 | |||
198 | |||
199 | /* The argument to IP_VS_SO_GET_DESTS */ | ||
200 | struct ip_vs_get_dests { | ||
201 | /* which service: user fills in these */ | ||
202 | u_int16_t protocol; | ||
203 | __be32 addr; /* virtual address */ | ||
204 | __be16 port; | ||
205 | u_int32_t fwmark; /* firwall mark of service */ | ||
206 | |||
207 | /* number of real servers */ | ||
208 | unsigned int num_dests; | ||
209 | |||
210 | /* the real servers */ | ||
211 | struct ip_vs_dest_entry entrytable[0]; | ||
212 | }; | ||
213 | |||
214 | |||
215 | /* The argument to IP_VS_SO_GET_SERVICES */ | ||
216 | struct ip_vs_get_services { | ||
217 | /* number of virtual services */ | ||
218 | unsigned int num_services; | ||
219 | |||
220 | /* service table */ | ||
221 | struct ip_vs_service_entry entrytable[0]; | ||
222 | }; | ||
223 | |||
224 | |||
225 | /* The argument to IP_VS_SO_GET_TIMEOUT */ | ||
226 | struct ip_vs_timeout_user { | ||
227 | int tcp_timeout; | ||
228 | int tcp_fin_timeout; | ||
229 | int udp_timeout; | ||
230 | }; | ||
231 | |||
232 | |||
233 | /* The argument to IP_VS_SO_GET_DAEMON */ | ||
234 | struct ip_vs_daemon_user { | ||
235 | /* sync daemon state (master/backup) */ | ||
236 | int state; | ||
237 | |||
238 | /* multicast interface name */ | ||
239 | char mcast_ifn[IP_VS_IFNAME_MAXLEN]; | ||
240 | |||
241 | /* SyncID we belong to */ | ||
242 | int syncid; | ||
243 | }; | ||
244 | |||
245 | #endif /* _IP_VS_H */ | ||
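Note: the header describes a setsockopt/getsockopt interface. A hedged userspace sketch of querying the IPVS version and table size; the socket type and the IPPROTO_IP level mirror how ipvsadm-style tools typically use this API and are assumptions, not stated in the header (CAP_NET_ADMIN is required):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ip_vs.h>

int main(void)
{
	struct ip_vs_getinfo info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return 1;
	if (getsockopt(fd, IPPROTO_IP, IP_VS_SO_GET_INFO, &info, &len) == 0)
		printf("IPVS v%d.%d.%d, conn table size %u, %u virtual services\n",
		       NVERSION(info.version), info.size, info.num_services);
	return 0;
}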
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 82f88a8a827b..32110cede64f 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -130,8 +130,8 @@ void vmcoreinfo_append_str(const char *fmt, ...) | |||
130 | __attribute__ ((format (printf, 1, 2))); | 130 | __attribute__ ((format (printf, 1, 2))); |
131 | unsigned long paddr_vmcoreinfo_note(void); | 131 | unsigned long paddr_vmcoreinfo_note(void); |
132 | 132 | ||
133 | #define VMCOREINFO_OSRELEASE(name) \ | 133 | #define VMCOREINFO_OSRELEASE(value) \ |
134 | vmcoreinfo_append_str("OSRELEASE=%s\n", #name) | 134 | vmcoreinfo_append_str("OSRELEASE=%s\n", value) |
135 | #define VMCOREINFO_PAGESIZE(value) \ | 135 | #define VMCOREINFO_PAGESIZE(value) \ |
136 | vmcoreinfo_append_str("PAGESIZE=%ld\n", value) | 136 | vmcoreinfo_append_str("PAGESIZE=%ld\n", value) |
137 | #define VMCOREINFO_SYMBOL(name) \ | 137 | #define VMCOREINFO_SYMBOL(name) \ |
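Note: VMCOREINFO_OSRELEASE() now records the run-time string it is passed instead of the stringified token, so a call such as the illustrative one below emits the actual release string (e.g. "OSRELEASE=2.6.27-rc2") rather than the literal argument text:

VMCOREINFO_OSRELEASE(init_uts_ns.name.release);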
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 0ea064cbfbc8..69511f74f912 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -371,6 +371,7 @@ struct kvm_trace_rec { | |||
371 | #define KVM_CAP_PV_MMU 13 | 371 | #define KVM_CAP_PV_MMU 13 |
372 | #define KVM_CAP_MP_STATE 14 | 372 | #define KVM_CAP_MP_STATE 14 |
373 | #define KVM_CAP_COALESCED_MMIO 15 | 373 | #define KVM_CAP_COALESCED_MMIO 15 |
374 | #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ | ||
374 | 375 | ||
375 | /* | 376 | /* |
376 | * ioctls for VM fds | 377 | * ioctls for VM fds |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 07d68a8ae8e9..8525afc53107 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -121,6 +121,12 @@ struct kvm { | |||
121 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; | 121 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; |
122 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; | 122 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
123 | #endif | 123 | #endif |
124 | |||
125 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER | ||
126 | struct mmu_notifier mmu_notifier; | ||
127 | unsigned long mmu_notifier_seq; | ||
128 | long mmu_notifier_count; | ||
129 | #endif | ||
124 | }; | 130 | }; |
125 | 131 | ||
126 | /* The guest did something we don't support. */ | 132 | /* The guest did something we don't support. */ |
@@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) | |||
332 | #define kvm_trace_cleanup() ((void)0) | 338 | #define kvm_trace_cleanup() ((void)0) |
333 | #endif | 339 | #endif |
334 | 340 | ||
341 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER | ||
342 | static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) | ||
343 | { | ||
344 | if (unlikely(vcpu->kvm->mmu_notifier_count)) | ||
345 | return 1; | ||
346 | /* | ||
347 | * Both reads happen under the mmu_lock and both values are | ||
348 | * modified under mmu_lock, so there's no need of smb_rmb() | ||
349 | * here in between, otherwise mmu_notifier_count should be | ||
350 | * read before mmu_notifier_seq, see | ||
351 | * mmu_notifier_invalidate_range_end write side. | ||
352 | */ | ||
353 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) | ||
354 | return 1; | ||
355 | return 0; | ||
356 | } | ||
357 | #endif | ||
358 | |||
335 | #endif | 359 | #endif |
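Note: a hedged sketch of the caller pattern the comment above implies; the surrounding page-fault handler, the gfn-to-pfn step and the use of kvm->mmu_lock are assumptions, not part of this patch:

unsigned long mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
/* ... resolve gfn to pfn, which may sleep ... */

spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq)) {
	spin_unlock(&vcpu->kvm->mmu_lock);
	return;		/* an invalidation ran in between; retry the fault */
}
/* ... install the mapping under mmu_lock ... */
spin_unlock(&vcpu->kvm->mmu_lock);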
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5b247b8a6b3b..06b80337303b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -60,9 +60,9 @@ | |||
60 | 60 | ||
61 | /* note: prints function name for you */ | 61 | /* note: prints function name for you */ |
62 | #ifdef ATA_DEBUG | 62 | #ifdef ATA_DEBUG |
63 | #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) | 63 | #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) |
64 | #ifdef ATA_VERBOSE_DEBUG | 64 | #ifdef ATA_VERBOSE_DEBUG |
65 | #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) | 65 | #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) |
66 | #else | 66 | #else |
67 | #define VPRINTK(fmt, args...) | 67 | #define VPRINTK(fmt, args...) |
68 | #endif /* ATA_VERBOSE_DEBUG */ | 68 | #endif /* ATA_VERBOSE_DEBUG */ |
@@ -71,7 +71,7 @@ | |||
71 | #define VPRINTK(fmt, args...) | 71 | #define VPRINTK(fmt, args...) |
72 | #endif /* ATA_DEBUG */ | 72 | #endif /* ATA_DEBUG */ |
73 | 73 | ||
74 | #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) | 74 | #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) |
75 | 75 | ||
76 | /* NEW: debug levels */ | 76 | /* NEW: debug levels */ |
77 | #define HAVE_LIBATA_MSG 1 | 77 | #define HAVE_LIBATA_MSG 1 |
@@ -750,6 +750,7 @@ struct ata_port_operations { | |||
750 | void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); | 750 | void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); |
751 | void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); | 751 | void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); |
752 | int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); | 752 | int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); |
753 | unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); | ||
753 | 754 | ||
754 | void (*dev_config)(struct ata_device *dev); | 755 | void (*dev_config)(struct ata_device *dev); |
755 | 756 | ||
@@ -951,6 +952,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, | |||
951 | unsigned int ofs, unsigned int len); | 952 | unsigned int ofs, unsigned int len); |
952 | extern void ata_id_c_string(const u16 *id, unsigned char *s, | 953 | extern void ata_id_c_string(const u16 *id, unsigned char *s, |
953 | unsigned int ofs, unsigned int len); | 954 | unsigned int ofs, unsigned int len); |
955 | extern unsigned int ata_do_dev_read_id(struct ata_device *dev, | ||
956 | struct ata_taskfile *tf, u16 *id); | ||
954 | extern void ata_qc_complete(struct ata_queued_cmd *qc); | 957 | extern void ata_qc_complete(struct ata_queued_cmd *qc); |
955 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); | 958 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); |
956 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | 959 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, |
diff --git a/include/linux/list.h b/include/linux/list.h
index 453916bc0412..db35ef02e745 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -214,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) | |||
214 | return !list_empty(head) && (head->next == head->prev); | 214 | return !list_empty(head) && (head->next == head->prev); |
215 | } | 215 | } |
216 | 216 | ||
217 | static inline void __list_cut_position(struct list_head *list, | ||
218 | struct list_head *head, struct list_head *entry) | ||
219 | { | ||
220 | struct list_head *new_first = entry->next; | ||
221 | list->next = head->next; | ||
222 | list->next->prev = list; | ||
223 | list->prev = entry; | ||
224 | entry->next = list; | ||
225 | head->next = new_first; | ||
226 | new_first->prev = head; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * list_cut_position - cut a list into two | ||
231 | * @list: a new list to add all removed entries | ||
232 | * @head: a list with entries | ||
233 | * @entry: an entry within head, could be the head itself | ||
234 | * and if so we won't cut the list | ||
235 | * | ||
236 | * This helper moves the initial part of @head, up to and | ||
237 | * including @entry, from @head to @list. You should | ||
238 | * pass on @entry an element you know is on @head. @list | ||
239 | * should be an empty list or a list you do not care about | ||
240 | * losing its data. | ||
241 | * | ||
242 | */ | ||
243 | static inline void list_cut_position(struct list_head *list, | ||
244 | struct list_head *head, struct list_head *entry) | ||
245 | { | ||
246 | if (list_empty(head)) | ||
247 | return; | ||
248 | if (list_is_singular(head) && | ||
249 | (head->next != entry && head != entry)) | ||
250 | return; | ||
251 | if (entry == head) | ||
252 | INIT_LIST_HEAD(list); | ||
253 | else | ||
254 | __list_cut_position(list, head, entry); | ||
255 | } | ||
256 | |||
217 | static inline void __list_splice(const struct list_head *list, | 257 | static inline void __list_splice(const struct list_head *list, |
218 | struct list_head *head) | 258 | struct list_head *prev, |
259 | struct list_head *next) | ||
219 | { | 260 | { |
220 | struct list_head *first = list->next; | 261 | struct list_head *first = list->next; |
221 | struct list_head *last = list->prev; | 262 | struct list_head *last = list->prev; |
222 | struct list_head *at = head->next; | ||
223 | 263 | ||
224 | first->prev = head; | 264 | first->prev = prev; |
225 | head->next = first; | 265 | prev->next = first; |
226 | 266 | ||
227 | last->next = at; | 267 | last->next = next; |
228 | at->prev = last; | 268 | next->prev = last; |
229 | } | 269 | } |
230 | 270 | ||
231 | /** | 271 | /** |
232 | * list_splice - join two lists | 272 | * list_splice - join two lists, this is designed for stacks |
233 | * @list: the new list to add. | 273 | * @list: the new list to add. |
234 | * @head: the place to add it in the first list. | 274 | * @head: the place to add it in the first list. |
235 | */ | 275 | */ |
@@ -237,7 +277,19 @@ static inline void list_splice(const struct list_head *list, | |||
237 | struct list_head *head) | 277 | struct list_head *head) |
238 | { | 278 | { |
239 | if (!list_empty(list)) | 279 | if (!list_empty(list)) |
240 | __list_splice(list, head); | 280 | __list_splice(list, head, head->next); |
281 | } | ||
282 | |||
283 | /** | ||
284 | * list_splice_tail - join two lists, each list being a queue | ||
285 | * @list: the new list to add. | ||
286 | * @head: the place to add it in the first list. | ||
287 | */ | ||
288 | static inline void list_splice_tail(struct list_head *list, | ||
289 | struct list_head *head) | ||
290 | { | ||
291 | if (!list_empty(list)) | ||
292 | __list_splice(list, head->prev, head); | ||
241 | } | 293 | } |
242 | 294 | ||
243 | /** | 295 | /** |
@@ -251,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, | |||
251 | struct list_head *head) | 303 | struct list_head *head) |
252 | { | 304 | { |
253 | if (!list_empty(list)) { | 305 | if (!list_empty(list)) { |
254 | __list_splice(list, head); | 306 | __list_splice(list, head, head->next); |
307 | INIT_LIST_HEAD(list); | ||
308 | } | ||
309 | } | ||
310 | |||
311 | /** | ||
312 | * list_splice_tail_init - join two lists and reinitialise the emptied list | ||
313 | * @list: the new list to add. | ||
314 | * @head: the place to add it in the first list. | ||
315 | * | ||
316 | * Each of the lists is a queue. | ||
317 | * The list at @list is reinitialised | ||
318 | */ | ||
319 | static inline void list_splice_tail_init(struct list_head *list, | ||
320 | struct list_head *head) | ||
321 | { | ||
322 | if (!list_empty(list)) { | ||
323 | __list_splice(list, head->prev, head); | ||
255 | INIT_LIST_HEAD(list); | 324 | INIT_LIST_HEAD(list); |
256 | } | 325 | } |
257 | } | 326 | } |
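Note: illustrative use of the new helpers ('entry' and 'work_list' are hypothetical): list_cut_position() splits a queue after a known entry, and list_splice_tail_init() appends one queue to another while preserving FIFO order:

LIST_HEAD(pending);
LIST_HEAD(batch);

/* Move everything up to and including 'entry' from pending to batch. */
list_cut_position(&batch, &pending, entry);

/* Append the whole batch at the tail of work_list, leaving batch
 * empty and reusable. */
list_splice_tail_init(&batch, &work_list);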
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 5c948f337817..8f2d60da04e7 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -37,7 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | #define MISDN_MAJOR_VERSION 1 | 38 | #define MISDN_MAJOR_VERSION 1 |
39 | #define MISDN_MINOR_VERSION 0 | 39 | #define MISDN_MINOR_VERSION 0 |
40 | #define MISDN_RELEASE 18 | 40 | #define MISDN_RELEASE 19 |
41 | 41 | ||
42 | /* primitives for information exchange | 42 | /* primitives for information exchange |
43 | * generell format | 43 | * generell format |
@@ -242,7 +242,8 @@ struct mISDNhead { | |||
242 | #define TEI_SAPI 63 | 242 | #define TEI_SAPI 63 |
243 | #define CTRL_SAPI 0 | 243 | #define CTRL_SAPI 0 |
244 | 244 | ||
245 | #define MISDN_CHMAP_SIZE 4 | 245 | #define MISDN_MAX_CHANNEL 127 |
246 | #define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) | ||
246 | 247 | ||
247 | #define SOL_MISDN 0 | 248 | #define SOL_MISDN 0 |
248 | 249 | ||
@@ -275,11 +276,32 @@ struct mISDN_devinfo { | |||
275 | u_int Dprotocols; | 276 | u_int Dprotocols; |
276 | u_int Bprotocols; | 277 | u_int Bprotocols; |
277 | u_int protocol; | 278 | u_int protocol; |
278 | u_long channelmap[MISDN_CHMAP_SIZE]; | 279 | u_char channelmap[MISDN_CHMAP_SIZE]; |
279 | u_int nrbchan; | 280 | u_int nrbchan; |
280 | char name[MISDN_MAX_IDLEN]; | 281 | char name[MISDN_MAX_IDLEN]; |
281 | }; | 282 | }; |
282 | 283 | ||
284 | static inline int | ||
285 | test_channelmap(u_int nr, u_char *map) | ||
286 | { | ||
287 | if (nr <= MISDN_MAX_CHANNEL) | ||
288 | return map[nr >> 3] & (1 << (nr & 7)); | ||
289 | else | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static inline void | ||
294 | set_channelmap(u_int nr, u_char *map) | ||
295 | { | ||
296 | map[nr >> 3] |= (1 << (nr & 7)); | ||
297 | } | ||
298 | |||
299 | static inline void | ||
300 | clear_channelmap(u_int nr, u_char *map) | ||
301 | { | ||
302 | map[nr >> 3] &= ~(1 << (nr & 7)); | ||
303 | } | ||
304 | |||
283 | /* CONTROL_CHANNEL parameters */ | 305 | /* CONTROL_CHANNEL parameters */ |
284 | #define MISDN_CTRL_GETOP 0x0000 | 306 | #define MISDN_CTRL_GETOP 0x0000 |
285 | #define MISDN_CTRL_LOOP 0x0001 | 307 | #define MISDN_CTRL_LOOP 0x0001 |
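Note: the channel map is now one bit per channel in a byte array; channel 66, for example, lives in byte 66 >> 3 = 8, bit 66 & 7 = 2:

u_char map[MISDN_CHMAP_SIZE] = { 0 };	/* 128 bits in 16 bytes */

set_channelmap(66, map);		/* map[8] |= 0x04 */
if (test_channelmap(66, map))
	clear_channelmap(66, map);	/* map[8] &= ~0x04 */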
@@ -405,7 +427,7 @@ struct mISDNdevice { | |||
405 | u_int Dprotocols; | 427 | u_int Dprotocols; |
406 | u_int Bprotocols; | 428 | u_int Bprotocols; |
407 | u_int nrbchan; | 429 | u_int nrbchan; |
408 | u_long channelmap[MISDN_CHMAP_SIZE]; | 430 | u_char channelmap[MISDN_CHMAP_SIZE]; |
409 | struct list_head bchannels; | 431 | struct list_head bchannels; |
410 | struct mISDNchannel *teimgr; | 432 | struct mISDNchannel *teimgr; |
411 | struct device dev; | 433 | struct device dev; |
@@ -430,7 +452,7 @@ struct mISDNstack { | |||
430 | #endif | 452 | #endif |
431 | }; | 453 | }; |
432 | 454 | ||
433 | /* global alloc/queue dunctions */ | 455 | /* global alloc/queue functions */ |
434 | 456 | ||
435 | static inline struct sk_buff * | 457 | static inline struct sk_buff * |
436 | mI_alloc_skb(unsigned int len, gfp_t gfp_mask) | 458 | mI_alloc_skb(unsigned int len, gfp_t gfp_mask) |
diff --git a/include/linux/maple.h b/include/linux/maple.h
index c853b1066018..c23d3f51ba40 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -51,7 +51,6 @@ struct maple_devinfo { | |||
51 | struct maple_device { | 51 | struct maple_device { |
52 | struct maple_driver *driver; | 52 | struct maple_driver *driver; |
53 | struct mapleq *mq; | 53 | struct mapleq *mq; |
54 | void *private_data; | ||
55 | void (*callback) (struct mapleq * mq); | 54 | void (*callback) (struct mapleq * mq); |
56 | unsigned long when, interval, function; | 55 | unsigned long when, interval, function; |
57 | struct maple_devinfo devinfo; | 56 | struct maple_devinfo devinfo; |
@@ -70,7 +69,9 @@ void maple_getcond_callback(struct maple_device *dev, | |||
70 | void (*callback) (struct mapleq * mq), | 69 | void (*callback) (struct mapleq * mq), |
71 | unsigned long interval, | 70 | unsigned long interval, |
72 | unsigned long function); | 71 | unsigned long function); |
73 | int maple_driver_register(struct device_driver *drv); | 72 | int maple_driver_register(struct maple_driver *); |
73 | void maple_driver_unregister(struct maple_driver *); | ||
74 | |||
74 | int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, | 75 | int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, |
75 | u32 command, u32 length, void *data); | 76 | u32 command, u32 length, void *data); |
76 | void maple_clear_dev(struct maple_device *mdev); | 77 | void maple_clear_dev(struct maple_device *mdev); |
@@ -78,4 +79,7 @@ void maple_clear_dev(struct maple_device *mdev); | |||
78 | #define to_maple_dev(n) container_of(n, struct maple_device, dev) | 79 | #define to_maple_dev(n) container_of(n, struct maple_device, dev) |
79 | #define to_maple_driver(n) container_of(n, struct maple_driver, drv) | 80 | #define to_maple_driver(n) container_of(n, struct maple_driver, drv) |
80 | 81 | ||
82 | #define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) | ||
83 | #define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) | ||
84 | |||
81 | #endif /* __LINUX_MAPLE_H */ | 85 | #endif /* __LINUX_MAPLE_H */ |
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
new file mode 100644
index 000000000000..e83c7f2036f9
--- /dev/null
+++ b/include/linux/mfd/t7l66xb.h
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * This file contains the definitions for the T7L66XB | ||
3 | * | ||
4 | * (C) Copyright 2005 Ian Molton <spyro@f2s.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | #ifndef MFD_T7L66XB_H | ||
12 | #define MFD_T7L66XB_H | ||
13 | |||
14 | #include <linux/mfd/core.h> | ||
15 | #include <linux/mfd/tmio.h> | ||
16 | |||
17 | struct t7l66xb_platform_data { | ||
18 | int (*enable_clk32k)(struct platform_device *dev); | ||
19 | void (*disable_clk32k)(struct platform_device *dev); | ||
20 | int (*enable)(struct platform_device *dev); | ||
21 | int (*disable)(struct platform_device *dev); | ||
22 | int (*suspend)(struct platform_device *dev); | ||
23 | int (*resume)(struct platform_device *dev); | ||
24 | |||
25 | int irq_base; /* The base for subdevice irqs */ | ||
26 | |||
27 | struct tmio_nand_data *nand_data; | ||
28 | }; | ||
29 | |||
30 | |||
31 | #define IRQ_T7L66XB_MMC (1) | ||
32 | #define IRQ_T7L66XB_NAND (3) | ||
33 | |||
34 | #define T7L66XB_NR_IRQS 8 | ||
35 | |||
36 | #endif | ||
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
new file mode 100644
index 000000000000..fa06e0610b8e
--- /dev/null
+++ b/include/linux/mfd/tc6387xb.h
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * This file contains the definitions for the TC6387XB | ||
3 | * | ||
4 | * (C) Copyright 2005 Ian Molton <spyro@f2s.com> | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | */ | ||
10 | #ifndef MFD_TC6387XB_H | ||
11 | #define MFD_TC6387XB_H | ||
12 | |||
13 | struct tc6387xb_platform_data { | ||
14 | int (*enable_clk32k)(struct platform_device *dev); | ||
15 | void (*disable_clk32k)(struct platform_device *dev); | ||
16 | |||
17 | int (*enable)(struct platform_device *dev); | ||
18 | int (*disable)(struct platform_device *dev); | ||
19 | int (*suspend)(struct platform_device *dev); | ||
20 | int (*resume)(struct platform_device *dev); | ||
21 | }; | ||
22 | |||
23 | #endif | ||
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index 7cc824a58f7c..fec7b3f7a81f 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -14,8 +14,8 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef TC6393XB_H | 17 | #ifndef MFD_TC6393XB_H |
18 | #define TC6393XB_H | 18 | #define MFD_TC6393XB_H |
19 | 19 | ||
20 | /* Also one should provide the CK3P6MI clock */ | 20 | /* Also one should provide the CK3P6MI clock */ |
21 | struct tc6393xb_platform_data { | 21 | struct tc6393xb_platform_data { |
@@ -29,7 +29,7 @@ struct tc6393xb_platform_data { | |||
29 | int (*suspend)(struct platform_device *dev); | 29 | int (*suspend)(struct platform_device *dev); |
30 | int (*resume)(struct platform_device *dev); | 30 | int (*resume)(struct platform_device *dev); |
31 | 31 | ||
32 | int irq_base; /* a base for cascaded irq */ | 32 | int irq_base; /* base for subdevice irqs */ |
33 | int gpio_base; | 33 | int gpio_base; |
34 | 34 | ||
35 | struct tmio_nand_data *nand_data; | 35 | struct tmio_nand_data *nand_data; |
@@ -40,9 +40,6 @@ struct tc6393xb_platform_data { | |||
40 | */ | 40 | */ |
41 | #define IRQ_TC6393_NAND 0 | 41 | #define IRQ_TC6393_NAND 0 |
42 | #define IRQ_TC6393_MMC 1 | 42 | #define IRQ_TC6393_MMC 1 |
43 | #define IRQ_TC6393_OHCI 2 | ||
44 | #define IRQ_TC6393_SERIAL 3 | ||
45 | #define IRQ_TC6393_FB 4 | ||
46 | 43 | ||
47 | #define TC6393XB_NR_IRQS 8 | 44 | #define TC6393XB_NR_IRQS 8 |
48 | 45 | ||
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 9438d8c9ac1c..ec612e66391c 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
@@ -1,6 +1,21 @@ | |||
1 | #ifndef MFD_TMIO_H | 1 | #ifndef MFD_TMIO_H |
2 | #define MFD_TMIO_H | 2 | #define MFD_TMIO_H |
3 | 3 | ||
4 | #define tmio_ioread8(addr) readb(addr) | ||
5 | #define tmio_ioread16(addr) readw(addr) | ||
6 | #define tmio_ioread16_rep(r, b, l) readsw(r, b, l) | ||
7 | #define tmio_ioread32(addr) \ | ||
8 | (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) | ||
9 | |||
10 | #define tmio_iowrite8(val, addr) writeb((val), (addr)) | ||
11 | #define tmio_iowrite16(val, addr) writew((val), (addr)) | ||
12 | #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) | ||
13 | #define tmio_iowrite32(val, addr) \ | ||
14 | do { \ | ||
15 | writew((val), (addr)); \ | ||
16 | writew((val) >> 16, (addr) + 2); \ | ||
17 | } while (0) | ||
18 | |||
4 | /* | 19 | /* |
5 | * data for the NAND controller | 20 | * data for the NAND controller |
6 | */ | 21 | */ |
@@ -10,8 +25,4 @@ struct tmio_nand_data { | |||
10 | unsigned int num_partitions; | 25 | unsigned int num_partitions; |
11 | }; | 26 | }; |
12 | 27 | ||
13 | #define TMIO_NAND_CONFIG "tmio-nand-config" | ||
14 | #define TMIO_NAND_CONTROL "tmio-nand-control" | ||
15 | #define TMIO_NAND_IRQ "tmio-nand" | ||
16 | |||
17 | #endif | 28 | #endif |
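Editor's note: the new tmio_* accessors wrap readw()/writew() so a 32-bit register behind a 16-bit bus is accessed as two halfwords. A hedged sketch of how a subdevice driver might use them; the register offsets are made up for illustration.

	#include <linux/io.h>
	#include <linux/mfd/tmio.h>

	#define EXAMPLE_STATUS	0x00	/* hypothetical 32-bit status register */
	#define EXAMPLE_ARG	0x04	/* hypothetical 32-bit argument register */

	static u32 example_issue(void __iomem *ctl, u32 arg)
	{
		/* tmio_iowrite32() writes the low halfword, then the high one at +2 */
		tmio_iowrite32(arg, ctl + EXAMPLE_ARG);

		/* tmio_ioread32() assembles a 32-bit value from two 16-bit reads */
		return tmio_ioread32(ctl + EXAMPLE_STATUS);
	}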
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 071cf96cf01f..6f65b2c8bb89 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h | |||
@@ -39,17 +39,18 @@ | |||
39 | #include <linux/mlx4/doorbell.h> | 39 | #include <linux/mlx4/doorbell.h> |
40 | 40 | ||
41 | struct mlx4_cqe { | 41 | struct mlx4_cqe { |
42 | __be32 my_qpn; | 42 | __be32 vlan_my_qpn; |
43 | __be32 immed_rss_invalid; | 43 | __be32 immed_rss_invalid; |
44 | __be32 g_mlpath_rqpn; | 44 | __be32 g_mlpath_rqpn; |
45 | u8 sl; | 45 | __be16 sl_vid; |
46 | u8 reserved1; | ||
47 | __be16 rlid; | 46 | __be16 rlid; |
48 | __be32 ipoib_status; | 47 | __be16 status; |
48 | u8 ipv6_ext_mask; | ||
49 | u8 badfcs_enc; | ||
49 | __be32 byte_cnt; | 50 | __be32 byte_cnt; |
50 | __be16 wqe_index; | 51 | __be16 wqe_index; |
51 | __be16 checksum; | 52 | __be16 checksum; |
52 | u8 reserved2[3]; | 53 | u8 reserved[3]; |
53 | u8 owner_sr_opcode; | 54 | u8 owner_sr_opcode; |
54 | }; | 55 | }; |
55 | 56 | ||
@@ -64,6 +65,11 @@ struct mlx4_err_cqe { | |||
64 | }; | 65 | }; |
65 | 66 | ||
66 | enum { | 67 | enum { |
68 | MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, | ||
69 | MLX4_CQE_QPN_MASK = 0xffffff, | ||
70 | }; | ||
71 | |||
72 | enum { | ||
67 | MLX4_CQE_OWNER_MASK = 0x80, | 73 | MLX4_CQE_OWNER_MASK = 0x80, |
68 | MLX4_CQE_IS_SEND_MASK = 0x40, | 74 | MLX4_CQE_IS_SEND_MASK = 0x40, |
69 | MLX4_CQE_OPCODE_MASK = 0x1f | 75 | MLX4_CQE_OPCODE_MASK = 0x1f |
@@ -86,13 +92,19 @@ enum { | |||
86 | }; | 92 | }; |
87 | 93 | ||
88 | enum { | 94 | enum { |
89 | MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, | 95 | MLX4_CQE_STATUS_IPV4 = 1 << 6, |
90 | MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, | 96 | MLX4_CQE_STATUS_IPV4F = 1 << 7, |
91 | MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, | 97 | MLX4_CQE_STATUS_IPV6 = 1 << 8, |
92 | MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, | 98 | MLX4_CQE_STATUS_IPV4OPT = 1 << 9, |
93 | MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, | 99 | MLX4_CQE_STATUS_TCP = 1 << 10, |
94 | MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, | 100 | MLX4_CQE_STATUS_UDP = 1 << 11, |
95 | MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, | 101 | MLX4_CQE_STATUS_IPOK = 1 << 12, |
102 | }; | ||
103 | |||
104 | enum { | ||
105 | MLX4_CQE_LLC = 1, | ||
106 | MLX4_CQE_SNAP = 1 << 1, | ||
107 | MLX4_CQE_BAD_FCS = 1 << 4, | ||
96 | }; | 108 | }; |
97 | 109 | ||
98 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, | 110 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, |
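Editor's note: with the CQE layout change the QPN now shares vlan_my_qpn with the VLAN-present bit, and the offload flags move into the 16-bit status word. A hedged sketch of a receive-path check; the surrounding driver context is assumed.

	#include <linux/mlx4/cq.h>

	/* Roughly how an Ethernet RX path might extract the QPN and decide
	 * that hardware verified the checksum (sketch only). */
	static int example_rx_csum_ok(struct mlx4_cqe *cqe, u32 *qpn)
	{
		*qpn = be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;

		return (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		       (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						   MLX4_CQE_STATUS_UDP));
	}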
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b4d056ceab96..488c56e649b5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -61,9 +61,7 @@ struct wireless_dev; | |||
61 | #define NET_XMIT_DROP 1 /* skb dropped */ | 61 | #define NET_XMIT_DROP 1 /* skb dropped */ |
62 | #define NET_XMIT_CN 2 /* congestion notification */ | 62 | #define NET_XMIT_CN 2 /* congestion notification */ |
63 | #define NET_XMIT_POLICED 3 /* skb is shot by police */ | 63 | #define NET_XMIT_POLICED 3 /* skb is shot by police */ |
64 | #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; | 64 | #define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ |
65 | (TC use only - dev_queue_xmit | ||
66 | returns this as NET_XMIT_SUCCESS) */ | ||
67 | 65 | ||
68 | /* Backlog congestion levels */ | 66 | /* Backlog congestion levels */ |
69 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | 67 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
@@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n) | |||
440 | enum netdev_queue_state_t | 438 | enum netdev_queue_state_t |
441 | { | 439 | { |
442 | __QUEUE_STATE_XOFF, | 440 | __QUEUE_STATE_XOFF, |
441 | __QUEUE_STATE_FROZEN, | ||
443 | }; | 442 | }; |
444 | 443 | ||
445 | struct netdev_queue { | 444 | struct netdev_queue { |
@@ -636,7 +635,7 @@ struct net_device | |||
636 | unsigned int real_num_tx_queues; | 635 | unsigned int real_num_tx_queues; |
637 | 636 | ||
638 | unsigned long tx_queue_len; /* Max frames per queue allowed */ | 637 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
639 | 638 | spinlock_t tx_global_lock; | |
640 | /* | 639 | /* |
641 | * One part is mostly used on xmit path (device) | 640 | * One part is mostly used on xmit path (device) |
642 | */ | 641 | */ |
@@ -1099,6 +1098,11 @@ static inline int netif_queue_stopped(const struct net_device *dev) | |||
1099 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); | 1098 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1100 | } | 1099 | } |
1101 | 1100 | ||
1101 | static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) | ||
1102 | { | ||
1103 | return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); | ||
1104 | } | ||
1105 | |||
1102 | /** | 1106 | /** |
1103 | * netif_running - test if up | 1107 | * netif_running - test if up |
1104 | * @dev: network device | 1108 | * @dev: network device |
@@ -1475,6 +1479,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | |||
1475 | txq->xmit_lock_owner = smp_processor_id(); | 1479 | txq->xmit_lock_owner = smp_processor_id(); |
1476 | } | 1480 | } |
1477 | 1481 | ||
1482 | static inline int __netif_tx_trylock(struct netdev_queue *txq) | ||
1483 | { | ||
1484 | int ok = spin_trylock(&txq->_xmit_lock); | ||
1485 | if (likely(ok)) | ||
1486 | txq->xmit_lock_owner = smp_processor_id(); | ||
1487 | return ok; | ||
1488 | } | ||
1489 | |||
1490 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | ||
1491 | { | ||
1492 | txq->xmit_lock_owner = -1; | ||
1493 | spin_unlock(&txq->_xmit_lock); | ||
1494 | } | ||
1495 | |||
1496 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | ||
1497 | { | ||
1498 | txq->xmit_lock_owner = -1; | ||
1499 | spin_unlock_bh(&txq->_xmit_lock); | ||
1500 | } | ||
1501 | |||
1478 | /** | 1502 | /** |
1479 | * netif_tx_lock - grab network device transmit lock | 1503 | * netif_tx_lock - grab network device transmit lock |
1480 | * @dev: network device | 1504 | * @dev: network device |
@@ -1484,12 +1508,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | |||
1484 | */ | 1508 | */ |
1485 | static inline void netif_tx_lock(struct net_device *dev) | 1509 | static inline void netif_tx_lock(struct net_device *dev) |
1486 | { | 1510 | { |
1487 | int cpu = smp_processor_id(); | ||
1488 | unsigned int i; | 1511 | unsigned int i; |
1512 | int cpu; | ||
1489 | 1513 | ||
1514 | spin_lock(&dev->tx_global_lock); | ||
1515 | cpu = smp_processor_id(); | ||
1490 | for (i = 0; i < dev->num_tx_queues; i++) { | 1516 | for (i = 0; i < dev->num_tx_queues; i++) { |
1491 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 1517 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); |
1518 | |||
1519 | /* We are the only thread of execution doing a | ||
1520 | * freeze, but we have to grab the _xmit_lock in | ||
1521 | * order to synchronize with threads which are in | ||
1522 | * the ->hard_start_xmit() handler and already | ||
1523 | * checked the frozen bit. | ||
1524 | */ | ||
1492 | __netif_tx_lock(txq, cpu); | 1525 | __netif_tx_lock(txq, cpu); |
1526 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); | ||
1527 | __netif_tx_unlock(txq); | ||
1493 | } | 1528 | } |
1494 | } | 1529 | } |
1495 | 1530 | ||
@@ -1499,40 +1534,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev) | |||
1499 | netif_tx_lock(dev); | 1534 | netif_tx_lock(dev); |
1500 | } | 1535 | } |
1501 | 1536 | ||
1502 | static inline int __netif_tx_trylock(struct netdev_queue *txq) | ||
1503 | { | ||
1504 | int ok = spin_trylock(&txq->_xmit_lock); | ||
1505 | if (likely(ok)) | ||
1506 | txq->xmit_lock_owner = smp_processor_id(); | ||
1507 | return ok; | ||
1508 | } | ||
1509 | |||
1510 | static inline int netif_tx_trylock(struct net_device *dev) | ||
1511 | { | ||
1512 | return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); | ||
1513 | } | ||
1514 | |||
1515 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | ||
1516 | { | ||
1517 | txq->xmit_lock_owner = -1; | ||
1518 | spin_unlock(&txq->_xmit_lock); | ||
1519 | } | ||
1520 | |||
1521 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | ||
1522 | { | ||
1523 | txq->xmit_lock_owner = -1; | ||
1524 | spin_unlock_bh(&txq->_xmit_lock); | ||
1525 | } | ||
1526 | |||
1527 | static inline void netif_tx_unlock(struct net_device *dev) | 1537 | static inline void netif_tx_unlock(struct net_device *dev) |
1528 | { | 1538 | { |
1529 | unsigned int i; | 1539 | unsigned int i; |
1530 | 1540 | ||
1531 | for (i = 0; i < dev->num_tx_queues; i++) { | 1541 | for (i = 0; i < dev->num_tx_queues; i++) { |
1532 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 1542 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); |
1533 | __netif_tx_unlock(txq); | ||
1534 | } | ||
1535 | 1543 | ||
1544 | /* No need to grab the _xmit_lock here. If the | ||
1545 | * queue is not stopped for another reason, we | ||
1546 | * force a schedule. | ||
1547 | */ | ||
1548 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | ||
1549 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | ||
1550 | __netif_schedule(txq->qdisc); | ||
1551 | } | ||
1552 | spin_unlock(&dev->tx_global_lock); | ||
1536 | } | 1553 | } |
1537 | 1554 | ||
1538 | static inline void netif_tx_unlock_bh(struct net_device *dev) | 1555 | static inline void netif_tx_unlock_bh(struct net_device *dev) |
@@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) | |||
1556 | static inline void netif_tx_disable(struct net_device *dev) | 1573 | static inline void netif_tx_disable(struct net_device *dev) |
1557 | { | 1574 | { |
1558 | unsigned int i; | 1575 | unsigned int i; |
1576 | int cpu; | ||
1559 | 1577 | ||
1560 | netif_tx_lock_bh(dev); | 1578 | local_bh_disable(); |
1579 | cpu = smp_processor_id(); | ||
1561 | for (i = 0; i < dev->num_tx_queues; i++) { | 1580 | for (i = 0; i < dev->num_tx_queues; i++) { |
1562 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 1581 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); |
1582 | |||
1583 | __netif_tx_lock(txq, cpu); | ||
1563 | netif_tx_stop_queue(txq); | 1584 | netif_tx_stop_queue(txq); |
1585 | __netif_tx_unlock(txq); | ||
1564 | } | 1586 | } |
1565 | netif_tx_unlock_bh(dev); | 1587 | local_bh_enable(); |
1566 | } | 1588 | } |
1567 | 1589 | ||
1568 | static inline void netif_addr_lock(struct net_device *dev) | 1590 | static inline void netif_addr_lock(struct net_device *dev) |
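Editor's note: netif_tx_lock() now freezes each queue via __QUEUE_STATE_FROZEN instead of holding every _xmit_lock, and the __netif_tx_trylock()/__netif_tx_unlock() helpers move above it so drivers can use them. A hedged sketch of a TX-completion handler built on these helpers; the reclaim logic is only a placeholder.

	#include <linux/netdevice.h>

	/* Sketch: take the per-queue xmit lock opportunistically and back off
	 * while netif_tx_lock() has the device frozen on another CPU. */
	static void example_tx_complete(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (!__netif_tx_trylock(txq))
			return;			/* retry from the next interrupt */

		if (!netif_tx_queue_frozen(txq)) {
			/* ... reclaim completed descriptors, maybe wake the queue ... */
		}

		__netif_tx_unlock(txq);
	}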
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 22ce29995f13..a049df4f2236 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h | |||
@@ -30,6 +30,9 @@ enum tcp_conntrack { | |||
30 | /* Be liberal in window checking */ | 30 | /* Be liberal in window checking */ |
31 | #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 | 31 | #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 |
32 | 32 | ||
33 | /* Has unacknowledged data */ | ||
34 | #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 | ||
35 | |||
33 | struct nf_ct_tcp_flags { | 36 | struct nf_ct_tcp_flags { |
34 | u_int8_t flags; | 37 | u_int8_t flags; |
35 | u_int8_t mask; | 38 | u_int8_t mask; |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 25aaccdb2f26..c74d3e875314 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \ | |||
163 | 163 | ||
164 | struct page; /* forward declaration */ | 164 | struct page; /* forward declaration */ |
165 | 165 | ||
166 | PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) | 166 | TESTPAGEFLAG(Locked, locked) |
167 | PAGEFLAG(Error, error) | 167 | PAGEFLAG(Error, error) |
168 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) | 168 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) |
169 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) | 169 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 69ed3cb1197a..5da31c12101c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, | |||
250 | return read_cache_page(mapping, index, filler, data); | 250 | return read_cache_page(mapping, index, filler, data); |
251 | } | 251 | } |
252 | 252 | ||
253 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | ||
254 | pgoff_t index, gfp_t gfp_mask); | ||
255 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
256 | pgoff_t index, gfp_t gfp_mask); | ||
257 | extern void remove_from_page_cache(struct page *page); | ||
258 | extern void __remove_from_page_cache(struct page *page); | ||
259 | |||
260 | /* | ||
261 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | ||
262 | * the page is new, so we can just run SetPageLocked() against it. | ||
263 | */ | ||
264 | static inline int add_to_page_cache(struct page *page, | ||
265 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | ||
266 | { | ||
267 | int error; | ||
268 | |||
269 | SetPageLocked(page); | ||
270 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); | ||
271 | if (unlikely(error)) | ||
272 | ClearPageLocked(page); | ||
273 | return error; | ||
274 | } | ||
275 | |||
276 | /* | 253 | /* |
277 | * Return byte-offset into filesystem object for page. | 254 | * Return byte-offset into filesystem object for page. |
278 | */ | 255 | */ |
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page); | |||
294 | extern void __lock_page_nosync(struct page *page); | 271 | extern void __lock_page_nosync(struct page *page); |
295 | extern void unlock_page(struct page *page); | 272 | extern void unlock_page(struct page *page); |
296 | 273 | ||
274 | static inline void set_page_locked(struct page *page) | ||
275 | { | ||
276 | set_bit(PG_locked, &page->flags); | ||
277 | } | ||
278 | |||
279 | static inline void clear_page_locked(struct page *page) | ||
280 | { | ||
281 | clear_bit(PG_locked, &page->flags); | ||
282 | } | ||
283 | |||
284 | static inline int trylock_page(struct page *page) | ||
285 | { | ||
286 | return !test_and_set_bit(PG_locked, &page->flags); | ||
287 | } | ||
288 | |||
297 | /* | 289 | /* |
298 | * lock_page may only be called if we have the page's inode pinned. | 290 | * lock_page may only be called if we have the page's inode pinned. |
299 | */ | 291 | */ |
300 | static inline void lock_page(struct page *page) | 292 | static inline void lock_page(struct page *page) |
301 | { | 293 | { |
302 | might_sleep(); | 294 | might_sleep(); |
303 | if (TestSetPageLocked(page)) | 295 | if (!trylock_page(page)) |
304 | __lock_page(page); | 296 | __lock_page(page); |
305 | } | 297 | } |
306 | 298 | ||
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page) | |||
312 | static inline int lock_page_killable(struct page *page) | 304 | static inline int lock_page_killable(struct page *page) |
313 | { | 305 | { |
314 | might_sleep(); | 306 | might_sleep(); |
315 | if (TestSetPageLocked(page)) | 307 | if (!trylock_page(page)) |
316 | return __lock_page_killable(page); | 308 | return __lock_page_killable(page); |
317 | return 0; | 309 | return 0; |
318 | } | 310 | } |
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page) | |||
324 | static inline void lock_page_nosync(struct page *page) | 316 | static inline void lock_page_nosync(struct page *page) |
325 | { | 317 | { |
326 | might_sleep(); | 318 | might_sleep(); |
327 | if (TestSetPageLocked(page)) | 319 | if (!trylock_page(page)) |
328 | __lock_page_nosync(page); | 320 | __lock_page_nosync(page); |
329 | } | 321 | } |
330 | 322 | ||
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) | |||
409 | return ret; | 401 | return ret; |
410 | } | 402 | } |
411 | 403 | ||
404 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | ||
405 | pgoff_t index, gfp_t gfp_mask); | ||
406 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | ||
407 | pgoff_t index, gfp_t gfp_mask); | ||
408 | extern void remove_from_page_cache(struct page *page); | ||
409 | extern void __remove_from_page_cache(struct page *page); | ||
410 | |||
411 | /* | ||
412 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | ||
413 | * the page is new, so we can just run set_page_locked() against it. | ||
414 | */ | ||
415 | static inline int add_to_page_cache(struct page *page, | ||
416 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | ||
417 | { | ||
418 | int error; | ||
419 | |||
420 | set_page_locked(page); | ||
421 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); | ||
422 | if (unlikely(error)) | ||
423 | clear_page_locked(page); | ||
424 | return error; | ||
425 | } | ||
426 | |||
412 | #endif /* _LINUX_PAGEMAP_H */ | 427 | #endif /* _LINUX_PAGEMAP_H */ |
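Editor's note: trylock_page() takes over from TestSetPageLocked() as the non-sleeping way to acquire PG_locked, with lock_page() as the sleeping fallback. A hedged sketch of the pattern; the work done under the lock is a placeholder.

	#include <linux/pagemap.h>

	/* Sketch: lock a page without sleeping when possible, otherwise
	 * sleep for it, then drop the lock. */
	static void example_with_locked_page(struct page *page)
	{
		if (!trylock_page(page))
			lock_page(page);	/* may sleep */

		/* ... the page is now exclusively locked ... */

		unlock_page(page);
	}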
diff --git a/include/linux/parser.h b/include/linux/parser.h index cc554ca8bc78..7dcd05075756 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h | |||
@@ -14,7 +14,7 @@ struct match_token { | |||
14 | const char *pattern; | 14 | const char *pattern; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | typedef const struct match_token match_table_t[]; | 17 | typedef struct match_token match_table_t[]; |
18 | 18 | ||
19 | /* Maximum number of arguments that match_token will find in a pattern */ | 19 | /* Maximum number of arguments that match_token will find in a pattern */ |
20 | enum {MAX_OPT_ARGS = 3}; | 20 | enum {MAX_OPT_ARGS = 3}; |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 825be3878f68..c0e14008a3c2 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -641,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev); | |||
641 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); | 641 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); |
642 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); | 642 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); |
643 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); | 643 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); |
644 | void pci_pme_active(struct pci_dev *dev, bool enable); | ||
644 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); | 645 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); |
645 | pci_power_t pci_target_state(struct pci_dev *dev); | 646 | pci_power_t pci_target_state(struct pci_dev *dev); |
646 | int pci_prepare_to_sleep(struct pci_dev *dev); | 647 | int pci_prepare_to_sleep(struct pci_dev *dev); |
@@ -680,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus); | |||
680 | /* Proper probing supporting hot-pluggable devices */ | 681 | /* Proper probing supporting hot-pluggable devices */ |
681 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, | 682 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, |
682 | const char *mod_name); | 683 | const char *mod_name); |
683 | static inline int __must_check pci_register_driver(struct pci_driver *driver) | 684 | |
684 | { | 685 | /* |
685 | return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); | 686 | * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded |
686 | } | 687 | */ |
688 | #define pci_register_driver(driver) \ | ||
689 | __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) | ||
687 | 690 | ||
688 | void pci_unregister_driver(struct pci_driver *dev); | 691 | void pci_unregister_driver(struct pci_driver *dev); |
689 | void pci_remove_behind_bridge(struct pci_dev *dev); | 692 | void pci_remove_behind_bridge(struct pci_dev *dev); |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 35a78415accc..9ec2bcce8e83 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2177,8 +2177,6 @@ | |||
2177 | #define PCI_DEVICE_ID_HERC_WIN 0x5732 | 2177 | #define PCI_DEVICE_ID_HERC_WIN 0x5732 |
2178 | #define PCI_DEVICE_ID_HERC_UNI 0x5832 | 2178 | #define PCI_DEVICE_ID_HERC_UNI 0x5832 |
2179 | 2179 | ||
2180 | #define PCI_VENDOR_ID_RDC 0x17f3 | ||
2181 | |||
2182 | #define PCI_VENDOR_ID_SITECOM 0x182d | 2180 | #define PCI_VENDOR_ID_SITECOM 0x182d |
2183 | #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 | 2181 | #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 |
2184 | 2182 | ||
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 2e4e97bd19f7..d74f75ed1e47 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* interface for the pm_qos_power infrastructure of the linux kernel. | 1 | /* interface for the pm_qos_power infrastructure of the linux kernel. |
2 | * | 2 | * |
3 | * Mark Gross | 3 | * Mark Gross <mgross@linux.intel.com> |
4 | */ | 4 | */ |
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/notifier.h> | 6 | #include <linux/notifier.h> |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index fd31756e1a00..ea7416c901d1 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -172,7 +172,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
172 | child->ptrace = 0; | 172 | child->ptrace = 0; |
173 | if (unlikely(ptrace)) { | 173 | if (unlikely(ptrace)) { |
174 | child->ptrace = current->ptrace; | 174 | child->ptrace = current->ptrace; |
175 | __ptrace_link(child, current->parent); | 175 | ptrace_link(child, current->parent); |
176 | } | 176 | } |
177 | } | 177 | } |
178 | 178 | ||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 9f2549ac0e2d..c200b9a34aff 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -128,6 +128,7 @@ struct mddev_s | |||
128 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | 128 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ |
129 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | 129 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ |
130 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ | 130 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ |
131 | #define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ | ||
131 | 132 | ||
132 | int ro; | 133 | int ro; |
133 | 134 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a640385e0598..cfcc45b3bef0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -243,6 +243,7 @@ typedef unsigned char *sk_buff_data_t; | |||
243 | * @tc_index: Traffic control index | 243 | * @tc_index: Traffic control index |
244 | * @tc_verd: traffic control verdict | 244 | * @tc_verd: traffic control verdict |
245 | * @ndisc_nodetype: router type (from link layer) | 245 | * @ndisc_nodetype: router type (from link layer) |
246 | * @do_not_encrypt: set to prevent encryption of this frame | ||
246 | * @dma_cookie: a cookie to one of several possible DMA operations | 247 | * @dma_cookie: a cookie to one of several possible DMA operations |
247 | * done by skb DMA functions | 248 | * done by skb DMA functions |
248 | * @secmark: security marking | 249 | * @secmark: security marking |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5bad61a93f65..2f5c16b1aacd 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -46,6 +46,7 @@ struct kmem_cache_cpu { | |||
46 | struct kmem_cache_node { | 46 | struct kmem_cache_node { |
47 | spinlock_t list_lock; /* Protect partial list and nr_partial */ | 47 | spinlock_t list_lock; /* Protect partial list and nr_partial */ |
48 | unsigned long nr_partial; | 48 | unsigned long nr_partial; |
49 | unsigned long min_partial; | ||
49 | struct list_head partial; | 50 | struct list_head partial; |
50 | #ifdef CONFIG_SLUB_DEBUG | 51 | #ifdef CONFIG_SLUB_DEBUG |
51 | atomic_long_t nr_slabs; | 52 | atomic_long_t nr_slabs; |
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 000000000000..b4d9fa6f797c --- /dev/null +++ b/include/linux/spi/orion_spi.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * orion_spi.h | ||
3 | * | ||
4 | * This file is licensed under the terms of the GNU General Public | ||
5 | * License version 2. This program is licensed "as is" without any | ||
6 | * warranty of any kind, whether express or implied. | ||
7 | */ | ||
8 | |||
9 | #ifndef __LINUX_SPI_ORION_SPI_H | ||
10 | #define __LINUX_SPI_ORION_SPI_H | ||
11 | |||
12 | struct orion_spi_info { | ||
13 | u32 tclk; /* no <linux/clk.h> support yet */ | ||
14 | }; | ||
15 | |||
16 | |||
17 | #endif | ||
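Editor's note: until <linux/clk.h> support exists, board code passes the SPI controller's clock rate through orion_spi_info. A hedged sketch of a board registration; the TCLK value and the "orion_spi" platform-device name/id are assumptions.

	#include <linux/platform_device.h>
	#include <linux/spi/orion_spi.h>

	static struct orion_spi_info example_spi_info = {
		.tclk	= 166666667,		/* SoC TCLK in Hz, example value */
	};

	static struct platform_device example_spi_device = {
		.name			= "orion_spi",
		.id			= 0,
		.dev			= {
			.platform_data	= &example_spi_info,
		},
	};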
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b1875582c1a1..b48d81969574 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -280,7 +280,7 @@ static inline void tracehook_report_clone(int trace, struct pt_regs *regs, | |||
280 | unsigned long clone_flags, | 280 | unsigned long clone_flags, |
281 | pid_t pid, struct task_struct *child) | 281 | pid_t pid, struct task_struct *child) |
282 | { | 282 | { |
283 | if (unlikely(trace)) { | 283 | if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { |
284 | /* | 284 | /* |
285 | * The child starts up with an immediate SIGSTOP. | 285 | * The child starts up with an immediate SIGSTOP. |
286 | */ | 286 | */ |
@@ -487,14 +487,20 @@ static inline int tracehook_notify_jctl(int notify, int why) | |||
487 | return notify || (current->ptrace & PT_PTRACED); | 487 | return notify || (current->ptrace & PT_PTRACED); |
488 | } | 488 | } |
489 | 489 | ||
490 | #define DEATH_REAP -1 | ||
491 | #define DEATH_DELAYED_GROUP_LEADER -2 | ||
492 | |||
490 | /** | 493 | /** |
491 | * tracehook_notify_death - task is dead, ready to notify parent | 494 | * tracehook_notify_death - task is dead, ready to notify parent |
492 | * @task: @current task now exiting | 495 | * @task: @current task now exiting |
493 | * @death_cookie: value to pass to tracehook_report_death() | 496 | * @death_cookie: value to pass to tracehook_report_death() |
494 | * @group_dead: nonzero if this was the last thread in the group to die | 497 | * @group_dead: nonzero if this was the last thread in the group to die |
495 | * | 498 | * |
496 | * Return the signal number to send our parent with do_notify_parent(), or | 499 | * A return value >= 0 means call do_notify_parent() with that signal |
497 | * zero to send no signal and leave a zombie, or -1 to self-reap right now. | 500 | * number. Negative return value can be %DEATH_REAP to self-reap right |
501 | * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our | ||
502 | * parent. Note that a return value of 0 means a do_notify_parent() call | ||
503 | * that sends no signal, but still wakes up a parent blocked in wait*(). | ||
498 | * | 504 | * |
499 | * Called with write_lock_irq(&tasklist_lock) held. | 505 | * Called with write_lock_irq(&tasklist_lock) held. |
500 | */ | 506 | */ |
@@ -502,7 +508,7 @@ static inline int tracehook_notify_death(struct task_struct *task, | |||
502 | void **death_cookie, int group_dead) | 508 | void **death_cookie, int group_dead) |
503 | { | 509 | { |
504 | if (task->exit_signal == -1) | 510 | if (task->exit_signal == -1) |
505 | return task->ptrace ? SIGCHLD : -1; | 511 | return task->ptrace ? SIGCHLD : DEATH_REAP; |
506 | 512 | ||
507 | /* | 513 | /* |
508 | * If something other than our normal parent is ptracing us, then | 514 | * If something other than our normal parent is ptracing us, then |
@@ -512,21 +518,21 @@ static inline int tracehook_notify_death(struct task_struct *task, | |||
512 | if (thread_group_empty(task) && !ptrace_reparented(task)) | 518 | if (thread_group_empty(task) && !ptrace_reparented(task)) |
513 | return task->exit_signal; | 519 | return task->exit_signal; |
514 | 520 | ||
515 | return task->ptrace ? SIGCHLD : 0; | 521 | return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; |
516 | } | 522 | } |
517 | 523 | ||
518 | /** | 524 | /** |
519 | * tracehook_report_death - task is dead and ready to be reaped | 525 | * tracehook_report_death - task is dead and ready to be reaped |
520 | * @task: @current task now exiting | 526 | * @task: @current task now exiting |
521 | * @signal: signal number sent to parent, or 0 or -1 | 527 | * @signal: return value from tracehook_notify_death()
522 | * @death_cookie: value passed back from tracehook_notify_death() | 528 | * @death_cookie: value passed back from tracehook_notify_death() |
523 | * @group_dead: nonzero if this was the last thread in the group to die | 529 | * @group_dead: nonzero if this was the last thread in the group to die |
524 | * | 530 | * |
525 | * Thread has just become a zombie or is about to self-reap. If positive, | 531 | * Thread has just become a zombie or is about to self-reap. If positive, |
526 | * @signal is the signal number just sent to the parent (usually %SIGCHLD). | 532 | * @signal is the signal number just sent to the parent (usually %SIGCHLD). |
527 | * If @signal is -1, this thread will self-reap. If @signal is 0, this is | 533 | * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is |
528 | * a delayed_group_leader() zombie. The @death_cookie was passed back by | 534 | * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. |
529 | * tracehook_notify_death(). | 535 | * The @death_cookie was passed back by tracehook_notify_death(). |
530 | * | 536 | * |
531 | * If normal reaping is not inhibited, @task->exit_state might be changing | 537 | * If normal reaping is not inhibited, @task->exit_state might be changing |
532 | * in parallel. | 538 | * in parallel. |
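Editor's note: the return convention of tracehook_notify_death() is now either a signal number (>= 0, where 0 still wakes a parent in wait*()) or one of the negative DEATH_* codes. A hedged sketch of consuming it; this is not the real exit path, and locking is omitted.

	#include <linux/sched.h>
	#include <linux/tracehook.h>

	static void example_handle_death(struct task_struct *tsk, int group_dead)
	{
		void *cookie;
		int signal = tracehook_notify_death(tsk, &cookie, group_dead);

		if (signal >= 0)
			do_notify_parent(tsk, signal);	/* 0 still wakes wait*() */
		else if (signal == DEATH_REAP)
			tsk->exit_state = EXIT_DEAD;	/* self-reap, nobody notified */
		else	/* DEATH_DELAYED_GROUP_LEADER */
			tsk->exit_state = EXIT_ZOMBIE;	/* silent zombie for now */
	}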
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 14c0e91be9b5..1c78d56c57e5 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h | |||
@@ -74,7 +74,7 @@ void con_protect_unimap(struct vc_data *vc, int rdonly); | |||
74 | int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); | 74 | int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); |
75 | 75 | ||
76 | #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ | 76 | #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ |
77 | (vc)->vc_toggle_meta ? 0x80 : 0]) | 77 | ((vc)->vc_toggle_meta ? 0x80 : 0)]) |
78 | #else | 78 | #else |
79 | #define con_set_trans_old(arg) (0) | 79 | #define con_set_trans_old(arg) (0) |
80 | #define con_get_trans_old(arg) (-EINVAL) | 80 | #define con_get_trans_old(arg) (-EINVAL) |
@@ -86,6 +86,7 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); | |||
86 | #define con_copy_unimap(d, s) (0) | 86 | #define con_copy_unimap(d, s) (0) |
87 | #define con_get_unimap(vc, ct, uct, list) (-EINVAL) | 87 | #define con_get_unimap(vc, ct, uct, list) (-EINVAL) |
88 | #define con_free_unimap(vc) do { ; } while (0) | 88 | #define con_free_unimap(vc) do { ; } while (0) |
89 | #define con_protect_unimap(vc, rdonly) do { ; } while (0) | ||
89 | 90 | ||
90 | #define vc_translate(vc, c) (c) | 91 | #define vc_translate(vc, c) (c) |
91 | #endif | 92 | #endif |
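Editor's note: the vc_translate() fix is a pure precedence repair: '|' binds tighter than '?:', so the unparenthesized form discarded the character code. A small userspace demonstration of the difference; the values are only illustrative.

	#include <stdio.h>

	int main(void)
	{
		int c = 0x41;			/* 'A' */
		int toggle_meta = 0;

		int old_index = c | toggle_meta ? 0x80 : 0;	/* (c | 0) ? 0x80 : 0 -> 0x80, wrong */
		int new_index = c | (toggle_meta ? 0x80 : 0);	/*  c | 0             -> 0x41, right */

		printf("old=%#x new=%#x\n", old_index, new_index);
		return 0;
	}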