Diffstat (limited to 'include/linux')
240 files changed, 8038 insertions, 2941 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 4e8ea8c8ec1e..831c4634162c 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -301,6 +301,7 @@ header-y += quota.h | |||
301 | header-y += radeonfb.h | 301 | header-y += radeonfb.h |
302 | header-y += random.h | 302 | header-y += random.h |
303 | header-y += raw.h | 303 | header-y += raw.h |
304 | header-y += rds.h | ||
304 | header-y += reboot.h | 305 | header-y += reboot.h |
305 | header-y += reiserfs_fs.h | 306 | header-y += reiserfs_fs.h |
306 | header-y += reiserfs_xattr.h | 307 | header-y += reiserfs_xattr.h |
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h index 7e3d2859be50..1d0ef1ae8036 100644 --- a/include/linux/acpi_pmtmr.h +++ b/include/linux/acpi_pmtmr.h | |||
@@ -25,8 +25,6 @@ static inline u32 acpi_pm_read_early(void) | |||
25 | return acpi_pm_read_verified() & ACPI_PM_MASK; | 25 | return acpi_pm_read_verified() & ACPI_PM_MASK; |
26 | } | 26 | } |
27 | 27 | ||
28 | extern void pmtimer_wait(unsigned); | ||
29 | |||
30 | #else | 28 | #else |
31 | 29 | ||
32 | static inline u32 acpi_pm_read_early(void) | 30 | static inline u32 acpi_pm_read_early(void) |
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h index 8d441064a30d..a10a90791976 100644 --- a/include/linux/altera_uart.h +++ b/include/linux/altera_uart.h | |||
@@ -5,10 +5,15 @@ | |||
5 | #ifndef __ALTUART_H | 5 | #ifndef __ALTUART_H |
6 | #define __ALTUART_H | 6 | #define __ALTUART_H |
7 | 7 | ||
8 | #include <linux/init.h> | ||
9 | |||
8 | struct altera_uart_platform_uart { | 10 | struct altera_uart_platform_uart { |
9 | unsigned long mapbase; /* Physical address base */ | 11 | unsigned long mapbase; /* Physical address base */ |
10 | unsigned int irq; /* Interrupt vector */ | 12 | unsigned int irq; /* Interrupt vector */ |
11 | unsigned int uartclk; /* UART clock rate */ | 13 | unsigned int uartclk; /* UART clock rate */ |
14 | unsigned int bus_shift; /* Bus shift (address stride) */ | ||
12 | }; | 15 | }; |
13 | 16 | ||
17 | int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp); | ||
18 | |||
14 | #endif /* __ALTUART_H */ | 19 | #endif /* __ALTUART_H */ |
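
The new bus_shift field describes the register address stride, and early_altera_uart_setup() lets board code register ports before the console is up. A minimal board-code sketch, assuming the usual platform_device hookup; the base address, IRQ, clock value and the "altera_uart" device name are all placeholders, not taken from a real board file:

#include <linux/platform_device.h>
#include <linux/altera_uart.h>

/* Hypothetical board description; all numbers are placeholders. */
static struct altera_uart_platform_uart board_uart = {
	.mapbase   = 0xf8001600,	/* physical base of the UART core */
	.irq       = 6,			/* interrupt vector */
	.uartclk   = 50000000,		/* 50 MHz reference clock */
	.bus_shift = 2,			/* registers laid out on 32-bit boundaries */
};

static struct platform_device board_uart_device = {
	.name = "altera_uart",		/* assumed driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &board_uart,
	},
};
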
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index b0c174012436..c6454cca0447 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/resource.h> | 20 | #include <linux/resource.h> |
21 | 21 | ||
22 | #define AMBA_NR_IRQS 2 | 22 | #define AMBA_NR_IRQS 2 |
23 | #define AMBA_CID 0xb105f00d | ||
23 | 24 | ||
24 | struct clk; | 25 | struct clk; |
25 | 26 | ||
@@ -70,9 +71,15 @@ void amba_release_regions(struct amba_device *); | |||
70 | #define amba_pclk_disable(d) \ | 71 | #define amba_pclk_disable(d) \ |
71 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) | 72 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) |
72 | 73 | ||
73 | #define amba_config(d) (((d)->periphid >> 24) & 0xff) | 74 | /* Some drivers don't use the struct amba_device */ |
74 | #define amba_rev(d) (((d)->periphid >> 20) & 0x0f) | 75 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) |
75 | #define amba_manf(d) (((d)->periphid >> 12) & 0xff) | 76 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) |
76 | #define amba_part(d) ((d)->periphid & 0xfff) | 77 | #define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) |
78 | #define AMBA_PART_BITS(a) ((a) & 0xfff) | ||
79 | |||
80 | #define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) | ||
81 | #define amba_rev(d) AMBA_REV_BITS((d)->periphid) | ||
82 | #define amba_manf(d) AMBA_MANF_BITS((d)->periphid) | ||
83 | #define amba_part(d) AMBA_PART_BITS((d)->periphid) | ||
77 | 84 | ||
78 | #endif | 85 | #endif |
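
The new AMBA_*_BITS() macros take a raw peripheral ID word rather than a struct amba_device, so code that reads the ID registers itself can share the decoding. A small sketch of decoding such a word; the value passed in is simply whatever the caller assembled from the PIDx registers:

#include <linux/kernel.h>
#include <linux/amba/bus.h>

/* Illustrative only: decode a raw periphid without a struct amba_device. */
static void show_periphid(u32 periphid)
{
	pr_info("part %03x manf %02x rev %u config %02x\n",
		AMBA_PART_BITS(periphid),
		AMBA_MANF_BITS(periphid),
		AMBA_REV_BITS(periphid),
		AMBA_CONFIG_BITS(periphid));
}
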
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index ca84ce70d5d5..f4ee9acc9721 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h | |||
@@ -24,6 +24,7 @@ | |||
24 | * whether a card is present in the MMC slot or not | 24 | * whether a card is present in the MMC slot or not |
25 | * @gpio_wp: read this GPIO pin to see if the card is write protected | 25 | * @gpio_wp: read this GPIO pin to see if the card is write protected |
26 | * @gpio_cd: read this GPIO pin to detect card insertion | 26 | * @gpio_cd: read this GPIO pin to detect card insertion |
27 | * @cd_invert: true if the gpio_cd pin value is active low | ||
27 | * @capabilities: the capabilities of the block as implemented in | 28 | * @capabilities: the capabilities of the block as implemented in |
28 | * this platform, signify anything MMC_CAP_* from mmc/host.h | 29 | * this platform, signify anything MMC_CAP_* from mmc/host.h |
29 | */ | 30 | */ |
@@ -35,6 +36,7 @@ struct mmci_platform_data { | |||
35 | unsigned int (*status)(struct device *); | 36 | unsigned int (*status)(struct device *); |
36 | int gpio_wp; | 37 | int gpio_wp; |
37 | int gpio_cd; | 38 | int gpio_cd; |
39 | bool cd_invert; | ||
38 | unsigned long capabilities; | 40 | unsigned long capabilities; |
39 | }; | 41 | }; |
40 | 42 | ||
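
A hedged sketch of platform data using the new cd_invert flag, for a board whose card-detect line is wired active-low; the GPIO numbers and capability bits below are placeholders, not values from a real board file:

#include <linux/mmc/host.h>
#include <linux/amba/mmci.h>

/* Hypothetical board data; GPIO numbers are placeholders. */
static struct mmci_platform_data board_mmci = {
	.gpio_cd	= 112,		/* card-detect GPIO */
	.gpio_wp	= 113,		/* write-protect GPIO */
	.cd_invert	= true,		/* gpio_cd reads 0 when a card is present */
	.capabilities	= MMC_CAP_4_BIT_DATA,
};
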
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index abf26cc47a2b..4ce98f54186b 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h | |||
@@ -228,6 +228,7 @@ enum ssp_chip_select { | |||
228 | }; | 228 | }; |
229 | 229 | ||
230 | 230 | ||
231 | struct dma_chan; | ||
231 | /** | 232 | /** |
232 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. | 233 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. |
233 | * @num_chipselect: chipselects are used to distinguish individual | 234 | * @num_chipselect: chipselects are used to distinguish individual |
@@ -235,11 +236,16 @@ enum ssp_chip_select { | |||
235 | * each slave has a chipselect signal, but it's common that not | 236 | * each slave has a chipselect signal, but it's common that not |
236 | * every chipselect is connected to a slave. | 237 | * every chipselect is connected to a slave. |
237 | * @enable_dma: if true enables DMA driven transfers. | 238 | * @enable_dma: if true enables DMA driven transfers. |
239 | * @dma_rx_param: parameter to locate an RX DMA channel. | ||
240 | * @dma_tx_param: parameter to locate a TX DMA channel. | ||
238 | */ | 241 | */ |
239 | struct pl022_ssp_controller { | 242 | struct pl022_ssp_controller { |
240 | u16 bus_id; | 243 | u16 bus_id; |
241 | u8 num_chipselect; | 244 | u8 num_chipselect; |
242 | u8 enable_dma:1; | 245 | u8 enable_dma:1; |
246 | bool (*dma_filter)(struct dma_chan *chan, void *filter_param); | ||
247 | void *dma_rx_param; | ||
248 | void *dma_tx_param; | ||
243 | }; | 249 | }; |
244 | 250 | ||
245 | /** | 251 | /** |
@@ -270,20 +276,13 @@ struct pl022_ssp_controller { | |||
270 | * @dma_config: DMA configuration for SSP controller and peripheral | 276 | * @dma_config: DMA configuration for SSP controller and peripheral |
271 | */ | 277 | */ |
272 | struct pl022_config_chip { | 278 | struct pl022_config_chip { |
273 | struct device *dev; | ||
274 | enum ssp_loopback lbm; | ||
275 | enum ssp_interface iface; | 279 | enum ssp_interface iface; |
276 | enum ssp_hierarchy hierarchy; | 280 | enum ssp_hierarchy hierarchy; |
277 | bool slave_tx_disable; | 281 | bool slave_tx_disable; |
278 | struct ssp_clock_params clk_freq; | 282 | struct ssp_clock_params clk_freq; |
279 | enum ssp_rx_endian endian_rx; | ||
280 | enum ssp_tx_endian endian_tx; | ||
281 | enum ssp_data_size data_size; | ||
282 | enum ssp_mode com_mode; | 283 | enum ssp_mode com_mode; |
283 | enum ssp_rx_level_trig rx_lev_trig; | 284 | enum ssp_rx_level_trig rx_lev_trig; |
284 | enum ssp_tx_level_trig tx_lev_trig; | 285 | enum ssp_tx_level_trig tx_lev_trig; |
285 | enum ssp_spi_clk_phase clk_phase; | ||
286 | enum ssp_spi_clk_pol clk_pol; | ||
287 | enum ssp_microwire_ctrl_len ctrl_len; | 286 | enum ssp_microwire_ctrl_len ctrl_len; |
288 | enum ssp_microwire_wait_state wait_state; | 287 | enum ssp_microwire_wait_state wait_state; |
289 | enum ssp_duplex duplex; | 288 | enum ssp_duplex duplex; |
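
The new dma_filter/dma_rx_param/dma_tx_param fields let the board tell the driver how to request its DMA channels through the dmaengine filter mechanism. A sketch of how such platform data might be filled in; the filter function and the request-line values are hypothetical and depend entirely on the platform's DMA controller driver:

#include <linux/dmaengine.h>
#include <linux/amba/pl022.h>

/* Hypothetical: matching rule and request lines are placeholders. */
static bool board_dma_filter(struct dma_chan *chan, void *filter_param)
{
	return chan->private == filter_param;	/* placeholder matching rule */
}

static struct pl022_ssp_controller ssp0_plat = {
	.bus_id		= 0,
	.num_chipselect	= 4,
	.enable_dma	= 1,
	.dma_filter	= board_dma_filter,
	.dma_rx_param	= (void *) 8,	/* e.g. RX request line */
	.dma_tx_param	= (void *) 9,	/* e.g. TX request line */
};
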
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index e1b634b635f2..6021588ba0a8 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h | |||
@@ -32,7 +32,9 @@ | |||
32 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ | 32 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ |
33 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ | 33 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ |
34 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ | 34 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ |
35 | #define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ | ||
35 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ | 36 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ |
37 | #define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ | ||
36 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ | 38 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ |
37 | #define UART010_CR 0x14 /* Control register. */ | 39 | #define UART010_CR 0x14 /* Control register. */ |
38 | #define UART01x_FR 0x18 /* Flag register (Read only). */ | 40 | #define UART01x_FR 0x18 /* Flag register (Read only). */ |
@@ -51,6 +53,15 @@ | |||
51 | #define UART011_MIS 0x40 /* Masked interrupt status. */ | 53 | #define UART011_MIS 0x40 /* Masked interrupt status. */ |
52 | #define UART011_ICR 0x44 /* Interrupt clear register. */ | 54 | #define UART011_ICR 0x44 /* Interrupt clear register. */ |
53 | #define UART011_DMACR 0x48 /* DMA control register. */ | 55 | #define UART011_DMACR 0x48 /* DMA control register. */ |
56 | #define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ | ||
57 | #define ST_UART011_XON1 0x54 /* XON1 register. */ | ||
58 | #define ST_UART011_XON2 0x58 /* XON2 register. */ | ||
59 | #define ST_UART011_XOFF1 0x5C /* XON1 register. */ | ||
60 | #define ST_UART011_XOFF2 0x60 /* XON2 register. */ | ||
61 | #define ST_UART011_ITCR 0x80 /* Integration test control register. */ | ||
62 | #define ST_UART011_ITIP 0x84 /* Integration test input register. */ | ||
63 | #define ST_UART011_ABCR 0x100 /* Autobaud control register. */ | ||
64 | #define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ | ||
54 | 65 | ||
55 | #define UART011_DR_OE (1 << 11) | 66 | #define UART011_DR_OE (1 << 11) |
56 | #define UART011_DR_BE (1 << 10) | 67 | #define UART011_DR_BE (1 << 10) |
diff --git a/include/linux/ata.h b/include/linux/ata.h index fe6e681a9d74..0c4929fa34d3 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -89,6 +89,7 @@ enum { | |||
89 | ATA_ID_SPG = 98, | 89 | ATA_ID_SPG = 98, |
90 | ATA_ID_LBA_CAPACITY_2 = 100, | 90 | ATA_ID_LBA_CAPACITY_2 = 100, |
91 | ATA_ID_SECTOR_SIZE = 106, | 91 | ATA_ID_SECTOR_SIZE = 106, |
92 | ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ | ||
92 | ATA_ID_LAST_LUN = 126, | 93 | ATA_ID_LAST_LUN = 126, |
93 | ATA_ID_DLF = 128, | 94 | ATA_ID_DLF = 128, |
94 | ATA_ID_CSFO = 129, | 95 | ATA_ID_CSFO = 129, |
@@ -640,16 +641,49 @@ static inline int ata_id_flush_ext_enabled(const u16 *id) | |||
640 | return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; | 641 | return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; |
641 | } | 642 | } |
642 | 643 | ||
643 | static inline int ata_id_has_large_logical_sectors(const u16 *id) | 644 | static inline u32 ata_id_logical_sector_size(const u16 *id) |
644 | { | 645 | { |
645 | if ((id[ATA_ID_SECTOR_SIZE] & 0xc000) != 0x4000) | 646 | /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. |
646 | return 0; | 647 | * IDENTIFY DEVICE data, word 117-118. |
647 | return id[ATA_ID_SECTOR_SIZE] & (1 << 13); | 648 | * 0xd000 ignores bit 13 (logical:physical > 1) |
649 | */ | ||
650 | if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) | ||
651 | return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) | ||
652 | + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)) ; | ||
653 | return ATA_SECT_SIZE; | ||
654 | } | ||
655 | |||
656 | static inline u8 ata_id_log2_per_physical_sector(const u16 *id) | ||
657 | { | ||
658 | /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. | ||
659 | * IDENTIFY DEVICE data, word 106. | ||
660 | * 0xe000 ignores bit 12 (logical sector > 512 bytes) | ||
661 | */ | ||
662 | if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) | ||
663 | return (id[ATA_ID_SECTOR_SIZE] & 0xf); | ||
664 | return 0; | ||
648 | } | 665 | } |
649 | 666 | ||
650 | static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) | 667 | /* Offset of logical sectors relative to physical sectors. |
668 | * | ||
669 | * If device has more than one logical sector per physical sector | ||
670 | * (aka 512 byte emulation), vendors might offset the "sector 0" address | ||
671 | * so sector 63 is "naturally aligned" - e.g. FAT partition table. | ||
672 | * This avoids Read/Mod/Write penalties when using FAT partition table | ||
673 | * and updating "well aligned" (FS perspective) physical sectors on every | ||
674 | * transaction. | ||
675 | */ | ||
676 | static inline u16 ata_id_logical_sector_offset(const u16 *id, | ||
677 | u8 log2_per_phys) | ||
651 | { | 678 | { |
652 | return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); | 679 | u16 word_209 = id[209]; |
680 | |||
681 | if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { | ||
682 | u16 first = word_209 & 0x3fff; | ||
683 | if (first > 0) | ||
684 | return (1 << log2_per_phys) - first; | ||
685 | } | ||
686 | return 0; | ||
653 | } | 687 | } |
654 | 688 | ||
655 | static inline int ata_id_has_lba48(const u16 *id) | 689 | static inline int ata_id_has_lba48(const u16 *id) |
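
As a worked example of the new helpers: a 4 KiB-physical-sector drive doing 512-byte emulation reports word 106 with the "multiple logical sectors per physical" bit set and a low nibble of 3 (2^3 = 8), leaves words 117-118 at the default (so the logical size stays 512 bytes), and may advertise an alignment offset in word 209. A hedged sketch of consuming the helpers (not the libata code itself):

#include <linux/kernel.h>
#include <linux/ata.h>

/* Illustrative only: derive sector geometry from an IDENTIFY DEVICE buffer. */
static void show_sector_geometry(const u16 *id)
{
	u32 logical = ata_id_logical_sector_size(id);		/* bytes; 512 unless words 117-118 say otherwise */
	u8 log2_pps = ata_id_log2_per_physical_sector(id);	/* e.g. 3 for 8 logical sectors per physical */
	unsigned int offset = ata_id_logical_sector_offset(id, log2_pps);

	pr_info("logical %u bytes, physical %u bytes, alignment offset %u sectors\n",
		logical, logical << log2_pps, offset);
}
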
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index f6481daf6e52..a8e4e832cdbb 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h | |||
@@ -449,7 +449,7 @@ void vcc_insert_socket(struct sock *sk); | |||
449 | 449 | ||
450 | static inline int atm_guess_pdu2truesize(int size) | 450 | static inline int atm_guess_pdu2truesize(int size) |
451 | { | 451 | { |
452 | return (SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info)); | 452 | return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info); |
453 | } | 453 | } |
454 | 454 | ||
455 | 455 | ||
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h new file mode 100644 index 000000000000..904dec7d03a1 --- /dev/null +++ b/include/linux/bfin_mac.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Blackfin On-Chip MAC Driver | ||
3 | * | ||
4 | * Copyright 2004-2010 Analog Devices Inc. | ||
5 | * | ||
6 | * Enter bugs at http://blackfin.uclinux.org/ | ||
7 | * | ||
8 | * Licensed under the GPL-2 or later. | ||
9 | */ | ||
10 | |||
11 | #ifndef _LINUX_BFIN_MAC_H_ | ||
12 | #define _LINUX_BFIN_MAC_H_ | ||
13 | |||
14 | #include <linux/phy.h> | ||
15 | |||
16 | struct bfin_phydev_platform_data { | ||
17 | unsigned short addr; | ||
18 | int irq; | ||
19 | }; | ||
20 | |||
21 | struct bfin_mii_bus_platform_data { | ||
22 | int phydev_number; | ||
23 | struct bfin_phydev_platform_data *phydev_data; | ||
24 | const unsigned short *mac_peripherals; | ||
25 | int phy_mode; | ||
26 | unsigned int phy_mask; | ||
27 | }; | ||
28 | |||
29 | #endif | ||
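
A hedged sketch of how a board file might populate the new structures; the PHY address, IRQ, pin list and MII mode below are placeholders rather than values from a real Blackfin board:

#include <linux/kernel.h>
#include <linux/bfin_mac.h>

/* Hypothetical board tables; every value here is a placeholder. */
static struct bfin_phydev_platform_data board_phydevs[] = {
	{
		.addr = 1,	/* PHY at MDIO address 1 */
		.irq  = -1,	/* no PHY interrupt line, use polling */
	},
};

static const unsigned short board_mac_pins[] = {
	0,	/* normally the P_MIIx peripheral pin list, 0-terminated */
};

static struct bfin_mii_bus_platform_data board_mii_bus = {
	.phydev_number   = ARRAY_SIZE(board_phydevs),
	.phydev_data     = board_phydevs,
	.mac_peripherals = board_mac_pins,
	.phy_mode        = PHY_INTERFACE_MODE_MII,
	.phy_mask        = 0,	/* probe all MDIO addresses */
};
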
diff --git a/include/linux/bio.h b/include/linux/bio.h index 5274103434ad..ba679992d39b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -346,8 +346,15 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | |||
346 | } | 346 | } |
347 | 347 | ||
348 | #else | 348 | #else |
349 | #define bvec_kmap_irq(bvec, flags) (page_address((bvec)->bv_page) + (bvec)->bv_offset) | 349 | static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) |
350 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) | 350 | { |
351 | return page_address(bvec->bv_page) + bvec->bv_offset; | ||
352 | } | ||
353 | |||
354 | static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | ||
355 | { | ||
356 | *flags = 0; | ||
357 | } | ||
351 | #endif | 358 | #endif |
352 | 359 | ||
353 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, | 360 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, |
@@ -496,6 +503,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl) | |||
496 | #define bip_for_each_vec(bvl, bip, i) \ | 503 | #define bip_for_each_vec(bvl, bip, i) \ |
497 | __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) | 504 | __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) |
498 | 505 | ||
506 | #define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ | ||
507 | for_each_bio(_bio) \ | ||
508 | bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) | ||
509 | |||
499 | #define bio_integrity(bio) (bio->bi_integrity != NULL) | 510 | #define bio_integrity(bio) (bio->bi_integrity != NULL) |
500 | 511 | ||
501 | extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); | 512 | extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); |
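
The new bio_for_each_integrity_vec() iterator walks the integrity vectors of every bio in a chain. A minimal sketch of the kind of walk it enables, for instance totalling the attached protection-information bytes (illustrative, not taken from the block layer, and assuming CONFIG_BLK_DEV_INTEGRITY):

#include <linux/bio.h>

/* Illustrative only: sum the integrity payload attached to a bio chain. */
static unsigned int count_integrity_bytes(struct bio *bio)
{
	struct bio_vec *iv;
	unsigned int bytes = 0;
	int i;

	bio_for_each_integrity_vec(iv, bio, i)
		bytes += iv->bv_len;

	return bytes;
}
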
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index fc68053378ce..827cc95711ef 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -136,28 +136,6 @@ static inline unsigned long __ffs64(u64 word) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | #ifdef __KERNEL__ | 138 | #ifdef __KERNEL__ |
139 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | ||
140 | |||
141 | /** | ||
142 | * find_first_bit - find the first set bit in a memory region | ||
143 | * @addr: The address to start the search at | ||
144 | * @size: The maximum size to search | ||
145 | * | ||
146 | * Returns the bit number of the first set bit. | ||
147 | */ | ||
148 | extern unsigned long find_first_bit(const unsigned long *addr, | ||
149 | unsigned long size); | ||
150 | |||
151 | /** | ||
152 | * find_first_zero_bit - find the first cleared bit in a memory region | ||
153 | * @addr: The address to start the search at | ||
154 | * @size: The maximum size to search | ||
155 | * | ||
156 | * Returns the bit number of the first cleared bit. | ||
157 | */ | ||
158 | extern unsigned long find_first_zero_bit(const unsigned long *addr, | ||
159 | unsigned long size); | ||
160 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
161 | 139 | ||
162 | #ifdef CONFIG_GENERIC_FIND_LAST_BIT | 140 | #ifdef CONFIG_GENERIC_FIND_LAST_BIT |
163 | /** | 141 | /** |
@@ -171,28 +149,5 @@ extern unsigned long find_last_bit(const unsigned long *addr, | |||
171 | unsigned long size); | 149 | unsigned long size); |
172 | #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ | 150 | #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ |
173 | 151 | ||
174 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT | ||
175 | |||
176 | /** | ||
177 | * find_next_bit - find the next set bit in a memory region | ||
178 | * @addr: The address to base the search on | ||
179 | * @offset: The bitnumber to start searching at | ||
180 | * @size: The bitmap size in bits | ||
181 | */ | ||
182 | extern unsigned long find_next_bit(const unsigned long *addr, | ||
183 | unsigned long size, unsigned long offset); | ||
184 | |||
185 | /** | ||
186 | * find_next_zero_bit - find the next cleared bit in a memory region | ||
187 | * @addr: The address to base the search on | ||
188 | * @offset: The bitnumber to start searching at | ||
189 | * @size: The bitmap size in bits | ||
190 | */ | ||
191 | |||
192 | extern unsigned long find_next_zero_bit(const unsigned long *addr, | ||
193 | unsigned long size, | ||
194 | unsigned long offset); | ||
195 | |||
196 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | ||
197 | #endif /* __KERNEL__ */ | 152 | #endif /* __KERNEL__ */ |
198 | #endif | 153 | #endif |
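
The find_first_*/find_next_* prototypes are dropped here, presumably because they are provided by a common header pulled in elsewhere; callers keep the same prototypes. For reference, the usual caller pattern they support:

#include <linux/errno.h>
#include <linux/bitops.h>

/* Illustrative only: find a free slot in a fixed-size bitmap. */
static int first_free_slot(const unsigned long *map, unsigned int nr_slots)
{
	unsigned long bit = find_first_zero_bit(map, nr_slots);

	return bit < nr_slots ? (int)bit : -ENOSPC;
}
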
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ca83a97c9715..0437ab6bb54c 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -97,6 +97,7 @@ struct bio { | |||
97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | 97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ |
98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | 98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ |
99 | #define BIO_QUIET 11 /* Make BIO Quiet */ | 99 | #define BIO_QUIET 11 /* Make BIO Quiet */ |
100 | #define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */ | ||
100 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | 101 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) |
101 | 102 | ||
102 | /* | 103 | /* |
@@ -130,6 +131,8 @@ enum rq_flag_bits { | |||
130 | /* bio only flags */ | 131 | /* bio only flags */ |
131 | __REQ_UNPLUG, /* unplug the immediately after submission */ | 132 | __REQ_UNPLUG, /* unplug the immediately after submission */ |
132 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | 133 | __REQ_RAHEAD, /* read ahead, can fail anytime */ |
134 | __REQ_THROTTLED, /* This bio has already been subjected to | ||
135 | * throttling rules. Don't do it again. */ | ||
133 | 136 | ||
134 | /* request only flags */ | 137 | /* request only flags */ |
135 | __REQ_SORTED, /* elevator knows about this request */ | 138 | __REQ_SORTED, /* elevator knows about this request */ |
@@ -143,10 +146,8 @@ enum rq_flag_bits { | |||
143 | __REQ_FAILED, /* set if the request failed */ | 146 | __REQ_FAILED, /* set if the request failed */ |
144 | __REQ_QUIET, /* don't worry about errors */ | 147 | __REQ_QUIET, /* don't worry about errors */ |
145 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | 148 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ |
146 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
147 | __REQ_ALLOCED, /* request came from our alloc pool */ | 149 | __REQ_ALLOCED, /* request came from our alloc pool */ |
148 | __REQ_COPY_USER, /* contains copies of user pages */ | 150 | __REQ_COPY_USER, /* contains copies of user pages */ |
149 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
150 | __REQ_FLUSH, /* request for cache flush */ | 151 | __REQ_FLUSH, /* request for cache flush */ |
151 | __REQ_IO_STAT, /* account I/O stat */ | 152 | __REQ_IO_STAT, /* account I/O stat */ |
152 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | 153 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ |
@@ -168,10 +169,12 @@ enum rq_flag_bits { | |||
168 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 169 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
169 | #define REQ_COMMON_MASK \ | 170 | #define REQ_COMMON_MASK \ |
170 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ | 171 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ |
171 | REQ_META| REQ_DISCARD | REQ_NOIDLE) | 172 | REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) |
173 | #define REQ_CLONE_MASK REQ_COMMON_MASK | ||
172 | 174 | ||
173 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | 175 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) |
174 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | 176 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) |
177 | #define REQ_THROTTLED (1 << __REQ_THROTTLED) | ||
175 | 178 | ||
176 | #define REQ_SORTED (1 << __REQ_SORTED) | 179 | #define REQ_SORTED (1 << __REQ_SORTED) |
177 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | 180 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) |
@@ -184,10 +187,8 @@ enum rq_flag_bits { | |||
184 | #define REQ_FAILED (1 << __REQ_FAILED) | 187 | #define REQ_FAILED (1 << __REQ_FAILED) |
185 | #define REQ_QUIET (1 << __REQ_QUIET) | 188 | #define REQ_QUIET (1 << __REQ_QUIET) |
186 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | 189 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) |
187 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
188 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | 190 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) |
189 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | 191 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) |
190 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
191 | #define REQ_FLUSH (1 << __REQ_FLUSH) | 192 | #define REQ_FLUSH (1 << __REQ_FLUSH) |
192 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | 193 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) |
193 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | 194 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) |
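
REQ_THROTTLED is a bio-only flag, per its comment meant to mark a bio that has already been charged against the throttling limits so a resubmission is not charged twice. A sketch of that pattern (illustrative; the real logic lives in the throttling code):

#include <linux/blk_types.h>

/* Illustrative only: test and mark the per-bio throttling state. */
static bool bio_already_throttled(struct bio *bio)
{
	return (bio->bi_rw & REQ_THROTTLED) != 0;
}

static void bio_mark_throttled(struct bio *bio)
{
	bio->bi_rw |= REQ_THROTTLED;
}
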
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2c54906f678f..646b462d04df 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -124,6 +124,9 @@ struct request { | |||
124 | * physical address coalescing is performed. | 124 | * physical address coalescing is performed. |
125 | */ | 125 | */ |
126 | unsigned short nr_phys_segments; | 126 | unsigned short nr_phys_segments; |
127 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
128 | unsigned short nr_integrity_segments; | ||
129 | #endif | ||
127 | 130 | ||
128 | unsigned short ioprio; | 131 | unsigned short ioprio; |
129 | 132 | ||
@@ -243,6 +246,7 @@ struct queue_limits { | |||
243 | 246 | ||
244 | unsigned short logical_block_size; | 247 | unsigned short logical_block_size; |
245 | unsigned short max_segments; | 248 | unsigned short max_segments; |
249 | unsigned short max_integrity_segments; | ||
246 | 250 | ||
247 | unsigned char misaligned; | 251 | unsigned char misaligned; |
248 | unsigned char discard_misaligned; | 252 | unsigned char discard_misaligned; |
@@ -355,18 +359,25 @@ struct request_queue | |||
355 | struct blk_trace *blk_trace; | 359 | struct blk_trace *blk_trace; |
356 | #endif | 360 | #endif |
357 | /* | 361 | /* |
358 | * reserved for flush operations | 362 | * for flush operations |
359 | */ | 363 | */ |
360 | unsigned int ordered, next_ordered, ordseq; | 364 | unsigned int flush_flags; |
361 | int orderr, ordcolor; | 365 | unsigned int flush_seq; |
362 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 366 | int flush_err; |
363 | struct request *orig_bar_rq; | 367 | struct request flush_rq; |
368 | struct request *orig_flush_rq; | ||
369 | struct list_head pending_flushes; | ||
364 | 370 | ||
365 | struct mutex sysfs_lock; | 371 | struct mutex sysfs_lock; |
366 | 372 | ||
367 | #if defined(CONFIG_BLK_DEV_BSG) | 373 | #if defined(CONFIG_BLK_DEV_BSG) |
368 | struct bsg_class_device bsg_dev; | 374 | struct bsg_class_device bsg_dev; |
369 | #endif | 375 | #endif |
376 | |||
377 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
378 | /* Throttle data */ | ||
379 | struct throtl_data *td; | ||
380 | #endif | ||
370 | }; | 381 | }; |
371 | 382 | ||
372 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | 383 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ |
@@ -462,56 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
462 | __clear_bit(flag, &q->queue_flags); | 473 | __clear_bit(flag, &q->queue_flags); |
463 | } | 474 | } |
464 | 475 | ||
465 | enum { | ||
466 | /* | ||
467 | * Hardbarrier is supported with one of the following methods. | ||
468 | * | ||
469 | * NONE : hardbarrier unsupported | ||
470 | * DRAIN : ordering by draining is enough | ||
471 | * DRAIN_FLUSH : ordering by draining w/ pre and post flushes | ||
472 | * DRAIN_FUA : ordering by draining w/ pre flush and FUA write | ||
473 | * TAG : ordering by tag is enough | ||
474 | * TAG_FLUSH : ordering by tag w/ pre and post flushes | ||
475 | * TAG_FUA : ordering by tag w/ pre flush and FUA write | ||
476 | */ | ||
477 | QUEUE_ORDERED_BY_DRAIN = 0x01, | ||
478 | QUEUE_ORDERED_BY_TAG = 0x02, | ||
479 | QUEUE_ORDERED_DO_PREFLUSH = 0x10, | ||
480 | QUEUE_ORDERED_DO_BAR = 0x20, | ||
481 | QUEUE_ORDERED_DO_POSTFLUSH = 0x40, | ||
482 | QUEUE_ORDERED_DO_FUA = 0x80, | ||
483 | |||
484 | QUEUE_ORDERED_NONE = 0x00, | ||
485 | |||
486 | QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | | ||
487 | QUEUE_ORDERED_DO_BAR, | ||
488 | QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | | ||
489 | QUEUE_ORDERED_DO_PREFLUSH | | ||
490 | QUEUE_ORDERED_DO_POSTFLUSH, | ||
491 | QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | | ||
492 | QUEUE_ORDERED_DO_PREFLUSH | | ||
493 | QUEUE_ORDERED_DO_FUA, | ||
494 | |||
495 | QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | | ||
496 | QUEUE_ORDERED_DO_BAR, | ||
497 | QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | | ||
498 | QUEUE_ORDERED_DO_PREFLUSH | | ||
499 | QUEUE_ORDERED_DO_POSTFLUSH, | ||
500 | QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | | ||
501 | QUEUE_ORDERED_DO_PREFLUSH | | ||
502 | QUEUE_ORDERED_DO_FUA, | ||
503 | |||
504 | /* | ||
505 | * Ordered operation sequence | ||
506 | */ | ||
507 | QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */ | ||
508 | QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */ | ||
509 | QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */ | ||
510 | QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */ | ||
511 | QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */ | ||
512 | QUEUE_ORDSEQ_DONE = 0x20, | ||
513 | }; | ||
514 | |||
515 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) | 476 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) |
516 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 477 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
517 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 478 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
@@ -521,7 +482,6 @@ enum { | |||
521 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 482 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
522 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 483 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
523 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) | 484 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) |
524 | #define blk_queue_flushing(q) ((q)->ordseq) | ||
525 | #define blk_queue_stackable(q) \ | 485 | #define blk_queue_stackable(q) \ |
526 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 486 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
527 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | 487 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
@@ -592,7 +552,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) | |||
592 | * it already be started by driver. | 552 | * it already be started by driver. |
593 | */ | 553 | */ |
594 | #define RQ_NOMERGE_FLAGS \ | 554 | #define RQ_NOMERGE_FLAGS \ |
595 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) | 555 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ |
556 | REQ_FLUSH | REQ_FUA) | ||
596 | #define rq_mergeable(rq) \ | 557 | #define rq_mergeable(rq) \ |
597 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 558 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
598 | (((rq)->cmd_flags & REQ_DISCARD) || \ | 559 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
@@ -851,7 +812,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | |||
851 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 812 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
852 | unsigned int max_discard_sectors); | 813 | unsigned int max_discard_sectors); |
853 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 814 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
854 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 815 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
855 | extern void blk_queue_alignment_offset(struct request_queue *q, | 816 | extern void blk_queue_alignment_offset(struct request_queue *q, |
856 | unsigned int alignment); | 817 | unsigned int alignment); |
857 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 818 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
@@ -881,12 +842,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int); | |||
881 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 842 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
882 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 843 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
883 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 844 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
845 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | ||
884 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 846 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
885 | extern int blk_queue_ordered(struct request_queue *, unsigned); | ||
886 | extern bool blk_do_ordered(struct request_queue *, struct request **); | ||
887 | extern unsigned blk_ordered_cur_seq(struct request_queue *); | ||
888 | extern unsigned blk_ordered_req_seq(struct request *); | ||
889 | extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); | ||
890 | 847 | ||
891 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 848 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
892 | extern void blk_dump_rq_flags(struct request *, char *); | 849 | extern void blk_dump_rq_flags(struct request *, char *); |
@@ -919,27 +876,20 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | |||
919 | return NULL; | 876 | return NULL; |
920 | return bqt->tag_index[tag]; | 877 | return bqt->tag_index[tag]; |
921 | } | 878 | } |
922 | enum{ | 879 | |
923 | BLKDEV_WAIT, /* wait for completion */ | 880 | #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ |
924 | BLKDEV_BARRIER, /* issue request with barrier */ | 881 | |
925 | BLKDEV_SECURE, /* secure discard */ | 882 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
926 | }; | ||
927 | #define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT) | ||
928 | #define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER) | ||
929 | #define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE) | ||
930 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *, | ||
931 | unsigned long); | ||
932 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 883 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
933 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 884 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
934 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 885 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
935 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 886 | sector_t nr_sects, gfp_t gfp_mask); |
936 | static inline int sb_issue_discard(struct super_block *sb, | 887 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, |
937 | sector_t block, sector_t nr_blocks) | 888 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) |
938 | { | 889 | { |
939 | block <<= (sb->s_blocksize_bits - 9); | 890 | return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), |
940 | nr_blocks <<= (sb->s_blocksize_bits - 9); | 891 | nr_blocks << (sb->s_blocksize_bits - 9), |
941 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS, | 892 | gfp_mask, flags); |
942 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
943 | } | 893 | } |
944 | 894 | ||
945 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 895 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
@@ -1004,7 +954,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) | |||
1004 | return q->limits.physical_block_size; | 954 | return q->limits.physical_block_size; |
1005 | } | 955 | } |
1006 | 956 | ||
1007 | static inline int bdev_physical_block_size(struct block_device *bdev) | 957 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) |
1008 | { | 958 | { |
1009 | return queue_physical_block_size(bdev_get_queue(bdev)); | 959 | return queue_physical_block_size(bdev_get_queue(bdev)); |
1010 | } | 960 | } |
@@ -1093,11 +1043,11 @@ static inline int queue_dma_alignment(struct request_queue *q) | |||
1093 | return q ? q->dma_alignment : 511; | 1043 | return q ? q->dma_alignment : 511; |
1094 | } | 1044 | } |
1095 | 1045 | ||
1096 | static inline int blk_rq_aligned(struct request_queue *q, void *addr, | 1046 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
1097 | unsigned int len) | 1047 | unsigned int len) |
1098 | { | 1048 | { |
1099 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 1049 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
1100 | return !((unsigned long)addr & alignment) && !(len & alignment); | 1050 | return !(addr & alignment) && !(len & alignment); |
1101 | } | 1051 | } |
1102 | 1052 | ||
1103 | /* assumes size > 256 */ | 1053 | /* assumes size > 256 */ |
@@ -1127,6 +1077,7 @@ static inline void put_dev_sector(Sector p) | |||
1127 | 1077 | ||
1128 | struct work_struct; | 1078 | struct work_struct; |
1129 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1079 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1080 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | ||
1130 | 1081 | ||
1131 | #ifdef CONFIG_BLK_CGROUP | 1082 | #ifdef CONFIG_BLK_CGROUP |
1132 | /* | 1083 | /* |
@@ -1170,6 +1121,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
1170 | } | 1121 | } |
1171 | #endif | 1122 | #endif |
1172 | 1123 | ||
1124 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
1125 | extern int blk_throtl_init(struct request_queue *q); | ||
1126 | extern void blk_throtl_exit(struct request_queue *q); | ||
1127 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | ||
1128 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | ||
1129 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | ||
1130 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
1131 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | ||
1132 | { | ||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1136 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
1137 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | ||
1138 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | ||
1139 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | ||
1140 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
1141 | |||
1173 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1142 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1174 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1143 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
1175 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1144 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
@@ -1213,8 +1182,13 @@ struct blk_integrity { | |||
1213 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 1182 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1214 | extern void blk_integrity_unregister(struct gendisk *); | 1183 | extern void blk_integrity_unregister(struct gendisk *); |
1215 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1184 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1216 | extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); | 1185 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
1217 | extern int blk_rq_count_integrity_sg(struct request *); | 1186 | struct scatterlist *); |
1187 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | ||
1188 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | ||
1189 | struct request *); | ||
1190 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | ||
1191 | struct bio *); | ||
1218 | 1192 | ||
1219 | static inline | 1193 | static inline |
1220 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 1194 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
@@ -1235,16 +1209,32 @@ static inline int blk_integrity_rq(struct request *rq) | |||
1235 | return bio_integrity(rq->bio); | 1209 | return bio_integrity(rq->bio); |
1236 | } | 1210 | } |
1237 | 1211 | ||
1212 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | ||
1213 | unsigned int segs) | ||
1214 | { | ||
1215 | q->limits.max_integrity_segments = segs; | ||
1216 | } | ||
1217 | |||
1218 | static inline unsigned short | ||
1219 | queue_max_integrity_segments(struct request_queue *q) | ||
1220 | { | ||
1221 | return q->limits.max_integrity_segments; | ||
1222 | } | ||
1223 | |||
1238 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1224 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
1239 | 1225 | ||
1240 | #define blk_integrity_rq(rq) (0) | 1226 | #define blk_integrity_rq(rq) (0) |
1241 | #define blk_rq_count_integrity_sg(a) (0) | 1227 | #define blk_rq_count_integrity_sg(a, b) (0) |
1242 | #define blk_rq_map_integrity_sg(a, b) (0) | 1228 | #define blk_rq_map_integrity_sg(a, b, c) (0) |
1243 | #define bdev_get_integrity(a) (0) | 1229 | #define bdev_get_integrity(a) (0) |
1244 | #define blk_get_integrity(a) (0) | 1230 | #define blk_get_integrity(a) (0) |
1245 | #define blk_integrity_compare(a, b) (0) | 1231 | #define blk_integrity_compare(a, b) (0) |
1246 | #define blk_integrity_register(a, b) (0) | 1232 | #define blk_integrity_register(a, b) (0) |
1247 | #define blk_integrity_unregister(a) do { } while (0); | 1233 | #define blk_integrity_unregister(a) do { } while (0); |
1234 | #define blk_queue_max_integrity_segments(a, b) do { } while (0); | ||
1235 | #define queue_max_integrity_segments(a) (0) | ||
1236 | #define blk_integrity_merge_rq(a, b, c) (0) | ||
1237 | #define blk_integrity_merge_bio(a, b, c) (0) | ||
1248 | 1238 | ||
1249 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1239 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1250 | 1240 | ||
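
With the BLKDEV_IFL_* flags gone, blkdev_issue_flush() loses its flags argument and sb_issue_discard() now takes the gfp mask and discard flags from the caller. A hedged sketch of a filesystem-side caller under the new signatures (the function and its error handling are illustrative only):

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Illustrative only: discard a freed extent, then flush the device cache. */
static int fs_discard_and_flush(struct super_block *sb, sector_t block,
				sector_t nr_blocks)
{
	int ret;

	/* last argument: 0, or BLKDEV_DISCARD_SECURE for a secure discard */
	ret = sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
	if (ret)
		return ret;

	/* the cache flush no longer takes BLKDEV_IFL_* flags */
	return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
}
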
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index ec94c12f21da..dd1b25b2641c 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -32,7 +32,6 @@ enum bh_state_bits { | |||
32 | BH_Delay, /* Buffer is not yet allocated on disk */ | 32 | BH_Delay, /* Buffer is not yet allocated on disk */ |
33 | BH_Boundary, /* Block is followed by a discontiguity */ | 33 | BH_Boundary, /* Block is followed by a discontiguity */ |
34 | BH_Write_EIO, /* I/O error on write */ | 34 | BH_Write_EIO, /* I/O error on write */ |
35 | BH_Eopnotsupp, /* operation not supported (barrier) */ | ||
36 | BH_Unwritten, /* Buffer is allocated on disk but not written */ | 35 | BH_Unwritten, /* Buffer is allocated on disk but not written */ |
37 | BH_Quiet, /* Buffer Error Prinks to be quiet */ | 36 | BH_Quiet, /* Buffer Error Prinks to be quiet */ |
38 | 37 | ||
@@ -124,7 +123,6 @@ BUFFER_FNS(Async_Write, async_write) | |||
124 | BUFFER_FNS(Delay, delay) | 123 | BUFFER_FNS(Delay, delay) |
125 | BUFFER_FNS(Boundary, boundary) | 124 | BUFFER_FNS(Boundary, boundary) |
126 | BUFFER_FNS(Write_EIO, write_io_error) | 125 | BUFFER_FNS(Write_EIO, write_io_error) |
127 | BUFFER_FNS(Eopnotsupp, eopnotsupp) | ||
128 | BUFFER_FNS(Unwritten, unwritten) | 126 | BUFFER_FNS(Unwritten, unwritten) |
129 | 127 | ||
130 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) | 128 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) |
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index dba28268e651..8e20540043f5 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h | |||
@@ -12,7 +12,6 @@ | |||
12 | /** | 12 | /** |
13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data | 13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data |
14 | * @oscillator_frequency: - oscillator frequency in Hz | 14 | * @oscillator_frequency: - oscillator frequency in Hz |
15 | * @model: - actual type of chip | ||
16 | * @board_specific_setup: - called before probing the chip (power,reset) | 15 | * @board_specific_setup: - called before probing the chip (power,reset) |
17 | * @transceiver_enable: - called to power on/off the transceiver | 16 | * @transceiver_enable: - called to power on/off the transceiver |
18 | * @power_enable: - called to power on/off the mcp *and* the | 17 | * @power_enable: - called to power on/off the mcp *and* the |
@@ -25,9 +24,6 @@ | |||
25 | 24 | ||
26 | struct mcp251x_platform_data { | 25 | struct mcp251x_platform_data { |
27 | unsigned long oscillator_frequency; | 26 | unsigned long oscillator_frequency; |
28 | int model; | ||
29 | #define CAN_MCP251X_MCP2510 0x2510 | ||
30 | #define CAN_MCP251X_MCP2515 0x2515 | ||
31 | int (*board_specific_setup)(struct spi_device *spi); | 27 | int (*board_specific_setup)(struct spi_device *spi); |
32 | int (*transceiver_enable)(int enable); | 28 | int (*transceiver_enable)(int enable); |
33 | int (*power_enable) (int enable); | 29 | int (*power_enable) (int enable); |
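
With the model field gone from the platform data, the chip variant is presumably conveyed through the SPI device identity instead. A hypothetical board hookup under that assumption; the bus number, chip select, clock rates and the "mcp2515" modalias are placeholders:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

/* Hypothetical hookup: every value below is a placeholder. */
static struct mcp251x_platform_data board_mcp251x = {
	.oscillator_frequency = 16000000,	/* 16 MHz crystal */
};

static struct spi_board_info board_spi_devs[] __initdata = {
	{
		.modalias	= "mcp2515",	/* chip variant now named here */
		.platform_data	= &board_mcp251x,
		.max_speed_hz	= 10000000,
		.bus_num	= 0,
		.chip_select	= 1,
	},
};
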
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h new file mode 100644 index 000000000000..7fff521d7eb5 --- /dev/null +++ b/include/linux/ceph/auth.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _FS_CEPH_AUTH_H | ||
2 | #define _FS_CEPH_AUTH_H | ||
3 | |||
4 | #include <linux/ceph/types.h> | ||
5 | #include <linux/ceph/buffer.h> | ||
6 | |||
7 | /* | ||
8 | * Abstract interface for communicating with the authenticate module. | ||
9 | * There is some handshake that takes place between us and the monitor | ||
10 | * to acquire the necessary keys. These are used to generate an | ||
11 | * 'authorizer' that we use when connecting to a service (mds, osd). | ||
12 | */ | ||
13 | |||
14 | struct ceph_auth_client; | ||
15 | struct ceph_authorizer; | ||
16 | |||
17 | struct ceph_auth_client_ops { | ||
18 | const char *name; | ||
19 | |||
20 | /* | ||
21 | * true if we are authenticated and can connect to | ||
22 | * services. | ||
23 | */ | ||
24 | int (*is_authenticated)(struct ceph_auth_client *ac); | ||
25 | |||
26 | /* | ||
27 | * true if we should (re)authenticate, e.g., when our tickets | ||
28 | * are getting old and crusty. | ||
29 | */ | ||
30 | int (*should_authenticate)(struct ceph_auth_client *ac); | ||
31 | |||
32 | /* | ||
33 | * build requests and process replies during monitor | ||
34 | * handshake. if handle_reply returns -EAGAIN, we build | ||
35 | * another request. | ||
36 | */ | ||
37 | int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); | ||
38 | int (*handle_reply)(struct ceph_auth_client *ac, int result, | ||
39 | void *buf, void *end); | ||
40 | |||
41 | /* | ||
42 | * Create authorizer for connecting to a service, and verify | ||
43 | * the response to authenticate the service. | ||
44 | */ | ||
45 | int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, | ||
46 | struct ceph_authorizer **a, | ||
47 | void **buf, size_t *len, | ||
48 | void **reply_buf, size_t *reply_len); | ||
49 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, | ||
50 | struct ceph_authorizer *a, size_t len); | ||
51 | void (*destroy_authorizer)(struct ceph_auth_client *ac, | ||
52 | struct ceph_authorizer *a); | ||
53 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, | ||
54 | int peer_type); | ||
55 | |||
56 | /* reset when we (re)connect to a monitor */ | ||
57 | void (*reset)(struct ceph_auth_client *ac); | ||
58 | |||
59 | void (*destroy)(struct ceph_auth_client *ac); | ||
60 | }; | ||
61 | |||
62 | struct ceph_auth_client { | ||
63 | u32 protocol; /* CEPH_AUTH_* */ | ||
64 | void *private; /* for use by protocol implementation */ | ||
65 | const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ | ||
66 | |||
67 | bool negotiating; /* true if negotiating protocol */ | ||
68 | const char *name; /* entity name */ | ||
69 | u64 global_id; /* our unique id in system */ | ||
70 | const char *secret; /* our secret key */ | ||
71 | unsigned want_keys; /* which services we want */ | ||
72 | }; | ||
73 | |||
74 | extern struct ceph_auth_client *ceph_auth_init(const char *name, | ||
75 | const char *secret); | ||
76 | extern void ceph_auth_destroy(struct ceph_auth_client *ac); | ||
77 | |||
78 | extern void ceph_auth_reset(struct ceph_auth_client *ac); | ||
79 | |||
80 | extern int ceph_auth_build_hello(struct ceph_auth_client *ac, | ||
81 | void *buf, size_t len); | ||
82 | extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, | ||
83 | void *buf, size_t len, | ||
84 | void *reply_buf, size_t reply_len); | ||
85 | extern int ceph_entity_name_encode(const char *name, void **p, void *end); | ||
86 | |||
87 | extern int ceph_build_auth(struct ceph_auth_client *ac, | ||
88 | void *msg_buf, size_t msg_len); | ||
89 | |||
90 | extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); | ||
91 | |||
92 | #endif | ||
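
A hedged usage sketch of the client-facing entry points above, assuming ERR_PTR-style error returns from ceph_auth_init(); the actual handshake (build_request/handle_reply) is driven by the monitor client as replies arrive:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/ceph/auth.h>

/* Illustrative only; error-return convention is an assumption. */
static struct ceph_auth_client *auth_demo(const char *name, const char *secret)
{
	struct ceph_auth_client *ac = ceph_auth_init(name, secret);

	if (IS_ERR(ac))
		return ac;

	if (!ceph_auth_is_authenticated(ac))
		pr_debug("ceph: auth handshake with the monitors still in progress\n");

	return ac;
}
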
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h new file mode 100644 index 000000000000..58d19014068f --- /dev/null +++ b/include/linux/ceph/buffer.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef __FS_CEPH_BUFFER_H | ||
2 | #define __FS_CEPH_BUFFER_H | ||
3 | |||
4 | #include <linux/kref.h> | ||
5 | #include <linux/mm.h> | ||
6 | #include <linux/vmalloc.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/uio.h> | ||
9 | |||
10 | /* | ||
11 | * a simple reference counted buffer. | ||
12 | * | ||
13 | * use kmalloc for small sizes (<= one page), vmalloc for larger | ||
14 | * sizes. | ||
15 | */ | ||
16 | struct ceph_buffer { | ||
17 | struct kref kref; | ||
18 | struct kvec vec; | ||
19 | size_t alloc_len; | ||
20 | bool is_vmalloc; | ||
21 | }; | ||
22 | |||
23 | extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); | ||
24 | extern void ceph_buffer_release(struct kref *kref); | ||
25 | |||
26 | static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) | ||
27 | { | ||
28 | kref_get(&b->kref); | ||
29 | return b; | ||
30 | } | ||
31 | |||
32 | static inline void ceph_buffer_put(struct ceph_buffer *b) | ||
33 | { | ||
34 | kref_put(&b->kref, ceph_buffer_release); | ||
35 | } | ||
36 | |||
37 | extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); | ||
38 | |||
39 | #endif | ||
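
A short usage sketch of the reference-counted buffer above (illustrative; assumes ceph_buffer_new() returns NULL on allocation failure and that vec describes the allocated region):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ceph/buffer.h>

/* Illustrative only: allocate, share and release a ceph_buffer. */
static int buffer_demo(void)
{
	struct ceph_buffer *b = ceph_buffer_new(128, GFP_NOFS);

	if (!b)
		return -ENOMEM;

	memset(b->vec.iov_base, 0, 128);	/* payload lives behind b->vec */
	ceph_buffer_get(b);			/* a second user takes a reference */
	ceph_buffer_put(b);			/* ...and drops it again */
	ceph_buffer_put(b);			/* final put releases the memory */
	return 0;
}
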
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h new file mode 100644 index 000000000000..aa2e19182d99 --- /dev/null +++ b/include/linux/ceph/ceph_debug.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _FS_CEPH_DEBUG_H | ||
2 | #define _FS_CEPH_DEBUG_H | ||
3 | |||
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
5 | |||
6 | #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG | ||
7 | |||
8 | /* | ||
9 | * wrap pr_debug to include a filename:lineno prefix on each line. | ||
10 | * this incurs some overhead (kernel size and execution time) due to | ||
11 | * the extra function call at each call site. | ||
12 | */ | ||
13 | |||
14 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | ||
15 | extern const char *ceph_file_part(const char *s, int len); | ||
16 | # define dout(fmt, ...) \ | ||
17 | pr_debug("%.*s %12.12s:%-4d : " fmt, \ | ||
18 | 8 - (int)sizeof(KBUILD_MODNAME), " ", \ | ||
19 | ceph_file_part(__FILE__, sizeof(__FILE__)), \ | ||
20 | __LINE__, ##__VA_ARGS__) | ||
21 | # else | ||
22 | /* faux printk call just to see any compiler warnings. */ | ||
23 | # define dout(fmt, ...) do { \ | ||
24 | if (0) \ | ||
25 | printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ | ||
26 | } while (0) | ||
27 | # endif | ||
28 | |||
29 | #else | ||
30 | |||
31 | /* | ||
32 | * or, just wrap pr_debug | ||
33 | */ | ||
34 | # define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) | ||
35 | |||
36 | #endif | ||
37 | |||
38 | #endif | ||
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h new file mode 100644 index 000000000000..5babb8e95352 --- /dev/null +++ b/include/linux/ceph/ceph_frag.h | |||
@@ -0,0 +1,109 @@ | |||
1 | #ifndef FS_CEPH_FRAG_H | ||
2 | #define FS_CEPH_FRAG_H | ||
3 | |||
4 | /* | ||
5 | * "Frags" are a way to describe a subset of a 32-bit number space, | ||
6 | * using a mask and a value to match against that mask. Any given frag | ||
7 | * (subset of the number space) can be partitioned into 2^n sub-frags. | ||
8 | * | ||
9 | * Frags are encoded into a 32-bit word: | ||
10 | * 8 upper bits = "bits" | ||
11 | * 24 lower bits = "value" | ||
12 | * (We could go to 5+27 bits, but who cares.) | ||
13 | * | ||
14 | * We use the _most_ significant bits of the 24 bit value. This makes | ||
15 | * values logically sort. | ||
16 | * | ||
17 | * Unfortunately, because the "bits" field is still in the high bits, we | ||
18 | * can't sort encoded frags numerically. However, it does allow you | ||
19 | * to feed encoded frags as values into frag_contains_value. | ||
20 | */ | ||
21 | static inline __u32 ceph_frag_make(__u32 b, __u32 v) | ||
22 | { | ||
23 | return (b << 24) | | ||
24 | (v & (0xffffffu << (24-b)) & 0xffffffu); | ||
25 | } | ||
26 | static inline __u32 ceph_frag_bits(__u32 f) | ||
27 | { | ||
28 | return f >> 24; | ||
29 | } | ||
30 | static inline __u32 ceph_frag_value(__u32 f) | ||
31 | { | ||
32 | return f & 0xffffffu; | ||
33 | } | ||
34 | static inline __u32 ceph_frag_mask(__u32 f) | ||
35 | { | ||
36 | return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; | ||
37 | } | ||
38 | static inline __u32 ceph_frag_mask_shift(__u32 f) | ||
39 | { | ||
40 | return 24 - ceph_frag_bits(f); | ||
41 | } | ||
42 | |||
43 | static inline int ceph_frag_contains_value(__u32 f, __u32 v) | ||
44 | { | ||
45 | return (v & ceph_frag_mask(f)) == ceph_frag_value(f); | ||
46 | } | ||
47 | static inline int ceph_frag_contains_frag(__u32 f, __u32 sub) | ||
48 | { | ||
49 | /* is sub as specific as us, and contained by us? */ | ||
50 | return ceph_frag_bits(sub) >= ceph_frag_bits(f) && | ||
51 | (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f); | ||
52 | } | ||
53 | |||
54 | static inline __u32 ceph_frag_parent(__u32 f) | ||
55 | { | ||
56 | return ceph_frag_make(ceph_frag_bits(f) - 1, | ||
57 | ceph_frag_value(f) & (ceph_frag_mask(f) << 1)); | ||
58 | } | ||
59 | static inline int ceph_frag_is_left_child(__u32 f) | ||
60 | { | ||
61 | return ceph_frag_bits(f) > 0 && | ||
62 | (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0; | ||
63 | } | ||
64 | static inline int ceph_frag_is_right_child(__u32 f) | ||
65 | { | ||
66 | return ceph_frag_bits(f) > 0 && | ||
67 | (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1; | ||
68 | } | ||
69 | static inline __u32 ceph_frag_sibling(__u32 f) | ||
70 | { | ||
71 | return ceph_frag_make(ceph_frag_bits(f), | ||
72 | ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f))); | ||
73 | } | ||
74 | static inline __u32 ceph_frag_left_child(__u32 f) | ||
75 | { | ||
76 | return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f)); | ||
77 | } | ||
78 | static inline __u32 ceph_frag_right_child(__u32 f) | ||
79 | { | ||
80 | return ceph_frag_make(ceph_frag_bits(f)+1, | ||
81 | ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f)))); | ||
82 | } | ||
83 | static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) | ||
84 | { | ||
85 | int newbits = ceph_frag_bits(f) + by; | ||
86 | return ceph_frag_make(newbits, | ||
87 | ceph_frag_value(f) | (i << (24 - newbits))); | ||
88 | } | ||
89 | static inline int ceph_frag_is_leftmost(__u32 f) | ||
90 | { | ||
91 | return ceph_frag_value(f) == 0; | ||
92 | } | ||
93 | static inline int ceph_frag_is_rightmost(__u32 f) | ||
94 | { | ||
95 | return ceph_frag_value(f) == ceph_frag_mask(f); | ||
96 | } | ||
97 | static inline __u32 ceph_frag_next(__u32 f) | ||
98 | { | ||
99 | return ceph_frag_make(ceph_frag_bits(f), | ||
100 | ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * comparator to sort frags logically, as when traversing the | ||
105 | * number space in ascending order... | ||
106 | */ | ||
107 | int ceph_frag_compare(__u32 a, __u32 b); | ||
108 | |||
109 | #endif | ||
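
The encoding described in the comment above is easiest to see with concrete numbers. The following standalone userspace sketch is illustrative only (it is not part of the diff); it re-states the ceph_frag_make()/bits()/value()/mask() formulas so the arithmetic can be checked outside the kernel, and the constants chosen are arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same formulas as the inline helpers in ceph_frag.h above */
static uint32_t frag_make(uint32_t b, uint32_t v)
{
	return (b << 24) | (v & (0xffffffu << (24 - b)) & 0xffffffu);
}
static uint32_t frag_bits(uint32_t f)  { return f >> 24; }
static uint32_t frag_value(uint32_t f) { return f & 0xffffffu; }
static uint32_t frag_mask(uint32_t f)
{
	return (0xffffffu << (24 - frag_bits(f))) & 0xffffffu;
}

int main(void)
{
	/* 2 bits -> the number space splits into 4 frags; take the top quarter */
	uint32_t f = frag_make(2, 0xc00000);

	assert(f == 0x02c00000);                            /* 8-bit "bits" | 24-bit value */
	assert((0xd12345 & frag_mask(f)) == frag_value(f)); /* value contained in frag */
	assert((0x312345 & frag_mask(f)) != frag_value(f)); /* value not contained */
	printf("frag 0x%08x: bits=%u value=0x%06x\n",
	       f, frag_bits(f), frag_value(f));
	return 0;
}
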
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h new file mode 100644 index 000000000000..c3c74aef289d --- /dev/null +++ b/include/linux/ceph/ceph_fs.h | |||
@@ -0,0 +1,729 @@ | |||
1 | /* | ||
2 | * ceph_fs.h - Ceph constants and data types to share between kernel and | ||
3 | * user space. | ||
4 | * | ||
5 | * Most types in this file are defined as little-endian, and are | ||
6 | * primarily intended to describe data structures that pass over the | ||
7 | * wire or that are stored on disk. | ||
8 | * | ||
9 | * LGPL2 | ||
10 | */ | ||
11 | |||
12 | #ifndef CEPH_FS_H | ||
13 | #define CEPH_FS_H | ||
14 | |||
15 | #include "msgr.h" | ||
16 | #include "rados.h" | ||
17 | |||
18 | /* | ||
19 | * subprotocol versions. when specific message types or high-level | ||
20 | * protocols change, bump the affected components. we rev the | ||
21 | * internal cluster protocols separately from the public, | ||
22 | * client-facing protocol. | ||
23 | */ | ||
24 | #define CEPH_OSD_PROTOCOL 8 /* cluster internal */ | ||
25 | #define CEPH_MDS_PROTOCOL 12 /* cluster internal */ | ||
26 | #define CEPH_MON_PROTOCOL 5 /* cluster internal */ | ||
27 | #define CEPH_OSDC_PROTOCOL 24 /* server/client */ | ||
28 | #define CEPH_MDSC_PROTOCOL 32 /* server/client */ | ||
29 | #define CEPH_MONC_PROTOCOL 15 /* server/client */ | ||
30 | |||
31 | |||
32 | #define CEPH_INO_ROOT 1 | ||
33 | #define CEPH_INO_CEPH 2 /* hidden .ceph dir */ | ||
34 | |||
35 | /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ | ||
36 | #define CEPH_MAX_MON 31 | ||
37 | |||
38 | |||
39 | /* | ||
40 | * feature bits | ||
41 | */ | ||
42 | #define CEPH_FEATURE_UID (1<<0) | ||
43 | #define CEPH_FEATURE_NOSRCADDR (1<<1) | ||
44 | #define CEPH_FEATURE_MONCLOCKCHECK (1<<2) | ||
45 | #define CEPH_FEATURE_FLOCK (1<<3) | ||
46 | |||
47 | |||
48 | /* | ||
49 | * ceph_file_layout - describe data layout for a file/inode | ||
50 | */ | ||
51 | struct ceph_file_layout { | ||
52 | /* file -> object mapping */ | ||
53 | __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple | ||
54 | of page size. */ | ||
55 | __le32 fl_stripe_count; /* over this many objects */ | ||
56 | __le32 fl_object_size; /* until objects are this big, then move to | ||
57 | new objects */ | ||
58 | __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */ | ||
59 | |||
60 | /* pg -> disk layout */ | ||
61 | __le32 fl_object_stripe_unit; /* for per-object parity, if any */ | ||
62 | |||
63 | /* object -> pg layout */ | ||
64 | __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */ | ||
65 | __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ | ||
66 | } __attribute__ ((packed)); | ||
67 | |||
68 | #define CEPH_MIN_STRIPE_UNIT 65536 | ||
69 | |||
70 | int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); | ||
71 | |||
72 | |||
73 | /* crypto algorithms */ | ||
74 | #define CEPH_CRYPTO_NONE 0x0 | ||
75 | #define CEPH_CRYPTO_AES 0x1 | ||
76 | |||
77 | #define CEPH_AES_IV "cephsageyudagreg" | ||
78 | |||
79 | /* security/authentication protocols */ | ||
80 | #define CEPH_AUTH_UNKNOWN 0x0 | ||
81 | #define CEPH_AUTH_NONE 0x1 | ||
82 | #define CEPH_AUTH_CEPHX 0x2 | ||
83 | |||
84 | #define CEPH_AUTH_UID_DEFAULT ((__u64) -1) | ||
85 | |||
86 | |||
87 | /********************************************* | ||
88 | * message layer | ||
89 | */ | ||
90 | |||
91 | /* | ||
92 | * message types | ||
93 | */ | ||
94 | |||
95 | /* misc */ | ||
96 | #define CEPH_MSG_SHUTDOWN 1 | ||
97 | #define CEPH_MSG_PING 2 | ||
98 | |||
99 | /* client <-> monitor */ | ||
100 | #define CEPH_MSG_MON_MAP 4 | ||
101 | #define CEPH_MSG_MON_GET_MAP 5 | ||
102 | #define CEPH_MSG_STATFS 13 | ||
103 | #define CEPH_MSG_STATFS_REPLY 14 | ||
104 | #define CEPH_MSG_MON_SUBSCRIBE 15 | ||
105 | #define CEPH_MSG_MON_SUBSCRIBE_ACK 16 | ||
106 | #define CEPH_MSG_AUTH 17 | ||
107 | #define CEPH_MSG_AUTH_REPLY 18 | ||
108 | |||
109 | /* client <-> mds */ | ||
110 | #define CEPH_MSG_MDS_MAP 21 | ||
111 | |||
112 | #define CEPH_MSG_CLIENT_SESSION 22 | ||
113 | #define CEPH_MSG_CLIENT_RECONNECT 23 | ||
114 | |||
115 | #define CEPH_MSG_CLIENT_REQUEST 24 | ||
116 | #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 | ||
117 | #define CEPH_MSG_CLIENT_REPLY 26 | ||
118 | #define CEPH_MSG_CLIENT_CAPS 0x310 | ||
119 | #define CEPH_MSG_CLIENT_LEASE 0x311 | ||
120 | #define CEPH_MSG_CLIENT_SNAP 0x312 | ||
121 | #define CEPH_MSG_CLIENT_CAPRELEASE 0x313 | ||
122 | |||
123 | /* pool ops */ | ||
124 | #define CEPH_MSG_POOLOP_REPLY 48 | ||
125 | #define CEPH_MSG_POOLOP 49 | ||
126 | |||
127 | |||
128 | /* osd */ | ||
129 | #define CEPH_MSG_OSD_MAP 41 | ||
130 | #define CEPH_MSG_OSD_OP 42 | ||
131 | #define CEPH_MSG_OSD_OPREPLY 43 | ||
132 | |||
133 | /* pool operations */ | ||
134 | enum { | ||
135 | POOL_OP_CREATE = 0x01, | ||
136 | POOL_OP_DELETE = 0x02, | ||
137 | POOL_OP_AUID_CHANGE = 0x03, | ||
138 | POOL_OP_CREATE_SNAP = 0x11, | ||
139 | POOL_OP_DELETE_SNAP = 0x12, | ||
140 | POOL_OP_CREATE_UNMANAGED_SNAP = 0x21, | ||
141 | POOL_OP_DELETE_UNMANAGED_SNAP = 0x22, | ||
142 | }; | ||
143 | |||
144 | struct ceph_mon_request_header { | ||
145 | __le64 have_version; | ||
146 | __le16 session_mon; | ||
147 | __le64 session_mon_tid; | ||
148 | } __attribute__ ((packed)); | ||
149 | |||
150 | struct ceph_mon_statfs { | ||
151 | struct ceph_mon_request_header monhdr; | ||
152 | struct ceph_fsid fsid; | ||
153 | } __attribute__ ((packed)); | ||
154 | |||
155 | struct ceph_statfs { | ||
156 | __le64 kb, kb_used, kb_avail; | ||
157 | __le64 num_objects; | ||
158 | } __attribute__ ((packed)); | ||
159 | |||
160 | struct ceph_mon_statfs_reply { | ||
161 | struct ceph_fsid fsid; | ||
162 | __le64 version; | ||
163 | struct ceph_statfs st; | ||
164 | } __attribute__ ((packed)); | ||
165 | |||
166 | const char *ceph_pool_op_name(int op); | ||
167 | |||
168 | struct ceph_mon_poolop { | ||
169 | struct ceph_mon_request_header monhdr; | ||
170 | struct ceph_fsid fsid; | ||
171 | __le32 pool; | ||
172 | __le32 op; | ||
173 | __le64 auid; | ||
174 | __le64 snapid; | ||
175 | __le32 name_len; | ||
176 | } __attribute__ ((packed)); | ||
177 | |||
178 | struct ceph_mon_poolop_reply { | ||
179 | struct ceph_mon_request_header monhdr; | ||
180 | struct ceph_fsid fsid; | ||
181 | __le32 reply_code; | ||
182 | __le32 epoch; | ||
183 | char has_data; | ||
184 | char data[0]; | ||
185 | } __attribute__ ((packed)); | ||
186 | |||
187 | struct ceph_mon_unmanaged_snap { | ||
188 | __le64 snapid; | ||
189 | } __attribute__ ((packed)); | ||
190 | |||
191 | struct ceph_osd_getmap { | ||
192 | struct ceph_mon_request_header monhdr; | ||
193 | struct ceph_fsid fsid; | ||
194 | __le32 start; | ||
195 | } __attribute__ ((packed)); | ||
196 | |||
197 | struct ceph_mds_getmap { | ||
198 | struct ceph_mon_request_header monhdr; | ||
199 | struct ceph_fsid fsid; | ||
200 | } __attribute__ ((packed)); | ||
201 | |||
202 | struct ceph_client_mount { | ||
203 | struct ceph_mon_request_header monhdr; | ||
204 | } __attribute__ ((packed)); | ||
205 | |||
206 | struct ceph_mon_subscribe_item { | ||
207 | __le64 have_version; __le64 have; | ||
208 | __u8 onetime; | ||
209 | } __attribute__ ((packed)); | ||
210 | |||
211 | struct ceph_mon_subscribe_ack { | ||
212 | __le32 duration; /* seconds */ | ||
213 | struct ceph_fsid fsid; | ||
214 | } __attribute__ ((packed)); | ||
215 | |||
216 | /* | ||
217 | * mds states | ||
218 | * > 0 -> in | ||
219 | * <= 0 -> out | ||
220 | */ | ||
221 | #define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ | ||
222 | #define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. | ||
223 | empty log. */ | ||
224 | #define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ | ||
225 | #define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ | ||
226 | #define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ | ||
227 | #define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ | ||
228 | #define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ | ||
229 | |||
230 | #define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */ | ||
231 | #define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed | ||
232 | operations (import, rename, etc.) */ | ||
233 | #define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ | ||
234 | #define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ | ||
235 | #define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ | ||
236 | #define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ | ||
237 | #define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ | ||
238 | |||
239 | extern const char *ceph_mds_state_name(int s); | ||
240 | |||
241 | |||
242 | /* | ||
243 | * metadata lock types. | ||
244 | * - these are bitmasks.. we can compose them | ||
245 | * - they also define the lock ordering by the MDS | ||
246 | * - a few of these are internal to the mds | ||
247 | */ | ||
248 | #define CEPH_LOCK_DVERSION 1 | ||
249 | #define CEPH_LOCK_DN 2 | ||
250 | #define CEPH_LOCK_ISNAP 16 | ||
251 | #define CEPH_LOCK_IVERSION 32 /* mds internal */ | ||
252 | #define CEPH_LOCK_IFILE 64 | ||
253 | #define CEPH_LOCK_IAUTH 128 | ||
254 | #define CEPH_LOCK_ILINK 256 | ||
255 | #define CEPH_LOCK_IDFT 512 /* dir frag tree */ | ||
256 | #define CEPH_LOCK_INEST 1024 /* mds internal */ | ||
257 | #define CEPH_LOCK_IXATTR 2048 | ||
258 | #define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */ | ||
259 | #define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ | ||
260 | |||
261 | /* client_session ops */ | ||
262 | enum { | ||
263 | CEPH_SESSION_REQUEST_OPEN, | ||
264 | CEPH_SESSION_OPEN, | ||
265 | CEPH_SESSION_REQUEST_CLOSE, | ||
266 | CEPH_SESSION_CLOSE, | ||
267 | CEPH_SESSION_REQUEST_RENEWCAPS, | ||
268 | CEPH_SESSION_RENEWCAPS, | ||
269 | CEPH_SESSION_STALE, | ||
270 | CEPH_SESSION_RECALL_STATE, | ||
271 | }; | ||
272 | |||
273 | extern const char *ceph_session_op_name(int op); | ||
274 | |||
275 | struct ceph_mds_session_head { | ||
276 | __le32 op; | ||
277 | __le64 seq; | ||
278 | struct ceph_timespec stamp; | ||
279 | __le32 max_caps, max_leases; | ||
280 | } __attribute__ ((packed)); | ||
281 | |||
282 | /* client_request */ | ||
283 | /* | ||
284 | * metadata ops. | ||
285 | * & 0x001000 -> write op | ||
286 | * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). | ||
287 | * & 0x100000 -> use weird ino/path trace | ||
288 | */ | ||
289 | #define CEPH_MDS_OP_WRITE 0x001000 | ||
290 | enum { | ||
291 | CEPH_MDS_OP_LOOKUP = 0x00100, | ||
292 | CEPH_MDS_OP_GETATTR = 0x00101, | ||
293 | CEPH_MDS_OP_LOOKUPHASH = 0x00102, | ||
294 | CEPH_MDS_OP_LOOKUPPARENT = 0x00103, | ||
295 | |||
296 | CEPH_MDS_OP_SETXATTR = 0x01105, | ||
297 | CEPH_MDS_OP_RMXATTR = 0x01106, | ||
298 | CEPH_MDS_OP_SETLAYOUT = 0x01107, | ||
299 | CEPH_MDS_OP_SETATTR = 0x01108, | ||
300 | CEPH_MDS_OP_SETFILELOCK= 0x01109, | ||
301 | CEPH_MDS_OP_GETFILELOCK= 0x00110, | ||
302 | CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, | ||
303 | |||
304 | CEPH_MDS_OP_MKNOD = 0x01201, | ||
305 | CEPH_MDS_OP_LINK = 0x01202, | ||
306 | CEPH_MDS_OP_UNLINK = 0x01203, | ||
307 | CEPH_MDS_OP_RENAME = 0x01204, | ||
308 | CEPH_MDS_OP_MKDIR = 0x01220, | ||
309 | CEPH_MDS_OP_RMDIR = 0x01221, | ||
310 | CEPH_MDS_OP_SYMLINK = 0x01222, | ||
311 | |||
312 | CEPH_MDS_OP_CREATE = 0x01301, | ||
313 | CEPH_MDS_OP_OPEN = 0x00302, | ||
314 | CEPH_MDS_OP_READDIR = 0x00305, | ||
315 | |||
316 | CEPH_MDS_OP_LOOKUPSNAP = 0x00400, | ||
317 | CEPH_MDS_OP_MKSNAP = 0x01400, | ||
318 | CEPH_MDS_OP_RMSNAP = 0x01401, | ||
319 | CEPH_MDS_OP_LSSNAP = 0x00402, | ||
320 | }; | ||
321 | |||
322 | extern const char *ceph_mds_op_name(int op); | ||
323 | |||
324 | |||
325 | #define CEPH_SETATTR_MODE 1 | ||
326 | #define CEPH_SETATTR_UID 2 | ||
327 | #define CEPH_SETATTR_GID 4 | ||
328 | #define CEPH_SETATTR_MTIME 8 | ||
329 | #define CEPH_SETATTR_ATIME 16 | ||
330 | #define CEPH_SETATTR_SIZE 32 | ||
331 | #define CEPH_SETATTR_CTIME 64 | ||
332 | |||
333 | union ceph_mds_request_args { | ||
334 | struct { | ||
335 | __le32 mask; /* CEPH_CAP_* */ | ||
336 | } __attribute__ ((packed)) getattr; | ||
337 | struct { | ||
338 | __le32 mode; | ||
339 | __le32 uid; | ||
340 | __le32 gid; | ||
341 | struct ceph_timespec mtime; | ||
342 | struct ceph_timespec atime; | ||
343 | __le64 size, old_size; /* old_size needed by truncate */ | ||
344 | __le32 mask; /* CEPH_SETATTR_* */ | ||
345 | } __attribute__ ((packed)) setattr; | ||
346 | struct { | ||
347 | __le32 frag; /* which dir fragment */ | ||
348 | __le32 max_entries; /* how many dentries to grab */ | ||
349 | __le32 max_bytes; | ||
350 | } __attribute__ ((packed)) readdir; | ||
351 | struct { | ||
352 | __le32 mode; | ||
353 | __le32 rdev; | ||
354 | } __attribute__ ((packed)) mknod; | ||
355 | struct { | ||
356 | __le32 mode; | ||
357 | } __attribute__ ((packed)) mkdir; | ||
358 | struct { | ||
359 | __le32 flags; | ||
360 | __le32 mode; | ||
361 | __le32 stripe_unit; /* layout for newly created file */ | ||
362 | __le32 stripe_count; /* ... */ | ||
363 | __le32 object_size; | ||
364 | __le32 file_replication; | ||
365 | __le32 preferred; | ||
366 | } __attribute__ ((packed)) open; | ||
367 | struct { | ||
368 | __le32 flags; | ||
369 | } __attribute__ ((packed)) setxattr; | ||
370 | struct { | ||
371 | struct ceph_file_layout layout; | ||
372 | } __attribute__ ((packed)) setlayout; | ||
373 | struct { | ||
374 | __u8 rule; /* currently fcntl or flock */ | ||
375 | __u8 type; /* shared, exclusive, remove */ | ||
376 | __le64 pid; /* process id requesting the lock */ | ||
377 | __le64 pid_namespace; | ||
378 | __le64 start; /* initial location to lock */ | ||
379 | __le64 length; /* num bytes to lock from start */ | ||
380 | __u8 wait; /* will caller wait for lock to become available? */ | ||
381 | } __attribute__ ((packed)) filelock_change; | ||
382 | } __attribute__ ((packed)); | ||
383 | |||
384 | #define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ | ||
385 | #define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ | ||
386 | |||
387 | struct ceph_mds_request_head { | ||
388 | __le64 oldest_client_tid; | ||
389 | __le32 mdsmap_epoch; /* on client */ | ||
390 | __le32 flags; /* CEPH_MDS_FLAG_* */ | ||
391 | __u8 num_retry, num_fwd; /* count retry, fwd attempts */ | ||
392 | __le16 num_releases; /* # include cap/lease release records */ | ||
393 | __le32 op; /* mds op code */ | ||
394 | __le32 caller_uid, caller_gid; | ||
395 | __le64 ino; /* use this ino for openc, mkdir, mknod, | ||
396 | etc. (if replaying) */ | ||
397 | union ceph_mds_request_args args; | ||
398 | } __attribute__ ((packed)); | ||
399 | |||
400 | /* cap/lease release record */ | ||
401 | struct ceph_mds_request_release { | ||
402 | __le64 ino, cap_id; /* ino and unique cap id */ | ||
403 | __le32 caps, wanted; /* new issued, wanted */ | ||
404 | __le32 seq, issue_seq, mseq; | ||
405 | __le32 dname_seq; /* if releasing a dentry lease, a */ | ||
406 | __le32 dname_len; /* string follows. */ | ||
407 | } __attribute__ ((packed)); | ||
408 | |||
409 | /* client reply */ | ||
410 | struct ceph_mds_reply_head { | ||
411 | __le32 op; | ||
412 | __le32 result; | ||
413 | __le32 mdsmap_epoch; | ||
414 | __u8 safe; /* true if committed to disk */ | ||
415 | __u8 is_dentry, is_target; /* true if dentry, target inode records | ||
416 | are included with reply */ | ||
417 | } __attribute__ ((packed)); | ||
418 | |||
419 | /* one for each node split */ | ||
420 | struct ceph_frag_tree_split { | ||
421 | __le32 frag; /* this frag splits... */ | ||
422 | __le32 by; /* ...by this many bits */ | ||
423 | } __attribute__ ((packed)); | ||
424 | |||
425 | struct ceph_frag_tree_head { | ||
426 | __le32 nsplits; /* num ceph_frag_tree_split records */ | ||
427 | struct ceph_frag_tree_split splits[]; | ||
428 | } __attribute__ ((packed)); | ||
429 | |||
430 | /* capability issue, for bundling with mds reply */ | ||
431 | struct ceph_mds_reply_cap { | ||
432 | __le32 caps, wanted; /* caps issued, wanted */ | ||
433 | __le64 cap_id; | ||
434 | __le32 seq, mseq; | ||
435 | __le64 realm; /* snap realm */ | ||
436 | __u8 flags; /* CEPH_CAP_FLAG_* */ | ||
437 | } __attribute__ ((packed)); | ||
438 | |||
439 | #define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */ | ||
440 | |||
441 | /* inode record, for bundling with mds reply */ | ||
442 | struct ceph_mds_reply_inode { | ||
443 | __le64 ino; | ||
444 | __le64 snapid; | ||
445 | __le32 rdev; | ||
446 | __le64 version; /* inode version */ | ||
447 | __le64 xattr_version; /* version for xattr blob */ | ||
448 | struct ceph_mds_reply_cap cap; /* caps issued for this inode */ | ||
449 | struct ceph_file_layout layout; | ||
450 | struct ceph_timespec ctime, mtime, atime; | ||
451 | __le32 time_warp_seq; | ||
452 | __le64 size, max_size, truncate_size; | ||
453 | __le32 truncate_seq; | ||
454 | __le32 mode, uid, gid; | ||
455 | __le32 nlink; | ||
456 | __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ | ||
457 | struct ceph_timespec rctime; | ||
458 | struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ | ||
459 | } __attribute__ ((packed)); | ||
460 | /* followed by frag array, then symlink string, then xattr blob */ | ||
461 | |||
462 | /* reply_lease follows dname, and reply_inode */ | ||
463 | struct ceph_mds_reply_lease { | ||
464 | __le16 mask; /* lease type(s) */ | ||
465 | __le32 duration_ms; /* lease duration */ | ||
466 | __le32 seq; | ||
467 | } __attribute__ ((packed)); | ||
468 | |||
469 | struct ceph_mds_reply_dirfrag { | ||
470 | __le32 frag; /* fragment */ | ||
471 | __le32 auth; /* auth mds, if this is a delegation point */ | ||
472 | __le32 ndist; /* number of mds' this is replicated on */ | ||
473 | __le32 dist[]; | ||
474 | } __attribute__ ((packed)); | ||
475 | |||
476 | #define CEPH_LOCK_FCNTL 1 | ||
477 | #define CEPH_LOCK_FLOCK 2 | ||
478 | |||
479 | #define CEPH_LOCK_SHARED 1 | ||
480 | #define CEPH_LOCK_EXCL 2 | ||
481 | #define CEPH_LOCK_UNLOCK 4 | ||
482 | |||
483 | struct ceph_filelock { | ||
484 | __le64 start;/* file offset to start lock at */ | ||
485 | __le64 length; /* num bytes to lock; 0 for all following start */ | ||
486 | __le64 client; /* which client holds the lock */ | ||
487 | __le64 pid; /* process id holding the lock on the client */ | ||
488 | __le64 pid_namespace; | ||
489 | __u8 type; /* shared lock, exclusive lock, or unlock */ | ||
490 | } __attribute__ ((packed)); | ||
491 | |||
492 | |||
493 | /* file access modes */ | ||
494 | #define CEPH_FILE_MODE_PIN 0 | ||
495 | #define CEPH_FILE_MODE_RD 1 | ||
496 | #define CEPH_FILE_MODE_WR 2 | ||
497 | #define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ | ||
498 | #define CEPH_FILE_MODE_LAZY 4 /* lazy io */ | ||
499 | #define CEPH_FILE_MODE_NUM 8 /* because these are bit fields.. mostly */ | ||
500 | |||
501 | int ceph_flags_to_mode(int flags); | ||
502 | |||
503 | |||
504 | /* capability bits */ | ||
505 | #define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ | ||
506 | |||
507 | /* generic cap bits */ | ||
508 | #define CEPH_CAP_GSHARED 1 /* client can read */ | ||
509 | #define CEPH_CAP_GEXCL 2 /* client can read and update */ | ||
510 | #define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ | ||
511 | #define CEPH_CAP_GRD 8 /* (file) client can read */ | ||
512 | #define CEPH_CAP_GWR 16 /* (file) client can write */ | ||
513 | #define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ | ||
514 | #define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ | ||
515 | #define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ | ||
516 | |||
517 | /* per-lock shift */ | ||
518 | #define CEPH_CAP_SAUTH 2 | ||
519 | #define CEPH_CAP_SLINK 4 | ||
520 | #define CEPH_CAP_SXATTR 6 | ||
521 | #define CEPH_CAP_SFILE 8 | ||
522 | #define CEPH_CAP_SFLOCK 20 | ||
523 | |||
524 | #define CEPH_CAP_BITS 22 | ||
525 | |||
526 | /* composed values */ | ||
527 | #define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) | ||
528 | #define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) | ||
529 | #define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) | ||
530 | #define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) | ||
531 | #define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) | ||
532 | #define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) | ||
533 | #define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) | ||
534 | #define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) | ||
535 | #define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) | ||
536 | #define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) | ||
537 | #define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) | ||
538 | #define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) | ||
539 | #define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) | ||
540 | #define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) | ||
541 | #define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) | ||
542 | #define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK) | ||
543 | #define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK) | ||
544 | |||
545 | |||
546 | /* cap masks (for getattr) */ | ||
547 | #define CEPH_STAT_CAP_INODE CEPH_CAP_PIN | ||
548 | #define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ | ||
549 | #define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN | ||
550 | #define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED | ||
551 | #define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED | ||
552 | #define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED | ||
553 | #define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED | ||
554 | #define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED | ||
555 | #define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED | ||
556 | #define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED | ||
557 | #define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ | ||
558 | #define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED | ||
559 | #define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ | ||
560 | CEPH_CAP_AUTH_SHARED | \ | ||
561 | CEPH_CAP_LINK_SHARED | \ | ||
562 | CEPH_CAP_FILE_SHARED | \ | ||
563 | CEPH_CAP_XATTR_SHARED) | ||
564 | |||
565 | #define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ | ||
566 | CEPH_CAP_LINK_SHARED | \ | ||
567 | CEPH_CAP_XATTR_SHARED | \ | ||
568 | CEPH_CAP_FILE_SHARED) | ||
569 | #define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ | ||
570 | CEPH_CAP_FILE_CACHE) | ||
571 | |||
572 | #define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ | ||
573 | CEPH_CAP_LINK_EXCL | \ | ||
574 | CEPH_CAP_XATTR_EXCL | \ | ||
575 | CEPH_CAP_FILE_EXCL) | ||
576 | #define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ | ||
577 | CEPH_CAP_FILE_EXCL) | ||
578 | #define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) | ||
579 | #define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ | ||
580 | CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ | ||
581 | CEPH_CAP_PIN) | ||
582 | |||
583 | #define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ | ||
584 | CEPH_LOCK_IXATTR) | ||
585 | |||
586 | int ceph_caps_for_mode(int mode); | ||
587 | |||
588 | enum { | ||
589 | CEPH_CAP_OP_GRANT, /* mds->client grant */ | ||
590 | CEPH_CAP_OP_REVOKE, /* mds->client revoke */ | ||
591 | CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ | ||
592 | CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ | ||
593 | CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ | ||
594 | CEPH_CAP_OP_UPDATE, /* client->mds update */ | ||
595 | CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ | ||
596 | CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ | ||
597 | CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ | ||
598 | CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ | ||
599 | CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */ | ||
600 | CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ | ||
601 | CEPH_CAP_OP_RENEW, /* client->mds renewal request */ | ||
602 | }; | ||
603 | |||
604 | extern const char *ceph_cap_op_name(int op); | ||
605 | |||
606 | /* | ||
607 | * caps message, used for capability callbacks, acks, requests, etc. | ||
608 | */ | ||
609 | struct ceph_mds_caps { | ||
610 | __le32 op; /* CEPH_CAP_OP_* */ | ||
611 | __le64 ino, realm; | ||
612 | __le64 cap_id; | ||
613 | __le32 seq, issue_seq; | ||
614 | __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ | ||
615 | __le32 migrate_seq; | ||
616 | __le64 snap_follows; | ||
617 | __le32 snap_trace_len; | ||
618 | |||
619 | /* authlock */ | ||
620 | __le32 uid, gid, mode; | ||
621 | |||
622 | /* linklock */ | ||
623 | __le32 nlink; | ||
624 | |||
625 | /* xattrlock */ | ||
626 | __le32 xattr_len; | ||
627 | __le64 xattr_version; | ||
628 | |||
629 | /* filelock */ | ||
630 | __le64 size, max_size, truncate_size; | ||
631 | __le32 truncate_seq; | ||
632 | struct ceph_timespec mtime, atime, ctime; | ||
633 | struct ceph_file_layout layout; | ||
634 | __le32 time_warp_seq; | ||
635 | } __attribute__ ((packed)); | ||
636 | |||
637 | /* cap release msg head */ | ||
638 | struct ceph_mds_cap_release { | ||
639 | __le32 num; /* number of cap_items that follow */ | ||
640 | } __attribute__ ((packed)); | ||
641 | |||
642 | struct ceph_mds_cap_item { | ||
643 | __le64 ino; | ||
644 | __le64 cap_id; | ||
645 | __le32 migrate_seq, seq; | ||
646 | } __attribute__ ((packed)); | ||
647 | |||
648 | #define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ | ||
649 | #define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ | ||
650 | #define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ | ||
651 | #define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ | ||
652 | |||
653 | extern const char *ceph_lease_op_name(int o); | ||
654 | |||
655 | /* lease msg header */ | ||
656 | struct ceph_mds_lease { | ||
657 | __u8 action; /* CEPH_MDS_LEASE_* */ | ||
658 | __le16 mask; /* which lease */ | ||
659 | __le64 ino; | ||
660 | __le64 first, last; /* snap range */ | ||
661 | __le32 seq; | ||
662 | __le32 duration_ms; /* duration of renewal */ | ||
663 | } __attribute__ ((packed)); | ||
664 | /* followed by a __le32+string for dname */ | ||
665 | |||
666 | /* client reconnect */ | ||
667 | struct ceph_mds_cap_reconnect { | ||
668 | __le64 cap_id; | ||
669 | __le32 wanted; | ||
670 | __le32 issued; | ||
671 | __le64 snaprealm; | ||
672 | __le64 pathbase; /* base ino for our path to this ino */ | ||
673 | __le32 flock_len; /* size of flock state blob, if any */ | ||
674 | } __attribute__ ((packed)); | ||
675 | /* followed by flock blob */ | ||
676 | |||
677 | struct ceph_mds_cap_reconnect_v1 { | ||
678 | __le64 cap_id; | ||
679 | __le32 wanted; | ||
680 | __le32 issued; | ||
681 | __le64 size; | ||
682 | struct ceph_timespec mtime, atime; | ||
683 | __le64 snaprealm; | ||
684 | __le64 pathbase; /* base ino for our path to this ino */ | ||
685 | } __attribute__ ((packed)); | ||
686 | |||
687 | struct ceph_mds_snaprealm_reconnect { | ||
688 | __le64 ino; /* snap realm base */ | ||
689 | __le64 seq; /* snap seq for this snap realm */ | ||
690 | __le64 parent; /* parent realm */ | ||
691 | } __attribute__ ((packed)); | ||
692 | |||
693 | /* | ||
694 | * snaps | ||
695 | */ | ||
696 | enum { | ||
697 | CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ | ||
698 | CEPH_SNAP_OP_CREATE, | ||
699 | CEPH_SNAP_OP_DESTROY, | ||
700 | CEPH_SNAP_OP_SPLIT, | ||
701 | }; | ||
702 | |||
703 | extern const char *ceph_snap_op_name(int o); | ||
704 | |||
705 | /* snap msg header */ | ||
706 | struct ceph_mds_snap_head { | ||
707 | __le32 op; /* CEPH_SNAP_OP_* */ | ||
708 | __le64 split; /* ino to split off, if any */ | ||
709 | __le32 num_split_inos; /* # inos belonging to new child realm */ | ||
710 | __le32 num_split_realms; /* # child realms under new child realm */ | ||
711 | __le32 trace_len; /* size of snap trace blob */ | ||
712 | } __attribute__ ((packed)); | ||
713 | /* followed by split ino list, then split realms, then the trace blob */ | ||
714 | |||
715 | /* | ||
716 | * encode info about a snaprealm, as viewed by a client | ||
717 | */ | ||
718 | struct ceph_mds_snap_realm { | ||
719 | __le64 ino; /* ino */ | ||
720 | __le64 created; /* snap: when created */ | ||
721 | __le64 parent; /* ino: parent realm */ | ||
722 | __le64 parent_since; /* snap: same parent since */ | ||
723 | __le64 seq; /* snap: version */ | ||
724 | __le32 num_snaps; | ||
725 | __le32 num_prior_parent_snaps; | ||
726 | } __attribute__ ((packed)); | ||
727 | /* followed by my snap list, then prior parent snap list */ | ||
728 | |||
729 | #endif | ||
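
As an illustration only (not part of the diff; the helper names are made up), the op and capability constants above are designed for plain bit arithmetic: an op carries CEPH_MDS_OP_WRITE if it mutates metadata, and each per-inode cap is a generic CEPH_CAP_G* bit shifted by the matching CEPH_CAP_S* amount.

/* does this MDS op mutate metadata?  e.g. true for CEPH_MDS_OP_MKDIR,
 * false for CEPH_MDS_OP_GETATTR */
static inline int example_op_is_write(int op)
{
	return (op & CEPH_MDS_OP_WRITE) != 0;
}

/* may the client buffer writes, given an issued-caps mask?
 * CEPH_CAP_FILE_BUFFER is CEPH_CAP_GBUFFER << CEPH_CAP_SFILE */
static inline int example_may_buffer_writes(int issued)
{
	return (issued & CEPH_CAP_FILE_BUFFER) != 0;
}
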
diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h new file mode 100644 index 000000000000..d099c3f90236 --- /dev/null +++ b/include/linux/ceph/ceph_hash.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef FS_CEPH_HASH_H | ||
2 | #define FS_CEPH_HASH_H | ||
3 | |||
4 | #define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ | ||
5 | #define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ | ||
6 | |||
7 | extern unsigned ceph_str_hash_linux(const char *s, unsigned len); | ||
8 | extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); | ||
9 | |||
10 | extern unsigned ceph_str_hash(int type, const char *s, unsigned len); | ||
11 | extern const char *ceph_str_hash_name(int type); | ||
12 | |||
13 | #endif | ||
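
For illustration only (not in the diff; the wrapper is hypothetical), a caller picks one of the two hash types and passes the name and its length through ceph_str_hash():

/* hash a dentry name with the rjenkins variant declared above */
static unsigned example_dname_hash(const char *name, unsigned len)
{
	return ceph_str_hash(CEPH_STR_HASH_RJENKINS, name, len);
}
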
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 000000000000..2a79702e092b --- /dev/null +++ b/include/linux/ceph/debugfs.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef _FS_CEPH_DEBUGFS_H | ||
2 | #define _FS_CEPH_DEBUGFS_H | ||
3 | |||
4 | #include "ceph_debug.h" | ||
5 | #include "types.h" | ||
6 | |||
7 | #define CEPH_DEFINE_SHOW_FUNC(name) \ | ||
8 | static int name##_open(struct inode *inode, struct file *file) \ | ||
9 | { \ | ||
10 | struct seq_file *sf; \ | ||
11 | int ret; \ | ||
12 | \ | ||
13 | ret = single_open(file, name, NULL); \ | ||
14 | sf = file->private_data; \ | ||
15 | sf->private = inode->i_private; \ | ||
16 | return ret; \ | ||
17 | } \ | ||
18 | \ | ||
19 | static const struct file_operations name##_fops = { \ | ||
20 | .open = name##_open, \ | ||
21 | .read = seq_read, \ | ||
22 | .llseek = seq_lseek, \ | ||
23 | .release = single_release, \ | ||
24 | }; | ||
25 | |||
26 | /* debugfs.c */ | ||
27 | extern int ceph_debugfs_init(void); | ||
28 | extern void ceph_debugfs_cleanup(void); | ||
29 | extern int ceph_debugfs_client_init(struct ceph_client *client); | ||
30 | extern void ceph_debugfs_client_cleanup(struct ceph_client *client); | ||
31 | |||
32 | #endif | ||
33 | |||
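
To show how CEPH_DEFINE_SHOW_FUNC is meant to be used, here is an illustrative sketch (not part of the diff; the function names and the "status" file are hypothetical, and it assumes CONFIG_DEBUG_FS and the struct ceph_client definition from libceph.h below): write a normal seq_file show function, let the macro generate the _open routine and _fops, then register the file under the client's debugfs directory.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* show function; s->private is inode->i_private, set by the generated open */
static int example_status_show(struct seq_file *s, void *p)
{
	struct ceph_client *client = s->private;

	seq_printf(s, "have_fsid: %d\n", client->have_fsid);
	return 0;
}

CEPH_DEFINE_SHOW_FUNC(example_status_show)

/* hypothetical registration, e.g. from ceph_debugfs_client_init() */
static void example_register(struct ceph_client *client)
{
	debugfs_create_file("status", 0400, client->debugfs_dir,
			    client, &example_status_show_fops);
}
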
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h new file mode 100644 index 000000000000..c5b6939fb32a --- /dev/null +++ b/include/linux/ceph/decode.h | |||
@@ -0,0 +1,201 @@ | |||
1 | #ifndef __CEPH_DECODE_H | ||
2 | #define __CEPH_DECODE_H | ||
3 | |||
4 | #include <asm/unaligned.h> | ||
5 | #include <linux/time.h> | ||
6 | |||
7 | #include "types.h" | ||
8 | |||
9 | /* | ||
10 | * in all cases, | ||
11 | * void **p pointer to position pointer | ||
12 | * void *end pointer to end of buffer (last byte + 1) | ||
13 | */ | ||
14 | |||
15 | static inline u64 ceph_decode_64(void **p) | ||
16 | { | ||
17 | u64 v = get_unaligned_le64(*p); | ||
18 | *p += sizeof(u64); | ||
19 | return v; | ||
20 | } | ||
21 | static inline u32 ceph_decode_32(void **p) | ||
22 | { | ||
23 | u32 v = get_unaligned_le32(*p); | ||
24 | *p += sizeof(u32); | ||
25 | return v; | ||
26 | } | ||
27 | static inline u16 ceph_decode_16(void **p) | ||
28 | { | ||
29 | u16 v = get_unaligned_le16(*p); | ||
30 | *p += sizeof(u16); | ||
31 | return v; | ||
32 | } | ||
33 | static inline u8 ceph_decode_8(void **p) | ||
34 | { | ||
35 | u8 v = *(u8 *)*p; | ||
36 | (*p)++; | ||
37 | return v; | ||
38 | } | ||
39 | static inline void ceph_decode_copy(void **p, void *pv, size_t n) | ||
40 | { | ||
41 | memcpy(pv, *p, n); | ||
42 | *p += n; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * bounds check input. | ||
47 | */ | ||
48 | #define ceph_decode_need(p, end, n, bad) \ | ||
49 | do { \ | ||
50 | if (unlikely(*(p) + (n) > (end))) \ | ||
51 | goto bad; \ | ||
52 | } while (0) | ||
53 | |||
54 | #define ceph_decode_64_safe(p, end, v, bad) \ | ||
55 | do { \ | ||
56 | ceph_decode_need(p, end, sizeof(u64), bad); \ | ||
57 | v = ceph_decode_64(p); \ | ||
58 | } while (0) | ||
59 | #define ceph_decode_32_safe(p, end, v, bad) \ | ||
60 | do { \ | ||
61 | ceph_decode_need(p, end, sizeof(u32), bad); \ | ||
62 | v = ceph_decode_32(p); \ | ||
63 | } while (0) | ||
64 | #define ceph_decode_16_safe(p, end, v, bad) \ | ||
65 | do { \ | ||
66 | ceph_decode_need(p, end, sizeof(u16), bad); \ | ||
67 | v = ceph_decode_16(p); \ | ||
68 | } while (0) | ||
69 | #define ceph_decode_8_safe(p, end, v, bad) \ | ||
70 | do { \ | ||
71 | ceph_decode_need(p, end, sizeof(u8), bad); \ | ||
72 | v = ceph_decode_8(p); \ | ||
73 | } while (0) | ||
74 | |||
75 | #define ceph_decode_copy_safe(p, end, pv, n, bad) \ | ||
76 | do { \ | ||
77 | ceph_decode_need(p, end, n, bad); \ | ||
78 | ceph_decode_copy(p, pv, n); \ | ||
79 | } while (0) | ||
80 | |||
81 | /* | ||
82 | * struct ceph_timespec <-> struct timespec | ||
83 | */ | ||
84 | static inline void ceph_decode_timespec(struct timespec *ts, | ||
85 | const struct ceph_timespec *tv) | ||
86 | { | ||
87 | ts->tv_sec = le32_to_cpu(tv->tv_sec); | ||
88 | ts->tv_nsec = le32_to_cpu(tv->tv_nsec); | ||
89 | } | ||
90 | static inline void ceph_encode_timespec(struct ceph_timespec *tv, | ||
91 | const struct timespec *ts) | ||
92 | { | ||
93 | tv->tv_sec = cpu_to_le32(ts->tv_sec); | ||
94 | tv->tv_nsec = cpu_to_le32(ts->tv_nsec); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * sockaddr_storage <-> ceph_sockaddr | ||
99 | */ | ||
100 | static inline void ceph_encode_addr(struct ceph_entity_addr *a) | ||
101 | { | ||
102 | __be16 ss_family = htons(a->in_addr.ss_family); | ||
103 | a->in_addr.ss_family = *(__u16 *)&ss_family; | ||
104 | } | ||
105 | static inline void ceph_decode_addr(struct ceph_entity_addr *a) | ||
106 | { | ||
107 | __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; | ||
108 | a->in_addr.ss_family = ntohs(ss_family); | ||
109 | WARN_ON(a->in_addr.ss_family == 512); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * encoders | ||
114 | */ | ||
115 | static inline void ceph_encode_64(void **p, u64 v) | ||
116 | { | ||
117 | put_unaligned_le64(v, (__le64 *)*p); | ||
118 | *p += sizeof(u64); | ||
119 | } | ||
120 | static inline void ceph_encode_32(void **p, u32 v) | ||
121 | { | ||
122 | put_unaligned_le32(v, (__le32 *)*p); | ||
123 | *p += sizeof(u32); | ||
124 | } | ||
125 | static inline void ceph_encode_16(void **p, u16 v) | ||
126 | { | ||
127 | put_unaligned_le16(v, (__le16 *)*p); | ||
128 | *p += sizeof(u16); | ||
129 | } | ||
130 | static inline void ceph_encode_8(void **p, u8 v) | ||
131 | { | ||
132 | *(u8 *)*p = v; | ||
133 | (*p)++; | ||
134 | } | ||
135 | static inline void ceph_encode_copy(void **p, const void *s, int len) | ||
136 | { | ||
137 | memcpy(*p, s, len); | ||
138 | *p += len; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * filepath, string encoders | ||
143 | */ | ||
144 | static inline void ceph_encode_filepath(void **p, void *end, | ||
145 | u64 ino, const char *path) | ||
146 | { | ||
147 | u32 len = path ? strlen(path) : 0; | ||
148 | BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end); | ||
149 | ceph_encode_8(p, 1); | ||
150 | ceph_encode_64(p, ino); | ||
151 | ceph_encode_32(p, len); | ||
152 | if (len) | ||
153 | memcpy(*p, path, len); | ||
154 | *p += len; | ||
155 | } | ||
156 | |||
157 | static inline void ceph_encode_string(void **p, void *end, | ||
158 | const char *s, u32 len) | ||
159 | { | ||
160 | BUG_ON(*p + sizeof(len) + len > end); | ||
161 | ceph_encode_32(p, len); | ||
162 | if (len) | ||
163 | memcpy(*p, s, len); | ||
164 | *p += len; | ||
165 | } | ||
166 | |||
167 | #define ceph_encode_need(p, end, n, bad) \ | ||
168 | do { \ | ||
169 | if (unlikely(*(p) + (n) > (end))) \ | ||
170 | goto bad; \ | ||
171 | } while (0) | ||
172 | |||
173 | #define ceph_encode_64_safe(p, end, v, bad) \ | ||
174 | do { \ | ||
175 | ceph_encode_need(p, end, sizeof(u64), bad); \ | ||
176 | ceph_encode_64(p, v); \ | ||
177 | } while (0) | ||
178 | #define ceph_encode_32_safe(p, end, v, bad) \ | ||
179 | do { \ | ||
180 | ceph_encode_need(p, end, sizeof(u32), bad); \ | ||
181 | ceph_encode_32(p, v); \ | ||
182 | } while (0) | ||
183 | #define ceph_encode_16_safe(p, end, v, bad) \ | ||
184 | do { \ | ||
185 | ceph_encode_need(p, end, sizeof(u16), bad); \ | ||
186 | ceph_encode_16(p, v); \ | ||
187 | } while (0) | ||
188 | |||
189 | #define ceph_encode_copy_safe(p, end, pv, n, bad) \ | ||
190 | do { \ | ||
191 | ceph_encode_need(p, end, n, bad); \ | ||
192 | ceph_encode_copy(p, pv, n); \ | ||
193 | } while (0) | ||
194 | #define ceph_encode_string_safe(p, end, s, n, bad) \ | ||
195 | do { \ | ||
196 | ceph_encode_need(p, end, n, bad); \ | ||
197 | ceph_encode_string(p, end, s, n); \ | ||
198 | } while (0) | ||
199 | |||
200 | |||
201 | #endif | ||
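
The bounds-checked helpers above are built around a local "bad" label. As an illustration only (not in the diff; the function name and the -ERANGE return value are arbitrary), decoding a length-prefixed string looks like this:

/* decode a ceph-style length-prefixed string into a caller buffer */
static int example_decode_name(void **p, void *end, char *buf, size_t buflen)
{
	u32 len;

	ceph_decode_32_safe(p, end, len, bad);	/* length prefix */
	ceph_decode_need(p, end, len, bad);	/* payload must lie inside [*p, end) */
	if (len >= buflen)
		goto bad;
	ceph_decode_copy(p, buf, len);
	buf[len] = '\0';
	return 0;

bad:
	return -ERANGE;
}
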
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 000000000000..f22b2e941686 --- /dev/null +++ b/include/linux/ceph/libceph.h | |||
@@ -0,0 +1,249 @@ | |||
1 | #ifndef _FS_CEPH_LIBCEPH_H | ||
2 | #define _FS_CEPH_LIBCEPH_H | ||
3 | |||
4 | #include "ceph_debug.h" | ||
5 | |||
6 | #include <asm/unaligned.h> | ||
7 | #include <linux/backing-dev.h> | ||
8 | #include <linux/completion.h> | ||
9 | #include <linux/exportfs.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/mempool.h> | ||
12 | #include <linux/pagemap.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/writeback.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | #include "types.h" | ||
18 | #include "messenger.h" | ||
19 | #include "msgpool.h" | ||
20 | #include "mon_client.h" | ||
21 | #include "osd_client.h" | ||
22 | #include "ceph_fs.h" | ||
23 | |||
24 | /* | ||
25 | * Supported features | ||
26 | */ | ||
27 | #define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
28 | #define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
29 | |||
30 | /* | ||
31 | * mount options | ||
32 | */ | ||
33 | #define CEPH_OPT_FSID (1<<0) | ||
34 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | ||
35 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | ||
36 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ | ||
37 | |||
38 | #define CEPH_OPT_DEFAULT (0) | ||
39 | |||
40 | #define ceph_set_opt(client, opt) \ | ||
41 | (client)->options->flags |= CEPH_OPT_##opt; | ||
42 | #define ceph_test_opt(client, opt) \ | ||
43 | (!!((client)->options->flags & CEPH_OPT_##opt)) | ||
44 | |||
45 | struct ceph_options { | ||
46 | int flags; | ||
47 | struct ceph_fsid fsid; | ||
48 | struct ceph_entity_addr my_addr; | ||
49 | int mount_timeout; | ||
50 | int osd_idle_ttl; | ||
51 | int osd_timeout; | ||
52 | int osd_keepalive_timeout; | ||
53 | |||
54 | /* | ||
55 | * any type that can't be simply compared or doesn't need | ||
56 | * to be compared should go beyond this point; | ||
57 | * ceph_compare_options() should be updated accordingly | ||
58 | */ | ||
59 | |||
60 | struct ceph_entity_addr *mon_addr; /* should be the first | ||
61 | pointer type of args */ | ||
62 | int num_mon; | ||
63 | char *name; | ||
64 | char *secret; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * defaults | ||
69 | */ | ||
70 | #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 | ||
71 | #define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ | ||
72 | #define CEPH_OSD_KEEPALIVE_DEFAULT 5 | ||
73 | #define CEPH_OSD_IDLE_TTL_DEFAULT 60 | ||
74 | #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ | ||
75 | |||
76 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | ||
77 | #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) | ||
78 | |||
79 | #define CEPH_AUTH_NAME_DEFAULT "guest" | ||
80 | |||
81 | /* | ||
82 | * Delay telling the MDS we no longer want caps, in case we reopen | ||
83 | * the file. Delay a minimum amount of time, even if we send a cap | ||
84 | * message for some other reason. Otherwise, take the opportunity to | ||
85 | * update the mds to avoid sending another message later. | ||
86 | */ | ||
87 | #define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ | ||
88 | #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ | ||
89 | |||
90 | #define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) | ||
91 | |||
92 | /* mount state */ | ||
93 | enum { | ||
94 | CEPH_MOUNT_MOUNTING, | ||
95 | CEPH_MOUNT_MOUNTED, | ||
96 | CEPH_MOUNT_UNMOUNTING, | ||
97 | CEPH_MOUNT_UNMOUNTED, | ||
98 | CEPH_MOUNT_SHUTDOWN, | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * subtract jiffies | ||
103 | */ | ||
104 | static inline unsigned long time_sub(unsigned long a, unsigned long b) | ||
105 | { | ||
106 | BUG_ON(time_after(b, a)); | ||
107 | return (long)a - (long)b; | ||
108 | } | ||
109 | |||
110 | struct ceph_mds_client; | ||
111 | |||
112 | /* | ||
113 | * per client state | ||
114 | * | ||
115 | * possibly shared by multiple mount points, if they are | ||
116 | * mounting the same ceph filesystem/cluster. | ||
117 | */ | ||
118 | struct ceph_client { | ||
119 | struct ceph_fsid fsid; | ||
120 | bool have_fsid; | ||
121 | |||
122 | void *private; | ||
123 | |||
124 | struct ceph_options *options; | ||
125 | |||
126 | struct mutex mount_mutex; /* serialize mount attempts */ | ||
127 | wait_queue_head_t auth_wq; | ||
128 | int auth_err; | ||
129 | |||
130 | int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); | ||
131 | |||
132 | u32 supported_features; | ||
133 | u32 required_features; | ||
134 | |||
135 | struct ceph_messenger *msgr; /* messenger instance */ | ||
136 | struct ceph_mon_client monc; | ||
137 | struct ceph_osd_client osdc; | ||
138 | |||
139 | #ifdef CONFIG_DEBUG_FS | ||
140 | struct dentry *debugfs_dir; | ||
141 | struct dentry *debugfs_monmap; | ||
142 | struct dentry *debugfs_osdmap; | ||
143 | #endif | ||
144 | }; | ||
145 | |||
146 | |||
147 | |||
148 | /* | ||
149 | * snapshots | ||
150 | */ | ||
151 | |||
152 | /* | ||
153 | * A "snap context" is the set of existing snapshots when we | ||
154 | * write data. It is used by the OSD to guide its COW behavior. | ||
155 | * | ||
156 | * The ceph_snap_context is refcounted, and attached to each dirty | ||
157 | * page, indicating which context the dirty data belonged to when it was | ||
158 | * dirtied. | ||
159 | */ | ||
160 | struct ceph_snap_context { | ||
161 | atomic_t nref; | ||
162 | u64 seq; | ||
163 | int num_snaps; | ||
164 | u64 snaps[]; | ||
165 | }; | ||
166 | |||
167 | static inline struct ceph_snap_context * | ||
168 | ceph_get_snap_context(struct ceph_snap_context *sc) | ||
169 | { | ||
170 | /* | ||
171 | printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
172 | atomic_read(&sc->nref)+1); | ||
173 | */ | ||
174 | if (sc) | ||
175 | atomic_inc(&sc->nref); | ||
176 | return sc; | ||
177 | } | ||
178 | |||
179 | static inline void ceph_put_snap_context(struct ceph_snap_context *sc) | ||
180 | { | ||
181 | if (!sc) | ||
182 | return; | ||
183 | /* | ||
184 | printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
185 | atomic_read(&sc->nref)-1); | ||
186 | */ | ||
187 | if (atomic_dec_and_test(&sc->nref)) { | ||
188 | /*printk(" deleting snap_context %p\n", sc);*/ | ||
189 | kfree(sc); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * calculate the number of pages a given length and offset map onto, | ||
195 | * if we align the data. | ||
196 | */ | ||
197 | static inline int calc_pages_for(u64 off, u64 len) | ||
198 | { | ||
199 | return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - | ||
200 | (off >> PAGE_CACHE_SHIFT); | ||
201 | } | ||
202 | |||
203 | /* ceph_common.c */ | ||
204 | extern const char *ceph_msg_type_name(int type); | ||
205 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | ||
206 | extern struct kmem_cache *ceph_inode_cachep; | ||
207 | extern struct kmem_cache *ceph_cap_cachep; | ||
208 | extern struct kmem_cache *ceph_dentry_cachep; | ||
209 | extern struct kmem_cache *ceph_file_cachep; | ||
210 | |||
211 | extern int ceph_parse_options(struct ceph_options **popt, char *options, | ||
212 | const char *dev_name, const char *dev_name_end, | ||
213 | int (*parse_extra_token)(char *c, void *private), | ||
214 | void *private); | ||
215 | extern void ceph_destroy_options(struct ceph_options *opt); | ||
216 | extern int ceph_compare_options(struct ceph_options *new_opt, | ||
217 | struct ceph_client *client); | ||
218 | extern struct ceph_client *ceph_create_client(struct ceph_options *opt, | ||
219 | void *private); | ||
220 | extern u64 ceph_client_id(struct ceph_client *client); | ||
221 | extern void ceph_destroy_client(struct ceph_client *client); | ||
222 | extern int __ceph_open_session(struct ceph_client *client, | ||
223 | unsigned long started); | ||
224 | extern int ceph_open_session(struct ceph_client *client); | ||
225 | |||
226 | /* pagevec.c */ | ||
227 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
228 | |||
229 | extern struct page **ceph_get_direct_page_vector(const char __user *data, | ||
230 | int num_pages, | ||
231 | loff_t off, size_t len); | ||
232 | extern void ceph_put_page_vector(struct page **pages, int num_pages); | ||
233 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
234 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); | ||
235 | extern int ceph_copy_user_to_page_vector(struct page **pages, | ||
236 | const char __user *data, | ||
237 | loff_t off, size_t len); | ||
238 | extern int ceph_copy_to_page_vector(struct page **pages, | ||
239 | const char *data, | ||
240 | loff_t off, size_t len); | ||
241 | extern int ceph_copy_from_page_vector(struct page **pages, | ||
242 | char *data, | ||
243 | loff_t off, size_t len); | ||
244 | extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data, | ||
245 | loff_t off, size_t len); | ||
246 | extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); | ||
247 | |||
248 | |||
249 | #endif /* _FS_CEPH_LIBCEPH_H */ | ||
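
The declarations above imply a setup sequence along these lines. This sketch is illustrative only (not in the diff): the monitor address and option string are made up, error handling is abbreviated, and it assumes the usual ERR_PTR convention for ceph_create_client() and that ceph_destroy_client() also releases the options it took ownership of.

static int example_connect(void)
{
	static char mon_addr[] = "192.168.0.1:6789";	/* made-up monitor */
	static char opt_str[] = "osdtimeout=30";	/* made-up options */
	struct ceph_options *opt;
	struct ceph_client *client;
	int err;

	err = ceph_parse_options(&opt, opt_str, mon_addr,
				 mon_addr + strlen(mon_addr), NULL, NULL);
	if (err < 0)
		return err;

	client = ceph_create_client(opt, NULL);
	if (IS_ERR(client)) {
		ceph_destroy_options(opt);
		return PTR_ERR(client);
	}

	err = ceph_open_session(client);
	if (err < 0)
		ceph_destroy_client(client);	/* also frees opt */
	return err;
}
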
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h new file mode 100644 index 000000000000..4c5cb0880bba --- /dev/null +++ b/include/linux/ceph/mdsmap.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _FS_CEPH_MDSMAP_H | ||
2 | #define _FS_CEPH_MDSMAP_H | ||
3 | |||
4 | #include "types.h" | ||
5 | |||
6 | /* | ||
7 | * mds map - describe servers in the mds cluster. | ||
8 | * | ||
9 | * we limit fields to those the client actually cares about | ||
10 | */ | ||
11 | struct ceph_mds_info { | ||
12 | u64 global_id; | ||
13 | struct ceph_entity_addr addr; | ||
14 | s32 state; | ||
15 | int num_export_targets; | ||
16 | bool laggy; | ||
17 | u32 *export_targets; | ||
18 | }; | ||
19 | |||
20 | struct ceph_mdsmap { | ||
21 | u32 m_epoch, m_client_epoch, m_last_failure; | ||
22 | u32 m_root; | ||
23 | u32 m_session_timeout; /* seconds */ | ||
24 | u32 m_session_autoclose; /* seconds */ | ||
25 | u64 m_max_file_size; | ||
26 | u32 m_max_mds; /* size of m_addr, m_state arrays */ | ||
27 | struct ceph_mds_info *m_info; | ||
28 | |||
29 | /* which object pools file data can be stored in */ | ||
30 | int m_num_data_pg_pools; | ||
31 | u32 *m_data_pg_pools; | ||
32 | u32 m_cas_pg_pool; | ||
33 | }; | ||
34 | |||
35 | static inline struct ceph_entity_addr * | ||
36 | ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) | ||
37 | { | ||
38 | if (w >= m->m_max_mds) | ||
39 | return NULL; | ||
40 | return &m->m_info[w].addr; | ||
41 | } | ||
42 | |||
43 | static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) | ||
44 | { | ||
45 | BUG_ON(w < 0); | ||
46 | if (w >= m->m_max_mds) | ||
47 | return CEPH_MDS_STATE_DNE; | ||
48 | return m->m_info[w].state; | ||
49 | } | ||
50 | |||
51 | static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) | ||
52 | { | ||
53 | if (w >= 0 && w < m->m_max_mds) | ||
54 | return m->m_info[w].laggy; | ||
55 | return false; | ||
56 | } | ||
57 | |||
58 | extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); | ||
59 | extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); | ||
60 | extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); | ||
61 | |||
62 | #endif | ||
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h new file mode 100644 index 000000000000..5956d62c3057 --- /dev/null +++ b/include/linux/ceph/messenger.h | |||
@@ -0,0 +1,261 @@ | |||
1 | #ifndef __FS_CEPH_MESSENGER_H | ||
2 | #define __FS_CEPH_MESSENGER_H | ||
3 | |||
4 | #include <linux/kref.h> | ||
5 | #include <linux/mutex.h> | ||
6 | #include <linux/net.h> | ||
7 | #include <linux/radix-tree.h> | ||
8 | #include <linux/uio.h> | ||
9 | #include <linux/version.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | |||
12 | #include "types.h" | ||
13 | #include "buffer.h" | ||
14 | |||
15 | struct ceph_msg; | ||
16 | struct ceph_connection; | ||
17 | |||
18 | extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */ | ||
19 | |||
20 | /* | ||
21 | * Ceph defines these callbacks for handling connection events. | ||
22 | */ | ||
23 | struct ceph_connection_operations { | ||
24 | struct ceph_connection *(*get)(struct ceph_connection *); | ||
25 | void (*put)(struct ceph_connection *); | ||
26 | |||
27 | /* handle an incoming message. */ | ||
28 | void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); | ||
29 | |||
30 | /* authorize an outgoing connection */ | ||
31 | int (*get_authorizer) (struct ceph_connection *con, | ||
32 | void **buf, int *len, int *proto, | ||
33 | void **reply_buf, int *reply_len, int force_new); | ||
34 | int (*verify_authorizer_reply) (struct ceph_connection *con, int len); | ||
35 | int (*invalidate_authorizer)(struct ceph_connection *con); | ||
36 | |||
37 | /* protocol version mismatch */ | ||
38 | void (*bad_proto) (struct ceph_connection *con); | ||
39 | |||
40 | /* there was some error on the socket (disconnect, whatever) */ | ||
41 | void (*fault) (struct ceph_connection *con); | ||
42 | |||
43 | /* a remote host has terminated a message exchange session, and messages | ||
44 | * we sent (or they tried to send us) may be lost. */ | ||
45 | void (*peer_reset) (struct ceph_connection *con); | ||
46 | |||
47 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, | ||
48 | struct ceph_msg_header *hdr, | ||
49 | int *skip); | ||
50 | }; | ||
51 | |||
52 | /* use format string %s%d */ | ||
53 | #define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) | ||
54 | |||
55 | struct ceph_messenger { | ||
56 | struct ceph_entity_inst inst; /* my name+address */ | ||
57 | struct ceph_entity_addr my_enc_addr; | ||
58 | struct page *zero_page; /* used in certain error cases */ | ||
59 | |||
60 | bool nocrc; | ||
61 | |||
62 | /* | ||
63 | * the global_seq counts connections I (attempt to) initiate | ||
64 | * in order to disambiguate certain connect race conditions. | ||
65 | */ | ||
66 | u32 global_seq; | ||
67 | spinlock_t global_seq_lock; | ||
68 | |||
69 | u32 supported_features; | ||
70 | u32 required_features; | ||
71 | }; | ||
72 | |||
73 | /* | ||
74 | * a single message. it contains a header (src, dest, message type, etc.), | ||
75 | * footer (crc values, mainly), a "front" message body, and possibly a | ||
76 | * data payload (stored in some number of pages). | ||
77 | */ | ||
78 | struct ceph_msg { | ||
79 | struct ceph_msg_header hdr; /* header */ | ||
80 | struct ceph_msg_footer footer; /* footer */ | ||
81 | struct kvec front; /* unaligned blobs of message */ | ||
82 | struct ceph_buffer *middle; | ||
83 | struct page **pages; /* data payload. NOT OWNER. */ | ||
84 | unsigned nr_pages; /* size of page array */ | ||
85 | struct ceph_pagelist *pagelist; /* instead of pages */ | ||
86 | struct list_head list_head; | ||
87 | struct kref kref; | ||
88 | struct bio *bio; /* instead of pages/pagelist */ | ||
89 | struct bio *bio_iter; /* bio iterator */ | ||
90 | int bio_seg; /* current bio segment */ | ||
91 | struct ceph_pagelist *trail; /* the trailing part of the data */ | ||
92 | bool front_is_vmalloc; | ||
93 | bool more_to_follow; | ||
94 | bool needs_out_seq; | ||
95 | int front_max; | ||
96 | |||
97 | struct ceph_msgpool *pool; | ||
98 | }; | ||
99 | |||
100 | struct ceph_msg_pos { | ||
101 | int page, page_pos; /* which page; offset in page */ | ||
102 | int data_pos; /* offset in data payload */ | ||
103 | int did_page_crc; /* true if we've calculated crc for current page */ | ||
104 | }; | ||
105 | |||
106 | /* ceph connection fault delay defaults, for exponential backoff */ | ||
107 | #define BASE_DELAY_INTERVAL (HZ/2) | ||
108 | #define MAX_DELAY_INTERVAL (5 * 60 * HZ) | ||
109 | |||
110 | /* | ||
111 | * ceph_connection state bit flags | ||
112 | * | ||
113 | * QUEUED and BUSY are used together to ensure that only a single | ||
114 | * thread is currently opening, reading or writing data to the socket. | ||
115 | */ | ||
116 | #define LOSSYTX 0 /* we can close channel or drop messages on errors */ | ||
117 | #define CONNECTING 1 | ||
118 | #define NEGOTIATING 2 | ||
119 | #define KEEPALIVE_PENDING 3 | ||
120 | #define WRITE_PENDING 4 /* we have data ready to send */ | ||
121 | #define QUEUED 5 /* there is work queued on this connection */ | ||
122 | #define BUSY 6 /* work is being done */ | ||
123 | #define STANDBY 8 /* no outgoing messages, socket closed. we keep | ||
124 | * the ceph_connection around to maintain shared | ||
125 | * state with the peer. */ | ||
126 | #define CLOSED 10 /* we've closed the connection */ | ||
127 | #define SOCK_CLOSED 11 /* socket state changed to closed */ | ||
128 | #define OPENING 13 /* open connection w/ (possibly new) peer */ | ||
129 | #define DEAD 14 /* dead, about to kfree */ | ||
130 | |||
131 | /* | ||
132 | * A single connection with another host. | ||
133 | * | ||
134 | * We maintain a queue of outgoing messages, and some session state to | ||
135 | * ensure that we can preserve the lossless, ordered delivery of | ||
136 | * messages in the case of a TCP disconnect. | ||
137 | */ | ||
138 | struct ceph_connection { | ||
139 | void *private; | ||
140 | atomic_t nref; | ||
141 | |||
142 | const struct ceph_connection_operations *ops; | ||
143 | |||
144 | struct ceph_messenger *msgr; | ||
145 | struct socket *sock; | ||
146 | unsigned long state; /* connection state (see flags above) */ | ||
147 | const char *error_msg; /* error message, if any */ | ||
148 | |||
149 | struct ceph_entity_addr peer_addr; /* peer address */ | ||
150 | struct ceph_entity_name peer_name; /* peer name */ | ||
151 | struct ceph_entity_addr peer_addr_for_me; | ||
152 | unsigned peer_features; | ||
153 | u32 connect_seq; /* identify the most recent connection | ||
154 | attempt for this connection, client */ | ||
155 | u32 peer_global_seq; /* peer's global seq for this connection */ | ||
156 | |||
157 | int auth_retry; /* true if we need a newer authorizer */ | ||
158 | void *auth_reply_buf; /* where to put the authorizer reply */ | ||
159 | int auth_reply_buf_len; | ||
160 | |||
161 | struct mutex mutex; | ||
162 | |||
163 | /* out queue */ | ||
164 | struct list_head out_queue; | ||
165 | struct list_head out_sent; /* sending or sent but unacked */ | ||
166 | u64 out_seq; /* last message queued for send */ | ||
167 | bool out_keepalive_pending; | ||
168 | |||
169 | u64 in_seq, in_seq_acked; /* last message received, acked */ | ||
170 | |||
171 | /* connection negotiation temps */ | ||
172 | char in_banner[CEPH_BANNER_MAX_LEN]; | ||
173 | union { | ||
174 | struct { /* outgoing connection */ | ||
175 | struct ceph_msg_connect out_connect; | ||
176 | struct ceph_msg_connect_reply in_reply; | ||
177 | }; | ||
178 | struct { /* incoming */ | ||
179 | struct ceph_msg_connect in_connect; | ||
180 | struct ceph_msg_connect_reply out_reply; | ||
181 | }; | ||
182 | }; | ||
183 | struct ceph_entity_addr actual_peer_addr; | ||
184 | |||
185 | /* message out temps */ | ||
186 | struct ceph_msg *out_msg; /* sending message (== tail of | ||
187 | out_sent) */ | ||
188 | bool out_msg_done; | ||
189 | struct ceph_msg_pos out_msg_pos; | ||
190 | |||
191 | struct kvec out_kvec[8], /* sending header/footer data */ | ||
192 | *out_kvec_cur; | ||
193 | int out_kvec_left; /* kvec's left in out_kvec */ | ||
194 | int out_skip; /* skip this many bytes */ | ||
195 | int out_kvec_bytes; /* total bytes left */ | ||
196 | bool out_kvec_is_msg; /* kvec refers to out_msg */ | ||
197 | int out_more; /* there is more data after the kvecs */ | ||
198 | __le64 out_temp_ack; /* for writing an ack */ | ||
199 | |||
200 | /* message in temps */ | ||
201 | struct ceph_msg_header in_hdr; | ||
202 | struct ceph_msg *in_msg; | ||
203 | struct ceph_msg_pos in_msg_pos; | ||
204 | u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ | ||
205 | |||
206 | char in_tag; /* protocol control byte */ | ||
207 | int in_base_pos; /* bytes read */ | ||
208 | __le64 in_temp_ack; /* for reading an ack */ | ||
209 | |||
210 | struct delayed_work work; /* send|recv work */ | ||
211 | unsigned long delay; /* current delay interval */ | ||
212 | }; | ||
213 | |||
214 | |||
215 | extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); | ||
216 | extern int ceph_parse_ips(const char *c, const char *end, | ||
217 | struct ceph_entity_addr *addr, | ||
218 | int max_count, int *count); | ||
219 | |||
220 | |||
221 | extern int ceph_msgr_init(void); | ||
222 | extern void ceph_msgr_exit(void); | ||
223 | extern void ceph_msgr_flush(void); | ||
224 | |||
225 | extern struct ceph_messenger *ceph_messenger_create( | ||
226 | struct ceph_entity_addr *myaddr, | ||
227 | u32 features, u32 required); | ||
228 | extern void ceph_messenger_destroy(struct ceph_messenger *); | ||
229 | |||
230 | extern void ceph_con_init(struct ceph_messenger *msgr, | ||
231 | struct ceph_connection *con); | ||
232 | extern void ceph_con_open(struct ceph_connection *con, | ||
233 | struct ceph_entity_addr *addr); | ||
234 | extern bool ceph_con_opened(struct ceph_connection *con); | ||
235 | extern void ceph_con_close(struct ceph_connection *con); | ||
236 | extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); | ||
237 | extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); | ||
238 | extern void ceph_con_revoke_message(struct ceph_connection *con, | ||
239 | struct ceph_msg *msg); | ||
240 | extern void ceph_con_keepalive(struct ceph_connection *con); | ||
241 | extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); | ||
242 | extern void ceph_con_put(struct ceph_connection *con); | ||
243 | |||
244 | extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags); | ||
245 | extern void ceph_msg_kfree(struct ceph_msg *m); | ||
246 | |||
247 | |||
248 | static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) | ||
249 | { | ||
250 | kref_get(&msg->kref); | ||
251 | return msg; | ||
252 | } | ||
253 | extern void ceph_msg_last_put(struct kref *kref); | ||
254 | static inline void ceph_msg_put(struct ceph_msg *msg) | ||
255 | { | ||
256 | kref_put(&msg->kref, ceph_msg_last_put); | ||
257 | } | ||
258 | |||
259 | extern void ceph_msg_dump(struct ceph_msg *msg); | ||
260 | |||
261 | #endif | ||
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h new file mode 100644 index 000000000000..545f85917780 --- /dev/null +++ b/include/linux/ceph/mon_client.h | |||
@@ -0,0 +1,122 @@ | |||
1 | #ifndef _FS_CEPH_MON_CLIENT_H | ||
2 | #define _FS_CEPH_MON_CLIENT_H | ||
3 | |||
4 | #include <linux/completion.h> | ||
5 | #include <linux/kref.h> | ||
6 | #include <linux/rbtree.h> | ||
7 | |||
8 | #include "messenger.h" | ||
9 | |||
10 | struct ceph_client; | ||
11 | struct ceph_mount_args; | ||
12 | struct ceph_auth_client; | ||
13 | |||
14 | /* | ||
15 | * The monitor map enumerates the set of all monitors. | ||
16 | */ | ||
17 | struct ceph_monmap { | ||
18 | struct ceph_fsid fsid; | ||
19 | u32 epoch; | ||
20 | u32 num_mon; | ||
21 | struct ceph_entity_inst mon_inst[0]; | ||
22 | }; | ||
23 | |||
24 | struct ceph_mon_client; | ||
25 | struct ceph_mon_generic_request; | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Generic mechanism for resending monitor requests. | ||
30 | */ | ||
31 | typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, | ||
32 | int newmon); | ||
33 | |||
34 | /* a pending monitor request */ | ||
35 | struct ceph_mon_request { | ||
36 | struct ceph_mon_client *monc; | ||
37 | struct delayed_work delayed_work; | ||
38 | unsigned long delay; | ||
39 | ceph_monc_request_func_t do_request; | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * ceph_mon_generic_request is being used for the statfs and poolop requests | ||
44 | * which are being done a bit differently because we need to get data back | ||
45 | * to the caller | ||
46 | */ | ||
47 | struct ceph_mon_generic_request { | ||
48 | struct kref kref; | ||
49 | u64 tid; | ||
50 | struct rb_node node; | ||
51 | int result; | ||
52 | void *buf; | ||
53 | int buf_len; | ||
54 | struct completion completion; | ||
55 | struct ceph_msg *request; /* original request */ | ||
56 | struct ceph_msg *reply; /* and reply */ | ||
57 | }; | ||
58 | |||
59 | struct ceph_mon_client { | ||
60 | struct ceph_client *client; | ||
61 | struct ceph_monmap *monmap; | ||
62 | |||
63 | struct mutex mutex; | ||
64 | struct delayed_work delayed_work; | ||
65 | |||
66 | struct ceph_auth_client *auth; | ||
67 | struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack; | ||
68 | int pending_auth; | ||
69 | |||
70 | bool hunting; | ||
71 | int cur_mon; /* last monitor i contacted */ | ||
72 | unsigned long sub_sent, sub_renew_after; | ||
73 | struct ceph_connection *con; | ||
74 | bool have_fsid; | ||
75 | |||
76 | /* pending generic requests */ | ||
77 | struct rb_root generic_request_tree; | ||
78 | int num_generic_requests; | ||
79 | u64 last_tid; | ||
80 | |||
81 | /* mds/osd map */ | ||
82 | int want_mdsmap; | ||
83 | int want_next_osdmap; /* 1 = want, 2 = want+asked */ | ||
84 | u32 have_osdmap, have_mdsmap; | ||
85 | |||
86 | #ifdef CONFIG_DEBUG_FS | ||
87 | struct dentry *debugfs_file; | ||
88 | #endif | ||
89 | }; | ||
90 | |||
91 | extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); | ||
92 | extern int ceph_monmap_contains(struct ceph_monmap *m, | ||
93 | struct ceph_entity_addr *addr); | ||
94 | |||
95 | extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); | ||
96 | extern void ceph_monc_stop(struct ceph_mon_client *monc); | ||
97 | |||
98 | /* | ||
99 | * The model here is to indicate that we need a new map of at least | ||
100 | * epoch @want, and also call in when we receive a map. We will | ||
101 | * periodically rerequest the map from the monitor cluster until we | ||
102 | * get what we want. | ||
103 | */ | ||
104 | extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); | ||
105 | extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); | ||
106 | |||
107 | extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); | ||
108 | |||
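A short sketch of the model described above, using only the functions declared here; note_new_osdmap() and the want epoch are illustrative:

/*
 * Illustrative only: after decoding a map of epoch 'epoch', record it
 * with the mon client and keep asking until we reach 'want'.
 */
static void note_new_osdmap(struct ceph_mon_client *monc, u32 epoch, u32 want)
{
	ceph_monc_got_osdmap(monc, epoch);		/* "we now have epoch" */
	if (epoch < want)
		ceph_monc_request_next_osdmap(monc);	/* ask for a newer one */
}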
109 | extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, | ||
110 | struct ceph_statfs *buf); | ||
111 | |||
112 | extern int ceph_monc_open_session(struct ceph_mon_client *monc); | ||
113 | |||
114 | extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); | ||
115 | |||
116 | extern int ceph_monc_create_snapid(struct ceph_mon_client *monc, | ||
117 | u32 pool, u64 *snapid); | ||
118 | |||
119 | extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | ||
120 | u32 pool, u64 snapid); | ||
121 | |||
122 | #endif | ||
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h new file mode 100644 index 000000000000..a362605f9368 --- /dev/null +++ b/include/linux/ceph/msgpool.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _FS_CEPH_MSGPOOL | ||
2 | #define _FS_CEPH_MSGPOOL | ||
3 | |||
4 | #include <linux/mempool.h> | ||
5 | #include "messenger.h" | ||
6 | |||
7 | /* | ||
8 | * we use memory pools for preallocating messages we may receive, to | ||
9 | * avoid unexpected OOM conditions. | ||
10 | */ | ||
11 | struct ceph_msgpool { | ||
12 | const char *name; | ||
13 | mempool_t *pool; | ||
14 | int front_len; /* preallocated payload size */ | ||
15 | }; | ||
16 | |||
17 | extern int ceph_msgpool_init(struct ceph_msgpool *pool, | ||
18 | int front_len, int size, bool blocking, | ||
19 | const char *name); | ||
20 | extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); | ||
21 | extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, | ||
22 | int front_len); | ||
23 | extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); | ||
24 | |||
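A minimal usage sketch of the pool API above; the 4096-byte front, the pool size of 16 and the "example" name are placeholder values:

/* Sketch: preallocate 16 messages with 4096-byte fronts, then tear down. */
static int example_msgpool(struct ceph_msgpool *pool)
{
	struct ceph_msg *m;
	int ret;

	ret = ceph_msgpool_init(pool, 4096, 16, true, "example");
	if (ret)
		return ret;

	m = ceph_msgpool_get(pool, 4096);	/* take one message */
	if (m)
		ceph_msgpool_put(pool, m);	/* and give it back */

	ceph_msgpool_destroy(pool);
	return 0;
}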
25 | #endif | ||
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h new file mode 100644 index 000000000000..680d3d648cac --- /dev/null +++ b/include/linux/ceph/msgr.h | |||
@@ -0,0 +1,175 @@ | |||
1 | #ifndef CEPH_MSGR_H | ||
2 | #define CEPH_MSGR_H | ||
3 | |||
4 | /* | ||
5 | * Data types for message passing layer used by Ceph. | ||
6 | */ | ||
7 | |||
8 | #define CEPH_MON_PORT 6789 /* default monitor port */ | ||
9 | |||
10 | /* | ||
11 | * client-side processes will try to bind to ports in this | ||
12 | * range, simply for the benefit of tools like nmap or wireshark | ||
13 | * that would like to identify the protocol. | ||
14 | */ | ||
15 | #define CEPH_PORT_FIRST 6789 | ||
16 | #define CEPH_PORT_START 6800 /* non-monitors start here */ | ||
17 | #define CEPH_PORT_LAST 6900 | ||
18 | |||
19 | /* | ||
20 | * tcp connection banner. include a protocol version, and adjust | ||
21 | * whenever the wire protocol changes. try to keep this string length | ||
22 | * constant. | ||
23 | */ | ||
24 | #define CEPH_BANNER "ceph v027" | ||
25 | #define CEPH_BANNER_MAX_LEN 30 | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Rollover-safe type and comparator for 32-bit sequence numbers. | ||
30 | * Comparator returns -1, 0, or 1. | ||
31 | */ | ||
32 | typedef __u32 ceph_seq_t; | ||
33 | |||
34 | static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) | ||
35 | { | ||
36 | return (__s32)a - (__s32)b; | ||
37 | } | ||
38 | |||
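Because the result is the signed 32-bit difference, ordering survives wraparound of the unsigned counter. A quick illustration (the helper name is arbitrary):

/*
 * Wraparound example: seq 2 logically follows seq 0xfffffffe, and the
 * signed difference (__s32)(2 - 0xfffffffe) == 4 > 0 reflects that,
 * even though 2 is numerically smaller.
 */
static inline int ceph_seq_after(ceph_seq_t a, ceph_seq_t b)
{
	return ceph_seq_cmp(a, b) > 0;	/* true if a was issued after b */
}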
39 | |||
40 | /* | ||
41 | * entity_name -- logical name for a process participating in the | ||
42 | * network, e.g. 'mds0' or 'osd3'. | ||
43 | */ | ||
44 | struct ceph_entity_name { | ||
45 | __u8 type; /* CEPH_ENTITY_TYPE_* */ | ||
46 | __le64 num; | ||
47 | } __attribute__ ((packed)); | ||
48 | |||
49 | #define CEPH_ENTITY_TYPE_MON 0x01 | ||
50 | #define CEPH_ENTITY_TYPE_MDS 0x02 | ||
51 | #define CEPH_ENTITY_TYPE_OSD 0x04 | ||
52 | #define CEPH_ENTITY_TYPE_CLIENT 0x08 | ||
53 | #define CEPH_ENTITY_TYPE_AUTH 0x20 | ||
54 | |||
55 | #define CEPH_ENTITY_TYPE_ANY 0xFF | ||
56 | |||
57 | extern const char *ceph_entity_type_name(int type); | ||
58 | |||
59 | /* | ||
60 | * entity_addr -- network address | ||
61 | */ | ||
62 | struct ceph_entity_addr { | ||
63 | __le32 type; | ||
64 | __le32 nonce; /* unique id for process (e.g. pid) */ | ||
65 | struct sockaddr_storage in_addr; | ||
66 | } __attribute__ ((packed)); | ||
67 | |||
68 | struct ceph_entity_inst { | ||
69 | struct ceph_entity_name name; | ||
70 | struct ceph_entity_addr addr; | ||
71 | } __attribute__ ((packed)); | ||
72 | |||
73 | |||
74 | /* used by message exchange protocol */ | ||
75 | #define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ | ||
76 | #define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ | ||
77 | #define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing | ||
78 | incoming connection */ | ||
79 | #define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again | ||
80 | with higher cseq */ | ||
81 | #define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again | ||
82 | with higher gseq */ | ||
83 | #define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ | ||
84 | #define CEPH_MSGR_TAG_MSG 7 /* message */ | ||
85 | #define CEPH_MSGR_TAG_ACK 8 /* message ack */ | ||
86 | #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ | ||
87 | #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ | ||
88 | #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ | ||
89 | #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ | ||
90 | |||
91 | |||
92 | /* | ||
93 | * connection negotiation | ||
94 | */ | ||
95 | struct ceph_msg_connect { | ||
96 | __le64 features; /* supported feature bits */ | ||
97 | __le32 host_type; /* CEPH_ENTITY_TYPE_* */ | ||
98 | __le32 global_seq; /* count connections initiated by this host */ | ||
99 | __le32 connect_seq; /* count connections initiated in this session */ | ||
100 | __le32 protocol_version; | ||
101 | __le32 authorizer_protocol; | ||
102 | __le32 authorizer_len; | ||
103 | __u8 flags; /* CEPH_MSG_CONNECT_* */ | ||
104 | } __attribute__ ((packed)); | ||
105 | |||
106 | struct ceph_msg_connect_reply { | ||
107 | __u8 tag; | ||
108 | __le64 features; /* feature bits for this session */ | ||
109 | __le32 global_seq; | ||
110 | __le32 connect_seq; | ||
111 | __le32 protocol_version; | ||
112 | __le32 authorizer_len; | ||
113 | __u8 flags; | ||
114 | } __attribute__ ((packed)); | ||
115 | |||
116 | #define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ | ||
117 | |||
118 | |||
119 | /* | ||
120 | * message header | ||
121 | */ | ||
122 | struct ceph_msg_header_old { | ||
123 | __le64 seq; /* message seq# for this session */ | ||
124 | __le64 tid; /* transaction id */ | ||
125 | __le16 type; /* message type */ | ||
126 | __le16 priority; /* priority. higher value == higher priority */ | ||
127 | __le16 version; /* version of message encoding */ | ||
128 | |||
129 | __le32 front_len; /* bytes in main payload */ | ||
130 | __le32 middle_len;/* bytes in middle payload */ | ||
131 | __le32 data_len; /* bytes of data payload */ | ||
132 | __le16 data_off; /* sender: include full offset; | ||
133 | receiver: mask against ~PAGE_MASK */ | ||
134 | |||
135 | struct ceph_entity_inst src, orig_src; | ||
136 | __le32 reserved; | ||
137 | __le32 crc; /* header crc32c */ | ||
138 | } __attribute__ ((packed)); | ||
139 | |||
140 | struct ceph_msg_header { | ||
141 | __le64 seq; /* message seq# for this session */ | ||
142 | __le64 tid; /* transaction id */ | ||
143 | __le16 type; /* message type */ | ||
144 | __le16 priority; /* priority. higher value == higher priority */ | ||
145 | __le16 version; /* version of message encoding */ | ||
146 | |||
147 | __le32 front_len; /* bytes in main payload */ | ||
148 | __le32 middle_len;/* bytes in middle payload */ | ||
149 | __le32 data_len; /* bytes of data payload */ | ||
150 | __le16 data_off; /* sender: include full offset; | ||
151 | receiver: mask against ~PAGE_MASK */ | ||
152 | |||
153 | struct ceph_entity_name src; | ||
154 | __le32 reserved; | ||
155 | __le32 crc; /* header crc32c */ | ||
156 | } __attribute__ ((packed)); | ||
157 | |||
158 | #define CEPH_MSG_PRIO_LOW 64 | ||
159 | #define CEPH_MSG_PRIO_DEFAULT 127 | ||
160 | #define CEPH_MSG_PRIO_HIGH 196 | ||
161 | #define CEPH_MSG_PRIO_HIGHEST 255 | ||
162 | |||
163 | /* | ||
164 | * follows data payload | ||
165 | */ | ||
166 | struct ceph_msg_footer { | ||
167 | __le32 front_crc, middle_crc, data_crc; | ||
168 | __u8 flags; | ||
169 | } __attribute__ ((packed)); | ||
170 | |||
171 | #define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ | ||
172 | #define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ | ||
173 | |||
174 | |||
175 | #endif | ||
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h new file mode 100644 index 000000000000..6c91fb032c39 --- /dev/null +++ b/include/linux/ceph/osd_client.h | |||
@@ -0,0 +1,234 @@ | |||
1 | #ifndef _FS_CEPH_OSD_CLIENT_H | ||
2 | #define _FS_CEPH_OSD_CLIENT_H | ||
3 | |||
4 | #include <linux/completion.h> | ||
5 | #include <linux/kref.h> | ||
6 | #include <linux/mempool.h> | ||
7 | #include <linux/rbtree.h> | ||
8 | |||
9 | #include "types.h" | ||
10 | #include "osdmap.h" | ||
11 | #include "messenger.h" | ||
12 | |||
13 | struct ceph_msg; | ||
14 | struct ceph_snap_context; | ||
15 | struct ceph_osd_request; | ||
16 | struct ceph_osd_client; | ||
17 | struct ceph_authorizer; | ||
18 | struct ceph_pagelist; | ||
19 | |||
20 | /* | ||
21 | * completion callback for async writepages | ||
22 | */ | ||
23 | typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, | ||
24 | struct ceph_msg *); | ||
25 | |||
26 | /* a given osd we're communicating with */ | ||
27 | struct ceph_osd { | ||
28 | atomic_t o_ref; | ||
29 | struct ceph_osd_client *o_osdc; | ||
30 | int o_osd; | ||
31 | int o_incarnation; | ||
32 | struct rb_node o_node; | ||
33 | struct ceph_connection o_con; | ||
34 | struct list_head o_requests; | ||
35 | struct list_head o_osd_lru; | ||
36 | struct ceph_authorizer *o_authorizer; | ||
37 | void *o_authorizer_buf, *o_authorizer_reply_buf; | ||
38 | size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; | ||
39 | unsigned long lru_ttl; | ||
40 | int o_marked_for_keepalive; | ||
41 | struct list_head o_keepalive_item; | ||
42 | }; | ||
43 | |||
44 | /* an in-flight request */ | ||
45 | struct ceph_osd_request { | ||
46 | u64 r_tid; /* unique for this client */ | ||
47 | struct rb_node r_node; | ||
48 | struct list_head r_req_lru_item; | ||
49 | struct list_head r_osd_item; | ||
50 | struct ceph_osd *r_osd; | ||
51 | struct ceph_pg r_pgid; | ||
52 | int r_pg_osds[CEPH_PG_MAX_SIZE]; | ||
53 | int r_num_pg_osds; | ||
54 | |||
55 | struct ceph_connection *r_con_filling_msg; | ||
56 | |||
57 | struct ceph_msg *r_request, *r_reply; | ||
58 | int r_result; | ||
59 | int r_flags; /* any additional flags for the osd */ | ||
60 | u32 r_sent; /* >0 if r_request is sending/sent */ | ||
61 | int r_got_reply; | ||
62 | |||
63 | struct ceph_osd_client *r_osdc; | ||
64 | struct kref r_kref; | ||
65 | bool r_mempool; | ||
66 | struct completion r_completion, r_safe_completion; | ||
67 | ceph_osdc_callback_t r_callback, r_safe_callback; | ||
68 | struct ceph_eversion r_reassert_version; | ||
69 | struct list_head r_unsafe_item; | ||
70 | |||
71 | struct inode *r_inode; /* for use by callbacks */ | ||
72 | void *r_priv; /* ditto */ | ||
73 | |||
74 | char r_oid[40]; /* object name */ | ||
75 | int r_oid_len; | ||
76 | unsigned long r_stamp; /* send OR check time */ | ||
77 | bool r_resend; /* msg send failed, needs retry */ | ||
78 | |||
79 | struct ceph_file_layout r_file_layout; | ||
80 | struct ceph_snap_context *r_snapc; /* snap context for writes */ | ||
81 | unsigned r_num_pages; /* size of page array (follows) */ | ||
82 | struct page **r_pages; /* pages for data payload */ | ||
83 | int r_pages_from_pool; | ||
84 | int r_own_pages; /* if true, i own page list */ | ||
85 | #ifdef CONFIG_BLOCK | ||
86 | struct bio *r_bio; /* instead of pages */ | ||
87 | #endif | ||
88 | |||
89 | struct ceph_pagelist *r_trail; /* trailing part of the data */ | ||
90 | }; | ||
91 | |||
92 | struct ceph_osd_client { | ||
93 | struct ceph_client *client; | ||
94 | |||
95 | struct ceph_osdmap *osdmap; /* current map */ | ||
96 | struct rw_semaphore map_sem; | ||
97 | struct completion map_waiters; | ||
98 | u64 last_requested_map; | ||
99 | |||
100 | struct mutex request_mutex; | ||
101 | struct rb_root osds; /* osds */ | ||
102 | struct list_head osd_lru; /* idle osds */ | ||
103 | u64 timeout_tid; /* tid of timeout triggering rq */ | ||
104 | u64 last_tid; /* tid of last request */ | ||
105 | struct rb_root requests; /* pending requests */ | ||
106 | struct list_head req_lru; /* pending requests lru */ | ||
107 | int num_requests; | ||
108 | struct delayed_work timeout_work; | ||
109 | struct delayed_work osds_timeout_work; | ||
110 | #ifdef CONFIG_DEBUG_FS | ||
111 | struct dentry *debugfs_file; | ||
112 | #endif | ||
113 | |||
114 | mempool_t *req_mempool; | ||
115 | |||
116 | struct ceph_msgpool msgpool_op; | ||
117 | struct ceph_msgpool msgpool_op_reply; | ||
118 | }; | ||
119 | |||
120 | struct ceph_osd_req_op { | ||
121 | u16 op; /* CEPH_OSD_OP_* */ | ||
122 | u32 flags; /* CEPH_OSD_FLAG_* */ | ||
123 | union { | ||
124 | struct { | ||
125 | u64 offset, length; | ||
126 | u64 truncate_size; | ||
127 | u32 truncate_seq; | ||
128 | } extent; | ||
129 | struct { | ||
130 | const char *name; | ||
131 | u32 name_len; | ||
132 | const char *val; | ||
133 | u32 value_len; | ||
134 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
135 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
136 | } xattr; | ||
137 | struct { | ||
138 | const char *class_name; | ||
139 | __u8 class_len; | ||
140 | const char *method_name; | ||
141 | __u8 method_len; | ||
142 | __u8 argc; | ||
143 | const char *indata; | ||
144 | u32 indata_len; | ||
145 | } cls; | ||
146 | struct { | ||
147 | u64 cookie, count; | ||
148 | } pgls; | ||
149 | struct { | ||
150 | u64 snapid; | ||
151 | } snap; | ||
152 | }; | ||
153 | u32 payload_len; | ||
154 | }; | ||
155 | |||
156 | extern int ceph_osdc_init(struct ceph_osd_client *osdc, | ||
157 | struct ceph_client *client); | ||
158 | extern void ceph_osdc_stop(struct ceph_osd_client *osdc); | ||
159 | |||
160 | extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, | ||
161 | struct ceph_msg *msg); | ||
162 | extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, | ||
163 | struct ceph_msg *msg); | ||
164 | |||
165 | extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc, | ||
166 | struct ceph_file_layout *layout, | ||
167 | u64 snapid, | ||
168 | u64 off, u64 *plen, u64 *bno, | ||
169 | struct ceph_osd_request *req, | ||
170 | struct ceph_osd_req_op *op); | ||
171 | |||
172 | extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | ||
173 | int flags, | ||
174 | struct ceph_snap_context *snapc, | ||
175 | struct ceph_osd_req_op *ops, | ||
176 | bool use_mempool, | ||
177 | gfp_t gfp_flags, | ||
178 | struct page **pages, | ||
179 | struct bio *bio); | ||
180 | |||
181 | extern void ceph_osdc_build_request(struct ceph_osd_request *req, | ||
182 | u64 off, u64 *plen, | ||
183 | struct ceph_osd_req_op *src_ops, | ||
184 | struct ceph_snap_context *snapc, | ||
185 | struct timespec *mtime, | ||
186 | const char *oid, | ||
187 | int oid_len); | ||
188 | |||
189 | extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, | ||
190 | struct ceph_file_layout *layout, | ||
191 | struct ceph_vino vino, | ||
192 | u64 offset, u64 *len, int op, int flags, | ||
193 | struct ceph_snap_context *snapc, | ||
194 | int do_sync, u32 truncate_seq, | ||
195 | u64 truncate_size, | ||
196 | struct timespec *mtime, | ||
197 | bool use_mempool, int num_reply); | ||
198 | |||
199 | static inline void ceph_osdc_get_request(struct ceph_osd_request *req) | ||
200 | { | ||
201 | kref_get(&req->r_kref); | ||
202 | } | ||
203 | extern void ceph_osdc_release_request(struct kref *kref); | ||
204 | static inline void ceph_osdc_put_request(struct ceph_osd_request *req) | ||
205 | { | ||
206 | kref_put(&req->r_kref, ceph_osdc_release_request); | ||
207 | } | ||
208 | |||
209 | extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, | ||
210 | struct ceph_osd_request *req, | ||
211 | bool nofail); | ||
212 | extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, | ||
213 | struct ceph_osd_request *req); | ||
214 | extern void ceph_osdc_sync(struct ceph_osd_client *osdc); | ||
215 | |||
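A hedged sketch of the usual submit-and-wait pattern built from the calls above; the reading of the nofail flag is an assumption, and the request is presumed to be fully built already:

/* Sketch: submit a prepared request, wait for it, drop our reference. */
static int example_submit_sync(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	int ret;

	ret = ceph_osdc_start_request(osdc, req, false);  /* false: may fail */
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);
	ceph_osdc_put_request(req);			  /* release our ref */
	return ret;
}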
216 | extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, | ||
217 | struct ceph_vino vino, | ||
218 | struct ceph_file_layout *layout, | ||
219 | u64 off, u64 *plen, | ||
220 | u32 truncate_seq, u64 truncate_size, | ||
221 | struct page **pages, int nr_pages); | ||
222 | |||
223 | extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, | ||
224 | struct ceph_vino vino, | ||
225 | struct ceph_file_layout *layout, | ||
226 | struct ceph_snap_context *sc, | ||
227 | u64 off, u64 len, | ||
228 | u32 truncate_seq, u64 truncate_size, | ||
229 | struct timespec *mtime, | ||
230 | struct page **pages, int nr_pages, | ||
231 | int flags, int do_sync, bool nofail); | ||
232 | |||
233 | #endif | ||
234 | |||
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h new file mode 100644 index 000000000000..ba4c205cbb01 --- /dev/null +++ b/include/linux/ceph/osdmap.h | |||
@@ -0,0 +1,130 @@ | |||
1 | #ifndef _FS_CEPH_OSDMAP_H | ||
2 | #define _FS_CEPH_OSDMAP_H | ||
3 | |||
4 | #include <linux/rbtree.h> | ||
5 | #include "types.h" | ||
6 | #include "ceph_fs.h" | ||
7 | #include <linux/crush/crush.h> | ||
8 | |||
9 | /* | ||
10 | * The osd map describes the current membership of the osd cluster and | ||
11 | * specifies the mapping of objects to placement groups and placement | ||
12 | * groups to (sets of) osds. That is, it completely specifies the | ||
13 | * (desired) distribution of all data objects in the system at some | ||
14 | * point in time. | ||
15 | * | ||
16 | * Each map version is identified by an epoch, which increases monotonically. | ||
17 | * | ||
18 | * The map can be updated either via an incremental map (diff) describing | ||
19 | * the change between two successive epochs, or as a fully encoded map. | ||
20 | */ | ||
21 | struct ceph_pg_pool_info { | ||
22 | struct rb_node node; | ||
23 | int id; | ||
24 | struct ceph_pg_pool v; | ||
25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; | ||
26 | char *name; | ||
27 | }; | ||
28 | |||
29 | struct ceph_pg_mapping { | ||
30 | struct rb_node node; | ||
31 | struct ceph_pg pgid; | ||
32 | int len; | ||
33 | int osds[]; | ||
34 | }; | ||
35 | |||
36 | struct ceph_osdmap { | ||
37 | struct ceph_fsid fsid; | ||
38 | u32 epoch; | ||
39 | u32 mkfs_epoch; | ||
40 | struct ceph_timespec created, modified; | ||
41 | |||
42 | u32 flags; /* CEPH_OSDMAP_* */ | ||
43 | |||
44 | u32 max_osd; /* size of osd_state, _offload, _addr arrays */ | ||
45 | u8 *osd_state; /* CEPH_OSD_* */ | ||
46 | u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ | ||
47 | struct ceph_entity_addr *osd_addr; | ||
48 | |||
49 | struct rb_root pg_temp; | ||
50 | struct rb_root pg_pools; | ||
51 | u32 pool_max; | ||
52 | |||
53 | /* the CRUSH map specifies the mapping of placement groups to | ||
54 | * the list of osds that store+replicate them. */ | ||
55 | struct crush_map *crush; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * file layout helpers | ||
60 | */ | ||
61 | #define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit)) | ||
62 | #define ceph_file_layout_stripe_count(l) \ | ||
63 | ((__s32)le32_to_cpu((l).fl_stripe_count)) | ||
64 | #define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size)) | ||
65 | #define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash)) | ||
66 | #define ceph_file_layout_object_su(l) \ | ||
67 | ((__s32)le32_to_cpu((l).fl_object_stripe_unit)) | ||
68 | #define ceph_file_layout_pg_preferred(l) \ | ||
69 | ((__s32)le32_to_cpu((l).fl_pg_preferred)) | ||
70 | #define ceph_file_layout_pg_pool(l) \ | ||
71 | ((__s32)le32_to_cpu((l).fl_pg_pool)) | ||
72 | |||
73 | static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l) | ||
74 | { | ||
75 | return le32_to_cpu(l->fl_stripe_unit) * | ||
76 | le32_to_cpu(l->fl_stripe_count); | ||
77 | } | ||
78 | |||
79 | /* "period" == bytes before i start on a new set of objects */ | ||
80 | static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l) | ||
81 | { | ||
82 | return le32_to_cpu(l->fl_object_size) * | ||
83 | le32_to_cpu(l->fl_stripe_count); | ||
84 | } | ||
85 | |||
86 | |||
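A worked example of the two helpers above, with illustrative layout values:

/*
 * Example values (illustrative):
 *   fl_stripe_unit  = 65536    (64 KB)
 *   fl_stripe_count = 4
 *   fl_object_size  = 4194304  (4 MB)
 *
 * stripe_width = 65536 * 4   = 262144    -> 256 KB spread across the set
 * period       = 4194304 * 4 = 16777216  -> 16 MB before a new object set
 */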
87 | static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) | ||
88 | { | ||
89 | return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP); | ||
90 | } | ||
91 | |||
92 | static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) | ||
93 | { | ||
94 | return map && (map->flags & flag); | ||
95 | } | ||
96 | |||
97 | extern char *ceph_osdmap_state_str(char *str, int len, int state); | ||
98 | |||
99 | static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, | ||
100 | int osd) | ||
101 | { | ||
102 | if (osd >= map->max_osd) | ||
103 | return NULL; | ||
104 | return &map->osd_addr[osd]; | ||
105 | } | ||
106 | |||
107 | extern struct ceph_osdmap *osdmap_decode(void **p, void *end); | ||
108 | extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | ||
109 | struct ceph_osdmap *map, | ||
110 | struct ceph_messenger *msgr); | ||
111 | extern void ceph_osdmap_destroy(struct ceph_osdmap *map); | ||
112 | |||
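A sketch of how a fresh or incremental map might be consumed with the decoders above; the ERR_PTR-style error convention and the buffer handling are assumptions:

/*
 * Illustrative only: decode a full map, or apply an incremental to the
 * map we already hold, and retire the superseded map.
 */
static struct ceph_osdmap *example_update_map(void *buf, size_t len,
					      struct ceph_osdmap *old,
					      struct ceph_messenger *msgr,
					      bool incremental)
{
	void *p = buf, *end = buf + len;
	struct ceph_osdmap *newmap;

	newmap = incremental ? osdmap_apply_incremental(&p, end, old, msgr)
			     : osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return newmap;		/* assumed error convention */
	if (old && newmap != old)
		ceph_osdmap_destroy(old);
	return newmap;
}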
113 | /* calculate mapping of a file extent to an object */ | ||
114 | extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | ||
115 | u64 off, u64 *plen, | ||
116 | u64 *bno, u64 *oxoff, u64 *oxlen); | ||
117 | |||
118 | /* calculate mapping of object to a placement group */ | ||
119 | extern int ceph_calc_object_layout(struct ceph_object_layout *ol, | ||
120 | const char *oid, | ||
121 | struct ceph_file_layout *fl, | ||
122 | struct ceph_osdmap *osdmap); | ||
123 | extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | ||
124 | int *acting); | ||
125 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, | ||
126 | struct ceph_pg pgid); | ||
127 | |||
128 | extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); | ||
129 | |||
130 | #endif | ||
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h new file mode 100644 index 000000000000..9660d6b0a35d --- /dev/null +++ b/include/linux/ceph/pagelist.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef __FS_CEPH_PAGELIST_H | ||
2 | #define __FS_CEPH_PAGELIST_H | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | |||
6 | struct ceph_pagelist { | ||
7 | struct list_head head; | ||
8 | void *mapped_tail; | ||
9 | size_t length; | ||
10 | size_t room; | ||
11 | struct list_head free_list; | ||
12 | size_t num_pages_free; | ||
13 | }; | ||
14 | |||
15 | struct ceph_pagelist_cursor { | ||
16 | struct ceph_pagelist *pl; /* pagelist, for error checking */ | ||
17 | struct list_head *page_lru; /* page in list */ | ||
18 | size_t room; /* room remaining to reset to */ | ||
19 | }; | ||
20 | |||
21 | static inline void ceph_pagelist_init(struct ceph_pagelist *pl) | ||
22 | { | ||
23 | INIT_LIST_HEAD(&pl->head); | ||
24 | pl->mapped_tail = NULL; | ||
25 | pl->length = 0; | ||
26 | pl->room = 0; | ||
27 | INIT_LIST_HEAD(&pl->free_list); | ||
28 | pl->num_pages_free = 0; | ||
29 | } | ||
30 | |||
31 | extern int ceph_pagelist_release(struct ceph_pagelist *pl); | ||
32 | |||
33 | extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); | ||
34 | |||
35 | extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); | ||
36 | |||
37 | extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); | ||
38 | |||
39 | extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, | ||
40 | struct ceph_pagelist_cursor *c); | ||
41 | |||
42 | extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, | ||
43 | struct ceph_pagelist_cursor *c); | ||
44 | |||
45 | static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) | ||
46 | { | ||
47 | __le64 ev = cpu_to_le64(v); | ||
48 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
49 | } | ||
50 | static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) | ||
51 | { | ||
52 | __le32 ev = cpu_to_le32(v); | ||
53 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
54 | } | ||
55 | static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) | ||
56 | { | ||
57 | __le16 ev = cpu_to_le16(v); | ||
58 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
59 | } | ||
60 | static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) | ||
61 | { | ||
62 | return ceph_pagelist_append(pl, &v, 1); | ||
63 | } | ||
64 | static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, | ||
65 | char *s, size_t len) | ||
66 | { | ||
67 | int ret = ceph_pagelist_encode_32(pl, len); | ||
68 | if (ret) | ||
69 | return ret; | ||
70 | if (len) | ||
71 | return ceph_pagelist_append(pl, s, len); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
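A short usage sketch of the pagelist API above; the payload contents are arbitrary:

/* Sketch: build a tiny payload (a u32 and a string) and release it. */
static int example_pagelist(void)
{
	struct ceph_pagelist pl;
	char name[] = "hello";
	int ret;

	ceph_pagelist_init(&pl);
	ret = ceph_pagelist_encode_32(&pl, 42);
	if (!ret)
		ret = ceph_pagelist_encode_string(&pl, name, sizeof(name) - 1);
	ceph_pagelist_release(&pl);	/* frees any allocated pages */
	return ret;
}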
75 | #endif | ||
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h new file mode 100644 index 000000000000..6d5247f2e81b --- /dev/null +++ b/include/linux/ceph/rados.h | |||
@@ -0,0 +1,405 @@ | |||
1 | #ifndef CEPH_RADOS_H | ||
2 | #define CEPH_RADOS_H | ||
3 | |||
4 | /* | ||
5 | * Data types for the Ceph distributed object storage layer RADOS | ||
6 | * (Reliable Autonomic Distributed Object Store). | ||
7 | */ | ||
8 | |||
9 | #include "msgr.h" | ||
10 | |||
11 | /* | ||
12 | * osdmap encoding versions | ||
13 | */ | ||
14 | #define CEPH_OSDMAP_INC_VERSION 5 | ||
15 | #define CEPH_OSDMAP_INC_VERSION_EXT 5 | ||
16 | #define CEPH_OSDMAP_VERSION 5 | ||
17 | #define CEPH_OSDMAP_VERSION_EXT 5 | ||
18 | |||
19 | /* | ||
20 | * fs id | ||
21 | */ | ||
22 | struct ceph_fsid { | ||
23 | unsigned char fsid[16]; | ||
24 | }; | ||
25 | |||
26 | static inline int ceph_fsid_compare(const struct ceph_fsid *a, | ||
27 | const struct ceph_fsid *b) | ||
28 | { | ||
29 | return memcmp(a, b, sizeof(*a)); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * ino, object, etc. | ||
34 | */ | ||
35 | typedef __le64 ceph_snapid_t; | ||
36 | #define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ | ||
37 | #define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ | ||
38 | #define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ | ||
39 | |||
40 | struct ceph_timespec { | ||
41 | __le32 tv_sec; | ||
42 | __le32 tv_nsec; | ||
43 | } __attribute__ ((packed)); | ||
44 | |||
45 | |||
46 | /* | ||
47 | * object layout - how objects are mapped into PGs | ||
48 | */ | ||
49 | #define CEPH_OBJECT_LAYOUT_HASH 1 | ||
50 | #define CEPH_OBJECT_LAYOUT_LINEAR 2 | ||
51 | #define CEPH_OBJECT_LAYOUT_HASHINO 3 | ||
52 | |||
53 | /* | ||
54 | * pg layout -- how PGs are mapped onto (sets of) OSDs | ||
55 | */ | ||
56 | #define CEPH_PG_LAYOUT_CRUSH 0 | ||
57 | #define CEPH_PG_LAYOUT_HASH 1 | ||
58 | #define CEPH_PG_LAYOUT_LINEAR 2 | ||
59 | #define CEPH_PG_LAYOUT_HYBRID 3 | ||
60 | |||
61 | #define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ | ||
62 | |||
63 | /* | ||
64 | * placement group. | ||
65 | * we encode this into one __le64. | ||
66 | */ | ||
67 | struct ceph_pg { | ||
68 | __le16 preferred; /* preferred primary osd */ | ||
69 | __le16 ps; /* placement seed */ | ||
70 | __le32 pool; /* object pool */ | ||
71 | } __attribute__ ((packed)); | ||
72 | |||
73 | /* | ||
74 | * pg_pool is a set of pgs storing a pool of objects | ||
75 | * | ||
76 | * pg_num -- base number of pseudorandomly placed pgs | ||
77 | * | ||
78 | * pgp_num -- effective number when calculating pg placement. this | ||
79 | * is used for pg_num increases. new pgs result in data being "split" | ||
80 | * into new pgs. for this to proceed smoothly, new pgs are initially | ||
81 | * colocated with their parents; that is, pgp_num doesn't increase | ||
82 | * until the new pgs have successfully split. only _then_ are the new | ||
83 | * pgs placed independently. | ||
84 | * | ||
85 | * lpg_num -- localized pg count (per device). replicas are randomly | ||
86 | * selected. | ||
87 | * | ||
88 | * lpgp_num -- as above. | ||
89 | */ | ||
90 | #define CEPH_PG_TYPE_REP 1 | ||
91 | #define CEPH_PG_TYPE_RAID4 2 | ||
92 | #define CEPH_PG_POOL_VERSION 2 | ||
93 | struct ceph_pg_pool { | ||
94 | __u8 type; /* CEPH_PG_TYPE_* */ | ||
95 | __u8 size; /* number of osds in each pg */ | ||
96 | __u8 crush_ruleset; /* crush placement rule */ | ||
97 | __u8 object_hash; /* hash mapping object name to ps */ | ||
98 | __le32 pg_num, pgp_num; /* number of pg's */ | ||
99 | __le32 lpg_num, lpgp_num; /* number of localized pg's */ | ||
100 | __le32 last_change; /* most recent epoch changed */ | ||
101 | __le64 snap_seq; /* seq for per-pool snapshot */ | ||
102 | __le32 snap_epoch; /* epoch of last snap */ | ||
103 | __le32 num_snaps; | ||
104 | __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */ | ||
105 | __le64 auid; /* who owns the pg */ | ||
106 | } __attribute__ ((packed)); | ||
107 | |||
108 | /* | ||
109 | * stable_mod func is used to control the number of placement groups. | ||
110 | * similar to straight-up modulo, but produces a stable mapping as b | ||
111 | * increases over time. b is the number of bins, and bmask is the | ||
112 | * containing power of 2 minus 1. | ||
113 | * | ||
114 | * b <= bmask and bmask=(2**n)-1 | ||
115 | * e.g., b=12 -> bmask=15, b=123 -> bmask=127 | ||
116 | */ | ||
117 | static inline int ceph_stable_mod(int x, int b, int bmask) | ||
118 | { | ||
119 | if ((x & bmask) < b) | ||
120 | return x & bmask; | ||
121 | else | ||
122 | return x & (bmask >> 1); | ||
123 | } | ||
124 | |||
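A worked illustration of the stability property, using the b=12, bmask=15 case from the comment:

/*
 * With b = 12, bmask = 15:
 *   x = 5  -> 5 & 15 = 5,   5 < 12   -> returns 5
 *   x = 13 -> 13 & 15 = 13, 13 >= 12 -> returns 13 & 7 = 5
 *
 * Inputs that would land in the not-yet-existing bins 12..15 fold back
 * into 4..7, so when b later grows toward 16 only those inputs move.
 */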
125 | /* | ||
126 | * object layout - how a given object should be stored. | ||
127 | */ | ||
128 | struct ceph_object_layout { | ||
129 | struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */ | ||
130 | __le32 ol_stripe_unit; /* for per-object parity, if any */ | ||
131 | } __attribute__ ((packed)); | ||
132 | |||
133 | /* | ||
134 | * compound epoch+version, used by storage layer to serialize mutations | ||
135 | */ | ||
136 | struct ceph_eversion { | ||
137 | __le32 epoch; | ||
138 | __le64 version; | ||
139 | } __attribute__ ((packed)); | ||
140 | |||
141 | /* | ||
142 | * osd map bits | ||
143 | */ | ||
144 | |||
145 | /* status bits */ | ||
146 | #define CEPH_OSD_EXISTS 1 | ||
147 | #define CEPH_OSD_UP 2 | ||
148 | |||
149 | /* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ | ||
150 | #define CEPH_OSD_IN 0x10000 | ||
151 | #define CEPH_OSD_OUT 0 | ||
152 | |||
153 | |||
154 | /* | ||
155 | * osd map flag bits | ||
156 | */ | ||
157 | #define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ | ||
158 | #define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ | ||
159 | #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ | ||
160 | #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ | ||
161 | #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ | ||
162 | |||
163 | /* | ||
164 | * osd ops | ||
165 | */ | ||
166 | #define CEPH_OSD_OP_MODE 0xf000 | ||
167 | #define CEPH_OSD_OP_MODE_RD 0x1000 | ||
168 | #define CEPH_OSD_OP_MODE_WR 0x2000 | ||
169 | #define CEPH_OSD_OP_MODE_RMW 0x3000 | ||
170 | #define CEPH_OSD_OP_MODE_SUB 0x4000 | ||
171 | |||
172 | #define CEPH_OSD_OP_TYPE 0x0f00 | ||
173 | #define CEPH_OSD_OP_TYPE_LOCK 0x0100 | ||
174 | #define CEPH_OSD_OP_TYPE_DATA 0x0200 | ||
175 | #define CEPH_OSD_OP_TYPE_ATTR 0x0300 | ||
176 | #define CEPH_OSD_OP_TYPE_EXEC 0x0400 | ||
177 | #define CEPH_OSD_OP_TYPE_PG 0x0500 | ||
178 | |||
179 | enum { | ||
180 | /** data **/ | ||
181 | /* read */ | ||
182 | CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, | ||
183 | CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, | ||
184 | |||
185 | /* fancy read */ | ||
186 | CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, | ||
187 | |||
188 | /* write */ | ||
189 | CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, | ||
190 | CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2, | ||
191 | CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3, | ||
192 | CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4, | ||
193 | CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5, | ||
194 | |||
195 | /* fancy write */ | ||
196 | CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6, | ||
197 | CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7, | ||
198 | CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8, | ||
199 | CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9, | ||
200 | |||
201 | CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10, | ||
202 | CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11, | ||
203 | CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12, | ||
204 | |||
205 | CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, | ||
206 | CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14, | ||
207 | |||
208 | /** attrs **/ | ||
209 | /* read */ | ||
210 | CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, | ||
211 | CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2, | ||
212 | CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3, | ||
213 | |||
214 | /* write */ | ||
215 | CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1, | ||
216 | CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2, | ||
217 | CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3, | ||
218 | CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, | ||
219 | |||
220 | /** subop **/ | ||
221 | CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, | ||
222 | CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, | ||
223 | CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, | ||
224 | CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, | ||
225 | CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, | ||
226 | |||
227 | /** lock **/ | ||
228 | CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, | ||
229 | CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2, | ||
230 | CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3, | ||
231 | CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4, | ||
232 | CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5, | ||
233 | CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6, | ||
234 | |||
235 | /** exec **/ | ||
236 | CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1, | ||
237 | |||
238 | /** pg **/ | ||
239 | CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1, | ||
240 | }; | ||
241 | |||
242 | static inline int ceph_osd_op_type_lock(int op) | ||
243 | { | ||
244 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; | ||
245 | } | ||
246 | static inline int ceph_osd_op_type_data(int op) | ||
247 | { | ||
248 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; | ||
249 | } | ||
250 | static inline int ceph_osd_op_type_attr(int op) | ||
251 | { | ||
252 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; | ||
253 | } | ||
254 | static inline int ceph_osd_op_type_exec(int op) | ||
255 | { | ||
256 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; | ||
257 | } | ||
258 | static inline int ceph_osd_op_type_pg(int op) | ||
259 | { | ||
260 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; | ||
261 | } | ||
262 | |||
263 | static inline int ceph_osd_op_mode_subop(int op) | ||
264 | { | ||
265 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; | ||
266 | } | ||
267 | static inline int ceph_osd_op_mode_read(int op) | ||
268 | { | ||
269 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD; | ||
270 | } | ||
271 | static inline int ceph_osd_op_mode_modify(int op) | ||
272 | { | ||
273 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * note that the following tmap stuff is also defined in ceph's librados.h; | ||
278 | * any modification here needs to be reflected there as well. | ||
279 | */ | ||
280 | #define CEPH_OSD_TMAP_HDR 'h' | ||
281 | #define CEPH_OSD_TMAP_SET 's' | ||
282 | #define CEPH_OSD_TMAP_RM 'r' | ||
283 | |||
284 | extern const char *ceph_osd_op_name(int op); | ||
285 | |||
286 | |||
287 | /* | ||
288 | * osd op flags | ||
289 | * | ||
290 | * An op may be READ, WRITE, or READ|WRITE. | ||
291 | */ | ||
292 | enum { | ||
293 | CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */ | ||
294 | CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */ | ||
295 | CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */ | ||
296 | CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */ | ||
297 | CEPH_OSD_FLAG_READ = 16, /* op may read */ | ||
298 | CEPH_OSD_FLAG_WRITE = 32, /* op may write */ | ||
299 | CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */ | ||
300 | CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */ | ||
301 | CEPH_OSD_FLAG_BALANCE_READS = 256, | ||
302 | CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */ | ||
303 | CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */ | ||
304 | CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */ | ||
305 | CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */ | ||
306 | }; | ||
307 | |||
308 | enum { | ||
309 | CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ | ||
310 | }; | ||
311 | |||
312 | #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ | ||
313 | #define EBLACKLISTED ESHUTDOWN /* blacklisted */ | ||
314 | |||
315 | /* xattr comparison */ | ||
316 | enum { | ||
317 | CEPH_OSD_CMPXATTR_OP_NOP = 0, | ||
318 | CEPH_OSD_CMPXATTR_OP_EQ = 1, | ||
319 | CEPH_OSD_CMPXATTR_OP_NE = 2, | ||
320 | CEPH_OSD_CMPXATTR_OP_GT = 3, | ||
321 | CEPH_OSD_CMPXATTR_OP_GTE = 4, | ||
322 | CEPH_OSD_CMPXATTR_OP_LT = 5, | ||
323 | CEPH_OSD_CMPXATTR_OP_LTE = 6 | ||
324 | }; | ||
325 | |||
326 | enum { | ||
327 | CEPH_OSD_CMPXATTR_MODE_STRING = 1, | ||
328 | CEPH_OSD_CMPXATTR_MODE_U64 = 2 | ||
329 | }; | ||
330 | |||
331 | /* | ||
332 | * an individual object operation. each may be accompanied by some data | ||
333 | * payload | ||
334 | */ | ||
335 | struct ceph_osd_op { | ||
336 | __le16 op; /* CEPH_OSD_OP_* */ | ||
337 | __le32 flags; /* CEPH_OSD_FLAG_* */ | ||
338 | union { | ||
339 | struct { | ||
340 | __le64 offset, length; | ||
341 | __le64 truncate_size; | ||
342 | __le32 truncate_seq; | ||
343 | } __attribute__ ((packed)) extent; | ||
344 | struct { | ||
345 | __le32 name_len; | ||
346 | __le32 value_len; | ||
347 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
348 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
349 | } __attribute__ ((packed)) xattr; | ||
350 | struct { | ||
351 | __u8 class_len; | ||
352 | __u8 method_len; | ||
353 | __u8 argc; | ||
354 | __le32 indata_len; | ||
355 | } __attribute__ ((packed)) cls; | ||
356 | struct { | ||
357 | __le64 cookie, count; | ||
358 | } __attribute__ ((packed)) pgls; | ||
359 | struct { | ||
360 | __le64 snapid; | ||
361 | } __attribute__ ((packed)) snap; | ||
362 | }; | ||
363 | __le32 payload_len; | ||
364 | } __attribute__ ((packed)); | ||
365 | |||
366 | /* | ||
367 | * osd request message header. each request may include multiple | ||
368 | * ceph_osd_op object operations. | ||
369 | */ | ||
370 | struct ceph_osd_request_head { | ||
371 | __le32 client_inc; /* client incarnation */ | ||
372 | struct ceph_object_layout layout; /* pgid */ | ||
373 | __le32 osdmap_epoch; /* client's osdmap epoch */ | ||
374 | |||
375 | __le32 flags; | ||
376 | |||
377 | struct ceph_timespec mtime; /* for mutations only */ | ||
378 | struct ceph_eversion reassert_version; /* if we are replaying op */ | ||
379 | |||
380 | __le32 object_len; /* length of object name */ | ||
381 | |||
382 | __le64 snapid; /* snapid to read */ | ||
383 | __le64 snap_seq; /* writer's snap context */ | ||
384 | __le32 num_snaps; | ||
385 | |||
386 | __le16 num_ops; | ||
387 | struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */ | ||
388 | } __attribute__ ((packed)); | ||
389 | |||
390 | struct ceph_osd_reply_head { | ||
391 | __le32 client_inc; /* client incarnation */ | ||
392 | __le32 flags; | ||
393 | struct ceph_object_layout layout; | ||
394 | __le32 osdmap_epoch; | ||
395 | struct ceph_eversion reassert_version; /* for replaying uncommitted */ | ||
396 | |||
397 | __le32 result; /* result code */ | ||
398 | |||
399 | __le32 object_len; /* length of object name */ | ||
400 | __le32 num_ops; | ||
401 | struct ceph_osd_op ops[0]; /* ops[], object */ | ||
402 | } __attribute__ ((packed)); | ||
403 | |||
404 | |||
405 | #endif | ||
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h new file mode 100644 index 000000000000..28b35a005ec2 --- /dev/null +++ b/include/linux/ceph/types.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _FS_CEPH_TYPES_H | ||
2 | #define _FS_CEPH_TYPES_H | ||
3 | |||
4 | /* needed before including ceph_fs.h */ | ||
5 | #include <linux/in.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/fcntl.h> | ||
8 | #include <linux/string.h> | ||
9 | |||
10 | #include "ceph_fs.h" | ||
11 | #include "ceph_frag.h" | ||
12 | #include "ceph_hash.h" | ||
13 | |||
14 | /* | ||
15 | * Identify inodes by both their ino AND snapshot id (a u64). | ||
16 | */ | ||
17 | struct ceph_vino { | ||
18 | u64 ino; | ||
19 | u64 snap; | ||
20 | }; | ||
21 | |||
22 | |||
23 | /* context for the caps reservation mechanism */ | ||
24 | struct ceph_cap_reservation { | ||
25 | int count; | ||
26 | }; | ||
27 | |||
28 | |||
29 | #endif | ||
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0c991023ee47..709dfb901d11 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -75,7 +75,7 @@ struct cgroup_subsys_state { | |||
75 | 75 | ||
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | /* ID for this css, if possible */ | 77 | /* ID for this css, if possible */ |
78 | struct css_id *id; | 78 | struct css_id __rcu *id; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* bits in struct cgroup_subsys_state flags field */ | 81 | /* bits in struct cgroup_subsys_state flags field */ |
@@ -205,7 +205,7 @@ struct cgroup { | |||
205 | struct list_head children; /* my children */ | 205 | struct list_head children; /* my children */ |
206 | 206 | ||
207 | struct cgroup *parent; /* my parent */ | 207 | struct cgroup *parent; /* my parent */ |
208 | struct dentry *dentry; /* cgroup fs entry, RCU protected */ | 208 | struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */ |
209 | 209 | ||
210 | /* Private pointers for each registered subsystem */ | 210 | /* Private pointers for each registered subsystem */ |
211 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | 211 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; |
diff --git a/include/linux/coda_fs_i.h b/include/linux/coda_fs_i.h index b3ef0c461578..e35071b1de0e 100644 --- a/include/linux/coda_fs_i.h +++ b/include/linux/coda_fs_i.h | |||
@@ -10,19 +10,24 @@ | |||
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/spinlock.h> | ||
13 | #include <linux/coda.h> | 14 | #include <linux/coda.h> |
14 | 15 | ||
15 | /* | 16 | /* |
16 | * coda fs inode data | 17 | * coda fs inode data |
18 | * c_lock protects accesses to c_flags, c_mapcount, c_cached_epoch, c_uid and | ||
19 | * c_cached_perm. | ||
20 | * vfs_inode is set only when the inode is created and never changes. | ||
21 | * c_fid is set when the inode is created and should be considered immutable. | ||
17 | */ | 22 | */ |
18 | struct coda_inode_info { | 23 | struct coda_inode_info { |
19 | struct CodaFid c_fid; /* Coda identifier */ | 24 | struct CodaFid c_fid; /* Coda identifier */ |
20 | u_short c_flags; /* flags (see below) */ | 25 | u_short c_flags; /* flags (see below) */ |
21 | struct list_head c_cilist; /* list of all coda inodes */ | ||
22 | unsigned int c_mapcount; /* nr of times this inode is mapped */ | 26 | unsigned int c_mapcount; /* nr of times this inode is mapped */ |
23 | unsigned int c_cached_epoch; /* epoch for cached permissions */ | 27 | unsigned int c_cached_epoch; /* epoch for cached permissions */ |
24 | vuid_t c_uid; /* fsuid for cached permissions */ | 28 | vuid_t c_uid; /* fsuid for cached permissions */ |
25 | unsigned int c_cached_perm; /* cached access permissions */ | 29 | unsigned int c_cached_perm; /* cached access permissions */ |
30 | spinlock_t c_lock; | ||
26 | struct inode vfs_inode; | 31 | struct inode vfs_inode; |
27 | }; | 32 | }; |
28 | 33 | ||
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index dcc228aa335a..2e914d0771b9 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h | |||
@@ -89,7 +89,11 @@ static __inline__ char *coda_i2s(struct inode *inode) | |||
89 | /* this will not zap the inode away */ | 89 | /* this will not zap the inode away */ |
90 | static __inline__ void coda_flag_inode(struct inode *inode, int flag) | 90 | static __inline__ void coda_flag_inode(struct inode *inode, int flag) |
91 | { | 91 | { |
92 | ITOC(inode)->c_flags |= flag; | 92 | struct coda_inode_info *cii = ITOC(inode); |
93 | |||
94 | spin_lock(&cii->c_lock); | ||
95 | cii->c_flags |= flag; | ||
96 | spin_unlock(&cii->c_lock); | ||
93 | } | 97 | } |
94 | 98 | ||
95 | #endif | 99 | #endif |
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index 284b520934a0..72f2d2f0af91 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #ifdef __KERNEL__ | 9 | #ifdef __KERNEL__ |
10 | #include <linux/backing-dev.h> | 10 | #include <linux/backing-dev.h> |
11 | #include <linux/mutex.h> | ||
11 | 12 | ||
12 | struct kstatfs; | 13 | struct kstatfs; |
13 | 14 | ||
@@ -20,6 +21,7 @@ struct venus_comm { | |||
20 | int vc_inuse; | 21 | int vc_inuse; |
21 | struct super_block *vc_sb; | 22 | struct super_block *vc_sb; |
22 | struct backing_dev_info bdi; | 23 | struct backing_dev_info bdi; |
24 | struct mutex vc_mutex; | ||
23 | }; | 25 | }; |
24 | 26 | ||
25 | 27 | ||
@@ -63,7 +65,7 @@ int venus_symlink(struct super_block *sb, struct CodaFid *fid, | |||
63 | int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); | 65 | int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); |
64 | int venus_pioctl(struct super_block *sb, struct CodaFid *fid, | 66 | int venus_pioctl(struct super_block *sb, struct CodaFid *fid, |
65 | unsigned int cmd, struct PioctlData *data); | 67 | unsigned int cmd, struct PioctlData *data); |
66 | int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb); | 68 | int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); |
67 | int venus_fsync(struct super_block *sb, struct CodaFid *fid); | 69 | int venus_fsync(struct super_block *sb, struct CodaFid *fid); |
68 | int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); | 70 | int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); |
69 | 71 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c1a62c56a660..320d6c94ff84 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -16,7 +16,11 @@ | |||
16 | # define __release(x) __context__(x,-1) | 16 | # define __release(x) __context__(x,-1) |
17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | 17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
18 | # define __percpu __attribute__((noderef, address_space(3))) | 18 | # define __percpu __attribute__((noderef, address_space(3))) |
19 | #ifdef CONFIG_SPARSE_RCU_POINTER | ||
20 | # define __rcu __attribute__((noderef, address_space(4))) | ||
21 | #else | ||
19 | # define __rcu | 22 | # define __rcu |
23 | #endif | ||
20 | extern void __chk_user_ptr(const volatile void __user *); | 24 | extern void __chk_user_ptr(const volatile void __user *); |
21 | extern void __chk_io_ptr(const volatile void __iomem *); | 25 | extern void __chk_io_ptr(const volatile void __iomem *); |
22 | #else | 26 | #else |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 4d2c39573f36..4aaeab376446 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -84,7 +84,7 @@ struct thread_group_cred { | |||
84 | atomic_t usage; | 84 | atomic_t usage; |
85 | pid_t tgid; /* thread group process ID */ | 85 | pid_t tgid; /* thread group process ID */ |
86 | spinlock_t lock; | 86 | spinlock_t lock; |
87 | struct key *session_keyring; /* keyring inherited over fork */ | 87 | struct key __rcu *session_keyring; /* keyring inherited over fork */ |
88 | struct key *process_keyring; /* keyring private to this process */ | 88 | struct key *process_keyring; /* keyring private to this process */ |
89 | struct rcu_head rcu; /* RCU deletion hook */ | 89 | struct rcu_head rcu; /* RCU deletion hook */ |
90 | }; | 90 | }; |
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h new file mode 100644 index 000000000000..97e435b191f4 --- /dev/null +++ b/include/linux/crush/crush.h | |||
@@ -0,0 +1,180 @@ | |||
1 | #ifndef CEPH_CRUSH_CRUSH_H | ||
2 | #define CEPH_CRUSH_CRUSH_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * CRUSH is a pseudo-random data distribution algorithm that | ||
8 | * efficiently distributes input values (typically, data objects) | ||
9 | * across a heterogeneous, structured storage cluster. | ||
10 | * | ||
11 | * The algorithm was originally described in detail in this paper | ||
12 | * (although the algorithm has evolved somewhat since then): | ||
13 | * | ||
14 | * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf | ||
15 | * | ||
16 | * LGPL2 | ||
17 | */ | ||
18 | |||
19 | |||
20 | #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ | ||
21 | |||
22 | |||
23 | #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ | ||
24 | #define CRUSH_MAX_SET 10 /* max size of a mapping result */ | ||
25 | |||
26 | |||
27 | /* | ||
28 | * CRUSH uses user-defined "rules" to describe how inputs should be | ||
29 | * mapped to devices. A rule consists of a sequence of steps to perform | ||
30 | * to generate the set of output devices. | ||
31 | */ | ||
32 | struct crush_rule_step { | ||
33 | __u32 op; | ||
34 | __s32 arg1; | ||
35 | __s32 arg2; | ||
36 | }; | ||
37 | |||
38 | /* step op codes */ | ||
39 | enum { | ||
40 | CRUSH_RULE_NOOP = 0, | ||
41 | CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ | ||
42 | CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ | ||
43 | /* arg2 = type */ | ||
44 | CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ | ||
45 | CRUSH_RULE_EMIT = 4, /* no args */ | ||
46 | CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6, | ||
47 | CRUSH_RULE_CHOOSE_LEAF_INDEP = 7, | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * for specifying choose num (arg1) relative to the max parameter | ||
52 | * passed to do_rule | ||
53 | */ | ||
54 | #define CRUSH_CHOOSE_N 0 | ||
55 | #define CRUSH_CHOOSE_N_MINUS(x) (-(x)) | ||
56 | |||
57 | /* | ||
58 | * The rule mask is used to describe what the rule is intended for. | ||
59 | * Given a ruleset and size of output set, we search through the | ||
60 | * rule list for a matching rule_mask. | ||
61 | */ | ||
62 | struct crush_rule_mask { | ||
63 | __u8 ruleset; | ||
64 | __u8 type; | ||
65 | __u8 min_size; | ||
66 | __u8 max_size; | ||
67 | }; | ||
68 | |||
69 | struct crush_rule { | ||
70 | __u32 len; | ||
71 | struct crush_rule_mask mask; | ||
72 | struct crush_rule_step steps[0]; | ||
73 | }; | ||
74 | |||
75 | #define crush_rule_size(len) (sizeof(struct crush_rule) + \ | ||
76 | (len)*sizeof(struct crush_rule_step)) | ||
77 | |||
78 | |||
79 | |||
80 | /* | ||
81 | * A bucket is a named container of other items (either devices or | ||
82 | * other buckets). Items within a bucket are chosen using one of a | ||
83 | * few different algorithms. The table summarizes how the speed of | ||
84 | * each option measures up against mapping stability when items are | ||
85 | * added or removed. | ||
86 | * | ||
87 | * Bucket Alg Speed Additions Removals | ||
88 | * ------------------------------------------------ | ||
89 | * uniform O(1) poor poor | ||
90 | * list O(n) optimal poor | ||
91 | * tree O(log n) good good | ||
92 | * straw O(n) optimal optimal | ||
93 | */ | ||
94 | enum { | ||
95 | CRUSH_BUCKET_UNIFORM = 1, | ||
96 | CRUSH_BUCKET_LIST = 2, | ||
97 | CRUSH_BUCKET_TREE = 3, | ||
98 | CRUSH_BUCKET_STRAW = 4 | ||
99 | }; | ||
100 | extern const char *crush_bucket_alg_name(int alg); | ||
101 | |||
102 | struct crush_bucket { | ||
103 | __s32 id; /* this'll be negative */ | ||
104 | __u16 type; /* non-zero; type=0 is reserved for devices */ | ||
105 | __u8 alg; /* one of CRUSH_BUCKET_* */ | ||
106 | __u8 hash; /* which hash function to use, CRUSH_HASH_* */ | ||
107 | __u32 weight; /* 16-bit fixed point */ | ||
108 | __u32 size; /* num items */ | ||
109 | __s32 *items; | ||
110 | |||
111 | /* | ||
112 | * cached random permutation: used for uniform bucket and for | ||
113 | * the linear search fallback for the other bucket types. | ||
114 | */ | ||
115 | __u32 perm_x; /* @x for which *perm is defined */ | ||
116 | __u32 perm_n; /* num elements of *perm that are permuted/defined */ | ||
117 | __u32 *perm; | ||
118 | }; | ||
119 | |||
120 | struct crush_bucket_uniform { | ||
121 | struct crush_bucket h; | ||
122 | __u32 item_weight; /* 16-bit fixed point; all items equally weighted */ | ||
123 | }; | ||
124 | |||
125 | struct crush_bucket_list { | ||
126 | struct crush_bucket h; | ||
127 | __u32 *item_weights; /* 16-bit fixed point */ | ||
128 | __u32 *sum_weights; /* 16-bit fixed point. element i is sum | ||
129 | of weights 0..i, inclusive */ | ||
130 | }; | ||
131 | |||
132 | struct crush_bucket_tree { | ||
133 | struct crush_bucket h; /* note: h.size is _tree_ size, not number of | ||
134 | actual items */ | ||
135 | __u8 num_nodes; | ||
136 | __u32 *node_weights; | ||
137 | }; | ||
138 | |||
139 | struct crush_bucket_straw { | ||
140 | struct crush_bucket h; | ||
141 | __u32 *item_weights; /* 16-bit fixed point */ | ||
142 | __u32 *straws; /* 16-bit fixed point */ | ||
143 | }; | ||
144 | |||
145 | |||
146 | |||
147 | /* | ||
148 | * CRUSH map includes all buckets, rules, etc. | ||
149 | */ | ||
150 | struct crush_map { | ||
151 | struct crush_bucket **buckets; | ||
152 | struct crush_rule **rules; | ||
153 | |||
154 | /* | ||
155 | * Parent pointers to identify the parent bucket of a device or | ||
156 | * bucket in the hierarchy. If an item appears more than | ||
157 | * once, this is the _last_ time it appeared (where buckets | ||
158 | * are processed in bucket id order, from -1 on down to | ||
159 | * -max_buckets). | ||
160 | */ | ||
161 | __u32 *bucket_parents; | ||
162 | __u32 *device_parents; | ||
163 | |||
164 | __s32 max_buckets; | ||
165 | __u32 max_rules; | ||
166 | __s32 max_devices; | ||
167 | }; | ||
168 | |||
169 | |||
170 | /* crush.c */ | ||
171 | extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos); | ||
172 | extern void crush_calc_parents(struct crush_map *map); | ||
173 | extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); | ||
174 | extern void crush_destroy_bucket_list(struct crush_bucket_list *b); | ||
175 | extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); | ||
176 | extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); | ||
177 | extern void crush_destroy_bucket(struct crush_bucket *b); | ||
178 | extern void crush_destroy(struct crush_map *map); | ||
179 | |||
180 | #endif | ||
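A quick sketch of how the flexible-array rule layout above is meant to be consumed: the crush_rule_size() macro sizes the allocation so that steps[] immediately follows the fixed header. The helper name and GFP flag below are illustrative assumptions, not part of this patch.

    #include <linux/slab.h>
    #include <linux/crush/crush.h>

    /* Allocate a rule with room for n_steps trailing crush_rule_step entries. */
    static struct crush_rule *alloc_rule(unsigned int n_steps)
    {
            struct crush_rule *rule;

            rule = kzalloc(crush_rule_size(n_steps), GFP_KERNEL);
            if (!rule)
                    return NULL;
            rule->len = n_steps;
            /* rule->steps[0 .. n_steps-1] may now be filled in */
            return rule;
    }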
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h new file mode 100644 index 000000000000..91e884230d5d --- /dev/null +++ b/include/linux/crush/hash.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef CEPH_CRUSH_HASH_H | ||
2 | #define CEPH_CRUSH_HASH_H | ||
3 | |||
4 | #define CRUSH_HASH_RJENKINS1 0 | ||
5 | |||
6 | #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 | ||
7 | |||
8 | extern const char *crush_hash_name(int type); | ||
9 | |||
10 | extern __u32 crush_hash32(int type, __u32 a); | ||
11 | extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); | ||
12 | extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); | ||
13 | extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); | ||
14 | extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, | ||
15 | __u32 e); | ||
16 | |||
17 | #endif | ||
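As a hedged illustration of the interface above: each bucket records which hash family to use (the hash field of struct crush_bucket), and callers mix the input value with the bucket id through the matching crush_hash32_* variant. The wrapper below is illustrative only.

    #include <linux/crush/crush.h>
    #include <linux/crush/hash.h>

    /* Mix input x with a bucket's id using that bucket's configured hash. */
    static __u32 bucket_hash(const struct crush_bucket *b, __u32 x)
    {
            return crush_hash32_2(b->hash, x, (__u32)b->id);
    }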
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h new file mode 100644 index 000000000000..c46b99c18bb0 --- /dev/null +++ b/include/linux/crush/mapper.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef CEPH_CRUSH_MAPPER_H | ||
2 | #define CEPH_CRUSH_MAPPER_H | ||
3 | |||
4 | /* | ||
5 | * CRUSH functions for finding rules and then mapping an input to an | ||
6 | * output set. | ||
7 | * | ||
8 | * LGPL2 | ||
9 | */ | ||
10 | |||
11 | #include "crush.h" | ||
12 | |||
13 | extern int crush_find_rule(struct crush_map *map, int pool, int type, int size); | ||
14 | extern int crush_do_rule(struct crush_map *map, | ||
15 | int ruleno, | ||
16 | int x, int *result, int result_max, | ||
17 | int forcefeed, /* -1 for none */ | ||
18 | __u32 *weights); | ||
19 | |||
20 | #endif | ||
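A minimal sketch of how these two entry points compose, assuming the caller already has a device weight array (in the Ceph client this would come from the osdmap); the wrapper name and error convention are illustrative.

    #include <linux/crush/crush.h>
    #include <linux/crush/mapper.h>

    /* Map input x to at most 'size' devices via the rule matching (pool, type, size). */
    static int map_object(struct crush_map *map, int pool, int type, int size,
                          int x, int *result, __u32 *weights)
    {
            int ruleno = crush_find_rule(map, pool, type, size);

            if (ruleno < 0)
                    return -1;      /* no rule matches this pool/type/size */
            return crush_do_rule(map, ruleno, x, result, size,
                                 -1 /* no forcefeed */, weights);
    }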
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 7c930dba477c..5dd428532f79 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h | |||
@@ -14,16 +14,26 @@ | |||
14 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
15 | #include <linux/memory.h> | 15 | #include <linux/memory.h> |
16 | 16 | ||
17 | struct mdio_platform_data { | ||
18 | unsigned long bus_freq; | ||
19 | }; | ||
20 | |||
17 | struct emac_platform_data { | 21 | struct emac_platform_data { |
18 | char mac_addr[ETH_ALEN]; | 22 | char mac_addr[ETH_ALEN]; |
19 | u32 ctrl_reg_offset; | 23 | u32 ctrl_reg_offset; |
20 | u32 ctrl_mod_reg_offset; | 24 | u32 ctrl_mod_reg_offset; |
21 | u32 ctrl_ram_offset; | 25 | u32 ctrl_ram_offset; |
22 | u32 hw_ram_addr; | 26 | u32 hw_ram_addr; |
23 | u32 mdio_reg_offset; | ||
24 | u32 ctrl_ram_size; | 27 | u32 ctrl_ram_size; |
25 | u32 phy_mask; | 28 | |
26 | u32 mdio_max_freq; | 29 | /* |
30 | * phy_id can be one of the following: | ||
31 | * - NULL : use the first phy on the bus, | ||
32 | * - "" : force to 100/full, no mdio control | ||
33 | * - "<bus>:<addr>" : use the specified bus and phy | ||
34 | */ | ||
35 | const char *phy_id; | ||
36 | |||
27 | u8 rmii_en; | 37 | u8 rmii_en; |
28 | u8 version; | 38 | u8 version; |
29 | void (*interrupt_enable) (void); | 39 | void (*interrupt_enable) (void); |
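A hedged example of how a board file might populate the reworked platform data, in particular the new phy_id field documented above; the register offsets and the "davinci_mdio-0:00" bus:address string are assumptions for illustration only.

    #include <linux/davinci_emac.h>

    static struct emac_platform_data board_emac_pdata = {
            .ctrl_reg_offset     = 0x3000,     /* illustrative offsets */
            .ctrl_mod_reg_offset = 0x2000,
            .ctrl_ram_offset     = 0x0000,
            .ctrl_ram_size       = 0x2000,
            .phy_id              = "davinci_mdio-0:00",  /* "<bus>:<addr>" form */
            .rmii_en             = 0,
    };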
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 7434a8353e23..7187bd8a75f6 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -165,8 +165,10 @@ enum { | |||
165 | DCCPO_TIMESTAMP_ECHO = 42, | 165 | DCCPO_TIMESTAMP_ECHO = 42, |
166 | DCCPO_ELAPSED_TIME = 43, | 166 | DCCPO_ELAPSED_TIME = 43, |
167 | DCCPO_MAX = 45, | 167 | DCCPO_MAX = 45, |
168 | DCCPO_MIN_CCID_SPECIFIC = 128, | 168 | DCCPO_MIN_RX_CCID_SPECIFIC = 128, /* from sender to receiver */ |
169 | DCCPO_MAX_CCID_SPECIFIC = 255, | 169 | DCCPO_MAX_RX_CCID_SPECIFIC = 191, |
170 | DCCPO_MIN_TX_CCID_SPECIFIC = 192, /* from receiver to sender */ | ||
171 | DCCPO_MAX_TX_CCID_SPECIFIC = 255, | ||
170 | }; | 172 | }; |
171 | /* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ | 173 | /* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ |
172 | #define DCCP_SINGLE_OPT_MAXLEN 253 | 174 | #define DCCP_SINGLE_OPT_MAXLEN 253 |
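A small illustrative helper (not part of the patch) for the split option ranges above: values 128-191 now carry CCID data sent from the sender to the receiver's CCID, and 192-255 carry data flowing back from receiver to sender.

    #include <linux/dccp.h>

    static inline bool dccp_opt_is_rx_ccid_specific(u8 opt)
    {
            return opt >= DCCPO_MIN_RX_CCID_SPECIFIC &&
                   opt <= DCCPO_MAX_RX_CCID_SPECIFIC;
    }

    static inline bool dccp_opt_is_tx_ccid_specific(u8 opt)
    {
            return opt >= DCCPO_MIN_TX_CCID_SPECIFIC;  /* 192..255 fits in a u8 */
    }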
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 29b3ce3f2a1d..2833452ea01c 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
@@ -49,7 +49,6 @@ struct task_struct; | |||
49 | 49 | ||
50 | #ifdef CONFIG_LOCKDEP | 50 | #ifdef CONFIG_LOCKDEP |
51 | extern void debug_show_all_locks(void); | 51 | extern void debug_show_all_locks(void); |
52 | extern void __debug_show_held_locks(struct task_struct *task); | ||
53 | extern void debug_show_held_locks(struct task_struct *task); | 52 | extern void debug_show_held_locks(struct task_struct *task); |
54 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); | 53 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); |
55 | extern void debug_check_no_locks_held(struct task_struct *task); | 54 | extern void debug_check_no_locks_held(struct task_struct *task); |
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void) | |||
58 | { | 57 | { |
59 | } | 58 | } |
60 | 59 | ||
61 | static inline void __debug_show_held_locks(struct task_struct *task) | ||
62 | { | ||
63 | } | ||
64 | |||
65 | static inline void debug_show_held_locks(struct task_struct *task) | 60 | static inline void debug_show_held_locks(struct task_struct *task) |
66 | { | 61 | { |
67 | } | 62 | } |
diff --git a/include/linux/device.h b/include/linux/device.h index 516fecacf27b..dd4895313468 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -751,4 +751,11 @@ do { \ | |||
751 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) | 751 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
752 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ | 752 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ |
753 | MODULE_ALIAS("char-major-" __stringify(major) "-*") | 753 | MODULE_ALIAS("char-major-" __stringify(major) "-*") |
754 | |||
755 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
756 | extern long sysfs_deprecated; | ||
757 | #else | ||
758 | #define sysfs_deprecated 0 | ||
759 | #endif | ||
760 | |||
754 | #endif /* _DEVICE_H_ */ | 761 | #endif /* _DEVICE_H_ */ |
diff --git a/include/linux/dlm.h b/include/linux/dlm.h index 0b3518c42356..d4e02f5353a0 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h | |||
@@ -48,10 +48,10 @@ typedef void dlm_lockspace_t; | |||
48 | * | 48 | * |
49 | * 0 if lock request was successful | 49 | * 0 if lock request was successful |
50 | * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE | 50 | * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE |
51 | * -ENOMEM if there is no memory to process request | ||
52 | * -EINVAL if there are invalid parameters | ||
53 | * -DLM_EUNLOCK if unlock request was successful | 51 | * -DLM_EUNLOCK if unlock request was successful |
54 | * -DLM_ECANCEL if a cancel completed successfully | 52 | * -DLM_ECANCEL if a cancel completed successfully |
53 | * -EDEADLK if a deadlock was detected | ||
54 | * -ETIMEDOUT if the lock request was canceled due to a timeout | ||
55 | */ | 55 | */ |
56 | 56 | ||
57 | #define DLM_SBF_DEMOTED 0x01 | 57 | #define DLM_SBF_DEMOTED 0x01 |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7cecc90ed34..a7d9dc21391d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -57,15 +57,15 @@ extern int dmar_table_init(void); | |||
57 | extern int dmar_dev_scope_init(void); | 57 | extern int dmar_dev_scope_init(void); |
58 | 58 | ||
59 | /* Intel IOMMU detection */ | 59 | /* Intel IOMMU detection */ |
60 | extern void detect_intel_iommu(void); | 60 | extern int detect_intel_iommu(void); |
61 | extern int enable_drhd_fault_handling(void); | 61 | extern int enable_drhd_fault_handling(void); |
62 | 62 | ||
63 | extern int parse_ioapics_under_ir(void); | 63 | extern int parse_ioapics_under_ir(void); |
64 | extern int alloc_iommu(struct dmar_drhd_unit *); | 64 | extern int alloc_iommu(struct dmar_drhd_unit *); |
65 | #else | 65 | #else |
66 | static inline void detect_intel_iommu(void) | 66 | static inline int detect_intel_iommu(void) |
67 | { | 67 | { |
68 | return; | 68 | return -ENODEV; |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline int dmar_table_init(void) | 71 | static inline int dmar_table_init(void) |
@@ -106,6 +106,7 @@ struct irte { | |||
106 | __u64 high; | 106 | __u64 high; |
107 | }; | 107 | }; |
108 | }; | 108 | }; |
109 | |||
109 | #ifdef CONFIG_INTR_REMAP | 110 | #ifdef CONFIG_INTR_REMAP |
110 | extern int intr_remapping_enabled; | 111 | extern int intr_remapping_enabled; |
111 | extern int intr_remapping_supported(void); | 112 | extern int intr_remapping_supported(void); |
@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | |||
119 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | 120 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, |
120 | u16 sub_handle); | 121 | u16 sub_handle); |
121 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); | 122 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); |
122 | extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); | ||
123 | extern int flush_irte(int irq); | ||
124 | extern int free_irte(int irq); | 123 | extern int free_irte(int irq); |
125 | 124 | ||
126 | extern int irq_remapped(int irq); | ||
127 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | 125 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); |
128 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | 126 | extern struct intel_iommu *map_ioapic_to_ir(int apic); |
129 | extern struct intel_iommu *map_hpet_to_ir(u8 id); | 127 | extern struct intel_iommu *map_hpet_to_ir(u8 id); |
@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
177 | return 0; | 175 | return 0; |
178 | } | 176 | } |
179 | 177 | ||
180 | #define irq_remapped(irq) (0) | ||
181 | #define enable_intr_remapping(mode) (-1) | 178 | #define enable_intr_remapping(mode) (-1) |
182 | #define disable_intr_remapping() (0) | 179 | #define disable_intr_remapping() (0) |
183 | #define reenable_intr_remapping(mode) (0) | 180 | #define reenable_intr_remapping(mode) (0) |
@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
187 | /* Can't use the common MSI interrupt functions | 184 | /* Can't use the common MSI interrupt functions |
188 | * since DMAR is not a pci device | 185 | * since DMAR is not a pci device |
189 | */ | 186 | */ |
190 | extern void dmar_msi_unmask(unsigned int irq); | 187 | struct irq_data; |
191 | extern void dmar_msi_mask(unsigned int irq); | 188 | extern void dmar_msi_unmask(struct irq_data *data); |
189 | extern void dmar_msi_mask(struct irq_data *data); | ||
192 | extern void dmar_msi_read(int irq, struct msi_msg *msg); | 190 | extern void dmar_msi_read(int irq, struct msi_msg *msg); |
193 | extern void dmar_msi_write(int irq, struct msi_msg *msg); | 191 | extern void dmar_msi_write(int irq, struct msi_msg *msg); |
194 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 192 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 479ee3a1d901..9b2a0158f399 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -53,10 +53,10 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.8.1" | 56 | #define REL_VERSION "8.3.9rc2" |
57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
59 | #define PRO_VERSION_MAX 94 | 59 | #define PRO_VERSION_MAX 95 |
60 | 60 | ||
61 | 61 | ||
62 | enum drbd_io_error_p { | 62 | enum drbd_io_error_p { |
@@ -91,6 +91,11 @@ enum drbd_after_sb_p { | |||
91 | ASB_VIOLENTLY | 91 | ASB_VIOLENTLY |
92 | }; | 92 | }; |
93 | 93 | ||
94 | enum drbd_on_no_data { | ||
95 | OND_IO_ERROR, | ||
96 | OND_SUSPEND_IO | ||
97 | }; | ||
98 | |||
94 | /* KEEP the order, do not delete or insert. Only append. */ | 99 | /* KEEP the order, do not delete or insert. Only append. */ |
95 | enum drbd_ret_codes { | 100 | enum drbd_ret_codes { |
96 | ERR_CODE_BASE = 100, | 101 | ERR_CODE_BASE = 100, |
@@ -140,6 +145,7 @@ enum drbd_ret_codes { | |||
140 | ERR_CONNECTED = 151, /* DRBD 8.3 only */ | 145 | ERR_CONNECTED = 151, /* DRBD 8.3 only */ |
141 | ERR_PERM = 152, | 146 | ERR_PERM = 152, |
142 | ERR_NEED_APV_93 = 153, | 147 | ERR_NEED_APV_93 = 153, |
148 | ERR_STONITH_AND_PROT_A = 154, | ||
143 | 149 | ||
144 | /* insert new ones above this line */ | 150 | /* insert new ones above this line */ |
145 | AFTER_LAST_ERR_CODE | 151 | AFTER_LAST_ERR_CODE |
@@ -226,13 +232,17 @@ union drbd_state { | |||
226 | unsigned conn:5 ; /* 17/32 cstates */ | 232 | unsigned conn:5 ; /* 17/32 cstates */ |
227 | unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ | 233 | unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ |
228 | unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ | 234 | unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ |
229 | unsigned susp:1 ; /* 2/2 IO suspended no/yes */ | 235 | unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */ |
230 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ | 236 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ |
231 | unsigned peer_isp:1 ; | 237 | unsigned peer_isp:1 ; |
232 | unsigned user_isp:1 ; | 238 | unsigned user_isp:1 ; |
233 | unsigned _pad:11; /* 0 unused */ | 239 | unsigned susp_nod:1 ; /* IO suspended because no data */ |
240 | unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/ | ||
241 | unsigned _pad:9; /* 0 unused */ | ||
234 | #elif defined(__BIG_ENDIAN_BITFIELD) | 242 | #elif defined(__BIG_ENDIAN_BITFIELD) |
235 | unsigned _pad:11; /* 0 unused */ | 243 | unsigned _pad:9; |
244 | unsigned susp_fen:1 ; | ||
245 | unsigned susp_nod:1 ; | ||
236 | unsigned user_isp:1 ; | 246 | unsigned user_isp:1 ; |
237 | unsigned peer_isp:1 ; | 247 | unsigned peer_isp:1 ; |
238 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ | 248 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ |
@@ -312,6 +322,8 @@ enum drbd_timeout_flag { | |||
312 | 322 | ||
313 | #define DRBD_MAGIC 0x83740267 | 323 | #define DRBD_MAGIC 0x83740267 |
314 | #define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) | 324 | #define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) |
325 | #define DRBD_MAGIC_BIG 0x835a | ||
326 | #define BE_DRBD_MAGIC_BIG __constant_cpu_to_be16(DRBD_MAGIC_BIG) | ||
315 | 327 | ||
316 | /* these are of type "int" */ | 328 | /* these are of type "int" */ |
317 | #define DRBD_MD_INDEX_INTERNAL -1 | 329 | #define DRBD_MD_INDEX_INTERNAL -1 |
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 440b42e38e89..4ac33f34b77e 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h | |||
@@ -128,26 +128,31 @@ | |||
128 | #define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT | 128 | #define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT |
129 | #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT | 129 | #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT |
130 | #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT | 130 | #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT |
131 | #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR | ||
131 | 132 | ||
132 | #define DRBD_MAX_BIO_BVECS_MIN 0 | 133 | #define DRBD_MAX_BIO_BVECS_MIN 0 |
133 | #define DRBD_MAX_BIO_BVECS_MAX 128 | 134 | #define DRBD_MAX_BIO_BVECS_MAX 128 |
134 | #define DRBD_MAX_BIO_BVECS_DEF 0 | 135 | #define DRBD_MAX_BIO_BVECS_DEF 0 |
135 | 136 | ||
136 | #define DRBD_DP_VOLUME_MIN 4 | 137 | #define DRBD_C_PLAN_AHEAD_MIN 0 |
137 | #define DRBD_DP_VOLUME_MAX 1048576 | 138 | #define DRBD_C_PLAN_AHEAD_MAX 300 |
138 | #define DRBD_DP_VOLUME_DEF 16384 | 139 | #define DRBD_C_PLAN_AHEAD_DEF 0 /* RS rate controller disabled by default */ |
139 | 140 | ||
140 | #define DRBD_DP_INTERVAL_MIN 1 | 141 | #define DRBD_C_DELAY_TARGET_MIN 1 |
141 | #define DRBD_DP_INTERVAL_MAX 600 | 142 | #define DRBD_C_DELAY_TARGET_MAX 100 |
142 | #define DRBD_DP_INTERVAL_DEF 5 | 143 | #define DRBD_C_DELAY_TARGET_DEF 10 |
143 | 144 | ||
144 | #define DRBD_RS_THROTTLE_TH_MIN 1 | 145 | #define DRBD_C_FILL_TARGET_MIN 0 |
145 | #define DRBD_RS_THROTTLE_TH_MAX 600 | 146 | #define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */ |
146 | #define DRBD_RS_THROTTLE_TH_DEF 20 | 147 | #define DRBD_C_FILL_TARGET_DEF 0 /* By default disabled -> controlled by delay_target */ |
147 | 148 | ||
148 | #define DRBD_RS_HOLD_OFF_TH_MIN 1 | 149 | #define DRBD_C_MAX_RATE_MIN 250 /* kByte/sec */ |
149 | #define DRBD_RS_HOLD_OFF_TH_MAX 6000 | 150 | #define DRBD_C_MAX_RATE_MAX (4 << 20) |
150 | #define DRBD_RS_HOLD_OFF_TH_DEF 100 | 151 | #define DRBD_C_MAX_RATE_DEF 102400 |
152 | |||
153 | #define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */ | ||
154 | #define DRBD_C_MIN_RATE_MAX (4 << 20) | ||
155 | #define DRBD_C_MIN_RATE_DEF 4096 | ||
151 | 156 | ||
152 | #undef RANGE | 157 | #undef RANGE |
153 | #endif | 158 | #endif |
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index 5f042810a56c..ade91107c9a5 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h | |||
@@ -87,6 +87,12 @@ NL_PACKET(syncer_conf, 8, | |||
87 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) | 87 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) |
88 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) | 88 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) |
89 | NL_BIT( 65, T_MAY_IGNORE, use_rle) | 89 | NL_BIT( 65, T_MAY_IGNORE, use_rle) |
90 | NL_INTEGER( 75, T_MAY_IGNORE, on_no_data) | ||
91 | NL_INTEGER( 76, T_MAY_IGNORE, c_plan_ahead) | ||
92 | NL_INTEGER( 77, T_MAY_IGNORE, c_delay_target) | ||
93 | NL_INTEGER( 78, T_MAY_IGNORE, c_fill_target) | ||
94 | NL_INTEGER( 79, T_MAY_IGNORE, c_max_rate) | ||
95 | NL_INTEGER( 80, T_MAY_IGNORE, c_min_rate) | ||
90 | ) | 96 | ) |
91 | 97 | ||
92 | NL_PACKET(invalidate, 9, ) | 98 | NL_PACKET(invalidate, 9, ) |
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 52c0da4bdd18..a90b3892074a 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _DYNAMIC_DEBUG_H | 1 | #ifndef _DYNAMIC_DEBUG_H |
2 | #define _DYNAMIC_DEBUG_H | 2 | #define _DYNAMIC_DEBUG_H |
3 | 3 | ||
4 | #include <linux/jump_label.h> | ||
5 | |||
4 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which | 6 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which |
5 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | 7 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They |
6 | * use independent hash functions, to reduce the chance of false positives. | 8 | * use independent hash functions, to reduce the chance of false positives. |
@@ -22,8 +24,6 @@ struct _ddebug { | |||
22 | const char *function; | 24 | const char *function; |
23 | const char *filename; | 25 | const char *filename; |
24 | const char *format; | 26 | const char *format; |
25 | char primary_hash; | ||
26 | char secondary_hash; | ||
27 | unsigned int lineno:24; | 27 | unsigned int lineno:24; |
28 | /* | 28 | /* |
29 | * The flags field controls the behaviour at the callsite. | 29 | * The flags field controls the behaviour at the callsite. |
@@ -33,6 +33,7 @@ struct _ddebug { | |||
33 | #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ | 33 | #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ |
34 | #define _DPRINTK_FLAGS_DEFAULT 0 | 34 | #define _DPRINTK_FLAGS_DEFAULT 0 |
35 | unsigned int flags:8; | 35 | unsigned int flags:8; |
36 | char enabled; | ||
36 | } __attribute__((aligned(8))); | 37 | } __attribute__((aligned(8))); |
37 | 38 | ||
38 | 39 | ||
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, | |||
42 | #if defined(CONFIG_DYNAMIC_DEBUG) | 43 | #if defined(CONFIG_DYNAMIC_DEBUG) |
43 | extern int ddebug_remove_module(const char *mod_name); | 44 | extern int ddebug_remove_module(const char *mod_name); |
44 | 45 | ||
45 | #define __dynamic_dbg_enabled(dd) ({ \ | ||
46 | int __ret = 0; \ | ||
47 | if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \ | ||
48 | (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \ | ||
49 | if (unlikely(dd.flags)) \ | ||
50 | __ret = 1; \ | ||
51 | __ret; }) | ||
52 | |||
53 | #define dynamic_pr_debug(fmt, ...) do { \ | 46 | #define dynamic_pr_debug(fmt, ...) do { \ |
47 | __label__ do_printk; \ | ||
48 | __label__ out; \ | ||
54 | static struct _ddebug descriptor \ | 49 | static struct _ddebug descriptor \ |
55 | __used \ | 50 | __used \ |
56 | __attribute__((section("__verbose"), aligned(8))) = \ | 51 | __attribute__((section("__verbose"), aligned(8))) = \ |
57 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 52 | { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ |
58 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 53 | _DPRINTK_FLAGS_DEFAULT }; \ |
59 | if (__dynamic_dbg_enabled(descriptor)) \ | 54 | JUMP_LABEL(&descriptor.enabled, do_printk); \ |
60 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ | 55 | goto out; \ |
56 | do_printk: \ | ||
57 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ | ||
58 | out: ; \ | ||
61 | } while (0) | 59 | } while (0) |
62 | 60 | ||
63 | 61 | ||
64 | #define dynamic_dev_dbg(dev, fmt, ...) do { \ | 62 | #define dynamic_dev_dbg(dev, fmt, ...) do { \ |
63 | __label__ do_printk; \ | ||
64 | __label__ out; \ | ||
65 | static struct _ddebug descriptor \ | 65 | static struct _ddebug descriptor \ |
66 | __used \ | 66 | __used \ |
67 | __attribute__((section("__verbose"), aligned(8))) = \ | 67 | __attribute__((section("__verbose"), aligned(8))) = \ |
68 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 68 | { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ |
69 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 69 | _DPRINTK_FLAGS_DEFAULT }; \ |
70 | if (__dynamic_dbg_enabled(descriptor)) \ | 70 | JUMP_LABEL(&descriptor.enabled, do_printk); \ |
71 | dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ | 71 | goto out; \ |
72 | do_printk: \ | ||
73 | dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ | ||
74 | out: ; \ | ||
72 | } while (0) | 75 | } while (0) |
73 | 76 | ||
74 | #else | 77 | #else |
@@ -80,7 +83,7 @@ static inline int ddebug_remove_module(const char *mod) | |||
80 | 83 | ||
81 | #define dynamic_pr_debug(fmt, ...) \ | 84 | #define dynamic_pr_debug(fmt, ...) \ |
82 | do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) | 85 | do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) |
83 | #define dynamic_dev_dbg(dev, format, ...) \ | 86 | #define dynamic_dev_dbg(dev, fmt, ...) \ |
84 | do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) | 87 | do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) |
85 | #endif | 88 | #endif |
86 | 89 | ||
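The rework above drops the two hash bytes from struct _ddebug and instead gives each callsite an 'enabled' byte that JUMP_LABEL() turns into a patchable branch, so disabled debug statements cost essentially a no-op at runtime. A minimal call-site sketch follows (pr_debug() expands to dynamic_pr_debug() when CONFIG_DYNAMIC_DEBUG is set); the function name and message are made up for illustration.

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/kernel.h>
    #include <linux/dynamic_debug.h>

    static void demo_probe_failed(int err)
    {
            /* Disabled by default; enable at runtime with something like:
             *   echo 'func demo_probe_failed +p' > <debugfs>/dynamic_debug/control
             */
            dynamic_pr_debug("probe failed, err=%d\n", err);
    }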
diff --git a/include/linux/early_res.h b/include/linux/early_res.h deleted file mode 100644 index 29c09f57a13c..000000000000 --- a/include/linux/early_res.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | #ifndef _LINUX_EARLY_RES_H | ||
2 | #define _LINUX_EARLY_RES_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | extern void reserve_early(u64 start, u64 end, char *name); | ||
6 | extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); | ||
7 | extern void free_early(u64 start, u64 end); | ||
8 | void free_early_partial(u64 start, u64 end); | ||
9 | extern void early_res_to_bootmem(u64 start, u64 end); | ||
10 | |||
11 | void reserve_early_without_check(u64 start, u64 end, char *name); | ||
12 | u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, | ||
13 | u64 size, u64 align); | ||
14 | u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start, | ||
15 | u64 *sizep, u64 align); | ||
16 | u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align); | ||
17 | u64 get_max_mapped(void); | ||
18 | #include <linux/range.h> | ||
19 | int get_free_all_memory_range(struct range **rangep, int nodeid); | ||
20 | |||
21 | #endif /* __KERNEL__ */ | ||
22 | |||
23 | #endif /* _LINUX_EARLY_RES_H */ | ||
diff --git a/include/linux/edac.h b/include/linux/edac.h index 7cf92e8a4196..36c66443bdfd 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define _LINUX_EDAC_H_ | 13 | #define _LINUX_EDAC_H_ |
14 | 14 | ||
15 | #include <asm/atomic.h> | 15 | #include <asm/atomic.h> |
16 | #include <linux/sysdev.h> | ||
16 | 17 | ||
17 | #define EDAC_OPSTATE_INVAL -1 | 18 | #define EDAC_OPSTATE_INVAL -1 |
18 | #define EDAC_OPSTATE_POLL 0 | 19 | #define EDAC_OPSTATE_POLL 0 |
@@ -22,9 +23,12 @@ | |||
22 | extern int edac_op_state; | 23 | extern int edac_op_state; |
23 | extern int edac_err_assert; | 24 | extern int edac_err_assert; |
24 | extern atomic_t edac_handlers; | 25 | extern atomic_t edac_handlers; |
26 | extern struct sysdev_class edac_class; | ||
25 | 27 | ||
26 | extern int edac_handler_set(void); | 28 | extern int edac_handler_set(void); |
27 | extern void edac_atomic_assert_error(void); | 29 | extern void edac_atomic_assert_error(void); |
30 | extern struct sysdev_class *edac_get_sysfs_class(void); | ||
31 | extern void edac_put_sysfs_class(void); | ||
28 | 32 | ||
29 | static inline void opstate_init(void) | 33 | static inline void opstate_init(void) |
30 | { | 34 | { |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2308fbb4523a..f16a01081e15 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -71,7 +71,7 @@ static inline int is_zero_ether_addr(const u8 *addr) | |||
71 | */ | 71 | */ |
72 | static inline int is_multicast_ether_addr(const u8 *addr) | 72 | static inline int is_multicast_ether_addr(const u8 *addr) |
73 | { | 73 | { |
74 | return (0x01 & addr[0]); | 74 | return 0x01 & addr[0]; |
75 | } | 75 | } |
76 | 76 | ||
77 | /** | 77 | /** |
@@ -82,7 +82,7 @@ static inline int is_multicast_ether_addr(const u8 *addr) | |||
82 | */ | 82 | */ |
83 | static inline int is_local_ether_addr(const u8 *addr) | 83 | static inline int is_local_ether_addr(const u8 *addr) |
84 | { | 84 | { |
85 | return (0x02 & addr[0]); | 85 | return 0x02 & addr[0]; |
86 | } | 86 | } |
87 | 87 | ||
88 | /** | 88 | /** |
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev, | |||
237 | * entry points. | 237 | * entry points. |
238 | */ | 238 | */ |
239 | 239 | ||
240 | static inline int compare_ether_header(const void *a, const void *b) | 240 | static inline unsigned long compare_ether_header(const void *a, const void *b) |
241 | { | 241 | { |
242 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 | ||
243 | unsigned long fold; | ||
244 | |||
245 | /* | ||
246 | * We want to compare 14 bytes: | ||
247 | * [a0 ... a13] ^ [b0 ... b13] | ||
248 | * Use two long XOR, ORed together, with an overlap of two bytes. | ||
249 | * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | | ||
250 | * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] | ||
251 | * This means the [a6 a7] ^ [b6 b7] part is done two times. | ||
252 | */ | ||
253 | fold = *(unsigned long *)a ^ *(unsigned long *)b; | ||
254 | fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); | ||
255 | return fold; | ||
256 | #else | ||
242 | u32 *a32 = (u32 *)((u8 *)a + 2); | 257 | u32 *a32 = (u32 *)((u8 *)a + 2); |
243 | u32 *b32 = (u32 *)((u8 *)b + 2); | 258 | u32 *b32 = (u32 *)((u8 *)b + 2); |
244 | 259 | ||
245 | return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | | 260 | return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | |
246 | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); | 261 | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); |
262 | #endif | ||
247 | } | 263 | } |
248 | 264 | ||
249 | #endif /* _LINUX_ETHERDEVICE_H */ | 265 | #endif /* _LINUX_ETHERDEVICE_H */ |
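A short worked note on the 64-bit branch above, plus a trivial hedged wrapper: the two unsigned-long loads cover bytes 0-7 and 6-13, so bytes 6 and 7 are XORed twice, which cannot mask a difference; the OR of the two XORs is therefore zero exactly when all 14 header bytes match. The wrapper name below is an assumption.

    #include <linux/etherdevice.h>

    /* True when two Ethernet headers (dst, src, proto = 14 bytes) are identical. */
    static inline bool eth_hdr_equal(const void *a, const void *b)
    {
            return compare_ether_header(a, b) == 0;
    }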
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 991269e5b152..6628a507fd3b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define _LINUX_ETHTOOL_H | 14 | #define _LINUX_ETHTOOL_H |
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/if_ether.h> | ||
17 | 18 | ||
18 | /* This should work for both 32 and 64 bit userland. */ | 19 | /* This should work for both 32 and 64 bit userland. */ |
19 | struct ethtool_cmd { | 20 | struct ethtool_cmd { |
@@ -308,15 +309,28 @@ struct ethtool_perm_addr { | |||
308 | * flag differs from the read-only value. | 309 | * flag differs from the read-only value. |
309 | */ | 310 | */ |
310 | enum ethtool_flags { | 311 | enum ethtool_flags { |
312 | ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */ | ||
313 | ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */ | ||
311 | ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ | 314 | ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ |
312 | ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ | 315 | ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ |
313 | ETH_FLAG_RXHASH = (1 << 28), | 316 | ETH_FLAG_RXHASH = (1 << 28), |
314 | }; | 317 | }; |
315 | 318 | ||
316 | /* The following structures are for supporting RX network flow | 319 | /* The following structures are for supporting RX network flow |
317 | * classification configuration. Note, all multibyte fields, e.g., | 320 | * classification and RX n-tuple configuration. Note, all multibyte |
318 | * ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network | 321 | * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to |
319 | * byte order. | 322 | * be in network byte order. |
323 | */ | ||
324 | |||
325 | /** | ||
326 | * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc. | ||
327 | * @ip4src: Source host | ||
328 | * @ip4dst: Destination host | ||
329 | * @psrc: Source port | ||
330 | * @pdst: Destination port | ||
331 | * @tos: Type-of-service | ||
332 | * | ||
333 | * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow. | ||
320 | */ | 334 | */ |
321 | struct ethtool_tcpip4_spec { | 335 | struct ethtool_tcpip4_spec { |
322 | __be32 ip4src; | 336 | __be32 ip4src; |
@@ -326,6 +340,15 @@ struct ethtool_tcpip4_spec { | |||
326 | __u8 tos; | 340 | __u8 tos; |
327 | }; | 341 | }; |
328 | 342 | ||
343 | /** | ||
344 | * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4 | ||
345 | * @ip4src: Source host | ||
346 | * @ip4dst: Destination host | ||
347 | * @spi: Security parameters index | ||
348 | * @tos: Type-of-service | ||
349 | * | ||
350 | * This can be used to specify an IPsec transport or tunnel over IPv4. | ||
351 | */ | ||
329 | struct ethtool_ah_espip4_spec { | 352 | struct ethtool_ah_espip4_spec { |
330 | __be32 ip4src; | 353 | __be32 ip4src; |
331 | __be32 ip4dst; | 354 | __be32 ip4dst; |
@@ -333,21 +356,17 @@ struct ethtool_ah_espip4_spec { | |||
333 | __u8 tos; | 356 | __u8 tos; |
334 | }; | 357 | }; |
335 | 358 | ||
336 | struct ethtool_rawip4_spec { | ||
337 | __be32 ip4src; | ||
338 | __be32 ip4dst; | ||
339 | __u8 hdata[64]; | ||
340 | }; | ||
341 | |||
342 | struct ethtool_ether_spec { | ||
343 | __be16 ether_type; | ||
344 | __u8 frame_size; | ||
345 | __u8 eframe[16]; | ||
346 | }; | ||
347 | |||
348 | #define ETH_RX_NFC_IP4 1 | 359 | #define ETH_RX_NFC_IP4 1 |
349 | #define ETH_RX_NFC_IP6 2 | ||
350 | 360 | ||
361 | /** | ||
362 | * struct ethtool_usrip4_spec - general flow specification for IPv4 | ||
363 | * @ip4src: Source host | ||
364 | * @ip4dst: Destination host | ||
365 | * @l4_4_bytes: First 4 bytes of transport (layer 4) header | ||
366 | * @tos: Type-of-service | ||
367 | * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 | ||
368 | * @proto: Transport protocol number; mask must be 0 | ||
369 | */ | ||
351 | struct ethtool_usrip4_spec { | 370 | struct ethtool_usrip4_spec { |
352 | __be32 ip4src; | 371 | __be32 ip4src; |
353 | __be32 ip4dst; | 372 | __be32 ip4dst; |
@@ -357,6 +376,15 @@ struct ethtool_usrip4_spec { | |||
357 | __u8 proto; | 376 | __u8 proto; |
358 | }; | 377 | }; |
359 | 378 | ||
379 | /** | ||
380 | * struct ethtool_rx_flow_spec - specification for RX flow filter | ||
381 | * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW | ||
382 | * @h_u: Flow fields to match (dependent on @flow_type) | ||
383 | * @m_u: Masks for flow field bits to be ignored | ||
384 | * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC | ||
385 | * if packets should be discarded | ||
386 | * @location: Index of filter in hardware table | ||
387 | */ | ||
360 | struct ethtool_rx_flow_spec { | 388 | struct ethtool_rx_flow_spec { |
361 | __u32 flow_type; | 389 | __u32 flow_type; |
362 | union { | 390 | union { |
@@ -365,36 +393,91 @@ struct ethtool_rx_flow_spec { | |||
365 | struct ethtool_tcpip4_spec sctp_ip4_spec; | 393 | struct ethtool_tcpip4_spec sctp_ip4_spec; |
366 | struct ethtool_ah_espip4_spec ah_ip4_spec; | 394 | struct ethtool_ah_espip4_spec ah_ip4_spec; |
367 | struct ethtool_ah_espip4_spec esp_ip4_spec; | 395 | struct ethtool_ah_espip4_spec esp_ip4_spec; |
368 | struct ethtool_rawip4_spec raw_ip4_spec; | ||
369 | struct ethtool_ether_spec ether_spec; | ||
370 | struct ethtool_usrip4_spec usr_ip4_spec; | 396 | struct ethtool_usrip4_spec usr_ip4_spec; |
371 | __u8 hdata[64]; | 397 | struct ethhdr ether_spec; |
372 | } h_u, m_u; /* entry, mask */ | 398 | __u8 hdata[72]; |
399 | } h_u, m_u; | ||
373 | __u64 ring_cookie; | 400 | __u64 ring_cookie; |
374 | __u32 location; | 401 | __u32 location; |
375 | }; | 402 | }; |
376 | 403 | ||
404 | /** | ||
405 | * struct ethtool_rxnfc - command to get or set RX flow classification rules | ||
406 | * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, | ||
407 | * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE, | ||
408 | * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS | ||
409 | * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW | ||
410 | * @data: Command-dependent value | ||
411 | * @fs: Flow filter specification | ||
412 | * @rule_cnt: Number of rules to be affected | ||
413 | * @rule_locs: Array of valid rule indices | ||
414 | * | ||
415 | * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating | ||
416 | * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following | ||
417 | * structure fields must not be used. | ||
418 | * | ||
419 | * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues | ||
420 | * on return. | ||
421 | * | ||
422 | * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined | ||
423 | * rules on return. | ||
424 | * | ||
425 | * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the index of an | ||
426 | * existing filter rule on entry and @fs contains the rule on return. | ||
427 | * | ||
428 | * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the | ||
429 | * user buffer for @rule_locs on entry. On return, @data is the size | ||
430 | * of the filter table and @rule_locs contains the indices of the | ||
431 | * defined rules. | ||
432 | * | ||
433 | * For %ETHTOOL_SRXCLSRLINS, @fs specifies the filter rule to add or | ||
434 | * update. @fs.@location specifies the index to use and must not be | ||
435 | * ignored. | ||
436 | * | ||
437 | * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the index of an | ||
438 | * existing filter rule on entry. | ||
439 | * | ||
440 | * Implementation of indexed classification rules generally requires a | ||
441 | * TCAM. | ||
442 | */ | ||
377 | struct ethtool_rxnfc { | 443 | struct ethtool_rxnfc { |
378 | __u32 cmd; | 444 | __u32 cmd; |
379 | __u32 flow_type; | 445 | __u32 flow_type; |
380 | /* The rx flow hash value or the rule DB size */ | ||
381 | __u64 data; | 446 | __u64 data; |
382 | /* The following fields are not valid and must not be used for | ||
383 | * the ETHTOOL_{G,X}RXFH commands. */ | ||
384 | struct ethtool_rx_flow_spec fs; | 447 | struct ethtool_rx_flow_spec fs; |
385 | __u32 rule_cnt; | 448 | __u32 rule_cnt; |
386 | __u32 rule_locs[0]; | 449 | __u32 rule_locs[0]; |
387 | }; | 450 | }; |
388 | 451 | ||
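A hedged user-space sketch of the %ETHTOOL_GRXCLSRLCNT case documented above; fd is assumed to be any open AF_INET datagram socket, and the helper name is illustrative.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/ethtool.h>

    static int get_rx_cls_rule_count(int fd, const char *ifname)
    {
            struct ethtool_rxnfc nfc;
            struct ifreq ifr;

            memset(&nfc, 0, sizeof(nfc));
            memset(&ifr, 0, sizeof(ifr));
            nfc.cmd = ETHTOOL_GRXCLSRLCNT;
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&nfc;

            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    return -1;
            return nfc.rule_cnt;    /* number of installed classification rules */
    }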
452 | /** | ||
453 | * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection | ||
454 | * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR | ||
455 | * @size: On entry, the array size of the user buffer. On return from | ||
456 | * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table. | ||
457 | * @ring_index: RX ring/queue index for each hash value | ||
458 | */ | ||
389 | struct ethtool_rxfh_indir { | 459 | struct ethtool_rxfh_indir { |
390 | __u32 cmd; | 460 | __u32 cmd; |
391 | /* On entry, this is the array size of the user buffer. On | ||
392 | * return from ETHTOOL_GRXFHINDIR, this is the array size of | ||
393 | * the hardware indirection table. */ | ||
394 | __u32 size; | 461 | __u32 size; |
395 | __u32 ring_index[0]; /* ring/queue index for each hash value */ | 462 | __u32 ring_index[0]; |
396 | }; | 463 | }; |
397 | 464 | ||
465 | /** | ||
466 | * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter | ||
467 | * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW | ||
468 | * @h_u: Flow field values to match (dependent on @flow_type) | ||
469 | * @m_u: Masks for flow field value bits to be ignored | ||
470 | * @vlan_tag: VLAN tag to match | ||
471 | * @vlan_tag_mask: Mask for VLAN tag bits to be ignored | ||
472 | * @data: Driver-dependent data to match | ||
473 | * @data_mask: Mask for driver-dependent data bits to be ignored | ||
474 | * @action: RX ring/queue index to deliver to (non-negative) or other action | ||
475 | * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP) | ||
476 | * | ||
477 | * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where | ||
478 | * a field value and mask are both zero this is treated as if all mask | ||
479 | * bits are set i.e. the field is ignored. | ||
480 | */ | ||
398 | struct ethtool_rx_ntuple_flow_spec { | 481 | struct ethtool_rx_ntuple_flow_spec { |
399 | __u32 flow_type; | 482 | __u32 flow_type; |
400 | union { | 483 | union { |
@@ -403,22 +486,26 @@ struct ethtool_rx_ntuple_flow_spec { | |||
403 | struct ethtool_tcpip4_spec sctp_ip4_spec; | 486 | struct ethtool_tcpip4_spec sctp_ip4_spec; |
404 | struct ethtool_ah_espip4_spec ah_ip4_spec; | 487 | struct ethtool_ah_espip4_spec ah_ip4_spec; |
405 | struct ethtool_ah_espip4_spec esp_ip4_spec; | 488 | struct ethtool_ah_espip4_spec esp_ip4_spec; |
406 | struct ethtool_rawip4_spec raw_ip4_spec; | ||
407 | struct ethtool_ether_spec ether_spec; | ||
408 | struct ethtool_usrip4_spec usr_ip4_spec; | 489 | struct ethtool_usrip4_spec usr_ip4_spec; |
409 | __u8 hdata[64]; | 490 | struct ethhdr ether_spec; |
410 | } h_u, m_u; /* entry, mask */ | 491 | __u8 hdata[72]; |
492 | } h_u, m_u; | ||
411 | 493 | ||
412 | __u16 vlan_tag; | 494 | __u16 vlan_tag; |
413 | __u16 vlan_tag_mask; | 495 | __u16 vlan_tag_mask; |
414 | __u64 data; /* user-defined flow spec data */ | 496 | __u64 data; |
415 | __u64 data_mask; /* user-defined flow spec mask */ | 497 | __u64 data_mask; |
416 | 498 | ||
417 | /* signed to distinguish between queue and actions (DROP) */ | ||
418 | __s32 action; | 499 | __s32 action; |
419 | #define ETHTOOL_RXNTUPLE_ACTION_DROP -1 | 500 | #define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */ |
501 | #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */ | ||
420 | }; | 502 | }; |
421 | 503 | ||
504 | /** | ||
505 | * struct ethtool_rx_ntuple - command to set or clear RX flow filter | ||
506 | * @cmd: Command number - %ETHTOOL_SRXNTUPLE | ||
507 | * @fs: Flow filter specification | ||
508 | */ | ||
422 | struct ethtool_rx_ntuple { | 509 | struct ethtool_rx_ntuple { |
423 | __u32 cmd; | 510 | __u32 cmd; |
424 | struct ethtool_rx_ntuple_flow_spec fs; | 511 | struct ethtool_rx_ntuple_flow_spec fs; |
@@ -759,22 +846,23 @@ struct ethtool_ops { | |||
759 | #define WAKE_MAGIC (1 << 5) | 846 | #define WAKE_MAGIC (1 << 5) |
760 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ | 847 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ |
761 | 848 | ||
762 | /* L3-L4 network traffic flow types */ | 849 | /* L2-L4 network traffic flow types */ |
763 | #define TCP_V4_FLOW 0x01 | 850 | #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ |
764 | #define UDP_V4_FLOW 0x02 | 851 | #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ |
765 | #define SCTP_V4_FLOW 0x03 | 852 | #define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ |
766 | #define AH_ESP_V4_FLOW 0x04 | 853 | #define AH_ESP_V4_FLOW 0x04 /* hash only */ |
767 | #define TCP_V6_FLOW 0x05 | 854 | #define TCP_V6_FLOW 0x05 /* hash only */ |
768 | #define UDP_V6_FLOW 0x06 | 855 | #define UDP_V6_FLOW 0x06 /* hash only */ |
769 | #define SCTP_V6_FLOW 0x07 | 856 | #define SCTP_V6_FLOW 0x07 /* hash only */ |
770 | #define AH_ESP_V6_FLOW 0x08 | 857 | #define AH_ESP_V6_FLOW 0x08 /* hash only */ |
771 | #define AH_V4_FLOW 0x09 | 858 | #define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ |
772 | #define ESP_V4_FLOW 0x0a | 859 | #define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ |
773 | #define AH_V6_FLOW 0x0b | 860 | #define AH_V6_FLOW 0x0b /* hash only */ |
774 | #define ESP_V6_FLOW 0x0c | 861 | #define ESP_V6_FLOW 0x0c /* hash only */ |
775 | #define IP_USER_FLOW 0x0d | 862 | #define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ |
776 | #define IPV4_FLOW 0x10 | 863 | #define IPV4_FLOW 0x10 /* hash only */ |
777 | #define IPV6_FLOW 0x11 | 864 | #define IPV6_FLOW 0x11 /* hash only */ |
865 | #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ | ||
778 | 866 | ||
779 | /* L3-L4 network traffic flow hash options */ | 867 | /* L3-L4 network traffic flow hash options */ |
780 | #define RXH_L2DA (1 << 1) | 868 | #define RXH_L2DA (1 << 1) |
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index a9cd507f8cd2..28028988c862 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h | |||
@@ -67,6 +67,19 @@ enum fid_type { | |||
67 | * 32 bit parent block number, 32 bit parent generation number | 67 | * 32 bit parent block number, 32 bit parent generation number |
68 | */ | 68 | */ |
69 | FILEID_UDF_WITH_PARENT = 0x52, | 69 | FILEID_UDF_WITH_PARENT = 0x52, |
70 | |||
71 | /* | ||
72 | * 64 bit checkpoint number, 64 bit inode number, | ||
73 | * 32 bit generation number. | ||
74 | */ | ||
75 | FILEID_NILFS_WITHOUT_PARENT = 0x61, | ||
76 | |||
77 | /* | ||
78 | * 64 bit checkpoint number, 64 bit inode number, | ||
79 | * 32 bit generation number, 32 bit parent generation. | ||
80 | * 64 bit parent inode number. | ||
81 | */ | ||
82 | FILEID_NILFS_WITH_PARENT = 0x62, | ||
70 | }; | 83 | }; |
71 | 84 | ||
72 | struct fid { | 85 | struct fid { |
diff --git a/include/linux/fdreg.h b/include/linux/fdreg.h index c2eeb63b72db..61ce64169004 100644 --- a/include/linux/fdreg.h +++ b/include/linux/fdreg.h | |||
@@ -89,7 +89,7 @@ | |||
89 | /* the following commands are new in the 82078. They are not used in the | 89 | /* the following commands are new in the 82078. They are not used in the |
90 | * floppy driver, except the first three. These commands may be useful for apps | 90 | * floppy driver, except the first three. These commands may be useful for apps |
91 | * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at | 91 | * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at |
92 | * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */ | 92 | * http://www.intel.com/design/archives/periphrl/docs/29046803.htm */ |
93 | 93 | ||
94 | #define FD_PARTID 0x18 /* part id ("extended" version cmd) */ | 94 | #define FD_PARTID 0x18 /* part id ("extended" version cmd) */ |
95 | #define FD_SAVE 0x2e /* save fdc regs for later restore */ | 95 | #define FD_SAVE 0x2e /* save fdc regs for later restore */ |
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index f59ed297b661..133c0ba25e30 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -31,7 +31,7 @@ struct embedded_fd_set { | |||
31 | 31 | ||
32 | struct fdtable { | 32 | struct fdtable { |
33 | unsigned int max_fds; | 33 | unsigned int max_fds; |
34 | struct file ** fd; /* current fd array */ | 34 | struct file __rcu **fd; /* current fd array */ |
35 | fd_set *close_on_exec; | 35 | fd_set *close_on_exec; |
36 | fd_set *open_fds; | 36 | fd_set *open_fds; |
37 | struct rcu_head rcu; | 37 | struct rcu_head rcu; |
@@ -46,7 +46,7 @@ struct files_struct { | |||
46 | * read mostly part | 46 | * read mostly part |
47 | */ | 47 | */ |
48 | atomic_t count; | 48 | atomic_t count; |
49 | struct fdtable *fdt; | 49 | struct fdtable __rcu *fdt; |
50 | struct fdtable fdtab; | 50 | struct fdtable fdtab; |
51 | /* | 51 | /* |
52 | * written part on a separate cache line in SMP | 52 | * written part on a separate cache line in SMP |
@@ -55,7 +55,7 @@ struct files_struct { | |||
55 | int next_fd; | 55 | int next_fd; |
56 | struct embedded_fd_set close_on_exec_init; | 56 | struct embedded_fd_set close_on_exec_init; |
57 | struct embedded_fd_set open_fds_init; | 57 | struct embedded_fd_set open_fds_init; |
58 | struct file * fd_array[NR_OPEN_DEFAULT]; | 58 | struct file __rcu * fd_array[NR_OPEN_DEFAULT]; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | #define rcu_dereference_check_fdtable(files, fdtfd) \ | 61 | #define rcu_dereference_check_fdtable(files, fdtfd) \ |
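The __rcu annotations above let sparse (with CONFIG_SPARSE_RCU_POINTER, added earlier in this series) flag direct dereferences of fdt and fd[]. Below is a reader-side sketch, close in spirit to the existing fcheck_files() helper; a real caller must take a reference on the file before dropping the RCU read lock, which is omitted here for brevity.

    #include <linux/fdtable.h>
    #include <linux/rcupdate.h>

    static struct file *lookup_fd_rcu(struct files_struct *files, unsigned int fd)
    {
            struct file *file = NULL;
            struct fdtable *fdt;

            rcu_read_lock();
            fdt = rcu_dereference(files->fdt);
            if (fd < fdt->max_fds)
                    file = rcu_dereference(fdt->fd[fd]);
            rcu_read_unlock();
            return file;
    }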
diff --git a/include/linux/fs.h b/include/linux/fs.h index bb77843de9d6..bb20373d0b46 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -135,12 +135,12 @@ struct inodes_stat_t { | |||
135 | * immediately after submission. The write equivalent | 135 | * immediately after submission. The write equivalent |
136 | * of READ_SYNC. | 136 | * of READ_SYNC. |
137 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. | 137 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. |
138 | * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all | 138 | * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. |
139 | * previously submitted writes must be safely on storage | 139 | * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on |
140 | * before this one is started. Also guarantees that when | 140 | * non-volatile media on completion. |
141 | * this write is complete, it itself is also safely on | 141 | * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded |
142 | * storage. Prevents reordering of writes on both sides | 142 | * by a cache flush and data is guaranteed to be on |
143 | * of this IO. | 143 | * non-volatile media on completion. |
144 | * | 144 | * |
145 | */ | 145 | */ |
146 | #define RW_MASK REQ_WRITE | 146 | #define RW_MASK REQ_WRITE |
@@ -156,16 +156,12 @@ struct inodes_stat_t { | |||
156 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | 156 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) |
157 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) | 157 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) |
158 | #define WRITE_META (WRITE | REQ_META) | 158 | #define WRITE_META (WRITE | REQ_META) |
159 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ | 159 | #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
160 | REQ_HARDBARRIER) | 160 | REQ_FLUSH) |
161 | 161 | #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ | |
162 | /* | 162 | REQ_FUA) |
163 | * These aren't really reads or writes, they pass down information about | 163 | #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
164 | * parts of device that are now unused by the file system. | 164 | REQ_FLUSH | REQ_FUA) |
165 | */ | ||
166 | #define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) | ||
167 | #define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) | ||
168 | #define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE) | ||
169 | 165 | ||
170 | #define SEL_IN 1 | 166 | #define SEL_IN 1 |
171 | #define SEL_OUT 2 | 167 | #define SEL_OUT 2 |
@@ -1099,10 +1095,6 @@ struct file_lock { | |||
1099 | 1095 | ||
1100 | #include <linux/fcntl.h> | 1096 | #include <linux/fcntl.h> |
1101 | 1097 | ||
1102 | /* temporary stubs for BKL removal */ | ||
1103 | #define lock_flocks() lock_kernel() | ||
1104 | #define unlock_flocks() unlock_kernel() | ||
1105 | |||
1106 | extern void send_sigio(struct fown_struct *fown, int fd, int band); | 1098 | extern void send_sigio(struct fown_struct *fown, int fd, int band); |
1107 | 1099 | ||
1108 | #ifdef CONFIG_FILE_LOCKING | 1100 | #ifdef CONFIG_FILE_LOCKING |
@@ -1141,6 +1133,8 @@ extern int vfs_setlease(struct file *, long, struct file_lock **); | |||
1141 | extern int lease_modify(struct file_lock **, int); | 1133 | extern int lease_modify(struct file_lock **, int); |
1142 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); | 1134 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); |
1143 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); | 1135 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); |
1136 | extern void lock_flocks(void); | ||
1137 | extern void unlock_flocks(void); | ||
1144 | #else /* !CONFIG_FILE_LOCKING */ | 1138 | #else /* !CONFIG_FILE_LOCKING */ |
1145 | static inline int fcntl_getlk(struct file *file, struct flock __user *user) | 1139 | static inline int fcntl_getlk(struct file *file, struct flock __user *user) |
1146 | { | 1140 | { |
@@ -1283,6 +1277,14 @@ static inline int lock_may_write(struct inode *inode, loff_t start, | |||
1283 | return 1; | 1277 | return 1; |
1284 | } | 1278 | } |
1285 | 1279 | ||
1280 | static inline void lock_flocks(void) | ||
1281 | { | ||
1282 | } | ||
1283 | |||
1284 | static inline void unlock_flocks(void) | ||
1285 | { | ||
1286 | } | ||
1287 | |||
1286 | #endif /* !CONFIG_FILE_LOCKING */ | 1288 | #endif /* !CONFIG_FILE_LOCKING */ |
1287 | 1289 | ||
1288 | 1290 | ||
@@ -1390,7 +1392,7 @@ struct super_block { | |||
1390 | * Saved mount options for lazy filesystems using | 1392 | * Saved mount options for lazy filesystems using |
1391 | * generic_show_options() | 1393 | * generic_show_options() |
1392 | */ | 1394 | */ |
1393 | char *s_options; | 1395 | char __rcu *s_options; |
1394 | }; | 1396 | }; |
1395 | 1397 | ||
1396 | extern struct timespec current_fs_time(struct super_block *sb); | 1398 | extern struct timespec current_fs_time(struct super_block *sb); |
@@ -2384,6 +2386,8 @@ extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, | |||
2384 | 2386 | ||
2385 | extern int generic_file_fsync(struct file *, int); | 2387 | extern int generic_file_fsync(struct file *, int); |
2386 | 2388 | ||
2389 | extern int generic_check_addressable(unsigned, u64); | ||
2390 | |||
2387 | #ifdef CONFIG_MIGRATION | 2391 | #ifdef CONFIG_MIGRATION |
2388 | extern int buffer_migrate_page(struct address_space *, | 2392 | extern int buffer_migrate_page(struct address_space *, |
2389 | struct page *, struct page *); | 2393 | struct page *, struct page *); |
@@ -2460,6 +2464,7 @@ static const struct file_operations __fops = { \ | |||
2460 | .release = simple_attr_release, \ | 2464 | .release = simple_attr_release, \ |
2461 | .read = simple_attr_read, \ | 2465 | .read = simple_attr_read, \ |
2462 | .write = simple_attr_write, \ | 2466 | .write = simple_attr_write, \ |
2467 | .llseek = generic_file_llseek, \ | ||
2463 | }; | 2468 | }; |
2464 | 2469 | ||
2465 | static inline void __attribute__((format(printf, 1, 2))) | 2470 | static inline void __attribute__((format(printf, 1, 2))) |
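The fs.h changes above retire the WRITE_BARRIER and DISCARD_BARRIER bundles in favour of the explicit flush/FUA combinations. A minimal sketch of the intended replacement usage, assuming an already-built bio; submit_bio() is the long-standing block-layer entry point, and example_commit_write is an illustrative name only:

        /*
         * Sketch only: write a commit block so the device cache is flushed
         * first and the block itself reaches stable storage (FUA),
         * covering what WRITE_BARRIER used to provide.
         */
        static void example_commit_write(struct bio *bio)
        {
                submit_bio(WRITE_FLUSH_FUA, bio);
        }
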
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 28e33fea5107..4eb56ed75fbc 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -58,17 +58,35 @@ enum fsl_usb2_phy_modes { | |||
58 | FSL_USB2_PHY_SERIAL, | 58 | FSL_USB2_PHY_SERIAL, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct clk; | ||
62 | struct platform_device; | ||
63 | |||
61 | struct fsl_usb2_platform_data { | 64 | struct fsl_usb2_platform_data { |
62 | /* board specific information */ | 65 | /* board specific information */ |
63 | enum fsl_usb2_operating_modes operating_mode; | 66 | enum fsl_usb2_operating_modes operating_mode; |
64 | enum fsl_usb2_phy_modes phy_mode; | 67 | enum fsl_usb2_phy_modes phy_mode; |
65 | unsigned int port_enables; | 68 | unsigned int port_enables; |
69 | unsigned int workaround; | ||
70 | |||
71 | int (*init)(struct platform_device *); | ||
72 | void (*exit)(struct platform_device *); | ||
73 | void __iomem *regs; /* ioremap'd register base */ | ||
74 | struct clk *clk; | ||
75 | unsigned big_endian_mmio:1; | ||
76 | unsigned big_endian_desc:1; | ||
77 | unsigned es:1; /* need USBMODE:ES */ | ||
78 | unsigned le_setup_buf:1; | ||
79 | unsigned have_sysif_regs:1; | ||
80 | unsigned invert_drvvbus:1; | ||
81 | unsigned invert_pwr_fault:1; | ||
66 | }; | 82 | }; |
67 | 83 | ||
68 | /* Flags in fsl_usb2_mph_platform_data */ | 84 | /* Flags in fsl_usb2_mph_platform_data */ |
69 | #define FSL_USB2_PORT0_ENABLED 0x00000001 | 85 | #define FSL_USB2_PORT0_ENABLED 0x00000001 |
70 | #define FSL_USB2_PORT1_ENABLED 0x00000002 | 86 | #define FSL_USB2_PORT1_ENABLED 0x00000002 |
71 | 87 | ||
88 | #define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0) | ||
89 | |||
72 | struct spi_device; | 90 | struct spi_device; |
73 | 91 | ||
74 | struct fsl_spi_platform_data { | 92 | struct fsl_spi_platform_data { |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 02b8b24f8f51..8beabb958f61 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -191,8 +191,8 @@ struct ftrace_event_call { | |||
191 | unsigned int flags; | 191 | unsigned int flags; |
192 | 192 | ||
193 | #ifdef CONFIG_PERF_EVENTS | 193 | #ifdef CONFIG_PERF_EVENTS |
194 | int perf_refcount; | 194 | int perf_refcount; |
195 | struct hlist_head *perf_events; | 195 | struct hlist_head __percpu *perf_events; |
196 | #endif | 196 | #endif |
197 | }; | 197 | }; |
198 | 198 | ||
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); | |||
252 | 252 | ||
253 | extern int perf_trace_init(struct perf_event *event); | 253 | extern int perf_trace_init(struct perf_event *event); |
254 | extern void perf_trace_destroy(struct perf_event *event); | 254 | extern void perf_trace_destroy(struct perf_event *event); |
255 | extern int perf_trace_enable(struct perf_event *event); | 255 | extern int perf_trace_add(struct perf_event *event, int flags); |
256 | extern void perf_trace_disable(struct perf_event *event); | 256 | extern void perf_trace_del(struct perf_event *event, int flags); |
257 | extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, | 257 | extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, |
258 | char *filter_str); | 258 | char *filter_str); |
259 | extern void ftrace_profile_free_filter(struct perf_event *event); | 259 | extern void ftrace_profile_free_filter(struct perf_event *event); |
diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 361d1cc288d0..b65a6f472775 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h | |||
@@ -53,9 +53,7 @@ struct gameport { | |||
53 | #define to_gameport_port(d) container_of(d, struct gameport, dev) | 53 | #define to_gameport_port(d) container_of(d, struct gameport, dev) |
54 | 54 | ||
55 | struct gameport_driver { | 55 | struct gameport_driver { |
56 | 56 | const char *description; | |
57 | void *private; | ||
58 | char *description; | ||
59 | 57 | ||
60 | int (*connect)(struct gameport *, struct gameport_driver *drv); | 58 | int (*connect)(struct gameport *, struct gameport_driver *drv); |
61 | int (*reconnect)(struct gameport *); | 59 | int (*reconnect)(struct gameport *); |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5f2f4c4d8fb0..7a7b9c1644e4 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/kdev_t.h> | 13 | #include <linux/kdev_t.h> |
14 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_BLOCK | 17 | #ifdef CONFIG_BLOCK |
17 | 18 | ||
@@ -86,7 +87,15 @@ struct disk_stats { | |||
86 | unsigned long io_ticks; | 87 | unsigned long io_ticks; |
87 | unsigned long time_in_queue; | 88 | unsigned long time_in_queue; |
88 | }; | 89 | }; |
89 | 90 | ||
91 | #define PARTITION_META_INFO_VOLNAMELTH 64 | ||
92 | #define PARTITION_META_INFO_UUIDLTH 16 | ||
93 | |||
94 | struct partition_meta_info { | ||
95 | u8 uuid[PARTITION_META_INFO_UUIDLTH]; /* always big endian */ | ||
96 | u8 volname[PARTITION_META_INFO_VOLNAMELTH]; | ||
97 | }; | ||
98 | |||
90 | struct hd_struct { | 99 | struct hd_struct { |
91 | sector_t start_sect; | 100 | sector_t start_sect; |
92 | sector_t nr_sects; | 101 | sector_t nr_sects; |
@@ -95,6 +104,7 @@ struct hd_struct { | |||
95 | struct device __dev; | 104 | struct device __dev; |
96 | struct kobject *holder_dir; | 105 | struct kobject *holder_dir; |
97 | int policy, partno; | 106 | int policy, partno; |
107 | struct partition_meta_info *info; | ||
98 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 108 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
99 | int make_it_fail; | 109 | int make_it_fail; |
100 | #endif | 110 | #endif |
@@ -129,8 +139,8 @@ struct blk_scsi_cmd_filter { | |||
129 | struct disk_part_tbl { | 139 | struct disk_part_tbl { |
130 | struct rcu_head rcu_head; | 140 | struct rcu_head rcu_head; |
131 | int len; | 141 | int len; |
132 | struct hd_struct *last_lookup; | 142 | struct hd_struct __rcu *last_lookup; |
133 | struct hd_struct *part[]; | 143 | struct hd_struct __rcu *part[]; |
134 | }; | 144 | }; |
135 | 145 | ||
136 | struct gendisk { | 146 | struct gendisk { |
@@ -149,7 +159,7 @@ struct gendisk { | |||
149 | * non-critical accesses use RCU. Always access through | 159 | * non-critical accesses use RCU. Always access through |
150 | * helpers. | 160 | * helpers. |
151 | */ | 161 | */ |
152 | struct disk_part_tbl *part_tbl; | 162 | struct disk_part_tbl __rcu *part_tbl; |
153 | struct hd_struct part0; | 163 | struct hd_struct part0; |
154 | 164 | ||
155 | const struct block_device_operations *fops; | 165 | const struct block_device_operations *fops; |
@@ -181,6 +191,30 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part) | |||
181 | return NULL; | 191 | return NULL; |
182 | } | 192 | } |
183 | 193 | ||
194 | static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) | ||
195 | { | ||
196 | int i; | ||
197 | for (i = 0; i < 16; ++i) { | ||
198 | *to++ = (hex_to_bin(*uuid_str) << 4) | | ||
199 | (hex_to_bin(*(uuid_str + 1))); | ||
200 | uuid_str += 2; | ||
201 | switch (i) { | ||
202 | case 3: | ||
203 | case 5: | ||
204 | case 7: | ||
205 | case 9: | ||
206 | uuid_str++; | ||
207 | continue; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static inline char *part_unpack_uuid(const u8 *uuid, char *out) | ||
213 | { | ||
214 | sprintf(out, "%pU", uuid); | ||
215 | return out; | ||
216 | } | ||
217 | |||
184 | static inline int disk_max_parts(struct gendisk *disk) | 218 | static inline int disk_max_parts(struct gendisk *disk) |
185 | { | 219 | { |
186 | if (disk->flags & GENHD_FL_EXT_DEVT) | 220 | if (disk->flags & GENHD_FL_EXT_DEVT) |
@@ -342,6 +376,19 @@ static inline int part_in_flight(struct hd_struct *part) | |||
342 | return part->in_flight[0] + part->in_flight[1]; | 376 | return part->in_flight[0] + part->in_flight[1]; |
343 | } | 377 | } |
344 | 378 | ||
379 | static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) | ||
380 | { | ||
381 | if (disk) | ||
382 | return kzalloc_node(sizeof(struct partition_meta_info), | ||
383 | GFP_KERNEL, disk->node_id); | ||
384 | return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); | ||
385 | } | ||
386 | |||
387 | static inline void free_part_info(struct hd_struct *part) | ||
388 | { | ||
389 | kfree(part->info); | ||
390 | } | ||
391 | |||
345 | /* block/blk-core.c */ | 392 | /* block/blk-core.c */ |
346 | extern void part_round_stats(int cpu, struct hd_struct *part); | 393 | extern void part_round_stats(int cpu, struct hd_struct *part); |
347 | 394 | ||
@@ -533,7 +580,9 @@ extern int disk_expand_part_tbl(struct gendisk *disk, int target); | |||
533 | extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); | 580 | extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); |
534 | extern struct hd_struct * __must_check add_partition(struct gendisk *disk, | 581 | extern struct hd_struct * __must_check add_partition(struct gendisk *disk, |
535 | int partno, sector_t start, | 582 | int partno, sector_t start, |
536 | sector_t len, int flags); | 583 | sector_t len, int flags, |
584 | struct partition_meta_info | ||
585 | *info); | ||
537 | extern void delete_partition(struct gendisk *, int); | 586 | extern void delete_partition(struct gendisk *, int); |
538 | extern void printk_all_partitions(void); | 587 | extern void printk_all_partitions(void); |
539 | 588 | ||
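The new partition_meta_info carries a raw 16-byte UUID plus a volume name, and part_pack_uuid()/part_unpack_uuid() convert between the textual and packed forms. A small illustrative fragment; the UUID value is a placeholder:

        struct partition_meta_info info = { };
        char buf[37];   /* "%pU" prints 36 characters plus the terminating NUL */

        /* canonical text form -> 16 raw bytes, the dashes are skipped */
        part_pack_uuid((const u8 *)"00112233-4455-6677-8899-aabbccddeeff", info.uuid);

        /* and back again, e.g. for printing */
        part_unpack_uuid(info.uuid, buf);
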
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h new file mode 100644 index 000000000000..096659169215 --- /dev/null +++ b/include/linux/gpio-fan.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * include/linux/gpio-fan.h | ||
3 | * | ||
4 | * Platform data structure for GPIO fan driver | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #ifndef __LINUX_GPIO_FAN_H | ||
12 | #define __LINUX_GPIO_FAN_H | ||
13 | |||
14 | struct gpio_fan_alarm { | ||
15 | unsigned gpio; | ||
16 | unsigned active_low; | ||
17 | }; | ||
18 | |||
19 | struct gpio_fan_speed { | ||
20 | int rpm; | ||
21 | int ctrl_val; | ||
22 | }; | ||
23 | |||
24 | struct gpio_fan_platform_data { | ||
25 | int num_ctrl; | ||
26 | unsigned *ctrl; /* fan control GPIOs. */ | ||
27 | struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */ | ||
28 | /* | ||
29 | * Speed conversion array: rpm from/to GPIO bit field. | ||
30 | * This array _must_ be sorted in ascending rpm order. | ||
31 | */ | ||
32 | int num_speed; | ||
33 | struct gpio_fan_speed *speed; | ||
34 | }; | ||
35 | |||
36 | #endif /* __LINUX_GPIO_FAN_H */ | ||
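For reference, a board file would describe its fan roughly as below; the GPIO numbers, RPM values and control encodings are made-up examples, not taken from any real platform:

        /* hypothetical two-wire speed control plus an active-low alarm GPIO */
        static struct gpio_fan_alarm example_fan_alarm = {
                .gpio       = 12,
                .active_low = 1,
        };

        static unsigned example_fan_ctrl[] = { 14, 15 };

        static struct gpio_fan_speed example_fan_speed[] = {
                { .rpm =    0, .ctrl_val = 0 },
                { .rpm = 3000, .ctrl_val = 1 },
                { .rpm = 6000, .ctrl_val = 3 },
        };

        static struct gpio_fan_platform_data example_fan_data = {
                .num_ctrl  = ARRAY_SIZE(example_fan_ctrl),
                .ctrl      = example_fan_ctrl,
                .alarm     = &example_fan_alarm,
                .num_speed = ARRAY_SIZE(example_fan_speed),
                .speed     = example_fan_speed, /* must stay sorted by rpm */
        };
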
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b387669dab..8a389b608ce3 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/lockdep.h> | 8 | #include <linux/lockdep.h> |
9 | #include <linux/ftrace_irq.h> | 9 | #include <linux/ftrace_irq.h> |
10 | #include <asm/hardirq.h> | 10 | #include <asm/hardirq.h> |
11 | #include <asm/system.h> | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * We put the hardirq and softirq counter into the preemption | 13 | * We put the hardirq and softirq counter into the preemption |
@@ -64,6 +63,8 @@ | |||
64 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | 63 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
65 | #define NMI_OFFSET (1UL << NMI_SHIFT) | 64 | #define NMI_OFFSET (1UL << NMI_SHIFT) |
66 | 65 | ||
66 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | ||
67 | |||
67 | #ifndef PREEMPT_ACTIVE | 68 | #ifndef PREEMPT_ACTIVE |
68 | #define PREEMPT_ACTIVE_BITS 1 | 69 | #define PREEMPT_ACTIVE_BITS 1 |
69 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) | 70 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) |
@@ -82,10 +83,13 @@ | |||
82 | /* | 83 | /* |
83 | * Are we doing bottom half or hardware interrupt processing? | 84 | * Are we doing bottom half or hardware interrupt processing? |
84 | * Are we in a softirq context? Interrupt context? | 85 | * Are we in a softirq context? Interrupt context? |
86 | * in_softirq - Are we currently processing softirq or have bh disabled? | ||
87 | * in_serving_softirq - Are we currently processing softirq? | ||
85 | */ | 88 | */ |
86 | #define in_irq() (hardirq_count()) | 89 | #define in_irq() (hardirq_count()) |
87 | #define in_softirq() (softirq_count()) | 90 | #define in_softirq() (softirq_count()) |
88 | #define in_interrupt() (irq_count()) | 91 | #define in_interrupt() (irq_count()) |
92 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | ||
89 | 93 | ||
90 | /* | 94 | /* |
91 | * Are we in NMI context? | 95 | * Are we in NMI context? |
@@ -132,14 +136,16 @@ extern void synchronize_irq(unsigned int irq); | |||
132 | 136 | ||
133 | struct task_struct; | 137 | struct task_struct; |
134 | 138 | ||
135 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 139 | #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) |
136 | static inline void account_system_vtime(struct task_struct *tsk) | 140 | static inline void account_system_vtime(struct task_struct *tsk) |
137 | { | 141 | { |
138 | } | 142 | } |
143 | #else | ||
144 | extern void account_system_vtime(struct task_struct *tsk); | ||
139 | #endif | 145 | #endif |
140 | 146 | ||
141 | #if defined(CONFIG_NO_HZ) | 147 | #if defined(CONFIG_NO_HZ) |
142 | #if defined(CONFIG_TINY_RCU) | 148 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
143 | extern void rcu_enter_nohz(void); | 149 | extern void rcu_enter_nohz(void); |
144 | extern void rcu_exit_nohz(void); | 150 | extern void rcu_exit_nohz(void); |
145 | 151 | ||
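The distinction documented in the new hardirq.h comment can be summed up in a short sketch: local_bh_disable() raises the softirq count by SOFTIRQ_DISABLE_OFFSET, so in_softirq() becomes true while in_serving_softirq() stays false until a softirq handler is actually running (example_ctx_check is illustrative only):

        static void example_ctx_check(void)
        {
                local_bh_disable();
                WARN_ON(!in_softirq());         /* true: bottom halves are disabled */
                WARN_ON(in_serving_softirq());  /* false: no softirq is being served */
                local_bh_enable();
        }
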
diff --git a/include/linux/hid.h b/include/linux/hid.h index 42a0f1d11365..bb0f56f5c01e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -316,6 +316,7 @@ struct hid_item { | |||
316 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 | 316 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 |
317 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 | 317 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 |
318 | #define HID_QUIRK_NO_IGNORE 0x40000000 | 318 | #define HID_QUIRK_NO_IGNORE 0x40000000 |
319 | #define HID_QUIRK_NO_INPUT_SYNC 0x80000000 | ||
319 | 320 | ||
320 | /* | 321 | /* |
321 | * This is the global environment of the parser. This information is | 322 | * This is the global environment of the parser. This information is |
@@ -626,8 +627,8 @@ struct hid_driver { | |||
626 | int (*event)(struct hid_device *hdev, struct hid_field *field, | 627 | int (*event)(struct hid_device *hdev, struct hid_field *field, |
627 | struct hid_usage *usage, __s32 value); | 628 | struct hid_usage *usage, __s32 value); |
628 | 629 | ||
629 | void (*report_fixup)(struct hid_device *hdev, __u8 *buf, | 630 | __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, |
630 | unsigned int size); | 631 | unsigned int *size); |
631 | 632 | ||
632 | int (*input_mapping)(struct hid_device *hdev, | 633 | int (*input_mapping)(struct hid_device *hdev, |
633 | struct hid_input *hidinput, struct hid_field *field, | 634 | struct hid_input *hidinput, struct hid_field *field, |
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h index bb6f58baf319..a3f481a3063b 100644 --- a/include/linux/hiddev.h +++ b/include/linux/hiddev.h | |||
@@ -226,8 +226,6 @@ void hiddev_disconnect(struct hid_device *); | |||
226 | void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, | 226 | void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, |
227 | struct hid_usage *usage, __s32 value); | 227 | struct hid_usage *usage, __s32 value); |
228 | void hiddev_report_event(struct hid_device *hid, struct hid_report *report); | 228 | void hiddev_report_event(struct hid_device *hid, struct hid_report *report); |
229 | int __init hiddev_init(void); | ||
230 | void hiddev_exit(void); | ||
231 | #else | 229 | #else |
232 | static inline int hiddev_connect(struct hid_device *hid, | 230 | static inline int hiddev_connect(struct hid_device *hid, |
233 | unsigned int force) | 231 | unsigned int force) |
@@ -236,8 +234,6 @@ static inline void hiddev_disconnect(struct hid_device *hid) { } | |||
236 | static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, | 234 | static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, |
237 | struct hid_usage *usage, __s32 value) { } | 235 | struct hid_usage *usage, __s32 value) { } |
238 | static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } | 236 | static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } |
239 | static inline int hiddev_init(void) { return 0; } | ||
240 | static inline void hiddev_exit(void) { } | ||
241 | #endif | 237 | #endif |
242 | 238 | ||
243 | #endif | 239 | #endif |
diff --git a/include/linux/htirq.h b/include/linux/htirq.h index c96ea46737d0..70a1dbbf2093 100644 --- a/include/linux/htirq.h +++ b/include/linux/htirq.h | |||
@@ -9,8 +9,9 @@ struct ht_irq_msg { | |||
9 | /* Helper functions.. */ | 9 | /* Helper functions.. */ |
10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
12 | void mask_ht_irq(unsigned int irq); | 12 | struct irq_data; |
13 | void unmask_ht_irq(unsigned int irq); | 13 | void mask_ht_irq(struct irq_data *data); |
14 | void unmask_ht_irq(struct irq_data *data); | ||
14 | 15 | ||
15 | /* The arch hook for getting things started */ | 16 | /* The arch hook for getting things started */ |
16 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); | 17 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f479700df61b..943c76b3d4bb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -43,7 +43,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to, | |||
43 | struct vm_area_struct *vma, | 43 | struct vm_area_struct *vma, |
44 | int acctflags); | 44 | int acctflags); |
45 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); | 45 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); |
46 | void __isolate_hwpoisoned_huge_page(struct page *page); | 46 | int dequeue_hwpoisoned_huge_page(struct page *page); |
47 | void copy_huge_page(struct page *dst, struct page *src); | ||
47 | 48 | ||
48 | extern unsigned long hugepages_treat_as_movable; | 49 | extern unsigned long hugepages_treat_as_movable; |
49 | extern const unsigned long hugetlb_zero, hugetlb_infinity; | 50 | extern const unsigned long hugetlb_zero, hugetlb_infinity; |
@@ -101,7 +102,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) | |||
101 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) | 102 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) |
102 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) | 103 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) |
103 | #define huge_pte_offset(mm, address) 0 | 104 | #define huge_pte_offset(mm, address) 0 |
104 | #define __isolate_hwpoisoned_huge_page(page) 0 | 105 | #define dequeue_hwpoisoned_huge_page(page) 0 |
106 | static inline void copy_huge_page(struct page *dst, struct page *src) | ||
107 | { | ||
108 | } | ||
105 | 109 | ||
106 | #define hugetlb_change_protection(vma, address, end, newprot) | 110 | #define hugetlb_change_protection(vma, address, end, newprot) |
107 | 111 | ||
@@ -228,6 +232,8 @@ struct huge_bootmem_page { | |||
228 | struct hstate *hstate; | 232 | struct hstate *hstate; |
229 | }; | 233 | }; |
230 | 234 | ||
235 | struct page *alloc_huge_page_node(struct hstate *h, int nid); | ||
236 | |||
231 | /* arch callback */ | 237 | /* arch callback */ |
232 | int __init alloc_bootmem_huge_page(struct hstate *h); | 238 | int __init alloc_bootmem_huge_page(struct hstate *h); |
233 | 239 | ||
@@ -301,8 +307,14 @@ static inline struct hstate *page_hstate(struct page *page) | |||
301 | return size_to_hstate(PAGE_SIZE << compound_order(page)); | 307 | return size_to_hstate(PAGE_SIZE << compound_order(page)); |
302 | } | 308 | } |
303 | 309 | ||
310 | static inline unsigned hstate_index_to_shift(unsigned index) | ||
311 | { | ||
312 | return hstates[index].order + PAGE_SHIFT; | ||
313 | } | ||
314 | |||
304 | #else | 315 | #else |
305 | struct hstate {}; | 316 | struct hstate {}; |
317 | #define alloc_huge_page_node(h, nid) NULL | ||
306 | #define alloc_bootmem_huge_page(h) NULL | 318 | #define alloc_bootmem_huge_page(h) NULL |
307 | #define hstate_file(f) NULL | 319 | #define hstate_file(f) NULL |
308 | #define hstate_vma(v) NULL | 320 | #define hstate_vma(v) NULL |
@@ -317,6 +329,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) | |||
317 | { | 329 | { |
318 | return 1; | 330 | return 1; |
319 | } | 331 | } |
332 | #define hstate_index_to_shift(index) 0 | ||
320 | #endif | 333 | #endif |
321 | 334 | ||
322 | #endif /* _LINUX_HUGETLB_H */ | 335 | #endif /* _LINUX_HUGETLB_H */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 4bae0b72ed3c..1f66fa06a97c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -384,11 +384,15 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) | |||
384 | dev_set_drvdata(&dev->dev, data); | 384 | dev_set_drvdata(&dev->dev, data); |
385 | } | 385 | } |
386 | 386 | ||
387 | static inline int i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) | 387 | static inline struct i2c_adapter * |
388 | i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) | ||
388 | { | 389 | { |
389 | return adapter->dev.parent != NULL | 390 | struct device *parent = adapter->dev.parent; |
390 | && adapter->dev.parent->bus == &i2c_bus_type | 391 | |
391 | && adapter->dev.parent->type == &i2c_adapter_type; | 392 | if (parent != NULL && parent->type == &i2c_adapter_type) |
393 | return to_i2c_adapter(parent); | ||
394 | else | ||
395 | return NULL; | ||
392 | } | 396 | } |
393 | 397 | ||
394 | /* Adapter locking functions, exported for shared pin cases */ | 398 | /* Adapter locking functions, exported for shared pin cases */ |
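Since i2c_parent_is_i2c_adapter() now hands back the parent adapter instead of a boolean, walking a chain of muxed bus segments up to the root becomes a simple loop; a sketch (example_root_adapter is an illustrative name, not an existing helper):

        static struct i2c_adapter *example_root_adapter(struct i2c_adapter *adap)
        {
                struct i2c_adapter *parent;

                /* keep climbing while the parent is itself an I2C adapter */
                while ((parent = i2c_parent_is_i2c_adapter(adap)))
                        adap = parent;

                return adap;
        }
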
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 6de90bfc6acd..4793d8a7f480 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
@@ -553,8 +553,12 @@ extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); | |||
553 | extern int twl4030_remove_script(u8 flags); | 553 | extern int twl4030_remove_script(u8 flags); |
554 | 554 | ||
555 | struct twl4030_codec_audio_data { | 555 | struct twl4030_codec_audio_data { |
556 | unsigned int audio_mclk; | 556 | unsigned int audio_mclk; /* not used, will be removed */ |
557 | unsigned int digimic_delay; /* in ms */ | ||
557 | unsigned int ramp_delay_value; | 558 | unsigned int ramp_delay_value; |
559 | unsigned int offset_cncl_path; | ||
560 | unsigned int check_defaults:1; | ||
561 | unsigned int reset_registers:1; | ||
558 | unsigned int hs_extmute:1; | 562 | unsigned int hs_extmute:1; |
559 | void (*set_hs_extmute)(int mute); | 563 | void (*set_hs_extmute)(int mute); |
560 | }; | 564 | }; |
diff --git a/include/linux/idr.h b/include/linux/idr.h index e968db71e33a..928ae712709f 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -50,14 +50,14 @@ | |||
50 | 50 | ||
51 | struct idr_layer { | 51 | struct idr_layer { |
52 | unsigned long bitmap; /* A zero bit means "space here" */ | 52 | unsigned long bitmap; /* A zero bit means "space here" */ |
53 | struct idr_layer *ary[1<<IDR_BITS]; | 53 | struct idr_layer __rcu *ary[1<<IDR_BITS]; |
54 | int count; /* When zero, we can release it */ | 54 | int count; /* When zero, we can release it */ |
55 | int layer; /* distance from leaf */ | 55 | int layer; /* distance from leaf */ |
56 | struct rcu_head rcu_head; | 56 | struct rcu_head rcu_head; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct idr { | 59 | struct idr { |
60 | struct idr_layer *top; | 60 | struct idr_layer __rcu *top; |
61 | struct idr_layer *id_free; | 61 | struct idr_layer *id_free; |
62 | int layers; /* only valid without concurrent changes */ | 62 | int layers; /* only valid without concurrent changes */ |
63 | int id_free_cnt; | 63 | int id_free_cnt; |
@@ -117,10 +117,13 @@ void idr_init(struct idr *idp); | |||
117 | /* | 117 | /* |
118 | * IDA - IDR based id allocator, use when translation from id to | 118 | * IDA - IDR based id allocator, use when translation from id to |
119 | * pointer isn't necessary. | 119 | * pointer isn't necessary. |
120 | * | ||
121 | * IDA_BITMAP_LONGS is calculated to be one less to accommodate | ||
122 | * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. | ||
120 | */ | 123 | */ |
121 | #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ | 124 | #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ |
122 | #define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) | 125 | #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) |
123 | #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) | 126 | #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) |
124 | 127 | ||
125 | struct ida_bitmap { | 128 | struct ida_bitmap { |
126 | long nr_busy; | 129 | long nr_busy; |
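Worked numbers behind the new IDA comment, for a 64-bit build (sizeof(long) == 8):

        /*
         * IDA_BITMAP_LONGS = 128 / 8 - 1 = 15
         * IDA_BITMAP_BITS  = 15 * 8 * 8  = 960 ids per chunk
         *
         * Adding the leading nr_busy counter gives 16 longs in total,
         * i.e. struct ida_bitmap fills the 128-byte IDA_CHUNK_SIZE exactly.
         */
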
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 97b2eae6a22c..ed5a03cbe184 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -986,6 +986,7 @@ struct ieee80211_ht_info { | |||
986 | #define WLAN_AUTH_OPEN 0 | 986 | #define WLAN_AUTH_OPEN 0 |
987 | #define WLAN_AUTH_SHARED_KEY 1 | 987 | #define WLAN_AUTH_SHARED_KEY 1 |
988 | #define WLAN_AUTH_FT 2 | 988 | #define WLAN_AUTH_FT 2 |
989 | #define WLAN_AUTH_SAE 3 | ||
989 | #define WLAN_AUTH_LEAP 128 | 990 | #define WLAN_AUTH_LEAP 128 |
990 | 991 | ||
991 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 992 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
@@ -1072,6 +1073,10 @@ enum ieee80211_statuscode { | |||
1072 | WLAN_STATUS_NO_DIRECT_LINK = 48, | 1073 | WLAN_STATUS_NO_DIRECT_LINK = 48, |
1073 | WLAN_STATUS_STA_NOT_PRESENT = 49, | 1074 | WLAN_STATUS_STA_NOT_PRESENT = 49, |
1074 | WLAN_STATUS_STA_NOT_QSTA = 50, | 1075 | WLAN_STATUS_STA_NOT_QSTA = 50, |
1076 | /* 802.11s */ | ||
1077 | WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, | ||
1078 | WLAN_STATUS_FCG_NOT_SUPP = 78, | ||
1079 | WLAN_STATUS_STA_NO_TBTT = 78, | ||
1075 | }; | 1080 | }; |
1076 | 1081 | ||
1077 | 1082 | ||
@@ -1112,6 +1117,22 @@ enum ieee80211_reasoncode { | |||
1112 | WLAN_REASON_QSTA_REQUIRE_SETUP = 38, | 1117 | WLAN_REASON_QSTA_REQUIRE_SETUP = 38, |
1113 | WLAN_REASON_QSTA_TIMEOUT = 39, | 1118 | WLAN_REASON_QSTA_TIMEOUT = 39, |
1114 | WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, | 1119 | WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, |
1120 | /* 802.11s */ | ||
1121 | WLAN_REASON_MESH_PEER_CANCELED = 52, | ||
1122 | WLAN_REASON_MESH_MAX_PEERS = 53, | ||
1123 | WLAN_REASON_MESH_CONFIG = 54, | ||
1124 | WLAN_REASON_MESH_CLOSE = 55, | ||
1125 | WLAN_REASON_MESH_MAX_RETRIES = 56, | ||
1126 | WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, | ||
1127 | WLAN_REASON_MESH_INVALID_GTK = 58, | ||
1128 | WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, | ||
1129 | WLAN_REASON_MESH_INVALID_SECURITY = 60, | ||
1130 | WLAN_REASON_MESH_PATH_ERROR = 61, | ||
1131 | WLAN_REASON_MESH_PATH_NOFORWARD = 62, | ||
1132 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, | ||
1133 | WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, | ||
1134 | WLAN_REASON_MESH_CHAN_REGULATORY = 65, | ||
1135 | WLAN_REASON_MESH_CHAN = 66, | ||
1115 | }; | 1136 | }; |
1116 | 1137 | ||
1117 | 1138 | ||
@@ -1139,20 +1160,33 @@ enum ieee80211_eid { | |||
1139 | WLAN_EID_TS_DELAY = 43, | 1160 | WLAN_EID_TS_DELAY = 43, |
1140 | WLAN_EID_TCLAS_PROCESSING = 44, | 1161 | WLAN_EID_TCLAS_PROCESSING = 44, |
1141 | WLAN_EID_QOS_CAPA = 46, | 1162 | WLAN_EID_QOS_CAPA = 46, |
1142 | /* 802.11s | 1163 | /* 802.11s */ |
1143 | * | 1164 | WLAN_EID_MESH_CONFIG = 113, |
1144 | * All mesh EID numbers are pending IEEE 802.11 ANA approval. | 1165 | WLAN_EID_MESH_ID = 114, |
1145 | * The numbers have been incremented from those suggested in | 1166 | WLAN_EID_LINK_METRIC_REPORT = 115, |
1146 | * 802.11s/D2.0 so that MESH_CONFIG does not conflict with | 1167 | WLAN_EID_CONGESTION_NOTIFICATION = 116, |
1147 | * EXT_SUPP_RATES. | 1168 | /* Note that the Peer Link IE has been replaced with the similar |
1169 | * Peer Management IE. We will keep the former definition until mesh | ||
1170 | * code is changed to comply with latest 802.11s drafts. | ||
1148 | */ | 1171 | */ |
1149 | WLAN_EID_MESH_CONFIG = 51, | 1172 | WLAN_EID_PEER_LINK = 55, /* no longer in 802.11s drafts */ |
1150 | WLAN_EID_MESH_ID = 52, | 1173 | WLAN_EID_PEER_MGMT = 117, |
1151 | WLAN_EID_PEER_LINK = 55, | 1174 | WLAN_EID_CHAN_SWITCH_PARAM = 118, |
1152 | WLAN_EID_PREQ = 68, | 1175 | WLAN_EID_MESH_AWAKE_WINDOW = 119, |
1153 | WLAN_EID_PREP = 69, | 1176 | WLAN_EID_BEACON_TIMING = 120, |
1154 | WLAN_EID_PERR = 70, | 1177 | WLAN_EID_MCCAOP_SETUP_REQ = 121, |
1155 | WLAN_EID_RANN = 49, /* compatible with FreeBSD */ | 1178 | WLAN_EID_MCCAOP_SETUP_RESP = 122, |
1179 | WLAN_EID_MCCAOP_ADVERT = 123, | ||
1180 | WLAN_EID_MCCAOP_TEARDOWN = 124, | ||
1181 | WLAN_EID_GANN = 125, | ||
1182 | WLAN_EID_RANN = 126, | ||
1183 | WLAN_EID_PREQ = 130, | ||
1184 | WLAN_EID_PREP = 131, | ||
1185 | WLAN_EID_PERR = 132, | ||
1186 | WLAN_EID_PXU = 137, | ||
1187 | WLAN_EID_PXUC = 138, | ||
1188 | WLAN_EID_AUTH_MESH_PEER_EXCH = 139, | ||
1189 | WLAN_EID_MIC = 140, | ||
1156 | 1190 | ||
1157 | WLAN_EID_PWR_CONSTRAINT = 32, | 1191 | WLAN_EID_PWR_CONSTRAINT = 32, |
1158 | WLAN_EID_PWR_CAPABILITY = 33, | 1192 | WLAN_EID_PWR_CAPABILITY = 33, |
@@ -1211,9 +1245,14 @@ enum ieee80211_category { | |||
1211 | WLAN_CATEGORY_HT = 7, | 1245 | WLAN_CATEGORY_HT = 7, |
1212 | WLAN_CATEGORY_SA_QUERY = 8, | 1246 | WLAN_CATEGORY_SA_QUERY = 8, |
1213 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, | 1247 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, |
1248 | WLAN_CATEGORY_MESH_ACTION = 13, | ||
1249 | WLAN_CATEGORY_MULTIHOP_ACTION = 14, | ||
1250 | WLAN_CATEGORY_SELF_PROTECTED = 15, | ||
1214 | WLAN_CATEGORY_WMM = 17, | 1251 | WLAN_CATEGORY_WMM = 17, |
1215 | WLAN_CATEGORY_MESH_PLINK = 30, /* Pending ANA approval */ | 1252 | /* TODO: remove MESH_PLINK and MESH_PATH_SEL after */ |
1216 | WLAN_CATEGORY_MESH_PATH_SEL = 32, /* Pending ANA approval */ | 1253 | /* mesh is updated to current 802.11s draft */ |
1254 | WLAN_CATEGORY_MESH_PLINK = 30, | ||
1255 | WLAN_CATEGORY_MESH_PATH_SEL = 32, | ||
1217 | WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, | 1256 | WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, |
1218 | WLAN_CATEGORY_VENDOR_SPECIFIC = 127, | 1257 | WLAN_CATEGORY_VENDOR_SPECIFIC = 127, |
1219 | }; | 1258 | }; |
@@ -1351,6 +1390,8 @@ enum ieee80211_sa_query_action { | |||
1351 | /* AKM suite selectors */ | 1390 | /* AKM suite selectors */ |
1352 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 | 1391 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 |
1353 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 | 1392 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 |
1393 | #define WLAN_AKM_SUITE_SAE 0x000FAC08 | ||
1394 | #define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 | ||
1354 | 1395 | ||
1355 | #define WLAN_MAX_KEY_LEN 32 | 1396 | #define WLAN_MAX_KEY_LEN 32 |
1356 | 1397 | ||
diff --git a/include/linux/if.h b/include/linux/if.h index 53558ec59e1b..123959927745 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -75,6 +75,8 @@ | |||
75 | #define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ | 75 | #define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ |
76 | #define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ | 76 | #define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ |
77 | #define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ | 77 | #define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ |
78 | #define IFF_OVS_DATAPATH 0x10000 /* device used as Open vSwitch | ||
79 | * datapath port */ | ||
78 | 80 | ||
79 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 81 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
80 | #define IF_GET_PROTO 0x0002 | 82 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h index 2c7994372bde..a17edda8a781 100644 --- a/include/linux/if_bonding.h +++ b/include/linux/if_bonding.h | |||
@@ -84,6 +84,9 @@ | |||
84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ | 84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ |
85 | 85 | ||
86 | #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ | 86 | #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ |
87 | |||
88 | #define BOND_DEFAULT_RESEND_IGMP 1 /* Default number of IGMP membership reports */ | ||
89 | |||
87 | /* hashing types */ | 90 | /* hashing types */ |
88 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ | 91 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ |
89 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ | 92 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index bed7a4682b90..f9c3df03db0f 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -137,8 +137,6 @@ extern struct ctl_table ether_table[]; | |||
137 | 137 | ||
138 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); | 138 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); |
139 | 139 | ||
140 | #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" | ||
141 | |||
142 | #endif | 140 | #endif |
143 | 141 | ||
144 | #endif /* _LINUX_IF_ETHER_H */ | 142 | #endif /* _LINUX_IF_ETHER_H */ |
diff --git a/include/linux/if_infiniband.h b/include/linux/if_infiniband.h index 3e659ec7dfdd..7d958475d4ac 100644 --- a/include/linux/if_infiniband.h +++ b/include/linux/if_infiniband.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD | 5 | * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD |
6 | * license, available in the LICENSE.TXT file accompanying this | 6 | * license, available in the LICENSE.TXT file accompanying this |
7 | * software. These details are also available at | 7 | * software. These details are also available at |
8 | * <http://openib.org/license.html>. | 8 | * <http://www.openfabrics.org/software_license.htm>. |
9 | * | 9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 10 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
11 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 11 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 35280b302290..8a2fd66a8b5f 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -40,6 +40,12 @@ struct macvlan_rx_stats { | |||
40 | unsigned long rx_errors; | 40 | unsigned long rx_errors; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | /* | ||
44 | * Maximum times a macvtap device can be opened. This can be used to | ||
45 | * configure the number of receive queue, e.g. for multiqueue virtio. | ||
46 | */ | ||
47 | #define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16) | ||
48 | |||
43 | struct macvlan_dev { | 49 | struct macvlan_dev { |
44 | struct net_device *dev; | 50 | struct net_device *dev; |
45 | struct list_head list; | 51 | struct list_head list; |
@@ -50,7 +56,8 @@ struct macvlan_dev { | |||
50 | enum macvlan_mode mode; | 56 | enum macvlan_mode mode; |
51 | int (*receive)(struct sk_buff *skb); | 57 | int (*receive)(struct sk_buff *skb); |
52 | int (*forward)(struct net_device *dev, struct sk_buff *skb); | 58 | int (*forward)(struct net_device *dev, struct sk_buff *skb); |
53 | struct macvtap_queue *tap; | 59 | struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; |
60 | int numvtaps; | ||
54 | }; | 61 | }; |
55 | 62 | ||
56 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | 63 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, |
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 27741e05446f..397921b09ef9 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
@@ -40,25 +40,35 @@ | |||
40 | * PPPoE addressing definition | 40 | * PPPoE addressing definition |
41 | */ | 41 | */ |
42 | typedef __be16 sid_t; | 42 | typedef __be16 sid_t; |
43 | struct pppoe_addr{ | 43 | struct pppoe_addr { |
44 | sid_t sid; /* Session identifier */ | 44 | sid_t sid; /* Session identifier */ |
45 | unsigned char remote[ETH_ALEN]; /* Remote address */ | 45 | unsigned char remote[ETH_ALEN]; /* Remote address */ |
46 | char dev[IFNAMSIZ]; /* Local device to use */ | 46 | char dev[IFNAMSIZ]; /* Local device to use */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /************************************************************************ | 49 | /************************************************************************ |
50 | * Protocols supported by AF_PPPOX | 50 | * PPTP addressing definition |
51 | */ | 51 | */ |
52 | struct pptp_addr { | ||
53 | __be16 call_id; | ||
54 | struct in_addr sin_addr; | ||
55 | }; | ||
56 | |||
57 | /************************************************************************ | ||
58 | * Protocols supported by AF_PPPOX | ||
59 | */ | ||
52 | #define PX_PROTO_OE 0 /* Currently just PPPoE */ | 60 | #define PX_PROTO_OE 0 /* Currently just PPPoE */ |
53 | #define PX_PROTO_OL2TP 1 /* Now L2TP also */ | 61 | #define PX_PROTO_OL2TP 1 /* Now L2TP also */ |
54 | #define PX_MAX_PROTO 2 | 62 | #define PX_PROTO_PPTP 2 |
55 | 63 | #define PX_MAX_PROTO 3 | |
56 | struct sockaddr_pppox { | 64 | |
57 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 65 | struct sockaddr_pppox { |
58 | unsigned int sa_protocol; /* protocol identifier */ | 66 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
59 | union{ | 67 | unsigned int sa_protocol; /* protocol identifier */ |
60 | struct pppoe_addr pppoe; | 68 | union { |
61 | }sa_addr; | 69 | struct pppoe_addr pppoe; |
70 | struct pptp_addr pptp; | ||
71 | } sa_addr; | ||
62 | } __attribute__((packed)); | 72 | } __attribute__((packed)); |
63 | 73 | ||
64 | /* The use of the above union isn't viable because the size of this | 74 | /* The use of the above union isn't viable because the size of this |
@@ -150,15 +160,23 @@ struct pppoe_opt { | |||
150 | relayed to (PPPoE relaying) */ | 160 | relayed to (PPPoE relaying) */ |
151 | }; | 161 | }; |
152 | 162 | ||
163 | struct pptp_opt { | ||
164 | struct pptp_addr src_addr; | ||
165 | struct pptp_addr dst_addr; | ||
166 | u32 ack_sent, ack_recv; | ||
167 | u32 seq_sent, seq_recv; | ||
168 | int ppp_flags; | ||
169 | }; | ||
153 | #include <net/sock.h> | 170 | #include <net/sock.h> |
154 | 171 | ||
155 | struct pppox_sock { | 172 | struct pppox_sock { |
156 | /* struct sock must be the first member of pppox_sock */ | 173 | /* struct sock must be the first member of pppox_sock */ |
157 | struct sock sk; | 174 | struct sock sk; |
158 | struct ppp_channel chan; | 175 | struct ppp_channel chan; |
159 | struct pppox_sock *next; /* for hash table */ | 176 | struct pppox_sock *next; /* for hash table */ |
160 | union { | 177 | union { |
161 | struct pppoe_opt pppoe; | 178 | struct pppoe_opt pppoe; |
179 | struct pptp_opt pptp; | ||
162 | } proto; | 180 | } proto; |
163 | __be16 num; | 181 | __be16 num; |
164 | }; | 182 | }; |
@@ -186,7 +204,7 @@ struct pppox_proto { | |||
186 | struct module *owner; | 204 | struct module *owner; |
187 | }; | 205 | }; |
188 | 206 | ||
189 | extern int register_pppox_proto(int proto_num, struct pppox_proto *pp); | 207 | extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); |
190 | extern void unregister_pppox_proto(int proto_num); | 208 | extern void unregister_pppox_proto(int proto_num); |
191 | extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ | 209 | extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ |
192 | extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | 210 | extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
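With PX_PROTO_PPTP registered, user space addresses a PPTP channel through the new pptp member of sockaddr_pppox. A hedged user-space fragment; the socket, call id and address values are placeholders:

        struct sockaddr_pppox sp = {
                .sa_family   = AF_PPPOX,
                .sa_protocol = PX_PROTO_PPTP,
        };

        sp.sa_addr.pptp.call_id = htons(1);              /* local call id, example */
        sp.sa_addr.pptp.sin_addr.s_addr = htonl(INADDR_ANY);

        if (bind(sock, (struct sockaddr *)&sp, sizeof(sp)) < 0)
                perror("bind");
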
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3d870fda8c4f..c2f3a72712ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/rtnetlink.h> | ||
19 | 20 | ||
20 | #define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) | 21 | #define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) |
21 | * that VLAN requires. | 22 | * that VLAN requires. |
@@ -68,6 +69,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) | |||
68 | #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ | 69 | #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ |
69 | #define VLAN_TAG_PRESENT VLAN_CFI_MASK | 70 | #define VLAN_TAG_PRESENT VLAN_CFI_MASK |
70 | #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ | 71 | #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ |
72 | #define VLAN_N_VID 4096 | ||
71 | 73 | ||
72 | /* found in socket.c */ | 74 | /* found in socket.c */ |
73 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | 75 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); |
@@ -76,9 +78,8 @@ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | |||
76 | * depends on completely exhausting the VLAN identifier space. Thus | 78 | * depends on completely exhausting the VLAN identifier space. Thus |
77 | * it gives constant time look-up, but in many cases it wastes memory. | 79 | * it gives constant time look-up, but in many cases it wastes memory. |
78 | */ | 80 | */ |
79 | #define VLAN_GROUP_ARRAY_LEN 4096 | ||
80 | #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 | 81 | #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 |
81 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) | 82 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) |
82 | 83 | ||
83 | struct vlan_group { | 84 | struct vlan_group { |
84 | struct net_device *real_dev; /* The ethernet(like) device | 85 | struct net_device *real_dev; /* The ethernet(like) device |
@@ -114,12 +115,24 @@ static inline void vlan_group_set_device(struct vlan_group *vg, | |||
114 | #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) | 115 | #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) |
115 | 116 | ||
116 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 117 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
118 | /* Must be invoked with rcu_read_lock or with RTNL. */ | ||
119 | static inline struct net_device *vlan_find_dev(struct net_device *real_dev, | ||
120 | u16 vlan_id) | ||
121 | { | ||
122 | struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp); | ||
123 | |||
124 | if (grp) | ||
125 | return vlan_group_get_device(grp, vlan_id); | ||
126 | |||
127 | return NULL; | ||
128 | } | ||
129 | |||
117 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); | 130 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); |
118 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); | 131 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); |
119 | 132 | ||
120 | extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 133 | extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
121 | u16 vlan_tci, int polling); | 134 | u16 vlan_tci, int polling); |
122 | extern int vlan_hwaccel_do_receive(struct sk_buff *skb); | 135 | extern bool vlan_hwaccel_do_receive(struct sk_buff **skb); |
123 | extern gro_result_t | 136 | extern gro_result_t |
124 | vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | 137 | vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
125 | unsigned int vlan_tci, struct sk_buff *skb); | 138 | unsigned int vlan_tci, struct sk_buff *skb); |
@@ -128,6 +141,12 @@ vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | |||
128 | unsigned int vlan_tci); | 141 | unsigned int vlan_tci); |
129 | 142 | ||
130 | #else | 143 | #else |
144 | static inline struct net_device *vlan_find_dev(struct net_device *real_dev, | ||
145 | u16 vlan_id) | ||
146 | { | ||
147 | return NULL; | ||
148 | } | ||
149 | |||
131 | static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) | 150 | static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
132 | { | 151 | { |
133 | BUG(); | 152 | BUG(); |
@@ -147,9 +166,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
147 | return NET_XMIT_SUCCESS; | 166 | return NET_XMIT_SUCCESS; |
148 | } | 167 | } |
149 | 168 | ||
150 | static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) | 169 | static inline bool vlan_hwaccel_do_receive(struct sk_buff **skb) |
151 | { | 170 | { |
152 | return 0; | 171 | if ((*skb)->vlan_tci & VLAN_VID_MASK) |
172 | (*skb)->pkt_type = PACKET_OTHERHOST; | ||
173 | return false; | ||
153 | } | 174 | } |
154 | 175 | ||
155 | static inline gro_result_t | 176 | static inline gro_result_t |
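vlan_find_dev() gives callers a direct tag-to-device lookup; as its comment notes, it must run under rcu_read_lock() or RTNL. A short sketch (example_vlan_lookup is an illustrative name):

        static struct net_device *example_vlan_lookup(struct sk_buff *skb,
                                                      struct net_device *real_dev)
        {
                u16 vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;

                /* caller holds rcu_read_lock(); NULL means no such VLAN is configured */
                return vlan_find_dev(real_dev, vid);
        }
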
diff --git a/include/linux/in.h b/include/linux/in.h index 41d88a4689af..beeb6dee2b49 100644 --- a/include/linux/in.h +++ b/include/linux/in.h | |||
@@ -250,6 +250,25 @@ struct sockaddr_in { | |||
250 | 250 | ||
251 | #ifdef __KERNEL__ | 251 | #ifdef __KERNEL__ |
252 | 252 | ||
253 | #include <linux/errno.h> | ||
254 | |||
255 | static inline int proto_ports_offset(int proto) | ||
256 | { | ||
257 | switch (proto) { | ||
258 | case IPPROTO_TCP: | ||
259 | case IPPROTO_UDP: | ||
260 | case IPPROTO_DCCP: | ||
261 | case IPPROTO_ESP: /* SPI */ | ||
262 | case IPPROTO_SCTP: | ||
263 | case IPPROTO_UDPLITE: | ||
264 | return 0; | ||
265 | case IPPROTO_AH: /* SPI */ | ||
266 | return 4; | ||
267 | default: | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | } | ||
271 | |||
253 | static inline bool ipv4_is_loopback(__be32 addr) | 272 | static inline bool ipv4_is_loopback(__be32 addr) |
254 | { | 273 | { |
255 | return (addr & htonl(0xff000000)) == htonl(0x7f000000); | 274 | return (addr & htonl(0xff000000)) == htonl(0x7f000000); |
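proto_ports_offset() reports where the 16-bit ports (or the SPI) live inside the transport header, which is useful for flow hashing. A sketch with bounds checking omitted; example_flow_ports is an illustrative name:

        static __be32 example_flow_ports(const struct sk_buff *skb,
                                         const struct iphdr *iph)
        {
                int poff = proto_ports_offset(iph->protocol);

                if (poff < 0)
                        return 0;       /* no usable ports/SPI for this protocol */

                /* source and destination port (or SPI) as one 32-bit word */
                return *(const __be32 *)(skb_transport_header(skb) + poff);
        }
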
diff --git a/include/linux/in6.h b/include/linux/in6.h index c4bf46f764bf..097a34b55560 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h | |||
@@ -268,6 +268,10 @@ struct in6_flowlabel_req { | |||
268 | /* RFC5082: Generalized Ttl Security Mechanism */ | 268 | /* RFC5082: Generalized Ttl Security Mechanism */ |
269 | #define IPV6_MINHOPCOUNT 73 | 269 | #define IPV6_MINHOPCOUNT 73 |
270 | 270 | ||
271 | #define IPV6_ORIGDSTADDR 74 | ||
272 | #define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR | ||
273 | #define IPV6_TRANSPARENT 75 | ||
274 | |||
271 | /* | 275 | /* |
272 | * Multicast Routing: | 276 | * Multicast Routing: |
273 | * see include/linux/mroute6.h. | 277 | * see include/linux/mroute6.h. |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 2be1a1a2beb9..ccd5b07d678d 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/rcupdate.h> | 9 | #include <linux/rcupdate.h> |
10 | #include <linux/timer.h> | 10 | #include <linux/timer.h> |
11 | #include <linux/sysctl.h> | 11 | #include <linux/sysctl.h> |
12 | #include <linux/rtnetlink.h> | ||
12 | 13 | ||
13 | enum | 14 | enum |
14 | { | 15 | { |
@@ -158,7 +159,12 @@ struct in_ifaddr { | |||
158 | extern int register_inetaddr_notifier(struct notifier_block *nb); | 159 | extern int register_inetaddr_notifier(struct notifier_block *nb); |
159 | extern int unregister_inetaddr_notifier(struct notifier_block *nb); | 160 | extern int unregister_inetaddr_notifier(struct notifier_block *nb); |
160 | 161 | ||
161 | extern struct net_device *ip_dev_find(struct net *net, __be32 addr); | 162 | extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); |
163 | static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) | ||
164 | { | ||
165 | return __ip_dev_find(net, addr, true); | ||
166 | } | ||
167 | |||
162 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); | 168 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); |
163 | extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); | 169 | extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); |
164 | extern void devinet_init(void); | 170 | extern void devinet_init(void); |
@@ -198,14 +204,10 @@ static __inline__ int bad_mask(__be32 mask, __be32 addr) | |||
198 | 204 | ||
199 | static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) | 205 | static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) |
200 | { | 206 | { |
201 | struct in_device *in_dev = dev->ip_ptr; | 207 | return rcu_dereference(dev->ip_ptr); |
202 | if (in_dev) | ||
203 | in_dev = rcu_dereference(in_dev); | ||
204 | return in_dev; | ||
205 | } | 208 | } |
206 | 209 | ||
207 | static __inline__ struct in_device * | 210 | static inline struct in_device *in_dev_get(const struct net_device *dev) |
208 | in_dev_get(const struct net_device *dev) | ||
209 | { | 211 | { |
210 | struct in_device *in_dev; | 212 | struct in_device *in_dev; |
211 | 213 | ||
@@ -217,10 +219,9 @@ in_dev_get(const struct net_device *dev) | |||
217 | return in_dev; | 219 | return in_dev; |
218 | } | 220 | } |
219 | 221 | ||
220 | static __inline__ struct in_device * | 222 | static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) |
221 | __in_dev_get_rtnl(const struct net_device *dev) | ||
222 | { | 223 | { |
223 | return (struct in_device*)dev->ip_ptr; | 224 | return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held()); |
224 | } | 225 | } |
225 | 226 | ||
226 | extern void in_dev_finish_destroy(struct in_device *idev); | 227 | extern void in_dev_finish_destroy(struct in_device *idev); |
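The new devref argument spells out the reference-counting contract: ip_dev_find() still returns a device with a reference held, while __ip_dev_find(net, addr, false) does not and therefore has to run under RCU. Sketch of the common, referenced case:

        struct net_device *dev = ip_dev_find(net, addr);

        if (dev) {
                /* ... use dev ... */
                dev_put(dev);   /* drop the reference ip_dev_find() took */
        }
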
diff --git a/include/linux/init.h b/include/linux/init.h index de994304e0bb..577671c55153 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -46,16 +46,23 @@ | |||
46 | #define __exitdata __section(.exit.data) | 46 | #define __exitdata __section(.exit.data) |
47 | #define __exit_call __used __section(.exitcall.exit) | 47 | #define __exit_call __used __section(.exitcall.exit) |
48 | 48 | ||
49 | /* modpost check for section mismatches during the kernel build. | 49 | /* |
50 | * modpost check for section mismatches during the kernel build. | ||
50 | * A section mismatch happens when there are references from a | 51 | * A section mismatch happens when there are references from a |
51 | * code or data section to an init section (both code or data). | 52 | * code or data section to an init section (both code or data). |
52 | * The init sections are (for most archs) discarded by the kernel | 53 | * The init sections are (for most archs) discarded by the kernel |
53 | * when early init has completed so all such references are potential bugs. | 54 | * when early init has completed so all such references are potential bugs. |
54 | * For exit sections the same issue exists. | 55 | * For exit sections the same issue exists. |
56 | * | ||
55 | * The following markers are used for the cases where the reference to | 57 | * The following markers are used for the cases where the reference to |
56 | * the *init / *exit section (code or data) is valid and will teach | 58 | * the *init / *exit section (code or data) is valid and will teach |
57 | * modpost not to issue a warning. | 59 | * modpost not to issue a warning. Intended semantics is that a code or |
58 | * The markers follow same syntax rules as __init / __initdata. */ | 60 | * data tagged __ref* can reference code or data from init section without |
61 | * producing a warning (of course, no warning does not mean code is | ||
62 | * correct, so optimally document why the __ref is needed and why it's OK). | ||
63 | * | ||
64 | * The markers follow same syntax rules as __init / __initdata. | ||
65 | */ | ||
59 | #define __ref __section(.ref.text) noinline | 66 | #define __ref __section(.ref.text) noinline |
60 | #define __refdata __section(.ref.data) | 67 | #define __refdata __section(.ref.data) |
61 | #define __refconst __section(.ref.rodata) | 68 | #define __refconst __section(.ref.rodata) |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1f43fa56f600..2fea6c8ef6ba 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -82,11 +82,17 @@ extern struct group_info init_groups; | |||
82 | # define CAP_INIT_BSET CAP_FULL_SET | 82 | # define CAP_INIT_BSET CAP_FULL_SET |
83 | 83 | ||
84 | #ifdef CONFIG_TREE_PREEMPT_RCU | 84 | #ifdef CONFIG_TREE_PREEMPT_RCU |
85 | #define INIT_TASK_RCU_TREE_PREEMPT() \ | ||
86 | .rcu_blocked_node = NULL, | ||
87 | #else | ||
88 | #define INIT_TASK_RCU_TREE_PREEMPT(tsk) | ||
89 | #endif | ||
90 | #ifdef CONFIG_PREEMPT_RCU | ||
85 | #define INIT_TASK_RCU_PREEMPT(tsk) \ | 91 | #define INIT_TASK_RCU_PREEMPT(tsk) \ |
86 | .rcu_read_lock_nesting = 0, \ | 92 | .rcu_read_lock_nesting = 0, \ |
87 | .rcu_read_unlock_special = 0, \ | 93 | .rcu_read_unlock_special = 0, \ |
88 | .rcu_blocked_node = NULL, \ | 94 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ |
89 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), | 95 | INIT_TASK_RCU_TREE_PREEMPT() |
90 | #else | 96 | #else |
91 | #define INIT_TASK_RCU_PREEMPT(tsk) | 97 | #define INIT_TASK_RCU_PREEMPT(tsk) |
92 | #endif | 98 | #endif |
@@ -137,8 +143,8 @@ extern struct cred init_cred; | |||
137 | .children = LIST_HEAD_INIT(tsk.children), \ | 143 | .children = LIST_HEAD_INIT(tsk.children), \ |
138 | .sibling = LIST_HEAD_INIT(tsk.sibling), \ | 144 | .sibling = LIST_HEAD_INIT(tsk.sibling), \ |
139 | .group_leader = &tsk, \ | 145 | .group_leader = &tsk, \ |
140 | .real_cred = &init_cred, \ | 146 | RCU_INIT_POINTER(.real_cred, &init_cred), \ |
141 | .cred = &init_cred, \ | 147 | RCU_INIT_POINTER(.cred, &init_cred), \ |
142 | .cred_guard_mutex = \ | 148 | .cred_guard_mutex = \ |
143 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ | 149 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ |
144 | .comm = "swapper", \ | 150 | .comm = "swapper", \ |
diff --git a/include/linux/input.h b/include/linux/input.h index 896a92227bc4..51af441f3a21 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -34,7 +34,7 @@ struct input_event { | |||
34 | * Protocol version. | 34 | * Protocol version. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define EV_VERSION 0x010000 | 37 | #define EV_VERSION 0x010001 |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * IOCTLs (0x00 - 0x7f) | 40 | * IOCTLs (0x00 - 0x7f) |
@@ -56,25 +56,50 @@ struct input_absinfo { | |||
56 | __s32 resolution; | 56 | __s32 resolution; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | /** | ||
60 | * struct input_keymap_entry - used by EVIOCGKEYCODE/EVIOCSKEYCODE ioctls | ||
61 | * @scancode: scancode represented in machine-endian form. | ||
62 | * @len: length of the scancode that resides in @scancode buffer. | ||
63 | * @index: index in the keymap, may be used instead of scancode | ||
64 | * @flags: allows specifying how the kernel should handle the request. For | ||
65 | * example, setting the INPUT_KEYMAP_BY_INDEX flag indicates that the kernel | ||
66 | * should perform the lookup in the keymap by @index instead of @scancode | ||
67 | * @keycode: key code assigned to this scancode | ||
68 | * | ||
69 | * The structure is used to retrieve and modify keymap data. Users have | ||
70 | * the option of performing the lookup either by @scancode itself or by | ||
71 | * @index into the keymap. EVIOCGKEYCODE will also return the scancode or | ||
72 | * index (depending on which element was used to perform the lookup). | ||
73 | */ | ||
74 | struct input_keymap_entry { | ||
75 | #define INPUT_KEYMAP_BY_INDEX (1 << 0) | ||
76 | __u8 flags; | ||
77 | __u8 len; | ||
78 | __u16 index; | ||
79 | __u32 keycode; | ||
80 | __u8 scancode[32]; | ||
81 | }; | ||
82 | |||
59 | #define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */ | 83 | #define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */ |
60 | #define EVIOCGID _IOR('E', 0x02, struct input_id) /* get device ID */ | 84 | #define EVIOCGID _IOR('E', 0x02, struct input_id) /* get device ID */ |
61 | #define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */ | 85 | #define EVIOCGREP _IOR('E', 0x03, unsigned int[2]) /* get repeat settings */ |
62 | #define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */ | 86 | #define EVIOCSREP _IOW('E', 0x03, unsigned int[2]) /* set repeat settings */ |
63 | #define EVIOCGKEYCODE _IOR('E', 0x04, unsigned int[2]) /* get keycode */ | 87 | |
64 | #define EVIOCSKEYCODE _IOW('E', 0x04, unsigned int[2]) /* set keycode */ | 88 | #define EVIOCGKEYCODE _IOR('E', 0x04, struct input_keymap_entry) /* get keycode */ |
89 | #define EVIOCSKEYCODE _IOW('E', 0x04, struct input_keymap_entry) /* set keycode */ | ||
65 | 90 | ||
66 | #define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */ | 91 | #define EVIOCGNAME(len) _IOC(_IOC_READ, 'E', 0x06, len) /* get device name */ |
67 | #define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */ | 92 | #define EVIOCGPHYS(len) _IOC(_IOC_READ, 'E', 0x07, len) /* get physical location */ |
68 | #define EVIOCGUNIQ(len) _IOC(_IOC_READ, 'E', 0x08, len) /* get unique identifier */ | 93 | #define EVIOCGUNIQ(len) _IOC(_IOC_READ, 'E', 0x08, len) /* get unique identifier */ |
69 | 94 | ||
70 | #define EVIOCGKEY(len) _IOC(_IOC_READ, 'E', 0x18, len) /* get global keystate */ | 95 | #define EVIOCGKEY(len) _IOC(_IOC_READ, 'E', 0x18, len) /* get global key state */ |
71 | #define EVIOCGLED(len) _IOC(_IOC_READ, 'E', 0x19, len) /* get all LEDs */ | 96 | #define EVIOCGLED(len) _IOC(_IOC_READ, 'E', 0x19, len) /* get all LEDs */ |
72 | #define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */ | 97 | #define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */ |
73 | #define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */ | 98 | #define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */ |
74 | 99 | ||
75 | #define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + ev, len) /* get event bits */ | 100 | #define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + ev, len) /* get event bits */ |
76 | #define EVIOCGABS(abs) _IOR('E', 0x40 + abs, struct input_absinfo) /* get abs value/limits */ | 101 | #define EVIOCGABS(abs) _IOR('E', 0x40 + abs, struct input_absinfo) /* get abs value/limits */ |
77 | #define EVIOCSABS(abs) _IOW('E', 0xc0 + abs, struct input_absinfo) /* set abs value/limits */ | 102 | #define EVIOCSABS(abs) _IOW('E', 0xc0 + abs, struct input_absinfo) /* set abs value/limits */ |
78 | 103 | ||
79 | #define EVIOCSFF _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect)) /* send a force effect to a force feedback device */ | 104 | #define EVIOCSFF _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect)) /* send a force effect to a force feedback device */ |
80 | #define EVIOCRMFF _IOW('E', 0x81, int) /* Erase a force effect */ | 105 | #define EVIOCRMFF _IOW('E', 0x81, int) /* Erase a force effect */ |
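A hedged user-space sketch of the keymap-entry based ioctls defined above; the evdev file descriptor and the index value are assumptions and error handling is omitted.

#include <linux/input.h>
#include <string.h>
#include <sys/ioctl.h>

/* Look up the Nth keymap entry by index rather than by scancode. */
static int example_get_entry(int evdev_fd, __u16 index,
			     struct input_keymap_entry *ke)
{
	memset(ke, 0, sizeof(*ke));
	ke->flags = INPUT_KEYMAP_BY_INDEX;
	ke->index = index;
	/* on success the kernel fills in scancode, len and keycode */
	return ioctl(evdev_fd, EVIOCGKEYCODE, ke);
}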
@@ -1088,13 +1113,13 @@ struct input_mt_slot { | |||
1088 | * @keycodemax: size of keycode table | 1113 | * @keycodemax: size of keycode table |
1089 | * @keycodesize: size of elements in keycode table | 1114 | * @keycodesize: size of elements in keycode table |
1090 | * @keycode: map of scancodes to keycodes for this device | 1115 | * @keycode: map of scancodes to keycodes for this device |
1116 | * @getkeycode: optional legacy method to retrieve current keymap. | ||
1091 | * @setkeycode: optional method to alter current keymap, used to implement | 1117 | * @setkeycode: optional method to alter current keymap, used to implement |
1092 | * sparse keymaps. If not supplied default mechanism will be used. | 1118 | * sparse keymaps. If not supplied default mechanism will be used. |
1093 | * The method is being called while holding event_lock and thus must | 1119 | * The method is being called while holding event_lock and thus must |
1094 | * not sleep | 1120 | * not sleep |
1095 | * @getkeycode: optional method to retrieve current keymap. If not supplied | 1121 | * @getkeycode_new: transition method |
1096 | * default mechanism will be used. The method is being called while | 1122 | * @setkeycode_new: transition method |
1097 | * holding event_lock and thus must not sleep | ||
1098 | * @ff: force feedback structure associated with the device if device | 1123 | * @ff: force feedback structure associated with the device if device |
1099 | * supports force feedback effects | 1124 | * supports force feedback effects |
1100 | * @repeat_key: stores key code of the last key pressed; used to implement | 1125 | * @repeat_key: stores key code of the last key pressed; used to implement |
@@ -1168,10 +1193,16 @@ struct input_dev { | |||
1168 | unsigned int keycodemax; | 1193 | unsigned int keycodemax; |
1169 | unsigned int keycodesize; | 1194 | unsigned int keycodesize; |
1170 | void *keycode; | 1195 | void *keycode; |
1196 | |||
1171 | int (*setkeycode)(struct input_dev *dev, | 1197 | int (*setkeycode)(struct input_dev *dev, |
1172 | unsigned int scancode, unsigned int keycode); | 1198 | unsigned int scancode, unsigned int keycode); |
1173 | int (*getkeycode)(struct input_dev *dev, | 1199 | int (*getkeycode)(struct input_dev *dev, |
1174 | unsigned int scancode, unsigned int *keycode); | 1200 | unsigned int scancode, unsigned int *keycode); |
1201 | int (*setkeycode_new)(struct input_dev *dev, | ||
1202 | const struct input_keymap_entry *ke, | ||
1203 | unsigned int *old_keycode); | ||
1204 | int (*getkeycode_new)(struct input_dev *dev, | ||
1205 | struct input_keymap_entry *ke); | ||
1175 | 1206 | ||
1176 | struct ff_device *ff; | 1207 | struct ff_device *ff; |
1177 | 1208 | ||
@@ -1196,7 +1227,7 @@ struct input_dev { | |||
1196 | int (*flush)(struct input_dev *dev, struct file *file); | 1227 | int (*flush)(struct input_dev *dev, struct file *file); |
1197 | int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); | 1228 | int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); |
1198 | 1229 | ||
1199 | struct input_handle *grab; | 1230 | struct input_handle __rcu *grab; |
1200 | 1231 | ||
1201 | spinlock_t event_lock; | 1232 | spinlock_t event_lock; |
1202 | struct mutex mutex; | 1233 | struct mutex mutex; |
@@ -1478,10 +1509,12 @@ INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz) | |||
1478 | INPUT_GENERATE_ABS_ACCESSORS(flat, flat) | 1509 | INPUT_GENERATE_ABS_ACCESSORS(flat, flat) |
1479 | INPUT_GENERATE_ABS_ACCESSORS(res, resolution) | 1510 | INPUT_GENERATE_ABS_ACCESSORS(res, resolution) |
1480 | 1511 | ||
1481 | int input_get_keycode(struct input_dev *dev, | 1512 | int input_scancode_to_scalar(const struct input_keymap_entry *ke, |
1482 | unsigned int scancode, unsigned int *keycode); | 1513 | unsigned int *scancode); |
1514 | |||
1515 | int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); | ||
1483 | int input_set_keycode(struct input_dev *dev, | 1516 | int input_set_keycode(struct input_dev *dev, |
1484 | unsigned int scancode, unsigned int keycode); | 1517 | const struct input_keymap_entry *ke); |
1485 | 1518 | ||
1486 | extern struct class input_class; | 1519 | extern struct class input_class; |
1487 | 1520 | ||
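A hedged kernel-side sketch of the reworked helpers above, remapping an assumed scancode to KEY_PLAYPAUSE; the scancode value and the calling context are made up.

#include <linux/input.h>
#include <linux/string.h>

static int example_remap(struct input_dev *dev)
{
	u32 scancode = 0x1e;			/* hypothetical scancode */
	struct input_keymap_entry ke = {
		.flags   = 0,			/* look up by scancode, not index */
		.len     = sizeof(scancode),
		.keycode = KEY_PLAYPAUSE,
	};

	memcpy(ke.scancode, &scancode, sizeof(scancode));
	return input_set_keycode(dev, &ke);
}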
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h new file mode 100644 index 000000000000..e470d387dd49 --- /dev/null +++ b/include/linux/input/bu21013.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson SA 2010 | ||
3 | * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson | ||
4 | * License terms:GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #ifndef _BU21013_H | ||
8 | #define _BU21013_H | ||
9 | |||
10 | /** | ||
11 | * struct bu21013_platform_device - Handle the platform data | ||
12 | * @cs_en: pointer to the cs enable function | ||
13 | * @cs_dis: pointer to the cs disable function | ||
14 | * @irq_read_val: pointer to read the pen irq value function | ||
15 | * @x_max_res: xmax resolution | ||
16 | * @y_max_res: ymax resolution | ||
17 | * @touch_x_max: touch x max | ||
18 | * @touch_y_max: touch y max | ||
19 | * @cs_pin: chip select pin | ||
20 | * @irq: irq pin | ||
21 | * @ext_clk: external clock flag | ||
22 | * @x_flip: x flip flag | ||
23 | * @y_flip: y flip flag | ||
24 | * @wakeup: wakeup flag | ||
25 | * | ||
26 | * This is used to handle the platform data | ||
27 | */ | ||
28 | struct bu21013_platform_device { | ||
29 | int (*cs_en)(int reset_pin); | ||
30 | int (*cs_dis)(int reset_pin); | ||
31 | int (*irq_read_val)(void); | ||
32 | int x_max_res; | ||
33 | int y_max_res; | ||
34 | int touch_x_max; | ||
35 | int touch_y_max; | ||
36 | unsigned int cs_pin; | ||
37 | unsigned int irq; | ||
38 | bool ext_clk; | ||
39 | bool x_flip; | ||
40 | bool y_flip; | ||
41 | bool wakeup; | ||
42 | }; | ||
43 | |||
44 | #endif | ||
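A hedged board-file sketch of how this platform data would typically be wired up; the I2C address, device name string, resolution values and bus number are assumptions, not taken from the patch.

#include <linux/i2c.h>
#include <linux/input/bu21013.h>

static struct bu21013_platform_device example_ts_pdata = {
	.touch_x_max = 384,
	.touch_y_max = 704,
	.x_max_res   = 480,
	.y_max_res   = 864,
	.y_flip      = true,
	.wakeup      = true,
	/* cs_en/cs_dis/irq_read_val, cs_pin and irq are filled in by the board */
};

static struct i2c_board_info __initdata example_i2c2_devs[] = {
	{
		I2C_BOARD_INFO("bu21013_tp", 0x5c),	/* name/address assumed */
		.platform_data = &example_ts_pdata,
	},
};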
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a0384a4d1e6f..01b281646251 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <trace/events/irq.h> | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * These correspond to the IORESOURCE_IRQ_* defines in | 24 | * These correspond to the IORESOURCE_IRQ_* defines in |
@@ -407,10 +408,14 @@ asmlinkage void do_softirq(void); | |||
407 | asmlinkage void __do_softirq(void); | 408 | asmlinkage void __do_softirq(void); |
408 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); | 409 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
409 | extern void softirq_init(void); | 410 | extern void softirq_init(void); |
410 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) | 411 | static inline void __raise_softirq_irqoff(unsigned int nr) |
412 | { | ||
413 | trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL); | ||
414 | or_softirq_pending(1UL << nr); | ||
415 | } | ||
416 | |||
411 | extern void raise_softirq_irqoff(unsigned int nr); | 417 | extern void raise_softirq_irqoff(unsigned int nr); |
412 | extern void raise_softirq(unsigned int nr); | 418 | extern void raise_softirq(unsigned int nr); |
413 | extern void wakeup_softirqd(void); | ||
414 | 419 | ||
415 | /* This is the worklist that queues up per-cpu softirq work. | 420 | /* This is the worklist that queues up per-cpu softirq work. |
416 | * | 421 | * |
@@ -641,11 +646,8 @@ static inline void init_irq_proc(void) | |||
641 | struct seq_file; | 646 | struct seq_file; |
642 | int show_interrupts(struct seq_file *p, void *v); | 647 | int show_interrupts(struct seq_file *p, void *v); |
643 | 648 | ||
644 | struct irq_desc; | ||
645 | |||
646 | extern int early_irq_init(void); | 649 | extern int early_irq_init(void); |
647 | extern int arch_probe_nr_irqs(void); | 650 | extern int arch_probe_nr_irqs(void); |
648 | extern int arch_early_irq_init(void); | 651 | extern int arch_early_irq_init(void); |
649 | extern int arch_init_chip_data(struct irq_desc *desc, int node); | ||
650 | 652 | ||
651 | #endif | 653 | #endif |
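For context, a hedged sketch of the pattern served by the (now traced) raise helpers above. NET_RX_SOFTIRQ is only a stand-in for an existing softirq; a real user first queues its work on a per-CPU list, as NAPI does, before raising it.

#include <linux/interrupt.h>

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	/*
	 * Hard-irq context: interrupts are already off, so the _irqoff
	 * variant is used; the softirq runs on the way out of the irq.
	 */
	raise_softirq_irqoff(NET_RX_SOFTIRQ);
	return IRQ_HANDLED;
}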
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 64d529133031..3e70b21884a9 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -53,7 +53,7 @@ struct io_context { | |||
53 | 53 | ||
54 | struct radix_tree_root radix_root; | 54 | struct radix_tree_root radix_root; |
55 | struct hlist_head cic_list; | 55 | struct hlist_head cic_list; |
56 | void *ioc_data; | 56 | void __rcu *ioc_data; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static inline struct io_context *ioc_task_link(struct io_context *ioc) | 59 | static inline struct io_context *ioc_task_link(struct io_context *ioc) |
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index 9708de265bb1..5f43a3b2e3ad 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h | |||
@@ -70,6 +70,7 @@ | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | * IPVS Connection Flags | 72 | * IPVS Connection Flags |
73 | * Only flags 0..15 are sent to backup server | ||
73 | */ | 74 | */ |
74 | #define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ | 75 | #define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ |
75 | #define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ | 76 | #define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ |
@@ -88,9 +89,20 @@ | |||
88 | #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ | 89 | #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ |
89 | #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ | 90 | #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ |
90 | 91 | ||
92 | /* Flags that are not sent to backup server start from bit 16 */ | ||
93 | #define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ | ||
94 | |||
95 | /* Connection flags from destination that can be changed by user space */ | ||
96 | #define IP_VS_CONN_F_DEST_MASK (IP_VS_CONN_F_FWD_MASK | \ | ||
97 | IP_VS_CONN_F_ONE_PACKET | \ | ||
98 | IP_VS_CONN_F_NFCT | \ | ||
99 | 0) | ||
100 | |||
91 | #define IP_VS_SCHEDNAME_MAXLEN 16 | 101 | #define IP_VS_SCHEDNAME_MAXLEN 16 |
102 | #define IP_VS_PENAME_MAXLEN 16 | ||
92 | #define IP_VS_IFNAME_MAXLEN 16 | 103 | #define IP_VS_IFNAME_MAXLEN 16 |
93 | 104 | ||
105 | #define IP_VS_PEDATA_MAXLEN 255 | ||
94 | 106 | ||
95 | /* | 107 | /* |
96 | * The struct ip_vs_service_user and struct ip_vs_dest_user are | 108 | * The struct ip_vs_service_user and struct ip_vs_dest_user are |
@@ -324,6 +336,9 @@ enum { | |||
324 | IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ | 336 | IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ |
325 | 337 | ||
326 | IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ | 338 | IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ |
339 | |||
340 | IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ | ||
341 | |||
327 | __IPVS_SVC_ATTR_MAX, | 342 | __IPVS_SVC_ATTR_MAX, |
328 | }; | 343 | }; |
329 | 344 | ||
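A hedged illustration of what IP_VS_CONN_F_DEST_MASK is meant for: when user space edits a destination, only the maskable bits should be taken over, roughly along these lines (not code from this patch).

static inline unsigned int example_merge_dest_flags(unsigned int conn_flags,
						    unsigned int user_flags)
{
	/* keep kernel-private bits, accept only user-changeable ones */
	return (conn_flags & ~IP_VS_CONN_F_DEST_MASK) |
	       (user_flags &  IP_VS_CONN_F_DEST_MASK);
}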
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index e62683ba88e6..8e429d0e0405 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -341,7 +341,9 @@ struct ipv6_pinfo { | |||
341 | odstopts:1, | 341 | odstopts:1, |
342 | rxflow:1, | 342 | rxflow:1, |
343 | rxtclass:1, | 343 | rxtclass:1, |
344 | rxpmtu:1; | 344 | rxpmtu:1, |
345 | rxorigdstaddr:1; | ||
346 | /* 2 bits hole */ | ||
345 | } bits; | 347 | } bits; |
346 | __u16 all; | 348 | __u16 all; |
347 | } rxopt; | 349 | } rxopt; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b4..e9639115dff1 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ | 72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ |
73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ | 73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ |
74 | 74 | ||
75 | #define IRQF_MODIFY_MASK \ | ||
76 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | ||
77 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) | ||
78 | |||
75 | #ifdef CONFIG_IRQ_PER_CPU | 79 | #ifdef CONFIG_IRQ_PER_CPU |
76 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 80 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
77 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 81 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
@@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
80 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING | 84 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING |
81 | #endif | 85 | #endif |
82 | 86 | ||
83 | struct proc_dir_entry; | ||
84 | struct msi_desc; | 87 | struct msi_desc; |
85 | 88 | ||
86 | /** | 89 | /** |
90 | * struct irq_data - per irq and irq chip data passed down to chip functions | ||
91 | * @irq: interrupt number | ||
92 | * @node: node index useful for balancing | ||
93 | * @chip: low level interrupt hardware access | ||
94 | * @handler_data: per-IRQ data for the irq_chip methods | ||
95 | * @chip_data: platform-specific per-chip private data for the chip | ||
96 | * methods, to allow shared chip implementations | ||
97 | * @msi_desc: MSI descriptor | ||
98 | * @affinity: IRQ affinity on SMP | ||
99 | * | ||
100 | * The fields here need to overlay the ones in irq_desc until we have | ||
101 | * cleaned up the direct references and switched everything over to | ||
102 | * irq_data. | ||
103 | */ | ||
104 | struct irq_data { | ||
105 | unsigned int irq; | ||
106 | unsigned int node; | ||
107 | struct irq_chip *chip; | ||
108 | void *handler_data; | ||
109 | void *chip_data; | ||
110 | struct msi_desc *msi_desc; | ||
111 | #ifdef CONFIG_SMP | ||
112 | cpumask_var_t affinity; | ||
113 | #endif | ||
114 | }; | ||
115 | |||
116 | /** | ||
87 | * struct irq_chip - hardware interrupt chip descriptor | 117 | * struct irq_chip - hardware interrupt chip descriptor |
88 | * | 118 | * |
89 | * @name: name for /proc/interrupts | 119 | * @name: name for /proc/interrupts |
90 | * @startup: start up the interrupt (defaults to ->enable if NULL) | 120 | * @startup: deprecated, replaced by irq_startup |
91 | * @shutdown: shut down the interrupt (defaults to ->disable if NULL) | 121 | * @shutdown: deprecated, replaced by irq_shutdown |
92 | * @enable: enable the interrupt (defaults to chip->unmask if NULL) | 122 | * @enable: deprecated, replaced by irq_enable |
93 | * @disable: disable the interrupt | 123 | * @disable: deprecated, replaced by irq_disable |
94 | * @ack: start of a new interrupt | 124 | * @ack: deprecated, replaced by irq_ack |
95 | * @mask: mask an interrupt source | 125 | * @mask: deprecated, replaced by irq_mask |
96 | * @mask_ack: ack and mask an interrupt source | 126 | * @mask_ack: deprecated, replaced by irq_mask_ack |
97 | * @unmask: unmask an interrupt source | 127 | * @unmask: deprecated, replaced by irq_unmask |
98 | * @eoi: end of interrupt - chip level | 128 | * @eoi: deprecated, replaced by irq_eoi |
99 | * @end: end of interrupt - flow level | 129 | * @end: deprecated, will go away with __do_IRQ() |
100 | * @set_affinity: set the CPU affinity on SMP machines | 130 | * @set_affinity: deprecated, replaced by irq_set_affinity |
101 | * @retrigger: resend an IRQ to the CPU | 131 | * @retrigger: deprecated, replaced by irq_retrigger |
102 | * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 132 | * @set_type: deprecated, replaced by irq_set_type |
103 | * @set_wake: enable/disable power-management wake-on of an IRQ | 133 | * @set_wake: deprecated, replaced by irq_set_wake |
134 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
135 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
104 | * | 136 | * |
105 | * @bus_lock: function to lock access to slow bus (i2c) chips | 137 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
106 | * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips | 138 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
139 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | ||
140 | * @irq_disable: disable the interrupt | ||
141 | * @irq_ack: start of a new interrupt | ||
142 | * @irq_mask: mask an interrupt source | ||
143 | * @irq_mask_ack: ack and mask an interrupt source | ||
144 | * @irq_unmask: unmask an interrupt source | ||
145 | * @irq_eoi: end of interrupt | ||
146 | * @irq_set_affinity: set the CPU affinity on SMP machines | ||
147 | * @irq_retrigger: resend an IRQ to the CPU | ||
148 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | ||
149 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | ||
150 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | ||
151 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | ||
107 | * | 152 | * |
108 | * @release: release function solely used by UML | 153 | * @release: release function solely used by UML |
109 | * @typename: obsoleted by name, kept as migration helper | ||
110 | */ | 154 | */ |
111 | struct irq_chip { | 155 | struct irq_chip { |
112 | const char *name; | 156 | const char *name; |
157 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
113 | unsigned int (*startup)(unsigned int irq); | 158 | unsigned int (*startup)(unsigned int irq); |
114 | void (*shutdown)(unsigned int irq); | 159 | void (*shutdown)(unsigned int irq); |
115 | void (*enable)(unsigned int irq); | 160 | void (*enable)(unsigned int irq); |
@@ -130,154 +175,66 @@ struct irq_chip { | |||
130 | 175 | ||
131 | void (*bus_lock)(unsigned int irq); | 176 | void (*bus_lock)(unsigned int irq); |
132 | void (*bus_sync_unlock)(unsigned int irq); | 177 | void (*bus_sync_unlock)(unsigned int irq); |
178 | #endif | ||
179 | unsigned int (*irq_startup)(struct irq_data *data); | ||
180 | void (*irq_shutdown)(struct irq_data *data); | ||
181 | void (*irq_enable)(struct irq_data *data); | ||
182 | void (*irq_disable)(struct irq_data *data); | ||
183 | |||
184 | void (*irq_ack)(struct irq_data *data); | ||
185 | void (*irq_mask)(struct irq_data *data); | ||
186 | void (*irq_mask_ack)(struct irq_data *data); | ||
187 | void (*irq_unmask)(struct irq_data *data); | ||
188 | void (*irq_eoi)(struct irq_data *data); | ||
189 | |||
190 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | ||
191 | int (*irq_retrigger)(struct irq_data *data); | ||
192 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | ||
193 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | ||
194 | |||
195 | void (*irq_bus_lock)(struct irq_data *data); | ||
196 | void (*irq_bus_sync_unlock)(struct irq_data *data); | ||
133 | 197 | ||
134 | /* Currently used only by UML, might disappear one day.*/ | 198 | /* Currently used only by UML, might disappear one day.*/ |
135 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 199 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
136 | void (*release)(unsigned int irq, void *dev_id); | 200 | void (*release)(unsigned int irq, void *dev_id); |
137 | #endif | 201 | #endif |
138 | /* | ||
139 | * For compatibility, ->typename is copied into ->name. | ||
140 | * Will disappear. | ||
141 | */ | ||
142 | const char *typename; | ||
143 | }; | 202 | }; |
144 | 203 | ||
145 | struct timer_rand_state; | 204 | /* This include will go away once we have isolated irq_desc usage to core code */ |
146 | struct irq_2_iommu; | 205 | #include <linux/irqdesc.h> |
147 | /** | ||
148 | * struct irq_desc - interrupt descriptor | ||
149 | * @irq: interrupt number for this descriptor | ||
150 | * @timer_rand_state: pointer to timer rand state struct | ||
151 | * @kstat_irqs: irq stats per cpu | ||
152 | * @irq_2_iommu: iommu with this irq | ||
153 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
154 | * @chip: low level interrupt hardware access | ||
155 | * @msi_desc: MSI descriptor | ||
156 | * @handler_data: per-IRQ data for the irq_chip methods | ||
157 | * @chip_data: platform-specific per-chip private data for the chip | ||
158 | * methods, to allow shared chip implementations | ||
159 | * @action: the irq action chain | ||
160 | * @status: status information | ||
161 | * @depth: disable-depth, for nested irq_disable() calls | ||
162 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
163 | * @irq_count: stats field to detect stalled irqs | ||
164 | * @last_unhandled: aging timer for unhandled count | ||
165 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
166 | * @lock: locking for SMP | ||
167 | * @affinity: IRQ affinity on SMP | ||
168 | * @node: node index useful for balancing | ||
169 | * @pending_mask: pending rebalanced interrupts | ||
170 | * @threads_active: number of irqaction threads currently running | ||
171 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
172 | * @dir: /proc/irq/ procfs entry | ||
173 | * @name: flow handler name for /proc/interrupts output | ||
174 | */ | ||
175 | struct irq_desc { | ||
176 | unsigned int irq; | ||
177 | struct timer_rand_state *timer_rand_state; | ||
178 | unsigned int *kstat_irqs; | ||
179 | #ifdef CONFIG_INTR_REMAP | ||
180 | struct irq_2_iommu *irq_2_iommu; | ||
181 | #endif | ||
182 | irq_flow_handler_t handle_irq; | ||
183 | struct irq_chip *chip; | ||
184 | struct msi_desc *msi_desc; | ||
185 | void *handler_data; | ||
186 | void *chip_data; | ||
187 | struct irqaction *action; /* IRQ action list */ | ||
188 | unsigned int status; /* IRQ status */ | ||
189 | |||
190 | unsigned int depth; /* nested irq disables */ | ||
191 | unsigned int wake_depth; /* nested wake enables */ | ||
192 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
193 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
194 | unsigned int irqs_unhandled; | ||
195 | raw_spinlock_t lock; | ||
196 | #ifdef CONFIG_SMP | ||
197 | cpumask_var_t affinity; | ||
198 | const struct cpumask *affinity_hint; | ||
199 | unsigned int node; | ||
200 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
201 | cpumask_var_t pending_mask; | ||
202 | #endif | ||
203 | #endif | ||
204 | atomic_t threads_active; | ||
205 | wait_queue_head_t wait_for_threads; | ||
206 | #ifdef CONFIG_PROC_FS | ||
207 | struct proc_dir_entry *dir; | ||
208 | #endif | ||
209 | const char *name; | ||
210 | } ____cacheline_internodealigned_in_smp; | ||
211 | 206 | ||
212 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | 207 | /* |
213 | struct irq_desc *desc, int node); | 208 | * Pick up the arch-dependent methods: |
214 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | 209 | */ |
210 | #include <asm/hw_irq.h> | ||
215 | 211 | ||
216 | #ifndef CONFIG_SPARSE_IRQ | 212 | #ifndef NR_IRQS_LEGACY |
217 | extern struct irq_desc irq_desc[NR_IRQS]; | 213 | # define NR_IRQS_LEGACY 0 |
218 | #endif | 214 | #endif |
219 | 215 | ||
220 | #ifdef CONFIG_NUMA_IRQ_DESC | 216 | #ifndef ARCH_IRQ_INIT_FLAGS |
221 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); | 217 | # define ARCH_IRQ_INIT_FLAGS 0 |
222 | #else | ||
223 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
224 | { | ||
225 | return desc; | ||
226 | } | ||
227 | #endif | 218 | #endif |
228 | 219 | ||
229 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | 220 | #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) |
230 | |||
231 | /* | ||
232 | * Pick up the arch-dependent methods: | ||
233 | */ | ||
234 | #include <asm/hw_irq.h> | ||
235 | 221 | ||
222 | struct irqaction; | ||
236 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 223 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
237 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 224 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
238 | 225 | ||
239 | #ifdef CONFIG_GENERIC_HARDIRQS | 226 | #ifdef CONFIG_GENERIC_HARDIRQS |
240 | 227 | ||
241 | #ifdef CONFIG_SMP | 228 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
242 | |||
243 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
244 | |||
245 | void move_native_irq(int irq); | 229 | void move_native_irq(int irq); |
246 | void move_masked_irq(int irq); | 230 | void move_masked_irq(int irq); |
247 | 231 | #else | |
248 | #else /* CONFIG_GENERIC_PENDING_IRQ */ | 232 | static inline void move_native_irq(int irq) { } |
249 | 233 | static inline void move_masked_irq(int irq) { } | |
250 | static inline void move_irq(int irq) | 234 | #endif |
251 | { | ||
252 | } | ||
253 | |||
254 | static inline void move_native_irq(int irq) | ||
255 | { | ||
256 | } | ||
257 | |||
258 | static inline void move_masked_irq(int irq) | ||
259 | { | ||
260 | } | ||
261 | |||
262 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ | ||
263 | |||
264 | #else /* CONFIG_SMP */ | ||
265 | |||
266 | #define move_native_irq(x) | ||
267 | #define move_masked_irq(x) | ||
268 | |||
269 | #endif /* CONFIG_SMP */ | ||
270 | 235 | ||
271 | extern int no_irq_affinity; | 236 | extern int no_irq_affinity; |
272 | 237 | ||
273 | static inline int irq_balancing_disabled(unsigned int irq) | ||
274 | { | ||
275 | struct irq_desc *desc; | ||
276 | |||
277 | desc = irq_to_desc(irq); | ||
278 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
279 | } | ||
280 | |||
281 | /* Handle irq action chains: */ | 238 | /* Handle irq action chains: */ |
282 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); | 239 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); |
283 | 240 | ||
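A hedged sketch of a controller converted to the irq_data based callbacks above, using the irq_data_get_irq_chip_data() accessor added further down in this file; the register offsets and chip-data type are invented for illustration.

#include <linux/io.h>
#include <linux/irq.h>

struct example_intc {
	void __iomem *base;
};

static void example_irq_mask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq & 31), intc->base + 0x0c);	/* mask-set reg */
}

static void example_irq_unmask(struct irq_data *d)
{
	struct example_intc *intc = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq & 31), intc->base + 0x10);	/* mask-clear reg */
}

static struct irq_chip example_irq_chip = {
	.name       = "example-intc",
	.irq_mask   = example_irq_mask,
	.irq_unmask = example_irq_unmask,
};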
@@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | |||
293 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 250 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
294 | extern void handle_nested_irq(unsigned int irq); | 251 | extern void handle_nested_irq(unsigned int irq); |
295 | 252 | ||
296 | /* | ||
297 | * Monolithic do_IRQ implementation. | ||
298 | */ | ||
299 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
300 | extern unsigned int __do_IRQ(unsigned int irq); | ||
301 | #endif | ||
302 | |||
303 | /* | ||
304 | * Architectures call this to let the generic IRQ layer | ||
305 | * handle an interrupt. If the descriptor is attached to an | ||
306 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
307 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
308 | */ | ||
309 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
310 | { | ||
311 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
312 | desc->handle_irq(irq, desc); | ||
313 | #else | ||
314 | if (likely(desc->handle_irq)) | ||
315 | desc->handle_irq(irq, desc); | ||
316 | else | ||
317 | __do_IRQ(irq); | ||
318 | #endif | ||
319 | } | ||
320 | |||
321 | static inline void generic_handle_irq(unsigned int irq) | ||
322 | { | ||
323 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
324 | } | ||
325 | |||
326 | /* Handling of unhandled and spurious interrupts: */ | 253 | /* Handling of unhandled and spurious interrupts: */ |
327 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 254 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
328 | irqreturn_t action_ret); | 255 | irqreturn_t action_ret); |
329 | 256 | ||
330 | /* Resending of interrupts :*/ | ||
331 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | ||
332 | 257 | ||
333 | /* Enable/disable irq debugging output: */ | 258 | /* Enable/disable irq debugging output: */ |
334 | extern int noirqdebug_setup(char *str); | 259 | extern int noirqdebug_setup(char *str); |
@@ -351,16 +276,6 @@ extern void | |||
351 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 276 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
352 | const char *name); | 277 | const char *name); |
353 | 278 | ||
354 | /* caller has locked the irq_desc and both params are valid */ | ||
355 | static inline void __set_irq_handler_unlocked(int irq, | ||
356 | irq_flow_handler_t handler) | ||
357 | { | ||
358 | struct irq_desc *desc; | ||
359 | |||
360 | desc = irq_to_desc(irq); | ||
361 | desc->handle_irq = handler; | ||
362 | } | ||
363 | |||
364 | /* | 279 | /* |
365 | * Set a highlevel flow handler for a given IRQ: | 280 | * Set a highlevel flow handler for a given IRQ: |
366 | */ | 281 | */ |
@@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq, | |||
384 | 299 | ||
385 | extern void set_irq_nested_thread(unsigned int irq, int nest); | 300 | extern void set_irq_nested_thread(unsigned int irq, int nest); |
386 | 301 | ||
387 | extern void set_irq_noprobe(unsigned int irq); | 302 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
388 | extern void set_irq_probe(unsigned int irq); | 303 | |
304 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | ||
305 | { | ||
306 | irq_modify_status(irq, 0, set); | ||
307 | } | ||
308 | |||
309 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | ||
310 | { | ||
311 | irq_modify_status(irq, clr, 0); | ||
312 | } | ||
313 | |||
314 | static inline void set_irq_noprobe(unsigned int irq) | ||
315 | { | ||
316 | irq_modify_status(irq, 0, IRQ_NOPROBE); | ||
317 | } | ||
318 | |||
319 | static inline void set_irq_probe(unsigned int irq) | ||
320 | { | ||
321 | irq_modify_status(irq, IRQ_NOPROBE, 0); | ||
322 | } | ||
389 | 323 | ||
390 | /* Handle dynamic irq creation and destruction */ | 324 | /* Handle dynamic irq creation and destruction */ |
391 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 325 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
392 | extern int create_irq(void); | 326 | extern int create_irq(void); |
393 | extern void destroy_irq(unsigned int irq); | 327 | extern void destroy_irq(unsigned int irq); |
394 | 328 | ||
395 | /* Test to see if a driver has successfully requested an irq */ | 329 | /* |
396 | static inline int irq_has_action(unsigned int irq) | 330 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
331 | * irq_free_desc instead. | ||
332 | */ | ||
333 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
334 | static inline void dynamic_irq_init(unsigned int irq) | ||
397 | { | 335 | { |
398 | struct irq_desc *desc = irq_to_desc(irq); | 336 | dynamic_irq_cleanup(irq); |
399 | return desc->action != NULL; | ||
400 | } | 337 | } |
401 | 338 | ||
402 | /* Dynamic irq helper functions */ | ||
403 | extern void dynamic_irq_init(unsigned int irq); | ||
404 | void dynamic_irq_init_keep_chip_data(unsigned int irq); | ||
405 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
406 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); | ||
407 | |||
408 | /* Set/get chip/data for an IRQ: */ | 339 | /* Set/get chip/data for an IRQ: */ |
409 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); | 340 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); |
410 | extern int set_irq_data(unsigned int irq, void *data); | 341 | extern int set_irq_data(unsigned int irq, void *data); |
411 | extern int set_irq_chip_data(unsigned int irq, void *data); | 342 | extern int set_irq_chip_data(unsigned int irq, void *data); |
412 | extern int set_irq_type(unsigned int irq, unsigned int type); | 343 | extern int set_irq_type(unsigned int irq, unsigned int type); |
413 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | 344 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); |
345 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | ||
414 | 346 | ||
415 | #define get_irq_chip(irq) (irq_to_desc(irq)->chip) | 347 | static inline struct irq_chip *get_irq_chip(unsigned int irq) |
416 | #define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) | ||
417 | #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) | ||
418 | #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) | ||
419 | |||
420 | #define get_irq_desc_chip(desc) ((desc)->chip) | ||
421 | #define get_irq_desc_chip_data(desc) ((desc)->chip_data) | ||
422 | #define get_irq_desc_data(desc) ((desc)->handler_data) | ||
423 | #define get_irq_desc_msi(desc) ((desc)->msi_desc) | ||
424 | |||
425 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
426 | |||
427 | #endif /* !CONFIG_S390 */ | ||
428 | |||
429 | #ifdef CONFIG_SMP | ||
430 | /** | ||
431 | * alloc_desc_masks - allocate cpumasks for irq_desc | ||
432 | * @desc: pointer to irq_desc struct | ||
433 | * @node: node which will be handling the cpumasks | ||
434 | * @boot: true if need bootmem | ||
435 | * | ||
436 | * Allocates affinity and pending_mask cpumask if required. | ||
437 | * Returns true if successful (or not required). | ||
438 | */ | ||
439 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | ||
440 | bool boot) | ||
441 | { | 348 | { |
442 | gfp_t gfp = GFP_ATOMIC; | 349 | struct irq_data *d = irq_get_irq_data(irq); |
443 | 350 | return d ? d->chip : NULL; | |
444 | if (boot) | 351 | } |
445 | gfp = GFP_NOWAIT; | ||
446 | |||
447 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
448 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
449 | return false; | ||
450 | 352 | ||
451 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 353 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
452 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 354 | { |
453 | free_cpumask_var(desc->affinity); | 355 | return d->chip; |
454 | return false; | ||
455 | } | ||
456 | #endif | ||
457 | #endif | ||
458 | return true; | ||
459 | } | 356 | } |
460 | 357 | ||
461 | static inline void init_desc_masks(struct irq_desc *desc) | 358 | static inline void *get_irq_chip_data(unsigned int irq) |
462 | { | 359 | { |
463 | cpumask_setall(desc->affinity); | 360 | struct irq_data *d = irq_get_irq_data(irq); |
464 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 361 | return d ? d->chip_data : NULL; |
465 | cpumask_clear(desc->pending_mask); | ||
466 | #endif | ||
467 | } | 362 | } |
468 | 363 | ||
469 | /** | 364 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
470 | * init_copy_desc_masks - copy cpumasks for irq_desc | 365 | { |
471 | * @old_desc: pointer to old irq_desc struct | 366 | return d->chip_data; |
472 | * @new_desc: pointer to new irq_desc struct | 367 | } |
473 | * | ||
474 | * Insures affinity and pending_masks are copied to new irq_desc. | ||
475 | * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the | ||
476 | * irq_desc struct so the copy is redundant. | ||
477 | */ | ||
478 | 368 | ||
479 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 369 | static inline void *get_irq_data(unsigned int irq) |
480 | struct irq_desc *new_desc) | ||
481 | { | 370 | { |
482 | #ifdef CONFIG_CPUMASK_OFFSTACK | 371 | struct irq_data *d = irq_get_irq_data(irq); |
483 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 372 | return d ? d->handler_data : NULL; |
373 | } | ||
484 | 374 | ||
485 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 375 | static inline void *irq_data_get_irq_data(struct irq_data *d) |
486 | cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); | 376 | { |
487 | #endif | 377 | return d->handler_data; |
488 | #endif | ||
489 | } | 378 | } |
490 | 379 | ||
491 | static inline void free_desc_masks(struct irq_desc *old_desc, | 380 | static inline struct msi_desc *get_irq_msi(unsigned int irq) |
492 | struct irq_desc *new_desc) | ||
493 | { | 381 | { |
494 | free_cpumask_var(old_desc->affinity); | 382 | struct irq_data *d = irq_get_irq_data(irq); |
383 | return d ? d->msi_desc : NULL; | ||
384 | } | ||
495 | 385 | ||
496 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 386 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) |
497 | free_cpumask_var(old_desc->pending_mask); | 387 | { |
498 | #endif | 388 | return d->msi_desc; |
499 | } | 389 | } |
500 | 390 | ||
501 | #else /* !CONFIG_SMP */ | 391 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
392 | void irq_free_descs(unsigned int irq, unsigned int cnt); | ||
393 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | ||
502 | 394 | ||
503 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | 395 | static inline int irq_alloc_desc(int node) |
504 | bool boot) | ||
505 | { | 396 | { |
506 | return true; | 397 | return irq_alloc_descs(-1, 0, 1, node); |
507 | } | 398 | } |
508 | 399 | ||
509 | static inline void init_desc_masks(struct irq_desc *desc) | 400 | static inline int irq_alloc_desc_at(unsigned int at, int node) |
510 | { | 401 | { |
402 | return irq_alloc_descs(at, at, 1, node); | ||
511 | } | 403 | } |
512 | 404 | ||
513 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 405 | static inline int irq_alloc_desc_from(unsigned int from, int node) |
514 | struct irq_desc *new_desc) | ||
515 | { | 406 | { |
407 | return irq_alloc_descs(-1, from, 1, node); | ||
516 | } | 408 | } |
517 | 409 | ||
518 | static inline void free_desc_masks(struct irq_desc *old_desc, | 410 | static inline void irq_free_desc(unsigned int irq) |
519 | struct irq_desc *new_desc) | ||
520 | { | 411 | { |
412 | irq_free_descs(irq, 1); | ||
521 | } | 413 | } |
522 | #endif /* CONFIG_SMP */ | 414 | |
415 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
416 | |||
417 | #endif /* !CONFIG_S390 */ | ||
523 | 418 | ||
524 | #endif /* _LINUX_IRQ_H */ | 419 | #endif /* _LINUX_IRQ_H */ |
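A hedged sketch of the new descriptor-allocation helpers added at the end of this file; the count of eight, the node argument and the per-irq setup are placeholders.

static int example_add_subirqs(int node)
{
	int base;

	/* grab any free contiguous range of eight interrupt numbers */
	base = irq_alloc_descs(-1, 0, 8, node);
	if (base < 0)
		return base;

	/* ...set_irq_chip()/set_irq_chip_data()/set_irq_handler() per irq... */

	return base;
}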
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 000000000000..4fa09d4d0b71 --- /dev/null +++ b/include/linux/irq_work.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _LINUX_IRQ_WORK_H | ||
2 | #define _LINUX_IRQ_WORK_H | ||
3 | |||
4 | struct irq_work { | ||
5 | struct irq_work *next; | ||
6 | void (*func)(struct irq_work *); | ||
7 | }; | ||
8 | |||
9 | static inline | ||
10 | void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *)) | ||
11 | { | ||
12 | entry->next = NULL; | ||
13 | entry->func = func; | ||
14 | } | ||
15 | |||
16 | bool irq_work_queue(struct irq_work *entry); | ||
17 | void irq_work_run(void); | ||
18 | void irq_work_sync(struct irq_work *entry); | ||
19 | |||
20 | #endif /* _LINUX_IRQ_WORK_H */ | ||
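A hedged usage sketch for the new irq_work API; the NMI caller is hypothetical.

#include <linux/irq_work.h>

static void example_work_func(struct irq_work *work)
{
	/* runs shortly afterwards in hard-irq context */
}

static struct irq_work example_work;	/* initialised once at setup time */

static void example_setup(void)
{
	init_irq_work(&example_work, example_work_func);
}

static void example_nmi_path(void)
{
	/* safe from NMI context; false return means it was already queued */
	irq_work_queue(&example_work);
}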
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 000000000000..979c68cc7458 --- /dev/null +++ b/include/linux/irqdesc.h | |||
@@ -0,0 +1,159 @@ | |||
1 | #ifndef _LINUX_IRQDESC_H | ||
2 | #define _LINUX_IRQDESC_H | ||
3 | |||
4 | /* | ||
5 | * Core internal functions to deal with irq descriptors | ||
6 | * | ||
7 | * This include will move to kernel/irq once we have cleaned up the tree. | ||
8 | * For now it's included from <linux/irq.h> | ||
9 | */ | ||
10 | |||
11 | struct proc_dir_entry; | ||
12 | struct timer_rand_state; | ||
13 | /** | ||
14 | * struct irq_desc - interrupt descriptor | ||
15 | * @irq_data: per irq and chip data passed down to chip functions | ||
16 | * @timer_rand_state: pointer to timer rand state struct | ||
17 | * @kstat_irqs: irq stats per cpu | ||
18 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
19 | * @action: the irq action chain | ||
20 | * @status: status information | ||
21 | * @depth: disable-depth, for nested irq_disable() calls | ||
22 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
23 | * @irq_count: stats field to detect stalled irqs | ||
24 | * @last_unhandled: aging timer for unhandled count | ||
25 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
26 | * @lock: locking for SMP | ||
27 | * @pending_mask: pending rebalanced interrupts | ||
28 | * @threads_active: number of irqaction threads currently running | ||
29 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
30 | * @dir: /proc/irq/ procfs entry | ||
31 | * @name: flow handler name for /proc/interrupts output | ||
32 | */ | ||
33 | struct irq_desc { | ||
34 | |||
35 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
36 | struct irq_data irq_data; | ||
37 | #else | ||
38 | /* | ||
39 | * This union will go away once we have fixed the direct access to | ||
40 | * irq_desc all over the place. The direct fields are a 1:1 | ||
41 | * overlay of irq_data. | ||
42 | */ | ||
43 | union { | ||
44 | struct irq_data irq_data; | ||
45 | struct { | ||
46 | unsigned int irq; | ||
47 | unsigned int node; | ||
48 | struct irq_chip *chip; | ||
49 | void *handler_data; | ||
50 | void *chip_data; | ||
51 | struct msi_desc *msi_desc; | ||
52 | #ifdef CONFIG_SMP | ||
53 | cpumask_var_t affinity; | ||
54 | #endif | ||
55 | }; | ||
56 | }; | ||
57 | #endif | ||
58 | |||
59 | struct timer_rand_state *timer_rand_state; | ||
60 | unsigned int *kstat_irqs; | ||
61 | irq_flow_handler_t handle_irq; | ||
62 | struct irqaction *action; /* IRQ action list */ | ||
63 | unsigned int status; /* IRQ status */ | ||
64 | |||
65 | unsigned int depth; /* nested irq disables */ | ||
66 | unsigned int wake_depth; /* nested wake enables */ | ||
67 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
68 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
69 | unsigned int irqs_unhandled; | ||
70 | raw_spinlock_t lock; | ||
71 | #ifdef CONFIG_SMP | ||
72 | const struct cpumask *affinity_hint; | ||
73 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
74 | cpumask_var_t pending_mask; | ||
75 | #endif | ||
76 | #endif | ||
77 | atomic_t threads_active; | ||
78 | wait_queue_head_t wait_for_threads; | ||
79 | #ifdef CONFIG_PROC_FS | ||
80 | struct proc_dir_entry *dir; | ||
81 | #endif | ||
82 | const char *name; | ||
83 | } ____cacheline_internodealigned_in_smp; | ||
84 | |||
85 | #ifndef CONFIG_SPARSE_IRQ | ||
86 | extern struct irq_desc irq_desc[NR_IRQS]; | ||
87 | #endif | ||
88 | |||
89 | /* Will be removed once the last users in power and sh are gone */ | ||
90 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | ||
91 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
92 | { | ||
93 | return desc; | ||
94 | } | ||
95 | |||
96 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
97 | |||
98 | #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) | ||
99 | #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) | ||
100 | #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) | ||
101 | #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) | ||
102 | |||
103 | /* | ||
104 | * Monolithic do_IRQ implementation. | ||
105 | */ | ||
106 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
107 | extern unsigned int __do_IRQ(unsigned int irq); | ||
108 | #endif | ||
109 | |||
110 | /* | ||
111 | * Architectures call this to let the generic IRQ layer | ||
112 | * handle an interrupt. If the descriptor is attached to an | ||
113 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
114 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
115 | */ | ||
116 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
117 | { | ||
118 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
119 | desc->handle_irq(irq, desc); | ||
120 | #else | ||
121 | if (likely(desc->handle_irq)) | ||
122 | desc->handle_irq(irq, desc); | ||
123 | else | ||
124 | __do_IRQ(irq); | ||
125 | #endif | ||
126 | } | ||
127 | |||
128 | static inline void generic_handle_irq(unsigned int irq) | ||
129 | { | ||
130 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
131 | } | ||
132 | |||
133 | /* Test to see if a driver has successfully requested an irq */ | ||
134 | static inline int irq_has_action(unsigned int irq) | ||
135 | { | ||
136 | struct irq_desc *desc = irq_to_desc(irq); | ||
137 | return desc->action != NULL; | ||
138 | } | ||
139 | |||
140 | static inline int irq_balancing_disabled(unsigned int irq) | ||
141 | { | ||
142 | struct irq_desc *desc; | ||
143 | |||
144 | desc = irq_to_desc(irq); | ||
145 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
146 | } | ||
147 | |||
148 | /* caller has locked the irq_desc and both params are valid */ | ||
149 | static inline void __set_irq_handler_unlocked(int irq, | ||
150 | irq_flow_handler_t handler) | ||
151 | { | ||
152 | struct irq_desc *desc; | ||
153 | |||
154 | desc = irq_to_desc(irq); | ||
155 | desc->handle_irq = handler; | ||
156 | } | ||
157 | #endif | ||
158 | |||
159 | #endif | ||
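A hedged sketch of the usual consumer of generic_handle_irq(): a chained demultiplex handler for a secondary controller. example_read_pending() and EXAMPLE_IRQ_BASE are hypothetical stand-ins.

#include <linux/bitops.h>
#include <linux/irq.h>

#define EXAMPLE_IRQ_BASE 64			/* hypothetical base */

static unsigned long example_read_pending(void)
{
	return 0;	/* stand-in for reading the controller's status register */
}

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = example_read_pending();
	int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(EXAMPLE_IRQ_BASE + bit);
}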
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 006bf45eae30..d176d658fe25 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define _LINUX_TRACE_IRQFLAGS_H | 12 | #define _LINUX_TRACE_IRQFLAGS_H |
13 | 13 | ||
14 | #include <linux/typecheck.h> | 14 | #include <linux/typecheck.h> |
15 | #include <asm/irqflags.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_TRACE_IRQFLAGS | 17 | #ifdef CONFIG_TRACE_IRQFLAGS |
17 | extern void trace_softirqs_on(unsigned long ip); | 18 | extern void trace_softirqs_on(unsigned long ip); |
@@ -52,17 +53,45 @@ | |||
52 | # define start_critical_timings() do { } while (0) | 53 | # define start_critical_timings() do { } while (0) |
53 | #endif | 54 | #endif |
54 | 55 | ||
55 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 56 | /* |
56 | 57 | * Wrap the arch provided IRQ routines to provide appropriate checks. | |
57 | #include <asm/irqflags.h> | 58 | */ |
59 | #define raw_local_irq_disable() arch_local_irq_disable() | ||
60 | #define raw_local_irq_enable() arch_local_irq_enable() | ||
61 | #define raw_local_irq_save(flags) \ | ||
62 | do { \ | ||
63 | typecheck(unsigned long, flags); \ | ||
64 | flags = arch_local_irq_save(); \ | ||
65 | } while (0) | ||
66 | #define raw_local_irq_restore(flags) \ | ||
67 | do { \ | ||
68 | typecheck(unsigned long, flags); \ | ||
69 | arch_local_irq_restore(flags); \ | ||
70 | } while (0) | ||
71 | #define raw_local_save_flags(flags) \ | ||
72 | do { \ | ||
73 | typecheck(unsigned long, flags); \ | ||
74 | flags = arch_local_save_flags(); \ | ||
75 | } while (0) | ||
76 | #define raw_irqs_disabled_flags(flags) \ | ||
77 | ({ \ | ||
78 | typecheck(unsigned long, flags); \ | ||
79 | arch_irqs_disabled_flags(flags); \ | ||
80 | }) | ||
81 | #define raw_irqs_disabled() (arch_irqs_disabled()) | ||
82 | #define raw_safe_halt() arch_safe_halt() | ||
58 | 83 | ||
84 | /* | ||
85 | * The local_irq_*() APIs are equal to the raw_local_irq*() | ||
86 | * if !TRACE_IRQFLAGS. | ||
87 | */ | ||
88 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | ||
59 | #define local_irq_enable() \ | 89 | #define local_irq_enable() \ |
60 | do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) | 90 | do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) |
61 | #define local_irq_disable() \ | 91 | #define local_irq_disable() \ |
62 | do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) | 92 | do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) |
63 | #define local_irq_save(flags) \ | 93 | #define local_irq_save(flags) \ |
64 | do { \ | 94 | do { \ |
65 | typecheck(unsigned long, flags); \ | ||
66 | raw_local_irq_save(flags); \ | 95 | raw_local_irq_save(flags); \ |
67 | trace_hardirqs_off(); \ | 96 | trace_hardirqs_off(); \ |
68 | } while (0) | 97 | } while (0) |
@@ -70,7 +99,6 @@ | |||
70 | 99 | ||
71 | #define local_irq_restore(flags) \ | 100 | #define local_irq_restore(flags) \ |
72 | do { \ | 101 | do { \ |
73 | typecheck(unsigned long, flags); \ | ||
74 | if (raw_irqs_disabled_flags(flags)) { \ | 102 | if (raw_irqs_disabled_flags(flags)) { \ |
75 | raw_local_irq_restore(flags); \ | 103 | raw_local_irq_restore(flags); \ |
76 | trace_hardirqs_off(); \ | 104 | trace_hardirqs_off(); \ |
@@ -79,51 +107,44 @@ | |||
79 | raw_local_irq_restore(flags); \ | 107 | raw_local_irq_restore(flags); \ |
80 | } \ | 108 | } \ |
81 | } while (0) | 109 | } while (0) |
82 | #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ | 110 | #define local_save_flags(flags) \ |
83 | /* | ||
84 | * The local_irq_*() APIs are equal to the raw_local_irq*() | ||
85 | * if !TRACE_IRQFLAGS. | ||
86 | */ | ||
87 | # define raw_local_irq_disable() local_irq_disable() | ||
88 | # define raw_local_irq_enable() local_irq_enable() | ||
89 | # define raw_local_irq_save(flags) \ | ||
90 | do { \ | ||
91 | typecheck(unsigned long, flags); \ | ||
92 | local_irq_save(flags); \ | ||
93 | } while (0) | ||
94 | # define raw_local_irq_restore(flags) \ | ||
95 | do { \ | 111 | do { \ |
96 | typecheck(unsigned long, flags); \ | 112 | raw_local_save_flags(flags); \ |
97 | local_irq_restore(flags); \ | ||
98 | } while (0) | 113 | } while (0) |
99 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | ||
100 | 114 | ||
101 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 115 | #define irqs_disabled_flags(flags) \ |
102 | #define safe_halt() \ | 116 | ({ \ |
103 | do { \ | 117 | raw_irqs_disabled_flags(flags); \ |
104 | trace_hardirqs_on(); \ | 118 | }) |
105 | raw_safe_halt(); \ | ||
106 | } while (0) | ||
107 | 119 | ||
108 | #define local_save_flags(flags) \ | 120 | #define irqs_disabled() \ |
109 | do { \ | 121 | ({ \ |
110 | typecheck(unsigned long, flags); \ | 122 | unsigned long _flags; \ |
111 | raw_local_save_flags(flags); \ | 123 | raw_local_save_flags(_flags); \ |
124 | raw_irqs_disabled_flags(_flags); \ | ||
125 | }) | ||
126 | |||
127 | #define safe_halt() \ | ||
128 | do { \ | ||
129 | trace_hardirqs_on(); \ | ||
130 | raw_safe_halt(); \ | ||
112 | } while (0) | 131 | } while (0) |
113 | 132 | ||
114 | #define irqs_disabled() \ | ||
115 | ({ \ | ||
116 | unsigned long _flags; \ | ||
117 | \ | ||
118 | raw_local_save_flags(_flags); \ | ||
119 | raw_irqs_disabled_flags(_flags); \ | ||
120 | }) | ||
121 | 133 | ||
122 | #define irqs_disabled_flags(flags) \ | 134 | #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
123 | ({ \ | 135 | |
124 | typecheck(unsigned long, flags); \ | 136 | #define local_irq_enable() do { raw_local_irq_enable(); } while (0) |
125 | raw_irqs_disabled_flags(flags); \ | 137 | #define local_irq_disable() do { raw_local_irq_disable(); } while (0) |
126 | }) | 138 | #define local_irq_save(flags) \ |
139 | do { \ | ||
140 | raw_local_irq_save(flags); \ | ||
141 | } while (0) | ||
142 | #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) | ||
143 | #define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) | ||
144 | #define irqs_disabled() (raw_irqs_disabled()) | ||
145 | #define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) | ||
146 | #define safe_halt() do { raw_safe_halt(); } while (0) | ||
147 | |||
127 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | 148 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
128 | 149 | ||
129 | #endif | 150 | #endif |
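A hedged reminder of how the wrappers above are used: flags must be a plain unsigned long (now enforced by typecheck() in the raw_* layer), and save/restore pair up within the same function.

#include <linux/irqflags.h>

static unsigned int example_counter;	/* shared with an interrupt handler */

static void example_bump_counter(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable irqs, remember previous state */
	example_counter++;
	local_irq_restore(flags);	/* re-enable only if they were enabled before */
}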
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7bf89bc8cbca..05aa8c23483f 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | extern int nr_irqs; | 26 | extern int nr_irqs; |
27 | extern struct irq_desc *irq_to_desc(unsigned int irq); | 27 | extern struct irq_desc *irq_to_desc(unsigned int irq); |
28 | unsigned int irq_get_next_irq(unsigned int offset); | ||
28 | 29 | ||
29 | # define for_each_irq_desc(irq, desc) \ | 30 | # define for_each_irq_desc(irq, desc) \ |
30 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ | 31 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ |
@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); | |||
47 | #define irq_node(irq) 0 | 48 | #define irq_node(irq) 0 |
48 | #endif | 49 | #endif |
49 | 50 | ||
51 | # define for_each_active_irq(irq) \ | ||
52 | for (irq = irq_get_next_irq(0); irq < nr_irqs; \ | ||
53 | irq = irq_get_next_irq(irq + 1)) | ||
54 | |||
50 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 55 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
51 | 56 | ||
52 | #define for_each_irq_nr(irq) \ | 57 | #define for_each_irq_nr(irq) \ |
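
irq_get_next_irq() and for_each_active_irq() added above let callers walk only the IRQ numbers that actually have descriptors, instead of probing every value up to nr_irqs. A hedged sketch (the helper is illustrative, not part of the patch):

#include <linux/irqnr.h>

static unsigned int count_active_irqs(void)
{
	unsigned int irq, count = 0;

	for_each_active_irq(irq)	/* skips the gaps in a sparse irq space */
		count++;

	return count;
}
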
diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 2a2f99fbcb16..ced1159fa4f2 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h | |||
@@ -116,7 +116,7 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) | |||
116 | /* A special ultra-optimized versions that knows they are hashing exactly | 116 | /* A special ultra-optimized versions that knows they are hashing exactly |
117 | * 3, 2 or 1 word(s). | 117 | * 3, 2 or 1 word(s). |
118 | * | 118 | * |
119 | * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally | 119 | * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally |
120 | * done at the end is not done here. | 120 | * done at the end is not done here. |
121 | */ | 121 | */ |
122 | static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) | 122 | static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) |
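
The hunk above only fixes the comment on the fixed-width helpers; for context, a short sketch of how jhash_3words() is typically used to hash a small tuple (names and seed handling are illustrative):

#include <linux/jhash.h>

/* Hash a three-word connection tuple with a caller-supplied random seed. */
static u32 conn_hash(u32 saddr, u32 daddr, u32 ports, u32 seed)
{
	return jhash_3words(saddr, daddr, ports, seed);
}
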
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 000000000000..b67cb180e6e9 --- /dev/null +++ b/include/linux/jump_label.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_H | ||
2 | #define _LINUX_JUMP_LABEL_H | ||
3 | |||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL) | ||
5 | # include <asm/jump_label.h> | ||
6 | # define HAVE_JUMP_LABEL | ||
7 | #endif | ||
8 | |||
9 | enum jump_label_type { | ||
10 | JUMP_LABEL_ENABLE, | ||
11 | JUMP_LABEL_DISABLE | ||
12 | }; | ||
13 | |||
14 | struct module; | ||
15 | |||
16 | #ifdef HAVE_JUMP_LABEL | ||
17 | |||
18 | extern struct jump_entry __start___jump_table[]; | ||
19 | extern struct jump_entry __stop___jump_table[]; | ||
20 | |||
21 | extern void arch_jump_label_transform(struct jump_entry *entry, | ||
22 | enum jump_label_type type); | ||
23 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | ||
24 | extern void jump_label_update(unsigned long key, enum jump_label_type type); | ||
25 | extern void jump_label_apply_nops(struct module *mod); | ||
26 | extern int jump_label_text_reserved(void *start, void *end); | ||
27 | |||
28 | #define jump_label_enable(key) \ | ||
29 | jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); | ||
30 | |||
31 | #define jump_label_disable(key) \ | ||
32 | jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); | ||
33 | |||
34 | #else | ||
35 | |||
36 | #define JUMP_LABEL(key, label) \ | ||
37 | do { \ | ||
38 | if (unlikely(*key)) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #define jump_label_enable(cond_var) \ | ||
43 | do { \ | ||
44 | *(cond_var) = 1; \ | ||
45 | } while (0) | ||
46 | |||
47 | #define jump_label_disable(cond_var) \ | ||
48 | do { \ | ||
49 | *(cond_var) = 0; \ | ||
50 | } while (0) | ||
51 | |||
52 | static inline int jump_label_apply_nops(struct module *mod) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static inline int jump_label_text_reserved(void *start, void *end) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | #endif | ||
63 | |||
64 | #define COND_STMT(key, stmt) \ | ||
65 | do { \ | ||
66 | __label__ jl_enabled; \ | ||
67 | JUMP_LABEL(key, jl_enabled); \ | ||
68 | if (0) { \ | ||
69 | jl_enabled: \ | ||
70 | stmt; \ | ||
71 | } \ | ||
72 | } while (0) | ||
73 | |||
74 | #endif | ||
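
A hedged sketch of the new interface (this is the early, pre-static_key form): a key variable guards a rarely taken branch, and jump_label_enable()/jump_label_disable() either patch the branch site or, without HAVE_JUMP_LABEL, simply flip the variable. The key name and functions below are invented for illustration:

#include <linux/jump_label.h>
#include <linux/kernel.h>

static int debug_key;	/* passed by address, as the fallback macros expect */

static void hot_path(void)
{
	/* pr_info() only runs once the key has been enabled */
	COND_STMT(&debug_key, pr_info("debug path taken\n"));
}

static void set_debugging(int on)
{
	if (on)
		jump_label_enable(&debug_key);
	else
		jump_label_disable(&debug_key);
}
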
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h new file mode 100644 index 000000000000..e5d012ad92c6 --- /dev/null +++ b/include/linux/jump_label_ref.h | |||
@@ -0,0 +1,44 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_REF_H | ||
2 | #define _LINUX_JUMP_LABEL_REF_H | ||
3 | |||
4 | #include <linux/jump_label.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static inline void jump_label_inc(atomic_t *key) | ||
10 | { | ||
11 | if (atomic_add_return(1, key) == 1) | ||
12 | jump_label_enable(key); | ||
13 | } | ||
14 | |||
15 | static inline void jump_label_dec(atomic_t *key) | ||
16 | { | ||
17 | if (atomic_dec_and_test(key)) | ||
18 | jump_label_disable(key); | ||
19 | } | ||
20 | |||
21 | #else /* !HAVE_JUMP_LABEL */ | ||
22 | |||
23 | static inline void jump_label_inc(atomic_t *key) | ||
24 | { | ||
25 | atomic_inc(key); | ||
26 | } | ||
27 | |||
28 | static inline void jump_label_dec(atomic_t *key) | ||
29 | { | ||
30 | atomic_dec(key); | ||
31 | } | ||
32 | |||
33 | #undef JUMP_LABEL | ||
34 | #define JUMP_LABEL(key, label) \ | ||
35 | do { \ | ||
36 | if (unlikely(__builtin_choose_expr( \ | ||
37 | __builtin_types_compatible_p(typeof(key), atomic_t *), \ | ||
38 | atomic_read((atomic_t *)(key)), *(key)))) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #endif /* HAVE_JUMP_LABEL */ | ||
43 | |||
44 | #endif /* _LINUX_JUMP_LABEL_REF_H */ | ||
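
The refcounted wrappers above let several independent users share one key: the branch is enabled on the 0->1 transition and disabled again on 1->0. A short hedged sketch with invented names:

#include <linux/jump_label_ref.h>

static atomic_t tracer_refcount = ATOMIC_INIT(0);

static void tracer_attach(void)
{
	jump_label_inc(&tracer_refcount);	/* 0 -> 1 enables the jump label */
}

static void tracer_detach(void)
{
	jump_label_dec(&tracer_refcount);	/* 1 -> 0 disables it again */
}
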
diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ea6e5244ed3f..aadff7cc2b84 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h | |||
@@ -28,6 +28,41 @@ extern int kdb_poll_idx; | |||
28 | extern int kdb_initial_cpu; | 28 | extern int kdb_initial_cpu; |
29 | extern atomic_t kdb_event; | 29 | extern atomic_t kdb_event; |
30 | 30 | ||
31 | /* Types and messages used for dynamically added kdb shell commands */ | ||
32 | |||
33 | #define KDB_MAXARGS 16 /* Maximum number of arguments to a function */ | ||
34 | |||
35 | typedef enum { | ||
36 | KDB_REPEAT_NONE = 0, /* Do not repeat this command */ | ||
37 | KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ | ||
38 | KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ | ||
39 | } kdb_repeat_t; | ||
40 | |||
41 | typedef int (*kdb_func_t)(int, const char **); | ||
42 | |||
43 | /* KDB return codes from a command or internal kdb function */ | ||
44 | #define KDB_NOTFOUND (-1) | ||
45 | #define KDB_ARGCOUNT (-2) | ||
46 | #define KDB_BADWIDTH (-3) | ||
47 | #define KDB_BADRADIX (-4) | ||
48 | #define KDB_NOTENV (-5) | ||
49 | #define KDB_NOENVVALUE (-6) | ||
50 | #define KDB_NOTIMP (-7) | ||
51 | #define KDB_ENVFULL (-8) | ||
52 | #define KDB_ENVBUFFULL (-9) | ||
53 | #define KDB_TOOMANYBPT (-10) | ||
54 | #define KDB_TOOMANYDBREGS (-11) | ||
55 | #define KDB_DUPBPT (-12) | ||
56 | #define KDB_BPTNOTFOUND (-13) | ||
57 | #define KDB_BADMODE (-14) | ||
58 | #define KDB_BADINT (-15) | ||
59 | #define KDB_INVADDRFMT (-16) | ||
60 | #define KDB_BADREG (-17) | ||
61 | #define KDB_BADCPUNUM (-18) | ||
62 | #define KDB_BADLENGTH (-19) | ||
63 | #define KDB_NOBP (-20) | ||
64 | #define KDB_BADADDR (-21) | ||
65 | |||
31 | /* | 66 | /* |
32 | * kdb_diemsg | 67 | * kdb_diemsg |
33 | * | 68 | * |
@@ -104,10 +139,26 @@ int kdb_process_cpu(const struct task_struct *p) | |||
104 | 139 | ||
105 | /* kdb access to register set for stack dumping */ | 140 | /* kdb access to register set for stack dumping */ |
106 | extern struct pt_regs *kdb_current_regs; | 141 | extern struct pt_regs *kdb_current_regs; |
142 | #ifdef CONFIG_KALLSYMS | ||
143 | extern const char *kdb_walk_kallsyms(loff_t *pos); | ||
144 | #else /* ! CONFIG_KALLSYMS */ | ||
145 | static inline const char *kdb_walk_kallsyms(loff_t *pos) | ||
146 | { | ||
147 | return NULL; | ||
148 | } | ||
149 | #endif /* ! CONFIG_KALLSYMS */ | ||
107 | 150 | ||
151 | /* Dynamic kdb shell command registration */ | ||
152 | extern int kdb_register(char *, kdb_func_t, char *, char *, short); | ||
153 | extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, | ||
154 | short, kdb_repeat_t); | ||
155 | extern int kdb_unregister(char *); | ||
108 | #else /* ! CONFIG_KGDB_KDB */ | 156 | #else /* ! CONFIG_KGDB_KDB */ |
109 | #define kdb_printf(...) | 157 | #define kdb_printf(...) |
110 | #define kdb_init(x) | 158 | #define kdb_init(x) |
159 | #define kdb_register(...) | ||
160 | #define kdb_register_repeat(...) | ||
161 | #define kdb_unregister(x) | ||
111 | #endif /* CONFIG_KGDB_KDB */ | 162 | #endif /* CONFIG_KGDB_KDB */ |
112 | enum { | 163 | enum { |
113 | KDB_NOT_INITIALIZED, | 164 | KDB_NOT_INITIALIZED, |
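
A hedged sketch of registering a dynamic kdb shell command through the interface exported above (CONFIG_KGDB_KDB kernel; the command name and handler are invented):

#include <linux/kdb.h>
#include <linux/init.h>

static int kdb_hello(int argc, const char **argv)
{
	if (argc != 0)			/* takes no arguments */
		return KDB_ARGCOUNT;
	kdb_printf("hello from kdb\n");
	return 0;
}

static int __init hello_kdb_init(void)
{
	return kdb_register_repeat("hello", kdb_hello, "",
				   "Print a test greeting", 0, KDB_REPEAT_NONE);
}
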
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2b0a35e6bc69..edef168a0406 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -58,7 +58,18 @@ extern const char linux_proc_banner[]; | |||
58 | 58 | ||
59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
61 | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) | 61 | #define roundup(x, y) ( \ |
62 | { \ | ||
63 | typeof(y) __y = y; \ | ||
64 | (((x) + (__y - 1)) / __y) * __y; \ | ||
65 | } \ | ||
66 | ) | ||
67 | #define rounddown(x, y) ( \ | ||
68 | { \ | ||
69 | typeof(x) __x = (x); \ | ||
70 | __x - (__x % (y)); \ | ||
71 | } \ | ||
72 | ) | ||
62 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 73 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
63 | { \ | 74 | { \ |
64 | typeof(divisor) __divisor = divisor; \ | 75 | typeof(divisor) __divisor = divisor; \ |
@@ -641,6 +652,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
641 | _max1 > _max2 ? _max1 : _max2; }) | 652 | _max1 > _max2 ? _max1 : _max2; }) |
642 | 653 | ||
643 | /** | 654 | /** |
655 | * min_not_zero - return the minimum that is _not_ zero, unless both are zero | ||
656 | * @x: value1 | ||
657 | * @y: value2 | ||
658 | */ | ||
659 | #define min_not_zero(x, y) ({ \ | ||
660 | typeof(x) __x = (x); \ | ||
661 | typeof(y) __y = (y); \ | ||
662 | __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) | ||
663 | |||
664 | /** | ||
644 | * clamp - return a value clamped to a given range with strict typechecking | 665 | * clamp - return a value clamped to a given range with strict typechecking |
645 | * @val: current value | 666 | * @val: current value |
646 | * @min: minimum allowable value | 667 | * @min: minimum allowable value |
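
Worked examples for the helpers added above; the statement-expression forms of roundup()/rounddown() now evaluate the divisor only once, so arguments with side effects are safe. The function is a throwaway illustration:

#include <linux/kernel.h>

static void __maybe_unused rounding_examples(void)
{
	unsigned long a = roundup(1000, 512);		/* 1024 */
	unsigned long b = rounddown(1000, 512);		/* 512 */
	unsigned long c = min_not_zero(0UL, 4096UL);	/* 4096: the zero is ignored */
	unsigned long d = min_not_zero(512UL, 4096UL);	/* 512 */

	(void)(a + b + c + d);
}
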
diff --git a/include/linux/key.h b/include/linux/key.h index cd50dfa1d4c2..3db0adce1fda 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -178,8 +178,9 @@ struct key { | |||
178 | */ | 178 | */ |
179 | union { | 179 | union { |
180 | unsigned long value; | 180 | unsigned long value; |
181 | void __rcu *rcudata; | ||
181 | void *data; | 182 | void *data; |
182 | struct keyring_list *subscriptions; | 183 | struct keyring_list __rcu *subscriptions; |
183 | } payload; | 184 | } payload; |
184 | }; | 185 | }; |
185 | 186 | ||
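
With the __rcu annotations above, sparse now insists that readers of the keyring payload go through the RCU accessors. A minimal hedged sketch (the caller is assumed to hold rcu_read_lock() or the key semaphore; the helper name is illustrative):

#include <linux/key.h>
#include <linux/rcupdate.h>

static struct keyring_list *keyring_subscriptions(struct key *keyring)
{
	return rcu_dereference(keyring->payload.subscriptions);
}
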
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 7950a37a7146..8f6d12151048 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
@@ -191,6 +191,8 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj) | |||
191 | } | 191 | } |
192 | 192 | ||
193 | extern struct kobject *kset_find_obj(struct kset *, const char *); | 193 | extern struct kobject *kset_find_obj(struct kset *, const char *); |
194 | extern struct kobject *kset_find_obj_hinted(struct kset *, const char *, | ||
195 | struct kobject *); | ||
194 | 196 | ||
195 | /* The global /sys/kernel/ kobject for people to chain off of */ | 197 | /* The global /sys/kernel/ kobject for people to chain off of */ |
196 | extern struct kobject *kernel_kobj; | 198 | extern struct kobject *kernel_kobj; |
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 636fc381c897..919ae53adc5c 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -414,6 +414,14 @@ struct kvm_enable_cap { | |||
414 | __u8 pad[64]; | 414 | __u8 pad[64]; |
415 | }; | 415 | }; |
416 | 416 | ||
417 | /* for KVM_PPC_GET_PVINFO */ | ||
418 | struct kvm_ppc_pvinfo { | ||
419 | /* out */ | ||
420 | __u32 flags; | ||
421 | __u32 hcall[4]; | ||
422 | __u8 pad[108]; | ||
423 | }; | ||
424 | |||
417 | #define KVMIO 0xAE | 425 | #define KVMIO 0xAE |
418 | 426 | ||
419 | /* | 427 | /* |
@@ -530,6 +538,8 @@ struct kvm_enable_cap { | |||
530 | #ifdef __KVM_HAVE_XCRS | 538 | #ifdef __KVM_HAVE_XCRS |
531 | #define KVM_CAP_XCRS 56 | 539 | #define KVM_CAP_XCRS 56 |
532 | #endif | 540 | #endif |
541 | #define KVM_CAP_PPC_GET_PVINFO 57 | ||
542 | #define KVM_CAP_PPC_IRQ_LEVEL 58 | ||
533 | 543 | ||
534 | #ifdef KVM_CAP_IRQ_ROUTING | 544 | #ifdef KVM_CAP_IRQ_ROUTING |
535 | 545 | ||
@@ -664,6 +674,8 @@ struct kvm_clock_data { | |||
664 | /* Available with KVM_CAP_PIT_STATE2 */ | 674 | /* Available with KVM_CAP_PIT_STATE2 */ |
665 | #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) | 675 | #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) |
666 | #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) | 676 | #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) |
677 | /* Available with KVM_CAP_PPC_GET_PVINFO */ | ||
678 | #define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo) | ||
667 | 679 | ||
668 | /* | 680 | /* |
669 | * ioctls for vcpu fds | 681 | * ioctls for vcpu fds |
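
A hedged userspace sketch of the new PPC ioctl: once KVM_CAP_PPC_GET_PVINFO is reported, the VM fd can be asked for the hypercall instruction template the guest should use. Error handling is trimmed and the function name is invented:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void dump_pvinfo(int vm_fd)
{
	struct kvm_ppc_pvinfo pv = { 0 };

	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pv) == 0)
		printf("flags=%u hcall=%08x %08x %08x %08x\n", pv.flags,
		       pv.hcall[0], pv.hcall[1], pv.hcall[2], pv.hcall[3]);
}
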
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c13cc48697aa..a0557422715e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -36,9 +36,10 @@ | |||
36 | #define KVM_REQ_PENDING_TIMER 5 | 36 | #define KVM_REQ_PENDING_TIMER 5 |
37 | #define KVM_REQ_UNHALT 6 | 37 | #define KVM_REQ_UNHALT 6 |
38 | #define KVM_REQ_MMU_SYNC 7 | 38 | #define KVM_REQ_MMU_SYNC 7 |
39 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 39 | #define KVM_REQ_CLOCK_UPDATE 8 |
40 | #define KVM_REQ_KICK 9 | 40 | #define KVM_REQ_KICK 9 |
41 | #define KVM_REQ_DEACTIVATE_FPU 10 | 41 | #define KVM_REQ_DEACTIVATE_FPU 10 |
42 | #define KVM_REQ_EVENT 11 | ||
42 | 43 | ||
43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 44 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
44 | 45 | ||
@@ -205,7 +206,7 @@ struct kvm { | |||
205 | 206 | ||
206 | struct mutex irq_lock; | 207 | struct mutex irq_lock; |
207 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 208 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
208 | struct kvm_irq_routing_table *irq_routing; | 209 | struct kvm_irq_routing_table __rcu *irq_routing; |
209 | struct hlist_head mask_notifier_list; | 210 | struct hlist_head mask_notifier_list; |
210 | struct hlist_head irq_ack_notifier_list; | 211 | struct hlist_head irq_ack_notifier_list; |
211 | #endif | 212 | #endif |
@@ -289,6 +290,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
289 | void kvm_disable_largepages(void); | 290 | void kvm_disable_largepages(void); |
290 | void kvm_arch_flush_shadow(struct kvm *kvm); | 291 | void kvm_arch_flush_shadow(struct kvm *kvm); |
291 | 292 | ||
293 | int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, | ||
294 | int nr_pages); | ||
295 | |||
292 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 296 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
293 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 297 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
294 | void kvm_release_page_clean(struct page *page); | 298 | void kvm_release_page_clean(struct page *page); |
@@ -296,6 +300,8 @@ void kvm_release_page_dirty(struct page *page); | |||
296 | void kvm_set_page_dirty(struct page *page); | 300 | void kvm_set_page_dirty(struct page *page); |
297 | void kvm_set_page_accessed(struct page *page); | 301 | void kvm_set_page_accessed(struct page *page); |
298 | 302 | ||
303 | pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr); | ||
304 | pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); | ||
299 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); | 305 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); |
300 | pfn_t gfn_to_pfn_memslot(struct kvm *kvm, | 306 | pfn_t gfn_to_pfn_memslot(struct kvm *kvm, |
301 | struct kvm_memory_slot *slot, gfn_t gfn); | 307 | struct kvm_memory_slot *slot, gfn_t gfn); |
@@ -477,8 +483,7 @@ int kvm_deassign_device(struct kvm *kvm, | |||
477 | struct kvm_assigned_dev_kernel *assigned_dev); | 483 | struct kvm_assigned_dev_kernel *assigned_dev); |
478 | #else /* CONFIG_IOMMU_API */ | 484 | #else /* CONFIG_IOMMU_API */ |
479 | static inline int kvm_iommu_map_pages(struct kvm *kvm, | 485 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
480 | gfn_t base_gfn, | 486 | struct kvm_memory_slot *slot) |
481 | unsigned long npages) | ||
482 | { | 487 | { |
483 | return 0; | 488 | return 0; |
484 | } | 489 | } |
@@ -518,11 +523,22 @@ static inline void kvm_guest_exit(void) | |||
518 | current->flags &= ~PF_VCPU; | 523 | current->flags &= ~PF_VCPU; |
519 | } | 524 | } |
520 | 525 | ||
526 | static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, | ||
527 | gfn_t gfn) | ||
528 | { | ||
529 | return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; | ||
530 | } | ||
531 | |||
521 | static inline gpa_t gfn_to_gpa(gfn_t gfn) | 532 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
522 | { | 533 | { |
523 | return (gpa_t)gfn << PAGE_SHIFT; | 534 | return (gpa_t)gfn << PAGE_SHIFT; |
524 | } | 535 | } |
525 | 536 | ||
537 | static inline gfn_t gpa_to_gfn(gpa_t gpa) | ||
538 | { | ||
539 | return (gfn_t)(gpa >> PAGE_SHIFT); | ||
540 | } | ||
541 | |||
526 | static inline hpa_t pfn_to_hpa(pfn_t pfn) | 542 | static inline hpa_t pfn_to_hpa(pfn_t pfn) |
527 | { | 543 | { |
528 | return (hpa_t)pfn << PAGE_SHIFT; | 544 | return (hpa_t)pfn << PAGE_SHIFT; |
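
The new inline helpers round out the gpa/gfn/hva conversions; a small hedged sketch of combining them inside KVM code (it assumes the address really falls inside the given memslot; the function name is invented):

#include <linux/kvm_host.h>

static unsigned long gpa_to_hva(struct kvm_memory_slot *slot, gpa_t gpa)
{
	gfn_t gfn = gpa_to_gfn(gpa);

	/* page-aligned hva of the frame, plus the offset within the page */
	return gfn_to_hva_memslot(slot, gfn) + offset_in_page(gpa);
}
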
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index d73109243fda..47a070b0520e 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h | |||
@@ -17,6 +17,8 @@ | |||
17 | 17 | ||
18 | #define KVM_HC_VAPIC_POLL_IRQ 1 | 18 | #define KVM_HC_VAPIC_POLL_IRQ 1 |
19 | #define KVM_HC_MMU_OP 2 | 19 | #define KVM_HC_MMU_OP 2 |
20 | #define KVM_HC_FEATURES 3 | ||
21 | #define KVM_HC_PPC_MAP_MAGIC_PAGE 4 | ||
20 | 22 | ||
21 | /* | 23 | /* |
22 | * hypercalls use architecture specific | 24 | * hypercalls use architecture specific |
@@ -24,11 +26,6 @@ | |||
24 | #include <asm/kvm_para.h> | 26 | #include <asm/kvm_para.h> |
25 | 27 | ||
26 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
27 | #ifdef CONFIG_KVM_GUEST | ||
28 | void __init kvm_guest_init(void); | ||
29 | #else | ||
30 | #define kvm_guest_init() do { } while (0) | ||
31 | #endif | ||
32 | 29 | ||
33 | static inline int kvm_para_has_feature(unsigned int feature) | 30 | static inline int kvm_para_has_feature(unsigned int feature) |
34 | { | 31 | { |
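
Guests normally check a paravirt feature bit before relying on the corresponding facility; a hedged sketch using the helper just below this hunk (the feature constant is arch-specific and only a placeholder here):

#include <linux/kvm_para.h>
#include <linux/types.h>

static bool can_use_pv_feature(unsigned int feature_bit)
{
	/* feature_bit would be one of the arch's KVM_FEATURE_* values */
	return kvm_para_available() && kvm_para_has_feature(feature_bit);
}
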
diff --git a/include/linux/libata.h b/include/linux/libata.h index 45fb2967b66d..15b77b8dc7e1 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <scsi/scsi_host.h> | 37 | #include <scsi/scsi_host.h> |
38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
39 | #include <linux/cdrom.h> | 39 | #include <linux/cdrom.h> |
40 | #include <linux/sched.h> | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * Define if arch has non-standard setup. This is a _PCI_ standard | 43 | * Define if arch has non-standard setup. This is a _PCI_ standard |
@@ -172,6 +173,7 @@ enum { | |||
172 | ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ | 173 | ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ |
173 | ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ | 174 | ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ |
174 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ | 175 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ |
176 | ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ | ||
175 | 177 | ||
176 | /* struct ata_port flags */ | 178 | /* struct ata_port flags */ |
177 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ | 179 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ |
@@ -196,7 +198,7 @@ enum { | |||
196 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ | 198 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ |
197 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ | 199 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ |
198 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ | 200 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ |
199 | ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */ | 201 | ATA_FLAG_LPM = (1 << 20), /* driver can handle LPM */ |
200 | ATA_FLAG_EM = (1 << 21), /* driver supports enclosure | 202 | ATA_FLAG_EM = (1 << 21), /* driver supports enclosure |
201 | * management */ | 203 | * management */ |
202 | ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity | 204 | ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity |
@@ -324,12 +326,11 @@ enum { | |||
324 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ | 326 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ |
325 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | 327 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, |
326 | ATA_EH_ENABLE_LINK = (1 << 3), | 328 | ATA_EH_ENABLE_LINK = (1 << 3), |
327 | ATA_EH_LPM = (1 << 4), /* link power management action */ | ||
328 | ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ | 329 | ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ |
329 | 330 | ||
330 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, | 331 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, |
331 | ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | | 332 | ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | |
332 | ATA_EH_ENABLE_LINK | ATA_EH_LPM, | 333 | ATA_EH_ENABLE_LINK, |
333 | 334 | ||
334 | /* ata_eh_info->flags */ | 335 | /* ata_eh_info->flags */ |
335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 336 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
@@ -341,7 +342,7 @@ enum { | |||
341 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ | 342 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ |
342 | ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ | 343 | ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ |
343 | ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ | 344 | ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ |
344 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ | 345 | ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ |
345 | 346 | ||
346 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 347 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
347 | 348 | ||
@@ -377,7 +378,6 @@ enum { | |||
377 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ | 378 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ |
378 | ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ | 379 | ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ |
379 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ | 380 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ |
380 | ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ | ||
381 | ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ | 381 | ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ |
382 | ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ | 382 | ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ |
383 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ | 383 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ |
@@ -464,6 +464,22 @@ enum ata_completion_errors { | |||
464 | AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ | 464 | AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ |
465 | }; | 465 | }; |
466 | 466 | ||
467 | /* | ||
468 | * Link power management policy: If you alter this, you also need to | ||
469 | * alter libata-scsi.c (for the ascii descriptions) | ||
470 | */ | ||
471 | enum ata_lpm_policy { | ||
472 | ATA_LPM_UNKNOWN, | ||
473 | ATA_LPM_MAX_POWER, | ||
474 | ATA_LPM_MED_POWER, | ||
475 | ATA_LPM_MIN_POWER, | ||
476 | }; | ||
477 | |||
478 | enum ata_lpm_hints { | ||
479 | ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ | ||
480 | ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ | ||
481 | }; | ||
482 | |||
467 | /* forward declarations */ | 483 | /* forward declarations */ |
468 | struct scsi_device; | 484 | struct scsi_device; |
469 | struct ata_port_operations; | 485 | struct ata_port_operations; |
@@ -478,16 +494,6 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, | |||
478 | unsigned long deadline); | 494 | unsigned long deadline); |
479 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); | 495 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); |
480 | 496 | ||
481 | /* | ||
482 | * host pm policy: If you alter this, you also need to alter libata-scsi.c | ||
483 | * (for the ascii descriptions) | ||
484 | */ | ||
485 | enum link_pm { | ||
486 | NOT_AVAILABLE, | ||
487 | MIN_POWER, | ||
488 | MAX_PERFORMANCE, | ||
489 | MEDIUM_POWER, | ||
490 | }; | ||
491 | extern struct device_attribute dev_attr_link_power_management_policy; | 497 | extern struct device_attribute dev_attr_link_power_management_policy; |
492 | extern struct device_attribute dev_attr_unload_heads; | 498 | extern struct device_attribute dev_attr_unload_heads; |
493 | extern struct device_attribute dev_attr_em_message_type; | 499 | extern struct device_attribute dev_attr_em_message_type; |
@@ -530,6 +536,10 @@ struct ata_host { | |||
530 | void *private_data; | 536 | void *private_data; |
531 | struct ata_port_operations *ops; | 537 | struct ata_port_operations *ops; |
532 | unsigned long flags; | 538 | unsigned long flags; |
539 | |||
540 | struct mutex eh_mutex; | ||
541 | struct task_struct *eh_owner; | ||
542 | |||
533 | #ifdef CONFIG_ATA_ACPI | 543 | #ifdef CONFIG_ATA_ACPI |
534 | acpi_handle acpi_handle; | 544 | acpi_handle acpi_handle; |
535 | #endif | 545 | #endif |
@@ -560,13 +570,13 @@ struct ata_queued_cmd { | |||
560 | unsigned int extrabytes; | 570 | unsigned int extrabytes; |
561 | unsigned int curbytes; | 571 | unsigned int curbytes; |
562 | 572 | ||
563 | struct scatterlist *cursg; | ||
564 | unsigned int cursg_ofs; | ||
565 | |||
566 | struct scatterlist sgent; | 573 | struct scatterlist sgent; |
567 | 574 | ||
568 | struct scatterlist *sg; | 575 | struct scatterlist *sg; |
569 | 576 | ||
577 | struct scatterlist *cursg; | ||
578 | unsigned int cursg_ofs; | ||
579 | |||
570 | unsigned int err_mask; | 580 | unsigned int err_mask; |
571 | struct ata_taskfile result_tf; | 581 | struct ata_taskfile result_tf; |
572 | ata_qc_cb_t complete_fn; | 582 | ata_qc_cb_t complete_fn; |
@@ -604,6 +614,7 @@ struct ata_device { | |||
604 | union acpi_object *gtf_cache; | 614 | union acpi_object *gtf_cache; |
605 | unsigned int gtf_filter; | 615 | unsigned int gtf_filter; |
606 | #endif | 616 | #endif |
617 | struct device tdev; | ||
607 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ | 618 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ |
608 | u64 n_sectors; /* size of device, if ATA */ | 619 | u64 n_sectors; /* size of device, if ATA */ |
609 | u64 n_native_sectors; /* native size, if ATA */ | 620 | u64 n_native_sectors; /* native size, if ATA */ |
@@ -690,6 +701,7 @@ struct ata_link { | |||
690 | struct ata_port *ap; | 701 | struct ata_port *ap; |
691 | int pmp; /* port multiplier port # */ | 702 | int pmp; /* port multiplier port # */ |
692 | 703 | ||
704 | struct device tdev; | ||
693 | unsigned int active_tag; /* active tag on this link */ | 705 | unsigned int active_tag; /* active tag on this link */ |
694 | u32 sactive; /* active NCQ commands */ | 706 | u32 sactive; /* active NCQ commands */ |
695 | 707 | ||
@@ -699,6 +711,7 @@ struct ata_link { | |||
699 | unsigned int hw_sata_spd_limit; | 711 | unsigned int hw_sata_spd_limit; |
700 | unsigned int sata_spd_limit; | 712 | unsigned int sata_spd_limit; |
701 | unsigned int sata_spd; /* current SATA PHY speed */ | 713 | unsigned int sata_spd; /* current SATA PHY speed */ |
714 | enum ata_lpm_policy lpm_policy; | ||
702 | 715 | ||
703 | /* record runtime error info, protected by host_set lock */ | 716 | /* record runtime error info, protected by host_set lock */ |
704 | struct ata_eh_info eh_info; | 717 | struct ata_eh_info eh_info; |
@@ -707,6 +720,8 @@ struct ata_link { | |||
707 | 720 | ||
708 | struct ata_device device[ATA_MAX_DEVICES]; | 721 | struct ata_device device[ATA_MAX_DEVICES]; |
709 | }; | 722 | }; |
723 | #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) | ||
724 | #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) | ||
710 | 725 | ||
711 | struct ata_port { | 726 | struct ata_port { |
712 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 727 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
@@ -752,6 +767,7 @@ struct ata_port { | |||
752 | struct ata_port_stats stats; | 767 | struct ata_port_stats stats; |
753 | struct ata_host *host; | 768 | struct ata_host *host; |
754 | struct device *dev; | 769 | struct device *dev; |
770 | struct device tdev; | ||
755 | 771 | ||
756 | struct mutex scsi_scan_mutex; | 772 | struct mutex scsi_scan_mutex; |
757 | struct delayed_work hotplug_task; | 773 | struct delayed_work hotplug_task; |
@@ -767,7 +783,7 @@ struct ata_port { | |||
767 | 783 | ||
768 | pm_message_t pm_mesg; | 784 | pm_message_t pm_mesg; |
769 | int *pm_result; | 785 | int *pm_result; |
770 | enum link_pm pm_policy; | 786 | enum ata_lpm_policy target_lpm_policy; |
771 | 787 | ||
772 | struct timer_list fastdrain_timer; | 788 | struct timer_list fastdrain_timer; |
773 | unsigned long fastdrain_cnt; | 789 | unsigned long fastdrain_cnt; |
@@ -833,8 +849,8 @@ struct ata_port_operations { | |||
833 | int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); | 849 | int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); |
834 | void (*pmp_attach)(struct ata_port *ap); | 850 | void (*pmp_attach)(struct ata_port *ap); |
835 | void (*pmp_detach)(struct ata_port *ap); | 851 | void (*pmp_detach)(struct ata_port *ap); |
836 | int (*enable_pm)(struct ata_port *ap, enum link_pm policy); | 852 | int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy, |
837 | void (*disable_pm)(struct ata_port *ap); | 853 | unsigned hints); |
838 | 854 | ||
839 | /* | 855 | /* |
840 | * Start, stop, suspend and resume | 856 | * Start, stop, suspend and resume |
@@ -946,6 +962,8 @@ extern int sata_link_debounce(struct ata_link *link, | |||
946 | const unsigned long *params, unsigned long deadline); | 962 | const unsigned long *params, unsigned long deadline); |
947 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, | 963 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, |
948 | unsigned long deadline); | 964 | unsigned long deadline); |
965 | extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, | ||
966 | bool spm_wakeup); | ||
949 | extern int sata_link_hardreset(struct ata_link *link, | 967 | extern int sata_link_hardreset(struct ata_link *link, |
950 | const unsigned long *timing, unsigned long deadline, | 968 | const unsigned long *timing, unsigned long deadline, |
951 | bool *online, int (*check_ready)(struct ata_link *)); | 969 | bool *online, int (*check_ready)(struct ata_link *)); |
@@ -991,8 +1009,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); | |||
991 | extern void ata_host_resume(struct ata_host *host); | 1009 | extern void ata_host_resume(struct ata_host *host); |
992 | #endif | 1010 | #endif |
993 | extern int ata_ratelimit(void); | 1011 | extern int ata_ratelimit(void); |
994 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 1012 | extern void ata_msleep(struct ata_port *ap, unsigned int msecs); |
995 | unsigned long interval, unsigned long timeout); | 1013 | extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, |
1014 | u32 val, unsigned long interval, unsigned long timeout); | ||
996 | extern int atapi_cmd_type(u8 opcode); | 1015 | extern int atapi_cmd_type(u8 opcode); |
997 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, | 1016 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, |
998 | u8 pmp, int is_cmd, u8 *fis); | 1017 | u8 pmp, int is_cmd, u8 *fis); |
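
The per-port enable_pm()/disable_pm() pair is replaced by a per-link ->set_lpm() hook taking the new ata_lpm_policy enum. A hedged sketch of a low-level driver wiring it up; the driver name is invented, and SCR-based hosts can often simply delegate to the new sata_link_scr_lpm() helper:

#include <linux/libata.h>

static int mydrv_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			 unsigned hints)
{
	/* illustrative only: hand the policy to the generic SCR helper;
	 * the last argument is the new spm_wakeup flag */
	return sata_link_scr_lpm(link, policy, false);
}

static struct ata_port_operations mydrv_port_ops = {
	.inherits	= &ata_base_port_ops,
	.set_lpm	= mydrv_set_lpm,
};
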
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 0e8a346424bb..d4292c8431e0 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h | |||
@@ -1,6 +1,52 @@ | |||
1 | #ifndef __LIS3LV02D_H_ | 1 | #ifndef __LIS3LV02D_H_ |
2 | #define __LIS3LV02D_H_ | 2 | #define __LIS3LV02D_H_ |
3 | 3 | ||
4 | /** | ||
5 | * struct lis3lv02d_platform_data - lis3 chip family platform data | ||
6 | * @click_flags: Click detection unit configuration | ||
7 | * @click_thresh_x: Click detection unit x axis threshold | ||
8 | * @click_thresh_y: Click detection unit y axis threshold | ||
9 | * @click_thresh_z: Click detection unit z axis threshold | ||
10 | * @click_time_limit: Click detection unit time parameter | ||
11 | * @click_latency: Click detection unit latency parameter | ||
12 | * @click_window: Click detection unit window parameter | ||
13 | * @irq_cfg: On chip irq source and type configuration (click / | ||
14 | * data available / wake up, open drain, polarity) | ||
15 | * @irq_flags1: Additional irq triggering flags for irq channel 0 | ||
16 | * @irq_flags2: Additional irq triggering flags for irq channel 1 | ||
17 | * @duration1: Wake up unit 1 duration parameter | ||
18 | * @duration2: Wake up unit 2 duration parameter | ||
19 | * @wakeup_flags: Wake up unit 1 flags | ||
20 | * @wakeup_thresh: Wake up unit 1 threshold value | ||
21 | * @wakeup_flags2: Wake up unit 2 flags | ||
22 | * @wakeup_thresh2: Wake up unit 2 threshold value | ||
23 | * @hipass_ctrl: High pass filter control (enable / disable, cut off | ||
24 | * frequency) | ||
25 | * @axis_x: Sensor orientation remapping for x-axis | ||
26 | * @axis_y: Sensor orientation remapping for y-axis | ||
27 | * @axis_z: Sensor orientation remapping for z-axis | ||
28 | * @driver_features: Enable bits for different features. Disabled by default | ||
29 | * @default_rate: Default sampling rate. 0 means reset default | ||
30 | * @setup_resources: Interrupt line setup call back function | ||
31 | * @release_resources: Interrupt line release call back function | ||
32 | * @st_min_limits[3]: Selftest acceptance minimum values | ||
33 | * @st_max_limits[3]: Selftest acceptance maximum values | ||
34 | * @irq2: Irq line 2 number | ||
35 | * | ||
36 | * Platform data is used to setup the sensor chip. Meaning of the different | ||
37 | * chip features can be found from the data sheet. It is publicly available | ||
38 | * at www.st.com web pages. Currently the platform data is used | ||
39 | * only for the 8 bit device. The 8 bit device has two wake up / free fall | ||
40 | * detection units and a click detection unit. There are so many ways to | ||
41 | * configure the chip that it is quite hard to explain the deeper meaning of | ||
42 | * the fields here. Behaviour of the detection blocks varies heavily depending | ||
43 | * on the configuration. For example, the interrupt detection block can use | ||
44 | * high pass filtered data, which makes it react to changes in acceleration. | ||
45 | * Irq_flags can be used to enable interrupt detection on both edges. | ||
46 | * With proper chip configuration this produces an interrupt both when a | ||
47 | * trigger condition starts and when it goes away. | ||
48 | */ | ||
49 | |||
4 | struct lis3lv02d_platform_data { | 50 | struct lis3lv02d_platform_data { |
5 | /* please note: the 'click' feature is only supported for | 51 | /* please note: the 'click' feature is only supported for |
6 | * LIS[32]02DL variants of the chip and will be ignored for | 52 | * LIS[32]02DL variants of the chip and will be ignored for |
@@ -36,7 +82,10 @@ struct lis3lv02d_platform_data { | |||
36 | #define LIS3_IRQ_OPEN_DRAIN (1 << 6) | 82 | #define LIS3_IRQ_OPEN_DRAIN (1 << 6) |
37 | #define LIS3_IRQ_ACTIVE_LOW (1 << 7) | 83 | #define LIS3_IRQ_ACTIVE_LOW (1 << 7) |
38 | unsigned char irq_cfg; | 84 | unsigned char irq_cfg; |
39 | 85 | unsigned char irq_flags1; /* Additional irq edge / level flags */ | |
86 | unsigned char irq_flags2; /* Additional irq edge / level flags */ | ||
87 | unsigned char duration1; | ||
88 | unsigned char duration2; | ||
40 | #define LIS3_WAKEUP_X_LO (1 << 0) | 89 | #define LIS3_WAKEUP_X_LO (1 << 0) |
41 | #define LIS3_WAKEUP_X_HI (1 << 1) | 90 | #define LIS3_WAKEUP_X_HI (1 << 1) |
42 | #define LIS3_WAKEUP_Y_LO (1 << 2) | 91 | #define LIS3_WAKEUP_Y_LO (1 << 2) |
@@ -64,6 +113,10 @@ struct lis3lv02d_platform_data { | |||
64 | s8 axis_x; | 113 | s8 axis_x; |
65 | s8 axis_y; | 114 | s8 axis_y; |
66 | s8 axis_z; | 115 | s8 axis_z; |
116 | #define LIS3_USE_REGULATOR_CTRL 0x01 | ||
117 | #define LIS3_USE_BLOCK_READ 0x02 | ||
118 | u16 driver_features; | ||
119 | int default_rate; | ||
67 | int (*setup_resources)(void); | 120 | int (*setup_resources)(void); |
68 | int (*release_resources)(void); | 121 | int (*release_resources)(void); |
69 | /* Limits for selftest are specified in chip data sheet */ | 122 | /* Limits for selftest are specified in chip data sheet */ |
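
A hedged board-file sketch filling in a few of the newly documented fields; the values are placeholders rather than datasheet recommendations:

#include <linux/lis3lv02d.h>

static struct lis3lv02d_platform_data board_lis3_pdata = {
	.irq_cfg	 = LIS3_IRQ_ACTIVE_LOW,
	.wakeup_flags	 = LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_LO,
	.wakeup_thresh	 = 8,
	.duration1	 = 2,
	.driver_features = LIS3_USE_BLOCK_READ,
	.default_rate	 = 100,
	.axis_x		 = 1,
	.axis_y		 = 2,
	.axis_z		 = 3,
};
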
diff --git a/include/linux/list.h b/include/linux/list.h index d167b5d7c0ac..88a000617d77 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
7 | #include <linux/prefetch.h> | 7 | #include <linux/prefetch.h> |
8 | #include <asm/system.h> | ||
9 | 8 | ||
10 | /* | 9 | /* |
11 | * Simple doubly linked list implementation. | 10 | * Simple doubly linked list implementation. |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 06aed8305bf3..71c09b26c759 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -32,6 +32,17 @@ extern int lock_stat; | |||
32 | #define MAX_LOCKDEP_SUBCLASSES 8UL | 32 | #define MAX_LOCKDEP_SUBCLASSES 8UL |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * NR_LOCKDEP_CACHING_CLASSES ... Number of classes | ||
36 | * cached in the instance of lockdep_map | ||
37 | * | ||
38 | * Currently the main class (subclass == 0) and the single depth subclass | ||
39 | * are cached in lockdep_map. This optimization mainly targets the highly | ||
40 | * contended rq->lock, which double_rq_lock() acquires with a single depth | ||
41 | * subclass. | ||
42 | */ | ||
43 | #define NR_LOCKDEP_CACHING_CLASSES 2 | ||
44 | |||
45 | /* | ||
35 | * Lock-classes are keyed via unique addresses, by embedding the | 46 | * Lock-classes are keyed via unique addresses, by embedding the |
36 | * lockclass-key into the kernel (or module) .data section. (For | 47 | * lockclass-key into the kernel (or module) .data section. (For |
37 | * static locks we use the lock address itself as the key.) | 48 | * static locks we use the lock address itself as the key.) |
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class); | |||
138 | */ | 149 | */ |
139 | struct lockdep_map { | 150 | struct lockdep_map { |
140 | struct lock_class_key *key; | 151 | struct lock_class_key *key; |
141 | struct lock_class *class_cache; | 152 | struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; |
142 | const char *name; | 153 | const char *name; |
143 | #ifdef CONFIG_LOCK_STAT | 154 | #ifdef CONFIG_LOCK_STAT |
144 | int cpu; | 155 | int cpu; |
@@ -424,14 +435,6 @@ do { \ | |||
424 | 435 | ||
425 | #endif /* CONFIG_LOCKDEP */ | 436 | #endif /* CONFIG_LOCKDEP */ |
426 | 437 | ||
427 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
428 | extern void early_init_irq_lock_class(void); | ||
429 | #else | ||
430 | static inline void early_init_irq_lock_class(void) | ||
431 | { | ||
432 | } | ||
433 | #endif | ||
434 | |||
435 | #ifdef CONFIG_TRACE_IRQFLAGS | 438 | #ifdef CONFIG_TRACE_IRQFLAGS |
436 | extern void early_boot_irqs_off(void); | 439 | extern void early_boot_irqs_off(void); |
437 | extern void early_boot_irqs_on(void); | 440 | extern void early_boot_irqs_on(void); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index a59faf2b5edd..62a10c2a11f2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_MEMBLOCK_H | 2 | #define _LINUX_MEMBLOCK_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #ifdef CONFIG_HAVE_MEMBLOCK | ||
5 | /* | 6 | /* |
6 | * Logical memory blocks. | 7 | * Logical memory blocks. |
7 | * | 8 | * |
@@ -16,73 +17,150 @@ | |||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
18 | 19 | ||
19 | #define MAX_MEMBLOCK_REGIONS 128 | 20 | #include <asm/memblock.h> |
20 | 21 | ||
21 | struct memblock_property { | 22 | #define INIT_MEMBLOCK_REGIONS 128 |
22 | u64 base; | 23 | #define MEMBLOCK_ERROR 0 |
23 | u64 size; | ||
24 | }; | ||
25 | 24 | ||
26 | struct memblock_region { | 25 | struct memblock_region { |
27 | unsigned long cnt; | 26 | phys_addr_t base; |
28 | u64 size; | 27 | phys_addr_t size; |
29 | struct memblock_property region[MAX_MEMBLOCK_REGIONS+1]; | 28 | }; |
29 | |||
30 | struct memblock_type { | ||
31 | unsigned long cnt; /* number of regions */ | ||
32 | unsigned long max; /* size of the allocated array */ | ||
33 | struct memblock_region *regions; | ||
30 | }; | 34 | }; |
31 | 35 | ||
32 | struct memblock { | 36 | struct memblock { |
33 | unsigned long debug; | 37 | phys_addr_t current_limit; |
34 | u64 rmo_size; | 38 | phys_addr_t memory_size; /* Updated by memblock_analyze() */ |
35 | struct memblock_region memory; | 39 | struct memblock_type memory; |
36 | struct memblock_region reserved; | 40 | struct memblock_type reserved; |
37 | }; | 41 | }; |
38 | 42 | ||
39 | extern struct memblock memblock; | 43 | extern struct memblock memblock; |
44 | extern int memblock_debug; | ||
45 | extern int memblock_can_resize; | ||
40 | 46 | ||
41 | extern void __init memblock_init(void); | 47 | #define memblock_dbg(fmt, ...) \ |
42 | extern void __init memblock_analyze(void); | 48 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
43 | extern long memblock_add(u64 base, u64 size); | 49 | |
44 | extern long memblock_remove(u64 base, u64 size); | 50 | u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); |
45 | extern long __init memblock_free(u64 base, u64 size); | 51 | int memblock_free_reserved_regions(void); |
46 | extern long __init memblock_reserve(u64 base, u64 size); | 52 | int memblock_reserve_reserved_regions(void); |
47 | extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, | 53 | |
48 | u64 (*nid_range)(u64, u64, int *)); | 54 | extern void memblock_init(void); |
49 | extern u64 __init memblock_alloc(u64 size, u64 align); | 55 | extern void memblock_analyze(void); |
50 | extern u64 __init memblock_alloc_base(u64 size, | 56 | extern long memblock_add(phys_addr_t base, phys_addr_t size); |
51 | u64, u64 max_addr); | 57 | extern long memblock_remove(phys_addr_t base, phys_addr_t size); |
52 | extern u64 __init __memblock_alloc_base(u64 size, | 58 | extern long memblock_free(phys_addr_t base, phys_addr_t size); |
53 | u64 align, u64 max_addr); | 59 | extern long memblock_reserve(phys_addr_t base, phys_addr_t size); |
54 | extern u64 __init memblock_phys_mem_size(void); | 60 | |
55 | extern u64 memblock_end_of_DRAM(void); | 61 | /* The numa aware allocator is only available if |
56 | extern void __init memblock_enforce_memory_limit(u64 memory_limit); | 62 | * CONFIG_ARCH_POPULATES_NODE_MAP is set |
57 | extern int __init memblock_is_reserved(u64 addr); | 63 | */ |
58 | extern int memblock_is_region_reserved(u64 base, u64 size); | 64 | extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, |
59 | extern int memblock_find(struct memblock_property *res); | 65 | int nid); |
66 | extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, | ||
67 | int nid); | ||
68 | |||
69 | extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); | ||
70 | |||
71 | /* Flags for memblock_alloc_base() and __memblock_alloc_base() */ | ||
72 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) | ||
73 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 | ||
74 | |||
75 | extern phys_addr_t memblock_alloc_base(phys_addr_t size, | ||
76 | phys_addr_t align, | ||
77 | phys_addr_t max_addr); | ||
78 | extern phys_addr_t __memblock_alloc_base(phys_addr_t size, | ||
79 | phys_addr_t align, | ||
80 | phys_addr_t max_addr); | ||
81 | extern phys_addr_t memblock_phys_mem_size(void); | ||
82 | extern phys_addr_t memblock_end_of_DRAM(void); | ||
83 | extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); | ||
84 | extern int memblock_is_memory(phys_addr_t addr); | ||
85 | extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | ||
86 | extern int memblock_is_reserved(phys_addr_t addr); | ||
87 | extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | ||
60 | 88 | ||
61 | extern void memblock_dump_all(void); | 89 | extern void memblock_dump_all(void); |
62 | 90 | ||
63 | static inline u64 | 91 | /* Provided by the architecture */ |
64 | memblock_size_bytes(struct memblock_region *type, unsigned long region_nr) | 92 | extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); |
93 | extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, | ||
94 | phys_addr_t addr2, phys_addr_t size2); | ||
95 | |||
96 | /** | ||
97 | * memblock_set_current_limit - Set the current allocation limit to allow | ||
98 | * limiting allocations to what is currently | ||
99 | * accessible during boot | ||
100 | * @limit: New limit value (physical address) | ||
101 | */ | ||
102 | extern void memblock_set_current_limit(phys_addr_t limit); | ||
103 | |||
104 | |||
105 | /* | ||
106 | * pfn conversion functions | ||
107 | * | ||
108 | * While the memory MEMBLOCKs should always be page aligned, the reserved | ||
109 | * MEMBLOCKs may not be. These accessors attempt to provide a very clear | ||
110 | * idea of what they return for such non-aligned MEMBLOCKs. | ||
111 | */ | ||
112 | |||
113 | /** | ||
114 | * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region | ||
115 | * @reg: memblock_region structure | ||
116 | */ | ||
117 | static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) | ||
65 | { | 118 | { |
66 | return type->region[region_nr].size; | 119 | return PFN_UP(reg->base); |
67 | } | 120 | } |
68 | static inline u64 | 121 | |
69 | memblock_size_pages(struct memblock_region *type, unsigned long region_nr) | 122 | /** |
123 | * memblock_region_memory_end_pfn - Return the end_pfn of this region | ||
124 | * @reg: memblock_region structure | ||
125 | */ | ||
126 | static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) | ||
70 | { | 127 | { |
71 | return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT; | 128 | return PFN_DOWN(reg->base + reg->size); |
72 | } | 129 | } |
73 | static inline u64 | 130 | |
74 | memblock_start_pfn(struct memblock_region *type, unsigned long region_nr) | 131 | /** |
132 | * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region | ||
133 | * @reg: memblock_region structure | ||
134 | */ | ||
135 | static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) | ||
75 | { | 136 | { |
76 | return type->region[region_nr].base >> PAGE_SHIFT; | 137 | return PFN_DOWN(reg->base); |
77 | } | 138 | } |
78 | static inline u64 | 139 | |
79 | memblock_end_pfn(struct memblock_region *type, unsigned long region_nr) | 140 | /** |
141 | * memblock_region_reserved_end_pfn - Return the end_pfn of this region | ||
142 | * @reg: memblock_region structure | ||
143 | */ | ||
144 | static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) | ||
80 | { | 145 | { |
81 | return memblock_start_pfn(type, region_nr) + | 146 | return PFN_UP(reg->base + reg->size); |
82 | memblock_size_pages(type, region_nr); | ||
83 | } | 147 | } |
84 | 148 | ||
85 | #include <asm/memblock.h> | 149 | #define for_each_memblock(memblock_type, region) \ |
150 | for (region = memblock.memblock_type.regions; \ | ||
151 | region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ | ||
152 | region++) | ||
153 | |||
154 | |||
155 | #ifdef ARCH_DISCARD_MEMBLOCK | ||
156 | #define __init_memblock __init | ||
157 | #define __initdata_memblock __initdata | ||
158 | #else | ||
159 | #define __init_memblock | ||
160 | #define __initdata_memblock | ||
161 | #endif | ||
162 | |||
163 | #endif /* CONFIG_HAVE_MEMBLOCK */ | ||
86 | 164 | ||
87 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
88 | 166 | ||
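
The reworked API above drops the fixed MAX_MEMBLOCK_REGIONS array in favour of resizable region lists and phys_addr_t throughout. A hedged sketch of typical early arch setup code against it; the function, sizes and limit are illustrative only:

#include <linux/memblock.h>

static void __init board_memblock_setup(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t fw;

	memblock_init();
	memblock_add(base, size);
	memblock_analyze();

	/* restrict early allocations to what the boot-time mapping reaches */
	memblock_set_current_limit(base + (256UL << 20));

	/* carve out 1MB, 1MB aligned, below the current limit */
	fw = __memblock_alloc_base(1 << 20, 1 << 20, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (fw == MEMBLOCK_ERROR)
		panic("no room for firmware buffer");

	memblock_dump_all();
}
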
diff --git a/include/linux/memory.h b/include/linux/memory.h index 85582e1bcee9..06c1fa0a5c7b 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -23,6 +23,8 @@ | |||
23 | struct memory_block { | 23 | struct memory_block { |
24 | unsigned long phys_index; | 24 | unsigned long phys_index; |
25 | unsigned long state; | 25 | unsigned long state; |
26 | int section_count; | ||
27 | |||
26 | /* | 28 | /* |
27 | * This serializes all state change requests. It isn't | 29 | * This serializes all state change requests. It isn't |
28 | * held during creation because the control files are | 30 | * held during creation because the control files are |
@@ -113,6 +115,8 @@ extern int memory_dev_init(void); | |||
113 | extern int remove_memory_block(unsigned long, struct mem_section *, int); | 115 | extern int remove_memory_block(unsigned long, struct mem_section *, int); |
114 | extern int memory_notify(unsigned long val, void *v); | 116 | extern int memory_notify(unsigned long val, void *v); |
115 | extern int memory_isolate_notify(unsigned long val, void *v); | 117 | extern int memory_isolate_notify(unsigned long val, void *v); |
118 | extern struct memory_block *find_memory_block_hinted(struct mem_section *, | ||
119 | struct memory_block *); | ||
116 | extern struct memory_block *find_memory_block(struct mem_section *); | 120 | extern struct memory_block *find_memory_block(struct mem_section *); |
117 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 121 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
118 | enum mem_add_context { BOOT, HOTPLUG }; | 122 | enum mem_add_context { BOOT, HOTPLUG }; |
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h index e47f770d3068..eff3094ca84e 100644 --- a/include/linux/mfd/tc35892.h +++ b/include/linux/mfd/tc35892.h | |||
@@ -111,9 +111,13 @@ extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val); | |||
111 | * struct tc35892_gpio_platform_data - TC35892 GPIO platform data | 111 | * struct tc35892_gpio_platform_data - TC35892 GPIO platform data |
112 | * @gpio_base: first gpio number assigned to TC35892. A maximum of | 112 | * @gpio_base: first gpio number assigned to TC35892. A maximum of |
113 | * %TC35892_NR_GPIOS GPIOs will be allocated. | 113 | * %TC35892_NR_GPIOS GPIOs will be allocated. |
114 | * @setup: callback for board-specific initialization | ||
115 | * @remove: callback for board-specific teardown | ||
114 | */ | 116 | */ |
115 | struct tc35892_gpio_platform_data { | 117 | struct tc35892_gpio_platform_data { |
116 | int gpio_base; | 118 | int gpio_base; |
119 | void (*setup)(struct tc35892 *tc35892, unsigned gpio_base); | ||
120 | void (*remove)(struct tc35892 *tc35892, unsigned gpio_base); | ||
117 | }; | 121 | }; |
118 | 122 | ||
119 | /** | 123 | /** |
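
A hedged board sketch of the new setup/remove callbacks; the board hook below is a placeholder that would configure lines once the expander's GPIOs exist:

#include <linux/mfd/tc35892.h>

static void board_tc35892_setup(struct tc35892 *tc35892, unsigned gpio_base)
{
	/* board-specific: request or remap lines starting at gpio_base */
}

static struct tc35892_gpio_platform_data board_tc35892_gpio = {
	.gpio_base = -1,	/* -1 commonly requests a dynamic gpio base */
	.setup	   = board_tc35892_setup,
};
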
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 7238231b8dd4..085527fb8261 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -14,6 +14,8 @@ extern int migrate_page(struct address_space *, | |||
14 | struct page *, struct page *); | 14 | struct page *, struct page *); |
15 | extern int migrate_pages(struct list_head *l, new_page_t x, | 15 | extern int migrate_pages(struct list_head *l, new_page_t x, |
16 | unsigned long private, int offlining); | 16 | unsigned long private, int offlining); |
17 | extern int migrate_huge_pages(struct list_head *l, new_page_t x, | ||
18 | unsigned long private, int offlining); | ||
17 | 19 | ||
18 | extern int fail_migrate_page(struct address_space *, | 20 | extern int fail_migrate_page(struct address_space *, |
19 | struct page *, struct page *); | 21 | struct page *, struct page *); |
@@ -23,12 +25,17 @@ extern int migrate_prep_local(void); | |||
23 | extern int migrate_vmas(struct mm_struct *mm, | 25 | extern int migrate_vmas(struct mm_struct *mm, |
24 | const nodemask_t *from, const nodemask_t *to, | 26 | const nodemask_t *from, const nodemask_t *to, |
25 | unsigned long flags); | 27 | unsigned long flags); |
28 | extern void migrate_page_copy(struct page *newpage, struct page *page); | ||
29 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, | ||
30 | struct page *newpage, struct page *page); | ||
26 | #else | 31 | #else |
27 | #define PAGE_MIGRATION 0 | 32 | #define PAGE_MIGRATION 0 |
28 | 33 | ||
29 | static inline void putback_lru_pages(struct list_head *l) {} | 34 | static inline void putback_lru_pages(struct list_head *l) {} |
30 | static inline int migrate_pages(struct list_head *l, new_page_t x, | 35 | static inline int migrate_pages(struct list_head *l, new_page_t x, |
31 | unsigned long private, int offlining) { return -ENOSYS; } | 36 | unsigned long private, int offlining) { return -ENOSYS; } |
37 | static inline int migrate_huge_pages(struct list_head *l, new_page_t x, | ||
38 | unsigned long private, int offlining) { return -ENOSYS; } | ||
32 | 39 | ||
33 | static inline int migrate_prep(void) { return -ENOSYS; } | 40 | static inline int migrate_prep(void) { return -ENOSYS; } |
34 | static inline int migrate_prep_local(void) { return -ENOSYS; } | 41 | static inline int migrate_prep_local(void) { return -ENOSYS; } |
@@ -40,6 +47,15 @@ static inline int migrate_vmas(struct mm_struct *mm, | |||
40 | return -ENOSYS; | 47 | return -ENOSYS; |
41 | } | 48 | } |
42 | 49 | ||
50 | static inline void migrate_page_copy(struct page *newpage, | ||
51 | struct page *page) {} | ||
52 | |||
53 | static inline int migrate_huge_page_move_mapping(struct address_space *mapping, | ||
54 | struct page *newpage, struct page *page) | ||
55 | { | ||
56 | return -ENOSYS; | ||
57 | } | ||
58 | |||
43 | /* Possible settings for the migrate_page() method in address_operations */ | 59 | /* Possible settings for the migrate_page() method in address_operations */ |
44 | #define migrate_page NULL | 60 | #define migrate_page NULL |
45 | #define fail_migrate_page NULL | 61 | #define fail_migrate_page NULL |
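
migrate_huge_pages() mirrors migrate_pages() for hugepages: the caller isolates pages onto a private list and supplies a new_page_t allocator. A hedged sketch, assuming the alloc_huge_page_node() helper introduced in the same series; names are illustrative:

#include <linux/migrate.h>
#include <linux/hugetlb.h>

static struct page *new_huge_page(struct page *page, unsigned long private,
				  int **result)
{
	return alloc_huge_page_node(page_hstate(compound_head(page)),
				    numa_node_id());
}

static int migrate_huge_list(struct list_head *pagelist)
{
	/* 0 on success; on failure the caller puts the pages back */
	return migrate_huge_pages(pagelist, new_huge_page, 0, 0);
}
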
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 0f82293a82ed..78a1b9671752 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -56,6 +56,7 @@ enum { | |||
56 | MLX4_CMD_QUERY_HCA = 0xb, | 56 | MLX4_CMD_QUERY_HCA = 0xb, |
57 | MLX4_CMD_QUERY_PORT = 0x43, | 57 | MLX4_CMD_QUERY_PORT = 0x43, |
58 | MLX4_CMD_SENSE_PORT = 0x4d, | 58 | MLX4_CMD_SENSE_PORT = 0x4d, |
59 | MLX4_CMD_HW_HEALTH_CHECK = 0x50, | ||
59 | MLX4_CMD_SET_PORT = 0xc, | 60 | MLX4_CMD_SET_PORT = 0xc, |
60 | MLX4_CMD_ACCESS_DDR = 0x2e, | 61 | MLX4_CMD_ACCESS_DDR = 0x2e, |
61 | MLX4_CMD_MAP_ICM = 0xffa, | 62 | MLX4_CMD_MAP_ICM = 0xffa, |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7a7f9c1e679a..7338654c02b4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -186,6 +186,10 @@ struct mlx4_caps { | |||
186 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; | 186 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; |
187 | int gid_table_len[MLX4_MAX_PORTS + 1]; | 187 | int gid_table_len[MLX4_MAX_PORTS + 1]; |
188 | int pkey_table_len[MLX4_MAX_PORTS + 1]; | 188 | int pkey_table_len[MLX4_MAX_PORTS + 1]; |
189 | int trans_type[MLX4_MAX_PORTS + 1]; | ||
190 | int vendor_oui[MLX4_MAX_PORTS + 1]; | ||
191 | int wavelength[MLX4_MAX_PORTS + 1]; | ||
192 | u64 trans_code[MLX4_MAX_PORTS + 1]; | ||
189 | int local_ca_ack_delay; | 193 | int local_ca_ack_delay; |
190 | int num_uars; | 194 | int num_uars; |
191 | int bf_reg_size; | 195 | int bf_reg_size; |
@@ -229,6 +233,8 @@ struct mlx4_caps { | |||
229 | u32 bmme_flags; | 233 | u32 bmme_flags; |
230 | u32 reserved_lkey; | 234 | u32 reserved_lkey; |
231 | u16 stat_rate_support; | 235 | u16 stat_rate_support; |
236 | int udp_rss; | ||
237 | int loopback_support; | ||
232 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 238 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
233 | int max_gso_sz; | 239 | int max_gso_sz; |
234 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | 240 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; |
@@ -480,5 +486,6 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | |||
480 | u32 *lkey, u32 *rkey); | 486 | u32 *lkey, u32 *rkey); |
481 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 487 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); |
482 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 488 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); |
489 | int mlx4_test_interrupts(struct mlx4_dev *dev); | ||
483 | 490 | ||
484 | #endif /* MLX4_DEVICE_H */ | 491 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 74949fbef8c6..a4c66846fb8f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -718,12 +718,20 @@ static inline int page_mapped(struct page *page) | |||
718 | #define VM_FAULT_SIGBUS 0x0002 | 718 | #define VM_FAULT_SIGBUS 0x0002 |
719 | #define VM_FAULT_MAJOR 0x0004 | 719 | #define VM_FAULT_MAJOR 0x0004 |
720 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ | 720 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ |
721 | #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */ | 721 | #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ |
722 | #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ | ||
722 | 723 | ||
723 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ | 724 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ |
724 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ | 725 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ |
725 | 726 | ||
726 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON) | 727 | #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ |
728 | |||
729 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ | ||
730 | VM_FAULT_HWPOISON_LARGE) | ||
731 | |||
732 | /* Encode hstate index for a hwpoisoned large page */ | ||
733 | #define VM_FAULT_SET_HINDEX(x) ((x) << 12) | ||
734 | #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) | ||
727 | 735 | ||
728 | /* | 736 | /* |
729 | * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. | 737 | * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. |
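A short sketch (not part of this diff) of the encoding these macros implement; hstates[] comes from <linux/hugetlb.h>, and the producer/consumer function names are illustrative:

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	/* Producer: a hugetlb fault path reports a poisoned huge page and
	 * stashes the hstate index in bits 12-15 of the fault code. */
	static unsigned int example_report_hwpoison(struct hstate *h)
	{
		return VM_FAULT_HWPOISON_LARGE |
		       VM_FAULT_SET_HINDEX((unsigned int)(h - hstates));
	}

	/* Consumer: an arch fault handler recovers the index (0 for a small
	 * page) to size its poison report. */
	static unsigned int example_poison_hindex(unsigned int fault)
	{
		return (fault & VM_FAULT_HWPOISON_LARGE) ?
			VM_FAULT_GET_HINDEX(fault) : 0;
	}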
@@ -1175,6 +1183,8 @@ extern void free_bootmem_with_active_regions(int nid, | |||
1175 | unsigned long max_low_pfn); | 1183 | unsigned long max_low_pfn); |
1176 | int add_from_early_node_map(struct range *range, int az, | 1184 | int add_from_early_node_map(struct range *range, int az, |
1177 | int nr_range, int nid); | 1185 | int nr_range, int nid); |
1186 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | ||
1187 | u64 goal, u64 limit); | ||
1178 | void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, | 1188 | void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, |
1179 | u64 goal, u64 limit); | 1189 | u64 goal, u64 limit); |
1180 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); | 1190 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ee7e258627f9..cb57d657ce4d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -299,7 +299,7 @@ struct mm_struct { | |||
299 | * new_owner->mm == mm | 299 | * new_owner->mm == mm |
300 | * new_owner->alloc_lock is held | 300 | * new_owner->alloc_lock is held |
301 | */ | 301 | */ |
302 | struct task_struct *owner; | 302 | struct task_struct __rcu *owner; |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | #ifdef CONFIG_PROC_FS | 305 | #ifdef CONFIG_PROC_FS |
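The __rcu annotation documents the access rule rather than changing the layout: readers must hold rcu_read_lock() and use rcu_dereference(), which sparse can now check. A sketch (not part of this diff), assuming CONFIG_MM_OWNER:

	#include <linux/mm_types.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	static struct task_struct *example_get_mm_owner(struct mm_struct *mm)
	{
		struct task_struct *owner;

		rcu_read_lock();
		owner = rcu_dereference(mm->owner);	/* field exists only with CONFIG_MM_OWNER */
		if (owner)
			get_task_struct(owner);
		rcu_read_unlock();
		return owner;	/* caller drops it with put_task_struct() */
	}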
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 33b2ea09a4ad..a36ab3bc7b03 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ | 18 | #define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ |
19 | #define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ | 19 | #define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ |
20 | #define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ | 20 | #define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ |
21 | #define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * Vendors and devices. Sort key: vendor first, device next. | 24 | * Vendors and devices. Sort key: vendor first, device next. |
diff --git a/include/linux/module.h b/include/linux/module.h index aace066bad8f..b29e7458b966 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -350,7 +350,10 @@ struct module | |||
350 | struct tracepoint *tracepoints; | 350 | struct tracepoint *tracepoints; |
351 | unsigned int num_tracepoints; | 351 | unsigned int num_tracepoints; |
352 | #endif | 352 | #endif |
353 | 353 | #ifdef HAVE_JUMP_LABEL | |
354 | struct jump_entry *jump_entries; | ||
355 | unsigned int num_jump_entries; | ||
356 | #endif | ||
354 | #ifdef CONFIG_TRACING | 357 | #ifdef CONFIG_TRACING |
355 | const char **trace_bprintk_fmt_start; | 358 | const char **trace_bprintk_fmt_start; |
356 | unsigned int num_trace_bprintk_fmt; | 359 | unsigned int num_trace_bprintk_fmt; |
diff --git a/include/linux/mroute.h b/include/linux/mroute.h index fa04b246c9ae..0fa7a3a874c8 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h | |||
@@ -213,6 +213,7 @@ struct mfc_cache { | |||
213 | unsigned char ttls[MAXVIFS]; /* TTL thresholds */ | 213 | unsigned char ttls[MAXVIFS]; /* TTL thresholds */ |
214 | } res; | 214 | } res; |
215 | } mfc_un; | 215 | } mfc_un; |
216 | struct rcu_head rcu; | ||
216 | }; | 217 | }; |
217 | 218 | ||
218 | #define MFC_STATIC 1 | 219 | #define MFC_STATIC 1 |
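The rcu_head lets multicast cache entries be freed after a grace period while lockless readers may still be walking the MFC lists; a sketch of the expected free path (not part of this diff), with mrt_cachep standing in for ipmr's private kmem_cache:

	#include <linux/mroute.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	static struct kmem_cache *mrt_cachep;	/* assumed: created elsewhere with kmem_cache_create() */

	static void example_mfc_free_rcu(struct rcu_head *head)
	{
		struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

		kmem_cache_free(mrt_cachep, c);
	}

	static void example_mfc_free(struct mfc_cache *c)
	{
		call_rcu(&c->rcu, example_mfc_free_rcu);
	}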
diff --git a/include/linux/msi.h b/include/linux/msi.h index 91b05c171854..05acced439a3 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -10,12 +10,13 @@ struct msi_msg { | |||
10 | }; | 10 | }; |
11 | 11 | ||
12 | /* Helper functions */ | 12 | /* Helper functions */ |
13 | struct irq_desc; | 13 | struct irq_data; |
14 | extern void mask_msi_irq(unsigned int irq); | 14 | struct msi_desc; |
15 | extern void unmask_msi_irq(unsigned int irq); | 15 | extern void mask_msi_irq(struct irq_data *data); |
16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 16 | extern void unmask_msi_irq(struct irq_data *data); |
17 | extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 17 | extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
18 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 18 | extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
19 | extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
19 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); | 20 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); |
20 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); | 21 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
21 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | 22 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); |
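The mask/unmask helpers now take the struct irq_data that the reworked genirq layer hands to chip callbacks, so an MSI-capable irq_chip can use them directly as its .irq_mask/.irq_unmask methods; a sketch (not part of this diff), chip name illustrative:

	#include <linux/irq.h>
	#include <linux/msi.h>

	static struct irq_chip example_msi_chip = {
		.name		= "EXAMPLE-MSI",
		.irq_mask	= mask_msi_irq,		/* previously took a bare irq number */
		.irq_unmask	= unmask_msi_irq,
	};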
diff --git a/include/linux/mtio.h b/include/linux/mtio.h index ef01d6aa5934..8f825756c459 100644 --- a/include/linux/mtio.h +++ b/include/linux/mtio.h | |||
@@ -63,6 +63,7 @@ struct mtop { | |||
63 | #define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ | 63 | #define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ |
64 | #define MTSETPART 33 /* Change the active tape partition */ | 64 | #define MTSETPART 33 /* Change the active tape partition */ |
65 | #define MTMKPART 34 /* Format the tape with one or two partitions */ | 65 | #define MTMKPART 34 /* Format the tape with one or two partitions */ |
66 | #define MTWEOFI 35 /* write an end-of-file record (mark) in immediate mode */ | ||
66 | 67 | ||
67 | /* structure for MTIOCGET - mag tape get status command */ | 68 | /* structure for MTIOCGET - mag tape get status command */ |
68 | 69 | ||
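MTWEOFI writes the filemark in immediate mode, i.e. the ioctl returns before the drive has flushed it, which helps streaming backups avoid stalls. A userspace sketch (not part of this diff; device path and error handling illustrative):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/mtio.h>

	static int write_filemark_immediate(const char *dev)	/* e.g. "/dev/nst0" */
	{
		struct mtop op = { .mt_op = MTWEOFI, .mt_count = 1 };
		int fd = open(dev, O_WRONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, MTIOCTOP, &op);	/* returns without waiting for the mark */
		close(fd);
		return ret;
	}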
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h index de24af79ebd3..54b8e0d8d916 100644 --- a/include/linux/n_r3964.h +++ b/include/linux/n_r3964.h | |||
@@ -4,7 +4,6 @@ | |||
4 | * Copyright by | 4 | * Copyright by |
5 | * Philips Automation Projects | 5 | * Philips Automation Projects |
6 | * Kassel (Germany) | 6 | * Kassel (Germany) |
7 | * http://www.pap-philips.de | ||
8 | * ----------------------------------------------------------- | 7 | * ----------------------------------------------------------- |
9 | * This software may be used and distributed according to the terms of | 8 | * This software may be used and distributed according to the terms of |
10 | * the GNU General Public License, incorporated herein by reference. | 9 | * the GNU General Public License, incorporated herein by reference. |
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index 4522aed00906..ef663061d5ac 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h | |||
@@ -241,34 +241,6 @@ int ncp_mmap(struct file *, struct vm_area_struct *); | |||
241 | /* linux/fs/ncpfs/ncplib_kernel.c */ | 241 | /* linux/fs/ncpfs/ncplib_kernel.c */ |
242 | int ncp_make_closed(struct inode *); | 242 | int ncp_make_closed(struct inode *); |
243 | 243 | ||
244 | #define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber]) | ||
245 | |||
246 | static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator) | ||
247 | { | ||
248 | #ifdef CONFIG_NCPFS_SMALLDOS | ||
249 | int ns = ncp_namespace(i); | ||
250 | |||
251 | if ((ns == NW_NS_DOS) | ||
252 | #ifdef CONFIG_NCPFS_OS2_NS | ||
253 | || ((ns == NW_NS_OS2) && (nscreator == NW_NS_DOS)) | ||
254 | #endif /* CONFIG_NCPFS_OS2_NS */ | ||
255 | ) | ||
256 | return 0; | ||
257 | #endif /* CONFIG_NCPFS_SMALLDOS */ | ||
258 | return 1; | ||
259 | } | ||
260 | |||
261 | #define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS) | ||
262 | |||
263 | static inline int ncp_case_sensitive(struct inode *i) | ||
264 | { | ||
265 | #ifdef CONFIG_NCPFS_NFS_NS | ||
266 | return ncp_namespace(i) == NW_NS_NFS; | ||
267 | #else | ||
268 | return 0; | ||
269 | #endif /* CONFIG_NCPFS_NFS_NS */ | ||
270 | } | ||
271 | |||
272 | #endif /* __KERNEL__ */ | 244 | #endif /* __KERNEL__ */ |
273 | 245 | ||
274 | #endif /* _LINUX_NCP_FS_H */ | 246 | #endif /* _LINUX_NCP_FS_H */ |
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 8da05bc098ca..d64b0e894336 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -62,6 +62,7 @@ struct ncp_server { | |||
62 | int ncp_reply_size; | 62 | int ncp_reply_size; |
63 | 63 | ||
64 | int root_setuped; | 64 | int root_setuped; |
65 | struct mutex root_setup_lock; | ||
65 | 66 | ||
66 | /* info for packet signing */ | 67 | /* info for packet signing */ |
67 | int sign_wanted; /* 1=Server needs signed packets */ | 68 | int sign_wanted; /* 1=Server needs signed packets */ |
@@ -81,13 +82,14 @@ struct ncp_server { | |||
81 | size_t len; | 82 | size_t len; |
82 | void* data; | 83 | void* data; |
83 | } priv; | 84 | } priv; |
85 | struct rw_semaphore auth_rwsem; | ||
84 | 86 | ||
85 | /* nls info: codepage for volume and charset for I/O */ | 87 | /* nls info: codepage for volume and charset for I/O */ |
86 | struct nls_table *nls_vol; | 88 | struct nls_table *nls_vol; |
87 | struct nls_table *nls_io; | 89 | struct nls_table *nls_io; |
88 | 90 | ||
89 | /* maximum age in jiffies */ | 91 | /* maximum age in jiffies */ |
90 | int dentry_ttl; | 92 | atomic_t dentry_ttl; |
91 | 93 | ||
92 | /* miscellaneous */ | 94 | /* miscellaneous */ |
93 | unsigned int flags; | 95 | unsigned int flags; |
diff --git a/include/linux/net.h b/include/linux/net.h index dee0b11a8759..16faa130088c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -229,6 +229,8 @@ enum { | |||
229 | extern int sock_wake_async(struct socket *sk, int how, int band); | 229 | extern int sock_wake_async(struct socket *sk, int how, int band); |
230 | extern int sock_register(const struct net_proto_family *fam); | 230 | extern int sock_register(const struct net_proto_family *fam); |
231 | extern void sock_unregister(int family); | 231 | extern void sock_unregister(int family); |
232 | extern int __sock_create(struct net *net, int family, int type, int proto, | ||
233 | struct socket **res, int kern); | ||
232 | extern int sock_create(int family, int type, int proto, | 234 | extern int sock_create(int family, int type, int proto, |
233 | struct socket **res); | 235 | struct socket **res); |
234 | extern int sock_create_kern(int family, int type, int proto, | 236 | extern int sock_create_kern(int family, int type, int proto, |
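Exporting __sock_create() lets in-kernel callers choose the network namespace explicitly and flag the socket as kernel-internal (the final kern argument, which the security hooks see); a minimal sketch (not part of this diff; protocol choice illustrative):

	#include <linux/net.h>
	#include <linux/in.h>
	#include <net/net_namespace.h>

	static int example_kernel_udp_socket(struct net *net, struct socket **sock)
	{
		/* kern=1: kernel-owned socket created in the given namespace */
		return __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, sock, 1);
	}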
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 46c36ffe20ee..fcd3dda86322 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -228,9 +228,9 @@ struct netdev_hw_addr { | |||
228 | #define NETDEV_HW_ADDR_T_SLAVE 3 | 228 | #define NETDEV_HW_ADDR_T_SLAVE 3 |
229 | #define NETDEV_HW_ADDR_T_UNICAST 4 | 229 | #define NETDEV_HW_ADDR_T_UNICAST 4 |
230 | #define NETDEV_HW_ADDR_T_MULTICAST 5 | 230 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
231 | int refcount; | ||
232 | bool synced; | 231 | bool synced; |
233 | bool global_use; | 232 | bool global_use; |
233 | int refcount; | ||
234 | struct rcu_head rcu_head; | 234 | struct rcu_head rcu_head; |
235 | }; | 235 | }; |
236 | 236 | ||
@@ -281,6 +281,12 @@ struct hh_cache { | |||
281 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | 281 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static inline void hh_cache_put(struct hh_cache *hh) | ||
285 | { | ||
286 | if (atomic_dec_and_test(&hh->hh_refcnt)) | ||
287 | kfree(hh); | ||
288 | } | ||
289 | |||
284 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | 290 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. |
285 | * Alternative is: | 291 | * Alternative is: |
286 | * dev->hard_header_len ? (dev->hard_header_len + | 292 | * dev->hard_header_len ? (dev->hard_header_len + |
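hh_cache_put() is the release half of the hh_refcnt reference count that callers previously open-coded; a sketch of the intended pairing (not part of this diff, helper names illustrative):

	static inline void example_hh_hold(struct hh_cache *hh)
	{
		atomic_inc(&hh->hh_refcnt);	/* take a reference on the cached header */
	}

	static void example_hh_release(struct hh_cache *hh)
	{
		hh_cache_put(hh);	/* kfree()s the entry when the last reference drops */
	}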
@@ -884,6 +890,9 @@ struct net_device { | |||
884 | int iflink; | 890 | int iflink; |
885 | 891 | ||
886 | struct net_device_stats stats; | 892 | struct net_device_stats stats; |
893 | atomic_long_t rx_dropped; /* dropped packets by core network | ||
894 | * Do not use this in drivers. | ||
895 | */ | ||
887 | 896 | ||
888 | #ifdef CONFIG_WIRELESS_EXT | 897 | #ifdef CONFIG_WIRELESS_EXT |
889 | /* List of functions to handle Wireless Extensions (instead of ioctl). | 898 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
@@ -901,7 +910,7 @@ struct net_device { | |||
901 | 910 | ||
902 | unsigned int flags; /* interface flags (a la BSD) */ | 911 | unsigned int flags; /* interface flags (a la BSD) */ |
903 | unsigned short gflags; | 912 | unsigned short gflags; |
904 | unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ | 913 | unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */ |
905 | unsigned short padded; /* How much padding added by alloc_netdev() */ | 914 | unsigned short padded; /* How much padding added by alloc_netdev() */ |
906 | 915 | ||
907 | unsigned char operstate; /* RFC2863 operstate */ | 916 | unsigned char operstate; /* RFC2863 operstate */ |
@@ -918,10 +927,6 @@ struct net_device { | |||
918 | unsigned short needed_headroom; | 927 | unsigned short needed_headroom; |
919 | unsigned short needed_tailroom; | 928 | unsigned short needed_tailroom; |
920 | 929 | ||
921 | struct net_device *master; /* Pointer to master device of a group, | ||
922 | * which this device is member of. | ||
923 | */ | ||
924 | |||
925 | /* Interface address info. */ | 930 | /* Interface address info. */ |
926 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 931 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
927 | unsigned char addr_assign_type; /* hw address assignment type */ | 932 | unsigned char addr_assign_type; /* hw address assignment type */ |
@@ -937,12 +942,15 @@ struct net_device { | |||
937 | 942 | ||
938 | 943 | ||
939 | /* Protocol specific pointers */ | 944 | /* Protocol specific pointers */ |
940 | 945 | ||
946 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
947 | struct vlan_group *vlgrp; /* VLAN group */ | ||
948 | #endif | ||
941 | #ifdef CONFIG_NET_DSA | 949 | #ifdef CONFIG_NET_DSA |
942 | void *dsa_ptr; /* dsa specific data */ | 950 | void *dsa_ptr; /* dsa specific data */ |
943 | #endif | 951 | #endif |
944 | void *atalk_ptr; /* AppleTalk link */ | 952 | void *atalk_ptr; /* AppleTalk link */ |
945 | void *ip_ptr; /* IPv4 specific data */ | 953 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ |
946 | void *dn_ptr; /* DECnet specific data */ | 954 | void *dn_ptr; /* DECnet specific data */ |
947 | void *ip6_ptr; /* IPv6 specific data */ | 955 | void *ip6_ptr; /* IPv6 specific data */ |
948 | void *ec_ptr; /* Econet specific data */ | 956 | void *ec_ptr; /* Econet specific data */ |
@@ -951,9 +959,20 @@ struct net_device { | |||
951 | assign before registering */ | 959 | assign before registering */ |
952 | 960 | ||
953 | /* | 961 | /* |
954 | * Cache line mostly used on receive path (including eth_type_trans()) | 962 | * Cache lines mostly used on receive path (including eth_type_trans()) |
955 | */ | 963 | */ |
956 | unsigned long last_rx; /* Time of last Rx */ | 964 | unsigned long last_rx; /* Time of last Rx |
965 | * This should not be set in | ||
966 | * drivers, unless really needed, | ||
967 | * because network stack (bonding) | ||
968 | * use it if/when necessary, to | ||
969 | * avoid dirtying this cache line. | ||
970 | */ | ||
971 | |||
972 | struct net_device *master; /* Pointer to master device of a group, | ||
973 | * which this device is member of. | ||
974 | */ | ||
975 | |||
957 | /* Interface address info used in eth_type_trans() */ | 976 | /* Interface address info used in eth_type_trans() */ |
958 | unsigned char *dev_addr; /* hw address, (before bcast | 977 | unsigned char *dev_addr; /* hw address, (before bcast |
959 | because most packets are | 978 | because most packets are |
@@ -969,14 +988,21 @@ struct net_device { | |||
969 | 988 | ||
970 | struct netdev_rx_queue *_rx; | 989 | struct netdev_rx_queue *_rx; |
971 | 990 | ||
972 | /* Number of RX queues allocated at alloc_netdev_mq() time */ | 991 | /* Number of RX queues allocated at register_netdev() time */ |
973 | unsigned int num_rx_queues; | 992 | unsigned int num_rx_queues; |
993 | |||
994 | /* Number of RX queues currently active in device */ | ||
995 | unsigned int real_num_rx_queues; | ||
974 | #endif | 996 | #endif |
975 | 997 | ||
976 | struct netdev_queue rx_queue; | ||
977 | rx_handler_func_t *rx_handler; | 998 | rx_handler_func_t *rx_handler; |
978 | void *rx_handler_data; | 999 | void *rx_handler_data; |
979 | 1000 | ||
1001 | struct netdev_queue __rcu *ingress_queue; | ||
1002 | |||
1003 | /* | ||
1004 | * Cache lines mostly used on transmit path | ||
1005 | */ | ||
980 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; | 1006 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
981 | 1007 | ||
982 | /* Number of TX queues allocated at alloc_netdev_mq() time */ | 1008 | /* Number of TX queues allocated at alloc_netdev_mq() time */ |
@@ -990,9 +1016,7 @@ struct net_device { | |||
990 | 1016 | ||
991 | unsigned long tx_queue_len; /* Max frames per queue allowed */ | 1017 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
992 | spinlock_t tx_global_lock; | 1018 | spinlock_t tx_global_lock; |
993 | /* | 1019 | |
994 | * One part is mostly used on xmit path (device) | ||
995 | */ | ||
996 | /* These may be needed for future network-power-down code. */ | 1020 | /* These may be needed for future network-power-down code. */ |
997 | 1021 | ||
998 | /* | 1022 | /* |
@@ -1005,7 +1029,7 @@ struct net_device { | |||
1005 | struct timer_list watchdog_timer; | 1029 | struct timer_list watchdog_timer; |
1006 | 1030 | ||
1007 | /* Number of references to this device */ | 1031 | /* Number of references to this device */ |
1008 | atomic_t refcnt ____cacheline_aligned_in_smp; | 1032 | int __percpu *pcpu_refcnt; |
1009 | 1033 | ||
1010 | /* delayed register/unregister */ | 1034 | /* delayed register/unregister */ |
1011 | struct list_head todo_list; | 1035 | struct list_head todo_list; |
@@ -1041,8 +1065,12 @@ struct net_device { | |||
1041 | #endif | 1065 | #endif |
1042 | 1066 | ||
1043 | /* mid-layer private */ | 1067 | /* mid-layer private */ |
1044 | void *ml_priv; | 1068 | union { |
1045 | 1069 | void *ml_priv; | |
1070 | struct pcpu_lstats __percpu *lstats; /* loopback stats */ | ||
1071 | struct pcpu_tstats __percpu *tstats; /* tunnel stats */ | ||
1072 | struct pcpu_dstats __percpu *dstats; /* dummy stats */ | ||
1073 | }; | ||
1046 | /* GARP */ | 1074 | /* GARP */ |
1047 | struct garp_port *garp_port; | 1075 | struct garp_port *garp_port; |
1048 | 1076 | ||
@@ -1305,6 +1333,7 @@ static inline void unregister_netdevice(struct net_device *dev) | |||
1305 | unregister_netdevice_queue(dev, NULL); | 1333 | unregister_netdevice_queue(dev, NULL); |
1306 | } | 1334 | } |
1307 | 1335 | ||
1336 | extern int netdev_refcnt_read(const struct net_device *dev); | ||
1308 | extern void free_netdev(struct net_device *dev); | 1337 | extern void free_netdev(struct net_device *dev); |
1309 | extern void synchronize_net(void); | 1338 | extern void synchronize_net(void); |
1310 | extern int register_netdevice_notifier(struct notifier_block *nb); | 1339 | extern int register_netdevice_notifier(struct notifier_block *nb); |
@@ -1667,11 +1696,34 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |||
1667 | */ | 1696 | */ |
1668 | static inline int netif_is_multiqueue(const struct net_device *dev) | 1697 | static inline int netif_is_multiqueue(const struct net_device *dev) |
1669 | { | 1698 | { |
1670 | return (dev->num_tx_queues > 1); | 1699 | return dev->num_tx_queues > 1; |
1671 | } | 1700 | } |
1672 | 1701 | ||
1673 | extern void netif_set_real_num_tx_queues(struct net_device *dev, | 1702 | extern int netif_set_real_num_tx_queues(struct net_device *dev, |
1674 | unsigned int txq); | 1703 | unsigned int txq); |
1704 | |||
1705 | #ifdef CONFIG_RPS | ||
1706 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | ||
1707 | unsigned int rxq); | ||
1708 | #else | ||
1709 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | ||
1710 | unsigned int rxq) | ||
1711 | { | ||
1712 | return 0; | ||
1713 | } | ||
1714 | #endif | ||
1715 | |||
1716 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | ||
1717 | const struct net_device *from_dev) | ||
1718 | { | ||
1719 | netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues); | ||
1720 | #ifdef CONFIG_RPS | ||
1721 | return netif_set_real_num_rx_queues(to_dev, | ||
1722 | from_dev->real_num_rx_queues); | ||
1723 | #else | ||
1724 | return 0; | ||
1725 | #endif | ||
1726 | } | ||
1675 | 1727 | ||
1676 | /* Use this variant when it is known for sure that it | 1728 | /* Use this variant when it is known for sure that it |
1677 | * is executing from hardware interrupt context or with hardware interrupts | 1729 | * is executing from hardware interrupt context or with hardware interrupts |
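netif_set_real_num_tx_queues() now reports failure and gains an RX counterpart that exists only under CONFIG_RPS; a driver-side sketch (not part of this diff, function name illustrative) of shrinking both active queue sets together:

	static int example_set_channels(struct net_device *dev, unsigned int n)
	{
		int err;

		err = netif_set_real_num_tx_queues(dev, n);
		if (err)
			return err;
		/* compiles to "return 0" when CONFIG_RPS is not set */
		return netif_set_real_num_rx_queues(dev, n);
	}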
@@ -1695,8 +1747,7 @@ extern gro_result_t dev_gro_receive(struct napi_struct *napi, | |||
1695 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); | 1747 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); |
1696 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, | 1748 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, |
1697 | struct sk_buff *skb); | 1749 | struct sk_buff *skb); |
1698 | extern void napi_reuse_skb(struct napi_struct *napi, | 1750 | extern void napi_gro_flush(struct napi_struct *napi); |
1699 | struct sk_buff *skb); | ||
1700 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); | 1751 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); |
1701 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, | 1752 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, |
1702 | struct sk_buff *skb, | 1753 | struct sk_buff *skb, |
@@ -1715,7 +1766,6 @@ extern int netdev_rx_handler_register(struct net_device *dev, | |||
1715 | void *rx_handler_data); | 1766 | void *rx_handler_data); |
1716 | extern void netdev_rx_handler_unregister(struct net_device *dev); | 1767 | extern void netdev_rx_handler_unregister(struct net_device *dev); |
1717 | 1768 | ||
1718 | extern void netif_nit_deliver(struct sk_buff *skb); | ||
1719 | extern int dev_valid_name(const char *name); | 1769 | extern int dev_valid_name(const char *name); |
1720 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 1770 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
1721 | extern int dev_ethtool(struct net *net, struct ifreq *); | 1771 | extern int dev_ethtool(struct net *net, struct ifreq *); |
@@ -1749,7 +1799,7 @@ extern void netdev_run_todo(void); | |||
1749 | */ | 1799 | */ |
1750 | static inline void dev_put(struct net_device *dev) | 1800 | static inline void dev_put(struct net_device *dev) |
1751 | { | 1801 | { |
1752 | atomic_dec(&dev->refcnt); | 1802 | irqsafe_cpu_dec(*dev->pcpu_refcnt); |
1753 | } | 1803 | } |
1754 | 1804 | ||
1755 | /** | 1805 | /** |
@@ -1760,7 +1810,7 @@ static inline void dev_put(struct net_device *dev) | |||
1760 | */ | 1810 | */ |
1761 | static inline void dev_hold(struct net_device *dev) | 1811 | static inline void dev_hold(struct net_device *dev) |
1762 | { | 1812 | { |
1763 | atomic_inc(&dev->refcnt); | 1813 | irqsafe_cpu_inc(*dev->pcpu_refcnt); |
1764 | } | 1814 | } |
1765 | 1815 | ||
1766 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | 1816 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
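With the device reference count split per cpu there is no longer a single atomic to read, so code that needs the total (the unregister path, debugging) sums it through netdev_refcnt_read(), declared earlier in this patch. A sketch (not part of this diff):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	static void example_report_refs(struct net_device *dev)
	{
		dev_hold(dev);				/* irq-safe per-cpu increment */
		pr_info("%s: refcnt=%d\n", dev->name,
			netdev_refcnt_read(dev));	/* sums all per-cpu counters */
		dev_put(dev);				/* per-cpu decrement */
	}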
@@ -2171,6 +2221,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); | |||
2171 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 2221 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
2172 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | 2222 | extern void netdev_class_remove_file(struct class_attribute *class_attr); |
2173 | 2223 | ||
2224 | extern struct kobj_ns_type_operations net_ns_type_operations; | ||
2225 | |||
2174 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); | 2226 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); |
2175 | 2227 | ||
2176 | extern void linkwatch_run_queue(void); | 2228 | extern void linkwatch_run_queue(void); |
@@ -2191,14 +2243,22 @@ static inline int net_gso_ok(int features, int gso_type) | |||
2191 | static inline int skb_gso_ok(struct sk_buff *skb, int features) | 2243 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
2192 | { | 2244 | { |
2193 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && | 2245 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
2194 | (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); | 2246 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
2195 | } | 2247 | } |
2196 | 2248 | ||
2197 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) | 2249 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
2198 | { | 2250 | { |
2199 | return skb_is_gso(skb) && | 2251 | if (skb_is_gso(skb)) { |
2200 | (!skb_gso_ok(skb, dev->features) || | 2252 | int features = dev->features; |
2201 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | 2253 | |
2254 | if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci) | ||
2255 | features &= dev->vlan_features; | ||
2256 | |||
2257 | return (!skb_gso_ok(skb, features) || | ||
2258 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | ||
2259 | } | ||
2260 | |||
2261 | return 0; | ||
2202 | } | 2262 | } |
2203 | 2263 | ||
2204 | static inline void netif_set_gso_max_size(struct net_device *dev, | 2264 | static inline void netif_set_gso_max_size(struct net_device *dev, |
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 1afd18c855ec..50cdc2559a5a 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h | |||
@@ -98,8 +98,14 @@ enum ip_conntrack_events { | |||
98 | 98 | ||
99 | enum ip_conntrack_expect_events { | 99 | enum ip_conntrack_expect_events { |
100 | IPEXP_NEW, /* new expectation */ | 100 | IPEXP_NEW, /* new expectation */ |
101 | IPEXP_DESTROY, /* destroyed expectation */ | ||
101 | }; | 102 | }; |
102 | 103 | ||
104 | /* expectation flags */ | ||
105 | #define NF_CT_EXPECT_PERMANENT 0x1 | ||
106 | #define NF_CT_EXPECT_INACTIVE 0x2 | ||
107 | #define NF_CT_EXPECT_USERSPACE 0x4 | ||
108 | |||
103 | #ifdef __KERNEL__ | 109 | #ifdef __KERNEL__ |
104 | struct ip_conntrack_stat { | 110 | struct ip_conntrack_stat { |
105 | unsigned int searched; | 111 | unsigned int searched; |
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index ff8cfbcf3b81..0ce91d56a5f2 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h | |||
@@ -89,6 +89,7 @@ enum sip_header_types { | |||
89 | SIP_HDR_VIA_TCP, | 89 | SIP_HDR_VIA_TCP, |
90 | SIP_HDR_EXPIRES, | 90 | SIP_HDR_EXPIRES, |
91 | SIP_HDR_CONTENT_LENGTH, | 91 | SIP_HDR_CONTENT_LENGTH, |
92 | SIP_HDR_CALL_ID, | ||
92 | }; | 93 | }; |
93 | 94 | ||
94 | enum sdp_header_types { | 95 | enum sdp_header_types { |
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 9ed534c991b9..19711e3ffd42 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h | |||
@@ -39,8 +39,9 @@ enum ctattr_type { | |||
39 | CTA_TUPLE_MASTER, | 39 | CTA_TUPLE_MASTER, |
40 | CTA_NAT_SEQ_ADJ_ORIG, | 40 | CTA_NAT_SEQ_ADJ_ORIG, |
41 | CTA_NAT_SEQ_ADJ_REPLY, | 41 | CTA_NAT_SEQ_ADJ_REPLY, |
42 | CTA_SECMARK, | 42 | CTA_SECMARK, /* obsolete */ |
43 | CTA_ZONE, | 43 | CTA_ZONE, |
44 | CTA_SECCTX, | ||
44 | __CTA_MAX | 45 | __CTA_MAX |
45 | }; | 46 | }; |
46 | #define CTA_MAX (__CTA_MAX - 1) | 47 | #define CTA_MAX (__CTA_MAX - 1) |
@@ -161,6 +162,7 @@ enum ctattr_expect { | |||
161 | CTA_EXPECT_ID, | 162 | CTA_EXPECT_ID, |
162 | CTA_EXPECT_HELP_NAME, | 163 | CTA_EXPECT_HELP_NAME, |
163 | CTA_EXPECT_ZONE, | 164 | CTA_EXPECT_ZONE, |
165 | CTA_EXPECT_FLAGS, | ||
164 | __CTA_EXPECT_MAX | 166 | __CTA_EXPECT_MAX |
165 | }; | 167 | }; |
166 | #define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) | 168 | #define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) |
@@ -172,4 +174,11 @@ enum ctattr_help { | |||
172 | }; | 174 | }; |
173 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) | 175 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) |
174 | 176 | ||
177 | enum ctattr_secctx { | ||
178 | CTA_SECCTX_UNSPEC, | ||
179 | CTA_SECCTX_NAME, | ||
180 | __CTA_SECCTX_MAX | ||
181 | }; | ||
182 | #define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) | ||
183 | |||
175 | #endif /* _IPCONNTRACK_NETLINK_H */ | 184 | #endif /* _IPCONNTRACK_NETLINK_H */ |
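CTA_SECCTX replaces the numeric CTA_SECMARK with a nested attribute carrying the LSM context string; a kernel-side sketch (not part of this diff) of the layout the enums imply, leaving out the security_secid_to_secctx() lookup the real ctnetlink code performs:

	#include <net/netlink.h>
	#include <linux/netfilter/nfnetlink_conntrack.h>

	static int example_put_secctx(struct sk_buff *skb, const char *secctx)
	{
		struct nlattr *nest = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);

		if (!nest)
			goto nla_put_failure;
		NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
		nla_nest_end(skb, nest);
		return 0;

	nla_put_failure:
		return -ENOSPC;
	}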
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 24e5d01d27d0..742bec051440 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -66,6 +66,11 @@ struct xt_standard_target { | |||
66 | int verdict; | 66 | int verdict; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct xt_error_target { | ||
70 | struct xt_entry_target target; | ||
71 | char errorname[XT_FUNCTION_MAXNAMELEN]; | ||
72 | }; | ||
73 | |||
69 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision | 74 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision |
70 | * kernel supports, if >= revision. */ | 75 | * kernel supports, if >= revision. */ |
71 | struct xt_get_revision { | 76 | struct xt_get_revision { |
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h index 6fcd3448b186..989092bd6274 100644 --- a/include/linux/netfilter/xt_SECMARK.h +++ b/include/linux/netfilter/xt_SECMARK.h | |||
@@ -11,18 +11,12 @@ | |||
11 | * packets are being marked for. | 11 | * packets are being marked for. |
12 | */ | 12 | */ |
13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ | 13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ |
14 | #define SECMARK_SELCTX_MAX 256 | 14 | #define SECMARK_SECCTX_MAX 256 |
15 | |||
16 | struct xt_secmark_target_selinux_info { | ||
17 | __u32 selsid; | ||
18 | char selctx[SECMARK_SELCTX_MAX]; | ||
19 | }; | ||
20 | 15 | ||
21 | struct xt_secmark_target_info { | 16 | struct xt_secmark_target_info { |
22 | __u8 mode; | 17 | __u8 mode; |
23 | union { | 18 | __u32 secid; |
24 | struct xt_secmark_target_selinux_info sel; | 19 | char secctx[SECMARK_SECCTX_MAX]; |
25 | } u; | ||
26 | }; | 20 | }; |
27 | 21 | ||
28 | #endif /*_XT_SECMARK_H_target */ | 22 | #endif /*_XT_SECMARK_H_target */ |
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h index 152e8f97132b..3f3d69361289 100644 --- a/include/linux/netfilter/xt_TPROXY.h +++ b/include/linux/netfilter/xt_TPROXY.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _XT_TPROXY_H_target | 1 | #ifndef _XT_TPROXY_H |
2 | #define _XT_TPROXY_H_target | 2 | #define _XT_TPROXY_H |
3 | 3 | ||
4 | /* TPROXY target is capable of marking the packet to perform | 4 | /* TPROXY target is capable of marking the packet to perform |
5 | * redirection. We can get rid of that whenever we get support for | 5 | * redirection. We can get rid of that whenever we get support for |
@@ -11,4 +11,11 @@ struct xt_tproxy_target_info { | |||
11 | __be16 lport; | 11 | __be16 lport; |
12 | }; | 12 | }; |
13 | 13 | ||
14 | #endif /* _XT_TPROXY_H_target */ | 14 | struct xt_tproxy_target_info_v1 { |
15 | u_int32_t mark_mask; | ||
16 | u_int32_t mark_value; | ||
17 | union nf_inet_addr laddr; | ||
18 | __be16 lport; | ||
19 | }; | ||
20 | |||
21 | #endif /* _XT_TPROXY_H */ | ||
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index e9948c0560f6..adbf4bff87ed 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h | |||
@@ -21,8 +21,21 @@ | |||
21 | 21 | ||
22 | #include <linux/netfilter/x_tables.h> | 22 | #include <linux/netfilter/x_tables.h> |
23 | 23 | ||
24 | #ifndef __KERNEL__ | ||
24 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 25 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
25 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 26 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
27 | #define arpt_entry_target xt_entry_target | ||
28 | #define arpt_standard_target xt_standard_target | ||
29 | #define arpt_error_target xt_error_target | ||
30 | #define ARPT_CONTINUE XT_CONTINUE | ||
31 | #define ARPT_RETURN XT_RETURN | ||
32 | #define arpt_counters_info xt_counters_info | ||
33 | #define arpt_counters xt_counters | ||
34 | #define ARPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
35 | #define ARPT_ERROR_TARGET XT_ERROR_TARGET | ||
36 | #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
37 | XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) | ||
38 | #endif | ||
26 | 39 | ||
27 | #define ARPT_DEV_ADDR_LEN_MAX 16 | 40 | #define ARPT_DEV_ADDR_LEN_MAX 16 |
28 | 41 | ||
@@ -63,9 +76,6 @@ struct arpt_arp { | |||
63 | u_int16_t invflags; | 76 | u_int16_t invflags; |
64 | }; | 77 | }; |
65 | 78 | ||
66 | #define arpt_entry_target xt_entry_target | ||
67 | #define arpt_standard_target xt_standard_target | ||
68 | |||
69 | /* Values for "flag" field in struct arpt_ip (general arp structure). | 79 | /* Values for "flag" field in struct arpt_ip (general arp structure). |
70 | * No flags defined yet. | 80 | * No flags defined yet. |
71 | */ | 81 | */ |
@@ -125,16 +135,10 @@ struct arpt_entry | |||
125 | #define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) | 135 | #define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) |
126 | #define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) | 136 | #define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) |
127 | 137 | ||
128 | /* CONTINUE verdict for targets */ | ||
129 | #define ARPT_CONTINUE XT_CONTINUE | ||
130 | |||
131 | /* For standard target */ | ||
132 | #define ARPT_RETURN XT_RETURN | ||
133 | |||
134 | /* The argument to ARPT_SO_GET_INFO */ | 138 | /* The argument to ARPT_SO_GET_INFO */ |
135 | struct arpt_getinfo { | 139 | struct arpt_getinfo { |
136 | /* Which table: caller fills this in. */ | 140 | /* Which table: caller fills this in. */ |
137 | char name[ARPT_TABLE_MAXNAMELEN]; | 141 | char name[XT_TABLE_MAXNAMELEN]; |
138 | 142 | ||
139 | /* Kernel fills these in. */ | 143 | /* Kernel fills these in. */ |
140 | /* Which hook entry points are valid: bitmask */ | 144 | /* Which hook entry points are valid: bitmask */ |
@@ -156,7 +160,7 @@ struct arpt_getinfo { | |||
156 | /* The argument to ARPT_SO_SET_REPLACE. */ | 160 | /* The argument to ARPT_SO_SET_REPLACE. */ |
157 | struct arpt_replace { | 161 | struct arpt_replace { |
158 | /* Which table. */ | 162 | /* Which table. */ |
159 | char name[ARPT_TABLE_MAXNAMELEN]; | 163 | char name[XT_TABLE_MAXNAMELEN]; |
160 | 164 | ||
161 | /* Which hook entry points are valid: bitmask. You can't | 165 | /* Which hook entry points are valid: bitmask. You can't |
162 | change this. */ | 166 | change this. */ |
@@ -184,14 +188,10 @@ struct arpt_replace { | |||
184 | struct arpt_entry entries[0]; | 188 | struct arpt_entry entries[0]; |
185 | }; | 189 | }; |
186 | 190 | ||
187 | /* The argument to ARPT_SO_ADD_COUNTERS. */ | ||
188 | #define arpt_counters_info xt_counters_info | ||
189 | #define arpt_counters xt_counters | ||
190 | |||
191 | /* The argument to ARPT_SO_GET_ENTRIES. */ | 191 | /* The argument to ARPT_SO_GET_ENTRIES. */ |
192 | struct arpt_get_entries { | 192 | struct arpt_get_entries { |
193 | /* Which table: user fills this in. */ | 193 | /* Which table: user fills this in. */ |
194 | char name[ARPT_TABLE_MAXNAMELEN]; | 194 | char name[XT_TABLE_MAXNAMELEN]; |
195 | 195 | ||
196 | /* User fills this in: total entry size. */ | 196 | /* User fills this in: total entry size. */ |
197 | unsigned int size; | 197 | unsigned int size; |
@@ -200,23 +200,12 @@ struct arpt_get_entries { | |||
200 | struct arpt_entry entrytable[0]; | 200 | struct arpt_entry entrytable[0]; |
201 | }; | 201 | }; |
202 | 202 | ||
203 | /* Standard return verdict, or do jump. */ | ||
204 | #define ARPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
205 | /* Error verdict. */ | ||
206 | #define ARPT_ERROR_TARGET XT_ERROR_TARGET | ||
207 | |||
208 | /* Helper functions */ | 203 | /* Helper functions */ |
209 | static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e) | 204 | static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e) |
210 | { | 205 | { |
211 | return (void *)e + e->target_offset; | 206 | return (void *)e + e->target_offset; |
212 | } | 207 | } |
213 | 208 | ||
214 | #ifndef __KERNEL__ | ||
215 | /* fn returns 0 to continue iteration */ | ||
216 | #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
217 | XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) | ||
218 | #endif | ||
219 | |||
220 | /* | 209 | /* |
221 | * Main firewall chains definitions and global var's definitions. | 210 | * Main firewall chains definitions and global var's definitions. |
222 | */ | 211 | */ |
@@ -225,17 +214,12 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e | |||
225 | /* Standard entry. */ | 214 | /* Standard entry. */ |
226 | struct arpt_standard { | 215 | struct arpt_standard { |
227 | struct arpt_entry entry; | 216 | struct arpt_entry entry; |
228 | struct arpt_standard_target target; | 217 | struct xt_standard_target target; |
229 | }; | ||
230 | |||
231 | struct arpt_error_target { | ||
232 | struct arpt_entry_target target; | ||
233 | char errorname[ARPT_FUNCTION_MAXNAMELEN]; | ||
234 | }; | 218 | }; |
235 | 219 | ||
236 | struct arpt_error { | 220 | struct arpt_error { |
237 | struct arpt_entry entry; | 221 | struct arpt_entry entry; |
238 | struct arpt_error_target target; | 222 | struct xt_error_target target; |
239 | }; | 223 | }; |
240 | 224 | ||
241 | #define ARPT_ENTRY_INIT(__size) \ | 225 | #define ARPT_ENTRY_INIT(__size) \ |
@@ -247,16 +231,16 @@ struct arpt_error { | |||
247 | #define ARPT_STANDARD_INIT(__verdict) \ | 231 | #define ARPT_STANDARD_INIT(__verdict) \ |
248 | { \ | 232 | { \ |
249 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ | 233 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ |
250 | .target = XT_TARGET_INIT(ARPT_STANDARD_TARGET, \ | 234 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
251 | sizeof(struct arpt_standard_target)), \ | 235 | sizeof(struct xt_standard_target)), \ |
252 | .target.verdict = -(__verdict) - 1, \ | 236 | .target.verdict = -(__verdict) - 1, \ |
253 | } | 237 | } |
254 | 238 | ||
255 | #define ARPT_ERROR_INIT \ | 239 | #define ARPT_ERROR_INIT \ |
256 | { \ | 240 | { \ |
257 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ | 241 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ |
258 | .target = XT_TARGET_INIT(ARPT_ERROR_TARGET, \ | 242 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
259 | sizeof(struct arpt_error_target)), \ | 243 | sizeof(struct xt_error_target)), \ |
260 | .target.errorname = "ERROR", \ | 244 | .target.errorname = "ERROR", \ |
261 | } | 245 | } |
262 | 246 | ||
@@ -271,8 +255,6 @@ extern unsigned int arpt_do_table(struct sk_buff *skb, | |||
271 | const struct net_device *out, | 255 | const struct net_device *out, |
272 | struct xt_table *table); | 256 | struct xt_table *table); |
273 | 257 | ||
274 | #define ARPT_ALIGN(s) XT_ALIGN(s) | ||
275 | |||
276 | #ifdef CONFIG_COMPAT | 258 | #ifdef CONFIG_COMPAT |
277 | #include <net/compat.h> | 259 | #include <net/compat.h> |
278 | 260 | ||
@@ -285,14 +267,12 @@ struct compat_arpt_entry { | |||
285 | unsigned char elems[0]; | 267 | unsigned char elems[0]; |
286 | }; | 268 | }; |
287 | 269 | ||
288 | static inline struct arpt_entry_target * | 270 | static inline struct xt_entry_target * |
289 | compat_arpt_get_target(struct compat_arpt_entry *e) | 271 | compat_arpt_get_target(struct compat_arpt_entry *e) |
290 | { | 272 | { |
291 | return (void *)e + e->target_offset; | 273 | return (void *)e + e->target_offset; |
292 | } | 274 | } |
293 | 275 | ||
294 | #define COMPAT_ARPT_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
295 | |||
296 | #endif /* CONFIG_COMPAT */ | 276 | #endif /* CONFIG_COMPAT */ |
297 | #endif /*__KERNEL__*/ | 277 | #endif /*__KERNEL__*/ |
298 | #endif /* _ARPTABLES_H */ | 278 | #endif /* _ARPTABLES_H */ |
diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild index d4d78672873e..e48f1a3f5a4a 100644 --- a/include/linux/netfilter_bridge/Kbuild +++ b/include/linux/netfilter_bridge/Kbuild | |||
@@ -3,11 +3,13 @@ header-y += ebt_among.h | |||
3 | header-y += ebt_arp.h | 3 | header-y += ebt_arp.h |
4 | header-y += ebt_arpreply.h | 4 | header-y += ebt_arpreply.h |
5 | header-y += ebt_ip.h | 5 | header-y += ebt_ip.h |
6 | header-y += ebt_ip6.h | ||
6 | header-y += ebt_limit.h | 7 | header-y += ebt_limit.h |
7 | header-y += ebt_log.h | 8 | header-y += ebt_log.h |
8 | header-y += ebt_mark_m.h | 9 | header-y += ebt_mark_m.h |
9 | header-y += ebt_mark_t.h | 10 | header-y += ebt_mark_t.h |
10 | header-y += ebt_nat.h | 11 | header-y += ebt_nat.h |
12 | header-y += ebt_nflog.h | ||
11 | header-y += ebt_pkttype.h | 13 | header-y += ebt_pkttype.h |
12 | header-y += ebt_redirect.h | 14 | header-y += ebt_redirect.h |
13 | header-y += ebt_stp.h | 15 | header-y += ebt_stp.h |
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 704a7b6e8169..64a5d95c58e8 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h | |||
@@ -27,12 +27,49 @@ | |||
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | 29 | ||
30 | #ifndef __KERNEL__ | ||
30 | #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 31 | #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
31 | #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 32 | #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
32 | #define ipt_match xt_match | 33 | #define ipt_match xt_match |
33 | #define ipt_target xt_target | 34 | #define ipt_target xt_target |
34 | #define ipt_table xt_table | 35 | #define ipt_table xt_table |
35 | #define ipt_get_revision xt_get_revision | 36 | #define ipt_get_revision xt_get_revision |
37 | #define ipt_entry_match xt_entry_match | ||
38 | #define ipt_entry_target xt_entry_target | ||
39 | #define ipt_standard_target xt_standard_target | ||
40 | #define ipt_error_target xt_error_target | ||
41 | #define ipt_counters xt_counters | ||
42 | #define IPT_CONTINUE XT_CONTINUE | ||
43 | #define IPT_RETURN XT_RETURN | ||
44 | |||
45 | /* This group is older than old (iptables < v1.4.0-rc1~89) */ | ||
46 | #include <linux/netfilter/xt_tcpudp.h> | ||
47 | #define ipt_udp xt_udp | ||
48 | #define ipt_tcp xt_tcp | ||
49 | #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
50 | #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
51 | #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
52 | #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
53 | #define IPT_TCP_INV_MASK XT_TCP_INV_MASK | ||
54 | #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
55 | #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
56 | #define IPT_UDP_INV_MASK XT_UDP_INV_MASK | ||
57 | |||
58 | /* The argument to IPT_SO_ADD_COUNTERS. */ | ||
59 | #define ipt_counters_info xt_counters_info | ||
60 | /* Standard return verdict, or do jump. */ | ||
61 | #define IPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
62 | /* Error verdict. */ | ||
63 | #define IPT_ERROR_TARGET XT_ERROR_TARGET | ||
64 | |||
65 | /* fn returns 0 to continue iteration */ | ||
66 | #define IPT_MATCH_ITERATE(e, fn, args...) \ | ||
67 | XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) | ||
68 | |||
69 | /* fn returns 0 to continue iteration */ | ||
70 | #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
71 | XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) | ||
72 | #endif | ||
36 | 73 | ||
37 | /* Yes, Virginia, you have to zero the padding. */ | 74 | /* Yes, Virginia, you have to zero the padding. */ |
38 | struct ipt_ip { | 75 | struct ipt_ip { |
@@ -52,12 +89,6 @@ struct ipt_ip { | |||
52 | u_int8_t invflags; | 89 | u_int8_t invflags; |
53 | }; | 90 | }; |
54 | 91 | ||
55 | #define ipt_entry_match xt_entry_match | ||
56 | #define ipt_entry_target xt_entry_target | ||
57 | #define ipt_standard_target xt_standard_target | ||
58 | |||
59 | #define ipt_counters xt_counters | ||
60 | |||
61 | /* Values for "flag" field in struct ipt_ip (general ip structure). */ | 92 | /* Values for "flag" field in struct ipt_ip (general ip structure). */ |
62 | #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ | 93 | #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ |
63 | #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ | 94 | #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ |
@@ -116,23 +147,6 @@ struct ipt_entry { | |||
116 | #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) | 147 | #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) |
117 | #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET | 148 | #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET |
118 | 149 | ||
119 | #define IPT_CONTINUE XT_CONTINUE | ||
120 | #define IPT_RETURN XT_RETURN | ||
121 | |||
122 | #include <linux/netfilter/xt_tcpudp.h> | ||
123 | #define ipt_udp xt_udp | ||
124 | #define ipt_tcp xt_tcp | ||
125 | |||
126 | #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
127 | #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
128 | #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
129 | #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
130 | #define IPT_TCP_INV_MASK XT_TCP_INV_MASK | ||
131 | |||
132 | #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
133 | #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
134 | #define IPT_UDP_INV_MASK XT_UDP_INV_MASK | ||
135 | |||
136 | /* ICMP matching stuff */ | 150 | /* ICMP matching stuff */ |
137 | struct ipt_icmp { | 151 | struct ipt_icmp { |
138 | u_int8_t type; /* type to match */ | 152 | u_int8_t type; /* type to match */ |
@@ -146,7 +160,7 @@ struct ipt_icmp { | |||
146 | /* The argument to IPT_SO_GET_INFO */ | 160 | /* The argument to IPT_SO_GET_INFO */ |
147 | struct ipt_getinfo { | 161 | struct ipt_getinfo { |
148 | /* Which table: caller fills this in. */ | 162 | /* Which table: caller fills this in. */ |
149 | char name[IPT_TABLE_MAXNAMELEN]; | 163 | char name[XT_TABLE_MAXNAMELEN]; |
150 | 164 | ||
151 | /* Kernel fills these in. */ | 165 | /* Kernel fills these in. */ |
152 | /* Which hook entry points are valid: bitmask */ | 166 | /* Which hook entry points are valid: bitmask */ |
@@ -168,7 +182,7 @@ struct ipt_getinfo { | |||
168 | /* The argument to IPT_SO_SET_REPLACE. */ | 182 | /* The argument to IPT_SO_SET_REPLACE. */ |
169 | struct ipt_replace { | 183 | struct ipt_replace { |
170 | /* Which table. */ | 184 | /* Which table. */ |
171 | char name[IPT_TABLE_MAXNAMELEN]; | 185 | char name[XT_TABLE_MAXNAMELEN]; |
172 | 186 | ||
173 | /* Which hook entry points are valid: bitmask. You can't | 187 | /* Which hook entry points are valid: bitmask. You can't |
174 | change this. */ | 188 | change this. */ |
@@ -196,13 +210,10 @@ struct ipt_replace { | |||
196 | struct ipt_entry entries[0]; | 210 | struct ipt_entry entries[0]; |
197 | }; | 211 | }; |
198 | 212 | ||
199 | /* The argument to IPT_SO_ADD_COUNTERS. */ | ||
200 | #define ipt_counters_info xt_counters_info | ||
201 | |||
202 | /* The argument to IPT_SO_GET_ENTRIES. */ | 213 | /* The argument to IPT_SO_GET_ENTRIES. */ |
203 | struct ipt_get_entries { | 214 | struct ipt_get_entries { |
204 | /* Which table: user fills this in. */ | 215 | /* Which table: user fills this in. */ |
205 | char name[IPT_TABLE_MAXNAMELEN]; | 216 | char name[XT_TABLE_MAXNAMELEN]; |
206 | 217 | ||
207 | /* User fills this in: total entry size. */ | 218 | /* User fills this in: total entry size. */ |
208 | unsigned int size; | 219 | unsigned int size; |
@@ -211,28 +222,13 @@ struct ipt_get_entries { | |||
211 | struct ipt_entry entrytable[0]; | 222 | struct ipt_entry entrytable[0]; |
212 | }; | 223 | }; |
213 | 224 | ||
214 | /* Standard return verdict, or do jump. */ | ||
215 | #define IPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
216 | /* Error verdict. */ | ||
217 | #define IPT_ERROR_TARGET XT_ERROR_TARGET | ||
218 | |||
219 | /* Helper functions */ | 225 | /* Helper functions */ |
220 | static __inline__ struct ipt_entry_target * | 226 | static __inline__ struct xt_entry_target * |
221 | ipt_get_target(struct ipt_entry *e) | 227 | ipt_get_target(struct ipt_entry *e) |
222 | { | 228 | { |
223 | return (void *)e + e->target_offset; | 229 | return (void *)e + e->target_offset; |
224 | } | 230 | } |
225 | 231 | ||
226 | #ifndef __KERNEL__ | ||
227 | /* fn returns 0 to continue iteration */ | ||
228 | #define IPT_MATCH_ITERATE(e, fn, args...) \ | ||
229 | XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) | ||
230 | |||
231 | /* fn returns 0 to continue iteration */ | ||
232 | #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
233 | XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) | ||
234 | #endif | ||
235 | |||
236 | /* | 232 | /* |
237 | * Main firewall chains definitions and global var's definitions. | 233 | * Main firewall chains definitions and global var's definitions. |
238 | */ | 234 | */ |
@@ -249,17 +245,12 @@ extern void ipt_unregister_table(struct net *net, struct xt_table *table); | |||
249 | /* Standard entry. */ | 245 | /* Standard entry. */ |
250 | struct ipt_standard { | 246 | struct ipt_standard { |
251 | struct ipt_entry entry; | 247 | struct ipt_entry entry; |
252 | struct ipt_standard_target target; | 248 | struct xt_standard_target target; |
253 | }; | ||
254 | |||
255 | struct ipt_error_target { | ||
256 | struct ipt_entry_target target; | ||
257 | char errorname[IPT_FUNCTION_MAXNAMELEN]; | ||
258 | }; | 249 | }; |
259 | 250 | ||
260 | struct ipt_error { | 251 | struct ipt_error { |
261 | struct ipt_entry entry; | 252 | struct ipt_entry entry; |
262 | struct ipt_error_target target; | 253 | struct xt_error_target target; |
263 | }; | 254 | }; |
264 | 255 | ||
265 | #define IPT_ENTRY_INIT(__size) \ | 256 | #define IPT_ENTRY_INIT(__size) \ |
@@ -271,7 +262,7 @@ struct ipt_error { | |||
271 | #define IPT_STANDARD_INIT(__verdict) \ | 262 | #define IPT_STANDARD_INIT(__verdict) \ |
272 | { \ | 263 | { \ |
273 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ | 264 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ |
274 | .target = XT_TARGET_INIT(IPT_STANDARD_TARGET, \ | 265 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
275 | sizeof(struct xt_standard_target)), \ | 266 | sizeof(struct xt_standard_target)), \ |
276 | .target.verdict = -(__verdict) - 1, \ | 267 | .target.verdict = -(__verdict) - 1, \ |
277 | } | 268 | } |
@@ -279,8 +270,8 @@ struct ipt_error { | |||
279 | #define IPT_ERROR_INIT \ | 270 | #define IPT_ERROR_INIT \ |
280 | { \ | 271 | { \ |
281 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ | 272 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ |
282 | .target = XT_TARGET_INIT(IPT_ERROR_TARGET, \ | 273 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
283 | sizeof(struct ipt_error_target)), \ | 274 | sizeof(struct xt_error_target)), \ |
284 | .target.errorname = "ERROR", \ | 275 | .target.errorname = "ERROR", \ |
285 | } | 276 | } |
286 | 277 | ||
@@ -291,8 +282,6 @@ extern unsigned int ipt_do_table(struct sk_buff *skb, | |||
291 | const struct net_device *out, | 282 | const struct net_device *out, |
292 | struct xt_table *table); | 283 | struct xt_table *table); |
293 | 284 | ||
294 | #define IPT_ALIGN(s) XT_ALIGN(s) | ||
295 | |||
296 | #ifdef CONFIG_COMPAT | 285 | #ifdef CONFIG_COMPAT |
297 | #include <net/compat.h> | 286 | #include <net/compat.h> |
298 | 287 | ||
@@ -307,14 +296,12 @@ struct compat_ipt_entry { | |||
307 | }; | 296 | }; |
308 | 297 | ||
309 | /* Helper functions */ | 298 | /* Helper functions */ |
310 | static inline struct ipt_entry_target * | 299 | static inline struct xt_entry_target * |
311 | compat_ipt_get_target(struct compat_ipt_entry *e) | 300 | compat_ipt_get_target(struct compat_ipt_entry *e) |
312 | { | 301 | { |
313 | return (void *)e + e->target_offset; | 302 | return (void *)e + e->target_offset; |
314 | } | 303 | } |
315 | 304 | ||
316 | #define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
317 | |||
318 | #endif /* CONFIG_COMPAT */ | 305 | #endif /* CONFIG_COMPAT */ |
319 | #endif /*__KERNEL__*/ | 306 | #endif /*__KERNEL__*/ |
320 | #endif /* _IPTABLES_H */ | 307 | #endif /* _IPTABLES_H */ |
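For userspace the visible change is that table-name fields are sized by XT_TABLE_MAXNAMELEN and the old IPT_* spellings survive only outside __KERNEL__; a userspace sketch (not part of this diff, needs CAP_NET_ADMIN) querying the filter table roughly the way iptables' libiptc does:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/netfilter_ipv4/ip_tables.h>

	static int print_filter_info(void)
	{
		struct ipt_getinfo info;
		socklen_t len = sizeof(info);
		int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

		if (s < 0)
			return -1;
		memset(&info, 0, sizeof(info));
		strncpy(info.name, "filter", XT_TABLE_MAXNAMELEN);	/* name[] is XT_TABLE_MAXNAMELEN wide */
		if (getsockopt(s, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len) < 0) {
			close(s);
			return -1;
		}
		printf("filter: %u entries, %u bytes\n", info.num_entries, info.size);
		close(s);
		return 0;
	}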
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 18442ff19c07..c9784f7a9c1f 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h | |||
@@ -27,13 +27,42 @@ | |||
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | 29 | ||
30 | #ifndef __KERNEL__ | ||
30 | #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 31 | #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
31 | #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 32 | #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
32 | |||
33 | #define ip6t_match xt_match | 33 | #define ip6t_match xt_match |
34 | #define ip6t_target xt_target | 34 | #define ip6t_target xt_target |
35 | #define ip6t_table xt_table | 35 | #define ip6t_table xt_table |
36 | #define ip6t_get_revision xt_get_revision | 36 | #define ip6t_get_revision xt_get_revision |
37 | #define ip6t_entry_match xt_entry_match | ||
38 | #define ip6t_entry_target xt_entry_target | ||
39 | #define ip6t_standard_target xt_standard_target | ||
40 | #define ip6t_error_target xt_error_target | ||
41 | #define ip6t_counters xt_counters | ||
42 | #define IP6T_CONTINUE XT_CONTINUE | ||
43 | #define IP6T_RETURN XT_RETURN | ||
44 | |||
45 | /* Pre-iptables-1.4.0 */ | ||
46 | #include <linux/netfilter/xt_tcpudp.h> | ||
47 | #define ip6t_tcp xt_tcp | ||
48 | #define ip6t_udp xt_udp | ||
49 | #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
50 | #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
51 | #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
52 | #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
53 | #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK | ||
54 | #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
55 | #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
56 | #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK | ||
57 | |||
58 | #define ip6t_counters_info xt_counters_info | ||
59 | #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET | ||
60 | #define IP6T_ERROR_TARGET XT_ERROR_TARGET | ||
61 | #define IP6T_MATCH_ITERATE(e, fn, args...) \ | ||
62 | XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) | ||
63 | #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
64 | XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) | ||
65 | #endif | ||
37 | 66 | ||
38 | /* Yes, Virginia, you have to zero the padding. */ | 67 | /* Yes, Virginia, you have to zero the padding. */ |
39 | struct ip6t_ip6 { | 68 | struct ip6t_ip6 { |
@@ -62,12 +91,6 @@ struct ip6t_ip6 { | |||
62 | u_int8_t invflags; | 91 | u_int8_t invflags; |
63 | }; | 92 | }; |
64 | 93 | ||
65 | #define ip6t_entry_match xt_entry_match | ||
66 | #define ip6t_entry_target xt_entry_target | ||
67 | #define ip6t_standard_target xt_standard_target | ||
68 | |||
69 | #define ip6t_counters xt_counters | ||
70 | |||
71 | /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ | 94 | /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ |
72 | #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper | 95 | #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper |
73 | protocols */ | 96 | protocols */ |
@@ -112,17 +135,12 @@ struct ip6t_entry { | |||
112 | /* Standard entry */ | 135 | /* Standard entry */ |
113 | struct ip6t_standard { | 136 | struct ip6t_standard { |
114 | struct ip6t_entry entry; | 137 | struct ip6t_entry entry; |
115 | struct ip6t_standard_target target; | 138 | struct xt_standard_target target; |
116 | }; | ||
117 | |||
118 | struct ip6t_error_target { | ||
119 | struct ip6t_entry_target target; | ||
120 | char errorname[IP6T_FUNCTION_MAXNAMELEN]; | ||
121 | }; | 139 | }; |
122 | 140 | ||
123 | struct ip6t_error { | 141 | struct ip6t_error { |
124 | struct ip6t_entry entry; | 142 | struct ip6t_entry entry; |
125 | struct ip6t_error_target target; | 143 | struct xt_error_target target; |
126 | }; | 144 | }; |
127 | 145 | ||
128 | #define IP6T_ENTRY_INIT(__size) \ | 146 | #define IP6T_ENTRY_INIT(__size) \ |
@@ -134,16 +152,16 @@ struct ip6t_error { | |||
134 | #define IP6T_STANDARD_INIT(__verdict) \ | 152 | #define IP6T_STANDARD_INIT(__verdict) \ |
135 | { \ | 153 | { \ |
136 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ | 154 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ |
137 | .target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \ | 155 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
138 | sizeof(struct ip6t_standard_target)), \ | 156 | sizeof(struct xt_standard_target)), \ |
139 | .target.verdict = -(__verdict) - 1, \ | 157 | .target.verdict = -(__verdict) - 1, \ |
140 | } | 158 | } |
141 | 159 | ||
142 | #define IP6T_ERROR_INIT \ | 160 | #define IP6T_ERROR_INIT \ |
143 | { \ | 161 | { \ |
144 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ | 162 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ |
145 | .target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \ | 163 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
146 | sizeof(struct ip6t_error_target)), \ | 164 | sizeof(struct xt_error_target)), \ |
147 | .target.errorname = "ERROR", \ | 165 | .target.errorname = "ERROR", \ |
148 | } | 166 | } |
149 | 167 | ||
@@ -166,30 +184,6 @@ struct ip6t_error { | |||
166 | #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) | 184 | #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) |
167 | #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET | 185 | #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET |
168 | 186 | ||
169 | /* CONTINUE verdict for targets */ | ||
170 | #define IP6T_CONTINUE XT_CONTINUE | ||
171 | |||
172 | /* For standard target */ | ||
173 | #define IP6T_RETURN XT_RETURN | ||
174 | |||
175 | /* TCP/UDP matching stuff */ | ||
176 | #include <linux/netfilter/xt_tcpudp.h> | ||
177 | |||
178 | #define ip6t_tcp xt_tcp | ||
179 | #define ip6t_udp xt_udp | ||
180 | |||
181 | /* Values for "inv" field in struct ipt_tcp. */ | ||
182 | #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
183 | #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
184 | #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
185 | #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
186 | #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK | ||
187 | |||
188 | /* Values for "invflags" field in struct ipt_udp. */ | ||
189 | #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
190 | #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
191 | #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK | ||
192 | |||
193 | /* ICMP matching stuff */ | 187 | /* ICMP matching stuff */ |
194 | struct ip6t_icmp { | 188 | struct ip6t_icmp { |
195 | u_int8_t type; /* type to match */ | 189 | u_int8_t type; /* type to match */ |
@@ -203,7 +197,7 @@ struct ip6t_icmp { | |||
203 | /* The argument to IP6T_SO_GET_INFO */ | 197 | /* The argument to IP6T_SO_GET_INFO */ |
204 | struct ip6t_getinfo { | 198 | struct ip6t_getinfo { |
205 | /* Which table: caller fills this in. */ | 199 | /* Which table: caller fills this in. */ |
206 | char name[IP6T_TABLE_MAXNAMELEN]; | 200 | char name[XT_TABLE_MAXNAMELEN]; |
207 | 201 | ||
208 | /* Kernel fills these in. */ | 202 | /* Kernel fills these in. */ |
209 | /* Which hook entry points are valid: bitmask */ | 203 | /* Which hook entry points are valid: bitmask */ |
@@ -225,7 +219,7 @@ struct ip6t_getinfo { | |||
225 | /* The argument to IP6T_SO_SET_REPLACE. */ | 219 | /* The argument to IP6T_SO_SET_REPLACE. */ |
226 | struct ip6t_replace { | 220 | struct ip6t_replace { |
227 | /* Which table. */ | 221 | /* Which table. */ |
228 | char name[IP6T_TABLE_MAXNAMELEN]; | 222 | char name[XT_TABLE_MAXNAMELEN]; |
229 | 223 | ||
230 | /* Which hook entry points are valid: bitmask. You can't | 224 | /* Which hook entry points are valid: bitmask. You can't |
231 | change this. */ | 225 | change this. */ |
@@ -253,13 +247,10 @@ struct ip6t_replace { | |||
253 | struct ip6t_entry entries[0]; | 247 | struct ip6t_entry entries[0]; |
254 | }; | 248 | }; |
255 | 249 | ||
256 | /* The argument to IP6T_SO_ADD_COUNTERS. */ | ||
257 | #define ip6t_counters_info xt_counters_info | ||
258 | |||
259 | /* The argument to IP6T_SO_GET_ENTRIES. */ | 250 | /* The argument to IP6T_SO_GET_ENTRIES. */ |
260 | struct ip6t_get_entries { | 251 | struct ip6t_get_entries { |
261 | /* Which table: user fills this in. */ | 252 | /* Which table: user fills this in. */ |
262 | char name[IP6T_TABLE_MAXNAMELEN]; | 253 | char name[XT_TABLE_MAXNAMELEN]; |
263 | 254 | ||
264 | /* User fills this in: total entry size. */ | 255 | /* User fills this in: total entry size. */ |
265 | unsigned int size; | 256 | unsigned int size; |
@@ -268,28 +259,13 @@ struct ip6t_get_entries { | |||
268 | struct ip6t_entry entrytable[0]; | 259 | struct ip6t_entry entrytable[0]; |
269 | }; | 260 | }; |
270 | 261 | ||
271 | /* Standard return verdict, or do jump. */ | ||
272 | #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET | ||
273 | /* Error verdict. */ | ||
274 | #define IP6T_ERROR_TARGET XT_ERROR_TARGET | ||
275 | |||
276 | /* Helper functions */ | 262 | /* Helper functions */ |
277 | static __inline__ struct ip6t_entry_target * | 263 | static __inline__ struct xt_entry_target * |
278 | ip6t_get_target(struct ip6t_entry *e) | 264 | ip6t_get_target(struct ip6t_entry *e) |
279 | { | 265 | { |
280 | return (void *)e + e->target_offset; | 266 | return (void *)e + e->target_offset; |
281 | } | 267 | } |
282 | 268 | ||
283 | #ifndef __KERNEL__ | ||
284 | /* fn returns 0 to continue iteration */ | ||
285 | #define IP6T_MATCH_ITERATE(e, fn, args...) \ | ||
286 | XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) | ||
287 | |||
288 | /* fn returns 0 to continue iteration */ | ||
289 | #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
290 | XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) | ||
291 | #endif | ||
292 | |||
293 | /* | 269 | /* |
294 | * Main firewall chains definitions and global var's definitions. | 270 | * Main firewall chains definitions and global var's definitions. |
295 | */ | 271 | */ |
@@ -316,8 +292,6 @@ extern int ip6t_ext_hdr(u8 nexthdr); | |||
316 | extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | 292 | extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, |
317 | int target, unsigned short *fragoff); | 293 | int target, unsigned short *fragoff); |
318 | 294 | ||
319 | #define IP6T_ALIGN(s) XT_ALIGN(s) | ||
320 | |||
321 | #ifdef CONFIG_COMPAT | 295 | #ifdef CONFIG_COMPAT |
322 | #include <net/compat.h> | 296 | #include <net/compat.h> |
323 | 297 | ||
@@ -331,14 +305,12 @@ struct compat_ip6t_entry { | |||
331 | unsigned char elems[0]; | 305 | unsigned char elems[0]; |
332 | }; | 306 | }; |
333 | 307 | ||
334 | static inline struct ip6t_entry_target * | 308 | static inline struct xt_entry_target * |
335 | compat_ip6t_get_target(struct compat_ip6t_entry *e) | 309 | compat_ip6t_get_target(struct compat_ip6t_entry *e) |
336 | { | 310 | { |
337 | return (void *)e + e->target_offset; | 311 | return (void *)e + e->target_offset; |
338 | } | 312 | } |
339 | 313 | ||
340 | #define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
341 | |||
342 | #endif /* CONFIG_COMPAT */ | 314 | #endif /* CONFIG_COMPAT */ |
343 | #endif /*__KERNEL__*/ | 315 | #endif /*__KERNEL__*/ |
344 | #endif /* _IP6_TABLES_H */ | 316 | #endif /* _IP6_TABLES_H */ |
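Editor's note: with the compatibility aliases now confined to #ifndef __KERNEL__ and the kernel-side structs switched to their xt_* equivalents, userspace can still walk an entry blob by hand instead of using the old IP6T_ENTRY_ITERATE() macro. A minimal userspace-style sketch follows; walk_entries() and the printf reporting are invented for illustration, while struct ip6t_entry, ip6t_get_target() and the xt_entry_target layout come from the headers.

#include <stdio.h>
#include <sys/types.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

/* Walk a blob of ip6t_entry records and print each target's name. */
static void walk_entries(void *entries, unsigned int size)
{
	struct ip6t_entry *e;
	unsigned int off = 0;

	while (off < size) {
		e = (struct ip6t_entry *)((char *)entries + off);
		/* ip6t_get_target() resolves (void *)e + e->target_offset */
		printf("target: %s\n", ip6t_get_target(e)->u.user.name);
		if (!e->next_offset)
			break;	/* malformed blob: avoid looping forever */
		off += e->next_offset;
	}
}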
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 50d8009be86c..79358bb712c6 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | struct netpoll { | 15 | struct netpoll { |
16 | struct net_device *dev; | 16 | struct net_device *dev; |
17 | struct net_device *real_dev; | ||
18 | char dev_name[IFNAMSIZ]; | 17 | char dev_name[IFNAMSIZ]; |
19 | const char *name; | 18 | const char *name; |
20 | void (*rx_hook)(struct netpoll *, int, char *, int); | 19 | void (*rx_hook)(struct netpoll *, int, char *, int); |
@@ -53,7 +52,13 @@ void netpoll_set_trap(int trap); | |||
53 | void __netpoll_cleanup(struct netpoll *np); | 52 | void __netpoll_cleanup(struct netpoll *np); |
54 | void netpoll_cleanup(struct netpoll *np); | 53 | void netpoll_cleanup(struct netpoll *np); |
55 | int __netpoll_rx(struct sk_buff *skb); | 54 | int __netpoll_rx(struct sk_buff *skb); |
56 | void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); | 55 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, |
56 | struct net_device *dev); | ||
57 | static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | ||
58 | { | ||
59 | netpoll_send_skb_on_dev(np, skb, np->dev); | ||
60 | } | ||
61 | |||
57 | 62 | ||
58 | 63 | ||
59 | #ifdef CONFIG_NETPOLL | 64 | #ifdef CONFIG_NETPOLL |
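Editor's note: the removed real_dev pointer is replaced by an explicit device argument. netpoll_send_skb() stays available as a thin inline wrapper around netpoll_send_skb_on_dev(), so stacked devices such as bonding can name the lower device per transmission instead of caching it in struct netpoll. A hedged sketch of the two calling styles; example_xmit() is invented for illustration.

#include <linux/netpoll.h>
#include <linux/skbuff.h>

static void example_xmit(struct netpoll *np, struct sk_buff *skb,
			 struct net_device *slave_dev)
{
	if (slave_dev)
		/* stacked device: transmit on the chosen lower device */
		netpoll_send_skb_on_dev(np, skb, slave_dev);
	else
		/* plain case: the inline wrapper falls back to np->dev */
		netpoll_send_skb(np, skb);
}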
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 07e40c625972..4925b22219d2 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -17,7 +17,9 @@ | |||
17 | 17 | ||
18 | #define NFS4_BITMAP_SIZE 2 | 18 | #define NFS4_BITMAP_SIZE 2 |
19 | #define NFS4_VERIFIER_SIZE 8 | 19 | #define NFS4_VERIFIER_SIZE 8 |
20 | #define NFS4_STATEID_SIZE 16 | 20 | #define NFS4_STATEID_SEQID_SIZE 4 |
21 | #define NFS4_STATEID_OTHER_SIZE 12 | ||
22 | #define NFS4_STATEID_SIZE (NFS4_STATEID_SEQID_SIZE + NFS4_STATEID_OTHER_SIZE) | ||
21 | #define NFS4_FHSIZE 128 | 23 | #define NFS4_FHSIZE 128 |
22 | #define NFS4_MAXPATHLEN PATH_MAX | 24 | #define NFS4_MAXPATHLEN PATH_MAX |
23 | #define NFS4_MAXNAMLEN NAME_MAX | 25 | #define NFS4_MAXNAMLEN NAME_MAX |
@@ -61,6 +63,9 @@ | |||
61 | #define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 | 63 | #define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 |
62 | #define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 | 64 | #define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 |
63 | 65 | ||
66 | #define NFS4_CDFC4_FORE 0x1 | ||
67 | #define NFS4_CDFC4_BACK 0x2 | ||
68 | |||
64 | #define NFS4_SET_TO_SERVER_TIME 0 | 69 | #define NFS4_SET_TO_SERVER_TIME 0 |
65 | #define NFS4_SET_TO_CLIENT_TIME 1 | 70 | #define NFS4_SET_TO_CLIENT_TIME 1 |
66 | 71 | ||
@@ -167,7 +172,16 @@ struct nfs4_acl { | |||
167 | }; | 172 | }; |
168 | 173 | ||
169 | typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; | 174 | typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; |
170 | typedef struct { char data[NFS4_STATEID_SIZE]; } nfs4_stateid; | 175 | |
176 | struct nfs41_stateid { | ||
177 | __be32 seqid; | ||
178 | char other[NFS4_STATEID_OTHER_SIZE]; | ||
179 | } __attribute__ ((packed)); | ||
180 | |||
181 | typedef union { | ||
182 | char data[NFS4_STATEID_SIZE]; | ||
183 | struct nfs41_stateid stateid; | ||
184 | } nfs4_stateid; | ||
171 | 185 | ||
172 | enum nfs_opnum4 { | 186 | enum nfs_opnum4 { |
173 | OP_ACCESS = 3, | 187 | OP_ACCESS = 3, |
@@ -471,6 +485,8 @@ enum lock_type4 { | |||
471 | #define FATTR4_WORD1_TIME_MODIFY (1UL << 21) | 485 | #define FATTR4_WORD1_TIME_MODIFY (1UL << 21) |
472 | #define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) | 486 | #define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) |
473 | #define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) | 487 | #define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) |
488 | #define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) | ||
489 | #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) | ||
474 | 490 | ||
475 | #define NFSPROC4_NULL 0 | 491 | #define NFSPROC4_NULL 0 |
476 | #define NFSPROC4_COMPOUND 1 | 492 | #define NFSPROC4_COMPOUND 1 |
@@ -532,6 +548,8 @@ enum { | |||
532 | NFSPROC4_CLNT_SEQUENCE, | 548 | NFSPROC4_CLNT_SEQUENCE, |
533 | NFSPROC4_CLNT_GET_LEASE_TIME, | 549 | NFSPROC4_CLNT_GET_LEASE_TIME, |
534 | NFSPROC4_CLNT_RECLAIM_COMPLETE, | 550 | NFSPROC4_CLNT_RECLAIM_COMPLETE, |
551 | NFSPROC4_CLNT_LAYOUTGET, | ||
552 | NFSPROC4_CLNT_GETDEVICEINFO, | ||
535 | }; | 553 | }; |
536 | 554 | ||
537 | /* nfs41 types */ | 555 | /* nfs41 types */ |
@@ -550,6 +568,49 @@ enum state_protect_how4 { | |||
550 | SP4_SSV = 2 | 568 | SP4_SSV = 2 |
551 | }; | 569 | }; |
552 | 570 | ||
571 | enum pnfs_layouttype { | ||
572 | LAYOUT_NFSV4_1_FILES = 1, | ||
573 | LAYOUT_OSD2_OBJECTS = 2, | ||
574 | LAYOUT_BLOCK_VOLUME = 3, | ||
575 | }; | ||
576 | |||
577 | /* used for both layout return and recall */ | ||
578 | enum pnfs_layoutreturn_type { | ||
579 | RETURN_FILE = 1, | ||
580 | RETURN_FSID = 2, | ||
581 | RETURN_ALL = 3 | ||
582 | }; | ||
583 | |||
584 | enum pnfs_iomode { | ||
585 | IOMODE_READ = 1, | ||
586 | IOMODE_RW = 2, | ||
587 | IOMODE_ANY = 3, | ||
588 | }; | ||
589 | |||
590 | enum pnfs_notify_deviceid_type4 { | ||
591 | NOTIFY_DEVICEID4_CHANGE = 1 << 1, | ||
592 | NOTIFY_DEVICEID4_DELETE = 1 << 2, | ||
593 | }; | ||
594 | |||
595 | #define NFL4_UFLG_MASK 0x0000003F | ||
596 | #define NFL4_UFLG_DENSE 0x00000001 | ||
597 | #define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 | ||
598 | #define NFL4_UFLG_STRIPE_UNIT_SIZE_MASK 0xFFFFFFC0 | ||
599 | |||
600 | /* Encoded in the loh_body field of type layouthint4 */ | ||
601 | enum filelayout_hint_care4 { | ||
602 | NFLH4_CARE_DENSE = NFL4_UFLG_DENSE, | ||
603 | NFLH4_CARE_COMMIT_THRU_MDS = NFL4_UFLG_COMMIT_THRU_MDS, | ||
604 | NFLH4_CARE_STRIPE_UNIT_SIZE = 0x00000040, | ||
605 | NFLH4_CARE_STRIPE_COUNT = 0x00000080 | ||
606 | }; | ||
607 | |||
608 | #define NFS4_DEVICEID4_SIZE 16 | ||
609 | |||
610 | struct nfs4_deviceid { | ||
611 | char data[NFS4_DEVICEID4_SIZE]; | ||
612 | }; | ||
613 | |||
553 | #endif | 614 | #endif |
554 | #endif | 615 | #endif |
555 | 616 | ||
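Editor's note: the stateid becomes a union so NFSv4.1 code can address the 4-byte seqid and the 12 opaque "other" bytes separately while the overall object stays 16 bytes. A sketch of how the new layout might be used; the comparison helper below is hypothetical, only the types and size constants come from the header.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nfs4.h>

/* Match two stateids on their "other" field, ignoring the seqid. */
static inline bool example_stateid_other_match(const nfs4_stateid *a,
					       const nfs4_stateid *b)
{
	/* the union must keep the on-the-wire size of 16 bytes */
	BUILD_BUG_ON(sizeof(nfs4_stateid) != NFS4_STATEID_SIZE);
	return memcmp(a->stateid.other, b->stateid.other,
		      NFS4_STATEID_OTHER_SIZE) == 0;
}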
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 508f8cf6da37..bba26684acdc 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -185,9 +185,12 @@ struct nfs_inode { | |||
185 | struct nfs4_cached_acl *nfs4_acl; | 185 | struct nfs4_cached_acl *nfs4_acl; |
186 | /* NFSv4 state */ | 186 | /* NFSv4 state */ |
187 | struct list_head open_states; | 187 | struct list_head open_states; |
188 | struct nfs_delegation *delegation; | 188 | struct nfs_delegation __rcu *delegation; |
189 | fmode_t delegation_state; | 189 | fmode_t delegation_state; |
190 | struct rw_semaphore rwsem; | 190 | struct rw_semaphore rwsem; |
191 | |||
192 | /* pNFS layout information */ | ||
193 | struct pnfs_layout_hdr *layout; | ||
191 | #endif /* CONFIG_NFS_V4*/ | 194 | #endif /* CONFIG_NFS_V4*/ |
192 | #ifdef CONFIG_NFS_FSCACHE | 195 | #ifdef CONFIG_NFS_FSCACHE |
193 | struct fscache_cookie *fscache; | 196 | struct fscache_cookie *fscache; |
@@ -360,10 +363,13 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); | |||
360 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); | 363 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); |
361 | extern void put_nfs_open_context(struct nfs_open_context *ctx); | 364 | extern void put_nfs_open_context(struct nfs_open_context *ctx); |
362 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); | 365 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); |
366 | extern struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode); | ||
367 | extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); | ||
363 | extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); | 368 | extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); |
364 | extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); | 369 | extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); |
365 | extern u64 nfs_compat_user_ino64(u64 fileid); | 370 | extern u64 nfs_compat_user_ino64(u64 fileid); |
366 | extern void nfs_fattr_init(struct nfs_fattr *fattr); | 371 | extern void nfs_fattr_init(struct nfs_fattr *fattr); |
372 | extern unsigned long nfs_inc_attr_generation_counter(void); | ||
367 | 373 | ||
368 | extern struct nfs_fattr *nfs_alloc_fattr(void); | 374 | extern struct nfs_fattr *nfs_alloc_fattr(void); |
369 | 375 | ||
@@ -379,9 +385,12 @@ static inline void nfs_free_fhandle(const struct nfs_fh *fh) | |||
379 | kfree(fh); | 385 | kfree(fh); |
380 | } | 386 | } |
381 | 387 | ||
388 | /* | ||
389 | * linux/fs/nfs/nfsroot.c | ||
390 | */ | ||
391 | extern int nfs_root_data(char **root_device, char **root_data); /*__init*/ | ||
382 | /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ | 392 | /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ |
383 | extern __be32 root_nfs_parse_addr(char *name); /*__init*/ | 393 | extern __be32 root_nfs_parse_addr(char *name); /*__init*/ |
384 | extern unsigned long nfs_inc_attr_generation_counter(void); | ||
385 | 394 | ||
386 | /* | 395 | /* |
387 | * linux/fs/nfs/file.c | 396 | * linux/fs/nfs/file.c |
@@ -479,10 +488,10 @@ extern void nfs_release_automount_timer(void); | |||
479 | /* | 488 | /* |
480 | * linux/fs/nfs/unlink.c | 489 | * linux/fs/nfs/unlink.c |
481 | */ | 490 | */ |
482 | extern int nfs_async_unlink(struct inode *dir, struct dentry *dentry); | ||
483 | extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); | 491 | extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); |
484 | extern void nfs_block_sillyrename(struct dentry *dentry); | 492 | extern void nfs_block_sillyrename(struct dentry *dentry); |
485 | extern void nfs_unblock_sillyrename(struct dentry *dentry); | 493 | extern void nfs_unblock_sillyrename(struct dentry *dentry); |
494 | extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry); | ||
486 | 495 | ||
487 | /* | 496 | /* |
488 | * linux/fs/nfs/write.c | 497 | * linux/fs/nfs/write.c |
@@ -584,10 +593,6 @@ nfs_fileid_to_ino_t(u64 fileid) | |||
584 | return ino; | 593 | return ino; |
585 | } | 594 | } |
586 | 595 | ||
587 | /* NFS root */ | ||
588 | |||
589 | extern void * nfs_root_data(void); | ||
590 | |||
591 | #define nfs_wait_event(clnt, wq, condition) \ | 596 | #define nfs_wait_event(clnt, wq, condition) \ |
592 | ({ \ | 597 | ({ \ |
593 | int __retval = wait_event_killable(wq, condition); \ | 598 | int __retval = wait_event_killable(wq, condition); \ |
@@ -613,6 +618,8 @@ extern void * nfs_root_data(void); | |||
613 | #define NFSDBG_CLIENT 0x0200 | 618 | #define NFSDBG_CLIENT 0x0200 |
614 | #define NFSDBG_MOUNT 0x0400 | 619 | #define NFSDBG_MOUNT 0x0400 |
615 | #define NFSDBG_FSCACHE 0x0800 | 620 | #define NFSDBG_FSCACHE 0x0800 |
621 | #define NFSDBG_PNFS 0x1000 | ||
622 | #define NFSDBG_PNFS_LD 0x2000 | ||
616 | #define NFSDBG_ALL 0xFFFF | 623 | #define NFSDBG_ALL 0xFFFF |
617 | 624 | ||
618 | #ifdef __KERNEL__ | 625 | #ifdef __KERNEL__ |
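Editor's note: the delegation pointer is now annotated __rcu, so readers are expected to go through the RCU accessors (and sparse can flag plain dereferences). A minimal sketch assuming CONFIG_NFS_V4; the helper name is invented, rcu_read_lock()/rcu_dereference() are the stock RCU primitives.

#include <linux/rcupdate.h>
#include <linux/nfs_fs.h>

#ifdef CONFIG_NFS_V4
static bool example_has_delegation(struct nfs_inode *nfsi)
{
	bool ret;

	rcu_read_lock();
	/* sparse-checked accessor required by the new __rcu annotation */
	ret = rcu_dereference(nfsi->delegation) != NULL;
	rcu_read_unlock();
	return ret;
}
#endif /* CONFIG_NFS_V4 */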
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index c82ee7cd6288..452d96436d26 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -82,6 +82,8 @@ struct nfs_client { | |||
82 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ | 82 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ |
83 | u32 cl_exchange_flags; | 83 | u32 cl_exchange_flags; |
84 | struct nfs4_session *cl_session; /* shared session */ | 84 | struct nfs4_session *cl_session; /* shared session */ |
85 | struct list_head cl_layouts; | ||
86 | struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ | ||
85 | #endif /* CONFIG_NFS_V4_1 */ | 87 | #endif /* CONFIG_NFS_V4_1 */ |
86 | 88 | ||
87 | #ifdef CONFIG_NFS_FSCACHE | 89 | #ifdef CONFIG_NFS_FSCACHE |
@@ -124,6 +126,7 @@ struct nfs_server { | |||
124 | 126 | ||
125 | struct nfs_fsid fsid; | 127 | struct nfs_fsid fsid; |
126 | __u64 maxfilesize; /* maximum file size */ | 128 | __u64 maxfilesize; /* maximum file size */ |
129 | struct timespec time_delta; /* smallest time granularity */ | ||
127 | unsigned long mount_time; /* when this fs was mounted */ | 130 | unsigned long mount_time; /* when this fs was mounted */ |
128 | dev_t s_dev; /* superblock dev numbers */ | 131 | dev_t s_dev; /* superblock dev numbers */ |
129 | 132 | ||
@@ -144,6 +147,7 @@ struct nfs_server { | |||
144 | u32 acl_bitmask; /* V4 bitmask representing the ACEs | 147 | u32 acl_bitmask; /* V4 bitmask representing the ACEs |
145 | that are supported on this | 148 | that are supported on this |
146 | filesystem */ | 149 | filesystem */ |
150 | struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ | ||
147 | #endif | 151 | #endif |
148 | void (*destroy)(struct nfs_server *); | 152 | void (*destroy)(struct nfs_server *); |
149 | 153 | ||
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h index 91a1c24e0cbf..e8352dc5afb5 100644 --- a/include/linux/nfs_idmap.h +++ b/include/linux/nfs_idmap.h | |||
@@ -66,13 +66,40 @@ struct idmap_msg { | |||
66 | /* Forward declaration to make this header independent of others */ | 66 | /* Forward declaration to make this header independent of others */ |
67 | struct nfs_client; | 67 | struct nfs_client; |
68 | 68 | ||
69 | #ifdef CONFIG_NFS_USE_NEW_IDMAPPER | ||
70 | |||
71 | int nfs_idmap_init(void); | ||
72 | void nfs_idmap_quit(void); | ||
73 | |||
74 | static inline int nfs_idmap_new(struct nfs_client *clp) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static inline void nfs_idmap_delete(struct nfs_client *clp) | ||
80 | { | ||
81 | } | ||
82 | |||
83 | #else /* CONFIG_NFS_USE_NEW_IDMAPPER not set */ | ||
84 | |||
85 | static inline int nfs_idmap_init(void) | ||
86 | { | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static inline void nfs_idmap_quit(void) | ||
91 | { | ||
92 | } | ||
93 | |||
69 | int nfs_idmap_new(struct nfs_client *); | 94 | int nfs_idmap_new(struct nfs_client *); |
70 | void nfs_idmap_delete(struct nfs_client *); | 95 | void nfs_idmap_delete(struct nfs_client *); |
71 | 96 | ||
97 | #endif /* CONFIG_NFS_USE_NEW_IDMAPPER */ | ||
98 | |||
72 | int nfs_map_name_to_uid(struct nfs_client *, const char *, size_t, __u32 *); | 99 | int nfs_map_name_to_uid(struct nfs_client *, const char *, size_t, __u32 *); |
73 | int nfs_map_group_to_gid(struct nfs_client *, const char *, size_t, __u32 *); | 100 | int nfs_map_group_to_gid(struct nfs_client *, const char *, size_t, __u32 *); |
74 | int nfs_map_uid_to_name(struct nfs_client *, __u32, char *); | 101 | int nfs_map_uid_to_name(struct nfs_client *, __u32, char *, size_t); |
75 | int nfs_map_gid_to_group(struct nfs_client *, __u32, char *); | 102 | int nfs_map_gid_to_group(struct nfs_client *, __u32, char *, size_t); |
76 | 103 | ||
77 | extern unsigned int nfs_idmap_cache_timeout; | 104 | extern unsigned int nfs_idmap_cache_timeout; |
78 | #endif /* __KERNEL__ */ | 105 | #endif /* __KERNEL__ */ |
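Editor's note: nfs_map_uid_to_name() and nfs_map_gid_to_group() now take the destination buffer length, and the idmapper setup/teardown pairs become no-ops on one side of CONFIG_NFS_USE_NEW_IDMAPPER or the other. A small hedged usage sketch; the wrapper is invented, the prototype comes from the header.

#include <linux/types.h>
#include <linux/nfs_idmap.h>

static int example_uid_to_name(struct nfs_client *clp, __u32 uid,
			       char *buf, size_t buflen)
{
	/* negative errno on failure; on success the mapped name is in buf */
	return nfs_map_uid_to_name(clp, uid, buf, buflen);
}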
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 5d59ae861aa6..576bddd72e04 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h | |||
@@ -71,4 +71,7 @@ struct nfs_mount_data { | |||
71 | #define NFS_MOUNT_NORESVPORT 0x40000 | 71 | #define NFS_MOUNT_NORESVPORT 0x40000 |
72 | #define NFS_MOUNT_LEGACY_INTERFACE 0x80000 | 72 | #define NFS_MOUNT_LEGACY_INTERFACE 0x80000 |
73 | 73 | ||
74 | #define NFS_MOUNT_LOCAL_FLOCK 0x100000 | ||
75 | #define NFS_MOUNT_LOCAL_FCNTL 0x200000 | ||
76 | |||
74 | #endif | 77 | #endif |
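Editor's note: the two new flags let a mount request purely client-side flock and/or POSIX (fcntl) locking. A trivial sketch, with the helper invented for illustration.

#include <linux/nfs_mount.h>

static inline int example_wants_local_locking(unsigned int flags)
{
	/* both flags set: all locks are handled locally on the client */
	return (flags & (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL)) ==
	       (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
}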
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index fc461926c412..ba6cc8f223c9 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -112,7 +112,9 @@ struct nfs_fsinfo { | |||
112 | __u32 wtmult; /* writes should be multiple of this */ | 112 | __u32 wtmult; /* writes should be multiple of this */ |
113 | __u32 dtpref; /* pref. readdir transfer size */ | 113 | __u32 dtpref; /* pref. readdir transfer size */ |
114 | __u64 maxfilesize; | 114 | __u64 maxfilesize; |
115 | struct timespec time_delta; /* server time granularity */ | ||
115 | __u32 lease_time; /* in seconds */ | 116 | __u32 lease_time; /* in seconds */ |
117 | __u32 layouttype; /* supported pnfs layout driver */ | ||
116 | }; | 118 | }; |
117 | 119 | ||
118 | struct nfs_fsstat { | 120 | struct nfs_fsstat { |
@@ -170,7 +172,7 @@ struct nfs4_sequence_args { | |||
170 | 172 | ||
171 | struct nfs4_sequence_res { | 173 | struct nfs4_sequence_res { |
172 | struct nfs4_session *sr_session; | 174 | struct nfs4_session *sr_session; |
173 | u8 sr_slotid; /* slot used to send request */ | 175 | struct nfs4_slot *sr_slot; /* slot used to send request */ |
174 | int sr_status; /* sequence operation status */ | 176 | int sr_status; /* sequence operation status */ |
175 | unsigned long sr_renewal_time; | 177 | unsigned long sr_renewal_time; |
176 | u32 sr_status_flags; | 178 | u32 sr_status_flags; |
@@ -185,6 +187,55 @@ struct nfs4_get_lease_time_res { | |||
185 | struct nfs4_sequence_res lr_seq_res; | 187 | struct nfs4_sequence_res lr_seq_res; |
186 | }; | 188 | }; |
187 | 189 | ||
190 | #define PNFS_LAYOUT_MAXSIZE 4096 | ||
191 | |||
192 | struct nfs4_layoutdriver_data { | ||
193 | __u32 len; | ||
194 | void *buf; | ||
195 | }; | ||
196 | |||
197 | struct pnfs_layout_range { | ||
198 | u32 iomode; | ||
199 | u64 offset; | ||
200 | u64 length; | ||
201 | }; | ||
202 | |||
203 | struct nfs4_layoutget_args { | ||
204 | __u32 type; | ||
205 | struct pnfs_layout_range range; | ||
206 | __u64 minlength; | ||
207 | __u32 maxcount; | ||
208 | struct inode *inode; | ||
209 | struct nfs_open_context *ctx; | ||
210 | struct nfs4_sequence_args seq_args; | ||
211 | }; | ||
212 | |||
213 | struct nfs4_layoutget_res { | ||
214 | __u32 return_on_close; | ||
215 | struct pnfs_layout_range range; | ||
216 | __u32 type; | ||
217 | nfs4_stateid stateid; | ||
218 | struct nfs4_layoutdriver_data layout; | ||
219 | struct nfs4_sequence_res seq_res; | ||
220 | }; | ||
221 | |||
222 | struct nfs4_layoutget { | ||
223 | struct nfs4_layoutget_args args; | ||
224 | struct nfs4_layoutget_res res; | ||
225 | struct pnfs_layout_segment **lsegpp; | ||
226 | int status; | ||
227 | }; | ||
228 | |||
229 | struct nfs4_getdeviceinfo_args { | ||
230 | struct pnfs_device *pdev; | ||
231 | struct nfs4_sequence_args seq_args; | ||
232 | }; | ||
233 | |||
234 | struct nfs4_getdeviceinfo_res { | ||
235 | struct pnfs_device *pdev; | ||
236 | struct nfs4_sequence_res seq_res; | ||
237 | }; | ||
238 | |||
188 | /* | 239 | /* |
189 | * Arguments to the open call. | 240 | * Arguments to the open call. |
190 | */ | 241 | */ |
@@ -400,6 +451,27 @@ struct nfs_removeres { | |||
400 | }; | 451 | }; |
401 | 452 | ||
402 | /* | 453 | /* |
454 | * Common arguments to the rename call | ||
455 | */ | ||
456 | struct nfs_renameargs { | ||
457 | const struct nfs_fh *old_dir; | ||
458 | const struct nfs_fh *new_dir; | ||
459 | const struct qstr *old_name; | ||
460 | const struct qstr *new_name; | ||
461 | const u32 *bitmask; | ||
462 | struct nfs4_sequence_args seq_args; | ||
463 | }; | ||
464 | |||
465 | struct nfs_renameres { | ||
466 | const struct nfs_server *server; | ||
467 | struct nfs4_change_info old_cinfo; | ||
468 | struct nfs_fattr *old_fattr; | ||
469 | struct nfs4_change_info new_cinfo; | ||
470 | struct nfs_fattr *new_fattr; | ||
471 | struct nfs4_sequence_res seq_res; | ||
472 | }; | ||
473 | |||
474 | /* | ||
403 | * Argument struct for decode_entry function | 475 | * Argument struct for decode_entry function |
404 | */ | 476 | */ |
405 | struct nfs_entry { | 477 | struct nfs_entry { |
@@ -434,15 +506,6 @@ struct nfs_createargs { | |||
434 | struct iattr * sattr; | 506 | struct iattr * sattr; |
435 | }; | 507 | }; |
436 | 508 | ||
437 | struct nfs_renameargs { | ||
438 | struct nfs_fh * fromfh; | ||
439 | const char * fromname; | ||
440 | unsigned int fromlen; | ||
441 | struct nfs_fh * tofh; | ||
442 | const char * toname; | ||
443 | unsigned int tolen; | ||
444 | }; | ||
445 | |||
446 | struct nfs_setattrargs { | 509 | struct nfs_setattrargs { |
447 | struct nfs_fh * fh; | 510 | struct nfs_fh * fh; |
448 | nfs4_stateid stateid; | 511 | nfs4_stateid stateid; |
@@ -586,15 +649,6 @@ struct nfs3_mknodargs { | |||
586 | dev_t rdev; | 649 | dev_t rdev; |
587 | }; | 650 | }; |
588 | 651 | ||
589 | struct nfs3_renameargs { | ||
590 | struct nfs_fh * fromfh; | ||
591 | const char * fromname; | ||
592 | unsigned int fromlen; | ||
593 | struct nfs_fh * tofh; | ||
594 | const char * toname; | ||
595 | unsigned int tolen; | ||
596 | }; | ||
597 | |||
598 | struct nfs3_linkargs { | 652 | struct nfs3_linkargs { |
599 | struct nfs_fh * fromfh; | 653 | struct nfs_fh * fromfh; |
600 | struct nfs_fh * tofh; | 654 | struct nfs_fh * tofh; |
@@ -629,11 +683,6 @@ struct nfs3_readlinkargs { | |||
629 | struct page ** pages; | 683 | struct page ** pages; |
630 | }; | 684 | }; |
631 | 685 | ||
632 | struct nfs3_renameres { | ||
633 | struct nfs_fattr * fromattr; | ||
634 | struct nfs_fattr * toattr; | ||
635 | }; | ||
636 | |||
637 | struct nfs3_linkres { | 686 | struct nfs3_linkres { |
638 | struct nfs_fattr * dir_attr; | 687 | struct nfs_fattr * dir_attr; |
639 | struct nfs_fattr * fattr; | 688 | struct nfs_fattr * fattr; |
@@ -780,6 +829,7 @@ struct nfs4_readdir_arg { | |||
780 | struct page ** pages; /* zero-copy data */ | 829 | struct page ** pages; /* zero-copy data */ |
781 | unsigned int pgbase; /* zero-copy data */ | 830 | unsigned int pgbase; /* zero-copy data */ |
782 | const u32 * bitmask; | 831 | const u32 * bitmask; |
832 | int plus; | ||
783 | struct nfs4_sequence_args seq_args; | 833 | struct nfs4_sequence_args seq_args; |
784 | }; | 834 | }; |
785 | 835 | ||
@@ -801,24 +851,6 @@ struct nfs4_readlink_res { | |||
801 | struct nfs4_sequence_res seq_res; | 851 | struct nfs4_sequence_res seq_res; |
802 | }; | 852 | }; |
803 | 853 | ||
804 | struct nfs4_rename_arg { | ||
805 | const struct nfs_fh * old_dir; | ||
806 | const struct nfs_fh * new_dir; | ||
807 | const struct qstr * old_name; | ||
808 | const struct qstr * new_name; | ||
809 | const u32 * bitmask; | ||
810 | struct nfs4_sequence_args seq_args; | ||
811 | }; | ||
812 | |||
813 | struct nfs4_rename_res { | ||
814 | const struct nfs_server * server; | ||
815 | struct nfs4_change_info old_cinfo; | ||
816 | struct nfs_fattr * old_fattr; | ||
817 | struct nfs4_change_info new_cinfo; | ||
818 | struct nfs_fattr * new_fattr; | ||
819 | struct nfs4_sequence_res seq_res; | ||
820 | }; | ||
821 | |||
822 | #define NFS4_SETCLIENTID_NAMELEN (127) | 854 | #define NFS4_SETCLIENTID_NAMELEN (127) |
823 | struct nfs4_setclientid { | 855 | struct nfs4_setclientid { |
824 | const nfs4_verifier * sc_verifier; | 856 | const nfs4_verifier * sc_verifier; |
@@ -1032,19 +1064,21 @@ struct nfs_rpc_ops { | |||
1032 | int (*readlink)(struct inode *, struct page *, unsigned int, | 1064 | int (*readlink)(struct inode *, struct page *, unsigned int, |
1033 | unsigned int); | 1065 | unsigned int); |
1034 | int (*create) (struct inode *, struct dentry *, | 1066 | int (*create) (struct inode *, struct dentry *, |
1035 | struct iattr *, int, struct nameidata *); | 1067 | struct iattr *, int, struct nfs_open_context *); |
1036 | int (*remove) (struct inode *, struct qstr *); | 1068 | int (*remove) (struct inode *, struct qstr *); |
1037 | void (*unlink_setup) (struct rpc_message *, struct inode *dir); | 1069 | void (*unlink_setup) (struct rpc_message *, struct inode *dir); |
1038 | int (*unlink_done) (struct rpc_task *, struct inode *); | 1070 | int (*unlink_done) (struct rpc_task *, struct inode *); |
1039 | int (*rename) (struct inode *, struct qstr *, | 1071 | int (*rename) (struct inode *, struct qstr *, |
1040 | struct inode *, struct qstr *); | 1072 | struct inode *, struct qstr *); |
1073 | void (*rename_setup) (struct rpc_message *msg, struct inode *dir); | ||
1074 | int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); | ||
1041 | int (*link) (struct inode *, struct inode *, struct qstr *); | 1075 | int (*link) (struct inode *, struct inode *, struct qstr *); |
1042 | int (*symlink) (struct inode *, struct dentry *, struct page *, | 1076 | int (*symlink) (struct inode *, struct dentry *, struct page *, |
1043 | unsigned int, struct iattr *); | 1077 | unsigned int, struct iattr *); |
1044 | int (*mkdir) (struct inode *, struct dentry *, struct iattr *); | 1078 | int (*mkdir) (struct inode *, struct dentry *, struct iattr *); |
1045 | int (*rmdir) (struct inode *, struct qstr *); | 1079 | int (*rmdir) (struct inode *, struct qstr *); |
1046 | int (*readdir) (struct dentry *, struct rpc_cred *, | 1080 | int (*readdir) (struct dentry *, struct rpc_cred *, |
1047 | u64, struct page *, unsigned int, int); | 1081 | u64, struct page **, unsigned int, int); |
1048 | int (*mknod) (struct inode *, struct dentry *, struct iattr *, | 1082 | int (*mknod) (struct inode *, struct dentry *, struct iattr *, |
1049 | dev_t); | 1083 | dev_t); |
1050 | int (*statfs) (struct nfs_server *, struct nfs_fh *, | 1084 | int (*statfs) (struct nfs_server *, struct nfs_fh *, |
@@ -1054,7 +1088,7 @@ struct nfs_rpc_ops { | |||
1054 | int (*pathconf) (struct nfs_server *, struct nfs_fh *, | 1088 | int (*pathconf) (struct nfs_server *, struct nfs_fh *, |
1055 | struct nfs_pathconf *); | 1089 | struct nfs_pathconf *); |
1056 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); | 1090 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); |
1057 | __be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus); | 1091 | __be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus); |
1058 | void (*read_setup) (struct nfs_read_data *, struct rpc_message *); | 1092 | void (*read_setup) (struct nfs_read_data *, struct rpc_message *); |
1059 | int (*read_done) (struct rpc_task *, struct nfs_read_data *); | 1093 | int (*read_done) (struct rpc_task *, struct nfs_read_data *); |
1060 | void (*write_setup) (struct nfs_write_data *, struct rpc_message *); | 1094 | void (*write_setup) (struct nfs_write_data *, struct rpc_message *); |
@@ -1065,6 +1099,10 @@ struct nfs_rpc_ops { | |||
1065 | int (*lock_check_bounds)(const struct file_lock *); | 1099 | int (*lock_check_bounds)(const struct file_lock *); |
1066 | void (*clear_acl_cache)(struct inode *); | 1100 | void (*clear_acl_cache)(struct inode *); |
1067 | void (*close_context)(struct nfs_open_context *ctx, int); | 1101 | void (*close_context)(struct nfs_open_context *ctx, int); |
1102 | struct inode * (*open_context) (struct inode *dir, | ||
1103 | struct nfs_open_context *ctx, | ||
1104 | int open_flags, | ||
1105 | struct iattr *iattr); | ||
1068 | }; | 1106 | }; |
1069 | 1107 | ||
1070 | /* | 1108 | /* |
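Editor's note: the per-version rename argument structs collapse into a single nfs_renameargs shared by v2/v3/v4, alongside the new rename_setup/rename_done rpc_ops hooks. A sketch of filling the common struct from version-independent code; the wrapper function is invented, the field names come from the header.

#include <linux/nfs_xdr.h>

static void example_fill_rename(struct nfs_renameargs *arg,
				const struct nfs_fh *old_dir,
				const struct qstr *old_name,
				const struct nfs_fh *new_dir,
				const struct qstr *new_name)
{
	arg->old_dir  = old_dir;
	arg->old_name = old_name;
	arg->new_dir  = new_dir;
	arg->new_name = new_name;
	arg->bitmask  = NULL;	/* only consulted by the v4 XDR encoder */
}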
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index f5487b6f91ed..227e49dd5720 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h | |||
@@ -4,16 +4,16 @@ | |||
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU Lesser General Public License as published |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * by the Free Software Foundation; either version 2.1 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU Lesser General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU Lesser General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
@@ -147,7 +147,6 @@ struct nilfs_super_root { | |||
147 | #define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ | 147 | #define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ |
148 | #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ | 148 | #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ |
149 | #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ | 149 | #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ |
150 | #define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */ | ||
151 | #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ | 150 | #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ |
152 | #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order | 151 | #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order |
153 | semantics also for data */ | 152 | semantics also for data */ |
@@ -229,6 +228,7 @@ struct nilfs_super_block { | |||
229 | */ | 228 | */ |
230 | #define NILFS_CURRENT_REV 2 /* current major revision */ | 229 | #define NILFS_CURRENT_REV 2 /* current major revision */ |
231 | #define NILFS_MINOR_REV 0 /* minor revision */ | 230 | #define NILFS_MINOR_REV 0 /* minor revision */ |
231 | #define NILFS_MIN_SUPP_REV 2 /* minimum supported revision */ | ||
232 | 232 | ||
233 | /* | 233 | /* |
234 | * Feature set definitions | 234 | * Feature set definitions |
@@ -270,6 +270,14 @@ struct nilfs_super_block { | |||
270 | segments */ | 270 | segments */ |
271 | 271 | ||
272 | /* | 272 | /* |
273 | * We call DAT, cpfile, and sufile root metadata files. Inodes of | ||
274 | * these files are written in super root block instead of ifile, and | ||
275 | * garbage collector doesn't keep any past versions of these files. | ||
276 | */ | ||
277 | #define NILFS_ROOT_METADATA_FILE(ino) \ | ||
278 | ((ino) >= NILFS_DAT_INO && (ino) <= NILFS_SUFILE_INO) | ||
279 | |||
280 | /* | ||
273 | * bytes offset of secondary super block | 281 | * bytes offset of secondary super block |
274 | */ | 282 | */ |
275 | #define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) | 283 | #define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) |
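Editor's note: the new NILFS_ROOT_METADATA_FILE() macro is a single range check over the contiguous DAT/cpfile/sufile inode numbers, i.e. the files whose inodes live in the super root block rather than the ifile. A trivial usage sketch with an invented helper.

#include <linux/nilfs2_fs.h>

static inline int example_lives_in_super_root(unsigned long ino)
{
	/* true for the DAT, cpfile and sufile inodes */
	return NILFS_ROOT_METADATA_FILE(ino);
}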
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 2c8701687336..0edb2566c14c 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h | |||
@@ -40,6 +40,43 @@ | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * DOC: Frame transmission/registration support | ||
44 | * | ||
45 | * Frame transmission and registration support exists to allow userspace | ||
46 | * management entities such as wpa_supplicant react to management frames | ||
47 | * that are not being handled by the kernel. This includes, for example, | ||
48 | * certain classes of action frames that cannot be handled in the kernel | ||
49 | * for various reasons. | ||
50 | * | ||
51 | * Frame registration is done on a per-interface basis and registrations | ||
52 | * cannot be removed other than by closing the socket. It is possible to | ||
53 | * specify a registration filter to register, for example, only for a | ||
54 | * certain type of action frame. In particular with action frames, those | ||
55 | * that userspace registers for will not be returned as unhandled by the | ||
56 | * driver, so that the registered application has to take responsibility | ||
57 | * for doing that. | ||
58 | * | ||
59 | * The type of frame that can be registered for is also dependent on the | ||
60 | * driver and interface type. The frame types are advertised in wiphy | ||
61 | * attributes so applications know what to expect. | ||
62 | * | ||
63 | * NOTE: When an interface changes type while registrations are active, | ||
64 | * these registrations are ignored until the interface type is | ||
65 | * changed again. This means that changing the interface type can | ||
66 | * lead to a situation that couldn't otherwise be produced, but | ||
67 | * any such registrations will be dormant in the sense that they | ||
68 | * will not be serviced, i.e. they will not receive any frames. | ||
69 | * | ||
70 | * Frame transmission allows userspace to send for example the required | ||
71 | * responses to action frames. It is subject to some sanity checking, | ||
72 | * but many frames can be transmitted. When a frame was transmitted, its | ||
73 | * status is indicated to the sending socket. | ||
74 | * | ||
75 | * For more technical details, see the corresponding command descriptions | ||
76 | * below. | ||
77 | */ | ||
78 | |||
79 | /** | ||
43 | * enum nl80211_commands - supported nl80211 commands | 80 | * enum nl80211_commands - supported nl80211 commands |
44 | * | 81 | * |
45 | * @NL80211_CMD_UNSPEC: unspecified command to catch errors | 82 | * @NL80211_CMD_UNSPEC: unspecified command to catch errors |
@@ -258,7 +295,9 @@ | |||
258 | * auth and assoc steps. For this, you need to specify the SSID in a | 295 | * auth and assoc steps. For this, you need to specify the SSID in a |
259 | * %NL80211_ATTR_SSID attribute, and can optionally specify the association | 296 | * %NL80211_ATTR_SSID attribute, and can optionally specify the association |
260 | * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, | 297 | * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, |
261 | * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. | 298 | * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT, |
299 | * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and | ||
300 | * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT. | ||
262 | * It is also sent as an event, with the BSSID and response IEs when the | 301 | * It is also sent as an event, with the BSSID and response IEs when the |
263 | * connection is established or failed to be established. This can be | 302 | * connection is established or failed to be established. This can be |
264 | * determined by the STATUS_CODE attribute. | 303 | * determined by the STATUS_CODE attribute. |
@@ -276,8 +315,8 @@ | |||
276 | * channel for the specified amount of time. This can be used to do | 315 | * channel for the specified amount of time. This can be used to do |
277 | * off-channel operations like transmit a Public Action frame and wait for | 316 | * off-channel operations like transmit a Public Action frame and wait for |
278 | * a response while being associated to an AP on another channel. | 317 | * a response while being associated to an AP on another channel. |
279 | * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify which | 318 | * %NL80211_ATTR_IFINDEX is used to specify which interface (and thus |
280 | * radio is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the | 319 | * radio) is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the |
281 | * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be | 320 | * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be |
282 | * optionally used to specify additional channel parameters. | 321 | * optionally used to specify additional channel parameters. |
283 | * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds | 322 | * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds |
@@ -301,16 +340,20 @@ | |||
301 | * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface | 340 | * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface |
302 | * and @NL80211_ATTR_TX_RATES the set of allowed rates. | 341 | * and @NL80211_ATTR_TX_RATES the set of allowed rates. |
303 | * | 342 | * |
304 | * @NL80211_CMD_REGISTER_ACTION: Register for receiving certain action frames | 343 | * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames |
305 | * (via @NL80211_CMD_ACTION) for processing in userspace. This command | 344 | * (via @NL80211_CMD_FRAME) for processing in userspace. This command |
306 | * requires an interface index and a match attribute containing the first | 345 | * requires an interface index, a frame type attribute (optional for |
307 | * few bytes of the frame that should match, e.g. a single byte for only | 346 | * backward compatibility reasons, if not given assumes action frames) |
308 | * a category match or four bytes for vendor frames including the OUI. | 347 | * and a match attribute containing the first few bytes of the frame |
309 | * The registration cannot be dropped, but is removed automatically | 348 | * that should match, e.g. a single byte for only a category match or |
310 | * when the netlink socket is closed. Multiple registrations can be made. | 349 | * four bytes for vendor frames including the OUI. The registration |
311 | * @NL80211_CMD_ACTION: Action frame TX request and RX notification. This | 350 | * cannot be dropped, but is removed automatically when the netlink |
312 | * command is used both as a request to transmit an Action frame and as an | 351 | * socket is closed. Multiple registrations can be made. |
313 | * event indicating reception of an Action frame that was not processed in | 352 | * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for |
353 | * backward compatibility | ||
354 | * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This | ||
355 | * command is used both as a request to transmit a management frame and | ||
356 | * as an event indicating reception of a frame that was not processed in | ||
314 | * kernel code, but is for us (i.e., which may need to be processed in a | 357 | * kernel code, but is for us (i.e., which may need to be processed in a |
315 | * user space application). %NL80211_ATTR_FRAME is used to specify the | 358 | * user space application). %NL80211_ATTR_FRAME is used to specify the |
316 | * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and | 359 | * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and |
@@ -320,11 +363,14 @@ | |||
320 | * operational channel). When called, this operation returns a cookie | 363 | * operational channel). When called, this operation returns a cookie |
321 | * (%NL80211_ATTR_COOKIE) that will be included with the TX status event | 364 | * (%NL80211_ATTR_COOKIE) that will be included with the TX status event |
322 | * pertaining to the TX request. | 365 | * pertaining to the TX request. |
323 | * @NL80211_CMD_ACTION_TX_STATUS: Report TX status of an Action frame | 366 | * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. |
324 | * transmitted with %NL80211_CMD_ACTION. %NL80211_ATTR_COOKIE identifies | 367 | * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame |
368 | * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies | ||
325 | * the TX command and %NL80211_ATTR_FRAME includes the contents of the | 369 | * the TX command and %NL80211_ATTR_FRAME includes the contents of the |
326 | * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged | 370 | * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged |
327 | * the frame. | 371 | * the frame. |
372 | * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for | ||
373 | * backward compatibility. | ||
328 | * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command | 374 | * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command |
329 | * is used to configure connection quality monitoring notification trigger | 375 | * is used to configure connection quality monitoring notification trigger |
330 | * levels. | 376 | * levels. |
@@ -341,6 +387,8 @@ | |||
341 | * of any other interfaces, and other interfaces will again take | 387 | * of any other interfaces, and other interfaces will again take |
342 | * precedence when they are used. | 388 | * precedence when they are used. |
343 | * | 389 | * |
390 | * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface. | ||
391 | * | ||
344 | * @NL80211_CMD_MAX: highest used command number | 392 | * @NL80211_CMD_MAX: highest used command number |
345 | * @__NL80211_CMD_AFTER_LAST: internal use | 393 | * @__NL80211_CMD_AFTER_LAST: internal use |
346 | */ | 394 | */ |
@@ -429,9 +477,12 @@ enum nl80211_commands { | |||
429 | 477 | ||
430 | NL80211_CMD_SET_TX_BITRATE_MASK, | 478 | NL80211_CMD_SET_TX_BITRATE_MASK, |
431 | 479 | ||
432 | NL80211_CMD_REGISTER_ACTION, | 480 | NL80211_CMD_REGISTER_FRAME, |
433 | NL80211_CMD_ACTION, | 481 | NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME, |
434 | NL80211_CMD_ACTION_TX_STATUS, | 482 | NL80211_CMD_FRAME, |
483 | NL80211_CMD_ACTION = NL80211_CMD_FRAME, | ||
484 | NL80211_CMD_FRAME_TX_STATUS, | ||
485 | NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS, | ||
435 | 486 | ||
436 | NL80211_CMD_SET_POWER_SAVE, | 487 | NL80211_CMD_SET_POWER_SAVE, |
437 | NL80211_CMD_GET_POWER_SAVE, | 488 | NL80211_CMD_GET_POWER_SAVE, |
@@ -440,6 +491,7 @@ enum nl80211_commands { | |||
440 | NL80211_CMD_NOTIFY_CQM, | 491 | NL80211_CMD_NOTIFY_CQM, |
441 | 492 | ||
442 | NL80211_CMD_SET_CHANNEL, | 493 | NL80211_CMD_SET_CHANNEL, |
494 | NL80211_CMD_SET_WDS_PEER, | ||
443 | 495 | ||
444 | /* add new commands above here */ | 496 | /* add new commands above here */ |
445 | 497 | ||
@@ -639,6 +691,15 @@ enum nl80211_commands { | |||
639 | * request, the driver will assume that the port is unauthorized until | 691 | * request, the driver will assume that the port is unauthorized until |
640 | * authorized by user space. Otherwise, port is marked authorized by | 692 | * authorized by user space. Otherwise, port is marked authorized by |
641 | * default in station mode. | 693 | * default in station mode. |
694 | * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the | ||
695 | * ethertype that will be used for key negotiation. It can be | ||
696 | * specified with the associate and connect commands. If it is not | ||
697 | * specified, the value defaults to 0x888E (PAE, 802.1X). This | ||
698 | * attribute is also used as a flag in the wiphy information to | ||
699 | * indicate that protocols other than PAE are supported. | ||
700 | * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with | ||
701 | * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom | ||
702 | * ethertype frames used for key negotiation must not be encrypted. | ||
642 | * | 703 | * |
643 | * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. | 704 | * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. |
644 | * We recommend using nested, driver-specific attributes within this. | 705 | * We recommend using nested, driver-specific attributes within this. |
@@ -708,7 +769,16 @@ enum nl80211_commands { | |||
708 | * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. | 769 | * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. |
709 | * | 770 | * |
710 | * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain | 771 | * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain |
711 | * at least one byte, currently used with @NL80211_CMD_REGISTER_ACTION. | 772 | * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME. |
773 | * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the | ||
774 | * @NL80211_CMD_REGISTER_FRAME command. | ||
775 | * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a | ||
776 | * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing | ||
777 | * information about which frame types can be transmitted with | ||
778 | * %NL80211_CMD_FRAME. | ||
779 | * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a | ||
780 | * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing | ||
781 | * information about which frame types can be registered for RX. | ||
712 | * | 782 | * |
713 | * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was | 783 | * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was |
714 | * acknowledged by the recipient. | 784 | * acknowledged by the recipient. |
@@ -731,6 +801,9 @@ enum nl80211_commands { | |||
731 | * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING | 801 | * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING |
732 | * for non-automatic settings. | 802 | * for non-automatic settings. |
733 | * | 803 | * |
804 | * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly | ||
805 | * means support for per-station GTKs. | ||
806 | * | ||
734 | * @NL80211_ATTR_MAX: highest attribute number currently defined | 807 | * @NL80211_ATTR_MAX: highest attribute number currently defined |
735 | * @__NL80211_ATTR_AFTER_LAST: internal use | 808 | * @__NL80211_ATTR_AFTER_LAST: internal use |
736 | */ | 809 | */ |
@@ -891,6 +964,15 @@ enum nl80211_attrs { | |||
891 | NL80211_ATTR_WIPHY_TX_POWER_SETTING, | 964 | NL80211_ATTR_WIPHY_TX_POWER_SETTING, |
892 | NL80211_ATTR_WIPHY_TX_POWER_LEVEL, | 965 | NL80211_ATTR_WIPHY_TX_POWER_LEVEL, |
893 | 966 | ||
967 | NL80211_ATTR_TX_FRAME_TYPES, | ||
968 | NL80211_ATTR_RX_FRAME_TYPES, | ||
969 | NL80211_ATTR_FRAME_TYPE, | ||
970 | |||
971 | NL80211_ATTR_CONTROL_PORT_ETHERTYPE, | ||
972 | NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, | ||
973 | |||
974 | NL80211_ATTR_SUPPORT_IBSS_RSN, | ||
975 | |||
894 | /* add attributes here, update the policy in nl80211.c */ | 976 | /* add attributes here, update the policy in nl80211.c */ |
895 | 977 | ||
896 | __NL80211_ATTR_AFTER_LAST, | 978 | __NL80211_ATTR_AFTER_LAST, |
@@ -946,8 +1028,10 @@ enum nl80211_attrs { | |||
946 | * @NL80211_IFTYPE_WDS: wireless distribution interface | 1028 | * @NL80211_IFTYPE_WDS: wireless distribution interface |
947 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames | 1029 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames |
948 | * @NL80211_IFTYPE_MESH_POINT: mesh point | 1030 | * @NL80211_IFTYPE_MESH_POINT: mesh point |
1031 | * @NL80211_IFTYPE_P2P_CLIENT: P2P client | ||
1032 | * @NL80211_IFTYPE_P2P_GO: P2P group owner | ||
949 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined | 1033 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined |
950 | * @__NL80211_IFTYPE_AFTER_LAST: internal use | 1034 | * @NUM_NL80211_IFTYPES: number of defined interface types |
951 | * | 1035 | * |
952 | * These values are used with the %NL80211_ATTR_IFTYPE | 1036 | * These values are used with the %NL80211_ATTR_IFTYPE |
953 | * to set the type of an interface. | 1037 | * to set the type of an interface. |
@@ -962,10 +1046,12 @@ enum nl80211_iftype { | |||
962 | NL80211_IFTYPE_WDS, | 1046 | NL80211_IFTYPE_WDS, |
963 | NL80211_IFTYPE_MONITOR, | 1047 | NL80211_IFTYPE_MONITOR, |
964 | NL80211_IFTYPE_MESH_POINT, | 1048 | NL80211_IFTYPE_MESH_POINT, |
1049 | NL80211_IFTYPE_P2P_CLIENT, | ||
1050 | NL80211_IFTYPE_P2P_GO, | ||
965 | 1051 | ||
966 | /* keep last */ | 1052 | /* keep last */ |
967 | __NL80211_IFTYPE_AFTER_LAST, | 1053 | NUM_NL80211_IFTYPES, |
968 | NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 | 1054 | NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1 |
969 | }; | 1055 | }; |
970 | 1056 | ||
971 | /** | 1057 | /** |
@@ -974,11 +1060,14 @@ enum nl80211_iftype { | |||
974 | * Station flags. When a station is added to an AP interface, it is | 1060 | * Station flags. When a station is added to an AP interface, it is |
975 | * assumed to be already associated (and hence authenticated.) | 1061 | * assumed to be already associated (and hence authenticated.) |
976 | * | 1062 | * |
1063 | * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved | ||
977 | * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) | 1064 | * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) |
978 | * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames | 1065 | * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames |
979 | * with short Barker preamble | 1066 | * with short Barker preamble |
980 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable | 1067 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable |
981 | * @NL80211_STA_FLAG_MFP: station uses management frame protection | 1068 | * @NL80211_STA_FLAG_MFP: station uses management frame protection |
1069 | * @NL80211_STA_FLAG_MAX: highest station flag number currently defined | ||
1070 | * @__NL80211_STA_FLAG_AFTER_LAST: internal use | ||
982 | */ | 1071 | */ |
983 | enum nl80211_sta_flags { | 1072 | enum nl80211_sta_flags { |
984 | __NL80211_STA_FLAG_INVALID, | 1073 | __NL80211_STA_FLAG_INVALID, |
@@ -1048,6 +1137,8 @@ enum nl80211_rate_info { | |||
1048 | * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) | 1137 | * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) |
1049 | * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this | 1138 | * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this |
1050 | * station) | 1139 | * station) |
1140 | * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station) | ||
1141 | * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station) | ||
1051 | */ | 1142 | */ |
1052 | enum nl80211_sta_info { | 1143 | enum nl80211_sta_info { |
1053 | __NL80211_STA_INFO_INVALID, | 1144 | __NL80211_STA_INFO_INVALID, |
@@ -1061,6 +1152,8 @@ enum nl80211_sta_info { | |||
1061 | NL80211_STA_INFO_TX_BITRATE, | 1152 | NL80211_STA_INFO_TX_BITRATE, |
1062 | NL80211_STA_INFO_RX_PACKETS, | 1153 | NL80211_STA_INFO_RX_PACKETS, |
1063 | NL80211_STA_INFO_TX_PACKETS, | 1154 | NL80211_STA_INFO_TX_PACKETS, |
1155 | NL80211_STA_INFO_TX_RETRIES, | ||
1156 | NL80211_STA_INFO_TX_FAILED, | ||
1064 | 1157 | ||
1065 | /* keep last */ | 1158 | /* keep last */ |
1066 | __NL80211_STA_INFO_AFTER_LAST, | 1159 | __NL80211_STA_INFO_AFTER_LAST, |
@@ -1091,14 +1184,17 @@ enum nl80211_mpath_flags { | |||
1091 | * information about a mesh path. | 1184 | * information about a mesh path. |
1092 | * | 1185 | * |
1093 | * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved | 1186 | * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved |
1094 | * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination | 1187 | * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination |
1095 | * @NL80211_ATTR_MPATH_SN: destination sequence number | 1188 | * @NL80211_MPATH_INFO_SN: destination sequence number |
1096 | * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path | 1189 | * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path |
1097 | * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now | 1190 | * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now |
1098 | * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in | 1191 | * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in |
1099 | * &enum nl80211_mpath_flags; | 1192 | * &enum nl80211_mpath_flags; |
1100 | * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec | 1193 | * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec |
1101 | * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries | 1194 | * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries |
1195 | * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number | ||
1196 | * currently defined | ||
1197 | * @__NL80211_MPATH_INFO_AFTER_LAST: internal use | ||
1102 | */ | 1198 | */ |
1103 | enum nl80211_mpath_info { | 1199 | enum nl80211_mpath_info { |
1104 | __NL80211_MPATH_INFO_INVALID, | 1200 | __NL80211_MPATH_INFO_INVALID, |
@@ -1127,6 +1223,8 @@ enum nl80211_mpath_info { | |||
1127 | * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE | 1223 | * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE |
1128 | * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n | 1224 | * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n |
1129 | * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n | 1225 | * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n |
1226 | * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined | ||
1227 | * @__NL80211_BAND_ATTR_AFTER_LAST: internal use | ||
1130 | */ | 1228 | */ |
1131 | enum nl80211_band_attr { | 1229 | enum nl80211_band_attr { |
1132 | __NL80211_BAND_ATTR_INVALID, | 1230 | __NL80211_BAND_ATTR_INVALID, |
@@ -1147,6 +1245,7 @@ enum nl80211_band_attr { | |||
1147 | 1245 | ||
1148 | /** | 1246 | /** |
1149 | * enum nl80211_frequency_attr - frequency attributes | 1247 | * enum nl80211_frequency_attr - frequency attributes |
1248 | * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved | ||
1150 | * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz | 1249 | * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz |
1151 | * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current | 1250 | * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current |
1152 | * regulatory domain. | 1251 | * regulatory domain. |
@@ -1158,6 +1257,9 @@ enum nl80211_band_attr { | |||
1158 | * on this channel in current regulatory domain. | 1257 | * on this channel in current regulatory domain. |
1159 | * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm | 1258 | * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm |
1160 | * (100 * dBm). | 1259 | * (100 * dBm). |
1260 | * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number | ||
1261 | * currently defined | ||
1262 | * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use | ||
1161 | */ | 1263 | */ |
1162 | enum nl80211_frequency_attr { | 1264 | enum nl80211_frequency_attr { |
1163 | __NL80211_FREQUENCY_ATTR_INVALID, | 1265 | __NL80211_FREQUENCY_ATTR_INVALID, |
@@ -1177,9 +1279,13 @@ enum nl80211_frequency_attr { | |||
1177 | 1279 | ||
1178 | /** | 1280 | /** |
1179 | * enum nl80211_bitrate_attr - bitrate attributes | 1281 | * enum nl80211_bitrate_attr - bitrate attributes |
1282 | * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved | ||
1180 | * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps | 1283 | * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps |
1181 | * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported | 1284 | * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported |
1182 | * in 2.4 GHz band. | 1285 | * in 2.4 GHz band. |
1286 | * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number | ||
1287 | * currently defined | ||
1288 | * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use | ||
1183 | */ | 1289 | */ |
1184 | enum nl80211_bitrate_attr { | 1290 | enum nl80211_bitrate_attr { |
1185 | __NL80211_BITRATE_ATTR_INVALID, | 1291 | __NL80211_BITRATE_ATTR_INVALID, |
@@ -1235,6 +1341,7 @@ enum nl80211_reg_type { | |||
1235 | 1341 | ||
1236 | /** | 1342 | /** |
1237 | * enum nl80211_reg_rule_attr - regulatory rule attributes | 1343 | * enum nl80211_reg_rule_attr - regulatory rule attributes |
1344 | * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved | ||
1238 | * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional | 1345 | * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional |
1239 | * considerations for a given frequency range. These are the | 1346 | * considerations for a given frequency range. These are the |
1240 | * &enum nl80211_reg_rule_flags. | 1347 | * &enum nl80211_reg_rule_flags. |
@@ -1251,6 +1358,9 @@ enum nl80211_reg_type { | |||
1251 | * If you don't have one then don't send this. | 1358 | * If you don't have one then don't send this. |
1252 | * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for | 1359 | * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for |
1253 | * a given frequency range. The value is in mBm (100 * dBm). | 1360 | * a given frequency range. The value is in mBm (100 * dBm). |
1361 | * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number | ||
1362 | * currently defined | ||
1363 | * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use | ||
1254 | */ | 1364 | */ |
1255 | enum nl80211_reg_rule_attr { | 1365 | enum nl80211_reg_rule_attr { |
1256 | __NL80211_REG_RULE_ATTR_INVALID, | 1366 | __NL80211_REG_RULE_ATTR_INVALID, |
@@ -1302,11 +1412,31 @@ enum nl80211_reg_rule_flags { | |||
1302 | * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved | 1412 | * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved |
1303 | * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel | 1413 | * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel |
1304 | * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) | 1414 | * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) |
1415 | * @NL80211_SURVEY_INFO_IN_USE: channel is currently being used | ||
1416 | * @NL80211_SURVEY_INFO_CHANNEL_TIME: amount of time (in ms) that the radio | ||
1417 | * spent on this channel | ||
1418 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY: amount of the time the primary | ||
1419 | * channel was sensed busy (either due to activity or energy detect) | ||
1420 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: amount of time the extension | ||
1421 | * channel was sensed busy | ||
1422 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_RX: amount of time the radio spent | ||
1423 | * receiving data | ||
1424 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_TX: amount of time the radio spent | ||
1425 | * transmitting data | ||
1426 | * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number | ||
1427 | * currently defined | ||
1428 | * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use | ||
1305 | */ | 1429 | */ |
1306 | enum nl80211_survey_info { | 1430 | enum nl80211_survey_info { |
1307 | __NL80211_SURVEY_INFO_INVALID, | 1431 | __NL80211_SURVEY_INFO_INVALID, |
1308 | NL80211_SURVEY_INFO_FREQUENCY, | 1432 | NL80211_SURVEY_INFO_FREQUENCY, |
1309 | NL80211_SURVEY_INFO_NOISE, | 1433 | NL80211_SURVEY_INFO_NOISE, |
1434 | NL80211_SURVEY_INFO_IN_USE, | ||
1435 | NL80211_SURVEY_INFO_CHANNEL_TIME, | ||
1436 | NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, | ||
1437 | NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, | ||
1438 | NL80211_SURVEY_INFO_CHANNEL_TIME_RX, | ||
1439 | NL80211_SURVEY_INFO_CHANNEL_TIME_TX, | ||
1310 | 1440 | ||
1311 | /* keep last */ | 1441 | /* keep last */ |
1312 | __NL80211_SURVEY_INFO_AFTER_LAST, | 1442 | __NL80211_SURVEY_INFO_AFTER_LAST, |
@@ -1466,6 +1596,7 @@ enum nl80211_channel_type { | |||
1466 | * enum nl80211_bss - netlink attributes for a BSS | 1596 | * enum nl80211_bss - netlink attributes for a BSS |
1467 | * | 1597 | * |
1468 | * @__NL80211_BSS_INVALID: invalid | 1598 | * @__NL80211_BSS_INVALID: invalid |
1599 | * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) | ||
1469 | * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) | 1600 | * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) |
1470 | * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) | 1601 | * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) |
1471 | * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) | 1602 | * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) |
@@ -1509,6 +1640,12 @@ enum nl80211_bss { | |||
1509 | 1640 | ||
1510 | /** | 1641 | /** |
1511 | * enum nl80211_bss_status - BSS "status" | 1642 | * enum nl80211_bss_status - BSS "status" |
1643 | * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS. | ||
1644 | * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS. | ||
1645 | * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS. | ||
1646 | * | ||
1647 | * The BSS status is a BSS attribute in scan dumps, which | ||
1648 | * indicates the status the interface has wrt. this BSS. | ||
1512 | */ | 1649 | */ |
1513 | enum nl80211_bss_status { | 1650 | enum nl80211_bss_status { |
1514 | NL80211_BSS_STATUS_AUTHENTICATED, | 1651 | NL80211_BSS_STATUS_AUTHENTICATED, |
@@ -1546,11 +1683,14 @@ enum nl80211_auth_type { | |||
1546 | * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key | 1683 | * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key |
1547 | * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key | 1684 | * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key |
1548 | * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) | 1685 | * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) |
1686 | * @NUM_NL80211_KEYTYPES: number of defined key types | ||
1549 | */ | 1687 | */ |
1550 | enum nl80211_key_type { | 1688 | enum nl80211_key_type { |
1551 | NL80211_KEYTYPE_GROUP, | 1689 | NL80211_KEYTYPE_GROUP, |
1552 | NL80211_KEYTYPE_PAIRWISE, | 1690 | NL80211_KEYTYPE_PAIRWISE, |
1553 | NL80211_KEYTYPE_PEERKEY, | 1691 | NL80211_KEYTYPE_PEERKEY, |
1692 | |||
1693 | NUM_NL80211_KEYTYPES | ||
1554 | }; | 1694 | }; |
1555 | 1695 | ||
1556 | /** | 1696 | /** |
@@ -1581,6 +1721,9 @@ enum nl80211_wpa_versions { | |||
1581 | * CCMP keys, each six bytes in little endian | 1721 | * CCMP keys, each six bytes in little endian |
1582 | * @NL80211_KEY_DEFAULT: flag indicating default key | 1722 | * @NL80211_KEY_DEFAULT: flag indicating default key |
1583 | * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key | 1723 | * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key |
1724 | * @NL80211_KEY_TYPE: the key type from enum nl80211_key_type, if not | ||
1725 | * specified the default depends on whether a MAC address was | ||
1726 | * given with the command using the key or not (u32) | ||
1584 | * @__NL80211_KEY_AFTER_LAST: internal | 1727 | * @__NL80211_KEY_AFTER_LAST: internal |
1585 | * @NL80211_KEY_MAX: highest key attribute | 1728 | * @NL80211_KEY_MAX: highest key attribute |
1586 | */ | 1729 | */ |
@@ -1592,6 +1735,7 @@ enum nl80211_key_attributes { | |||
1592 | NL80211_KEY_SEQ, | 1735 | NL80211_KEY_SEQ, |
1593 | NL80211_KEY_DEFAULT, | 1736 | NL80211_KEY_DEFAULT, |
1594 | NL80211_KEY_DEFAULT_MGMT, | 1737 | NL80211_KEY_DEFAULT_MGMT, |
1738 | NL80211_KEY_TYPE, | ||
1595 | 1739 | ||
1596 | /* keep last */ | 1740 | /* keep last */ |
1597 | __NL80211_KEY_AFTER_LAST, | 1741 | __NL80211_KEY_AFTER_LAST, |
@@ -1619,8 +1763,8 @@ enum nl80211_tx_rate_attributes { | |||
1619 | 1763 | ||
1620 | /** | 1764 | /** |
1621 | * enum nl80211_band - Frequency band | 1765 | * enum nl80211_band - Frequency band |
1622 | * @NL80211_BAND_2GHZ - 2.4 GHz ISM band | 1766 | * @NL80211_BAND_2GHZ: 2.4 GHz ISM band |
1623 | * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz) | 1767 | * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) |
1624 | */ | 1768 | */ |
1625 | enum nl80211_band { | 1769 | enum nl80211_band { |
1626 | NL80211_BAND_2GHZ, | 1770 | NL80211_BAND_2GHZ, |
@@ -1658,9 +1802,9 @@ enum nl80211_attr_cqm { | |||
1658 | 1802 | ||
1659 | /** | 1803 | /** |
1660 | * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event | 1804 | * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event |
1661 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW - The RSSI level is lower than the | 1805 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the |
1662 | * configured threshold | 1806 | * configured threshold |
1663 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH - The RSSI is higher than the | 1807 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the |
1664 | * configured threshold | 1808 | * configured threshold |
1665 | */ | 1809 | */ |
1666 | enum nl80211_cqm_rssi_threshold_event { | 1810 | enum nl80211_cqm_rssi_threshold_event { |
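The interface-type list above now ends in a NUM_NL80211_IFTYPES sentinel instead of the old __NL80211_IFTYPE_AFTER_LAST marker, which makes it natural to size per-type tables directly. A minimal sketch of that pattern; the table contents are made up for illustration:

#include <linux/nl80211.h>

/* hypothetical lookup table sized with the new sentinel */
static const char * const iftype_names[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_STATION]    = "managed",
	[NL80211_IFTYPE_AP]         = "AP",
	[NL80211_IFTYPE_P2P_CLIENT] = "P2P-client",
	[NL80211_IFTYPE_P2P_GO]     = "P2P-GO",
};

static const char *iftype_name(enum nl80211_iftype type)
{
	if (type >= NUM_NL80211_IFTYPES || !iftype_names[type])
		return "unknown";
	return iftype_names[type];
}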
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index b2f1a4d83550..2026f9e1ceb8 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -49,28 +49,28 @@ | |||
49 | 49 | ||
50 | struct notifier_block { | 50 | struct notifier_block { |
51 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | 51 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); |
52 | struct notifier_block *next; | 52 | struct notifier_block __rcu *next; |
53 | int priority; | 53 | int priority; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct atomic_notifier_head { | 56 | struct atomic_notifier_head { |
57 | spinlock_t lock; | 57 | spinlock_t lock; |
58 | struct notifier_block *head; | 58 | struct notifier_block __rcu *head; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct blocking_notifier_head { | 61 | struct blocking_notifier_head { |
62 | struct rw_semaphore rwsem; | 62 | struct rw_semaphore rwsem; |
63 | struct notifier_block *head; | 63 | struct notifier_block __rcu *head; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | struct raw_notifier_head { | 66 | struct raw_notifier_head { |
67 | struct notifier_block *head; | 67 | struct notifier_block __rcu *head; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | struct srcu_notifier_head { | 70 | struct srcu_notifier_head { |
71 | struct mutex mutex; | 71 | struct mutex mutex; |
72 | struct srcu_struct srcu; | 72 | struct srcu_struct srcu; |
73 | struct notifier_block *head; | 73 | struct notifier_block __rcu *head; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ | 76 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ |
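The __rcu annotation on the chain pointers above only affects sparse checking; callers keep using the existing notifier API unchanged. A small sketch of that usage, with hypothetical names:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);	/* hypothetical chain */

static int example_event(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	/* react to 'action'; 'data' is whatever the notifier passed along */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 0,
};

void example_register(void)
{
	atomic_notifier_chain_register(&example_chain, &example_nb);
}

void example_notify(void *data)
{
	atomic_notifier_call_chain(&example_chain, 1, data);
}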
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 835f85ecd2de..975d347079d9 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -27,20 +27,19 @@ static inline int of_driver_match_device(const struct device *dev, | |||
27 | extern struct platform_device *of_dev_get(struct platform_device *dev); | 27 | extern struct platform_device *of_dev_get(struct platform_device *dev); |
28 | extern void of_dev_put(struct platform_device *dev); | 28 | extern void of_dev_put(struct platform_device *dev); |
29 | 29 | ||
30 | extern int of_device_add(struct platform_device *pdev); | ||
30 | extern int of_device_register(struct platform_device *ofdev); | 31 | extern int of_device_register(struct platform_device *ofdev); |
31 | extern void of_device_unregister(struct platform_device *ofdev); | 32 | extern void of_device_unregister(struct platform_device *ofdev); |
32 | extern void of_release_dev(struct device *dev); | ||
33 | |||
34 | static inline void of_device_free(struct platform_device *dev) | ||
35 | { | ||
36 | of_release_dev(&dev->dev); | ||
37 | } | ||
38 | 33 | ||
39 | extern ssize_t of_device_get_modalias(struct device *dev, | 34 | extern ssize_t of_device_get_modalias(struct device *dev, |
40 | char *str, ssize_t len); | 35 | char *str, ssize_t len); |
41 | 36 | ||
42 | extern int of_device_uevent(struct device *dev, struct kobj_uevent_env *env); | 37 | extern int of_device_uevent(struct device *dev, struct kobj_uevent_env *env); |
43 | 38 | ||
39 | static inline void of_device_node_put(struct device *dev) | ||
40 | { | ||
41 | of_node_put(dev->of_node); | ||
42 | } | ||
44 | 43 | ||
45 | #else /* CONFIG_OF_DEVICE */ | 44 | #else /* CONFIG_OF_DEVICE */ |
46 | 45 | ||
@@ -56,6 +55,8 @@ static inline int of_device_uevent(struct device *dev, | |||
56 | return -ENODEV; | 55 | return -ENODEV; |
57 | } | 56 | } |
58 | 57 | ||
58 | static inline void of_device_node_put(struct device *dev) { } | ||
59 | |||
59 | #endif /* CONFIG_OF_DEVICE */ | 60 | #endif /* CONFIG_OF_DEVICE */ |
60 | 61 | ||
61 | #endif /* _LINUX_OF_DEVICE_H */ | 62 | #endif /* _LINUX_OF_DEVICE_H */ |
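of_device_node_put() above is a thin wrapper around of_node_put(dev->of_node), replacing the old of_device_free()/of_release_dev() helpers. A loose sketch of how a bus driver might pair it with the register/unregister calls; the names and the exact teardown sequence are illustrative only:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_attach(struct platform_device *ofdev)
{
	/* ofdev->dev.of_node is assumed to already hold a node reference */
	return of_device_register(ofdev);
}

static void example_detach(struct platform_device *ofdev)
{
	of_device_unregister(ofdev);
	of_device_node_put(&ofdev->dev);	/* of_node_put(dev->of_node) */
}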
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 71e1a916d3fa..7bbf5b328438 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h | |||
@@ -72,7 +72,7 @@ extern void *of_get_flat_dt_prop(unsigned long node, const char *name, | |||
72 | unsigned long *size); | 72 | unsigned long *size); |
73 | extern int of_flat_dt_is_compatible(unsigned long node, const char *name); | 73 | extern int of_flat_dt_is_compatible(unsigned long node, const char *name); |
74 | extern unsigned long of_get_flat_dt_root(void); | 74 | extern unsigned long of_get_flat_dt_root(void); |
75 | extern void early_init_dt_scan_chosen_arch(unsigned long node); | 75 | |
76 | extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, | 76 | extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, |
77 | int depth, void *data); | 77 | int depth, void *data); |
78 | extern void early_init_dt_check_for_initrd(unsigned long node); | 78 | extern void early_init_dt_check_for_initrd(unsigned long node); |
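The remaining flat-tree helpers (of_get_flat_dt_prop(), of_flat_dt_is_compatible(), the early scan hooks) are normally used from an of_scan_flat_dt() callback during early boot. A rough sketch, assuming of_scan_flat_dt() from the same header and a made-up compatible string:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/of_fdt.h>
#include <asm/byteorder.h>

static int __init example_scan_node(unsigned long node, const char *uname,
				    int depth, void *data)
{
	unsigned long size;
	void *prop;

	if (!of_flat_dt_is_compatible(node, "example,early-timer"))
		return 0;			/* keep scanning */

	prop = of_get_flat_dt_prop(node, "clock-frequency", &size);
	if (prop && size >= sizeof(u32))
		*(u32 *)data = be32_to_cpup(prop);

	return 1;				/* found it, stop scanning */
}

/* called from the architecture's early setup code */
static u32 __init example_find_timer_freq(void)
{
	u32 freq = 0;

	of_scan_flat_dt(example_scan_node, &freq);
	return freq;
}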
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 5929781c104d..109e013b1772 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
@@ -5,6 +5,7 @@ | |||
5 | struct of_irq; | 5 | struct of_irq; |
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/errno.h> | 7 | #include <linux/errno.h> |
8 | #include <linux/irq.h> | ||
8 | #include <linux/ioport.h> | 9 | #include <linux/ioport.h> |
9 | #include <linux/of.h> | 10 | #include <linux/of.h> |
10 | 11 | ||
@@ -64,6 +65,9 @@ extern unsigned int irq_create_of_mapping(struct device_node *controller, | |||
64 | unsigned int intsize); | 65 | unsigned int intsize); |
65 | extern int of_irq_to_resource(struct device_node *dev, int index, | 66 | extern int of_irq_to_resource(struct device_node *dev, int index, |
66 | struct resource *r); | 67 | struct resource *r); |
68 | extern int of_irq_count(struct device_node *dev); | ||
69 | extern int of_irq_to_resource_table(struct device_node *dev, | ||
70 | struct resource *res, int nr_irqs); | ||
67 | 71 | ||
68 | #endif /* CONFIG_OF_IRQ */ | 72 | #endif /* CONFIG_OF_IRQ */ |
69 | #endif /* CONFIG_OF */ | 73 | #endif /* CONFIG_OF */ |
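The two new helpers make it easy to turn a node's interrupt list into a struct resource table, for example when building a platform device by hand. A small sketch with error handling trimmed and hypothetical names:

#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

static struct resource *example_irq_resources(struct device_node *np,
					      int *nr_filled)
{
	struct resource *res;
	int count = of_irq_count(np);

	if (count <= 0)
		return NULL;

	res = kcalloc(count, sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	*nr_filled = of_irq_to_resource_table(np, res, count);
	return res;
}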
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h new file mode 100644 index 000000000000..c65a18a0cfdf --- /dev/null +++ b/include/linux/of_pdt.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Definitions for building a device tree by calling into the | ||
3 | * Open Firmware PROM. | ||
4 | * | ||
5 | * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef _LINUX_OF_PDT_H | ||
14 | #define _LINUX_OF_PDT_H | ||
15 | |||
16 | /* overridable operations for calling into the PROM */ | ||
17 | struct of_pdt_ops { | ||
18 | /* | ||
19 | * buf should be 32 bytes; return 0 on success. | ||
20 | * If prev is NULL, the first property will be returned. | ||
21 | */ | ||
22 | int (*nextprop)(phandle node, char *prev, char *buf); | ||
23 | |||
24 | /* for both functions, return proplen on success; -1 on error */ | ||
25 | int (*getproplen)(phandle node, const char *prop); | ||
26 | int (*getproperty)(phandle node, const char *prop, char *buf, | ||
27 | int bufsize); | ||
28 | |||
29 | /* phandles are 0 if no child or sibling exists */ | ||
30 | phandle (*getchild)(phandle parent); | ||
31 | phandle (*getsibling)(phandle node); | ||
32 | |||
33 | /* return 0 on success; fill in 'len' with number of bytes in path */ | ||
34 | int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); | ||
35 | }; | ||
36 | |||
37 | extern void *prom_early_alloc(unsigned long size); | ||
38 | |||
39 | /* for building the device tree */ | ||
40 | extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); | ||
41 | |||
42 | extern void (*of_pdt_build_more)(struct device_node *dp, | ||
43 | struct device_node ***nextp); | ||
44 | |||
45 | #endif /* _LINUX_OF_PDT_H */ | ||
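The ops structure above is meant to be filled in by the platform with thin wrappers around its PROM calls and then handed to of_pdt_build_devicetree(). A skeletal sketch; every example_* function stands in for a real firmware call and here simply reports "nothing there":

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pdt.h>

static int example_nextprop(phandle node, char *prev, char *buf)
{
	return -1;			/* no more properties */
}

static int example_getproplen(phandle node, const char *prop)
{
	return -1;			/* property not found */
}

static int example_getproperty(phandle node, const char *prop,
			       char *buf, int bufsize)
{
	return -1;
}

static phandle example_getchild(phandle parent)
{
	return 0;			/* no child */
}

static phandle example_getsibling(phandle node)
{
	return 0;			/* no sibling */
}

static int example_pkg2path(phandle node, char *buf, const int buflen, int *len)
{
	return -1;
}

static struct of_pdt_ops example_prom_ops = {
	.nextprop	= example_nextprop,
	.getproplen	= example_getproplen,
	.getproperty	= example_getproperty,
	.getchild	= example_getchild,
	.getsibling	= example_getsibling,
	.pkg2path	= example_pkg2path,
};

void __init example_build_devicetree(phandle root)
{
	of_pdt_build_devicetree(root, &example_prom_ops);
}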
diff --git a/include/linux/opp.h b/include/linux/opp.h new file mode 100644 index 000000000000..5449945d589f --- /dev/null +++ b/include/linux/opp.h | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #ifndef __LINUX_OPP_H__ | ||
15 | #define __LINUX_OPP_H__ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/cpufreq.h> | ||
19 | |||
20 | struct opp; | ||
21 | |||
22 | #if defined(CONFIG_PM_OPP) | ||
23 | |||
24 | unsigned long opp_get_voltage(struct opp *opp); | ||
25 | |||
26 | unsigned long opp_get_freq(struct opp *opp); | ||
27 | |||
28 | int opp_get_opp_count(struct device *dev); | ||
29 | |||
30 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | ||
31 | bool available); | ||
32 | |||
33 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq); | ||
34 | |||
35 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq); | ||
36 | |||
37 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); | ||
38 | |||
39 | int opp_enable(struct device *dev, unsigned long freq); | ||
40 | |||
41 | int opp_disable(struct device *dev, unsigned long freq); | ||
42 | |||
43 | #else | ||
44 | static inline unsigned long opp_get_voltage(struct opp *opp) | ||
45 | { | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static inline unsigned long opp_get_freq(struct opp *opp) | ||
50 | { | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | static inline int opp_get_opp_count(struct device *dev) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | static inline struct opp *opp_find_freq_exact(struct device *dev, | ||
60 | unsigned long freq, bool available) | ||
61 | { | ||
62 | return ERR_PTR(-EINVAL); | ||
63 | } | ||
64 | |||
65 | static inline struct opp *opp_find_freq_floor(struct device *dev, | ||
66 | unsigned long *freq) | ||
67 | { | ||
68 | return ERR_PTR(-EINVAL); | ||
69 | } | ||
70 | |||
71 | static inline struct opp *opp_find_freq_ceil(struct device *dev, | ||
72 | unsigned long *freq) | ||
73 | { | ||
74 | return ERR_PTR(-EINVAL); | ||
75 | } | ||
76 | |||
77 | static inline int opp_add(struct device *dev, unsigned long freq, | ||
78 | unsigned long u_volt) | ||
79 | { | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
83 | static inline int opp_enable(struct device *dev, unsigned long freq) | ||
84 | { | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static inline int opp_disable(struct device *dev, unsigned long freq) | ||
89 | { | ||
90 | return 0; | ||
91 | } | ||
92 | #endif /* CONFIG_PM_OPP */ | ||
93 | |||
94 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) | ||
95 | int opp_init_cpufreq_table(struct device *dev, | ||
96 | struct cpufreq_frequency_table **table); | ||
97 | #else | ||
98 | static inline int opp_init_cpufreq_table(struct device *dev, | ||
99 | struct cpufreq_frequency_table **table) | ||
100 | { | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | #endif /* CONFIG_CPU_FREQ */ | ||
104 | |||
105 | #endif /* __LINUX_OPP_H__ */ | ||
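With CONFIG_PM_OPP the API above is used by registering (frequency, voltage) pairs once and looking them up at DVFS time. A rough sketch with made-up operating points; locking around the OPP table and most error handling are omitted:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>

static int example_register_opps(struct device *dev)
{
	int ret;

	ret = opp_add(dev, 300000000, 1100000);	/* 300 MHz at 1.10 V */
	if (ret)
		return ret;

	return opp_add(dev, 600000000, 1200000);	/* 600 MHz at 1.20 V */
}

static int example_pick_rate(struct device *dev, unsigned long *freq,
			     unsigned long *u_volt)
{
	struct opp *opp;

	/* rounds *freq up to the nearest available (enabled) OPP */
	opp = opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	*u_volt = opp_get_voltage(opp);
	return 0;
}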
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 5171639ecf0f..32fb81212fd1 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/init.h> | ||
18 | #include <asm/atomic.h> | 19 | #include <asm/atomic.h> |
19 | 20 | ||
20 | /* Each escaped entry is prefixed by ESCAPE_CODE | 21 | /* Each escaped entry is prefixed by ESCAPE_CODE |
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val); | |||
185 | int oprofile_add_data64(struct op_entry *entry, u64 val); | 186 | int oprofile_add_data64(struct op_entry *entry, u64 val); |
186 | int oprofile_write_commit(struct op_entry *entry); | 187 | int oprofile_write_commit(struct op_entry *entry); |
187 | 188 | ||
189 | #ifdef CONFIG_PERF_EVENTS | ||
190 | int __init oprofile_perf_init(struct oprofile_operations *ops); | ||
191 | void oprofile_perf_exit(void); | ||
192 | char *op_name_from_perf_id(void); | ||
193 | #endif /* CONFIG_PERF_EVENTS */ | ||
194 | |||
188 | #endif /* OPROFILE_H */ | 195 | #endif /* OPROFILE_H */ |
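The new helpers let an architecture back oprofile with the perf_event subsystem instead of its own counter code; the usual place to wire them up is the oprofile_arch_init()/oprofile_arch_exit() pair. A sketch of that glue, with a made-up PMU name:

#include <linux/init.h>
#include <linux/oprofile.h>

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	return oprofile_perf_init(ops);
}

void oprofile_arch_exit(void)
{
	oprofile_perf_exit();
}

/* name oprofile reports to userspace for this PMU; purely illustrative */
char *op_name_from_perf_id(void)
{
	return "example/pmu";
}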
diff --git a/include/linux/padata.h b/include/linux/padata.h index bdcd1e9eacea..4633b2f726b6 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h | |||
@@ -127,8 +127,8 @@ struct padata_cpumask { | |||
127 | */ | 127 | */ |
128 | struct parallel_data { | 128 | struct parallel_data { |
129 | struct padata_instance *pinst; | 129 | struct padata_instance *pinst; |
130 | struct padata_parallel_queue *pqueue; | 130 | struct padata_parallel_queue __percpu *pqueue; |
131 | struct padata_serial_queue *squeue; | 131 | struct padata_serial_queue __percpu *squeue; |
132 | atomic_t seq_nr; | 132 | atomic_t seq_nr; |
133 | atomic_t reorder_objects; | 133 | atomic_t reorder_objects; |
134 | atomic_t refcnt; | 134 | atomic_t refcnt; |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6fa317801e1c..5f38c460367e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -310,7 +310,7 @@ static inline void SetPageUptodate(struct page *page) | |||
310 | { | 310 | { |
311 | #ifdef CONFIG_S390 | 311 | #ifdef CONFIG_S390 |
312 | if (!test_and_set_bit(PG_uptodate, &page->flags)) | 312 | if (!test_and_set_bit(PG_uptodate, &page->flags)) |
313 | page_clear_dirty(page); | 313 | page_clear_dirty(page, 0); |
314 | #else | 314 | #else |
315 | /* | 315 | /* |
316 | * Memory barrier must be issued before setting the PG_uptodate bit, | 316 | * Memory barrier must be issued before setting the PG_uptodate bit, |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 570fddeb0388..b4c3d1b50037 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -517,6 +517,7 @@ | |||
517 | #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 | 517 | #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 |
518 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 | 518 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 |
519 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 | 519 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 |
520 | #define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603 | ||
520 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 | 521 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 |
521 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 | 522 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 |
522 | #define PCI_DEVICE_ID_AMD_SCSI 0x2020 | 523 | #define PCI_DEVICE_ID_AMD_SCSI 0x2020 |
@@ -742,6 +743,7 @@ | |||
742 | #define PCI_DEVICE_ID_HP_CISSC 0x3230 | 743 | #define PCI_DEVICE_ID_HP_CISSC 0x3230 |
743 | #define PCI_DEVICE_ID_HP_CISSD 0x3238 | 744 | #define PCI_DEVICE_ID_HP_CISSD 0x3238 |
744 | #define PCI_DEVICE_ID_HP_CISSE 0x323a | 745 | #define PCI_DEVICE_ID_HP_CISSE 0x323a |
746 | #define PCI_DEVICE_ID_HP_CISSF 0x323b | ||
745 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 | 747 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 |
746 | 748 | ||
747 | #define PCI_VENDOR_ID_PCTECH 0x1042 | 749 | #define PCI_VENDOR_ID_PCTECH 0x1042 |
@@ -818,7 +820,7 @@ | |||
818 | 820 | ||
819 | #define PCI_VENDOR_ID_ANIGMA 0x1051 | 821 | #define PCI_VENDOR_ID_ANIGMA 0x1051 |
820 | #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 | 822 | #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 |
821 | 823 | ||
822 | #define PCI_VENDOR_ID_EFAR 0x1055 | 824 | #define PCI_VENDOR_ID_EFAR 0x1055 |
823 | #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 | 825 | #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 |
824 | #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 | 826 | #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 |
@@ -1449,7 +1451,7 @@ | |||
1449 | 1451 | ||
1450 | #define PCI_VENDOR_ID_ZIATECH 0x1138 | 1452 | #define PCI_VENDOR_ID_ZIATECH 0x1138 |
1451 | #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 | 1453 | #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 |
1452 | 1454 | ||
1453 | 1455 | ||
1454 | #define PCI_VENDOR_ID_SYSKONNECT 0x1148 | 1456 | #define PCI_VENDOR_ID_SYSKONNECT 0x1148 |
1455 | #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 | 1457 | #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 |
@@ -1603,8 +1605,8 @@ | |||
1603 | #define PCI_DEVICE_ID_RP8OCTA 0x0005 | 1605 | #define PCI_DEVICE_ID_RP8OCTA 0x0005 |
1604 | #define PCI_DEVICE_ID_RP8J 0x0006 | 1606 | #define PCI_DEVICE_ID_RP8J 0x0006 |
1605 | #define PCI_DEVICE_ID_RP4J 0x0007 | 1607 | #define PCI_DEVICE_ID_RP4J 0x0007 |
1606 | #define PCI_DEVICE_ID_RP8SNI 0x0008 | 1608 | #define PCI_DEVICE_ID_RP8SNI 0x0008 |
1607 | #define PCI_DEVICE_ID_RP16SNI 0x0009 | 1609 | #define PCI_DEVICE_ID_RP16SNI 0x0009 |
1608 | #define PCI_DEVICE_ID_RPP4 0x000A | 1610 | #define PCI_DEVICE_ID_RPP4 0x000A |
1609 | #define PCI_DEVICE_ID_RPP8 0x000B | 1611 | #define PCI_DEVICE_ID_RPP8 0x000B |
1610 | #define PCI_DEVICE_ID_RP4M 0x000D | 1612 | #define PCI_DEVICE_ID_RP4M 0x000D |
@@ -1614,9 +1616,9 @@ | |||
1614 | #define PCI_DEVICE_ID_URP8INTF 0x0802 | 1616 | #define PCI_DEVICE_ID_URP8INTF 0x0802 |
1615 | #define PCI_DEVICE_ID_URP16INTF 0x0803 | 1617 | #define PCI_DEVICE_ID_URP16INTF 0x0803 |
1616 | #define PCI_DEVICE_ID_URP8OCTA 0x0805 | 1618 | #define PCI_DEVICE_ID_URP8OCTA 0x0805 |
1617 | #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C | 1619 | #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C |
1618 | #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D | 1620 | #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D |
1619 | #define PCI_DEVICE_ID_CRP16INTF 0x0903 | 1621 | #define PCI_DEVICE_ID_CRP16INTF 0x0903 |
1620 | 1622 | ||
1621 | #define PCI_VENDOR_ID_CYCLADES 0x120e | 1623 | #define PCI_VENDOR_ID_CYCLADES 0x120e |
1622 | #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 | 1624 | #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 |
@@ -2142,7 +2144,7 @@ | |||
2142 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 | 2144 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 |
2143 | 2145 | ||
2144 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 | 2146 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 |
2145 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 | 2147 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 |
2146 | 2148 | ||
2147 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 | 2149 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 |
2148 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 | 2150 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 |
@@ -2192,6 +2194,9 @@ | |||
2192 | #define PCI_VENDOR_ID_ARIMA 0x161f | 2194 | #define PCI_VENDOR_ID_ARIMA 0x161f |
2193 | 2195 | ||
2194 | #define PCI_VENDOR_ID_BROCADE 0x1657 | 2196 | #define PCI_VENDOR_ID_BROCADE 0x1657 |
2197 | #define PCI_DEVICE_ID_BROCADE_CT 0x0014 | ||
2198 | #define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 | ||
2199 | #define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 | ||
2195 | 2200 | ||
2196 | #define PCI_VENDOR_ID_SIBYTE 0x166d | 2201 | #define PCI_VENDOR_ID_SIBYTE 0x166d |
2197 | #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 | 2202 | #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 |
@@ -2263,6 +2268,13 @@ | |||
2263 | 2268 | ||
2264 | #define PCI_VENDOR_ID_SILAN 0x1904 | 2269 | #define PCI_VENDOR_ID_SILAN 0x1904 |
2265 | 2270 | ||
2271 | #define PCI_VENDOR_ID_RENESAS 0x1912 | ||
2272 | #define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 | ||
2273 | #define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 | ||
2274 | #define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 | ||
2275 | #define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 | ||
2276 | #define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 | ||
2277 | |||
2266 | #define PCI_VENDOR_ID_TDI 0x192E | 2278 | #define PCI_VENDOR_ID_TDI 0x192E |
2267 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 | 2279 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 |
2268 | 2280 | ||
@@ -2315,6 +2327,14 @@ | |||
2315 | #define PCI_DEVICE_ID_P4080 0x0401 | 2327 | #define PCI_DEVICE_ID_P4080 0x0401 |
2316 | #define PCI_DEVICE_ID_P4040E 0x0408 | 2328 | #define PCI_DEVICE_ID_P4040E 0x0408 |
2317 | #define PCI_DEVICE_ID_P4040 0x0409 | 2329 | #define PCI_DEVICE_ID_P4040 0x0409 |
2330 | #define PCI_DEVICE_ID_P2040E 0x0410 | ||
2331 | #define PCI_DEVICE_ID_P2040 0x0411 | ||
2332 | #define PCI_DEVICE_ID_P3041E 0x041E | ||
2333 | #define PCI_DEVICE_ID_P3041 0x041F | ||
2334 | #define PCI_DEVICE_ID_P5020E 0x0420 | ||
2335 | #define PCI_DEVICE_ID_P5020 0x0421 | ||
2336 | #define PCI_DEVICE_ID_P5010E 0x0428 | ||
2337 | #define PCI_DEVICE_ID_P5010 0x0429 | ||
2318 | #define PCI_DEVICE_ID_MPC8641 0x7010 | 2338 | #define PCI_DEVICE_ID_MPC8641 0x7010 |
2319 | #define PCI_DEVICE_ID_MPC8641D 0x7011 | 2339 | #define PCI_DEVICE_ID_MPC8641D 0x7011 |
2320 | #define PCI_DEVICE_ID_MPC8610 0x7018 | 2340 | #define PCI_DEVICE_ID_MPC8610 0x7018 |
@@ -2418,7 +2438,7 @@ | |||
2418 | #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 | 2438 | #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 |
2419 | #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 | 2439 | #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 |
2420 | #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 | 2440 | #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 |
2421 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 | 2441 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 |
2422 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d | 2442 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d |
2423 | #define PCI_DEVICE_ID_INTEL_82437 0x122d | 2443 | #define PCI_DEVICE_ID_INTEL_82437 0x122d |
2424 | #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e | 2444 | #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e |
@@ -2621,6 +2641,9 @@ | |||
2621 | #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 | 2641 | #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 |
2622 | #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a | 2642 | #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a |
2623 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e | 2643 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e |
2644 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c | ||
2645 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f | ||
2646 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 | ||
2624 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b | 2647 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b |
2625 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c | 2648 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c |
2626 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 | 2649 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index ce2dc655cd1d..018db9a62ffe 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -139,6 +139,27 @@ | |||
139 | __aligned(PAGE_SIZE) | 139 | __aligned(PAGE_SIZE) |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Declaration/definition used for per-CPU variables that must be read mostly. | ||
143 | */ | ||
144 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ | ||
145 | DECLARE_PER_CPU_SECTION(type, name, "..readmostly") | ||
146 | |||
147 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ | ||
148 | DEFINE_PER_CPU_SECTION(type, name, "..readmostly") | ||
149 | |||
150 | /* | ||
151 | * Declaration/definition used for large per-CPU variables that must be | ||
152 | * aligned to something larger than the pagesize. | ||
153 | */ | ||
154 | #define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \ | ||
155 | DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ | ||
156 | __aligned(size) | ||
157 | |||
158 | #define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \ | ||
159 | DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ | ||
160 | __aligned(size) | ||
161 | |||
162 | /* | ||
142 | * Intermodule exports for per-CPU variables. sparse forgets about | 163 | * Intermodule exports for per-CPU variables. sparse forgets about |
143 | * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to | 164 | * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to |
144 | * noop if __CHECKER__. | 165 | * noop if __CHECKER__. |
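DEFINE_PER_CPU_READ_MOSTLY places the variable in a separate ..readmostly per-CPU section, so read-mostly data is not interleaved with frequently written per-CPU data. A minimal sketch of defining and reading such a variable; the variable itself is hypothetical:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* written once during bring-up, read in hot paths afterwards */
static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, example_feature_mask);

static void example_set_mask(unsigned int mask)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(example_feature_mask, cpu) = mask;
}

static unsigned int example_local_mask(void)
{
	return __this_cpu_read(example_feature_mask);
}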
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 49466b13c5c6..5095b834a6fb 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -39,10 +39,17 @@ | |||
39 | preempt_enable(); \ | 39 | preempt_enable(); \ |
40 | } while (0) | 40 | } while (0) |
41 | 41 | ||
42 | #ifdef CONFIG_SMP | 42 | #define get_cpu_ptr(var) ({ \ |
43 | preempt_disable(); \ | ||
44 | this_cpu_ptr(var); }) | ||
45 | |||
46 | #define put_cpu_ptr(var) do { \ | ||
47 | (void)(var); \ | ||
48 | preempt_enable(); \ | ||
49 | } while (0) | ||
43 | 50 | ||
44 | /* minimum unit size, also is the maximum supported allocation size */ | 51 | /* minimum unit size, also is the maximum supported allocation size */ |
45 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) | 52 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) |
46 | 53 | ||
47 | /* | 54 | /* |
48 | * Percpu allocator can serve percpu allocations before slab is | 55 | * Percpu allocator can serve percpu allocations before slab is |
@@ -137,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, | |||
137 | * dynamically allocated. Non-atomic access to the current CPU's | 144 | * dynamically allocated. Non-atomic access to the current CPU's |
138 | * version should probably be combined with get_cpu()/put_cpu(). | 145 | * version should probably be combined with get_cpu()/put_cpu(). |
139 | */ | 146 | */ |
147 | #ifdef CONFIG_SMP | ||
140 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | 148 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) |
149 | #else | ||
150 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) | ||
151 | #endif | ||
141 | 152 | ||
142 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); | 153 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); |
143 | extern bool is_kernel_percpu_address(unsigned long addr); | 154 | extern bool is_kernel_percpu_address(unsigned long addr); |
144 | 155 | ||
145 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | 156 | #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
146 | extern void __init setup_per_cpu_areas(void); | 157 | extern void __init setup_per_cpu_areas(void); |
147 | #endif | 158 | #endif |
148 | extern void __init percpu_init_late(void); | 159 | extern void __init percpu_init_late(void); |
149 | 160 | ||
150 | #else /* CONFIG_SMP */ | ||
151 | |||
152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) | ||
153 | |||
154 | /* can't distinguish from other static vars, always false */ | ||
155 | static inline bool is_kernel_percpu_address(unsigned long addr) | ||
156 | { | ||
157 | return false; | ||
158 | } | ||
159 | |||
160 | static inline void __init setup_per_cpu_areas(void) { } | ||
161 | |||
162 | static inline void __init percpu_init_late(void) { } | ||
163 | |||
164 | static inline void *pcpu_lpage_remapped(void *kaddr) | ||
165 | { | ||
166 | return NULL; | ||
167 | } | ||
168 | |||
169 | #endif /* CONFIG_SMP */ | ||
170 | |||
171 | extern void __percpu *__alloc_percpu(size_t size, size_t align); | 161 | extern void __percpu *__alloc_percpu(size_t size, size_t align); |
172 | extern void free_percpu(void __percpu *__pdata); | 162 | extern void free_percpu(void __percpu *__pdata); |
173 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | 163 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); |
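get_cpu_ptr()/put_cpu_ptr() mirror get_cpu_var()/put_cpu_var() for dynamically allocated per-CPU data: they disable preemption around access to the local copy. A small sketch using a hypothetical counter:

#include <linux/errno.h>
#include <linux/percpu.h>

struct example_stats {
	unsigned long packets;
};

static struct example_stats __percpu *example_stats;

static int example_stats_init(void)
{
	example_stats = alloc_percpu(struct example_stats);
	return example_stats ? 0 : -ENOMEM;
}

static void example_count_packet(void)
{
	struct example_stats *stats;

	/* preemption is disabled until the matching put_cpu_ptr() */
	stats = get_cpu_ptr(example_stats);
	stats->packets++;
	put_cpu_ptr(example_stats);
}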
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 716f99b682c1..057bf22a8323 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks { | |||
486 | #include <linux/workqueue.h> | 486 | #include <linux/workqueue.h> |
487 | #include <linux/ftrace.h> | 487 | #include <linux/ftrace.h> |
488 | #include <linux/cpu.h> | 488 | #include <linux/cpu.h> |
489 | #include <linux/irq_work.h> | ||
490 | #include <linux/jump_label_ref.h> | ||
489 | #include <asm/atomic.h> | 491 | #include <asm/atomic.h> |
490 | #include <asm/local.h> | 492 | #include <asm/local.h> |
491 | 493 | ||
@@ -529,16 +531,22 @@ struct hw_perf_event { | |||
529 | int last_cpu; | 531 | int last_cpu; |
530 | }; | 532 | }; |
531 | struct { /* software */ | 533 | struct { /* software */ |
532 | s64 remaining; | ||
533 | struct hrtimer hrtimer; | 534 | struct hrtimer hrtimer; |
534 | }; | 535 | }; |
535 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 536 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
536 | struct { /* breakpoint */ | 537 | struct { /* breakpoint */ |
537 | struct arch_hw_breakpoint info; | 538 | struct arch_hw_breakpoint info; |
538 | struct list_head bp_list; | 539 | struct list_head bp_list; |
540 | /* | ||
541 | * Crufty hack to avoid the chicken and egg | ||
542 | * problem hw_breakpoint has with context | ||
543 | * creation and event initalization. | ||
544 | */ | ||
545 | struct task_struct *bp_target; | ||
539 | }; | 546 | }; |
540 | #endif | 547 | #endif |
541 | }; | 548 | }; |
549 | int state; | ||
542 | local64_t prev_count; | 550 | local64_t prev_count; |
543 | u64 sample_period; | 551 | u64 sample_period; |
544 | u64 last_period; | 552 | u64 last_period; |
@@ -550,6 +558,13 @@ struct hw_perf_event { | |||
550 | #endif | 558 | #endif |
551 | }; | 559 | }; |
552 | 560 | ||
561 | /* | ||
562 | * hw_perf_event::state flags | ||
563 | */ | ||
564 | #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ | ||
565 | #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ | ||
566 | #define PERF_HES_ARCH 0x04 | ||
567 | |||
553 | struct perf_event; | 568 | struct perf_event; |
554 | 569 | ||
555 | /* | 570 | /* |
@@ -561,36 +576,70 @@ struct perf_event; | |||
561 | * struct pmu - generic performance monitoring unit | 576 | * struct pmu - generic performance monitoring unit |
562 | */ | 577 | */ |
563 | struct pmu { | 578 | struct pmu { |
564 | int (*enable) (struct perf_event *event); | 579 | struct list_head entry; |
565 | void (*disable) (struct perf_event *event); | 580 | |
566 | int (*start) (struct perf_event *event); | 581 | int * __percpu pmu_disable_count; |
567 | void (*stop) (struct perf_event *event); | 582 | struct perf_cpu_context * __percpu pmu_cpu_context; |
568 | void (*read) (struct perf_event *event); | 583 | int task_ctx_nr; |
569 | void (*unthrottle) (struct perf_event *event); | 584 | |
585 | /* | ||
586 | * Fully disable/enable this PMU, can be used to protect from the PMI | ||
587 | * as well as for lazy/batch writing of the MSRs. | ||
588 | */ | ||
589 | void (*pmu_enable) (struct pmu *pmu); /* optional */ | ||
590 | void (*pmu_disable) (struct pmu *pmu); /* optional */ | ||
570 | 591 | ||
571 | /* | 592 | /* |
572 | * Group events scheduling is treated as a transaction, add group | 593 | * Try and initialize the event for this PMU. |
573 | * events as a whole and perform one schedulability test. If the test | 594 | * Should return -ENOENT when the @event doesn't match this PMU. |
574 | * fails, roll back the whole group | ||
575 | */ | 595 | */ |
596 | int (*event_init) (struct perf_event *event); | ||
597 | |||
598 | #define PERF_EF_START 0x01 /* start the counter when adding */ | ||
599 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ | ||
600 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ | ||
576 | 601 | ||
577 | /* | 602 | /* |
578 | * Start the transaction, after this ->enable() doesn't need | 603 | * Adds/Removes a counter to/from the PMU, can be done inside |
579 | * to do schedulability tests. | 604 | * a transaction, see the ->*_txn() methods. |
580 | */ | 605 | */ |
581 | void (*start_txn) (const struct pmu *pmu); | 606 | int (*add) (struct perf_event *event, int flags); |
607 | void (*del) (struct perf_event *event, int flags); | ||
608 | |||
582 | /* | 609 | /* |
583 | * If ->start_txn() disabled the ->enable() schedulability test | 610 | * Starts/Stops a counter present on the PMU. The PMI handler |
611 | * should stop the counter when perf_event_overflow() returns | ||
612 | * !0. ->start() will be used to continue. | ||
613 | */ | ||
614 | void (*start) (struct perf_event *event, int flags); | ||
615 | void (*stop) (struct perf_event *event, int flags); | ||
616 | |||
617 | /* | ||
618 | * Updates the counter value of the event. | ||
619 | */ | ||
620 | void (*read) (struct perf_event *event); | ||
621 | |||
622 | /* | ||
623 | * Group events scheduling is treated as a transaction, add | ||
624 | * group events as a whole and perform one schedulability test. | ||
625 | * If the test fails, roll back the whole group | ||
626 | * | ||
627 | * Start the transaction, after this ->add() doesn't need to | ||
628 | * do schedulability tests. | ||
629 | */ | ||
630 | void (*start_txn) (struct pmu *pmu); /* optional */ | ||
631 | /* | ||
632 | * If ->start_txn() disabled the ->add() schedulability test | ||
584 | * then ->commit_txn() is required to perform one. On success | 633 | * then ->commit_txn() is required to perform one. On success |
585 | * the transaction is closed. On error the transaction is kept | 634 | * the transaction is closed. On error the transaction is kept |
586 | * open until ->cancel_txn() is called. | 635 | * open until ->cancel_txn() is called. |
587 | */ | 636 | */ |
588 | int (*commit_txn) (const struct pmu *pmu); | 637 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
589 | /* | 638 | /* |
590 | * Will cancel the transaction, assumes ->disable() is called for | 639 | * Will cancel the transaction, assumes ->del() is called |
591 | * each successful ->enable() during the transaction. | 640 | * for each successful ->add() during the transaction. |
592 | */ | 641 | */ |
593 | void (*cancel_txn) (const struct pmu *pmu); | 642 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
594 | }; | 643 | }; |
595 | 644 | ||
596 | /** | 645 | /** |
@@ -631,11 +680,6 @@ struct perf_buffer { | |||
631 | void *data_pages[0]; | 680 | void *data_pages[0]; |
632 | }; | 681 | }; |
633 | 682 | ||
634 | struct perf_pending_entry { | ||
635 | struct perf_pending_entry *next; | ||
636 | void (*func)(struct perf_pending_entry *); | ||
637 | }; | ||
638 | |||
639 | struct perf_sample_data; | 683 | struct perf_sample_data; |
640 | 684 | ||
641 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | 685 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, |
@@ -656,6 +700,7 @@ struct swevent_hlist { | |||
656 | 700 | ||
657 | #define PERF_ATTACH_CONTEXT 0x01 | 701 | #define PERF_ATTACH_CONTEXT 0x01 |
658 | #define PERF_ATTACH_GROUP 0x02 | 702 | #define PERF_ATTACH_GROUP 0x02 |
703 | #define PERF_ATTACH_TASK 0x04 | ||
659 | 704 | ||
660 | /** | 705 | /** |
661 | * struct perf_event - performance event kernel representation: | 706 | * struct perf_event - performance event kernel representation: |
@@ -669,7 +714,7 @@ struct perf_event { | |||
669 | int nr_siblings; | 714 | int nr_siblings; |
670 | int group_flags; | 715 | int group_flags; |
671 | struct perf_event *group_leader; | 716 | struct perf_event *group_leader; |
672 | const struct pmu *pmu; | 717 | struct pmu *pmu; |
673 | 718 | ||
674 | enum perf_event_active_state state; | 719 | enum perf_event_active_state state; |
675 | unsigned int attach_state; | 720 | unsigned int attach_state; |
@@ -743,7 +788,7 @@ struct perf_event { | |||
743 | int pending_wakeup; | 788 | int pending_wakeup; |
744 | int pending_kill; | 789 | int pending_kill; |
745 | int pending_disable; | 790 | int pending_disable; |
746 | struct perf_pending_entry pending; | 791 | struct irq_work pending; |
747 | 792 | ||
748 | atomic_t event_limit; | 793 | atomic_t event_limit; |
749 | 794 | ||
@@ -763,12 +808,19 @@ struct perf_event { | |||
763 | #endif /* CONFIG_PERF_EVENTS */ | 808 | #endif /* CONFIG_PERF_EVENTS */ |
764 | }; | 809 | }; |
765 | 810 | ||
811 | enum perf_event_context_type { | ||
812 | task_context, | ||
813 | cpu_context, | ||
814 | }; | ||
815 | |||
766 | /** | 816 | /** |
767 | * struct perf_event_context - event context structure | 817 | * struct perf_event_context - event context structure |
768 | * | 818 | * |
769 | * Used as a container for task events and CPU events as well: | 819 | * Used as a container for task events and CPU events as well: |
770 | */ | 820 | */ |
771 | struct perf_event_context { | 821 | struct perf_event_context { |
822 | enum perf_event_context_type type; | ||
823 | struct pmu *pmu; | ||
772 | /* | 824 | /* |
773 | * Protect the states of the events in the list, | 825 | * Protect the states of the events in the list, |
774 | * nr_active, and the list: | 826 | * nr_active, and the list: |
@@ -808,6 +860,12 @@ struct perf_event_context { | |||
808 | struct rcu_head rcu_head; | 860 | struct rcu_head rcu_head; |
809 | }; | 861 | }; |
810 | 862 | ||
863 | /* | ||
864 | * Number of contexts where an event can trigger: | ||
865 | * task, softirq, hardirq, nmi. | ||
866 | */ | ||
867 | #define PERF_NR_CONTEXTS 4 | ||
868 | |||
811 | /** | 869 | /** |
812 | * struct perf_event_cpu_context - per cpu event context structure | 870 | * struct perf_event_cpu_context - per cpu event context structure |
813 | */ | 871 | */ |
@@ -815,18 +873,9 @@ struct perf_cpu_context { | |||
815 | struct perf_event_context ctx; | 873 | struct perf_event_context ctx; |
816 | struct perf_event_context *task_ctx; | 874 | struct perf_event_context *task_ctx; |
817 | int active_oncpu; | 875 | int active_oncpu; |
818 | int max_pertask; | ||
819 | int exclusive; | 876 | int exclusive; |
820 | struct swevent_hlist *swevent_hlist; | 877 | struct list_head rotation_list; |
821 | struct mutex hlist_mutex; | 878 | int jiffies_interval; |
822 | int hlist_refcount; | ||
823 | |||
824 | /* | ||
825 | * Recursion avoidance: | ||
826 | * | ||
827 | * task, softirq, irq, nmi context | ||
828 | */ | ||
829 | int recursion[4]; | ||
830 | }; | 879 | }; |
831 | 880 | ||
832 | struct perf_output_handle { | 881 | struct perf_output_handle { |
@@ -842,26 +891,34 @@ struct perf_output_handle { | |||
842 | 891 | ||
843 | #ifdef CONFIG_PERF_EVENTS | 892 | #ifdef CONFIG_PERF_EVENTS |
844 | 893 | ||
845 | /* | 894 | extern int perf_pmu_register(struct pmu *pmu); |
846 | * Set by architecture code: | 895 | extern void perf_pmu_unregister(struct pmu *pmu); |
847 | */ | 896 | |
848 | extern int perf_max_events; | 897 | extern int perf_num_counters(void); |
898 | extern const char *perf_pmu_name(void); | ||
899 | extern void __perf_event_task_sched_in(struct task_struct *task); | ||
900 | extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | ||
849 | 901 | ||
850 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | 902 | extern atomic_t perf_task_events; |
903 | |||
904 | static inline void perf_event_task_sched_in(struct task_struct *task) | ||
905 | { | ||
906 | COND_STMT(&perf_task_events, __perf_event_task_sched_in(task)); | ||
907 | } | ||
908 | |||
909 | static inline | ||
910 | void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | ||
911 | { | ||
912 | COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next)); | ||
913 | } | ||
851 | 914 | ||
852 | extern void perf_event_task_sched_in(struct task_struct *task); | ||
853 | extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | ||
854 | extern void perf_event_task_tick(struct task_struct *task); | ||
855 | extern int perf_event_init_task(struct task_struct *child); | 915 | extern int perf_event_init_task(struct task_struct *child); |
856 | extern void perf_event_exit_task(struct task_struct *child); | 916 | extern void perf_event_exit_task(struct task_struct *child); |
857 | extern void perf_event_free_task(struct task_struct *task); | 917 | extern void perf_event_free_task(struct task_struct *task); |
858 | extern void set_perf_event_pending(void); | 918 | extern void perf_event_delayed_put(struct task_struct *task); |
859 | extern void perf_event_do_pending(void); | ||
860 | extern void perf_event_print_debug(void); | 919 | extern void perf_event_print_debug(void); |
861 | extern void __perf_disable(void); | 920 | extern void perf_pmu_disable(struct pmu *pmu); |
862 | extern bool __perf_enable(void); | 921 | extern void perf_pmu_enable(struct pmu *pmu); |
863 | extern void perf_disable(void); | ||
864 | extern void perf_enable(void); | ||
865 | extern int perf_event_task_disable(void); | 922 | extern int perf_event_task_disable(void); |
866 | extern int perf_event_task_enable(void); | 923 | extern int perf_event_task_enable(void); |
867 | extern void perf_event_update_userpage(struct perf_event *event); | 924 | extern void perf_event_update_userpage(struct perf_event *event); |
@@ -869,7 +926,7 @@ extern int perf_event_release_kernel(struct perf_event *event); | |||
869 | extern struct perf_event * | 926 | extern struct perf_event * |
870 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 927 | perf_event_create_kernel_counter(struct perf_event_attr *attr, |
871 | int cpu, | 928 | int cpu, |
872 | pid_t pid, | 929 | struct task_struct *task, |
873 | perf_overflow_handler_t callback); | 930 | perf_overflow_handler_t callback); |
874 | extern u64 perf_event_read_value(struct perf_event *event, | 931 | extern u64 perf_event_read_value(struct perf_event *event, |
875 | u64 *enabled, u64 *running); | 932 | u64 *enabled, u64 *running); |
@@ -920,14 +977,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi, | |||
920 | */ | 977 | */ |
921 | static inline int is_software_event(struct perf_event *event) | 978 | static inline int is_software_event(struct perf_event *event) |
922 | { | 979 | { |
923 | switch (event->attr.type) { | 980 | return event->pmu->task_ctx_nr == perf_sw_context; |
924 | case PERF_TYPE_SOFTWARE: | ||
925 | case PERF_TYPE_TRACEPOINT: | ||
926 | /* for now the breakpoint stuff also works as software event */ | ||
927 | case PERF_TYPE_BREAKPOINT: | ||
928 | return 1; | ||
929 | } | ||
930 | return 0; | ||
931 | } | 981 | } |
932 | 982 | ||
933 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 983 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
@@ -954,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs) | |||
954 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); | 1004 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); |
955 | } | 1005 | } |
956 | 1006 | ||
957 | static inline void | 1007 | static __always_inline void |
958 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 1008 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) |
959 | { | 1009 | { |
960 | if (atomic_read(&perf_swevent_enabled[event_id])) { | 1010 | struct pt_regs hot_regs; |
961 | struct pt_regs hot_regs; | 1011 | |
962 | 1012 | JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); | |
963 | if (!regs) { | 1013 | return; |
964 | perf_fetch_caller_regs(&hot_regs); | 1014 | |
965 | regs = &hot_regs; | 1015 | have_event: |
966 | } | 1016 | if (!regs) { |
967 | __perf_sw_event(event_id, nr, nmi, regs, addr); | 1017 | perf_fetch_caller_regs(&hot_regs); |
1018 | regs = &hot_regs; | ||
968 | } | 1019 | } |
1020 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
969 | } | 1021 | } |
970 | 1022 | ||
971 | extern void perf_event_mmap(struct vm_area_struct *vma); | 1023 | extern void perf_event_mmap(struct vm_area_struct *vma); |
@@ -976,7 +1028,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks | |||
976 | extern void perf_event_comm(struct task_struct *tsk); | 1028 | extern void perf_event_comm(struct task_struct *tsk); |
977 | extern void perf_event_fork(struct task_struct *tsk); | 1029 | extern void perf_event_fork(struct task_struct *tsk); |
978 | 1030 | ||
979 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 1031 | /* Callchains */ |
1032 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | ||
1033 | |||
1034 | extern void perf_callchain_user(struct perf_callchain_entry *entry, | ||
1035 | struct pt_regs *regs); | ||
1036 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
1037 | struct pt_regs *regs); | ||
1038 | |||
1039 | |||
1040 | static inline void | ||
1041 | perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1042 | { | ||
1043 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1044 | entry->ip[entry->nr++] = ip; | ||
1045 | } | ||
980 | 1046 | ||
981 | extern int sysctl_perf_event_paranoid; | 1047 | extern int sysctl_perf_event_paranoid; |
982 | extern int sysctl_perf_event_mlock; | 1048 | extern int sysctl_perf_event_mlock; |
@@ -1019,21 +1085,18 @@ extern int perf_swevent_get_recursion_context(void); | |||
1019 | extern void perf_swevent_put_recursion_context(int rctx); | 1085 | extern void perf_swevent_put_recursion_context(int rctx); |
1020 | extern void perf_event_enable(struct perf_event *event); | 1086 | extern void perf_event_enable(struct perf_event *event); |
1021 | extern void perf_event_disable(struct perf_event *event); | 1087 | extern void perf_event_disable(struct perf_event *event); |
1088 | extern void perf_event_task_tick(void); | ||
1022 | #else | 1089 | #else |
1023 | static inline void | 1090 | static inline void |
1024 | perf_event_task_sched_in(struct task_struct *task) { } | 1091 | perf_event_task_sched_in(struct task_struct *task) { } |
1025 | static inline void | 1092 | static inline void |
1026 | perf_event_task_sched_out(struct task_struct *task, | 1093 | perf_event_task_sched_out(struct task_struct *task, |
1027 | struct task_struct *next) { } | 1094 | struct task_struct *next) { } |
1028 | static inline void | ||
1029 | perf_event_task_tick(struct task_struct *task) { } | ||
1030 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } | 1095 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
1031 | static inline void perf_event_exit_task(struct task_struct *child) { } | 1096 | static inline void perf_event_exit_task(struct task_struct *child) { } |
1032 | static inline void perf_event_free_task(struct task_struct *task) { } | 1097 | static inline void perf_event_free_task(struct task_struct *task) { } |
1033 | static inline void perf_event_do_pending(void) { } | 1098 | static inline void perf_event_delayed_put(struct task_struct *task) { } |
1034 | static inline void perf_event_print_debug(void) { } | 1099 | static inline void perf_event_print_debug(void) { } |
1035 | static inline void perf_disable(void) { } | ||
1036 | static inline void perf_enable(void) { } | ||
1037 | static inline int perf_event_task_disable(void) { return -EINVAL; } | 1100 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
1038 | static inline int perf_event_task_enable(void) { return -EINVAL; } | 1101 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
1039 | 1102 | ||
@@ -1056,6 +1119,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; } | |||
1056 | static inline void perf_swevent_put_recursion_context(int rctx) { } | 1119 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
1057 | static inline void perf_event_enable(struct perf_event *event) { } | 1120 | static inline void perf_event_enable(struct perf_event *event) { } |
1058 | static inline void perf_event_disable(struct perf_event *event) { } | 1121 | static inline void perf_event_disable(struct perf_event *event) { } |
1122 | static inline void perf_event_task_tick(void) { } | ||
1059 | #endif | 1123 | #endif |
1060 | 1124 | ||
1061 | #define perf_output_put(handle, x) \ | 1125 | #define perf_output_put(handle, x) \ |
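
The perf_event.h hunks above move the core from the old hw_perf_event_init()/perf_disable() scheme to perf_pmu_register()/perf_pmu_unregister() plus per-PMU perf_pmu_disable()/perf_pmu_enable(), and perf_event_create_kernel_counter() now takes a struct task_struct pointer instead of a pid_t. Below is a minimal sketch of an in-kernel counter written against the new prototype; the attribute values, the function names and the overflow handler body are illustrative assumptions, only the prototypes come from this header.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/perf_event.h>

/* Illustrative handler; the signature follows perf_overflow_handler_t. */
static void my_overflow_handler(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	pr_info("perf counter overflowed\n");
}

static struct perf_event *my_event;

static int __init my_counter_init(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	/* New prototype: a task pointer instead of a pid_t; NULL means CPU-bound. */
	my_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
						    NULL /* task */,
						    my_overflow_handler);
	if (IS_ERR(my_event))
		return PTR_ERR(my_event);
	return 0;
}

static void __exit my_counter_exit(void)
{
	perf_event_release_kernel(my_event);
}

module_init(my_counter_init);
module_exit(my_counter_exit);
MODULE_LICENSE("GPL");
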
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index 76edadf046d3..26c8df786918 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
@@ -36,6 +36,9 @@ | |||
36 | /* Socket options for SOL_PNPIPE level */ | 36 | /* Socket options for SOL_PNPIPE level */ |
37 | #define PNPIPE_ENCAP 1 | 37 | #define PNPIPE_ENCAP 1 |
38 | #define PNPIPE_IFINDEX 2 | 38 | #define PNPIPE_IFINDEX 2 |
39 | #define PNPIPE_PIPE_HANDLE 3 | ||
40 | #define PNPIPE_ENABLE 4 | ||
41 | /* unused slot */ | ||
39 | 42 | ||
40 | #define PNADDR_ANY 0 | 43 | #define PNADDR_ANY 0 |
41 | #define PNADDR_BROADCAST 0xFC | 44 | #define PNADDR_BROADCAST 0xFC |
@@ -47,6 +50,8 @@ | |||
47 | 50 | ||
48 | /* ioctls */ | 51 | /* ioctls */ |
49 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) | 52 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) |
53 | #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) | ||
54 | #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) | ||
50 | 55 | ||
51 | /* Phonet protocol header */ | 56 | /* Phonet protocol header */ |
52 | struct phonethdr { | 57 | struct phonethdr { |
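
The new SOL_PNPIPE options and resource ioctls above extend the Phonet pipe interface. A hypothetical user-space sketch follows, assuming AF_PHONET and PN_PROTO_PIPE are visible through the toolchain headers and that both options carry plain integers; the exact semantics of PNPIPE_PIPE_HANDLE and PNPIPE_ENABLE are assumptions, not spelled out in this header.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/phonet.h>

int main(void)
{
	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
	int handle = 0, one = 1;
	socklen_t len = sizeof(handle);

	if (fd < 0)
		return 1;

	/* Assumed usage: query the pipe handle, then enable the pipe. */
	if (getsockopt(fd, SOL_PNPIPE, PNPIPE_PIPE_HANDLE, &handle, &len) == 0)
		printf("pipe handle: %d\n", handle);
	setsockopt(fd, SOL_PNPIPE, PNPIPE_ENABLE, &one, sizeof(one));
	return 0;
}
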
diff --git a/include/linux/phy.h b/include/linux/phy.h index 6b0a782c6224..a6e047a04f79 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -116,7 +116,7 @@ struct mii_bus { | |||
116 | /* list of all PHYs on bus */ | 116 | /* list of all PHYs on bus */ |
117 | struct phy_device *phy_map[PHY_MAX_ADDR]; | 117 | struct phy_device *phy_map[PHY_MAX_ADDR]; |
118 | 118 | ||
119 | /* Phy addresses to be ignored when probing */ | 119 | /* PHY addresses to be ignored when probing */ |
120 | u32 phy_mask; | 120 | u32 phy_mask; |
121 | 121 | ||
122 | /* | 122 | /* |
@@ -283,7 +283,7 @@ struct phy_device { | |||
283 | 283 | ||
284 | phy_interface_t interface; | 284 | phy_interface_t interface; |
285 | 285 | ||
286 | /* Bus address of the PHY (0-32) */ | 286 | /* Bus address of the PHY (0-31) */ |
287 | int addr; | 287 | int addr; |
288 | 288 | ||
289 | /* | 289 | /* |
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index 7f6ba8658abe..defbde203d07 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h | |||
@@ -332,6 +332,7 @@ enum { | |||
332 | FLOW_KEY_SKUID, | 332 | FLOW_KEY_SKUID, |
333 | FLOW_KEY_SKGID, | 333 | FLOW_KEY_SKGID, |
334 | FLOW_KEY_VLAN_TAG, | 334 | FLOW_KEY_VLAN_TAG, |
335 | FLOW_KEY_RXHASH, | ||
335 | __FLOW_KEY_MAX, | 336 | __FLOW_KEY_MAX, |
336 | }; | 337 | }; |
337 | 338 | ||
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index d7ecad0093bb..2e700ec0601f 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -138,6 +138,9 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr | |||
138 | struct resource *res, unsigned int n_res, | 138 | struct resource *res, unsigned int n_res, |
139 | const void *data, size_t size); | 139 | const void *data, size_t size); |
140 | 140 | ||
141 | extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); | ||
142 | extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); | ||
143 | |||
141 | /* early platform driver interface */ | 144 | /* early platform driver interface */ |
142 | struct early_platform_driver { | 145 | struct early_platform_driver { |
143 | const char *class_str; | 146 | const char *class_str; |
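
platform_bus_get_pm_ops() and platform_bus_set_pm_ops() let platform code swap the dev_pm_ops used by the platform bus. A minimal sketch of how board code might install its own callbacks; the names and callback bodies are hypothetical.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_plat_suspend(struct device *dev)
{
	/* board-specific suspend work would go here */
	return 0;
}

static int my_plat_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops my_plat_pm_ops = {
	.suspend = my_plat_suspend,
	.resume  = my_plat_resume,
};

static const struct dev_pm_ops *saved_plat_pm_ops;

static int __init my_plat_pm_init(void)
{
	/* keep the defaults around in case they need to be restored */
	saved_plat_pm_ops = platform_bus_get_pm_ops();
	platform_bus_set_pm_ops(&my_plat_pm_ops);
	return 0;
}
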
diff --git a/include/linux/pm.h b/include/linux/pm.h index 52e8c55ff314..40f3f45702ba 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -41,6 +41,12 @@ extern void (*pm_power_off_prepare)(void); | |||
41 | 41 | ||
42 | struct device; | 42 | struct device; |
43 | 43 | ||
44 | #ifdef CONFIG_PM | ||
45 | extern const char power_group_name[]; /* = "power" */ | ||
46 | #else | ||
47 | #define power_group_name NULL | ||
48 | #endif | ||
49 | |||
44 | typedef struct pm_message { | 50 | typedef struct pm_message { |
45 | int event; | 51 | int event; |
46 | } pm_message_t; | 52 | } pm_message_t; |
@@ -438,6 +444,9 @@ enum rpm_status { | |||
438 | * | 444 | * |
439 | * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback | 445 | * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback |
440 | * | 446 | * |
447 | * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has | ||
448 | * been inactive for as long as power.autosuspend_delay | ||
449 | * | ||
441 | * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback | 450 | * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback |
442 | */ | 451 | */ |
443 | 452 | ||
@@ -445,26 +454,28 @@ enum rpm_request { | |||
445 | RPM_REQ_NONE = 0, | 454 | RPM_REQ_NONE = 0, |
446 | RPM_REQ_IDLE, | 455 | RPM_REQ_IDLE, |
447 | RPM_REQ_SUSPEND, | 456 | RPM_REQ_SUSPEND, |
457 | RPM_REQ_AUTOSUSPEND, | ||
448 | RPM_REQ_RESUME, | 458 | RPM_REQ_RESUME, |
449 | }; | 459 | }; |
450 | 460 | ||
461 | struct wakeup_source; | ||
462 | |||
451 | struct dev_pm_info { | 463 | struct dev_pm_info { |
452 | pm_message_t power_state; | 464 | pm_message_t power_state; |
453 | unsigned int can_wakeup:1; | 465 | unsigned int can_wakeup:1; |
454 | unsigned int should_wakeup:1; | ||
455 | unsigned async_suspend:1; | 466 | unsigned async_suspend:1; |
456 | enum dpm_state status; /* Owned by the PM core */ | 467 | enum dpm_state status; /* Owned by the PM core */ |
468 | spinlock_t lock; | ||
457 | #ifdef CONFIG_PM_SLEEP | 469 | #ifdef CONFIG_PM_SLEEP |
458 | struct list_head entry; | 470 | struct list_head entry; |
459 | struct completion completion; | 471 | struct completion completion; |
460 | unsigned long wakeup_count; | 472 | struct wakeup_source *wakeup; |
461 | #endif | 473 | #endif |
462 | #ifdef CONFIG_PM_RUNTIME | 474 | #ifdef CONFIG_PM_RUNTIME |
463 | struct timer_list suspend_timer; | 475 | struct timer_list suspend_timer; |
464 | unsigned long timer_expires; | 476 | unsigned long timer_expires; |
465 | struct work_struct work; | 477 | struct work_struct work; |
466 | wait_queue_head_t wait_queue; | 478 | wait_queue_head_t wait_queue; |
467 | spinlock_t lock; | ||
468 | atomic_t usage_count; | 479 | atomic_t usage_count; |
469 | atomic_t child_count; | 480 | atomic_t child_count; |
470 | unsigned int disable_depth:3; | 481 | unsigned int disable_depth:3; |
@@ -474,9 +485,14 @@ struct dev_pm_info { | |||
474 | unsigned int deferred_resume:1; | 485 | unsigned int deferred_resume:1; |
475 | unsigned int run_wake:1; | 486 | unsigned int run_wake:1; |
476 | unsigned int runtime_auto:1; | 487 | unsigned int runtime_auto:1; |
488 | unsigned int no_callbacks:1; | ||
489 | unsigned int use_autosuspend:1; | ||
490 | unsigned int timer_autosuspends:1; | ||
477 | enum rpm_request request; | 491 | enum rpm_request request; |
478 | enum rpm_status runtime_status; | 492 | enum rpm_status runtime_status; |
479 | int runtime_error; | 493 | int runtime_error; |
494 | int autosuspend_delay; | ||
495 | unsigned long last_busy; | ||
480 | unsigned long active_jiffies; | 496 | unsigned long active_jiffies; |
481 | unsigned long suspended_jiffies; | 497 | unsigned long suspended_jiffies; |
482 | unsigned long accounting_timestamp; | 498 | unsigned long accounting_timestamp; |
@@ -558,12 +574,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
558 | __suspend_report_result(__func__, fn, ret); \ | 574 | __suspend_report_result(__func__, fn, ret); \ |
559 | } while (0) | 575 | } while (0) |
560 | 576 | ||
561 | extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); | 577 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
562 | |||
563 | /* drivers/base/power/wakeup.c */ | ||
564 | extern void pm_wakeup_event(struct device *dev, unsigned int msec); | ||
565 | extern void pm_stay_awake(struct device *dev); | ||
566 | extern void pm_relax(void); | ||
567 | #else /* !CONFIG_PM_SLEEP */ | 578 | #else /* !CONFIG_PM_SLEEP */ |
568 | 579 | ||
569 | #define device_pm_lock() do {} while (0) | 580 | #define device_pm_lock() do {} while (0) |
@@ -576,11 +587,10 @@ static inline int dpm_suspend_start(pm_message_t state) | |||
576 | 587 | ||
577 | #define suspend_report_result(fn, ret) do {} while (0) | 588 | #define suspend_report_result(fn, ret) do {} while (0) |
578 | 589 | ||
579 | static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} | 590 | static inline int device_pm_wait_for_dev(struct device *a, struct device *b) |
580 | 591 | { | |
581 | static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | 592 | return 0; |
582 | static inline void pm_stay_awake(struct device *dev) {} | 593 | } |
583 | static inline void pm_relax(void) {} | ||
584 | #endif /* !CONFIG_PM_SLEEP */ | 594 | #endif /* !CONFIG_PM_SLEEP */ |
585 | 595 | ||
586 | /* How to reorder dpm_list after device_move() */ | 596 | /* How to reorder dpm_list after device_move() */ |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6e81888c6222..3ec2358f8692 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -12,18 +12,24 @@ | |||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
14 | 14 | ||
15 | #include <linux/jiffies.h> | ||
16 | |||
17 | /* Runtime PM flag argument bits */ | ||
18 | #define RPM_ASYNC 0x01 /* Request is asynchronous */ | ||
19 | #define RPM_NOWAIT 0x02 /* Don't wait for concurrent | ||
20 | state change */ | ||
21 | #define RPM_GET_PUT 0x04 /* Increment/decrement the | ||
22 | usage_count */ | ||
23 | #define RPM_AUTO 0x08 /* Use autosuspend_delay */ | ||
24 | |||
15 | #ifdef CONFIG_PM_RUNTIME | 25 | #ifdef CONFIG_PM_RUNTIME |
16 | 26 | ||
17 | extern struct workqueue_struct *pm_wq; | 27 | extern struct workqueue_struct *pm_wq; |
18 | 28 | ||
19 | extern int pm_runtime_idle(struct device *dev); | 29 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
20 | extern int pm_runtime_suspend(struct device *dev); | 30 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
21 | extern int pm_runtime_resume(struct device *dev); | 31 | extern int __pm_runtime_resume(struct device *dev, int rpmflags); |
22 | extern int pm_request_idle(struct device *dev); | ||
23 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); | 32 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); |
24 | extern int pm_request_resume(struct device *dev); | ||
25 | extern int __pm_runtime_get(struct device *dev, bool sync); | ||
26 | extern int __pm_runtime_put(struct device *dev, bool sync); | ||
27 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); | 33 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); |
28 | extern int pm_runtime_barrier(struct device *dev); | 34 | extern int pm_runtime_barrier(struct device *dev); |
29 | extern void pm_runtime_enable(struct device *dev); | 35 | extern void pm_runtime_enable(struct device *dev); |
@@ -33,6 +39,10 @@ extern void pm_runtime_forbid(struct device *dev); | |||
33 | extern int pm_generic_runtime_idle(struct device *dev); | 39 | extern int pm_generic_runtime_idle(struct device *dev); |
34 | extern int pm_generic_runtime_suspend(struct device *dev); | 40 | extern int pm_generic_runtime_suspend(struct device *dev); |
35 | extern int pm_generic_runtime_resume(struct device *dev); | 41 | extern int pm_generic_runtime_resume(struct device *dev); |
42 | extern void pm_runtime_no_callbacks(struct device *dev); | ||
43 | extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); | ||
44 | extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); | ||
45 | extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); | ||
36 | 46 | ||
37 | static inline bool pm_children_suspended(struct device *dev) | 47 | static inline bool pm_children_suspended(struct device *dev) |
38 | { | 48 | { |
@@ -70,19 +80,29 @@ static inline bool pm_runtime_suspended(struct device *dev) | |||
70 | return dev->power.runtime_status == RPM_SUSPENDED; | 80 | return dev->power.runtime_status == RPM_SUSPENDED; |
71 | } | 81 | } |
72 | 82 | ||
83 | static inline void pm_runtime_mark_last_busy(struct device *dev) | ||
84 | { | ||
85 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | ||
86 | } | ||
87 | |||
73 | #else /* !CONFIG_PM_RUNTIME */ | 88 | #else /* !CONFIG_PM_RUNTIME */ |
74 | 89 | ||
75 | static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; } | 90 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) |
76 | static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; } | 91 | { |
77 | static inline int pm_runtime_resume(struct device *dev) { return 0; } | 92 | return -ENOSYS; |
78 | static inline int pm_request_idle(struct device *dev) { return -ENOSYS; } | 93 | } |
94 | static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) | ||
95 | { | ||
96 | return -ENOSYS; | ||
97 | } | ||
98 | static inline int __pm_runtime_resume(struct device *dev, int rpmflags) | ||
99 | { | ||
100 | return 1; | ||
101 | } | ||
79 | static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) | 102 | static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) |
80 | { | 103 | { |
81 | return -ENOSYS; | 104 | return -ENOSYS; |
82 | } | 105 | } |
83 | static inline int pm_request_resume(struct device *dev) { return 0; } | ||
84 | static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; } | ||
85 | static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; } | ||
86 | static inline int __pm_runtime_set_status(struct device *dev, | 106 | static inline int __pm_runtime_set_status(struct device *dev, |
87 | unsigned int status) { return 0; } | 107 | unsigned int status) { return 0; } |
88 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } | 108 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } |
@@ -102,27 +122,82 @@ static inline bool pm_runtime_suspended(struct device *dev) { return false; } | |||
102 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } | 122 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } |
103 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | 123 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } |
104 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | 124 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } |
125 | static inline void pm_runtime_no_callbacks(struct device *dev) {} | ||
126 | |||
127 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} | ||
128 | static inline void __pm_runtime_use_autosuspend(struct device *dev, | ||
129 | bool use) {} | ||
130 | static inline void pm_runtime_set_autosuspend_delay(struct device *dev, | ||
131 | int delay) {} | ||
132 | static inline unsigned long pm_runtime_autosuspend_expiration( | ||
133 | struct device *dev) { return 0; } | ||
105 | 134 | ||
106 | #endif /* !CONFIG_PM_RUNTIME */ | 135 | #endif /* !CONFIG_PM_RUNTIME */ |
107 | 136 | ||
137 | static inline int pm_runtime_idle(struct device *dev) | ||
138 | { | ||
139 | return __pm_runtime_idle(dev, 0); | ||
140 | } | ||
141 | |||
142 | static inline int pm_runtime_suspend(struct device *dev) | ||
143 | { | ||
144 | return __pm_runtime_suspend(dev, 0); | ||
145 | } | ||
146 | |||
147 | static inline int pm_runtime_autosuspend(struct device *dev) | ||
148 | { | ||
149 | return __pm_runtime_suspend(dev, RPM_AUTO); | ||
150 | } | ||
151 | |||
152 | static inline int pm_runtime_resume(struct device *dev) | ||
153 | { | ||
154 | return __pm_runtime_resume(dev, 0); | ||
155 | } | ||
156 | |||
157 | static inline int pm_request_idle(struct device *dev) | ||
158 | { | ||
159 | return __pm_runtime_idle(dev, RPM_ASYNC); | ||
160 | } | ||
161 | |||
162 | static inline int pm_request_resume(struct device *dev) | ||
163 | { | ||
164 | return __pm_runtime_resume(dev, RPM_ASYNC); | ||
165 | } | ||
166 | |||
167 | static inline int pm_request_autosuspend(struct device *dev) | ||
168 | { | ||
169 | return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); | ||
170 | } | ||
171 | |||
108 | static inline int pm_runtime_get(struct device *dev) | 172 | static inline int pm_runtime_get(struct device *dev) |
109 | { | 173 | { |
110 | return __pm_runtime_get(dev, false); | 174 | return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); |
111 | } | 175 | } |
112 | 176 | ||
113 | static inline int pm_runtime_get_sync(struct device *dev) | 177 | static inline int pm_runtime_get_sync(struct device *dev) |
114 | { | 178 | { |
115 | return __pm_runtime_get(dev, true); | 179 | return __pm_runtime_resume(dev, RPM_GET_PUT); |
116 | } | 180 | } |
117 | 181 | ||
118 | static inline int pm_runtime_put(struct device *dev) | 182 | static inline int pm_runtime_put(struct device *dev) |
119 | { | 183 | { |
120 | return __pm_runtime_put(dev, false); | 184 | return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); |
185 | } | ||
186 | |||
187 | static inline int pm_runtime_put_autosuspend(struct device *dev) | ||
188 | { | ||
189 | return __pm_runtime_suspend(dev, | ||
190 | RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); | ||
121 | } | 191 | } |
122 | 192 | ||
123 | static inline int pm_runtime_put_sync(struct device *dev) | 193 | static inline int pm_runtime_put_sync(struct device *dev) |
124 | { | 194 | { |
125 | return __pm_runtime_put(dev, true); | 195 | return __pm_runtime_idle(dev, RPM_GET_PUT); |
196 | } | ||
197 | |||
198 | static inline int pm_runtime_put_sync_autosuspend(struct device *dev) | ||
199 | { | ||
200 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); | ||
126 | } | 201 | } |
127 | 202 | ||
128 | static inline int pm_runtime_set_active(struct device *dev) | 203 | static inline int pm_runtime_set_active(struct device *dev) |
@@ -140,4 +215,14 @@ static inline void pm_runtime_disable(struct device *dev) | |||
140 | __pm_runtime_disable(dev, true); | 215 | __pm_runtime_disable(dev, true); |
141 | } | 216 | } |
142 | 217 | ||
218 | static inline void pm_runtime_use_autosuspend(struct device *dev) | ||
219 | { | ||
220 | __pm_runtime_use_autosuspend(dev, true); | ||
221 | } | ||
222 | |||
223 | static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | ||
224 | { | ||
225 | __pm_runtime_use_autosuspend(dev, false); | ||
226 | } | ||
227 | |||
143 | #endif | 228 | #endif |
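
The runtime PM entry points above are now thin wrappers that pass RPM_ASYNC/RPM_GET_PUT/RPM_AUTO flag combinations to __pm_runtime_idle(), __pm_runtime_suspend() and __pm_runtime_resume(), and the autosuspend helpers build on RPM_AUTO. A minimal driver-side sketch of the intended autosuspend pattern; the device handling and the 2000 ms delay are illustrative assumptions.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Probe-time setup: stay active, then autosuspend after 2 s of inactivity. */
static void my_setup_runtime_pm(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
}

/* I/O path: resume synchronously, do the work, then let autosuspend kick in. */
static int my_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		/* drop the reference taken by pm_runtime_get_sync() */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

	/* ... talk to the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
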
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 76aca48722ae..9cff00dd6b63 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * pm_wakeup.h - Power management wakeup interface | 2 | * pm_wakeup.h - Power management wakeup interface |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Alan Stern | 4 | * Copyright (C) 2008 Alan Stern |
5 | * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -27,19 +28,77 @@ | |||
27 | 28 | ||
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | 30 | ||
30 | #ifdef CONFIG_PM | 31 | /** |
31 | 32 | * struct wakeup_source - Representation of wakeup sources | |
32 | /* Changes to device_may_wakeup take effect on the next pm state change. | ||
33 | * | 33 | * |
34 | * By default, most devices should leave wakeup disabled. The exceptions | 34 | * @total_time: Total time this wakeup source has been active. |
35 | * are devices that everyone expects to be wakeup sources: keyboards, | 35 | * @max_time: Maximum time this wakeup source has been continuously active. |
36 | * power buttons, possibly network interfaces, etc. | 36 | * @last_time: Monotonic clock when the wakeup source was last activated. |
37 | * @event_count: Number of signaled wakeup events. | ||
38 | * @active_count: Number of times the wakeup source was activated. | ||
39 | * @relax_count: Number of times the wakeup source was deactivated. | ||
40 | * @hit_count: Number of times the wakeup source might abort system suspend. | ||
41 | * @active: Status of the wakeup source. | ||
37 | */ | 42 | */ |
38 | static inline void device_init_wakeup(struct device *dev, bool val) | 43 | struct wakeup_source { |
44 | char *name; | ||
45 | struct list_head entry; | ||
46 | spinlock_t lock; | ||
47 | struct timer_list timer; | ||
48 | unsigned long timer_expires; | ||
49 | ktime_t total_time; | ||
50 | ktime_t max_time; | ||
51 | ktime_t last_time; | ||
52 | unsigned long event_count; | ||
53 | unsigned long active_count; | ||
54 | unsigned long relax_count; | ||
55 | unsigned long hit_count; | ||
56 | unsigned int active:1; | ||
57 | }; | ||
58 | |||
59 | #ifdef CONFIG_PM_SLEEP | ||
60 | |||
61 | /* | ||
62 | * Changes to device_may_wakeup take effect on the next pm state change. | ||
63 | */ | ||
64 | |||
65 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | ||
66 | { | ||
67 | dev->power.can_wakeup = capable; | ||
68 | } | ||
69 | |||
70 | static inline bool device_can_wakeup(struct device *dev) | ||
71 | { | ||
72 | return dev->power.can_wakeup; | ||
73 | } | ||
74 | |||
75 | |||
76 | |||
77 | static inline bool device_may_wakeup(struct device *dev) | ||
39 | { | 78 | { |
40 | dev->power.can_wakeup = dev->power.should_wakeup = val; | 79 | return dev->power.can_wakeup && !!dev->power.wakeup; |
41 | } | 80 | } |
42 | 81 | ||
82 | /* drivers/base/power/wakeup.c */ | ||
83 | extern struct wakeup_source *wakeup_source_create(const char *name); | ||
84 | extern void wakeup_source_destroy(struct wakeup_source *ws); | ||
85 | extern void wakeup_source_add(struct wakeup_source *ws); | ||
86 | extern void wakeup_source_remove(struct wakeup_source *ws); | ||
87 | extern struct wakeup_source *wakeup_source_register(const char *name); | ||
88 | extern void wakeup_source_unregister(struct wakeup_source *ws); | ||
89 | extern int device_wakeup_enable(struct device *dev); | ||
90 | extern int device_wakeup_disable(struct device *dev); | ||
91 | extern int device_init_wakeup(struct device *dev, bool val); | ||
92 | extern int device_set_wakeup_enable(struct device *dev, bool enable); | ||
93 | extern void __pm_stay_awake(struct wakeup_source *ws); | ||
94 | extern void pm_stay_awake(struct device *dev); | ||
95 | extern void __pm_relax(struct wakeup_source *ws); | ||
96 | extern void pm_relax(struct device *dev); | ||
97 | extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); | ||
98 | extern void pm_wakeup_event(struct device *dev, unsigned int msec); | ||
99 | |||
100 | #else /* !CONFIG_PM_SLEEP */ | ||
101 | |||
43 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | 102 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) |
44 | { | 103 | { |
45 | dev->power.can_wakeup = capable; | 104 | dev->power.can_wakeup = capable; |
@@ -50,43 +109,63 @@ static inline bool device_can_wakeup(struct device *dev) | |||
50 | return dev->power.can_wakeup; | 109 | return dev->power.can_wakeup; |
51 | } | 110 | } |
52 | 111 | ||
53 | static inline void device_set_wakeup_enable(struct device *dev, bool enable) | 112 | static inline bool device_may_wakeup(struct device *dev) |
54 | { | 113 | { |
55 | dev->power.should_wakeup = enable; | 114 | return false; |
56 | } | 115 | } |
57 | 116 | ||
58 | static inline bool device_may_wakeup(struct device *dev) | 117 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
59 | { | 118 | { |
60 | return dev->power.can_wakeup && dev->power.should_wakeup; | 119 | return NULL; |
61 | } | 120 | } |
62 | 121 | ||
63 | #else /* !CONFIG_PM */ | 122 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} |
123 | |||
124 | static inline void wakeup_source_add(struct wakeup_source *ws) {} | ||
64 | 125 | ||
65 | /* For some reason the following routines work even without CONFIG_PM */ | 126 | static inline void wakeup_source_remove(struct wakeup_source *ws) {} |
66 | static inline void device_init_wakeup(struct device *dev, bool val) | 127 | |
128 | static inline struct wakeup_source *wakeup_source_register(const char *name) | ||
67 | { | 129 | { |
68 | dev->power.can_wakeup = val; | 130 | return NULL; |
69 | } | 131 | } |
70 | 132 | ||
71 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | 133 | static inline void wakeup_source_unregister(struct wakeup_source *ws) {} |
134 | |||
135 | static inline int device_wakeup_enable(struct device *dev) | ||
72 | { | 136 | { |
73 | dev->power.can_wakeup = capable; | 137 | return -EINVAL; |
74 | } | 138 | } |
75 | 139 | ||
76 | static inline bool device_can_wakeup(struct device *dev) | 140 | static inline int device_wakeup_disable(struct device *dev) |
77 | { | 141 | { |
78 | return dev->power.can_wakeup; | 142 | return 0; |
79 | } | 143 | } |
80 | 144 | ||
81 | static inline void device_set_wakeup_enable(struct device *dev, bool enable) | 145 | static inline int device_init_wakeup(struct device *dev, bool val) |
82 | { | 146 | { |
147 | dev->power.can_wakeup = val; | ||
148 | return val ? -EINVAL : 0; | ||
83 | } | 149 | } |
84 | 150 | ||
85 | static inline bool device_may_wakeup(struct device *dev) | 151 | |
152 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) | ||
86 | { | 153 | { |
87 | return false; | 154 | return -EINVAL; |
88 | } | 155 | } |
89 | 156 | ||
90 | #endif /* !CONFIG_PM */ | 157 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} |
158 | |||
159 | static inline void pm_stay_awake(struct device *dev) {} | ||
160 | |||
161 | static inline void __pm_relax(struct wakeup_source *ws) {} | ||
162 | |||
163 | static inline void pm_relax(struct device *dev) {} | ||
164 | |||
165 | static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {} | ||
166 | |||
167 | static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | ||
168 | |||
169 | #endif /* !CONFIG_PM_SLEEP */ | ||
91 | 170 | ||
92 | #endif /* _LINUX_PM_WAKEUP_H */ | 171 | #endif /* _LINUX_PM_WAKEUP_H */ |
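
pm_wakeup.h now exposes struct wakeup_source together with a register/stay-awake/relax API and per-device pm_stay_awake()/pm_relax()/pm_wakeup_event() helpers. A short sketch of how a driver might use them; the names, the interrupt handler and the 500 ms grace period are illustrative assumptions.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *my_ws;

static int my_wakeup_setup(struct device *dev)
{
	/* mark the device wakeup-capable and enable wakeup by default */
	device_init_wakeup(dev, true);

	/* a named source for events not tied to a particular struct device */
	my_ws = wakeup_source_register("my_virtual_source");
	return my_ws ? 0 : -ENOMEM;
}

static irqreturn_t my_wakeup_irq(int irq, void *data)
{
	struct device *dev = data;

	/* keep the system awake for 500 ms while the event is handled */
	pm_wakeup_event(dev, 500);

	/* or hold the named source explicitly until processing is done */
	__pm_stay_awake(my_ws);
	/* ... process the wakeup event ... */
	__pm_relax(my_ws);

	return IRQ_HANDLED;
}
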
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 30083a896f36..7d7325685c42 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h | |||
@@ -89,6 +89,7 @@ enum power_supply_property { | |||
89 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, | 89 | POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, |
90 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | 90 | POWER_SUPPLY_PROP_VOLTAGE_NOW, |
91 | POWER_SUPPLY_PROP_VOLTAGE_AVG, | 91 | POWER_SUPPLY_PROP_VOLTAGE_AVG, |
92 | POWER_SUPPLY_PROP_CURRENT_MAX, | ||
92 | POWER_SUPPLY_PROP_CURRENT_NOW, | 93 | POWER_SUPPLY_PROP_CURRENT_NOW, |
93 | POWER_SUPPLY_PROP_CURRENT_AVG, | 94 | POWER_SUPPLY_PROP_CURRENT_AVG, |
94 | POWER_SUPPLY_PROP_POWER_NOW, | 95 | POWER_SUPPLY_PROP_POWER_NOW, |
@@ -125,7 +126,10 @@ enum power_supply_type { | |||
125 | POWER_SUPPLY_TYPE_BATTERY = 0, | 126 | POWER_SUPPLY_TYPE_BATTERY = 0, |
126 | POWER_SUPPLY_TYPE_UPS, | 127 | POWER_SUPPLY_TYPE_UPS, |
127 | POWER_SUPPLY_TYPE_MAINS, | 128 | POWER_SUPPLY_TYPE_MAINS, |
128 | POWER_SUPPLY_TYPE_USB, | 129 | POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ |
130 | POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ | ||
131 | POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ | ||
132 | POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ | ||
129 | }; | 133 | }; |
130 | 134 | ||
131 | union power_supply_propval { | 135 | union power_supply_propval { |
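
POWER_SUPPLY_PROP_CURRENT_MAX and the new USB port types let a charger driver report which kind of USB charging port it sits behind and how much current may be drawn from it. A hedged sketch of a get_property() callback using them; the driver name, the 1.5 A figure and the surrounding structure are illustrative assumptions.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/power_supply.h>

static enum power_supply_property my_charger_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_CURRENT_MAX,
};

static int my_charger_get_property(struct power_supply *psy,
				   enum power_supply_property psp,
				   union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = 1;
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		/* e.g. 1.5 A from a Dedicated Charging Port, in microamps */
		val->intval = 1500000;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static struct power_supply my_charger = {
	.name		= "my_usb_charger",
	.type		= POWER_SUPPLY_TYPE_USB_DCP,
	.properties	= my_charger_props,
	.num_properties	= ARRAY_SIZE(my_charger_props),
	.get_property	= my_charger_get_property,
};
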
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 634b8e674ac5..a39cbed9ee17 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr) | |||
47 | { | 47 | { |
48 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); | 48 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); |
49 | } | 49 | } |
50 | #define radix_tree_indirect_to_ptr(ptr) \ | ||
51 | radix_tree_indirect_to_ptr((void __force *)(ptr)) | ||
50 | 52 | ||
51 | static inline int radix_tree_is_indirect_ptr(void *ptr) | 53 | static inline int radix_tree_is_indirect_ptr(void *ptr) |
52 | { | 54 | { |
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) | |||
61 | struct radix_tree_root { | 63 | struct radix_tree_root { |
62 | unsigned int height; | 64 | unsigned int height; |
63 | gfp_t gfp_mask; | 65 | gfp_t gfp_mask; |
64 | struct radix_tree_node *rnode; | 66 | struct radix_tree_node __rcu *rnode; |
65 | }; | 67 | }; |
66 | 68 | ||
67 | #define RADIX_TREE_INIT(mask) { \ | 69 | #define RADIX_TREE_INIT(mask) { \ |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4ec3b38ce9c5..f31ef61f1c65 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -10,6 +10,21 @@ | |||
10 | #include <linux/rcupdate.h> | 10 | #include <linux/rcupdate.h> |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * Why is there no list_empty_rcu()? Because list_empty() serves this | ||
14 | * purpose. The list_empty() function fetches the RCU-protected pointer | ||
15 | * and compares it to the address of the list head, but neither dereferences | ||
16 | * this pointer itself nor provides this pointer to the caller. Therefore, | ||
17 | * it is not necessary to use rcu_dereference(), so that list_empty() can | ||
18 | * be used anywhere you would want to use a list_empty_rcu(). | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * return the ->next pointer of a list_head in an rcu safe | ||
23 | * way, we must not access it directly | ||
24 | */ | ||
25 | #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) | ||
26 | |||
27 | /* | ||
13 | * Insert a new entry between two known consecutive entries. | 28 | * Insert a new entry between two known consecutive entries. |
14 | * | 29 | * |
15 | * This is only for internal list manipulation where we know | 30 | * This is only for internal list manipulation where we know |
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new, | |||
20 | { | 35 | { |
21 | new->next = next; | 36 | new->next = next; |
22 | new->prev = prev; | 37 | new->prev = prev; |
23 | rcu_assign_pointer(prev->next, new); | 38 | rcu_assign_pointer(list_next_rcu(prev), new); |
24 | next->prev = new; | 39 | next->prev = new; |
25 | } | 40 | } |
26 | 41 | ||
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old, | |||
138 | { | 153 | { |
139 | new->next = old->next; | 154 | new->next = old->next; |
140 | new->prev = old->prev; | 155 | new->prev = old->prev; |
141 | rcu_assign_pointer(new->prev->next, new); | 156 | rcu_assign_pointer(list_next_rcu(new->prev), new); |
142 | new->next->prev = new; | 157 | new->next->prev = new; |
143 | old->prev = LIST_POISON2; | 158 | old->prev = LIST_POISON2; |
144 | } | 159 | } |
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
193 | */ | 208 | */ |
194 | 209 | ||
195 | last->next = at; | 210 | last->next = at; |
196 | rcu_assign_pointer(head->next, first); | 211 | rcu_assign_pointer(list_next_rcu(head), first); |
197 | first->prev = head; | 212 | first->prev = head; |
198 | at->prev = last; | 213 | at->prev = last; |
199 | } | 214 | } |
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
208 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 223 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
209 | */ | 224 | */ |
210 | #define list_entry_rcu(ptr, type, member) \ | 225 | #define list_entry_rcu(ptr, type, member) \ |
211 | container_of(rcu_dereference_raw(ptr), type, member) | 226 | ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ |
227 | container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ | ||
228 | }) | ||
212 | 229 | ||
213 | /** | 230 | /** |
214 | * list_first_entry_rcu - get the first element from a list | 231 | * list_first_entry_rcu - get the first element from a list |
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
225 | list_entry_rcu((ptr)->next, type, member) | 242 | list_entry_rcu((ptr)->next, type, member) |
226 | 243 | ||
227 | #define __list_for_each_rcu(pos, head) \ | 244 | #define __list_for_each_rcu(pos, head) \ |
228 | for (pos = rcu_dereference_raw((head)->next); \ | 245 | for (pos = rcu_dereference_raw(list_next_rcu(head)); \ |
229 | pos != (head); \ | 246 | pos != (head); \ |
230 | pos = rcu_dereference_raw(pos->next)) | 247 | pos = rcu_dereference_raw(list_next_rcu((pos))) |
231 | 248 | ||
232 | /** | 249 | /** |
233 | * list_for_each_entry_rcu - iterate over rcu list of given type | 250 | * list_for_each_entry_rcu - iterate over rcu list of given type |
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
257 | * as long as the traversal is guarded by rcu_read_lock(). | 274 | * as long as the traversal is guarded by rcu_read_lock(). |
258 | */ | 275 | */ |
259 | #define list_for_each_continue_rcu(pos, head) \ | 276 | #define list_for_each_continue_rcu(pos, head) \ |
260 | for ((pos) = rcu_dereference_raw((pos)->next); \ | 277 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ |
261 | prefetch((pos)->next), (pos) != (head); \ | 278 | prefetch((pos)->next), (pos) != (head); \ |
262 | (pos) = rcu_dereference_raw((pos)->next)) | 279 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) |
263 | 280 | ||
264 | /** | 281 | /** |
265 | * list_for_each_entry_continue_rcu - continue iteration over list of given type | 282 | * list_for_each_entry_continue_rcu - continue iteration over list of given type |
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old, | |||
314 | 331 | ||
315 | new->next = next; | 332 | new->next = next; |
316 | new->pprev = old->pprev; | 333 | new->pprev = old->pprev; |
317 | rcu_assign_pointer(*new->pprev, new); | 334 | rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); |
318 | if (next) | 335 | if (next) |
319 | new->next->pprev = &new->next; | 336 | new->next->pprev = &new->next; |
320 | old->pprev = LIST_POISON2; | 337 | old->pprev = LIST_POISON2; |
321 | } | 338 | } |
322 | 339 | ||
340 | /* | ||
341 | * return the first or the next element in an RCU protected hlist | ||
342 | */ | ||
343 | #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) | ||
344 | #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) | ||
345 | #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) | ||
346 | |||
323 | /** | 347 | /** |
324 | * hlist_add_head_rcu | 348 | * hlist_add_head_rcu |
325 | * @n: the element to add to the hash list. | 349 | * @n: the element to add to the hash list. |
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, | |||
346 | 370 | ||
347 | n->next = first; | 371 | n->next = first; |
348 | n->pprev = &h->first; | 372 | n->pprev = &h->first; |
349 | rcu_assign_pointer(h->first, n); | 373 | rcu_assign_pointer(hlist_first_rcu(h), n); |
350 | if (first) | 374 | if (first) |
351 | first->pprev = &n->next; | 375 | first->pprev = &n->next; |
352 | } | 376 | } |
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n, | |||
374 | { | 398 | { |
375 | n->pprev = next->pprev; | 399 | n->pprev = next->pprev; |
376 | n->next = next; | 400 | n->next = next; |
377 | rcu_assign_pointer(*(n->pprev), n); | 401 | rcu_assign_pointer(hlist_pprev_rcu(n), n); |
378 | next->pprev = &n->next; | 402 | next->pprev = &n->next; |
379 | } | 403 | } |
380 | 404 | ||
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
401 | { | 425 | { |
402 | n->next = prev->next; | 426 | n->next = prev->next; |
403 | n->pprev = &prev->next; | 427 | n->pprev = &prev->next; |
404 | rcu_assign_pointer(prev->next, n); | 428 | rcu_assign_pointer(hlist_next_rcu(prev), n); |
405 | if (n->next) | 429 | if (n->next) |
406 | n->next->pprev = &n->next; | 430 | n->next->pprev = &n->next; |
407 | } | 431 | } |
408 | 432 | ||
409 | #define __hlist_for_each_rcu(pos, head) \ | 433 | #define __hlist_for_each_rcu(pos, head) \ |
410 | for (pos = rcu_dereference((head)->first); \ | 434 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
411 | pos && ({ prefetch(pos->next); 1; }); \ | 435 | pos && ({ prefetch(pos->next); 1; }); \ |
412 | pos = rcu_dereference(pos->next)) | 436 | pos = rcu_dereference(hlist_next_rcu(pos))) |
413 | 437 | ||
414 | /** | 438 | /** |
415 | * hlist_for_each_entry_rcu - iterate over rcu list of given type | 439 | * hlist_for_each_entry_rcu - iterate over rcu list of given type |
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
422 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() | 446 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
423 | * as long as the traversal is guarded by rcu_read_lock(). | 447 | * as long as the traversal is guarded by rcu_read_lock(). |
424 | */ | 448 | */ |
425 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | 449 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ |
426 | for (pos = rcu_dereference_raw((head)->first); \ | 450 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ |
427 | pos && ({ prefetch(pos->next); 1; }) && \ | 451 | pos && ({ prefetch(pos->next); 1; }) && \ |
428 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 452 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
429 | pos = rcu_dereference_raw(pos->next)) | 453 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) |
430 | 454 | ||
431 | /** | 455 | /** |
432 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type | 456 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type |
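
The list_next_rcu(), hlist_first_rcu(), hlist_next_rcu() and hlist_pprev_rcu() wrappers above keep the __rcu address-space annotations inside the macros, so callers of the RCU list API are unchanged. For reference, a small sketch of that API under the usual locking rules; the structure and function names are illustrative.

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	int val;
	struct list_head node;
	struct rcu_head rcu;	/* for call_rcu() on removal */
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

/* Readers take no lock, only an RCU read-side critical section. */
static bool my_list_contains(int val)
{
	struct my_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &my_list, node) {
		if (e->val == val) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writers serialize against each other, then publish with list_add_rcu(). */
static int my_list_add(int val)
{
	struct my_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->val = val;
	spin_lock(&my_lock);
	list_add_rcu(&e->node, &my_list);
	spin_unlock(&my_lock);
	return 0;
}
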
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index b70ffe53cb9f..2ae13714828b 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h | |||
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) | |||
37 | } | 37 | } |
38 | } | 38 | } |
39 | 39 | ||
40 | #define hlist_nulls_first_rcu(head) \ | ||
41 | (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) | ||
42 | |||
43 | #define hlist_nulls_next_rcu(node) \ | ||
44 | (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) | ||
45 | |||
40 | /** | 46 | /** |
41 | * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization | 47 | * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization |
42 | * @n: the element to delete from the hash list. | 48 | * @n: the element to delete from the hash list. |
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, | |||
88 | 94 | ||
89 | n->next = first; | 95 | n->next = first; |
90 | n->pprev = &h->first; | 96 | n->pprev = &h->first; |
91 | rcu_assign_pointer(h->first, n); | 97 | rcu_assign_pointer(hlist_nulls_first_rcu(h), n); |
92 | if (!is_a_nulls(first)) | 98 | if (!is_a_nulls(first)) |
93 | first->pprev = &n->next; | 99 | first->pprev = &n->next; |
94 | } | 100 | } |
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, | |||
100 | * @member: the name of the hlist_nulls_node within the struct. | 106 | * @member: the name of the hlist_nulls_node within the struct. |
101 | * | 107 | * |
102 | */ | 108 | */ |
103 | #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ | 109 | #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ |
104 | for (pos = rcu_dereference_raw((head)->first); \ | 110 | for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ |
105 | (!is_a_nulls(pos)) && \ | 111 | (!is_a_nulls(pos)) && \ |
106 | ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ | 112 | ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ |
107 | pos = rcu_dereference_raw(pos->next)) | 113 | pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) |
108 | 114 | ||
109 | #endif | 115 | #endif |
110 | #endif | 116 | #endif |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 83af1f8d8b74..03cda7bed985 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -41,11 +41,15 @@ | |||
41 | #include <linux/lockdep.h> | 41 | #include <linux/lockdep.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/debugobjects.h> | 43 | #include <linux/debugobjects.h> |
44 | #include <linux/compiler.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_RCU_TORTURE_TEST | 46 | #ifdef CONFIG_RCU_TORTURE_TEST |
46 | extern int rcutorture_runnable; /* for sysctl */ | 47 | extern int rcutorture_runnable; /* for sysctl */ |
47 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
48 | 49 | ||
50 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | ||
51 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | ||
52 | |||
49 | /** | 53 | /** |
50 | * struct rcu_head - callback structure for use with RCU | 54 | * struct rcu_head - callback structure for use with RCU |
51 | * @next: next update requests in a list | 55 | * @next: next update requests in a list |
@@ -57,29 +61,94 @@ struct rcu_head { | |||
57 | }; | 61 | }; |
58 | 62 | ||
59 | /* Exported common interfaces */ | 63 | /* Exported common interfaces */ |
60 | extern void rcu_barrier(void); | 64 | extern void call_rcu_sched(struct rcu_head *head, |
65 | void (*func)(struct rcu_head *rcu)); | ||
66 | extern void synchronize_sched(void); | ||
61 | extern void rcu_barrier_bh(void); | 67 | extern void rcu_barrier_bh(void); |
62 | extern void rcu_barrier_sched(void); | 68 | extern void rcu_barrier_sched(void); |
63 | extern void synchronize_sched_expedited(void); | 69 | extern void synchronize_sched_expedited(void); |
64 | extern int sched_expedited_torture_stats(char *page); | 70 | extern int sched_expedited_torture_stats(char *page); |
65 | 71 | ||
72 | static inline void __rcu_read_lock_bh(void) | ||
73 | { | ||
74 | local_bh_disable(); | ||
75 | } | ||
76 | |||
77 | static inline void __rcu_read_unlock_bh(void) | ||
78 | { | ||
79 | local_bh_enable(); | ||
80 | } | ||
81 | |||
82 | #ifdef CONFIG_PREEMPT_RCU | ||
83 | |||
84 | extern void __rcu_read_lock(void); | ||
85 | extern void __rcu_read_unlock(void); | ||
86 | void synchronize_rcu(void); | ||
87 | |||
88 | /* | ||
89 | * Defined as a macro as it is a very low level header included from | ||
90 | * areas that don't even know about current. This gives the rcu_read_lock() | ||
91 | * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other | ||
92 | * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. | ||
93 | */ | ||
94 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
95 | |||
96 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
97 | |||
98 | static inline void __rcu_read_lock(void) | ||
99 | { | ||
100 | preempt_disable(); | ||
101 | } | ||
102 | |||
103 | static inline void __rcu_read_unlock(void) | ||
104 | { | ||
105 | preempt_enable(); | ||
106 | } | ||
107 | |||
108 | static inline void synchronize_rcu(void) | ||
109 | { | ||
110 | synchronize_sched(); | ||
111 | } | ||
112 | |||
113 | static inline int rcu_preempt_depth(void) | ||
114 | { | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
119 | |||
66 | /* Internal to kernel */ | 120 | /* Internal to kernel */ |
67 | extern void rcu_init(void); | 121 | extern void rcu_init(void); |
122 | extern void rcu_sched_qs(int cpu); | ||
123 | extern void rcu_bh_qs(int cpu); | ||
124 | extern void rcu_check_callbacks(int cpu, int user); | ||
125 | struct notifier_block; | ||
126 | |||
127 | #ifdef CONFIG_NO_HZ | ||
128 | |||
129 | extern void rcu_enter_nohz(void); | ||
130 | extern void rcu_exit_nohz(void); | ||
131 | |||
132 | #else /* #ifdef CONFIG_NO_HZ */ | ||
133 | |||
134 | static inline void rcu_enter_nohz(void) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static inline void rcu_exit_nohz(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
68 | 143 | ||
69 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 144 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
70 | #include <linux/rcutree.h> | 145 | #include <linux/rcutree.h> |
71 | #elif defined(CONFIG_TINY_RCU) | 146 | #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
72 | #include <linux/rcutiny.h> | 147 | #include <linux/rcutiny.h> |
73 | #else | 148 | #else |
74 | #error "Unknown RCU implementation specified to kernel configuration" | 149 | #error "Unknown RCU implementation specified to kernel configuration" |
75 | #endif | 150 | #endif |
76 | 151 | ||
77 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | ||
78 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | ||
79 | #define INIT_RCU_HEAD(ptr) do { \ | ||
80 | (ptr)->next = NULL; (ptr)->func = NULL; \ | ||
81 | } while (0) | ||
82 | |||
83 | /* | 152 | /* |
84 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic | 153 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
85 | * initialization and destruction of rcu_head on the stack. rcu_head structures | 154 | * initialization and destruction of rcu_head on the stack. rcu_head structures |
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map; | |||
120 | extern int debug_lockdep_rcu_enabled(void); | 189 | extern int debug_lockdep_rcu_enabled(void); |
121 | 190 | ||
122 | /** | 191 | /** |
123 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 192 | * rcu_read_lock_held() - might we be in RCU read-side critical section? |
124 | * | 193 | * |
125 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU | 194 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
126 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 195 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
127 | * this assumes we are in an RCU read-side critical section unless it can | 196 | * this assumes we are in an RCU read-side critical section unless it can |
128 | * prove otherwise. | 197 | * prove otherwise. This is useful for debug checks in functions that |
198 | * require that they be called within an RCU read-side critical section. | ||
129 | * | 199 | * |
130 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 200 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot |
131 | * and while lockdep is disabled. | 201 | * and while lockdep is disabled. |
132 | */ | 202 | */ |
133 | static inline int rcu_read_lock_held(void) | 203 | static inline int rcu_read_lock_held(void) |
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void) | |||
144 | extern int rcu_read_lock_bh_held(void); | 214 | extern int rcu_read_lock_bh_held(void); |
145 | 215 | ||
146 | /** | 216 | /** |
147 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 217 | * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
148 | * | 218 | * |
149 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an | 219 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
150 | * RCU-sched read-side critical section. In absence of | 220 | * RCU-sched read-side critical section. In absence of |
151 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side | 221 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
152 | * critical section unless it can prove otherwise. Note that disabling | 222 | * critical section unless it can prove otherwise. Note that disabling |
153 | * of preemption (including disabling irqs) counts as an RCU-sched | 223 | * of preemption (including disabling irqs) counts as an RCU-sched |
154 | * read-side critical section. | 224 | * read-side critical section. This is useful for debug checks in functions |
225 | * that require that they be called within an RCU-sched read-side | ||
226 | * critical section. | ||
155 | * | 227 | * |
156 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 228 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
157 | * and while lockdep is disabled. | 229 | * and while lockdep is disabled. |
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void) | |||
211 | 283 | ||
212 | extern int rcu_my_thread_group_empty(void); | 284 | extern int rcu_my_thread_group_empty(void); |
213 | 285 | ||
214 | #define __do_rcu_dereference_check(c) \ | 286 | /** |
287 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | ||
288 | * @c: condition to check | ||
289 | */ | ||
290 | #define rcu_lockdep_assert(c) \ | ||
215 | do { \ | 291 | do { \ |
216 | static bool __warned; \ | 292 | static bool __warned; \ |
217 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | 293 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
@@ -220,41 +296,163 @@ extern int rcu_my_thread_group_empty(void); | |||
220 | } \ | 296 | } \ |
221 | } while (0) | 297 | } while (0) |
222 | 298 | ||
299 | #else /* #ifdef CONFIG_PROVE_RCU */ | ||
300 | |||
301 | #define rcu_lockdep_assert(c) do { } while (0) | ||
302 | |||
303 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
304 | |||
305 | /* | ||
306 | * Helper functions for rcu_dereference_check(), rcu_dereference_protected() | ||
307 | * and rcu_assign_pointer(). Some of these could be folded into their | ||
308 | * callers, but they are left separate in order to ease introduction of | ||
309 | * multiple flavors of pointers to match the multiple flavors of RCU | ||
310 | * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in | ||
311 | * the future. | ||
312 | */ | ||
313 | |||
314 | #ifdef __CHECKER__ | ||
315 | #define rcu_dereference_sparse(p, space) \ | ||
316 | ((void)(((typeof(*p) space *)p) == p)) | ||
317 | #else /* #ifdef __CHECKER__ */ | ||
318 | #define rcu_dereference_sparse(p, space) | ||
319 | #endif /* #else #ifdef __CHECKER__ */ | ||
320 | |||
321 | #define __rcu_access_pointer(p, space) \ | ||
322 | ({ \ | ||
323 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
324 | rcu_dereference_sparse(p, space); \ | ||
325 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
326 | }) | ||
327 | #define __rcu_dereference_check(p, c, space) \ | ||
328 | ({ \ | ||
329 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
330 | rcu_lockdep_assert(c); \ | ||
331 | rcu_dereference_sparse(p, space); \ | ||
332 | smp_read_barrier_depends(); \ | ||
333 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
334 | }) | ||
335 | #define __rcu_dereference_protected(p, c, space) \ | ||
336 | ({ \ | ||
337 | rcu_lockdep_assert(c); \ | ||
338 | rcu_dereference_sparse(p, space); \ | ||
339 | ((typeof(*p) __force __kernel *)(p)); \ | ||
340 | }) | ||
341 | |||
342 | #define __rcu_dereference_index_check(p, c) \ | ||
343 | ({ \ | ||
344 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
345 | rcu_lockdep_assert(c); \ | ||
346 | smp_read_barrier_depends(); \ | ||
347 | (_________p1); \ | ||
348 | }) | ||
349 | #define __rcu_assign_pointer(p, v, space) \ | ||
350 | ({ \ | ||
351 | if (!__builtin_constant_p(v) || \ | ||
352 | ((v) != NULL)) \ | ||
353 | smp_wmb(); \ | ||
354 | (p) = (typeof(*v) __force space *)(v); \ | ||
355 | }) | ||
356 | |||
357 | |||
358 | /** | ||
359 | * rcu_access_pointer() - fetch RCU pointer with no dereferencing | ||
360 | * @p: The pointer to read | ||
361 | * | ||
362 | * Return the value of the specified RCU-protected pointer, but omit the | ||
363 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
364 | * when the value of this pointer is accessed, but the pointer is not | ||
365 | * dereferenced, for example, when testing an RCU-protected pointer against | ||
366 | * NULL. Although rcu_access_pointer() may also be used in cases where | ||
367 | * update-side locks prevent the value of the pointer from changing, you | ||
368 | * should instead use rcu_dereference_protected() for this use case. | ||
369 | */ | ||
370 | #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) | ||
371 | |||
223 | /** | 372 | /** |
224 | * rcu_dereference_check - rcu_dereference with debug checking | 373 | * rcu_dereference_check() - rcu_dereference with debug checking |
225 | * @p: The pointer to read, prior to dereferencing | 374 | * @p: The pointer to read, prior to dereferencing |
226 | * @c: The conditions under which the dereference will take place | 375 | * @c: The conditions under which the dereference will take place |
227 | * | 376 | * |
228 | * Do an rcu_dereference(), but check that the conditions under which the | 377 | * Do an rcu_dereference(), but check that the conditions under which the |
229 | * dereference will take place are correct. Typically the conditions indicate | 378 | * dereference will take place are correct. Typically the conditions |
230 | * the various locking conditions that should be held at that point. The check | 379 | * indicate the various locking conditions that should be held at that |
231 | * should return true if the conditions are satisfied. | 380 | * point. The check should return true if the conditions are satisfied. |
381 | * An implicit check for being in an RCU read-side critical section | ||
382 | * (rcu_read_lock()) is included. | ||
232 | * | 383 | * |
233 | * For example: | 384 | * For example: |
234 | * | 385 | * |
235 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 386 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); |
236 | * lockdep_is_held(&foo->lock)); | ||
237 | * | 387 | * |
238 | * could be used to indicate to lockdep that foo->bar may only be dereferenced | 388 | * could be used to indicate to lockdep that foo->bar may only be dereferenced |
239 | * if either the RCU read lock is held, or that the lock required to replace | 389 | * if either rcu_read_lock() is held, or that the lock required to replace |
240 | * the bar struct at foo->bar is held. | 390 | * the bar struct at foo->bar is held. |
241 | * | 391 | * |
242 | * Note that the list of conditions may also include indications of when a lock | 392 | * Note that the list of conditions may also include indications of when a lock |
243 | * need not be held, for example during initialisation or destruction of the | 393 | * need not be held, for example during initialisation or destruction of the |
244 | * target struct: | 394 | * target struct: |
245 | * | 395 | * |
246 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 396 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || |
247 | * lockdep_is_held(&foo->lock) || | ||
248 | * atomic_read(&foo->usage) == 0); | 397 | * atomic_read(&foo->usage) == 0); |
398 | * | ||
399 | * Inserts memory barriers on architectures that require them | ||
400 | * (currently only the Alpha), prevents the compiler from refetching | ||
401 | * (and from merging fetches), and, more importantly, documents exactly | ||
402 | * which pointers are protected by RCU and checks that the pointer is | ||
403 | * annotated as __rcu. | ||
249 | */ | 404 | */ |
250 | #define rcu_dereference_check(p, c) \ | 405 | #define rcu_dereference_check(p, c) \ |
251 | ({ \ | 406 | __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) |
252 | __do_rcu_dereference_check(c); \ | 407 | |
253 | rcu_dereference_raw(p); \ | 408 | /** |
254 | }) | 409 | * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking |
410 | * @p: The pointer to read, prior to dereferencing | ||
411 | * @c: The conditions under which the dereference will take place | ||
412 | * | ||
413 | * This is the RCU-bh counterpart to rcu_dereference_check(). | ||
414 | */ | ||
415 | #define rcu_dereference_bh_check(p, c) \ | ||
416 | __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) | ||
255 | 417 | ||
256 | /** | 418 | /** |
257 | * rcu_dereference_protected - fetch RCU pointer when updates prevented | 419 | * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking |
420 | * @p: The pointer to read, prior to dereferencing | ||
421 | * @c: The conditions under which the dereference will take place | ||
422 | * | ||
423 | * This is the RCU-sched counterpart to rcu_dereference_check(). | ||
424 | */ | ||
425 | #define rcu_dereference_sched_check(p, c) \ | ||
426 | __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ | ||
427 | __rcu) | ||
428 | |||
429 | #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ | ||
430 | |||
431 | /** | ||
432 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
433 | * @p: The pointer to read, prior to dereferencing | ||
434 | * @c: The conditions under which the dereference will take place | ||
435 | * | ||
436 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
437 | * This allows rcu_dereference_index_check() to be used on integers, | ||
438 | * which can then be used as array indices. Attempting to use | ||
439 | * rcu_dereference_check() on an integer will give compiler warnings | ||
440 | * because the sparse address-space mechanism relies on dereferencing | ||
441 | * the RCU-protected pointer. Dereferencing integers is not something | ||
442 | * that even gcc will put up with. | ||
443 | * | ||
444 | * Note that this function does not implicitly check for RCU read-side | ||
445 | * critical sections. If this function gains lots of uses, it might | ||
446 | * make sense to provide versions for each flavor of RCU, but it does | ||
447 | * not make sense as of early 2010. | ||
448 | */ | ||
449 | #define rcu_dereference_index_check(p, c) \ | ||
450 | __rcu_dereference_index_check((p), (c)) | ||
451 | |||
452 | /** | ||
453 | * rcu_dereference_protected() - fetch RCU pointer when updates prevented | ||
454 | * @p: The pointer to read, prior to dereferencing | ||
455 | * @c: The conditions under which the dereference will take place | ||
258 | * | 456 | * |
259 | * Return the value of the specified RCU-protected pointer, but omit | 457 | * Return the value of the specified RCU-protected pointer, but omit |
260 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | 458 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This |
@@ -263,35 +461,61 @@ extern int rcu_my_thread_group_empty(void); | |||
263 | * prevent the compiler from repeating this reference or combining it | 461 | * prevent the compiler from repeating this reference or combining it |
264 | * with other references, so it should not be used without protection | 462 | * with other references, so it should not be used without protection |
265 | * of appropriate locks. | 463 | * of appropriate locks. |
464 | * | ||
465 | * This function is only for update-side use. Using this function | ||
466 | * when protected only by rcu_read_lock() will result in infrequent | ||
467 | * but very ugly failures. | ||
266 | */ | 468 | */ |
267 | #define rcu_dereference_protected(p, c) \ | 469 | #define rcu_dereference_protected(p, c) \ |
268 | ({ \ | 470 | __rcu_dereference_protected((p), (c), __rcu) |
269 | __do_rcu_dereference_check(c); \ | ||
270 | (p); \ | ||
271 | }) | ||
272 | 471 | ||
273 | #else /* #ifdef CONFIG_PROVE_RCU */ | 472 | /** |
473 | * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented | ||
474 | * @p: The pointer to read, prior to dereferencing | ||
475 | * @c: The conditions under which the dereference will take place | ||
476 | * | ||
477 | * This is the RCU-bh counterpart to rcu_dereference_protected(). | ||
478 | */ | ||
479 | #define rcu_dereference_bh_protected(p, c) \ | ||
480 | __rcu_dereference_protected((p), (c), __rcu) | ||
274 | 481 | ||
275 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) | 482 | /** |
276 | #define rcu_dereference_protected(p, c) (p) | 483 | * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented |
484 | * @p: The pointer to read, prior to dereferencing | ||
485 | * @c: The conditions under which the dereference will take place | ||
486 | * | ||
487 | * This is the RCU-sched counterpart to rcu_dereference_protected(). | ||
488 | */ | ||
489 | #define rcu_dereference_sched_protected(p, c) \ | ||
490 | __rcu_dereference_protected((p), (c), __rcu) | ||
277 | 491 | ||
278 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
279 | 492 | ||
280 | /** | 493 | /** |
281 | * rcu_access_pointer - fetch RCU pointer with no dereferencing | 494 | * rcu_dereference() - fetch RCU-protected pointer for dereferencing |
495 | * @p: The pointer to read, prior to dereferencing | ||
282 | * | 496 | * |
283 | * Return the value of the specified RCU-protected pointer, but omit the | 497 | * This is a simple wrapper around rcu_dereference_check(). |
284 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | 498 | */ |
285 | * when the value of this pointer is accessed, but the pointer is not | 499 | #define rcu_dereference(p) rcu_dereference_check(p, 0) |
286 | * dereferenced, for example, when testing an RCU-protected pointer against | 500 | |
287 | * NULL. This may also be used in cases where update-side locks prevent | 501 | /** |
288 | * the value of the pointer from changing, but rcu_dereference_protected() | 502 | * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing |
289 | * is a lighter-weight primitive for this use case. | 503 | * @p: The pointer to read, prior to dereferencing |
504 | * | ||
505 | * Makes rcu_dereference_check() do the dirty work. | ||
506 | */ | ||
507 | #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) | ||
508 | |||
509 | /** | ||
510 | * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing | ||
511 | * @p: The pointer to read, prior to dereferencing | ||
512 | * | ||
513 | * Makes rcu_dereference_check() do the dirty work. | ||
290 | */ | 514 | */ |
291 | #define rcu_access_pointer(p) ACCESS_ONCE(p) | 515 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
292 | 516 | ||
293 | /** | 517 | /** |
294 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 518 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
295 | * | 519 | * |
296 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 520 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
297 | * are within RCU read-side critical sections, then the | 521 | * are within RCU read-side critical sections, then the |
@@ -302,7 +526,7 @@ extern int rcu_my_thread_group_empty(void); | |||
302 | * until after all the other CPUs exit their critical sections. | 526 | * until after all the other CPUs exit their critical sections. |
303 | * | 527 | * |
304 | * Note, however, that RCU callbacks are permitted to run concurrently | 528 | * Note, however, that RCU callbacks are permitted to run concurrently |
305 | * with RCU read-side critical sections. One way that this can happen | 529 | * with new RCU read-side critical sections. One way that this can happen |
306 | * is via the following sequence of events: (1) CPU 0 enters an RCU | 530 | * is via the following sequence of events: (1) CPU 0 enters an RCU |
307 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register | 531 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register |
308 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, | 532 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
@@ -317,7 +541,20 @@ extern int rcu_my_thread_group_empty(void); | |||
317 | * will be deferred until the outermost RCU read-side critical section | 541 | * will be deferred until the outermost RCU read-side critical section |
318 | * completes. | 542 | * completes. |
319 | * | 543 | * |
320 | * It is illegal to block while in an RCU read-side critical section. | 544 | * You can avoid reading and understanding the next paragraph by |
545 | * following this rule: don't put anything in an rcu_read_lock() RCU | ||
546 | * read-side critical section that would block in a !PREEMPT kernel. | ||
547 | * But if you want the full story, read on! | ||
548 | * | ||
549 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it | ||
550 | * is illegal to block while in an RCU read-side critical section. In | ||
551 | * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) | ||
552 | * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may | ||
553 | * be preempted, but explicit blocking is illegal. Finally, in preemptible | ||
554 | * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, | ||
555 | * RCU read-side critical sections may be preempted and they may also | ||
556 | * block, but only when acquiring spinlocks that are subject to priority | ||
557 | * inheritance. | ||
321 | */ | 558 | */ |
322 | static inline void rcu_read_lock(void) | 559 | static inline void rcu_read_lock(void) |
323 | { | 560 | { |
@@ -337,7 +574,7 @@ static inline void rcu_read_lock(void) | |||
337 | */ | 574 | */ |
338 | 575 | ||
339 | /** | 576 | /** |
340 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 577 | * rcu_read_unlock() - marks the end of an RCU read-side critical section. |
341 | * | 578 | * |
342 | * See rcu_read_lock() for more information. | 579 | * See rcu_read_lock() for more information. |
343 | */ | 580 | */ |
@@ -349,15 +586,16 @@ static inline void rcu_read_unlock(void) | |||
349 | } | 586 | } |
350 | 587 | ||
351 | /** | 588 | /** |
352 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 589 | * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
353 | * | 590 | * |
354 | * This is equivalent to rcu_read_lock(), but to be used when updates | 591 | * This is equivalent to rcu_read_lock(), but to be used when updates |
355 | * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks | 592 | * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
356 | * consider completion of a softirq handler to be a quiescent state, | 593 | * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
357 | * a process in RCU read-side critical section must be protected by | 594 | * softirq handler to be a quiescent state, a process in RCU read-side |
358 | * disabling softirqs. Read-side critical sections in interrupt context | 595 | * critical section must be protected by disabling softirqs. Read-side |
359 | * can use just rcu_read_lock(). | 596 | * critical sections in interrupt context can use just rcu_read_lock(), |
360 | * | 597 | * though this should at least be commented to avoid confusing people |
598 | * reading the code. | ||
361 | */ | 599 | */ |
362 | static inline void rcu_read_lock_bh(void) | 600 | static inline void rcu_read_lock_bh(void) |
363 | { | 601 | { |
@@ -379,13 +617,12 @@ static inline void rcu_read_unlock_bh(void) | |||
379 | } | 617 | } |
380 | 618 | ||
381 | /** | 619 | /** |
382 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section | 620 | * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section |
383 | * | 621 | * |
384 | * Should be used with either | 622 | * This is equivalent to rcu_read_lock(), but to be used when updates |
385 | * - synchronize_sched() | 623 | * are being done using call_rcu_sched() or synchronize_sched(). |
386 | * or | 624 | * Read-side critical sections can also be introduced by anything that |
387 | * - call_rcu_sched() and rcu_barrier_sched() | 625 | * disables preemption, including local_irq_disable() and friends. |
388 | * on the write-side to insure proper synchronization. | ||
389 | */ | 626 | */ |
390 | static inline void rcu_read_lock_sched(void) | 627 | static inline void rcu_read_lock_sched(void) |
391 | { | 628 | { |
@@ -420,54 +657,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
420 | preempt_enable_notrace(); | 657 | preempt_enable_notrace(); |
421 | } | 658 | } |
422 | 659 | ||
423 | |||
424 | /** | 660 | /** |
425 | * rcu_dereference_raw - fetch an RCU-protected pointer | 661 | * rcu_assign_pointer() - assign to RCU-protected pointer |
662 | * @p: pointer to assign to | ||
663 | * @v: value to assign (publish) | ||
426 | * | 664 | * |
427 | * The caller must be within some flavor of RCU read-side critical | 665 | * Assigns the specified value to the specified RCU-protected |
428 | * section, or must be otherwise preventing the pointer from changing, | 666 | * pointer, ensuring that any concurrent RCU readers will see |
429 | * for example, by holding an appropriate lock. This pointer may later | 667 | * any prior initialization. Returns the value assigned. |
430 | * be safely dereferenced. It is the caller's responsibility to have | ||
431 | * done the right thing, as this primitive does no checking of any kind. | ||
432 | * | ||
433 | * Inserts memory barriers on architectures that require them | ||
434 | * (currently only the Alpha), and, more importantly, documents | ||
435 | * exactly which pointers are protected by RCU. | ||
436 | */ | ||
437 | #define rcu_dereference_raw(p) ({ \ | ||
438 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
439 | smp_read_barrier_depends(); \ | ||
440 | (_________p1); \ | ||
441 | }) | ||
442 | |||
443 | /** | ||
444 | * rcu_dereference - fetch an RCU-protected pointer, checking for RCU | ||
445 | * | ||
446 | * Makes rcu_dereference_check() do the dirty work. | ||
447 | */ | ||
448 | #define rcu_dereference(p) \ | ||
449 | rcu_dereference_check(p, rcu_read_lock_held()) | ||
450 | |||
451 | /** | ||
452 | * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh | ||
453 | * | ||
454 | * Makes rcu_dereference_check() do the dirty work. | ||
455 | */ | ||
456 | #define rcu_dereference_bh(p) \ | ||
457 | rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled()) | ||
458 | |||
459 | /** | ||
460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched | ||
461 | * | ||
462 | * Makes rcu_dereference_check() do the dirty work. | ||
463 | */ | ||
464 | #define rcu_dereference_sched(p) \ | ||
465 | rcu_dereference_check(p, rcu_read_lock_sched_held()) | ||
466 | |||
467 | /** | ||
468 | * rcu_assign_pointer - assign (publicize) a pointer to a newly | ||
469 | * initialized structure that will be dereferenced by RCU read-side | ||
470 | * critical sections. Returns the value assigned. | ||
471 | * | 668 | * |
472 | * Inserts memory barriers on architectures that require them | 669 | * Inserts memory barriers on architectures that require them |
473 | * (pretty much all of them other than x86), and also prevents | 670 | * (pretty much all of them other than x86), and also prevents |
@@ -476,14 +673,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
476 | * call documents which pointers will be dereferenced by RCU read-side | 673 | * call documents which pointers will be dereferenced by RCU read-side |
477 | * code. | 674 | * code. |
478 | */ | 675 | */ |
479 | |||
480 | #define rcu_assign_pointer(p, v) \ | 676 | #define rcu_assign_pointer(p, v) \ |
481 | ({ \ | 677 | __rcu_assign_pointer((p), (v), __rcu) |
482 | if (!__builtin_constant_p(v) || \ | 678 | |
483 | ((v) != NULL)) \ | 679 | /** |
484 | smp_wmb(); \ | 680 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
485 | (p) = (v); \ | 681 | * |
486 | }) | 682 | * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep |
683 | * splats. | ||
684 | */ | ||
685 | #define RCU_INIT_POINTER(p, v) \ | ||
686 | p = (typeof(*v) __force __rcu *)(v) | ||
487 | 687 | ||
488 | /* Infrastructure to implement the synchronize_() primitives. */ | 688 | /* Infrastructure to implement the synchronize_() primitives. */ |
489 | 689 | ||
@@ -494,26 +694,37 @@ struct rcu_synchronize { | |||
494 | 694 | ||
495 | extern void wakeme_after_rcu(struct rcu_head *head); | 695 | extern void wakeme_after_rcu(struct rcu_head *head); |
496 | 696 | ||
697 | #ifdef CONFIG_PREEMPT_RCU | ||
698 | |||
497 | /** | 699 | /** |
498 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 700 | * call_rcu() - Queue an RCU callback for invocation after a grace period. |
499 | * @head: structure to be used for queueing the RCU updates. | 701 | * @head: structure to be used for queueing the RCU updates. |
500 | * @func: actual update function to be invoked after the grace period | 702 | * @func: actual callback function to be invoked after the grace period |
501 | * | 703 | * |
502 | * The update function will be invoked some time after a full grace | 704 | * The callback function will be invoked some time after a full grace |
503 | * period elapses, in other words after all currently executing RCU | 705 | * period elapses, in other words after all pre-existing RCU read-side |
504 | * read-side critical sections have completed. RCU read-side critical | 706 | * critical sections have completed. However, the callback function |
707 | * might well execute concurrently with RCU read-side critical sections | ||
708 | * that started after call_rcu() was invoked. RCU read-side critical | ||
505 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 709 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
506 | * and may be nested. | 710 | * and may be nested. |
507 | */ | 711 | */ |
508 | extern void call_rcu(struct rcu_head *head, | 712 | extern void call_rcu(struct rcu_head *head, |
509 | void (*func)(struct rcu_head *head)); | 713 | void (*func)(struct rcu_head *head)); |
510 | 714 | ||
715 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
716 | |||
717 | /* In classic RCU, call_rcu() is just call_rcu_sched(). */ | ||
718 | #define call_rcu call_rcu_sched | ||
719 | |||
720 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
721 | |||
511 | /** | 722 | /** |
512 | * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period. | 723 | * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period. |
513 | * @head: structure to be used for queueing the RCU updates. | 724 | * @head: structure to be used for queueing the RCU updates. |
514 | * @func: actual update function to be invoked after the grace period | 725 | * @func: actual callback function to be invoked after the grace period |
515 | * | 726 | * |
516 | * The update function will be invoked some time after a full grace | 727 | * The callback function will be invoked some time after a full grace |
517 | * period elapses, in other words after all currently executing RCU | 728 | * period elapses, in other words after all currently executing RCU |
518 | * read-side critical sections have completed. call_rcu_bh() assumes | 729 | * read-side critical sections have completed. call_rcu_bh() assumes |
519 | * that the read-side critical sections end on completion of a softirq | 730 | * that the read-side critical sections end on completion of a softirq |
@@ -566,37 +777,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
566 | } | 777 | } |
567 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 778 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
568 | 779 | ||
569 | #ifndef CONFIG_PROVE_RCU | ||
570 | #define __do_rcu_dereference_check(c) do { } while (0) | ||
571 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
572 | |||
573 | #define __rcu_dereference_index_check(p, c) \ | ||
574 | ({ \ | ||
575 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
576 | __do_rcu_dereference_check(c); \ | ||
577 | smp_read_barrier_depends(); \ | ||
578 | (_________p1); \ | ||
579 | }) | ||
580 | |||
581 | /** | ||
582 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
583 | * @p: The pointer to read, prior to dereferencing | ||
584 | * @c: The conditions under which the dereference will take place | ||
585 | * | ||
586 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
587 | * This allows rcu_dereference_index_check() to be used on integers, | ||
588 | * which can then be used as array indices. Attempting to use | ||
589 | * rcu_dereference_check() on an integer will give compiler warnings | ||
590 | * because the sparse address-space mechanism relies on dereferencing | ||
591 | * the RCU-protected pointer. Dereferencing integers is not something | ||
592 | * that even gcc will put up with. | ||
593 | * | ||
594 | * Note that this function does not implicitly check for RCU read-side | ||
595 | * critical sections. If this function gains lots of uses, it might | ||
596 | * make sense to provide versions for each flavor of RCU, but it does | ||
597 | * not make sense as of early 2010. | ||
598 | */ | ||
599 | #define rcu_dereference_index_check(p, c) \ | ||
600 | __rcu_dereference_index_check((p), (c)) | ||
601 | |||
602 | #endif /* __LINUX_RCUPDATE_H */ | 780 | #endif /* __LINUX_RCUPDATE_H */ |
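A minimal usage sketch of the reworked accessors above (rcu_dereference(), rcu_dereference_protected(), rcu_assign_pointer(), call_rcu()); the names struct foo, gbl_foo, gbl_foo_lock and foo_reclaim() are hypothetical, everything else is the standard API as declared in this header:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {				/* hypothetical RCU-protected object */
	int a;
	struct rcu_head rcu;
};

static struct foo __rcu *gbl_foo;	/* __rcu so sparse checks the accesses */
static DEFINE_SPINLOCK(gbl_foo_lock);	/* serializes updaters */

/* Reader: rcu_dereference() now folds in the rcu_read_lock_held() lockdep check. */
static int foo_get_a(void)
{
	struct foo *fp;
	int a = -1;

	rcu_read_lock();
	fp = rcu_dereference(gbl_foo);
	if (fp)
		a = fp->a;
	rcu_read_unlock();
	return a;
}

static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: rcu_dereference_protected() documents that gbl_foo_lock, not
 * rcu_read_lock(), makes this access safe, and it omits the read barriers. */
static void foo_update_a(struct foo *new_fp)
{
	struct foo *old_fp;

	spin_lock(&gbl_foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo,
					   lockdep_is_held(&gbl_foo_lock));
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&gbl_foo_lock);
	if (old_fp)
		call_rcu(&old_fp->rcu, foo_reclaim);
}

The _bh and _sched flavors follow the same shape, substituting rcu_dereference_bh()/rcu_dereference_sched() and the matching read-side markers.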
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e2e893144a84..13877cb93a60 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -27,103 +27,101 @@ | |||
27 | 27 | ||
28 | #include <linux/cache.h> | 28 | #include <linux/cache.h> |
29 | 29 | ||
30 | void rcu_sched_qs(int cpu); | 30 | #define rcu_init_sched() do { } while (0) |
31 | void rcu_bh_qs(int cpu); | ||
32 | static inline void rcu_note_context_switch(int cpu) | ||
33 | { | ||
34 | rcu_sched_qs(cpu); | ||
35 | } | ||
36 | 31 | ||
37 | #define __rcu_read_lock() preempt_disable() | 32 | #ifdef CONFIG_TINY_RCU |
38 | #define __rcu_read_unlock() preempt_enable() | ||
39 | #define __rcu_read_lock_bh() local_bh_disable() | ||
40 | #define __rcu_read_unlock_bh() local_bh_enable() | ||
41 | #define call_rcu_sched call_rcu | ||
42 | 33 | ||
43 | #define rcu_init_sched() do { } while (0) | 34 | static inline void synchronize_rcu_expedited(void) |
44 | extern void rcu_check_callbacks(int cpu, int user); | 35 | { |
36 | synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ | ||
37 | } | ||
45 | 38 | ||
46 | static inline int rcu_needs_cpu(int cpu) | 39 | static inline void rcu_barrier(void) |
47 | { | 40 | { |
48 | return 0; | 41 | rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
49 | } | 42 | } |
50 | 43 | ||
51 | /* | 44 | #else /* #ifdef CONFIG_TINY_RCU */ |
52 | * Return the number of grace periods. | 45 | |
53 | */ | 46 | void rcu_barrier(void); |
54 | static inline long rcu_batches_completed(void) | 47 | void synchronize_rcu_expedited(void); |
48 | |||
49 | #endif /* #else #ifdef CONFIG_TINY_RCU */ | ||
50 | |||
51 | static inline void synchronize_rcu_bh(void) | ||
55 | { | 52 | { |
56 | return 0; | 53 | synchronize_sched(); |
57 | } | 54 | } |
58 | 55 | ||
59 | /* | 56 | static inline void synchronize_rcu_bh_expedited(void) |
60 | * Return the number of bottom-half grace periods. | ||
61 | */ | ||
62 | static inline long rcu_batches_completed_bh(void) | ||
63 | { | 57 | { |
64 | return 0; | 58 | synchronize_sched(); |
65 | } | 59 | } |
66 | 60 | ||
67 | static inline void rcu_force_quiescent_state(void) | 61 | #ifdef CONFIG_TINY_RCU |
62 | |||
63 | static inline void rcu_preempt_note_context_switch(void) | ||
68 | { | 64 | { |
69 | } | 65 | } |
70 | 66 | ||
71 | static inline void rcu_bh_force_quiescent_state(void) | 67 | static inline void exit_rcu(void) |
72 | { | 68 | { |
73 | } | 69 | } |
74 | 70 | ||
75 | static inline void rcu_sched_force_quiescent_state(void) | 71 | static inline int rcu_needs_cpu(int cpu) |
76 | { | 72 | { |
73 | return 0; | ||
77 | } | 74 | } |
78 | 75 | ||
79 | extern void synchronize_sched(void); | 76 | #else /* #ifdef CONFIG_TINY_RCU */ |
77 | |||
78 | void rcu_preempt_note_context_switch(void); | ||
79 | extern void exit_rcu(void); | ||
80 | int rcu_preempt_needs_cpu(void); | ||
80 | 81 | ||
81 | static inline void synchronize_rcu(void) | 82 | static inline int rcu_needs_cpu(int cpu) |
82 | { | 83 | { |
83 | synchronize_sched(); | 84 | return rcu_preempt_needs_cpu(); |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline void synchronize_rcu_bh(void) | 87 | #endif /* #else #ifdef CONFIG_TINY_RCU */ |
88 | |||
89 | static inline void rcu_note_context_switch(int cpu) | ||
87 | { | 90 | { |
88 | synchronize_sched(); | 91 | rcu_sched_qs(cpu); |
92 | rcu_preempt_note_context_switch(); | ||
89 | } | 93 | } |
90 | 94 | ||
91 | static inline void synchronize_rcu_expedited(void) | 95 | /* |
96 | * Return the number of grace periods. | ||
97 | */ | ||
98 | static inline long rcu_batches_completed(void) | ||
92 | { | 99 | { |
93 | synchronize_sched(); | 100 | return 0; |
94 | } | 101 | } |
95 | 102 | ||
96 | static inline void synchronize_rcu_bh_expedited(void) | 103 | /* |
104 | * Return the number of bottom-half grace periods. | ||
105 | */ | ||
106 | static inline long rcu_batches_completed_bh(void) | ||
97 | { | 107 | { |
98 | synchronize_sched(); | 108 | return 0; |
99 | } | 109 | } |
100 | 110 | ||
101 | struct notifier_block; | 111 | static inline void rcu_force_quiescent_state(void) |
102 | |||
103 | #ifdef CONFIG_NO_HZ | ||
104 | |||
105 | extern void rcu_enter_nohz(void); | ||
106 | extern void rcu_exit_nohz(void); | ||
107 | |||
108 | #else /* #ifdef CONFIG_NO_HZ */ | ||
109 | |||
110 | static inline void rcu_enter_nohz(void) | ||
111 | { | 112 | { |
112 | } | 113 | } |
113 | 114 | ||
114 | static inline void rcu_exit_nohz(void) | 115 | static inline void rcu_bh_force_quiescent_state(void) |
115 | { | 116 | { |
116 | } | 117 | } |
117 | 118 | ||
118 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 119 | static inline void rcu_sched_force_quiescent_state(void) |
119 | |||
120 | static inline void exit_rcu(void) | ||
121 | { | 120 | { |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline int rcu_preempt_depth(void) | 123 | static inline void rcu_cpu_stall_reset(void) |
125 | { | 124 | { |
126 | return 0; | ||
127 | } | 125 | } |
128 | 126 | ||
129 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 127 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
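The TINY_RCU mappings above (rcu_barrier() as rcu_barrier_sched(), synchronize_rcu_expedited() as synchronize_sched()) matter mostly at teardown: anything that queued callbacks with call_rcu() must drain them before its memory or module text disappears. A hedged sketch of that shape, with made-up names (struct blob, the_blob, blob_reclaim()):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	struct rcu_head rcu;
	int payload;
};

static struct blob __rcu *the_blob;

static void blob_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct blob, rcu));
}

static int __init blob_init(void)
{
	struct blob *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return -ENOMEM;
	b->payload = 42;
	rcu_assign_pointer(the_blob, b);
	return 0;
}
module_init(blob_init);

static void __exit blob_exit(void)
{
	/* No readers remain by unload time, hence the constant-true condition. */
	struct blob *b = rcu_dereference_protected(the_blob, 1);

	RCU_INIT_POINTER(the_blob, NULL);
	if (b)
		call_rcu(&b->rcu, blob_reclaim);
	rcu_barrier();	/* on TINY_RCU: one CPU, one callback list */
}
module_exit(blob_exit);

MODULE_LICENSE("GPL");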
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c0ed1c056f29..95518e628794 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -30,64 +30,23 @@ | |||
30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
32 | 32 | ||
33 | struct notifier_block; | ||
34 | |||
35 | extern void rcu_sched_qs(int cpu); | ||
36 | extern void rcu_bh_qs(int cpu); | ||
37 | extern void rcu_note_context_switch(int cpu); | 33 | extern void rcu_note_context_switch(int cpu); |
38 | extern int rcu_needs_cpu(int cpu); | 34 | extern int rcu_needs_cpu(int cpu); |
35 | extern void rcu_cpu_stall_reset(void); | ||
39 | 36 | ||
40 | #ifdef CONFIG_TREE_PREEMPT_RCU | 37 | #ifdef CONFIG_TREE_PREEMPT_RCU |
41 | 38 | ||
42 | extern void __rcu_read_lock(void); | ||
43 | extern void __rcu_read_unlock(void); | ||
44 | extern void synchronize_rcu(void); | ||
45 | extern void exit_rcu(void); | 39 | extern void exit_rcu(void); |
46 | 40 | ||
47 | /* | ||
48 | * Defined as macro as it is a very low level header | ||
49 | * included from areas that don't even know about current | ||
50 | */ | ||
51 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
52 | |||
53 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 41 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
54 | 42 | ||
55 | static inline void __rcu_read_lock(void) | ||
56 | { | ||
57 | preempt_disable(); | ||
58 | } | ||
59 | |||
60 | static inline void __rcu_read_unlock(void) | ||
61 | { | ||
62 | preempt_enable(); | ||
63 | } | ||
64 | |||
65 | #define synchronize_rcu synchronize_sched | ||
66 | |||
67 | static inline void exit_rcu(void) | 43 | static inline void exit_rcu(void) |
68 | { | 44 | { |
69 | } | 45 | } |
70 | 46 | ||
71 | static inline int rcu_preempt_depth(void) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 47 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
77 | 48 | ||
78 | static inline void __rcu_read_lock_bh(void) | ||
79 | { | ||
80 | local_bh_disable(); | ||
81 | } | ||
82 | static inline void __rcu_read_unlock_bh(void) | ||
83 | { | ||
84 | local_bh_enable(); | ||
85 | } | ||
86 | |||
87 | extern void call_rcu_sched(struct rcu_head *head, | ||
88 | void (*func)(struct rcu_head *rcu)); | ||
89 | extern void synchronize_rcu_bh(void); | 49 | extern void synchronize_rcu_bh(void); |
90 | extern void synchronize_sched(void); | ||
91 | extern void synchronize_rcu_expedited(void); | 50 | extern void synchronize_rcu_expedited(void); |
92 | 51 | ||
93 | static inline void synchronize_rcu_bh_expedited(void) | 52 | static inline void synchronize_rcu_bh_expedited(void) |
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
95 | synchronize_sched_expedited(); | 54 | synchronize_sched_expedited(); |
96 | } | 55 | } |
97 | 56 | ||
98 | extern void rcu_check_callbacks(int cpu, int user); | 57 | extern void rcu_barrier(void); |
99 | 58 | ||
100 | extern long rcu_batches_completed(void); | 59 | extern long rcu_batches_completed(void); |
101 | extern long rcu_batches_completed_bh(void); | 60 | extern long rcu_batches_completed_bh(void); |
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void); | |||
104 | extern void rcu_bh_force_quiescent_state(void); | 63 | extern void rcu_bh_force_quiescent_state(void); |
105 | extern void rcu_sched_force_quiescent_state(void); | 64 | extern void rcu_sched_force_quiescent_state(void); |
106 | 65 | ||
107 | #ifdef CONFIG_NO_HZ | ||
108 | void rcu_enter_nohz(void); | ||
109 | void rcu_exit_nohz(void); | ||
110 | #else /* CONFIG_NO_HZ */ | ||
111 | static inline void rcu_enter_nohz(void) | ||
112 | { | ||
113 | } | ||
114 | static inline void rcu_exit_nohz(void) | ||
115 | { | ||
116 | } | ||
117 | #endif /* CONFIG_NO_HZ */ | ||
118 | |||
119 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ | 66 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ |
120 | static inline int rcu_blocking_is_gp(void) | 67 | static inline int rcu_blocking_is_gp(void) |
121 | { | 68 | { |
diff --git a/include/linux/rds.h b/include/linux/rds.h index 24bce3ded9ea..91950950aa59 100644 --- a/include/linux/rds.h +++ b/include/linux/rds.h | |||
@@ -36,15 +36,6 @@ | |||
36 | 36 | ||
37 | #include <linux/types.h> | 37 | #include <linux/types.h> |
38 | 38 | ||
39 | /* These sparse annotated types shouldn't be in any user | ||
40 | * visible header file. We should clean this up rather | ||
41 | * than kludging around them. */ | ||
42 | #ifndef __KERNEL__ | ||
43 | #define __be16 u_int16_t | ||
44 | #define __be32 u_int32_t | ||
45 | #define __be64 u_int64_t | ||
46 | #endif | ||
47 | |||
48 | #define RDS_IB_ABI_VERSION 0x301 | 39 | #define RDS_IB_ABI_VERSION 0x301 |
49 | 40 | ||
50 | /* | 41 | /* |
@@ -82,6 +73,10 @@ | |||
82 | #define RDS_CMSG_RDMA_MAP 3 | 73 | #define RDS_CMSG_RDMA_MAP 3 |
83 | #define RDS_CMSG_RDMA_STATUS 4 | 74 | #define RDS_CMSG_RDMA_STATUS 4 |
84 | #define RDS_CMSG_CONG_UPDATE 5 | 75 | #define RDS_CMSG_CONG_UPDATE 5 |
76 | #define RDS_CMSG_ATOMIC_FADD 6 | ||
77 | #define RDS_CMSG_ATOMIC_CSWP 7 | ||
78 | #define RDS_CMSG_MASKED_ATOMIC_FADD 8 | ||
79 | #define RDS_CMSG_MASKED_ATOMIC_CSWP 9 | ||
85 | 80 | ||
86 | #define RDS_INFO_FIRST 10000 | 81 | #define RDS_INFO_FIRST 10000 |
87 | #define RDS_INFO_COUNTERS 10000 | 82 | #define RDS_INFO_COUNTERS 10000 |
@@ -98,9 +93,9 @@ | |||
98 | #define RDS_INFO_LAST 10010 | 93 | #define RDS_INFO_LAST 10010 |
99 | 94 | ||
100 | struct rds_info_counter { | 95 | struct rds_info_counter { |
101 | u_int8_t name[32]; | 96 | uint8_t name[32]; |
102 | u_int64_t value; | 97 | uint64_t value; |
103 | } __packed; | 98 | } __attribute__((packed)); |
104 | 99 | ||
105 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 | 100 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 |
106 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 | 101 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 |
@@ -109,56 +104,48 @@ struct rds_info_counter { | |||
109 | #define TRANSNAMSIZ 16 | 104 | #define TRANSNAMSIZ 16 |
110 | 105 | ||
111 | struct rds_info_connection { | 106 | struct rds_info_connection { |
112 | u_int64_t next_tx_seq; | 107 | uint64_t next_tx_seq; |
113 | u_int64_t next_rx_seq; | 108 | uint64_t next_rx_seq; |
114 | __be32 laddr; | ||
115 | __be32 faddr; | ||
116 | u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ | ||
117 | u_int8_t flags; | ||
118 | } __packed; | ||
119 | |||
120 | struct rds_info_flow { | ||
121 | __be32 laddr; | 109 | __be32 laddr; |
122 | __be32 faddr; | 110 | __be32 faddr; |
123 | u_int32_t bytes; | 111 | uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ |
124 | __be16 lport; | 112 | uint8_t flags; |
125 | __be16 fport; | 113 | } __attribute__((packed)); |
126 | } __packed; | ||
127 | 114 | ||
128 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 | 115 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 |
129 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 | 116 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 |
130 | 117 | ||
131 | struct rds_info_message { | 118 | struct rds_info_message { |
132 | u_int64_t seq; | 119 | uint64_t seq; |
133 | u_int32_t len; | 120 | uint32_t len; |
134 | __be32 laddr; | 121 | __be32 laddr; |
135 | __be32 faddr; | 122 | __be32 faddr; |
136 | __be16 lport; | 123 | __be16 lport; |
137 | __be16 fport; | 124 | __be16 fport; |
138 | u_int8_t flags; | 125 | uint8_t flags; |
139 | } __packed; | 126 | } __attribute__((packed)); |
140 | 127 | ||
141 | struct rds_info_socket { | 128 | struct rds_info_socket { |
142 | u_int32_t sndbuf; | 129 | uint32_t sndbuf; |
143 | __be32 bound_addr; | 130 | __be32 bound_addr; |
144 | __be32 connected_addr; | 131 | __be32 connected_addr; |
145 | __be16 bound_port; | 132 | __be16 bound_port; |
146 | __be16 connected_port; | 133 | __be16 connected_port; |
147 | u_int32_t rcvbuf; | 134 | uint32_t rcvbuf; |
148 | u_int64_t inum; | 135 | uint64_t inum; |
149 | } __packed; | 136 | } __attribute__((packed)); |
150 | 137 | ||
151 | struct rds_info_tcp_socket { | 138 | struct rds_info_tcp_socket { |
152 | __be32 local_addr; | 139 | __be32 local_addr; |
153 | __be16 local_port; | 140 | __be16 local_port; |
154 | __be32 peer_addr; | 141 | __be32 peer_addr; |
155 | __be16 peer_port; | 142 | __be16 peer_port; |
156 | u_int64_t hdr_rem; | 143 | uint64_t hdr_rem; |
157 | u_int64_t data_rem; | 144 | uint64_t data_rem; |
158 | u_int32_t last_sent_nxt; | 145 | uint32_t last_sent_nxt; |
159 | u_int32_t last_expected_una; | 146 | uint32_t last_expected_una; |
160 | u_int32_t last_seen_una; | 147 | uint32_t last_seen_una; |
161 | } __packed; | 148 | } __attribute__((packed)); |
162 | 149 | ||
163 | #define RDS_IB_GID_LEN 16 | 150 | #define RDS_IB_GID_LEN 16 |
164 | struct rds_info_rdma_connection { | 151 | struct rds_info_rdma_connection { |
@@ -212,42 +199,69 @@ struct rds_info_rdma_connection { | |||
212 | * (so that the application does not have to worry about | 199 | * (so that the application does not have to worry about |
213 | * alignment). | 200 | * alignment). |
214 | */ | 201 | */ |
215 | typedef u_int64_t rds_rdma_cookie_t; | 202 | typedef uint64_t rds_rdma_cookie_t; |
216 | 203 | ||
217 | struct rds_iovec { | 204 | struct rds_iovec { |
218 | u_int64_t addr; | 205 | uint64_t addr; |
219 | u_int64_t bytes; | 206 | uint64_t bytes; |
220 | }; | 207 | }; |
221 | 208 | ||
222 | struct rds_get_mr_args { | 209 | struct rds_get_mr_args { |
223 | struct rds_iovec vec; | 210 | struct rds_iovec vec; |
224 | u_int64_t cookie_addr; | 211 | uint64_t cookie_addr; |
225 | uint64_t flags; | 212 | uint64_t flags; |
226 | }; | 213 | }; |
227 | 214 | ||
228 | struct rds_get_mr_for_dest_args { | 215 | struct rds_get_mr_for_dest_args { |
229 | struct sockaddr_storage dest_addr; | 216 | struct sockaddr_storage dest_addr; |
230 | struct rds_iovec vec; | 217 | struct rds_iovec vec; |
231 | u_int64_t cookie_addr; | 218 | uint64_t cookie_addr; |
232 | uint64_t flags; | 219 | uint64_t flags; |
233 | }; | 220 | }; |
234 | 221 | ||
235 | struct rds_free_mr_args { | 222 | struct rds_free_mr_args { |
236 | rds_rdma_cookie_t cookie; | 223 | rds_rdma_cookie_t cookie; |
237 | u_int64_t flags; | 224 | uint64_t flags; |
238 | }; | 225 | }; |
239 | 226 | ||
240 | struct rds_rdma_args { | 227 | struct rds_rdma_args { |
241 | rds_rdma_cookie_t cookie; | 228 | rds_rdma_cookie_t cookie; |
242 | struct rds_iovec remote_vec; | 229 | struct rds_iovec remote_vec; |
243 | u_int64_t local_vec_addr; | 230 | uint64_t local_vec_addr; |
244 | u_int64_t nr_local; | 231 | uint64_t nr_local; |
245 | u_int64_t flags; | 232 | uint64_t flags; |
246 | u_int64_t user_token; | 233 | uint64_t user_token; |
234 | }; | ||
235 | |||
236 | struct rds_atomic_args { | ||
237 | rds_rdma_cookie_t cookie; | ||
238 | uint64_t local_addr; | ||
239 | uint64_t remote_addr; | ||
240 | union { | ||
241 | struct { | ||
242 | uint64_t compare; | ||
243 | uint64_t swap; | ||
244 | } cswp; | ||
245 | struct { | ||
246 | uint64_t add; | ||
247 | } fadd; | ||
248 | struct { | ||
249 | uint64_t compare; | ||
250 | uint64_t swap; | ||
251 | uint64_t compare_mask; | ||
252 | uint64_t swap_mask; | ||
253 | } m_cswp; | ||
254 | struct { | ||
255 | uint64_t add; | ||
256 | uint64_t nocarry_mask; | ||
257 | } m_fadd; | ||
258 | }; | ||
259 | uint64_t flags; | ||
260 | uint64_t user_token; | ||
247 | }; | 261 | }; |
248 | 262 | ||
249 | struct rds_rdma_notify { | 263 | struct rds_rdma_notify { |
250 | u_int64_t user_token; | 264 | uint64_t user_token; |
251 | int32_t status; | 265 | int32_t status; |
252 | }; | 266 | }; |
253 | 267 | ||
@@ -266,5 +280,6 @@ struct rds_rdma_notify { | |||
266 | #define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ | 280 | #define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ |
267 | #define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ | 281 | #define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ |
268 | #define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ | 282 | #define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ |
283 | #define RDS_RDMA_SILENT 0x0040 /* Do not interrupt remote */ | ||
269 | 284 | ||
270 | #endif /* IB_RDS_H */ | 285 | #endif /* IB_RDS_H */ |
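The new RDS_CMSG_ATOMIC_* types and struct rds_atomic_args above are driven from userspace as sendmsg() control messages on an RDS socket. The sketch below is an assumption-laden outline rather than a reference: the SOL_RDS value, the prior RDS_GET_MR cookie/remote-address exchange, and the helper name rds_remote_cswp() are supplied here for illustration and are not defined by this header.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* assumption: cmsg level used by rds-tools, not defined in this header */
#endif

/*
 * Queue a compare-and-swap against a remote word.  'cookie' and
 * 'remote_addr' are assumed to come from an earlier RDS_GET_MR exchange
 * with the peer; the previous value of the remote word is written to
 * *old_val once the operation completes.
 */
static int rds_remote_cswp(int sock, struct sockaddr_in *peer,
			   rds_rdma_cookie_t cookie, uint64_t remote_addr,
			   uint64_t *old_val, uint64_t expect, uint64_t newval)
{
	struct rds_atomic_args args;
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(struct rds_atomic_args))];
	} control;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&args, 0, sizeof(args));
	args.cookie = cookie;
	args.remote_addr = remote_addr;
	args.local_addr = (uint64_t)(unsigned long)old_val;
	args.cswp.compare = expect;		/* anonymous union member from this header */
	args.cswp.swap = newval;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = peer;
	msg.msg_namelen = sizeof(*peer);
	msg.msg_control = control.buf;
	msg.msg_controllen = sizeof(control.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ATOMIC_CSWP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(args));
	memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

	return sendmsg(sock, &msg, 0);
}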
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index bc8c3881c729..f31db2368782 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_PM_TRACE | 4 | #ifdef CONFIG_PM_TRACE |
5 | #include <asm/resume-trace.h> | 5 | #include <asm/resume-trace.h> |
6 | #include <linux/types.h> | ||
6 | 7 | ||
7 | extern int pm_trace_enabled; | 8 | extern int pm_trace_enabled; |
8 | 9 | ||
@@ -14,6 +15,7 @@ static inline int pm_trace_is_enabled(void) | |||
14 | struct device; | 15 | struct device; |
15 | extern void set_trace_device(struct device *); | 16 | extern void set_trace_device(struct device *); |
16 | extern void generate_resume_trace(const void *tracedata, unsigned int user); | 17 | extern void generate_resume_trace(const void *tracedata, unsigned int user); |
18 | extern int show_trace_dev_match(char *buf, size_t size); | ||
17 | 19 | ||
18 | #define TRACE_DEVICE(dev) do { \ | 20 | #define TRACE_DEVICE(dev) do { \ |
19 | if (pm_trace_enabled) \ | 21 | if (pm_trace_enabled) \ |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 58d44491880f..d42f274418b8 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/if_link.h> | 6 | #include <linux/if_link.h> |
7 | #include <linux/if_addr.h> | 7 | #include <linux/if_addr.h> |
8 | #include <linux/neighbour.h> | 8 | #include <linux/neighbour.h> |
9 | #include <linux/netdevice.h> | ||
9 | 10 | ||
10 | /* rtnetlink families. Values up to 127 are reserved for real address | 11 | /* rtnetlink families. Values up to 127 are reserved for real address |
11 | * families, values above 128 may be used arbitrarily. | 12 | * families, values above 128 may be used arbitrarily. |
@@ -749,6 +750,35 @@ extern int rtnl_is_locked(void); | |||
749 | extern int lockdep_rtnl_is_held(void); | 750 | extern int lockdep_rtnl_is_held(void); |
750 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 751 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
751 | 752 | ||
753 | /** | ||
754 | * rcu_dereference_rtnl - rcu_dereference with debug checking | ||
755 | * @p: The pointer to read, prior to dereferencing | ||
756 | * | ||
757 | * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() | ||
758 | * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference() | ||
759 | */ | ||
760 | #define rcu_dereference_rtnl(p) \ | ||
761 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
762 | lockdep_rtnl_is_held()) | ||
763 | |||
764 | /** | ||
765 | * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL | ||
766 | * @p: The pointer to read, prior to dereferencing | ||
767 | * | ||
768 | * Return the value of the specified RCU-protected pointer, but omit | ||
769 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because | ||
770 | * caller holds RTNL. | ||
771 | */ | ||
772 | #define rtnl_dereference(p) \ | ||
773 | rcu_dereference_protected(p, lockdep_rtnl_is_held()) | ||
774 | |||
775 | static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) | ||
776 | { | ||
777 | return rtnl_dereference(dev->ingress_queue); | ||
778 | } | ||
779 | |||
780 | extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); | ||
781 | |||
752 | extern void rtnetlink_init(void); | 782 | extern void rtnetlink_init(void); |
753 | extern void __rtnl_unlock(void); | 783 | extern void __rtnl_unlock(void); |
754 | 784 | ||
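A usage sketch contrasting the two new helpers above; struct port_cfg, the port_cfg pointer and both functions are hypothetical, with updates assumed to be serialized by RTNL:

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct port_cfg {			/* hypothetical config blob */
	u32 mtu_override;
};

static struct port_cfg __rcu *port_cfg;	/* writers hold RTNL */

/* Fast path: may run under either rcu_read_lock() or RTNL. */
static u32 port_mtu_override(void)
{
	struct port_cfg *cfg;
	u32 ret = 0;

	rcu_read_lock();
	cfg = rcu_dereference_rtnl(port_cfg);
	if (cfg)
		ret = cfg->mtu_override;
	rcu_read_unlock();
	return ret;
}

/* Control path: RTNL is held, so rtnl_dereference() skips the barriers. */
static int port_set_mtu_override(u32 val)
{
	struct port_cfg *old, *new;

	ASSERT_RTNL();
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->mtu_override = val;
	old = rtnl_dereference(port_cfg);
	rcu_assign_pointer(port_cfg, new);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
	return 0;
}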
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1e2a6db2d7dd..56154bbb8da9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings; | |||
336 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | 336 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, |
337 | void __user *buffer, | 337 | void __user *buffer, |
338 | size_t *lenp, loff_t *ppos); | 338 | size_t *lenp, loff_t *ppos); |
339 | #else | ||
340 | /* Avoid need for ifdefs elsewhere in the code */ | ||
341 | enum { sysctl_hung_task_timeout_secs = 0 }; | ||
339 | #endif | 342 | #endif |
340 | 343 | ||
341 | /* Attach to any functions which should be ignored in wchan output. */ | 344 | /* Attach to any functions which should be ignored in wchan output. */ |
@@ -875,6 +878,7 @@ enum sched_domain_level { | |||
875 | SD_LV_NONE = 0, | 878 | SD_LV_NONE = 0, |
876 | SD_LV_SIBLING, | 879 | SD_LV_SIBLING, |
877 | SD_LV_MC, | 880 | SD_LV_MC, |
881 | SD_LV_BOOK, | ||
878 | SD_LV_CPU, | 882 | SD_LV_CPU, |
879 | SD_LV_NODE, | 883 | SD_LV_NODE, |
880 | SD_LV_ALLNODES, | 884 | SD_LV_ALLNODES, |
@@ -1160,6 +1164,13 @@ struct sched_rt_entity { | |||
1160 | 1164 | ||
1161 | struct rcu_node; | 1165 | struct rcu_node; |
1162 | 1166 | ||
1167 | enum perf_event_task_context { | ||
1168 | perf_invalid_context = -1, | ||
1169 | perf_hw_context = 0, | ||
1170 | perf_sw_context, | ||
1171 | perf_nr_task_contexts, | ||
1172 | }; | ||
1173 | |||
1163 | struct task_struct { | 1174 | struct task_struct { |
1164 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | 1175 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
1165 | void *stack; | 1176 | void *stack; |
@@ -1202,11 +1213,13 @@ struct task_struct { | |||
1202 | unsigned int policy; | 1213 | unsigned int policy; |
1203 | cpumask_t cpus_allowed; | 1214 | cpumask_t cpus_allowed; |
1204 | 1215 | ||
1205 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1216 | #ifdef CONFIG_PREEMPT_RCU |
1206 | int rcu_read_lock_nesting; | 1217 | int rcu_read_lock_nesting; |
1207 | char rcu_read_unlock_special; | 1218 | char rcu_read_unlock_special; |
1208 | struct rcu_node *rcu_blocked_node; | ||
1209 | struct list_head rcu_node_entry; | 1219 | struct list_head rcu_node_entry; |
1220 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
1221 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1222 | struct rcu_node *rcu_blocked_node; | ||
1210 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1223 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1211 | 1224 | ||
1212 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1225 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
@@ -1288,9 +1301,9 @@ struct task_struct { | |||
1288 | struct list_head cpu_timers[3]; | 1301 | struct list_head cpu_timers[3]; |
1289 | 1302 | ||
1290 | /* process credentials */ | 1303 | /* process credentials */ |
1291 | const struct cred *real_cred; /* objective and real subjective task | 1304 | const struct cred __rcu *real_cred; /* objective and real subjective task |
1292 | * credentials (COW) */ | 1305 | * credentials (COW) */ |
1293 | const struct cred *cred; /* effective (overridable) subjective task | 1306 | const struct cred __rcu *cred; /* effective (overridable) subjective task |
1294 | * credentials (COW) */ | 1307 | * credentials (COW) */ |
1295 | struct mutex cred_guard_mutex; /* guard against foreign influences on | 1308 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1296 | * credential calculations | 1309 | * credential calculations |
@@ -1418,7 +1431,7 @@ struct task_struct { | |||
1418 | #endif | 1431 | #endif |
1419 | #ifdef CONFIG_CGROUPS | 1432 | #ifdef CONFIG_CGROUPS |
1420 | /* Control Group info protected by css_set_lock */ | 1433 | /* Control Group info protected by css_set_lock */ |
1421 | struct css_set *cgroups; | 1434 | struct css_set __rcu *cgroups; |
1422 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ | 1435 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ |
1423 | struct list_head cg_list; | 1436 | struct list_head cg_list; |
1424 | #endif | 1437 | #endif |
@@ -1431,7 +1444,7 @@ struct task_struct { | |||
1431 | struct futex_pi_state *pi_state_cache; | 1444 | struct futex_pi_state *pi_state_cache; |
1432 | #endif | 1445 | #endif |
1433 | #ifdef CONFIG_PERF_EVENTS | 1446 | #ifdef CONFIG_PERF_EVENTS |
1434 | struct perf_event_context *perf_event_ctxp; | 1447 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
1435 | struct mutex perf_event_mutex; | 1448 | struct mutex perf_event_mutex; |
1436 | struct list_head perf_event_list; | 1449 | struct list_head perf_event_list; |
1437 | #endif | 1450 | #endif |
@@ -1681,8 +1694,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1681 | /* | 1694 | /* |
1682 | * Per process flags | 1695 | * Per process flags |
1683 | */ | 1696 | */ |
1684 | #define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ | 1697 | #define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ |
1685 | /* Not implemented yet, only for 486*/ | ||
1686 | #define PF_STARTING 0x00000002 /* being created */ | 1698 | #define PF_STARTING 0x00000002 /* being created */ |
1687 | #define PF_EXITING 0x00000004 /* getting shut down */ | 1699 | #define PF_EXITING 0x00000004 /* getting shut down */ |
1688 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1700 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
@@ -1740,7 +1752,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1740 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1752 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1741 | #define used_math() tsk_used_math(current) | 1753 | #define used_math() tsk_used_math(current) |
1742 | 1754 | ||
1743 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1755 | #ifdef CONFIG_PREEMPT_RCU |
1744 | 1756 | ||
1745 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | 1757 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ |
1746 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ | 1758 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ |
@@ -1749,7 +1761,9 @@ static inline void rcu_copy_process(struct task_struct *p) | |||
1749 | { | 1761 | { |
1750 | p->rcu_read_lock_nesting = 0; | 1762 | p->rcu_read_lock_nesting = 0; |
1751 | p->rcu_read_unlock_special = 0; | 1763 | p->rcu_read_unlock_special = 0; |
1764 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1752 | p->rcu_blocked_node = NULL; | 1765 | p->rcu_blocked_node = NULL; |
1766 | #endif | ||
1753 | INIT_LIST_HEAD(&p->rcu_node_entry); | 1767 | INIT_LIST_HEAD(&p->rcu_node_entry); |
1754 | } | 1768 | } |
1755 | 1769 | ||
@@ -1826,6 +1840,19 @@ extern void sched_clock_idle_sleep_event(void); | |||
1826 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1840 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); |
1827 | #endif | 1841 | #endif |
1828 | 1842 | ||
1843 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
1844 | /* | ||
1845 | * An interface for runtime opt-in to irq time accounting based on sched_clock. | ||
1846 | * The explicit opt-in avoids a performance penalty on systems with | ||
1847 | * slow sched_clocks. | ||
1848 | */ | ||
1849 | extern void enable_sched_clock_irqtime(void); | ||
1850 | extern void disable_sched_clock_irqtime(void); | ||
1851 | #else | ||
1852 | static inline void enable_sched_clock_irqtime(void) {} | ||
1853 | static inline void disable_sched_clock_irqtime(void) {} | ||
1854 | #endif | ||
1855 | |||
1829 | extern unsigned long long | 1856 | extern unsigned long long |
1830 | task_sched_runtime(struct task_struct *task); | 1857 | task_sched_runtime(struct task_struct *task); |
1831 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | 1858 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); |
@@ -2367,9 +2394,9 @@ extern int __cond_resched_lock(spinlock_t *lock); | |||
2367 | 2394 | ||
2368 | extern int __cond_resched_softirq(void); | 2395 | extern int __cond_resched_softirq(void); |
2369 | 2396 | ||
2370 | #define cond_resched_softirq() ({ \ | 2397 | #define cond_resched_softirq() ({ \ |
2371 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ | 2398 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
2372 | __cond_resched_softirq(); \ | 2399 | __cond_resched_softirq(); \ |
2373 | }) | 2400 | }) |
2374 | 2401 | ||
2375 | /* | 2402 | /* |
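
The CONFIG_IRQ_TIME_ACCOUNTING block above only declares the opt-in interface; a minimal sketch of how an architecture's clock setup might call it, where my_sched_clock_is_fast() is an assumed platform helper (only enable_sched_clock_irqtime() comes from the hunk above):

#include <linux/init.h>
#include <linux/sched.h>

static int __init my_arch_irqtime_init(void)
{
        /*
         * Only opt in when sched_clock() is cheap; the accounting adds a
         * sched_clock() read on every IRQ entry and exit.
         */
        if (my_sched_clock_is_fast())           /* assumed platform helper */
                enable_sched_clock_irqtime();
        return 0;
}
early_initcall(my_arch_irqtime_init);
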
diff --git a/include/linux/security.h b/include/linux/security.h index a22219afff09..b8246a8df7d2 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot, | |||
74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); | 74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); |
75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | 75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, |
76 | unsigned long arg4, unsigned long arg5); | 76 | unsigned long arg4, unsigned long arg5); |
77 | extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); | 77 | extern int cap_task_setscheduler(struct task_struct *p); |
78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); | 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); |
79 | extern int cap_task_setnice(struct task_struct *p, int nice); | 79 | extern int cap_task_setnice(struct task_struct *p, int nice); |
80 | extern int cap_syslog(int type, bool from_file); | 80 | extern int cap_syslog(int type, bool from_file); |
@@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
959 | * Sets the new child socket's sid to the openreq sid. | 959 | * Sets the new child socket's sid to the openreq sid. |
960 | * @inet_conn_established: | 960 | * @inet_conn_established: |
961 | * Sets the connection's peersid to the secmark on skb. | 961 | * Sets the connection's peersid to the secmark on skb. |
962 | * @secmark_relabel_packet: | ||
963 | * Check if the process should be allowed to relabel packets to the given secid. | ||
964 | * @secmark_refcount_inc: | ||
965 | * Tells the LSM to increment the number of secmark labeling rules loaded. | ||
966 | * @secmark_refcount_dec: | ||
967 | * Tells the LSM to decrement the number of secmark labeling rules loaded. | ||
962 | * @req_classify_flow: | 968 | * @req_classify_flow: |
963 | * Sets the flow's sid to the openreq sid. | 969 | * Sets the flow's sid to the openreq sid. |
964 | * @tun_dev_create: | 970 | * @tun_dev_create: |
@@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1279 | * Return 0 if permission is granted. | 1285 | * Return 0 if permission is granted. |
1280 | * | 1286 | * |
1281 | * @secid_to_secctx: | 1287 | * @secid_to_secctx: |
1282 | * Convert secid to security context. | 1288 | * Convert secid to security context. If secdata is NULL the length of |
1289 | * the result will be returned in seclen, but no secdata will be returned. | ||
1290 | * Note that the length may change between the call that checks the | ||
1291 | * length and the later call that actually allocates and returns the secdata. | ||
1283 | * @secid contains the security ID. | 1292 | * @secid contains the security ID. |
1284 | * @secdata contains the pointer that stores the converted security context. | 1293 | * @secdata contains the pointer that stores the converted security context. |
1294 | * @seclen pointer which contains the length of the data | ||
1285 | * @secctx_to_secid: | 1295 | * @secctx_to_secid: |
1286 | * Convert security context to secid. | 1296 | * Convert security context to secid. |
1287 | * @secid contains the pointer to the generated security ID. | 1297 | * @secid contains the pointer to the generated security ID. |
@@ -1501,8 +1511,7 @@ struct security_operations { | |||
1501 | int (*task_getioprio) (struct task_struct *p); | 1511 | int (*task_getioprio) (struct task_struct *p); |
1502 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, | 1512 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, |
1503 | struct rlimit *new_rlim); | 1513 | struct rlimit *new_rlim); |
1504 | int (*task_setscheduler) (struct task_struct *p, int policy, | 1514 | int (*task_setscheduler) (struct task_struct *p); |
1505 | struct sched_param *lp); | ||
1506 | int (*task_getscheduler) (struct task_struct *p); | 1515 | int (*task_getscheduler) (struct task_struct *p); |
1507 | int (*task_movememory) (struct task_struct *p); | 1516 | int (*task_movememory) (struct task_struct *p); |
1508 | int (*task_kill) (struct task_struct *p, | 1517 | int (*task_kill) (struct task_struct *p, |
@@ -1594,6 +1603,9 @@ struct security_operations { | |||
1594 | struct request_sock *req); | 1603 | struct request_sock *req); |
1595 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); | 1604 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); |
1596 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); | 1605 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); |
1606 | int (*secmark_relabel_packet) (u32 secid); | ||
1607 | void (*secmark_refcount_inc) (void); | ||
1608 | void (*secmark_refcount_dec) (void); | ||
1597 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); | 1609 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); |
1598 | int (*tun_dev_create)(void); | 1610 | int (*tun_dev_create)(void); |
1599 | void (*tun_dev_post_create)(struct sock *sk); | 1611 | void (*tun_dev_post_create)(struct sock *sk); |
@@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio); | |||
1752 | int security_task_getioprio(struct task_struct *p); | 1764 | int security_task_getioprio(struct task_struct *p); |
1753 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, | 1765 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, |
1754 | struct rlimit *new_rlim); | 1766 | struct rlimit *new_rlim); |
1755 | int security_task_setscheduler(struct task_struct *p, | 1767 | int security_task_setscheduler(struct task_struct *p); |
1756 | int policy, struct sched_param *lp); | ||
1757 | int security_task_getscheduler(struct task_struct *p); | 1768 | int security_task_getscheduler(struct task_struct *p); |
1758 | int security_task_movememory(struct task_struct *p); | 1769 | int security_task_movememory(struct task_struct *p); |
1759 | int security_task_kill(struct task_struct *p, struct siginfo *info, | 1770 | int security_task_kill(struct task_struct *p, struct siginfo *info, |
@@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p, | |||
2320 | return 0; | 2331 | return 0; |
2321 | } | 2332 | } |
2322 | 2333 | ||
2323 | static inline int security_task_setscheduler(struct task_struct *p, | 2334 | static inline int security_task_setscheduler(struct task_struct *p) |
2324 | int policy, | ||
2325 | struct sched_param *lp) | ||
2326 | { | 2335 | { |
2327 | return cap_task_setscheduler(p, policy, lp); | 2336 | return cap_task_setscheduler(p); |
2328 | } | 2337 | } |
2329 | 2338 | ||
2330 | static inline int security_task_getscheduler(struct task_struct *p) | 2339 | static inline int security_task_getscheduler(struct task_struct *p) |
@@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk, | |||
2551 | const struct request_sock *req); | 2560 | const struct request_sock *req); |
2552 | void security_inet_conn_established(struct sock *sk, | 2561 | void security_inet_conn_established(struct sock *sk, |
2553 | struct sk_buff *skb); | 2562 | struct sk_buff *skb); |
2563 | int security_secmark_relabel_packet(u32 secid); | ||
2564 | void security_secmark_refcount_inc(void); | ||
2565 | void security_secmark_refcount_dec(void); | ||
2554 | int security_tun_dev_create(void); | 2566 | int security_tun_dev_create(void); |
2555 | void security_tun_dev_post_create(struct sock *sk); | 2567 | void security_tun_dev_post_create(struct sock *sk); |
2556 | int security_tun_dev_attach(struct sock *sk); | 2568 | int security_tun_dev_attach(struct sock *sk); |
@@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk, | |||
2705 | { | 2717 | { |
2706 | } | 2718 | } |
2707 | 2719 | ||
2720 | static inline int security_secmark_relabel_packet(u32 secid) | ||
2721 | { | ||
2722 | return 0; | ||
2723 | } | ||
2724 | |||
2725 | static inline void security_secmark_refcount_inc(void) | ||
2726 | { | ||
2727 | } | ||
2728 | |||
2729 | static inline void security_secmark_refcount_dec(void) | ||
2730 | { | ||
2731 | } | ||
2732 | |||
2708 | static inline int security_tun_dev_create(void) | 2733 | static inline int security_tun_dev_create(void) |
2709 | { | 2734 | { |
2710 | return 0; | 2735 | return 0; |
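
The clarified @secid_to_secctx text above describes a query-then-fetch pattern; a rough sketch of a caller using the security_secid_to_secctx()/security_release_secctx() wrappers (report_secctx() itself is hypothetical):

#include <linux/kernel.h>
#include <linux/security.h>

static int report_secctx(u32 secid)
{
        char *ctx;
        u32 len;
        int err;

        /* First call: secdata is NULL, so only the length comes back. */
        err = security_secid_to_secctx(secid, NULL, &len);
        if (err)
                return err;

        /* Second call allocates the context; the length may have changed. */
        err = security_secid_to_secctx(secid, &ctx, &len);
        if (err)
                return err;

        pr_info("secid %u maps to %.*s\n", secid, (int)len, ctx);
        security_release_secctx(ctx, len);
        return 0;
}
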
diff --git a/include/linux/selection.h b/include/linux/selection.h index 8cdaa1151d2e..85193aa8c1e3 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h | |||
@@ -39,5 +39,6 @@ extern void putconsxy(struct vc_data *vc, unsigned char *p); | |||
39 | 39 | ||
40 | extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); | 40 | extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); |
41 | extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); | 41 | extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); |
42 | extern void vcs_scr_updated(struct vc_data *vc); | ||
42 | 43 | ||
43 | #endif | 44 | #endif |
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 82e0f26a1299..44f459612690 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
@@ -21,74 +21,11 @@ struct kern_ipc_perm; | |||
21 | #ifdef CONFIG_SECURITY_SELINUX | 21 | #ifdef CONFIG_SECURITY_SELINUX |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * selinux_string_to_sid - map a security context string to a security ID | ||
25 | * @str: the security context string to be mapped | ||
26 | * @sid: ID value returned via this. | ||
27 | * | ||
28 | * Returns 0 if successful, with the SID stored in sid. A value | ||
29 | * of zero for sid indicates no SID could be determined (but no error | ||
30 | * occurred). | ||
31 | */ | ||
32 | int selinux_string_to_sid(char *str, u32 *sid); | ||
33 | |||
34 | /** | ||
35 | * selinux_secmark_relabel_packet_permission - secmark permission check | ||
36 | * @sid: SECMARK ID value to be applied to network packet | ||
37 | * | ||
38 | * Returns 0 if the current task is allowed to set the SECMARK label of | ||
39 | * packets with the supplied security ID. Note that it is implicit that | ||
40 | * the packet is always being relabeled from the default unlabeled value, | ||
41 | * and that the access control decision is made in the AVC. | ||
42 | */ | ||
43 | int selinux_secmark_relabel_packet_permission(u32 sid); | ||
44 | |||
45 | /** | ||
46 | * selinux_secmark_refcount_inc - increments the secmark use counter | ||
47 | * | ||
48 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
49 | * when to apply SECMARK label access checks to network packets. This | ||
50 | * function increments this reference count to indicate that a new SECMARK | ||
51 | * target has been configured. | ||
52 | */ | ||
53 | void selinux_secmark_refcount_inc(void); | ||
54 | |||
55 | /** | ||
56 | * selinux_secmark_refcount_dec - decrements the secmark use counter | ||
57 | * | ||
58 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
59 | * when to apply SECMARK label access checks to network packets. This | ||
60 | * function decrements this reference count to indicate that one of the | ||
61 | * existing SECMARK targets has been removed/flushed. | ||
62 | */ | ||
63 | void selinux_secmark_refcount_dec(void); | ||
64 | |||
65 | /** | ||
66 | * selinux_is_enabled - is SELinux enabled? | 24 | * selinux_is_enabled - is SELinux enabled? |
67 | */ | 25 | */ |
68 | bool selinux_is_enabled(void); | 26 | bool selinux_is_enabled(void); |
69 | #else | 27 | #else |
70 | 28 | ||
71 | static inline int selinux_string_to_sid(const char *str, u32 *sid) | ||
72 | { | ||
73 | *sid = 0; | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static inline int selinux_secmark_relabel_packet_permission(u32 sid) | ||
78 | { | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static inline void selinux_secmark_refcount_inc(void) | ||
83 | { | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | static inline void selinux_secmark_refcount_dec(void) | ||
88 | { | ||
89 | return; | ||
90 | } | ||
91 | |||
92 | static inline bool selinux_is_enabled(void) | 29 | static inline bool selinux_is_enabled(void) |
93 | { | 30 | { |
94 | return false; | 31 | return false; |
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 7638deaaba65..97f5b45bbc07 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
@@ -35,6 +35,8 @@ struct plat_serial8250_port { | |||
35 | void (*set_termios)(struct uart_port *, | 35 | void (*set_termios)(struct uart_port *, |
36 | struct ktermios *new, | 36 | struct ktermios *new, |
37 | struct ktermios *old); | 37 | struct ktermios *old); |
38 | void (*pm)(struct uart_port *, unsigned int state, | ||
39 | unsigned old); | ||
38 | }; | 40 | }; |
39 | 41 | ||
40 | /* | 42 | /* |
@@ -76,5 +78,11 @@ extern int serial8250_find_port_for_earlycon(void); | |||
76 | extern int setup_early_serial8250_console(char *cmdline); | 78 | extern int setup_early_serial8250_console(char *cmdline); |
77 | extern void serial8250_do_set_termios(struct uart_port *port, | 79 | extern void serial8250_do_set_termios(struct uart_port *port, |
78 | struct ktermios *termios, struct ktermios *old); | 80 | struct ktermios *termios, struct ktermios *old); |
81 | extern void serial8250_do_pm(struct uart_port *port, unsigned int state, | ||
82 | unsigned int oldstate); | ||
83 | |||
84 | extern void serial8250_set_isa_configurator(void (*v) | ||
85 | (int port, struct uart_port *up, | ||
86 | unsigned short *capabilities)); | ||
79 | 87 | ||
80 | #endif | 88 | #endif |
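
The new ->pm hook and serial8250_do_pm() above let board code wrap the driver's default power handling; a sketch of a board file doing so, where my_board_uart_clk_enable()/my_board_uart_clk_disable() and the resource values are assumptions:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static void my_board_serial_pm(struct uart_port *port, unsigned int state,
                               unsigned int old)
{
        if (state == 0)                         /* port being powered up */
                my_board_uart_clk_enable();

        serial8250_do_pm(port, state, old);     /* keep the default behaviour */

        if (state == 3)                         /* port being powered down */
                my_board_uart_clk_disable();
}

static struct plat_serial8250_port my_board_uart_data[] = {
        {
                .mapbase = 0x44e09000,          /* assumed register base */
                .irq     = 72,                  /* assumed interrupt */
                .uartclk = 48000000,
                .iotype  = UPIO_MEM,
                .flags   = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
                .pm      = my_board_serial_pm,
        },
        { },
};
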
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 563e23400913..212eb4c67797 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -196,6 +196,9 @@ | |||
196 | /* High Speed UART for Medfield */ | 196 | /* High Speed UART for Medfield */ |
197 | #define PORT_MFD 95 | 197 | #define PORT_MFD 95 |
198 | 198 | ||
199 | /* TI OMAP-UART */ | ||
200 | #define PORT_OMAP 96 | ||
201 | |||
199 | #ifdef __KERNEL__ | 202 | #ifdef __KERNEL__ |
200 | 203 | ||
201 | #include <linux/compiler.h> | 204 | #include <linux/compiler.h> |
@@ -289,6 +292,8 @@ struct uart_port { | |||
289 | void (*set_termios)(struct uart_port *, | 292 | void (*set_termios)(struct uart_port *, |
290 | struct ktermios *new, | 293 | struct ktermios *new, |
291 | struct ktermios *old); | 294 | struct ktermios *old); |
295 | void (*pm)(struct uart_port *, unsigned int state, | ||
296 | unsigned int old); | ||
292 | unsigned int irq; /* irq number */ | 297 | unsigned int irq; /* irq number */ |
293 | unsigned long irqflags; /* irq flags */ | 298 | unsigned long irqflags; /* irq flags */ |
294 | unsigned int uartclk; /* base uart clock */ | 299 | unsigned int uartclk; /* base uart clock */ |
@@ -411,6 +416,14 @@ unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios | |||
411 | unsigned int max); | 416 | unsigned int max); |
412 | unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); | 417 | unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); |
413 | 418 | ||
419 | /* Base timer interval for polling */ | ||
420 | static inline int uart_poll_timeout(struct uart_port *port) | ||
421 | { | ||
422 | int timeout = port->timeout; | ||
423 | |||
424 | return timeout > 6 ? (timeout / 2 - 2) : 1; | ||
425 | } | ||
426 | |||
414 | /* | 427 | /* |
415 | * Console helpers. | 428 | * Console helpers. |
416 | */ | 429 | */ |
diff --git a/include/linux/serio.h b/include/linux/serio.h index b5552568178d..e26f4788845f 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h | |||
@@ -41,7 +41,9 @@ struct serio { | |||
41 | int (*start)(struct serio *); | 41 | int (*start)(struct serio *); |
42 | void (*stop)(struct serio *); | 42 | void (*stop)(struct serio *); |
43 | 43 | ||
44 | struct serio *parent, *child; | 44 | struct serio *parent; |
45 | struct list_head child_node; /* Entry in parent->children list */ | ||
46 | struct list_head children; | ||
45 | unsigned int depth; /* level of nesting in serio hierarchy */ | 47 | unsigned int depth; /* level of nesting in serio hierarchy */ |
46 | 48 | ||
47 | struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ | 49 | struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ |
@@ -54,10 +56,9 @@ struct serio { | |||
54 | #define to_serio_port(d) container_of(d, struct serio, dev) | 56 | #define to_serio_port(d) container_of(d, struct serio, dev) |
55 | 57 | ||
56 | struct serio_driver { | 58 | struct serio_driver { |
57 | void *private; | 59 | const char *description; |
58 | char *description; | ||
59 | 60 | ||
60 | struct serio_device_id *id_table; | 61 | const struct serio_device_id *id_table; |
61 | bool manual_bind; | 62 | bool manual_bind; |
62 | 63 | ||
63 | void (*write_wakeup)(struct serio *); | 64 | void (*write_wakeup)(struct serio *); |
@@ -197,5 +198,6 @@ static inline void serio_continue_rx(struct serio *serio) | |||
197 | #define SERIO_W8001 0x39 | 198 | #define SERIO_W8001 0x39 |
198 | #define SERIO_DYNAPRO 0x3a | 199 | #define SERIO_DYNAPRO 0x3a |
199 | #define SERIO_HAMPSHIRE 0x3b | 200 | #define SERIO_HAMPSHIRE 0x3b |
201 | #define SERIO_PS2MULT 0x3c | ||
200 | 202 | ||
201 | #endif | 203 | #endif |
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 875ce50719a9..4dca992f3093 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h | |||
@@ -4,11 +4,20 @@ | |||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/seq_file.h> | 5 | #include <linux/seq_file.h> |
6 | #include <linux/cpufreq.h> | 6 | #include <linux/cpufreq.h> |
7 | #include <linux/types.h> | ||
8 | #include <linux/kref.h> | ||
7 | #include <linux/clk.h> | 9 | #include <linux/clk.h> |
8 | #include <linux/err.h> | 10 | #include <linux/err.h> |
9 | 11 | ||
10 | struct clk; | 12 | struct clk; |
11 | 13 | ||
14 | struct clk_mapping { | ||
15 | phys_addr_t phys; | ||
16 | void __iomem *base; | ||
17 | unsigned long len; | ||
18 | struct kref ref; | ||
19 | }; | ||
20 | |||
12 | struct clk_ops { | 21 | struct clk_ops { |
13 | void (*init)(struct clk *clk); | 22 | void (*init)(struct clk *clk); |
14 | int (*enable)(struct clk *clk); | 23 | int (*enable)(struct clk *clk); |
@@ -21,9 +30,6 @@ struct clk_ops { | |||
21 | 30 | ||
22 | struct clk { | 31 | struct clk { |
23 | struct list_head node; | 32 | struct list_head node; |
24 | const char *name; | ||
25 | int id; | ||
26 | |||
27 | struct clk *parent; | 33 | struct clk *parent; |
28 | struct clk **parent_table; /* list of parents to */ | 34 | struct clk **parent_table; /* list of parents to */ |
29 | unsigned short parent_num; /* choose between */ | 35 | unsigned short parent_num; /* choose between */ |
@@ -45,7 +51,9 @@ struct clk { | |||
45 | unsigned long arch_flags; | 51 | unsigned long arch_flags; |
46 | void *priv; | 52 | void *priv; |
47 | struct dentry *dentry; | 53 | struct dentry *dentry; |
54 | struct clk_mapping *mapping; | ||
48 | struct cpufreq_frequency_table *freq_table; | 55 | struct cpufreq_frequency_table *freq_table; |
56 | unsigned int nr_freqs; | ||
49 | }; | 57 | }; |
50 | 58 | ||
51 | #define CLK_ENABLE_ON_INIT (1 << 0) | 59 | #define CLK_ENABLE_ON_INIT (1 << 0) |
@@ -111,6 +119,9 @@ int clk_rate_table_find(struct clk *clk, | |||
111 | struct cpufreq_frequency_table *freq_table, | 119 | struct cpufreq_frequency_table *freq_table, |
112 | unsigned long rate); | 120 | unsigned long rate); |
113 | 121 | ||
122 | long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, | ||
123 | unsigned int div_max, unsigned long rate); | ||
124 | |||
114 | #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ | 125 | #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ |
115 | { \ | 126 | { \ |
116 | .parent = _parent, \ | 127 | .parent = _parent, \ |
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index 0d6cd38e673d..b4f183a31f13 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h | |||
@@ -20,6 +20,12 @@ struct intc_group { | |||
20 | 20 | ||
21 | #define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } | 21 | #define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } |
22 | 22 | ||
23 | struct intc_subgroup { | ||
24 | unsigned long reg, reg_width; | ||
25 | intc_enum parent_id; | ||
26 | intc_enum enum_ids[32]; | ||
27 | }; | ||
28 | |||
23 | struct intc_mask_reg { | 29 | struct intc_mask_reg { |
24 | unsigned long set_reg, clr_reg, reg_width; | 30 | unsigned long set_reg, clr_reg, reg_width; |
25 | intc_enum enum_ids[32]; | 31 | intc_enum enum_ids[32]; |
@@ -69,9 +75,12 @@ struct intc_hw_desc { | |||
69 | unsigned int nr_sense_regs; | 75 | unsigned int nr_sense_regs; |
70 | struct intc_mask_reg *ack_regs; | 76 | struct intc_mask_reg *ack_regs; |
71 | unsigned int nr_ack_regs; | 77 | unsigned int nr_ack_regs; |
78 | struct intc_subgroup *subgroups; | ||
79 | unsigned int nr_subgroups; | ||
72 | }; | 80 | }; |
73 | 81 | ||
74 | #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) | 82 | #define _INTC_ARRAY(a) a, a == NULL ? 0 : sizeof(a)/sizeof(*a) |
83 | |||
75 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ | 84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ |
76 | prio_regs, sense_regs, ack_regs) \ | 85 | prio_regs, sense_regs, ack_regs) \ |
77 | { \ | 86 | { \ |
@@ -105,8 +114,11 @@ struct intc_desc symbol __initdata = { \ | |||
105 | prio_regs, sense_regs, ack_regs), \ | 114 | prio_regs, sense_regs, ack_regs), \ |
106 | } | 115 | } |
107 | 116 | ||
108 | int __init register_intc_controller(struct intc_desc *desc); | 117 | int register_intc_controller(struct intc_desc *desc); |
118 | void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs); | ||
109 | int intc_set_priority(unsigned int irq, unsigned int prio); | 119 | int intc_set_priority(unsigned int irq, unsigned int prio); |
120 | int intc_irq_lookup(const char *chipname, intc_enum enum_id); | ||
121 | void intc_finalize(void); | ||
110 | 122 | ||
111 | #ifdef CONFIG_INTC_USERIMASK | 123 | #ifdef CONFIG_INTC_USERIMASK |
112 | int register_intc_userimask(unsigned long addr); | 124 | int register_intc_userimask(unsigned long addr); |
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h index 07c08af9f8f6..30cae70874f4 100644 --- a/include/linux/sh_pfc.h +++ b/include/linux/sh_pfc.h | |||
@@ -92,5 +92,6 @@ struct pinmux_info { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | int register_pinmux(struct pinmux_info *pip); | 94 | int register_pinmux(struct pinmux_info *pip); |
95 | int unregister_pinmux(struct pinmux_info *pip); | ||
95 | 96 | ||
96 | #endif /* __SH_PFC_H */ | 97 | #endif /* __SH_PFC_H */ |
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index b363b916c909..3ff4961da9b5 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h | |||
@@ -33,6 +33,7 @@ struct signalfd_siginfo { | |||
33 | __u64 ssi_utime; | 33 | __u64 ssi_utime; |
34 | __u64 ssi_stime; | 34 | __u64 ssi_stime; |
35 | __u64 ssi_addr; | 35 | __u64 ssi_addr; |
36 | __u16 ssi_addr_lsb; | ||
36 | 37 | ||
37 | /* | 38 | /* |
38 | * Pad structure to 128 bytes. Remember to update the | 39 | * Pad structure to 128 bytes. Remember to update the |
@@ -43,7 +44,7 @@ struct signalfd_siginfo { | |||
43 | * comes out of a read(2) and we really don't want to have | 44 | * comes out of a read(2) and we really don't want to have |
44 | * a compat on read(2). | 45 | * a compat on read(2). |
45 | */ | 46 | */ |
46 | __u8 __pad[48]; | 47 | __u8 __pad[46]; |
47 | }; | 48 | }; |
48 | 49 | ||
49 | 50 | ||
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 77eb60d2b496..e6ba898de61c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -129,8 +129,13 @@ typedef struct skb_frag_struct skb_frag_t; | |||
129 | 129 | ||
130 | struct skb_frag_struct { | 130 | struct skb_frag_struct { |
131 | struct page *page; | 131 | struct page *page; |
132 | #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) | ||
132 | __u32 page_offset; | 133 | __u32 page_offset; |
133 | __u32 size; | 134 | __u32 size; |
135 | #else | ||
136 | __u16 page_offset; | ||
137 | __u16 size; | ||
138 | #endif | ||
134 | }; | 139 | }; |
135 | 140 | ||
136 | #define HAVE_HW_TIME_STAMP | 141 | #define HAVE_HW_TIME_STAMP |
@@ -163,26 +168,19 @@ struct skb_shared_hwtstamps { | |||
163 | ktime_t syststamp; | 168 | ktime_t syststamp; |
164 | }; | 169 | }; |
165 | 170 | ||
166 | /** | 171 | /* Definitions for tx_flags in struct skb_shared_info */ |
167 | * struct skb_shared_tx - instructions for time stamping of outgoing packets | 172 | enum { |
168 | * @hardware: generate hardware time stamp | 173 | /* generate hardware time stamp */ |
169 | * @software: generate software time stamp | 174 | SKBTX_HW_TSTAMP = 1 << 0, |
170 | * @in_progress: device driver is going to provide | 175 | |
171 | * hardware time stamp | 176 | /* generate software time stamp */ |
172 | * @prevent_sk_orphan: make sk reference available on driver level | 177 | SKBTX_SW_TSTAMP = 1 << 1, |
173 | * @flags: all shared_tx flags | 178 | |
174 | * | 179 | /* device driver is going to provide hardware time stamp */ |
175 | * These flags are attached to packets as part of the | 180 | SKBTX_IN_PROGRESS = 1 << 2, |
176 | * &skb_shared_info. Use skb_tx() to get a pointer. | 181 | |
177 | */ | 182 | /* ensure the originating sk reference is available on driver level */ |
178 | union skb_shared_tx { | 183 | SKBTX_DRV_NEEDS_SK_REF = 1 << 3, |
179 | struct { | ||
180 | __u8 hardware:1, | ||
181 | software:1, | ||
182 | in_progress:1, | ||
183 | prevent_sk_orphan:1; | ||
184 | }; | ||
185 | __u8 flags; | ||
186 | }; | 184 | }; |
187 | 185 | ||
188 | /* This data is invariant across clones and lives at | 186 | /* This data is invariant across clones and lives at |
@@ -195,7 +193,7 @@ struct skb_shared_info { | |||
195 | unsigned short gso_segs; | 193 | unsigned short gso_segs; |
196 | unsigned short gso_type; | 194 | unsigned short gso_type; |
197 | __be32 ip6_frag_id; | 195 | __be32 ip6_frag_id; |
198 | union skb_shared_tx tx_flags; | 196 | __u8 tx_flags; |
199 | struct sk_buff *frag_list; | 197 | struct sk_buff *frag_list; |
200 | struct skb_shared_hwtstamps hwtstamps; | 198 | struct skb_shared_hwtstamps hwtstamps; |
201 | 199 | ||
@@ -462,19 +460,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) | |||
462 | skb->_skb_refdst = (unsigned long)dst; | 460 | skb->_skb_refdst = (unsigned long)dst; |
463 | } | 461 | } |
464 | 462 | ||
465 | /** | 463 | extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); |
466 | * skb_dst_set_noref - sets skb dst, without a reference | ||
467 | * @skb: buffer | ||
468 | * @dst: dst entry | ||
469 | * | ||
470 | * Sets skb dst, assuming a reference was not taken on dst | ||
471 | * skb_dst_drop() should not dst_release() this dst | ||
472 | */ | ||
473 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) | ||
474 | { | ||
475 | WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); | ||
476 | skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; | ||
477 | } | ||
478 | 464 | ||
479 | /** | 465 | /** |
480 | * skb_dst_is_noref - Test if skb dst isn't refcounted | 466 | * skb_dst_is_noref - Test if skb dst isn't refcounted |
@@ -498,13 +484,13 @@ extern struct sk_buff *__alloc_skb(unsigned int size, | |||
498 | static inline struct sk_buff *alloc_skb(unsigned int size, | 484 | static inline struct sk_buff *alloc_skb(unsigned int size, |
499 | gfp_t priority) | 485 | gfp_t priority) |
500 | { | 486 | { |
501 | return __alloc_skb(size, priority, 0, -1); | 487 | return __alloc_skb(size, priority, 0, NUMA_NO_NODE); |
502 | } | 488 | } |
503 | 489 | ||
504 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, | 490 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
505 | gfp_t priority) | 491 | gfp_t priority) |
506 | { | 492 | { |
507 | return __alloc_skb(size, priority, 1, -1); | 493 | return __alloc_skb(size, priority, 1, NUMA_NO_NODE); |
508 | } | 494 | } |
509 | 495 | ||
510 | extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); | 496 | extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); |
@@ -558,6 +544,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, | |||
558 | unsigned int to, struct ts_config *config, | 544 | unsigned int to, struct ts_config *config, |
559 | struct ts_state *state); | 545 | struct ts_state *state); |
560 | 546 | ||
547 | extern __u32 __skb_get_rxhash(struct sk_buff *skb); | ||
548 | static inline __u32 skb_get_rxhash(struct sk_buff *skb) | ||
549 | { | ||
550 | if (!skb->rxhash) | ||
551 | skb->rxhash = __skb_get_rxhash(skb); | ||
552 | |||
553 | return skb->rxhash; | ||
554 | } | ||
555 | |||
561 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 556 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
562 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) | 557 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
563 | { | 558 | { |
@@ -578,11 +573,6 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) | |||
578 | return &skb_shinfo(skb)->hwtstamps; | 573 | return &skb_shinfo(skb)->hwtstamps; |
579 | } | 574 | } |
580 | 575 | ||
581 | static inline union skb_shared_tx *skb_tx(struct sk_buff *skb) | ||
582 | { | ||
583 | return &skb_shinfo(skb)->tx_flags; | ||
584 | } | ||
585 | |||
586 | /** | 576 | /** |
587 | * skb_queue_empty - check if a queue is empty | 577 | * skb_queue_empty - check if a queue is empty |
588 | * @list: queue head | 578 | * @list: queue head |
@@ -604,7 +594,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) | |||
604 | static inline bool skb_queue_is_last(const struct sk_buff_head *list, | 594 | static inline bool skb_queue_is_last(const struct sk_buff_head *list, |
605 | const struct sk_buff *skb) | 595 | const struct sk_buff *skb) |
606 | { | 596 | { |
607 | return (skb->next == (struct sk_buff *) list); | 597 | return skb->next == (struct sk_buff *)list; |
608 | } | 598 | } |
609 | 599 | ||
610 | /** | 600 | /** |
@@ -617,7 +607,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, | |||
617 | static inline bool skb_queue_is_first(const struct sk_buff_head *list, | 607 | static inline bool skb_queue_is_first(const struct sk_buff_head *list, |
618 | const struct sk_buff *skb) | 608 | const struct sk_buff *skb) |
619 | { | 609 | { |
620 | return (skb->prev == (struct sk_buff *) list); | 610 | return skb->prev == (struct sk_buff *)list; |
621 | } | 611 | } |
622 | 612 | ||
623 | /** | 613 | /** |
@@ -1123,7 +1113,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, | |||
1123 | int off, int size); | 1113 | int off, int size); |
1124 | 1114 | ||
1125 | #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) | 1115 | #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) |
1126 | #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) | 1116 | #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) |
1127 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) | 1117 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) |
1128 | 1118 | ||
1129 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 1119 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
@@ -1561,13 +1551,25 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
1561 | return skb; | 1551 | return skb; |
1562 | } | 1552 | } |
1563 | 1553 | ||
1564 | extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); | 1554 | /** |
1555 | * __netdev_alloc_page - allocate a page for ps-rx on a specific device | ||
1556 | * @dev: network device to receive on | ||
1557 | * @gfp_mask: alloc_pages_node mask | ||
1558 | * | ||
1559 | * Allocate a new page. dev currently unused. | ||
1560 | * | ||
1561 | * %NULL is returned if there is no free memory. | ||
1562 | */ | ||
1563 | static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) | ||
1564 | { | ||
1565 | return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | /** | 1568 | /** |
1567 | * netdev_alloc_page - allocate a page for ps-rx on a specific device | 1569 | * netdev_alloc_page - allocate a page for ps-rx on a specific device |
1568 | * @dev: network device to receive on | 1570 | * @dev: network device to receive on |
1569 | * | 1571 | * |
1570 | * Allocate a new page node local to the specified device. | 1572 | * Allocate a new page. dev currently unused. |
1571 | * | 1573 | * |
1572 | * %NULL is returned if there is no free memory. | 1574 | * %NULL is returned if there is no free memory. |
1573 | */ | 1575 | */ |
@@ -1787,7 +1789,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | |||
1787 | skb = skb->prev) | 1789 | skb = skb->prev) |
1788 | 1790 | ||
1789 | 1791 | ||
1790 | static inline bool skb_has_frags(const struct sk_buff *skb) | 1792 | static inline bool skb_has_frag_list(const struct sk_buff *skb) |
1791 | { | 1793 | { |
1792 | return skb_shinfo(skb)->frag_list != NULL; | 1794 | return skb_shinfo(skb)->frag_list != NULL; |
1793 | } | 1795 | } |
@@ -1987,8 +1989,8 @@ extern void skb_tstamp_tx(struct sk_buff *orig_skb, | |||
1987 | 1989 | ||
1988 | static inline void sw_tx_timestamp(struct sk_buff *skb) | 1990 | static inline void sw_tx_timestamp(struct sk_buff *skb) |
1989 | { | 1991 | { |
1990 | union skb_shared_tx *shtx = skb_tx(skb); | 1992 | if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && |
1991 | if (shtx->software && !shtx->in_progress) | 1993 | !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
1992 | skb_tstamp_tx(skb, NULL); | 1994 | skb_tstamp_tx(skb, NULL); |
1993 | } | 1995 | } |
1994 | 1996 | ||
@@ -2159,7 +2161,7 @@ static inline u16 skb_get_rx_queue(const struct sk_buff *skb) | |||
2159 | 2161 | ||
2160 | static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) | 2162 | static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) |
2161 | { | 2163 | { |
2162 | return (skb->queue_mapping != 0); | 2164 | return skb->queue_mapping != 0; |
2163 | } | 2165 | } |
2164 | 2166 | ||
2165 | extern u16 skb_tx_hash(const struct net_device *dev, | 2167 | extern u16 skb_tx_hash(const struct net_device *dev, |
@@ -2209,6 +2211,21 @@ static inline void skb_forward_csum(struct sk_buff *skb) | |||
2209 | skb->ip_summed = CHECKSUM_NONE; | 2211 | skb->ip_summed = CHECKSUM_NONE; |
2210 | } | 2212 | } |
2211 | 2213 | ||
2214 | /** | ||
2215 | * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE | ||
2216 | * @skb: skb to check | ||
2217 | * | ||
2218 | * Fresh skbs have their ip_summed set to CHECKSUM_NONE. | ||
2219 | * Instead of forcing ip_summed to CHECKSUM_NONE, we can | ||
2220 | * use this helper to document places where we make this assertion. | ||
2221 | */ | ||
2222 | static inline void skb_checksum_none_assert(struct sk_buff *skb) | ||
2223 | { | ||
2224 | #ifdef DEBUG | ||
2225 | BUG_ON(skb->ip_summed != CHECKSUM_NONE); | ||
2226 | #endif | ||
2227 | } | ||
2228 | |||
2212 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); | 2229 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); |
2213 | #endif /* __KERNEL__ */ | 2230 | #endif /* __KERNEL__ */ |
2214 | #endif /* _LINUX_SKBUFF_H */ | 2231 | #endif /* _LINUX_SKBUFF_H */ |
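
The union skb_shared_tx removal above changes how drivers request and claim timestamps; a sketch of the pattern under the new SKBTX_* flags, with my_hw_timestamp_start() standing in for a device-specific routine:

#include <linux/skbuff.h>

static void my_driver_tx_tstamp(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                /* Claim the timestamp so the core will not also stamp it. */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                my_hw_timestamp_start(skb);     /* assumed device routine */
        }
        /*
         * Software timestamps are requested via SKBTX_SW_TSTAMP and
         * delivered by the core through sw_tx_timestamp().
         */
}
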
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 9f63538928c0..e4f5ed180b9b 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -87,7 +87,7 @@ struct kmem_cache { | |||
87 | unsigned long min_partial; | 87 | unsigned long min_partial; |
88 | const char *name; /* Name (only for display!) */ | 88 | const char *name; /* Name (only for display!) */ |
89 | struct list_head list; /* List of slab caches */ | 89 | struct list_head list; /* List of slab caches */ |
90 | #ifdef CONFIG_SLUB_DEBUG | 90 | #ifdef CONFIG_SYSFS |
91 | struct kobject kobj; /* For sysfs */ | 91 | struct kobject kobj; /* For sysfs */ |
92 | #endif | 92 | #endif |
93 | 93 | ||
@@ -96,11 +96,8 @@ struct kmem_cache { | |||
96 | * Defragmentation by allocating from a remote node. | 96 | * Defragmentation by allocating from a remote node. |
97 | */ | 97 | */ |
98 | int remote_node_defrag_ratio; | 98 | int remote_node_defrag_ratio; |
99 | struct kmem_cache_node *node[MAX_NUMNODES]; | ||
100 | #else | ||
101 | /* Avoid an extra cache line for UP */ | ||
102 | struct kmem_cache_node local_node; | ||
103 | #endif | 99 | #endif |
100 | struct kmem_cache_node *node[MAX_NUMNODES]; | ||
104 | }; | 101 | }; |
105 | 102 | ||
106 | /* | 103 | /* |
@@ -139,19 +136,16 @@ struct kmem_cache { | |||
139 | 136 | ||
140 | #ifdef CONFIG_ZONE_DMA | 137 | #ifdef CONFIG_ZONE_DMA |
141 | #define SLUB_DMA __GFP_DMA | 138 | #define SLUB_DMA __GFP_DMA |
142 | /* Reserve extra caches for potential DMA use */ | ||
143 | #define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT) | ||
144 | #else | 139 | #else |
145 | /* Disable DMA functionality */ | 140 | /* Disable DMA functionality */ |
146 | #define SLUB_DMA (__force gfp_t)0 | 141 | #define SLUB_DMA (__force gfp_t)0 |
147 | #define KMALLOC_CACHES SLUB_PAGE_SHIFT | ||
148 | #endif | 142 | #endif |
149 | 143 | ||
150 | /* | 144 | /* |
151 | * We keep the general caches in an array of slab caches that are used for | 145 | * We keep the general caches in an array of slab caches that are used for |
152 | * 2^x bytes of allocations. | 146 | * 2^x bytes of allocations. |
153 | */ | 147 | */ |
154 | extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES]; | 148 | extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; |
155 | 149 | ||
156 | /* | 150 | /* |
157 | * Sorry that the following has to be that ugly but some versions of GCC | 151 | * Sorry that the following has to be that ugly but some versions of GCC |
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
216 | if (index == 0) | 210 | if (index == 0) |
217 | return NULL; | 211 | return NULL; |
218 | 212 | ||
219 | return &kmalloc_caches[index]; | 213 | return kmalloc_caches[index]; |
220 | } | 214 | } |
221 | 215 | ||
222 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 216 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 2ea1dd1ba21c..291f721144c2 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h | |||
@@ -54,12 +54,15 @@ static inline void cycle_kernel_lock(void) | |||
54 | 54 | ||
55 | #else | 55 | #else |
56 | 56 | ||
57 | #ifdef CONFIG_BKL /* provoke build bug if not set */ | ||
57 | #define lock_kernel() | 58 | #define lock_kernel() |
58 | #define unlock_kernel() | 59 | #define unlock_kernel() |
59 | #define release_kernel_lock(task) do { } while(0) | ||
60 | #define cycle_kernel_lock() do { } while(0) | 60 | #define cycle_kernel_lock() do { } while(0) |
61 | #define reacquire_kernel_lock(task) 0 | ||
62 | #define kernel_locked() 1 | 61 | #define kernel_locked() 1 |
62 | #endif /* CONFIG_BKL */ | ||
63 | |||
64 | #define release_kernel_lock(task) do { } while(0) | ||
65 | #define reacquire_kernel_lock(task) 0 | ||
63 | 66 | ||
64 | #endif /* CONFIG_LOCK_KERNEL */ | 67 | #endif /* CONFIG_LOCK_KERNEL */ |
65 | #endif /* __LINUX_SMPLOCK_H */ | 68 | #endif /* __LINUX_SMPLOCK_H */ |
diff --git a/include/linux/socket.h b/include/linux/socket.h index a8f56e1ec760..5146b50202ce 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -326,7 +326,6 @@ extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *a | |||
326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); | 326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); |
327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | 327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, |
328 | int offset, int len); | 328 | int offset, int len); |
329 | extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen); | ||
330 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); | 329 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); |
331 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 330 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
332 | 331 | ||
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f8854655860e..80e535897de6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/preempt.h> | 50 | #include <linux/preempt.h> |
51 | #include <linux/linkage.h> | 51 | #include <linux/linkage.h> |
52 | #include <linux/compiler.h> | 52 | #include <linux/compiler.h> |
53 | #include <linux/irqflags.h> | ||
53 | #include <linux/thread_info.h> | 54 | #include <linux/thread_info.h> |
54 | #include <linux/kernel.h> | 55 | #include <linux/kernel.h> |
55 | #include <linux/stringify.h> | 56 | #include <linux/stringify.h> |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5d2f546dbf..58971e891f48 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * srcu_dereference - fetch SRCU-protected pointer with checking | 111 | * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing |
112 | * @p: the pointer to fetch and protect for later dereferencing | ||
113 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
114 | * really are in an SRCU read-side critical section. | ||
115 | * @c: condition to check for update-side use | ||
112 | * | 116 | * |
113 | * Makes rcu_dereference_check() do the dirty work. | 117 | * If PROVE_RCU is enabled, invoking this outside of an RCU read-side |
118 | * critical section will result in an RCU-lockdep splat, unless @c evaluates | ||
119 | * to 1. The @c argument will normally be a logical expression containing | ||
120 | * lockdep_is_held() calls. | ||
114 | */ | 121 | */ |
115 | #define srcu_dereference(p, sp) \ | 122 | #define srcu_dereference_check(p, sp, c) \ |
116 | rcu_dereference_check(p, srcu_read_lock_held(sp)) | 123 | __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) |
124 | |||
125 | /** | ||
126 | * srcu_dereference - fetch SRCU-protected pointer for later dereferencing | ||
127 | * @p: the pointer to fetch and protect for later dereferencing | ||
128 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
129 | * really are in an SRCU read-side critical section. | ||
130 | * | ||
131 | * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU | ||
132 | * is enabled, invoking this outside of an RCU read-side critical | ||
133 | * section will result in an RCU-lockdep splat. | ||
134 | */ | ||
135 | #define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) | ||
117 | 136 | ||
118 | /** | 137 | /** |
119 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 138 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
120 | * @sp: srcu_struct in which to register the new reader. | 139 | * @sp: srcu_struct in which to register the new reader. |
121 | * | 140 | * |
122 | * Enter an SRCU read-side critical section. Note that SRCU read-side | 141 | * Enter an SRCU read-side critical section. Note that SRCU read-side |
123 | * critical sections may be nested. | 142 | * critical sections may be nested. However, it is illegal to |
143 | * call anything that waits on an SRCU grace period for the same | ||
144 | * srcu_struct, whether directly or indirectly. Please note that | ||
145 | * one way to indirectly wait on an SRCU grace period is to acquire | ||
146 | * a mutex that is held elsewhere while calling synchronize_srcu() or | ||
147 | * synchronize_srcu_expedited(). | ||
124 | */ | 148 | */ |
125 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 149 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
126 | { | 150 | { |
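
A sketch of the split between srcu_dereference() on the read side and srcu_dereference_check() with a lockdep expression on the update side, as described in the comments above (the my_* names are illustrative and init_srcu_struct() setup is omitted):

#include <linux/mutex.h>
#include <linux/srcu.h>

struct my_cfg {
        int value;
};

static struct srcu_struct my_srcu;              /* init_srcu_struct() omitted */
static DEFINE_MUTEX(my_update_lock);
static struct my_cfg __rcu *my_cfg_ptr;

static int my_read_value(void)
{
        struct my_cfg *cfg;
        int idx, val = -1;

        idx = srcu_read_lock(&my_srcu);
        cfg = srcu_dereference(my_cfg_ptr, &my_srcu);   /* reader side */
        if (cfg)
                val = cfg->value;
        srcu_read_unlock(&my_srcu, idx);
        return val;
}

static struct my_cfg *my_cfg_locked(void)
{
        /* Legal from a reader or from the update side holding the mutex. */
        return srcu_dereference_check(my_cfg_ptr, &my_srcu,
                                      lockdep_is_held(&my_update_lock));
}
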
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index a6d5225b9275..11daf9c140e7 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define SSB_TMSLOW_RESET 0x00000001 /* Reset */ | 97 | #define SSB_TMSLOW_RESET 0x00000001 /* Reset */ |
98 | #define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ | 98 | #define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ |
99 | #define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ | 99 | #define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ |
100 | #define SSB_TMSLOW_PHYCLK 0x00000010 /* MAC PHY Clock Control Enable */ | ||
100 | #define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ | 101 | #define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ |
101 | #define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ | 102 | #define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ |
102 | #define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ | 103 | #define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 632ff7c03280..d66c61774d95 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -32,10 +32,14 @@ | |||
32 | struct plat_stmmacenet_data { | 32 | struct plat_stmmacenet_data { |
33 | int bus_id; | 33 | int bus_id; |
34 | int pbl; | 34 | int pbl; |
35 | int clk_csr; | ||
35 | int has_gmac; | 36 | int has_gmac; |
36 | int enh_desc; | 37 | int enh_desc; |
38 | int tx_coe; | ||
39 | int bugged_jumbo; | ||
40 | int pmt; | ||
37 | void (*fix_mac_speed)(void *priv, unsigned int speed); | 41 | void (*fix_mac_speed)(void *priv, unsigned int speed); |
38 | void (*bus_setup)(unsigned long ioaddr); | 42 | void (*bus_setup)(void __iomem *ioaddr); |
39 | #ifdef CONFIG_STM_DRIVERS | 43 | #ifdef CONFIG_STM_DRIVERS |
40 | struct stm_pad_config *pad_config; | 44 | struct stm_pad_config *pad_config; |
41 | #endif | 45 | #endif |
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 6b524a0d02e4..1808960c5059 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h | |||
@@ -126,8 +126,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); | |||
126 | 126 | ||
127 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 127 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ |
128 | 128 | ||
129 | static inline int stop_machine(int (*fn)(void *), void *data, | 129 | static inline int __stop_machine(int (*fn)(void *), void *data, |
130 | const struct cpumask *cpus) | 130 | const struct cpumask *cpus) |
131 | { | 131 | { |
132 | int ret; | 132 | int ret; |
133 | local_irq_disable(); | 133 | local_irq_disable(); |
@@ -136,5 +136,11 @@ static inline int stop_machine(int (*fn)(void *), void *data, | |||
136 | return ret; | 136 | return ret; |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline int stop_machine(int (*fn)(void *), void *data, | ||
140 | const struct cpumask *cpus) | ||
141 | { | ||
142 | return __stop_machine(fn, data, cpus); | ||
143 | } | ||
144 | |||
139 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 145 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ |
140 | #endif /* _LINUX_STOP_MACHINE */ | 146 | #endif /* _LINUX_STOP_MACHINE */ |
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 5bbc447175dc..b2024757edd5 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
@@ -122,8 +122,8 @@ extern const struct rpc_authops authnull_ops; | |||
122 | int __init rpc_init_authunix(void); | 122 | int __init rpc_init_authunix(void); |
123 | int __init rpc_init_generic_auth(void); | 123 | int __init rpc_init_generic_auth(void); |
124 | int __init rpcauth_init_module(void); | 124 | int __init rpcauth_init_module(void); |
125 | void __exit rpcauth_remove_module(void); | 125 | void rpcauth_remove_module(void); |
126 | void __exit rpc_destroy_generic_auth(void); | 126 | void rpc_destroy_generic_auth(void); |
127 | void rpc_destroy_authunix(void); | 127 | void rpc_destroy_authunix(void); |
128 | 128 | ||
129 | struct rpc_cred * rpc_lookup_cred(void); | 129 | struct rpc_cred * rpc_lookup_cred(void); |
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 671538d25bc1..8eee9dbbfe7a 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h | |||
@@ -69,7 +69,7 @@ struct gss_cl_ctx { | |||
69 | enum rpc_gss_proc gc_proc; | 69 | enum rpc_gss_proc gc_proc; |
70 | u32 gc_seq; | 70 | u32 gc_seq; |
71 | spinlock_t gc_seq_lock; | 71 | spinlock_t gc_seq_lock; |
72 | struct gss_ctx *gc_gss_ctx; | 72 | struct gss_ctx __rcu *gc_gss_ctx; |
73 | struct xdr_netobj gc_wire_ctx; | 73 | struct xdr_netobj gc_wire_ctx; |
74 | u32 gc_win; | 74 | u32 gc_win; |
75 | unsigned long gc_expiry; | 75 | unsigned long gc_expiry; |
@@ -80,7 +80,7 @@ struct gss_upcall_msg; | |||
80 | struct gss_cred { | 80 | struct gss_cred { |
81 | struct rpc_cred gc_base; | 81 | struct rpc_cred gc_base; |
82 | enum rpc_gss_svc gc_service; | 82 | enum rpc_gss_svc gc_service; |
83 | struct gss_cl_ctx *gc_ctx; | 83 | struct gss_cl_ctx __rcu *gc_ctx; |
84 | struct gss_upcall_msg *gc_upcall; | 84 | struct gss_upcall_msg *gc_upcall; |
85 | unsigned long gc_upcall_timestamp; | 85 | unsigned long gc_upcall_timestamp; |
86 | unsigned char gc_machine_cred : 1; | 86 | unsigned char gc_machine_cred : 1; |
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 7bf3e84b92f4..6950c981882d 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h | |||
@@ -125,12 +125,15 @@ struct cache_detail { | |||
125 | */ | 125 | */ |
126 | struct cache_req { | 126 | struct cache_req { |
127 | struct cache_deferred_req *(*defer)(struct cache_req *req); | 127 | struct cache_deferred_req *(*defer)(struct cache_req *req); |
128 | int thread_wait; /* How long (jiffies) we can block the | ||
129 | * current thread to wait for updates. | ||
130 | */ | ||
128 | }; | 131 | }; |
129 | /* this must be embedded in a deferred_request that is being | 132 | /* this must be embedded in a deferred_request that is being |
130 | * delayed awaiting cache-fill | 133 | * delayed awaiting cache-fill |
131 | */ | 134 | */ |
132 | struct cache_deferred_req { | 135 | struct cache_deferred_req { |
133 | struct list_head hash; /* on hash chain */ | 136 | struct hlist_node hash; /* on hash chain */ |
134 | struct list_head recent; /* on fifo */ | 137 | struct list_head recent; /* on fifo */ |
135 | struct cache_head *item; /* cache item we wait on */ | 138 | struct cache_head *item; /* cache item we wait on */ |
136 | void *owner; /* we might need to discard all deferred requests | 139 | void *owner; /* we might need to discard all deferred requests |
@@ -194,7 +197,9 @@ extern void cache_purge(struct cache_detail *detail); | |||
194 | #define NEVER (0x7FFFFFFF) | 197 | #define NEVER (0x7FFFFFFF) |
195 | extern void __init cache_initialize(void); | 198 | extern void __init cache_initialize(void); |
196 | extern int cache_register(struct cache_detail *cd); | 199 | extern int cache_register(struct cache_detail *cd); |
200 | extern int cache_register_net(struct cache_detail *cd, struct net *net); | ||
197 | extern void cache_unregister(struct cache_detail *cd); | 201 | extern void cache_unregister(struct cache_detail *cd); |
202 | extern void cache_unregister_net(struct cache_detail *cd, struct net *net); | ||
198 | 203 | ||
199 | extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, | 204 | extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, |
200 | mode_t, struct cache_detail *); | 205 | mode_t, struct cache_detail *); |
@@ -218,14 +223,42 @@ static inline int get_int(char **bpp, int *anint) | |||
218 | return 0; | 223 | return 0; |
219 | } | 224 | } |
220 | 225 | ||
226 | /* | ||
227 | * Timestamps kept in the cache are expressed in seconds | ||
228 | * since boot. This is the best basis for measuring differences in | ||
229 | * real time. | ||
230 | */ | ||
231 | static inline time_t seconds_since_boot(void) | ||
232 | { | ||
233 | struct timespec boot; | ||
234 | getboottime(&boot); | ||
235 | return get_seconds() - boot.tv_sec; | ||
236 | } | ||
237 | |||
238 | static inline time_t convert_to_wallclock(time_t sinceboot) | ||
239 | { | ||
240 | struct timespec boot; | ||
241 | getboottime(&boot); | ||
242 | return boot.tv_sec + sinceboot; | ||
243 | } | ||
244 | |||
221 | static inline time_t get_expiry(char **bpp) | 245 | static inline time_t get_expiry(char **bpp) |
222 | { | 246 | { |
223 | int rv; | 247 | int rv; |
248 | struct timespec boot; | ||
249 | |||
224 | if (get_int(bpp, &rv)) | 250 | if (get_int(bpp, &rv)) |
225 | return 0; | 251 | return 0; |
226 | if (rv < 0) | 252 | if (rv < 0) |
227 | return 0; | 253 | return 0; |
228 | return rv; | 254 | getboottime(&boot); |
255 | return rv - boot.tv_sec; | ||
229 | } | 256 | } |
230 | 257 | ||
258 | static inline void sunrpc_invalidate(struct cache_head *h, | ||
259 | struct cache_detail *detail) | ||
260 | { | ||
261 | h->expiry_time = seconds_since_boot() - 1; | ||
262 | detail->nextcheck = seconds_since_boot(); | ||
263 | } | ||
231 | #endif /* _LINUX_SUNRPC_CACHE_H_ */ | 264 | #endif /* _LINUX_SUNRPC_CACHE_H_ */ |
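The helpers added above keep cache expiry times in seconds since boot, which is immune to wall-clock (settimeofday/NTP) jumps, and convert_to_wallclock() maps a boot-relative stamp back to ordinary seconds for export. A minimal sketch of the round trip, assuming a made-up cache entry and an arbitrary 30-second lifetime:

/* Hedged illustration: "h" is any struct cache_head; 30s is arbitrary. */
static void my_set_expiry(struct cache_head *h)
{
	h->expiry_time = seconds_since_boot() + 30;	/* boot-relative stamp */
}

static time_t my_export_expiry(struct cache_head *h)
{
	return convert_to_wallclock(h->expiry_time);	/* wall-clock seconds */
}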
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 85f38a63f098..a5a55f284b7d 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -102,6 +102,7 @@ struct rpc_procinfo { | |||
102 | #ifdef __KERNEL__ | 102 | #ifdef __KERNEL__ |
103 | 103 | ||
104 | struct rpc_create_args { | 104 | struct rpc_create_args { |
105 | struct net *net; | ||
105 | int protocol; | 106 | int protocol; |
106 | struct sockaddr *address; | 107 | struct sockaddr *address; |
107 | size_t addrsize; | 108 | size_t addrsize; |
@@ -137,7 +138,6 @@ int rpcb_register(u32, u32, int, unsigned short); | |||
137 | int rpcb_v4_register(const u32 program, const u32 version, | 138 | int rpcb_v4_register(const u32 program, const u32 version, |
138 | const struct sockaddr *address, | 139 | const struct sockaddr *address, |
139 | const char *netid); | 140 | const char *netid); |
140 | int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); | ||
141 | void rpcb_getport_async(struct rpc_task *); | 141 | void rpcb_getport_async(struct rpc_task *); |
142 | 142 | ||
143 | void rpc_call_start(struct rpc_task *); | 143 | void rpc_call_start(struct rpc_task *); |
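The new net field means every rpc_create() caller now states which network namespace the transport should be created in. A hedged sketch of filling the argument block; the server address, program and flavour below are placeholders, not taken from this patch:

/* Illustrative only: "sin" and "my_rpc_program" are assumed to exist. */
struct rpc_create_args args = {
	.net		= &init_net,	/* namespace for the new socket */
	.protocol	= XPRT_TRANSPORT_TCP,
	.address	= (struct sockaddr *)&sin,
	.addrsize	= sizeof(sin),
	.servername	= "example-server",
	.program	= &my_rpc_program,
	.version	= 1,
	.authflavor	= RPC_AUTH_UNIX,
};
struct rpc_clnt *clnt = rpc_create(&args);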
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h deleted file mode 100644 index e3e6a3437f8b..000000000000 --- a/include/linux/sunrpc/gss_spkm3.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/sunrpc/gss_spkm3.h | ||
3 | * | ||
4 | * Copyright (c) 2000 The Regents of the University of Michigan. | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Andy Adamson <andros@umich.edu> | ||
8 | */ | ||
9 | |||
10 | #include <linux/sunrpc/auth_gss.h> | ||
11 | #include <linux/sunrpc/gss_err.h> | ||
12 | #include <linux/sunrpc/gss_asn1.h> | ||
13 | |||
14 | struct spkm3_ctx { | ||
15 | struct xdr_netobj ctx_id; /* per message context id */ | ||
16 | int endtime; /* endtime of the context */ | ||
17 | struct xdr_netobj mech_used; | ||
18 | unsigned int ret_flags ; | ||
19 | struct xdr_netobj conf_alg; | ||
20 | struct xdr_netobj derived_conf_key; | ||
21 | struct xdr_netobj intg_alg; | ||
22 | struct xdr_netobj derived_integ_key; | ||
23 | }; | ||
24 | |||
25 | /* OIDs declarations for K-ALG, I-ALG, C-ALG, and OWF-ALG */ | ||
26 | extern const struct xdr_netobj hmac_md5_oid; | ||
27 | extern const struct xdr_netobj cast5_cbc_oid; | ||
28 | |||
29 | /* SPKM InnerContext Token types */ | ||
30 | |||
31 | #define SPKM_ERROR_TOK 3 | ||
32 | #define SPKM_MIC_TOK 4 | ||
33 | #define SPKM_WRAP_TOK 5 | ||
34 | #define SPKM_DEL_TOK 6 | ||
35 | |||
36 | u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype); | ||
37 | |||
38 | u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype); | ||
39 | |||
40 | #define CKSUMTYPE_RSA_MD5 0x0007 | ||
41 | #define CKSUMTYPE_HMAC_MD5 0x0008 | ||
42 | |||
43 | s32 make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header, | ||
44 | unsigned int hdrlen, struct xdr_buf *body, | ||
45 | unsigned int body_offset, struct xdr_netobj *cksum); | ||
46 | void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits); | ||
47 | int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, | ||
48 | int explen); | ||
49 | void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, | ||
50 | unsigned char *ctxhdr, int elen, int zbit); | ||
51 | void spkm3_make_mic_token(unsigned char **tokp, int toklen, | ||
52 | struct xdr_netobj *mic_hdr, | ||
53 | struct xdr_netobj *md5cksum, int md5elen, int md5zbit); | ||
54 | u32 spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, | ||
55 | unsigned char **cksum); | ||
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h index 5fa0f2084307..680471d1f28a 100644 --- a/include/linux/sunrpc/stats.h +++ b/include/linux/sunrpc/stats.h | |||
@@ -38,8 +38,21 @@ struct svc_stat { | |||
38 | rpcbadclnt; | 38 | rpcbadclnt; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | void rpc_proc_init(void); | 41 | struct net; |
42 | void rpc_proc_exit(void); | 42 | #ifdef CONFIG_PROC_FS |
43 | int rpc_proc_init(struct net *); | ||
44 | void rpc_proc_exit(struct net *); | ||
45 | #else | ||
46 | static inline int rpc_proc_init(struct net *net) | ||
47 | { | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static inline void rpc_proc_exit(struct net *net) | ||
52 | { | ||
53 | } | ||
54 | #endif | ||
55 | |||
43 | #ifdef MODULE | 56 | #ifdef MODULE |
44 | void rpc_modcount(struct inode *, int); | 57 | void rpc_modcount(struct inode *, int); |
45 | #endif | 58 | #endif |
@@ -54,9 +67,6 @@ void svc_proc_unregister(const char *); | |||
54 | 67 | ||
55 | void svc_seq_show(struct seq_file *, | 68 | void svc_seq_show(struct seq_file *, |
56 | const struct svc_stat *); | 69 | const struct svc_stat *); |
57 | |||
58 | extern struct proc_dir_entry *proc_net_rpc; | ||
59 | |||
60 | #else | 70 | #else |
61 | 71 | ||
62 | static inline struct proc_dir_entry *rpc_proc_register(struct rpc_stat *s) { return NULL; } | 72 | static inline struct proc_dir_entry *rpc_proc_register(struct rpc_stat *s) { return NULL; } |
@@ -69,9 +79,6 @@ static inline void svc_proc_unregister(const char *p) {} | |||
69 | 79 | ||
70 | static inline void svc_seq_show(struct seq_file *seq, | 80 | static inline void svc_seq_show(struct seq_file *seq, |
71 | const struct svc_stat *st) {} | 81 | const struct svc_stat *st) {} |
72 | |||
73 | #define proc_net_rpc NULL | ||
74 | |||
75 | #endif | 82 | #endif |
76 | 83 | ||
77 | #endif /* _LINUX_SUNRPC_STATS_H */ | 84 | #endif /* _LINUX_SUNRPC_STATS_H */ |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 5f4e18b3ce73..bbdb680ffbe9 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | struct svc_xprt_ops { | 13 | struct svc_xprt_ops { |
14 | struct svc_xprt *(*xpo_create)(struct svc_serv *, | 14 | struct svc_xprt *(*xpo_create)(struct svc_serv *, |
15 | struct net *net, | ||
15 | struct sockaddr *, int, | 16 | struct sockaddr *, int, |
16 | int); | 17 | int); |
17 | struct svc_xprt *(*xpo_accept)(struct svc_xprt *); | 18 | struct svc_xprt *(*xpo_accept)(struct svc_xprt *); |
@@ -32,6 +33,16 @@ struct svc_xprt_class { | |||
32 | u32 xcl_max_payload; | 33 | u32 xcl_max_payload; |
33 | }; | 34 | }; |
34 | 35 | ||
36 | /* | ||
37 | * This is embedded in an object that wants a callback before deleting | ||
38 | * an xprt; intended for use by NFSv4.1, which needs to know when a | ||
39 | * client's tcp connection (and hence possibly a backchannel) goes away. | ||
40 | */ | ||
41 | struct svc_xpt_user { | ||
42 | struct list_head list; | ||
43 | void (*callback)(struct svc_xpt_user *); | ||
44 | }; | ||
45 | |||
35 | struct svc_xprt { | 46 | struct svc_xprt { |
36 | struct svc_xprt_class *xpt_class; | 47 | struct svc_xprt_class *xpt_class; |
37 | struct svc_xprt_ops *xpt_ops; | 48 | struct svc_xprt_ops *xpt_ops; |
@@ -66,14 +77,31 @@ struct svc_xprt { | |||
66 | struct sockaddr_storage xpt_remote; /* remote peer's address */ | 77 | struct sockaddr_storage xpt_remote; /* remote peer's address */ |
67 | size_t xpt_remotelen; /* length of address */ | 78 | size_t xpt_remotelen; /* length of address */ |
68 | struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ | 79 | struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ |
80 | struct list_head xpt_users; /* callbacks on free */ | ||
81 | |||
82 | struct net *xpt_net; | ||
69 | }; | 83 | }; |
70 | 84 | ||
85 | static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) | ||
86 | { | ||
87 | spin_lock(&xpt->xpt_lock); | ||
88 | list_add(&u->list, &xpt->xpt_users); | ||
89 | spin_unlock(&xpt->xpt_lock); | ||
90 | } | ||
91 | |||
92 | static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) | ||
93 | { | ||
94 | spin_lock(&xpt->xpt_lock); | ||
95 | list_del_init(&u->list); | ||
96 | spin_unlock(&xpt->xpt_lock); | ||
97 | } | ||
98 | |||
71 | int svc_reg_xprt_class(struct svc_xprt_class *); | 99 | int svc_reg_xprt_class(struct svc_xprt_class *); |
72 | void svc_unreg_xprt_class(struct svc_xprt_class *); | 100 | void svc_unreg_xprt_class(struct svc_xprt_class *); |
73 | void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, | 101 | void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, |
74 | struct svc_serv *); | 102 | struct svc_serv *); |
75 | int svc_create_xprt(struct svc_serv *, const char *, const int, | 103 | int svc_create_xprt(struct svc_serv *, const char *, struct net *, |
76 | const unsigned short, int); | 104 | const int, const unsigned short, int); |
77 | void svc_xprt_enqueue(struct svc_xprt *xprt); | 105 | void svc_xprt_enqueue(struct svc_xprt *xprt); |
78 | void svc_xprt_received(struct svc_xprt *); | 106 | void svc_xprt_received(struct svc_xprt *); |
79 | void svc_xprt_put(struct svc_xprt *xprt); | 107 | void svc_xprt_put(struct svc_xprt *xprt); |
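struct svc_xpt_user gives an interested party (the comment names NFSv4.1) a callback when the transport is being torn down. A minimal sketch of embedding it in a private object; the names are invented for illustration:

/* Hedged example: "my_conn" / "my_conn_lost" are illustrative names. */
struct my_conn {
	struct svc_xprt		*xprt;
	struct svc_xpt_user	xpt_user;	/* embedded callback handle */
};

static void my_conn_lost(struct svc_xpt_user *u)
{
	struct my_conn *conn = container_of(u, struct my_conn, xpt_user);

	/* the connection (and any backchannel riding on it) is going away */
	pr_debug("transport %p for my_conn lost\n", conn->xprt);
}

static void my_conn_watch(struct my_conn *conn)
{
	conn->xpt_user.callback = my_conn_lost;
	register_xpt_user(conn->xprt, &conn->xpt_user);
}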
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index d39dbdc7b10f..25d333c1b571 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h | |||
@@ -108,10 +108,15 @@ struct auth_ops { | |||
108 | #define SVC_NEGATIVE 4 | 108 | #define SVC_NEGATIVE 4 |
109 | #define SVC_OK 5 | 109 | #define SVC_OK 5 |
110 | #define SVC_DROP 6 | 110 | #define SVC_DROP 6 |
111 | #define SVC_DENIED 7 | 111 | #define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely |
112 | #define SVC_PENDING 8 | 112 | * lost so if there is a tcp connection, it |
113 | #define SVC_COMPLETE 9 | 113 | * should be closed |
114 | */ | ||
115 | #define SVC_DENIED 8 | ||
116 | #define SVC_PENDING 9 | ||
117 | #define SVC_COMPLETE 10 | ||
114 | 118 | ||
119 | struct svc_xprt; | ||
115 | 120 | ||
116 | extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); | 121 | extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); |
117 | extern int svc_authorise(struct svc_rqst *rqstp); | 122 | extern int svc_authorise(struct svc_rqst *rqstp); |
@@ -121,13 +126,13 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor); | |||
121 | 126 | ||
122 | extern struct auth_domain *unix_domain_find(char *name); | 127 | extern struct auth_domain *unix_domain_find(char *name); |
123 | extern void auth_domain_put(struct auth_domain *item); | 128 | extern void auth_domain_put(struct auth_domain *item); |
124 | extern int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom); | 129 | extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom); |
125 | extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); | 130 | extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); |
126 | extern struct auth_domain *auth_domain_find(char *name); | 131 | extern struct auth_domain *auth_domain_find(char *name); |
127 | extern struct auth_domain *auth_unix_lookup(struct in6_addr *addr); | 132 | extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr); |
128 | extern int auth_unix_forget_old(struct auth_domain *dom); | 133 | extern int auth_unix_forget_old(struct auth_domain *dom); |
129 | extern void svcauth_unix_purge(void); | 134 | extern void svcauth_unix_purge(void); |
130 | extern void svcauth_unix_info_release(void *); | 135 | extern void svcauth_unix_info_release(struct svc_xprt *xpt); |
131 | extern int svcauth_unix_set_client(struct svc_rqst *rqstp); | 136 | extern int svcauth_unix_set_client(struct svc_rqst *rqstp); |
132 | 137 | ||
133 | static inline unsigned long hash_str(char *name, int bits) | 138 | static inline unsigned long hash_str(char *name, int bits) |
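SVC_CLOSE is inserted above SVC_DENIED, shifting the DENIED/PENDING/COMPLETE values up by one; it behaves like SVC_DROP but additionally asks for any TCP connection to be closed because the request is known to be lost. A hedged fragment of how an accept hook might use it; the "lost" predicate is a placeholder:

/* Illustrative auth_ops->accept fragment; my_request_is_lost() stands in
 * for "this request can no longer be answered". */
static int my_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	if (my_request_is_lost(rqstp))
		return SVC_CLOSE;	/* drop it and close the connection */
	return SVC_OK;
}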
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 35cf2e8cd7c6..498ab93a81e4 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
@@ -108,6 +108,7 @@ void xdr_encode_pages(struct xdr_buf *, struct page **, unsigned int, | |||
108 | unsigned int); | 108 | unsigned int); |
109 | void xdr_inline_pages(struct xdr_buf *, unsigned int, | 109 | void xdr_inline_pages(struct xdr_buf *, unsigned int, |
110 | struct page **, unsigned int, unsigned int); | 110 | struct page **, unsigned int, unsigned int); |
111 | void xdr_terminate_string(struct xdr_buf *, const u32); | ||
111 | 112 | ||
112 | static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) | 113 | static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) |
113 | { | 114 | { |
@@ -131,6 +132,13 @@ xdr_decode_hyper(__be32 *p, __u64 *valp) | |||
131 | return p + 2; | 132 | return p + 2; |
132 | } | 133 | } |
133 | 134 | ||
135 | static inline __be32 * | ||
136 | xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) | ||
137 | { | ||
138 | memcpy(ptr, p, len); | ||
139 | return p + XDR_QUADLEN(len); | ||
140 | } | ||
141 | |||
134 | /* | 142 | /* |
135 | * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) | 143 | * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) |
136 | */ | 144 | */ |
@@ -200,6 +208,7 @@ extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); | |||
200 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, | 208 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, |
201 | unsigned int base, unsigned int len); | 209 | unsigned int base, unsigned int len); |
202 | extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); | 210 | extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); |
211 | extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes); | ||
203 | extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); | 212 | extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); |
204 | extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len); | 213 | extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len); |
205 | extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); | 214 | extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); |
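xdr_decode_opaque_fixed() copies a fixed-length opaque out of the stream and returns the pointer advanced by the quad-aligned length. A small sketch, assuming an 8-byte verifier-style field:

/* Hedged sketch: decode an 8-byte fixed opaque. */
static __be32 *decode_my_verifier(__be32 *p, u8 verf[8])
{
	return xdr_decode_opaque_fixed(p, verf, 8);	/* skips XDR_QUADLEN(8) words */
}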
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index ff5a77b28c50..89d10d279a20 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -224,6 +224,7 @@ struct rpc_xprt { | |||
224 | bklog_u; /* backlog queue utilization */ | 224 | bklog_u; /* backlog queue utilization */ |
225 | } stat; | 225 | } stat; |
226 | 226 | ||
227 | struct net *xprt_net; | ||
227 | const char *address_strings[RPC_DISPLAY_MAX]; | 228 | const char *address_strings[RPC_DISPLAY_MAX]; |
228 | }; | 229 | }; |
229 | 230 | ||
@@ -249,6 +250,7 @@ static inline int bc_prealloc(struct rpc_rqst *req) | |||
249 | 250 | ||
250 | struct xprt_create { | 251 | struct xprt_create { |
251 | int ident; /* XPRT_TRANSPORT identifier */ | 252 | int ident; /* XPRT_TRANSPORT identifier */ |
253 | struct net * net; | ||
252 | struct sockaddr * srcaddr; /* optional local address */ | 254 | struct sockaddr * srcaddr; /* optional local address */ |
253 | struct sockaddr * dstaddr; /* remote peer address */ | 255 | struct sockaddr * dstaddr; /* remote peer address */ |
254 | size_t addrlen; | 256 | size_t addrlen; |
@@ -280,6 +282,8 @@ void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); | |||
280 | void xprt_release(struct rpc_task *task); | 282 | void xprt_release(struct rpc_task *task); |
281 | struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); | 283 | struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); |
282 | void xprt_put(struct rpc_xprt *xprt); | 284 | void xprt_put(struct rpc_xprt *xprt); |
285 | struct rpc_xprt * xprt_alloc(struct net *net, int size, int max_req); | ||
286 | void xprt_free(struct rpc_xprt *); | ||
283 | 287 | ||
284 | static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) | 288 | static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) |
285 | { | 289 | { |
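xprt_alloc()/xprt_free() centralize transport allocation and attach the new transport to the namespace carried in struct xprt_create. A hedged sketch of a transport setup routine; struct my_xprt and the slot count are invented:

/* Illustrative only; real transports also fill in ops, timeouts, addresses. */
struct my_xprt {
	struct rpc_xprt	xprt;		/* embedded, recovered via container_of */
	/* ...transport-private state... */
};

static struct rpc_xprt *my_xprt_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct my_xprt), 16);
	if (!xprt)
		return ERR_PTR(-ENOMEM);
	/* ...initialize and return... */
	return xprt;
}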
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4af270ec2204..26697514c5ec 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -293,8 +293,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
293 | extern bool events_check_enabled; | 293 | extern bool events_check_enabled; |
294 | 294 | ||
295 | extern bool pm_check_wakeup_events(void); | 295 | extern bool pm_check_wakeup_events(void); |
296 | extern bool pm_get_wakeup_count(unsigned long *count); | 296 | extern bool pm_get_wakeup_count(unsigned int *count); |
297 | extern bool pm_save_wakeup_count(unsigned long count); | 297 | extern bool pm_save_wakeup_count(unsigned int count); |
298 | #else /* !CONFIG_PM_SLEEP */ | 298 | #else /* !CONFIG_PM_SLEEP */ |
299 | 299 | ||
300 | static inline int register_pm_notifier(struct notifier_block *nb) | 300 | static inline int register_pm_notifier(struct notifier_block *nb) |
@@ -308,6 +308,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) | |||
308 | } | 308 | } |
309 | 309 | ||
310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) | 310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) |
311 | |||
312 | static inline bool pm_check_wakeup_events(void) { return true; } | ||
311 | #endif /* !CONFIG_PM_SLEEP */ | 313 | #endif /* !CONFIG_PM_SLEEP */ |
312 | 314 | ||
313 | extern struct mutex pm_mutex; | 315 | extern struct mutex pm_mutex; |
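These wakeup-count helpers back the /sys/power/wakeup_count handshake: a suspend manager reads the current count, writes the same value back, and only proceeds to /sys/power/state if that write succeeds (it fails when wakeup events raced in between). A hedged userspace sketch of that flow, with minimal error handling:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int try_suspend(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/power/wakeup_count", O_RDWR);

	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n <= 0) {
		close(fd);
		return -1;
	}
	buf[n] = '\0';
	/* writing the count back fails if wakeup events arrived meanwhile */
	if (write(fd, buf, strlen(buf)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, "mem", 3);
	close(fd);
	return n == 3 ? 0 : -1;
}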
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 96eb576d82fd..30b881555fa5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -164,6 +164,10 @@ int sysfs_add_file_to_group(struct kobject *kobj, | |||
164 | const struct attribute *attr, const char *group); | 164 | const struct attribute *attr, const char *group); |
165 | void sysfs_remove_file_from_group(struct kobject *kobj, | 165 | void sysfs_remove_file_from_group(struct kobject *kobj, |
166 | const struct attribute *attr, const char *group); | 166 | const struct attribute *attr, const char *group); |
167 | int sysfs_merge_group(struct kobject *kobj, | ||
168 | const struct attribute_group *grp); | ||
169 | void sysfs_unmerge_group(struct kobject *kobj, | ||
170 | const struct attribute_group *grp); | ||
167 | 171 | ||
168 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); | 172 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); |
169 | void sysfs_notify_dirent(struct sysfs_dirent *sd); | 173 | void sysfs_notify_dirent(struct sysfs_dirent *sd); |
@@ -302,6 +306,17 @@ static inline void sysfs_remove_file_from_group(struct kobject *kobj, | |||
302 | { | 306 | { |
303 | } | 307 | } |
304 | 308 | ||
309 | static inline int sysfs_merge_group(struct kobject *kobj, | ||
310 | const struct attribute_group *grp) | ||
311 | { | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static inline void sysfs_unmerge_group(struct kobject *kobj, | ||
316 | const struct attribute_group *grp) | ||
317 | { | ||
318 | } | ||
319 | |||
305 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, | 320 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, |
306 | const char *attr) | 321 | const char *attr) |
307 | { | 322 | { |
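sysfs_merge_group() adds the files of an attribute group into a subdirectory of the kobject that an earlier group with the same name already created, and sysfs_unmerge_group() removes them again. A hedged sketch, assuming a pre-existing "power" directory and made-up attribute names:

/* Illustrative: merge one extra read-only file into an existing group dir. */
static ssize_t my_extra_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "42\n");
}

static struct kobj_attribute my_extra_attr = __ATTR(my_extra, 0444,
						    my_extra_show, NULL);

static struct attribute *my_extra_attrs[] = {
	&my_extra_attr.attr,
	NULL,
};

static const struct attribute_group my_extra_group = {
	.name	= "power",		/* must match the existing directory */
	.attrs	= my_extra_attrs,
};

static int my_add_extra(struct kobject *kobj)
{
	return sysfs_merge_group(kobj, &my_extra_group);
}

static void my_del_extra(struct kobject *kobj)
{
	sysfs_unmerge_group(kobj, &my_extra_group);
}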
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild index 76990937f4c9..67b501c302b2 100644 --- a/include/linux/tc_act/Kbuild +++ b/include/linux/tc_act/Kbuild | |||
@@ -4,3 +4,4 @@ header-y += tc_mirred.h | |||
4 | header-y += tc_pedit.h | 4 | header-y += tc_pedit.h |
5 | header-y += tc_nat.h | 5 | header-y += tc_nat.h |
6 | header-y += tc_skbedit.h | 6 | header-y += tc_skbedit.h |
7 | header-y += tc_csum.h | ||
diff --git a/include/linux/tc_act/tc_csum.h b/include/linux/tc_act/tc_csum.h new file mode 100644 index 000000000000..a047c49a3153 --- /dev/null +++ b/include/linux/tc_act/tc_csum.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __LINUX_TC_CSUM_H | ||
2 | #define __LINUX_TC_CSUM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/pkt_cls.h> | ||
6 | |||
7 | #define TCA_ACT_CSUM 16 | ||
8 | |||
9 | enum { | ||
10 | TCA_CSUM_UNSPEC, | ||
11 | TCA_CSUM_PARMS, | ||
12 | TCA_CSUM_TM, | ||
13 | __TCA_CSUM_MAX | ||
14 | }; | ||
15 | #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1) | ||
16 | |||
17 | enum { | ||
18 | TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1, | ||
19 | TCA_CSUM_UPDATE_FLAG_ICMP = 2, | ||
20 | TCA_CSUM_UPDATE_FLAG_IGMP = 4, | ||
21 | TCA_CSUM_UPDATE_FLAG_TCP = 8, | ||
22 | TCA_CSUM_UPDATE_FLAG_UDP = 16, | ||
23 | TCA_CSUM_UPDATE_FLAG_UDPLITE = 32 | ||
24 | }; | ||
25 | |||
26 | struct tc_csum { | ||
27 | tc_gen; | ||
28 | |||
29 | __u32 update_flags; | ||
30 | }; | ||
31 | |||
32 | #endif /* __LINUX_TC_CSUM_H */ | ||
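The new act_csum action is configured through a TCA_CSUM_PARMS attribute carrying struct tc_csum; update_flags selects which checksums get recomputed. A hedged fragment showing how the structure would be filled (the surrounding netlink message construction is omitted):

/* Illustrative: recompute IPv4, TCP and UDP checksums, then accept. */
struct tc_csum parm = {
	.action		= TC_ACT_OK,			/* from tc_gen */
	.update_flags	= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
			  TCA_CSUM_UPDATE_FLAG_TCP |
			  TCA_CSUM_UPDATE_FLAG_UDP,
};
/* ...attached to the request as attribute TCA_CSUM_PARMS... */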
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index 0864206ec1a3..7138962664f8 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h | |||
@@ -79,6 +79,7 @@ enum { | |||
79 | TCF_META_ID_SK_SENDMSG_OFF, | 79 | TCF_META_ID_SK_SENDMSG_OFF, |
80 | TCF_META_ID_SK_WRITE_PENDING, | 80 | TCF_META_ID_SK_WRITE_PENDING, |
81 | TCF_META_ID_VLAN_TAG, | 81 | TCF_META_ID_VLAN_TAG, |
82 | TCF_META_ID_RXHASH, | ||
82 | __TCF_META_ID_MAX | 83 | __TCF_META_ID_MAX |
83 | }; | 84 | }; |
84 | #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) | 85 | #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a778ee024590..e64f4c67d0ef 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -105,6 +105,7 @@ enum { | |||
105 | #define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ | 105 | #define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ |
106 | #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ | 106 | #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ |
107 | #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ | 107 | #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ |
108 | #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ | ||
108 | 109 | ||
109 | /* for TCP_INFO socket option */ | 110 | /* for TCP_INFO socket option */ |
110 | #define TCPI_OPT_TIMESTAMPS 1 | 111 | #define TCPI_OPT_TIMESTAMPS 1 |
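TCP_USER_TIMEOUT caps how long transmitted data may remain unacknowledged before the kernel forcibly closes the connection; the option value is in milliseconds. A hedged userspace example:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18	/* matches the new define above */
#endif

/* Abort an established connection after ~30s without ACK progress. */
static int set_user_timeout(int fd)
{
	unsigned int timeout_ms = 30000;

	return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
			  &timeout_ms, sizeof(timeout_ms));
}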
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index a8cc4e13434c..c90696544176 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -23,12 +23,12 @@ struct restart_block { | |||
23 | }; | 23 | }; |
24 | /* For futex_wait and futex_wait_requeue_pi */ | 24 | /* For futex_wait and futex_wait_requeue_pi */ |
25 | struct { | 25 | struct { |
26 | u32 *uaddr; | 26 | u32 __user *uaddr; |
27 | u32 val; | 27 | u32 val; |
28 | u32 flags; | 28 | u32 flags; |
29 | u32 bitset; | 29 | u32 bitset; |
30 | u64 time; | 30 | u64 time; |
31 | u32 *uaddr2; | 31 | u32 __user *uaddr2; |
32 | } futex; | 32 | } futex; |
33 | /* For nanosleep */ | 33 | /* For nanosleep */ |
34 | struct { | 34 | struct { |
diff --git a/include/linux/tipc.h b/include/linux/tipc.h index 181c8d0e6f73..d10614b29d59 100644 --- a/include/linux/tipc.h +++ b/include/linux/tipc.h | |||
@@ -127,17 +127,23 @@ static inline unsigned int tipc_node(__u32 addr) | |||
127 | * TIPC topology subscription service definitions | 127 | * TIPC topology subscription service definitions |
128 | */ | 128 | */ |
129 | 129 | ||
130 | #define TIPC_SUB_SERVICE 0x00 /* Filter for service availability */ | 130 | #define TIPC_SUB_PORTS 0x01 /* filter for port availability */ |
131 | #define TIPC_SUB_PORTS 0x01 /* Filter for port availability */ | 131 | #define TIPC_SUB_SERVICE 0x02 /* filter for service availability */ |
132 | #define TIPC_SUB_CANCEL 0x04 /* Cancel a subscription */ | 132 | #define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */ |
133 | #if 0 | ||
134 | /* The following filter options are not currently implemented */ | ||
135 | #define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */ | ||
136 | #define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */ | ||
137 | #define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */ | ||
138 | #endif | ||
133 | 139 | ||
134 | #define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ | 140 | #define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ |
135 | 141 | ||
136 | struct tipc_subscr { | 142 | struct tipc_subscr { |
137 | struct tipc_name_seq seq; /* NBO. Name sequence of interest */ | 143 | struct tipc_name_seq seq; /* name sequence of interest */ |
138 | __u32 timeout; /* NBO. Subscription duration (in ms) */ | 144 | __u32 timeout; /* subscription duration (in ms) */ |
139 | __u32 filter; /* NBO. Bitmask of filter options */ | 145 | __u32 filter; /* bitmask of filter options */ |
140 | char usr_handle[8]; /* Opaque. Available for subscriber use */ | 146 | char usr_handle[8]; /* available for subscriber use */ |
141 | }; | 147 | }; |
142 | 148 | ||
143 | #define TIPC_PUBLISHED 1 /* publication event */ | 149 | #define TIPC_PUBLISHED 1 /* publication event */ |
@@ -145,11 +151,11 @@ struct tipc_subscr { | |||
145 | #define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ | 151 | #define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ |
146 | 152 | ||
147 | struct tipc_event { | 153 | struct tipc_event { |
148 | __u32 event; /* NBO. Event type, as defined above */ | 154 | __u32 event; /* event type */ |
149 | __u32 found_lower; /* NBO. Matching name seq instances */ | 155 | __u32 found_lower; /* matching name seq instances */ |
150 | __u32 found_upper; /* " " " " " */ | 156 | __u32 found_upper; /* " " " " */ |
151 | struct tipc_portid port; /* NBO. Associated port */ | 157 | struct tipc_portid port; /* associated port */ |
152 | struct tipc_subscr s; /* Original, associated subscription */ | 158 | struct tipc_subscr s; /* associated subscription */ |
153 | }; | 159 | }; |
154 | 160 | ||
155 | /* | 161 | /* |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 64e084ff5e5c..b91a40e847d2 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -201,6 +201,12 @@ int arch_update_cpu_topology(void); | |||
201 | .balance_interval = 64, \ | 201 | .balance_interval = 64, \ |
202 | } | 202 | } |
203 | 203 | ||
204 | #ifdef CONFIG_SCHED_BOOK | ||
205 | #ifndef SD_BOOK_INIT | ||
206 | #error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!! | ||
207 | #endif | ||
208 | #endif /* CONFIG_SCHED_BOOK */ | ||
209 | |||
204 | #ifdef CONFIG_NUMA | 210 | #ifdef CONFIG_NUMA |
205 | #ifndef SD_NODE_INIT | 211 | #ifndef SD_NODE_INIT |
206 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! | 212 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 103d1b61aacb..a4a90b6726ce 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/rcupdate.h> | 19 | #include <linux/rcupdate.h> |
20 | #include <linux/jump_label.h> | ||
20 | 21 | ||
21 | struct module; | 22 | struct module; |
22 | struct tracepoint; | 23 | struct tracepoint; |
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, | |||
145 | extern struct tracepoint __tracepoint_##name; \ | 146 | extern struct tracepoint __tracepoint_##name; \ |
146 | static inline void trace_##name(proto) \ | 147 | static inline void trace_##name(proto) \ |
147 | { \ | 148 | { \ |
148 | if (unlikely(__tracepoint_##name.state)) \ | 149 | JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ |
150 | return; \ | ||
151 | do_trace: \ | ||
149 | __DO_TRACE(&__tracepoint_##name, \ | 152 | __DO_TRACE(&__tracepoint_##name, \ |
150 | TP_PROTO(data_proto), \ | 153 | TP_PROTO(data_proto), \ |
151 | TP_ARGS(data_args)); \ | 154 | TP_ARGS(data_args)); \ |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 67d64e6efe7a..86be0cdeb11b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -256,6 +256,7 @@ struct tty_operations; | |||
256 | struct tty_struct { | 256 | struct tty_struct { |
257 | int magic; | 257 | int magic; |
258 | struct kref kref; | 258 | struct kref kref; |
259 | struct device *dev; | ||
259 | struct tty_driver *driver; | 260 | struct tty_driver *driver; |
260 | const struct tty_operations *ops; | 261 | const struct tty_operations *ops; |
261 | int index; | 262 | int index; |
@@ -465,7 +466,7 @@ extern void proc_clear_tty(struct task_struct *p); | |||
465 | extern struct tty_struct *get_current_tty(void); | 466 | extern struct tty_struct *get_current_tty(void); |
466 | extern void tty_default_fops(struct file_operations *fops); | 467 | extern void tty_default_fops(struct file_operations *fops); |
467 | extern struct tty_struct *alloc_tty_struct(void); | 468 | extern struct tty_struct *alloc_tty_struct(void); |
468 | extern void tty_add_file(struct tty_struct *tty, struct file *file); | 469 | extern int tty_add_file(struct tty_struct *tty, struct file *file); |
469 | extern void free_tty_struct(struct tty_struct *tty); | 470 | extern void free_tty_struct(struct tty_struct *tty); |
470 | extern void initialize_tty_struct(struct tty_struct *tty, | 471 | extern void initialize_tty_struct(struct tty_struct *tty, |
471 | struct tty_driver *driver, int idx); | 472 | struct tty_driver *driver, int idx); |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index b08677982525..db2d227694da 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -224,6 +224,12 @@ | |||
224 | * unless the tty also has a valid tty->termiox pointer. | 224 | * unless the tty also has a valid tty->termiox pointer. |
225 | * | 225 | * |
226 | * Optional: Called under the termios lock | 226 | * Optional: Called under the termios lock |
227 | * | ||
228 | * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); | ||
229 | * | ||
230 | * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel | ||
231 | * structure to complete. This method is optional and will only be called | ||
232 | * if provided (otherwise EINVAL will be returned). | ||
227 | */ | 233 | */ |
228 | 234 | ||
229 | #include <linux/fs.h> | 235 | #include <linux/fs.h> |
@@ -232,6 +238,7 @@ | |||
232 | 238 | ||
233 | struct tty_struct; | 239 | struct tty_struct; |
234 | struct tty_driver; | 240 | struct tty_driver; |
241 | struct serial_icounter_struct; | ||
235 | 242 | ||
236 | struct tty_operations { | 243 | struct tty_operations { |
237 | struct tty_struct * (*lookup)(struct tty_driver *driver, | 244 | struct tty_struct * (*lookup)(struct tty_driver *driver, |
@@ -268,6 +275,8 @@ struct tty_operations { | |||
268 | unsigned int set, unsigned int clear); | 275 | unsigned int set, unsigned int clear); |
269 | int (*resize)(struct tty_struct *tty, struct winsize *ws); | 276 | int (*resize)(struct tty_struct *tty, struct winsize *ws); |
270 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); | 277 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); |
278 | int (*get_icount)(struct tty_struct *tty, | ||
279 | struct serial_icounter_struct *icount); | ||
271 | #ifdef CONFIG_CONSOLE_POLL | 280 | #ifdef CONFIG_CONSOLE_POLL |
272 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | 281 | int (*poll_init)(struct tty_driver *driver, int line, char *options); |
273 | int (*poll_get_char)(struct tty_driver *driver, int line); | 282 | int (*poll_get_char)(struct tty_driver *driver, int line); |
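The new get_icount() hook lets the tty core answer TIOCGICOUNT itself: the driver only fills in a kernel-space struct serial_icounter_struct. A hedged sketch of a driver-side implementation; the per-port bookkeeping is invented:

/* Illustrative: "struct my_port" and its stats fields are placeholders. */
static int my_get_icount(struct tty_struct *tty,
			 struct serial_icounter_struct *icount)
{
	struct my_port *port = tty->driver_data;

	icount->rx	= port->rx_chars;
	icount->tx	= port->tx_chars;
	icount->frame	= port->frame_errs;
	icount->overrun	= port->overruns;
	return 0;
}

/* and in the driver's tty_operations: */
static const struct tty_operations my_ops = {
	/* ...other hooks... */
	.get_icount	= my_get_icount,
};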
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 5dcc9ff72f69..d6188e5a52df 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h | |||
@@ -108,7 +108,7 @@ extern void uio_event_notify(struct uio_info *info); | |||
108 | 108 | ||
109 | /* defines for uio_info->irq */ | 109 | /* defines for uio_info->irq */ |
110 | #define UIO_IRQ_CUSTOM -1 | 110 | #define UIO_IRQ_CUSTOM -1 |
111 | #define UIO_IRQ_NONE -2 | 111 | #define UIO_IRQ_NONE 0 |
112 | 112 | ||
113 | /* defines for uio_mem->memtype */ | 113 | /* defines for uio_mem->memtype */ |
114 | #define UIO_MEM_NONE 0 | 114 | #define UIO_MEM_NONE 0 |
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index c117a68d04a7..5e86dc771da4 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h | |||
@@ -32,6 +32,8 @@ | |||
32 | 32 | ||
33 | #define USB_CDC_PROTO_EEM 7 | 33 | #define USB_CDC_PROTO_EEM 7 |
34 | 34 | ||
35 | #define USB_CDC_NCM_PROTO_NTB 1 | ||
36 | |||
35 | /*-------------------------------------------------------------------------*/ | 37 | /*-------------------------------------------------------------------------*/ |
36 | 38 | ||
37 | /* | 39 | /* |
@@ -274,13 +276,13 @@ struct usb_cdc_notification { | |||
274 | /* | 276 | /* |
275 | * Class Specific structures and constants | 277 | * Class Specific structures and constants |
276 | * | 278 | * |
277 | * CDC NCM parameter structure, CDC NCM subclass 6.2.1 | 279 | * CDC NCM NTB parameters structure, CDC NCM subclass 6.2.1 |
278 | * | 280 | * |
279 | */ | 281 | */ |
280 | 282 | ||
281 | struct usb_cdc_ncm_ntb_parameter { | 283 | struct usb_cdc_ncm_ntb_parameters { |
282 | __le16 wLength; | 284 | __le16 wLength; |
283 | __le16 bmNtbFormatSupported; | 285 | __le16 bmNtbFormatsSupported; |
284 | __le32 dwNtbInMaxSize; | 286 | __le32 dwNtbInMaxSize; |
285 | __le16 wNdpInDivisor; | 287 | __le16 wNdpInDivisor; |
286 | __le16 wNdpInPayloadRemainder; | 288 | __le16 wNdpInPayloadRemainder; |
@@ -297,8 +299,8 @@ struct usb_cdc_ncm_ntb_parameter { | |||
297 | * CDC NCM transfer headers, CDC NCM subclass 3.2 | 299 | * CDC NCM transfer headers, CDC NCM subclass 3.2 |
298 | */ | 300 | */ |
299 | 301 | ||
300 | #define NCM_NTH16_SIGN 0x484D434E /* NCMH */ | 302 | #define USB_CDC_NCM_NTH16_SIGN 0x484D434E /* NCMH */ |
301 | #define NCM_NTH32_SIGN 0x686D636E /* ncmh */ | 303 | #define USB_CDC_NCM_NTH32_SIGN 0x686D636E /* ncmh */ |
302 | 304 | ||
303 | struct usb_cdc_ncm_nth16 { | 305 | struct usb_cdc_ncm_nth16 { |
304 | __le32 dwSignature; | 306 | __le32 dwSignature; |
@@ -320,25 +322,78 @@ struct usb_cdc_ncm_nth32 { | |||
320 | * CDC NCM datagram pointers, CDC NCM subclass 3.3 | 322 | * CDC NCM datagram pointers, CDC NCM subclass 3.3 |
321 | */ | 323 | */ |
322 | 324 | ||
323 | #define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ | 325 | #define USB_CDC_NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ |
324 | #define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ | 326 | #define USB_CDC_NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ |
325 | #define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ | 327 | #define USB_CDC_NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ |
326 | #define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ | 328 | #define USB_CDC_NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ |
329 | |||
330 | /* 16-bit NCM Datagram Pointer Entry */ | ||
331 | struct usb_cdc_ncm_dpe16 { | ||
332 | __le16 wDatagramIndex; | ||
333 | __le16 wDatagramLength; | ||
334 | } __attribute__((__packed__)); | ||
327 | 335 | ||
336 | /* 16-bit NCM Datagram Pointer Table */ | ||
328 | struct usb_cdc_ncm_ndp16 { | 337 | struct usb_cdc_ncm_ndp16 { |
329 | __le32 dwSignature; | 338 | __le32 dwSignature; |
330 | __le16 wLength; | 339 | __le16 wLength; |
331 | __le16 wNextFpIndex; | 340 | __le16 wNextFpIndex; |
332 | __u8 data[0]; | 341 | struct usb_cdc_ncm_dpe16 dpe16[0]; |
333 | } __attribute__ ((packed)); | 342 | } __attribute__ ((packed)); |
334 | 343 | ||
344 | /* 32-bit NCM Datagram Pointer Entry */ | ||
345 | struct usb_cdc_ncm_dpe32 { | ||
346 | __le32 dwDatagramIndex; | ||
347 | __le32 dwDatagramLength; | ||
348 | } __attribute__((__packed__)); | ||
349 | |||
350 | /* 32-bit NCM Datagram Pointer Table */ | ||
335 | struct usb_cdc_ncm_ndp32 { | 351 | struct usb_cdc_ncm_ndp32 { |
336 | __le32 dwSignature; | 352 | __le32 dwSignature; |
337 | __le16 wLength; | 353 | __le16 wLength; |
338 | __le16 wReserved6; | 354 | __le16 wReserved6; |
339 | __le32 dwNextFpIndex; | 355 | __le32 dwNextNdpIndex; |
340 | __le32 dwReserved12; | 356 | __le32 dwReserved12; |
341 | __u8 data[0]; | 357 | struct usb_cdc_ncm_dpe32 dpe32[0]; |
342 | } __attribute__ ((packed)); | 358 | } __attribute__ ((packed)); |
343 | 359 | ||
360 | /* CDC NCM subclass 3.2.1 and 3.2.2 */ | ||
361 | #define USB_CDC_NCM_NDP16_INDEX_MIN 0x000C | ||
362 | #define USB_CDC_NCM_NDP32_INDEX_MIN 0x0010 | ||
363 | |||
364 | /* CDC NCM subclass 3.3.3 Datagram Formatting */ | ||
365 | #define USB_CDC_NCM_DATAGRAM_FORMAT_CRC 0x30 | ||
366 | #define USB_CDC_NCM_DATAGRAM_FORMAT_NOCRC 0X31 | ||
367 | |||
368 | /* CDC NCM subclass 4.2 NCM Communications Interface Protocol Code */ | ||
369 | #define USB_CDC_NCM_PROTO_CODE_NO_ENCAP_COMMANDS 0x00 | ||
370 | #define USB_CDC_NCM_PROTO_CODE_EXTERN_PROTO 0xFE | ||
371 | |||
372 | /* CDC NCM subclass 5.2.1 NCM Functional Descriptor, bmNetworkCapabilities */ | ||
373 | #define USB_CDC_NCM_NCAP_ETH_FILTER (1 << 0) | ||
374 | #define USB_CDC_NCM_NCAP_NET_ADDRESS (1 << 1) | ||
375 | #define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2) | ||
376 | #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) | ||
377 | #define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) | ||
378 | |||
379 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ | ||
380 | #define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) | ||
381 | #define USB_CDC_NCM_NTB32_SUPPORTED (1 << 1) | ||
382 | |||
383 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ | ||
384 | #define USB_CDC_NCM_NDP_ALIGN_MIN_SIZE 0x04 | ||
385 | #define USB_CDC_NCM_NTB_MAX_LENGTH 0x1C | ||
386 | |||
387 | /* CDC NCM subclass 6.2.5 SetNtbFormat */ | ||
388 | #define USB_CDC_NCM_NTB16_FORMAT 0x00 | ||
389 | #define USB_CDC_NCM_NTB32_FORMAT 0x01 | ||
390 | |||
391 | /* CDC NCM subclass 6.2.7 SetNtbInputSize */ | ||
392 | #define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 | ||
393 | #define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 | ||
394 | |||
395 | /* CDC NCM subclass 6.2.11 SetCrcMode */ | ||
396 | #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 | ||
397 | #define USB_CDC_NCM_CRC_APPENDED 0x01 | ||
398 | |||
344 | #endif /* __LINUX_USB_CDC_H */ | 399 | #endif /* __LINUX_USB_CDC_H */ |
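With the renamed structures an NDP16 is now an explicit array of usb_cdc_ncm_dpe16 entries terminated by a zero entry, which a driver can walk directly instead of indexing into a raw byte blob. A hedged parsing fragment; my_rx_datagram() is an assumed consumer and the bounds checks a real driver needs are omitted:

/* Illustrative: "ntb" points at a received NTB, "ndp" at a 16-bit NDP
 * inside it. */
static void my_walk_ndp16(const u8 *ntb, const struct usb_cdc_ncm_ndp16 *ndp)
{
	const struct usb_cdc_ncm_dpe16 *dpe = ndp->dpe16;

	while (le16_to_cpu(dpe->wDatagramIndex) &&
	       le16_to_cpu(dpe->wDatagramLength)) {
		const u8 *datagram = ntb + le16_to_cpu(dpe->wDatagramIndex);
		unsigned int len = le16_to_cpu(dpe->wDatagramLength);

		my_rx_datagram(datagram, len);	/* placeholder consumer */
		dpe++;
	}
}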
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index da2ed77d3e8d..f917bbbc8901 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h | |||
@@ -123,8 +123,23 @@ | |||
123 | #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ | 123 | #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ |
124 | #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ | 124 | #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ |
125 | 125 | ||
126 | /* | ||
127 | * New Feature Selectors as added by USB 3.0 | ||
128 | * See USB 3.0 spec Table 9-6 | ||
129 | */ | ||
130 | #define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */ | ||
131 | #define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */ | ||
132 | #define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */ | ||
133 | #define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */ | ||
134 | |||
135 | #define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00 | ||
136 | |||
126 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ | 137 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ |
127 | 138 | ||
139 | /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ | ||
140 | #define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */ | ||
141 | #define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ | ||
142 | #define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ | ||
128 | 143 | ||
129 | /** | 144 | /** |
130 | * struct usb_ctrlrequest - SETUP data for a USB device control request | 145 | * struct usb_ctrlrequest - SETUP data for a USB device control request |
@@ -675,6 +690,7 @@ struct usb_bos_descriptor { | |||
675 | __u8 bNumDeviceCaps; | 690 | __u8 bNumDeviceCaps; |
676 | } __attribute__((packed)); | 691 | } __attribute__((packed)); |
677 | 692 | ||
693 | #define USB_DT_BOS_SIZE 5 | ||
678 | /*-------------------------------------------------------------------------*/ | 694 | /*-------------------------------------------------------------------------*/ |
679 | 695 | ||
680 | /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ | 696 | /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ |
@@ -712,16 +728,56 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ | |||
712 | __u8 bReserved; | 728 | __u8 bReserved; |
713 | } __attribute__((packed)); | 729 | } __attribute__((packed)); |
714 | 730 | ||
731 | /* USB 2.0 Extension descriptor */ | ||
715 | #define USB_CAP_TYPE_EXT 2 | 732 | #define USB_CAP_TYPE_EXT 2 |
716 | 733 | ||
717 | struct usb_ext_cap_descriptor { /* Link Power Management */ | 734 | struct usb_ext_cap_descriptor { /* Link Power Management */ |
718 | __u8 bLength; | 735 | __u8 bLength; |
719 | __u8 bDescriptorType; | 736 | __u8 bDescriptorType; |
720 | __u8 bDevCapabilityType; | 737 | __u8 bDevCapabilityType; |
721 | __u8 bmAttributes; | 738 | __le32 bmAttributes; |
722 | #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ | 739 | #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ |
723 | } __attribute__((packed)); | 740 | } __attribute__((packed)); |
724 | 741 | ||
742 | #define USB_DT_USB_EXT_CAP_SIZE 7 | ||
743 | |||
744 | /* | ||
745 | * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB | ||
746 | * specific device level capabilities | ||
747 | */ | ||
748 | #define USB_SS_CAP_TYPE 3 | ||
749 | struct usb_ss_cap_descriptor { /* Link Power Management */ | ||
750 | __u8 bLength; | ||
751 | __u8 bDescriptorType; | ||
752 | __u8 bDevCapabilityType; | ||
753 | __u8 bmAttributes; | ||
754 | #define USB_LTM_SUPPORT (1 << 1) /* supports LTM */ | ||
755 | __le16 wSpeedSupported; | ||
756 | #define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */ | ||
757 | #define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */ | ||
758 | #define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */ | ||
759 | #define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */ | ||
760 | __u8 bFunctionalitySupport; | ||
761 | __u8 bU1devExitLat; | ||
762 | __le16 bU2DevExitLat; | ||
763 | } __attribute__((packed)); | ||
764 | |||
765 | #define USB_DT_USB_SS_CAP_SIZE 10 | ||
766 | |||
767 | /* | ||
768 | * Container ID Capability descriptor: Defines the instance unique ID used to | ||
769 | * identify the instance across all operating modes | ||
770 | */ | ||
771 | #define CONTAINER_ID_TYPE 4 | ||
772 | struct usb_ss_container_id_descriptor { | ||
773 | __u8 bLength; | ||
774 | __u8 bDescriptorType; | ||
775 | __u8 bDevCapabilityType; | ||
776 | __u8 bReserved; | ||
777 | __u8 ContainerID[16]; /* 128-bit number */ | ||
778 | } __attribute__((packed)); | ||
779 | |||
780 | #define USB_DT_USB_SS_CONTN_ID_SIZE 20 | ||
725 | /*-------------------------------------------------------------------------*/ | 781 | /*-------------------------------------------------------------------------*/ |
726 | 782 | ||
727 | /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with | 783 | /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with |
@@ -808,4 +864,14 @@ enum usb_device_state { | |||
808 | */ | 864 | */ |
809 | }; | 865 | }; |
810 | 866 | ||
867 | /*-------------------------------------------------------------------------*/ | ||
868 | |||
869 | /* | ||
870 | * As per USB compliance update, a device that is actively drawing | ||
871 | * more than 100mA from USB must report itself as bus-powered in | ||
872 | * the GetStatus(DEVICE) call. | ||
873 | * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 | ||
874 | */ | ||
875 | #define USB_SELF_POWER_VBUS_MAX_DRAW 100 | ||
876 | |||
811 | #endif /* __LINUX_USB_CH9_H */ | 877 | #endif /* __LINUX_USB_CH9_H */ |
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 617068134ae8..3d29a7dcac2d 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
@@ -161,8 +161,6 @@ ep_choose(struct usb_gadget *g, struct usb_endpoint_descriptor *hs, | |||
161 | * and by language IDs provided in control requests. | 161 | * and by language IDs provided in control requests. |
162 | * @descriptors: Table of descriptors preceding all function descriptors. | 162 | * @descriptors: Table of descriptors preceding all function descriptors. |
163 | * Examples include OTG and vendor-specific descriptors. | 163 | * Examples include OTG and vendor-specific descriptors. |
164 | * @bind: Called from @usb_add_config() to allocate resources unique to this | ||
165 | * configuration and to call @usb_add_function() for each function used. | ||
166 | * @unbind: Reverses @bind; called as a side effect of unregistering the | 164 | * @unbind: Reverses @bind; called as a side effect of unregistering the |
167 | * driver which added this configuration. | 165 | * driver which added this configuration. |
168 | * @setup: Used to delegate control requests that aren't handled by standard | 166 | * @setup: Used to delegate control requests that aren't handled by standard |
@@ -207,8 +205,7 @@ struct usb_configuration { | |||
207 | * we can't restructure things to avoid mismatching... | 205 | * we can't restructure things to avoid mismatching... |
208 | */ | 206 | */ |
209 | 207 | ||
210 | /* configuration management: bind/unbind */ | 208 | /* configuration management: unbind/setup */ |
211 | int (*bind)(struct usb_configuration *); | ||
212 | void (*unbind)(struct usb_configuration *); | 209 | void (*unbind)(struct usb_configuration *); |
213 | int (*setup)(struct usb_configuration *, | 210 | int (*setup)(struct usb_configuration *, |
214 | const struct usb_ctrlrequest *); | 211 | const struct usb_ctrlrequest *); |
@@ -232,20 +229,24 @@ struct usb_configuration { | |||
232 | }; | 229 | }; |
233 | 230 | ||
234 | int usb_add_config(struct usb_composite_dev *, | 231 | int usb_add_config(struct usb_composite_dev *, |
235 | struct usb_configuration *); | 232 | struct usb_configuration *, |
233 | int (*)(struct usb_configuration *)); | ||
236 | 234 | ||
237 | /** | 235 | /** |
238 | * struct usb_composite_driver - groups configurations into a gadget | 236 | * struct usb_composite_driver - groups configurations into a gadget |
239 | * @name: For diagnostics, identifies the driver. | 237 | * @name: For diagnostics, identifies the driver. |
238 | * @iProduct: Used as iProduct override if @dev->iProduct is not set. | ||
239 | * If NULL value of @name is taken. | ||
240 | * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is | ||
241 | * not set. If NULL a default "<system> <release> with <udc>" value | ||
242 | * will be used. | ||
240 | * @dev: Template descriptor for the device, including default device | 243 | * @dev: Template descriptor for the device, including default device |
241 | * identifiers. | 244 | * identifiers. |
242 | * @strings: tables of strings, keyed by identifiers assigned during bind() | 245 | * @strings: tables of strings, keyed by identifiers assigned during bind() |
243 | * and language IDs provided in control requests | 246 | * and language IDs provided in control requests |
244 | * @bind: (REQUIRED) Used to allocate resources that are shared across the | 247 | * @needs_serial: set to 1 if the gadget needs userspace to provide |
245 | * whole device, such as string IDs, and add its configurations using | 248 | * a serial number. If one is not provided, warning will be printed. |
246 | * @usb_add_config(). This may fail by returning a negative errno | 249 | * @unbind: Reverses bind; called as a side effect of unregistering |
247 | * value; it should return zero on successful initialization. | ||
248 | * @unbind: Reverses @bind(); called as a side effect of unregistering | ||
249 | * this driver. | 250 | * this driver. |
250 | * @disconnect: optional driver disconnect method | 251 | * @disconnect: optional driver disconnect method |
251 | * @suspend: Notifies when the host stops sending USB traffic, | 252 | * @suspend: Notifies when the host stops sending USB traffic, |
@@ -256,7 +257,7 @@ int usb_add_config(struct usb_composite_dev *, | |||
256 | * Devices default to reporting self powered operation. Devices which rely | 257 | * Devices default to reporting self powered operation. Devices which rely |
257 | * on bus powered operation should report this in their @bind() method. | 258 | * on bus powered operation should report this in their @bind() method. |
258 | * | 259 | * |
259 | * Before returning from @bind, various fields in the template descriptor | 260 | * Before returning from bind, various fields in the template descriptor |
260 | * may be overridden. These include the idVendor/idProduct/bcdDevice values | 261 | * may be overridden. These include the idVendor/idProduct/bcdDevice values |
261 | * normally to bind the appropriate host side driver, and the three strings | 262 | * normally to bind the appropriate host side driver, and the three strings |
262 | * (iManufacturer, iProduct, iSerialNumber) normally used to provide user | 263 | * (iManufacturer, iProduct, iSerialNumber) normally used to provide user |
@@ -266,15 +267,12 @@ int usb_add_config(struct usb_composite_dev *, | |||
266 | */ | 267 | */ |
267 | struct usb_composite_driver { | 268 | struct usb_composite_driver { |
268 | const char *name; | 269 | const char *name; |
270 | const char *iProduct; | ||
271 | const char *iManufacturer; | ||
269 | const struct usb_device_descriptor *dev; | 272 | const struct usb_device_descriptor *dev; |
270 | struct usb_gadget_strings **strings; | 273 | struct usb_gadget_strings **strings; |
274 | unsigned needs_serial:1; | ||
271 | 275 | ||
272 | /* REVISIT: bind() functions can be marked __init, which | ||
273 | * makes trouble for section mismatch analysis. See if | ||
274 | * we can't restructure things to avoid mismatching... | ||
275 | */ | ||
276 | |||
277 | int (*bind)(struct usb_composite_dev *); | ||
278 | int (*unbind)(struct usb_composite_dev *); | 276 | int (*unbind)(struct usb_composite_dev *); |
279 | 277 | ||
280 | void (*disconnect)(struct usb_composite_dev *); | 278 | void (*disconnect)(struct usb_composite_dev *); |
@@ -284,8 +282,9 @@ struct usb_composite_driver { | |||
284 | void (*resume)(struct usb_composite_dev *); | 282 | void (*resume)(struct usb_composite_dev *); |
285 | }; | 283 | }; |
286 | 284 | ||
287 | extern int usb_composite_register(struct usb_composite_driver *); | 285 | extern int usb_composite_probe(struct usb_composite_driver *driver, |
288 | extern void usb_composite_unregister(struct usb_composite_driver *); | 286 | int (*bind)(struct usb_composite_dev *cdev)); |
287 | extern void usb_composite_unregister(struct usb_composite_driver *driver); | ||
289 | 288 | ||
290 | 289 | ||
291 | /** | 290 | /** |
@@ -334,6 +333,9 @@ struct usb_composite_dev { | |||
334 | struct list_head configs; | 333 | struct list_head configs; |
335 | struct usb_composite_driver *driver; | 334 | struct usb_composite_driver *driver; |
336 | u8 next_string_id; | 335 | u8 next_string_id; |
336 | u8 manufacturer_override; | ||
337 | u8 product_override; | ||
338 | u8 serial_override; | ||
337 | 339 | ||
338 | /* the gadget driver won't enable the data pullup | 340 | /* the gadget driver won't enable the data pullup |
339 | * while the deactivation count is nonzero. | 341 | * while the deactivation count is nonzero. |
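Moving the bind callbacks out of the structures and into the registration calls lets them stay in __init sections without section-mismatch warnings. A hedged skeleton of a gadget using the new entry points; the names are illustrative and my_device_desc/my_dev_strings are assumed to be defined elsewhere:

#include <linux/module.h>
#include <linux/usb/composite.h>

static int __init my_config_bind(struct usb_configuration *c)
{
	/* usb_add_function() calls for this configuration go here */
	return 0;
}

static struct usb_configuration my_config = {
	.label			= "my-config",
	.bConfigurationValue	= 1,
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

static int __init my_composite_bind(struct usb_composite_dev *cdev)
{
	return usb_add_config(cdev, &my_config, my_config_bind);
}

static struct usb_composite_driver my_driver = {
	.name		= "my-gadget",
	.dev		= &my_device_desc,
	.strings	= my_dev_strings,
};

static int __init my_gadget_init(void)
{
	return usb_composite_probe(&my_driver, my_composite_bind);
}
module_init(my_gadget_init);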
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index d3ef42d7d2f0..006412ce2303 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
@@ -705,11 +705,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget) | |||
705 | * struct usb_gadget_driver - driver for usb 'slave' devices | 705 | * struct usb_gadget_driver - driver for usb 'slave' devices |
706 | * @function: String describing the gadget's function | 706 | * @function: String describing the gadget's function |
707 | * @speed: Highest speed the driver handles. | 707 | * @speed: Highest speed the driver handles. |
708 | * @bind: Invoked when the driver is bound to a gadget, usually | ||
709 | * after registering the driver. | ||
710 | * At that point, ep0 is fully initialized, and ep_list holds | ||
711 | * the currently-available endpoints. | ||
712 | * Called in a context that permits sleeping. | ||
713 | * @setup: Invoked for ep0 control requests that aren't handled by | 708 | * @setup: Invoked for ep0 control requests that aren't handled by |
714 | * the hardware level driver. Most calls must be handled by | 709 | * the hardware level driver. Most calls must be handled by |
715 | * the gadget driver, including descriptor and configuration | 710 | * the gadget driver, including descriptor and configuration |
@@ -774,7 +769,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget) | |||
774 | struct usb_gadget_driver { | 769 | struct usb_gadget_driver { |
775 | char *function; | 770 | char *function; |
776 | enum usb_device_speed speed; | 771 | enum usb_device_speed speed; |
777 | int (*bind)(struct usb_gadget *); | ||
778 | void (*unbind)(struct usb_gadget *); | 772 | void (*unbind)(struct usb_gadget *); |
779 | int (*setup)(struct usb_gadget *, | 773 | int (*setup)(struct usb_gadget *, |
780 | const struct usb_ctrlrequest *); | 774 | const struct usb_ctrlrequest *); |
@@ -798,17 +792,19 @@ struct usb_gadget_driver { | |||
798 | */ | 792 | */ |
799 | 793 | ||
800 | /** | 794 | /** |
801 | * usb_gadget_register_driver - register a gadget driver | 795 | * usb_gadget_probe_driver - probe a gadget driver |
802 | * @driver:the driver being registered | 796 | * @driver: the driver being registered |
797 | * @bind: the driver's bind callback | ||
803 | * Context: can sleep | 798 | * Context: can sleep |
804 | * | 799 | * |
805 | * Call this in your gadget driver's module initialization function, | 800 | * Call this in your gadget driver's module initialization function, |
806 | * to tell the underlying usb controller driver about your driver. | 801 | * to tell the underlying usb controller driver about your driver. |
807 | * The driver's bind() function will be called to bind it to a | 802 | * The @bind() function will be called to bind it to a gadget before this |
808 | * gadget before this registration call returns. It's expected that | 803 | * registration call returns. It's expected that the @bind() function will |
809 | * the bind() functions will be in init sections. | 804 | * be in init sections. |
810 | */ | 805 | */ |
811 | int usb_gadget_register_driver(struct usb_gadget_driver *driver); | 806 | int usb_gadget_probe_driver(struct usb_gadget_driver *driver, |
807 | int (*bind)(struct usb_gadget *)); | ||
812 | 808 | ||
813 | /** | 809 | /** |
814 | * usb_gadget_unregister_driver - unregister a gadget driver | 810 | * usb_gadget_unregister_driver - unregister a gadget driver |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 3b571f1ffbb3..0b6e751ea0b1 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -329,6 +329,8 @@ extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); | |||
329 | extern int usb_hcd_unlink_urb(struct urb *urb, int status); | 329 | extern int usb_hcd_unlink_urb(struct urb *urb, int status); |
330 | extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, | 330 | extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, |
331 | int status); | 331 | int status); |
332 | extern void unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); | ||
333 | extern void unmap_urb_for_dma(struct usb_hcd *, struct urb *); | ||
332 | extern void usb_hcd_flush_endpoint(struct usb_device *udev, | 334 | extern void usb_hcd_flush_endpoint(struct usb_device *udev, |
333 | struct usb_host_endpoint *ep); | 335 | struct usb_host_endpoint *ep); |
334 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, | 336 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, |
diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h new file mode 100644 index 000000000000..a0ccf795f362 --- /dev/null +++ b/include/linux/usb/intel_mid_otg.h | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Intel MID (Langwell/Penwell) USB OTG Transceiver driver | ||
3 | * Copyright (C) 2008 - 2010, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef __INTEL_MID_OTG_H | ||
21 | #define __INTEL_MID_OTG_H | ||
22 | |||
23 | #include <linux/pm.h> | ||
24 | #include <linux/usb/otg.h> | ||
25 | #include <linux/notifier.h> | ||
26 | |||
27 | struct intel_mid_otg_xceiv; | ||
28 | |||
29 | /* This is a common data structure for Intel MID platform to | ||
30 | * save values of the OTG state machine */ | ||
31 | struct otg_hsm { | ||
32 | /* Input */ | ||
33 | int a_bus_resume; | ||
34 | int a_bus_suspend; | ||
35 | int a_conn; | ||
36 | int a_sess_vld; | ||
37 | int a_srp_det; | ||
38 | int a_vbus_vld; | ||
39 | int b_bus_resume; | ||
40 | int b_bus_suspend; | ||
41 | int b_conn; | ||
42 | int b_se0_srp; | ||
43 | int b_ssend_srp; | ||
44 | int b_sess_end; | ||
45 | int b_sess_vld; | ||
46 | int id; | ||
47 | /* id values */ | ||
48 | #define ID_B 0x05 | ||
49 | #define ID_A 0x04 | ||
50 | #define ID_ACA_C 0x03 | ||
51 | #define ID_ACA_B 0x02 | ||
52 | #define ID_ACA_A 0x01 | ||
53 | int power_up; | ||
54 | int adp_change; | ||
55 | int test_device; | ||
56 | |||
57 | /* Internal variables */ | ||
58 | int a_set_b_hnp_en; | ||
59 | int b_srp_done; | ||
60 | int b_hnp_enable; | ||
61 | int hnp_poll_enable; | ||
62 | |||
63 | /* Timeout indicator for timers */ | ||
64 | int a_wait_vrise_tmout; | ||
65 | int a_wait_bcon_tmout; | ||
66 | int a_aidl_bdis_tmout; | ||
67 | int a_bidl_adis_tmout; | ||
68 | int a_bidl_adis_tmr; | ||
69 | int a_wait_vfall_tmout; | ||
70 | int b_ase0_brst_tmout; | ||
71 | int b_bus_suspend_tmout; | ||
72 | int b_srp_init_tmout; | ||
73 | int b_srp_fail_tmout; | ||
74 | int b_srp_fail_tmr; | ||
75 | int b_adp_sense_tmout; | ||
76 | |||
77 | /* Informative variables */ | ||
78 | int a_bus_drop; | ||
79 | int a_bus_req; | ||
80 | int a_clr_err; | ||
81 | int b_bus_req; | ||
82 | int a_suspend_req; | ||
83 | int b_bus_suspend_vld; | ||
84 | |||
85 | /* Output */ | ||
86 | int drv_vbus; | ||
87 | int loc_conn; | ||
88 | int loc_sof; | ||
89 | |||
90 | /* Others */ | ||
91 | int vbus_srp_up; | ||
92 | }; | ||
93 | |||
94 | /* must provide ULPI access functions to read/write registers implemented in | ||
95 | * the ULPI address space */ | ||
96 | struct iotg_ulpi_access_ops { | ||
97 | int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val); | ||
98 | int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val); | ||
99 | }; | ||
100 | |||
101 | #define OTG_A_DEVICE 0x0 | ||
102 | #define OTG_B_DEVICE 0x1 | ||
103 | |||
104 | /* | ||
105 | * the Intel MID (Langwell/Penwell) OTG transceiver driver needs to interact | ||
106 | * with the device and host controller drivers to implement USB OTG features. | ||
107 | * Additional function members are added on top of the otg_transceiver data | ||
108 | * structure for this purpose. | ||
109 | */ | ||
110 | struct intel_mid_otg_xceiv { | ||
111 | struct otg_transceiver otg; | ||
112 | struct otg_hsm hsm; | ||
113 | |||
114 | /* base address */ | ||
115 | void __iomem *base; | ||
116 | |||
117 | /* ops to access ulpi */ | ||
118 | struct iotg_ulpi_access_ops ulpi_ops; | ||
119 | |||
120 | /* atomic notifier for interrupt context */ | ||
121 | struct atomic_notifier_head iotg_notifier; | ||
122 | |||
123 | /* start/stop USB Host function */ | ||
124 | int (*start_host)(struct intel_mid_otg_xceiv *iotg); | ||
125 | int (*stop_host)(struct intel_mid_otg_xceiv *iotg); | ||
126 | |||
127 | /* start/stop USB Peripheral function */ | ||
128 | int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
129 | int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
130 | |||
131 | /* start/stop ADP sense/probe function */ | ||
132 | int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg, | ||
133 | bool enabled, int dev); | ||
134 | int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg, | ||
135 | bool enabled); | ||
136 | |||
137 | #ifdef CONFIG_PM | ||
138 | /* suspend/resume USB host function */ | ||
139 | int (*suspend_host)(struct intel_mid_otg_xceiv *iotg, | ||
140 | pm_message_t message); | ||
141 | int (*resume_host)(struct intel_mid_otg_xceiv *iotg); | ||
142 | |||
143 | int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg, | ||
144 | pm_message_t message); | ||
145 | int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
146 | #endif | ||
147 | |||
148 | }; | ||
149 | static inline | ||
150 | struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct otg_transceiver *otg) | ||
151 | { | ||
152 | return container_of(otg, struct intel_mid_otg_xceiv, otg); | ||
153 | } | ||
154 | |||
155 | #define MID_OTG_NOTIFY_CONNECT 0x0001 | ||
156 | #define MID_OTG_NOTIFY_DISCONN 0x0002 | ||
157 | #define MID_OTG_NOTIFY_HSUSPEND 0x0003 | ||
158 | #define MID_OTG_NOTIFY_HRESUME 0x0004 | ||
159 | #define MID_OTG_NOTIFY_CSUSPEND 0x0005 | ||
160 | #define MID_OTG_NOTIFY_CRESUME 0x0006 | ||
161 | #define MID_OTG_NOTIFY_HOSTADD 0x0007 | ||
162 | #define MID_OTG_NOTIFY_HOSTREMOVE 0x0008 | ||
163 | #define MID_OTG_NOTIFY_CLIENTADD 0x0009 | ||
164 | #define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a | ||
165 | |||
166 | static inline int | ||
167 | intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg, | ||
168 | struct notifier_block *nb) | ||
169 | { | ||
170 | return atomic_notifier_chain_register(&iotg->iotg_notifier, nb); | ||
171 | } | ||
172 | |||
173 | static inline void | ||
174 | intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg, | ||
175 | struct notifier_block *nb) | ||
176 | { | ||
177 | atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb); | ||
178 | } | ||
179 | |||
180 | #endif /* __INTEL_MID_OTG_H */ | ||
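A hedged usage sketch (not part of the patch; the my_otg_* names are invented) of the atomic notifier helpers declared above, as a host or client controller driver might use them:

#include <linux/notifier.h>
#include <linux/usb/intel_mid_otg.h>

static int my_otg_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	switch (event) {
	case MID_OTG_NOTIFY_CONNECT:
		/* a peripheral was connected */
		break;
	case MID_OTG_NOTIFY_DISCONN:
		/* it went away again */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_otg_nb = {
	.notifier_call = my_otg_event,
};

/* iotg would typically come from otg_to_mid_xceiv(otg_get_transceiver()) */
void my_driver_hook_otg(struct intel_mid_otg_xceiv *iotg)
{
	intel_mid_otg_register_notifier(iotg, &my_otg_nb);
}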
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h new file mode 100644 index 000000000000..51f17b16d312 --- /dev/null +++ b/include/linux/usb/langwell_otg.h | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * Intel Langwell USB OTG transceiver driver | ||
3 | * Copyright (C) 2008 - 2010, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef __LANGWELL_OTG_H | ||
21 | #define __LANGWELL_OTG_H | ||
22 | |||
23 | #include <linux/usb/intel_mid_otg.h> | ||
24 | |||
25 | #define CI_USBCMD 0x30 | ||
26 | # define USBCMD_RST BIT(1) | ||
27 | # define USBCMD_RS BIT(0) | ||
28 | #define CI_USBSTS 0x34 | ||
29 | # define USBSTS_SLI BIT(8) | ||
30 | # define USBSTS_URI BIT(6) | ||
31 | # define USBSTS_PCI BIT(2) | ||
32 | #define CI_PORTSC1 0x74 | ||
33 | # define PORTSC_PP BIT(12) | ||
34 | # define PORTSC_LS (BIT(11) | BIT(10)) | ||
35 | # define PORTSC_SUSP BIT(7) | ||
36 | # define PORTSC_CCS BIT(0) | ||
37 | #define CI_HOSTPC1 0xb4 | ||
38 | # define HOSTPC1_PHCD BIT(22) | ||
39 | #define CI_OTGSC 0xf4 | ||
40 | # define OTGSC_DPIE BIT(30) | ||
41 | # define OTGSC_1MSE BIT(29) | ||
42 | # define OTGSC_BSEIE BIT(28) | ||
43 | # define OTGSC_BSVIE BIT(27) | ||
44 | # define OTGSC_ASVIE BIT(26) | ||
45 | # define OTGSC_AVVIE BIT(25) | ||
46 | # define OTGSC_IDIE BIT(24) | ||
47 | # define OTGSC_DPIS BIT(22) | ||
48 | # define OTGSC_1MSS BIT(21) | ||
49 | # define OTGSC_BSEIS BIT(20) | ||
50 | # define OTGSC_BSVIS BIT(19) | ||
51 | # define OTGSC_ASVIS BIT(18) | ||
52 | # define OTGSC_AVVIS BIT(17) | ||
53 | # define OTGSC_IDIS BIT(16) | ||
54 | # define OTGSC_DPS BIT(14) | ||
55 | # define OTGSC_1MST BIT(13) | ||
56 | # define OTGSC_BSE BIT(12) | ||
57 | # define OTGSC_BSV BIT(11) | ||
58 | # define OTGSC_ASV BIT(10) | ||
59 | # define OTGSC_AVV BIT(9) | ||
60 | # define OTGSC_ID BIT(8) | ||
61 | # define OTGSC_HABA BIT(7) | ||
62 | # define OTGSC_HADP BIT(6) | ||
63 | # define OTGSC_IDPU BIT(5) | ||
64 | # define OTGSC_DP BIT(4) | ||
65 | # define OTGSC_OT BIT(3) | ||
66 | # define OTGSC_HAAR BIT(2) | ||
67 | # define OTGSC_VC BIT(1) | ||
68 | # define OTGSC_VD BIT(0) | ||
69 | # define OTGSC_INTEN_MASK (0x7f << 24) | ||
70 | # define OTGSC_INT_MASK (0x5f << 24) | ||
71 | # define OTGSC_INTSTS_MASK (0x7f << 16) | ||
72 | #define CI_USBMODE 0xf8 | ||
73 | # define USBMODE_CM (BIT(1) | BIT(0)) | ||
74 | # define USBMODE_IDLE 0 | ||
75 | # define USBMODE_DEVICE 0x2 | ||
76 | # define USBMODE_HOST 0x3 | ||
77 | #define USBCFG_ADDR 0xff10801c | ||
78 | #define USBCFG_LEN 4 | ||
79 | # define USBCFG_VBUSVAL BIT(14) | ||
80 | # define USBCFG_AVALID BIT(13) | ||
81 | # define USBCFG_BVALID BIT(12) | ||
82 | # define USBCFG_SESEND BIT(11) | ||
83 | |||
84 | #define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI) | ||
85 | |||
86 | enum langwell_otg_timer_type { | ||
87 | TA_WAIT_VRISE_TMR, | ||
88 | TA_WAIT_BCON_TMR, | ||
89 | TA_AIDL_BDIS_TMR, | ||
90 | TB_ASE0_BRST_TMR, | ||
91 | TB_SE0_SRP_TMR, | ||
92 | TB_SRP_INIT_TMR, | ||
93 | TB_SRP_FAIL_TMR, | ||
94 | TB_BUS_SUSPEND_TMR | ||
95 | }; | ||
96 | |||
97 | #define TA_WAIT_VRISE 100 | ||
98 | #define TA_WAIT_BCON 30000 | ||
99 | #define TA_AIDL_BDIS 15000 | ||
100 | #define TB_ASE0_BRST 5000 | ||
101 | #define TB_SE0_SRP 2 | ||
102 | #define TB_SRP_INIT 100 | ||
103 | #define TB_SRP_FAIL 5500 | ||
104 | #define TB_BUS_SUSPEND 500 | ||
105 | |||
106 | struct langwell_otg_timer { | ||
107 | unsigned long expires; /* Number of count increase to timeout */ | ||
108 | unsigned long count; /* Tick counter */ | ||
109 | void (*function)(unsigned long); /* Timeout function */ | ||
110 | unsigned long data; /* Data passed to function */ | ||
111 | struct list_head list; | ||
112 | }; | ||
113 | |||
114 | struct langwell_otg { | ||
115 | struct intel_mid_otg_xceiv iotg; | ||
116 | struct device *dev; | ||
117 | |||
118 | void __iomem *usbcfg; /* SCCBUSB config Reg */ | ||
119 | |||
120 | unsigned region; | ||
121 | unsigned cfg_region; | ||
122 | |||
123 | struct work_struct work; | ||
124 | struct workqueue_struct *qwork; | ||
125 | struct timer_list hsm_timer; | ||
126 | |||
127 | spinlock_t lock; | ||
128 | spinlock_t wq_lock; | ||
129 | |||
130 | struct notifier_block iotg_notifier; | ||
131 | }; | ||
132 | |||
133 | static inline | ||
134 | struct langwell_otg *mid_xceiv_to_lnw(struct intel_mid_otg_xceiv *iotg) | ||
135 | { | ||
136 | return container_of(iotg, struct langwell_otg, iotg); | ||
137 | } | ||
138 | |||
139 | #endif /* __LANGWELL_OTG_H */ | ||
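Purely illustrative (not from the patch): the register offsets and bit masks above are meant to be used against the iotg.base mapping, for example to sample the OTGSC ID pin:

#include <linux/io.h>
#include <linux/types.h>
#include <linux/usb/langwell_otg.h>

/* Sketch only: report whether the ID pin says we should act as host. */
static bool langwell_id_is_host(struct langwell_otg *lnw)
{
	u32 otgsc = readl(lnw->iotg.base + CI_OTGSC);

	/* OTGSC_ID reads 0 for an A-device (host role), 1 for a B-device */
	return !(otgsc & OTGSC_ID);
}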
diff --git a/include/linux/usb/ncm.h b/include/linux/usb/ncm.h deleted file mode 100644 index 006d1064c8b2..000000000000 --- a/include/linux/usb/ncm.h +++ /dev/null | |||
@@ -1,114 +0,0 @@ | |||
1 | /* | ||
2 | * USB CDC NCM auxiliary definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef __LINUX_USB_NCM_H | ||
6 | #define __LINUX_USB_NCM_H | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/usb/cdc.h> | ||
10 | #include <asm/unaligned.h> | ||
11 | |||
12 | #define NCM_NTB_MIN_IN_SIZE 2048 | ||
13 | #define NCM_NTB_MIN_OUT_SIZE 2048 | ||
14 | |||
15 | #define NCM_CONTROL_TIMEOUT (5 * 1000) | ||
16 | |||
17 | /* bmNetworkCapabilities */ | ||
18 | |||
19 | #define NCM_NCAP_ETH_FILTER (1 << 0) | ||
20 | #define NCM_NCAP_NET_ADDRESS (1 << 1) | ||
21 | #define NCM_NCAP_ENCAP_COMM (1 << 2) | ||
22 | #define NCM_NCAP_MAX_DGRAM (1 << 3) | ||
23 | #define NCM_NCAP_CRC_MODE (1 << 4) | ||
24 | |||
25 | /* | ||
26 | * Here are options for NCM Datagram Pointer table (NDP) parser. | ||
27 | * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3), | ||
28 | * in NDP16 offsets and sizes fields are 1 16bit word wide, | ||
29 | * in NDP32 -- 2 16bit words wide. Also signatures are different. | ||
30 | * To make the parser code the same, put the differences in the structure, | ||
31 | * and switch pointers to the structures when the format is changed. | ||
32 | */ | ||
33 | |||
34 | struct ndp_parser_opts { | ||
35 | u32 nth_sign; | ||
36 | u32 ndp_sign; | ||
37 | unsigned nth_size; | ||
38 | unsigned ndp_size; | ||
39 | unsigned ndplen_align; | ||
40 | /* sizes in u16 units */ | ||
41 | unsigned dgram_item_len; /* index or length */ | ||
42 | unsigned block_length; | ||
43 | unsigned fp_index; | ||
44 | unsigned reserved1; | ||
45 | unsigned reserved2; | ||
46 | unsigned next_fp_index; | ||
47 | }; | ||
48 | |||
49 | #define INIT_NDP16_OPTS { \ | ||
50 | .nth_sign = NCM_NTH16_SIGN, \ | ||
51 | .ndp_sign = NCM_NDP16_NOCRC_SIGN, \ | ||
52 | .nth_size = sizeof(struct usb_cdc_ncm_nth16), \ | ||
53 | .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \ | ||
54 | .ndplen_align = 4, \ | ||
55 | .dgram_item_len = 1, \ | ||
56 | .block_length = 1, \ | ||
57 | .fp_index = 1, \ | ||
58 | .reserved1 = 0, \ | ||
59 | .reserved2 = 0, \ | ||
60 | .next_fp_index = 1, \ | ||
61 | } | ||
62 | |||
63 | |||
64 | #define INIT_NDP32_OPTS { \ | ||
65 | .nth_sign = NCM_NTH32_SIGN, \ | ||
66 | .ndp_sign = NCM_NDP32_NOCRC_SIGN, \ | ||
67 | .nth_size = sizeof(struct usb_cdc_ncm_nth32), \ | ||
68 | .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \ | ||
69 | .ndplen_align = 8, \ | ||
70 | .dgram_item_len = 2, \ | ||
71 | .block_length = 2, \ | ||
72 | .fp_index = 2, \ | ||
73 | .reserved1 = 1, \ | ||
74 | .reserved2 = 2, \ | ||
75 | .next_fp_index = 2, \ | ||
76 | } | ||
77 | |||
78 | static inline void put_ncm(__le16 **p, unsigned size, unsigned val) | ||
79 | { | ||
80 | switch (size) { | ||
81 | case 1: | ||
82 | put_unaligned_le16((u16)val, *p); | ||
83 | break; | ||
84 | case 2: | ||
85 | put_unaligned_le32((u32)val, *p); | ||
86 | |||
87 | break; | ||
88 | default: | ||
89 | BUG(); | ||
90 | } | ||
91 | |||
92 | *p += size; | ||
93 | } | ||
94 | |||
95 | static inline unsigned get_ncm(__le16 **p, unsigned size) | ||
96 | { | ||
97 | unsigned tmp; | ||
98 | |||
99 | switch (size) { | ||
100 | case 1: | ||
101 | tmp = get_unaligned_le16(*p); | ||
102 | break; | ||
103 | case 2: | ||
104 | tmp = get_unaligned_le32(*p); | ||
105 | break; | ||
106 | default: | ||
107 | BUG(); | ||
108 | } | ||
109 | |||
110 | *p += size; | ||
111 | return tmp; | ||
112 | } | ||
113 | |||
114 | #endif /* __LINUX_USB_NCM_H */ | ||
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 545cba73ccaf..0a5b3711e502 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
@@ -164,8 +164,19 @@ otg_shutdown(struct otg_transceiver *otg) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* for usb host and peripheral controller drivers */ | 166 | /* for usb host and peripheral controller drivers */ |
167 | #ifdef CONFIG_USB_OTG_UTILS | ||
167 | extern struct otg_transceiver *otg_get_transceiver(void); | 168 | extern struct otg_transceiver *otg_get_transceiver(void); |
168 | extern void otg_put_transceiver(struct otg_transceiver *); | 169 | extern void otg_put_transceiver(struct otg_transceiver *); |
170 | #else | ||
171 | static inline struct otg_transceiver *otg_get_transceiver(void) | ||
172 | { | ||
173 | return NULL; | ||
174 | } | ||
175 | |||
176 | static inline void otg_put_transceiver(struct otg_transceiver *x) | ||
177 | { | ||
178 | } | ||
179 | #endif | ||
169 | 180 | ||
170 | /* Context: can sleep */ | 181 | /* Context: can sleep */ |
171 | static inline int | 182 | static inline int |
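With the new !CONFIG_USB_OTG_UTILS stubs, callers can keep a single code path whether or not OTG support is built in; a minimal sketch (names invented):

#include <linux/usb/otg.h>

static struct otg_transceiver *my_xceiv;

static int my_controller_link_otg(void)
{
	my_xceiv = otg_get_transceiver();
	if (!my_xceiv)
		return 0;	/* stub path: no transceiver, carry on without OTG */

	/* otg_set_host() or otg_set_peripheral() would be called here */
	return 0;
}

static void my_controller_unlink_otg(void)
{
	if (my_xceiv) {
		otg_put_transceiver(my_xceiv);
		my_xceiv = NULL;
	}
}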
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 55675b1efb28..16d682f4f7c3 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -271,6 +271,8 @@ struct usb_serial_driver { | |||
271 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 271 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
272 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 272 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
273 | unsigned int set, unsigned int clear); | 273 | unsigned int set, unsigned int clear); |
274 | int (*get_icount)(struct tty_struct *tty, | ||
275 | struct serial_icounter_struct *icount); | ||
274 | /* Called by the tty layer for port level work. There may or may not | 276 | /* Called by the tty layer for port level work. There may or may not |
275 | be an attached tty at this point */ | 277 | be an attached tty at this point */ |
276 | void (*dtr_rts)(struct usb_serial_port *port, int on); | 278 | void (*dtr_rts)(struct usb_serial_port *port, int on); |
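A hedged sketch of what the new get_icount hook might look like in a usb-serial driver; the my_port_icount bookkeeping is an assumption, only the hook signature and the serial_icounter_struct fields come from the kernel headers:

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/usb/serial.h>

/* Hypothetical per-port counters the driver would update from its URB
 * completion and modem-status paths, stored via usb_set_serial_port_data(). */
struct my_port_icount {
	u32 rx, tx, cts, dsr, dcd, rng;
};

static int my_serial_get_icount(struct tty_struct *tty,
				struct serial_icounter_struct *icount)
{
	struct usb_serial_port *port = tty->driver_data;
	struct my_port_icount *stats = usb_get_serial_port_data(port);

	icount->rx  = stats->rx;
	icount->tx  = stats->tx;
	icount->cts = stats->cts;
	icount->dsr = stats->dsr;
	icount->dcd = stats->dcd;
	icount->rng = stats->rng;
	return 0;
}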
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h new file mode 100644 index 000000000000..d7fc910f1dc4 --- /dev/null +++ b/include/linux/usb/storage.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __LINUX_USB_STORAGE_H | ||
2 | #define __LINUX_USB_STORAGE_H | ||
3 | |||
4 | /* | ||
5 | * linux/usb/storage.h | ||
6 | * | ||
7 | * Copyright Matthew Wilcox for Intel Corp, 2010 | ||
8 | * | ||
9 | * This file contains definitions taken from the | ||
10 | * USB Mass Storage Class Specification Overview | ||
11 | * | ||
12 | * Distributed under the terms of the GNU GPL, version two. | ||
13 | */ | ||
14 | |||
15 | /* Storage subclass codes */ | ||
16 | |||
17 | #define USB_SC_RBC 0x01 /* Typically, flash devices */ | ||
18 | #define USB_SC_8020 0x02 /* CD-ROM */ | ||
19 | #define USB_SC_QIC 0x03 /* QIC-157 Tapes */ | ||
20 | #define USB_SC_UFI 0x04 /* Floppy */ | ||
21 | #define USB_SC_8070 0x05 /* Removable media */ | ||
22 | #define USB_SC_SCSI 0x06 /* Transparent */ | ||
23 | #define USB_SC_LOCKABLE 0x07 /* Password-protected */ | ||
24 | |||
25 | #define USB_SC_ISD200 0xf0 /* ISD200 ATA */ | ||
26 | #define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ | ||
27 | #define USB_SC_DEVICE 0xff /* Use device's value */ | ||
28 | |||
29 | /* Storage protocol codes */ | ||
30 | |||
31 | #define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */ | ||
32 | #define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */ | ||
33 | #define USB_PR_BULK 0x50 /* bulk only */ | ||
34 | #define USB_PR_UAS 0x62 /* USB Attached SCSI */ | ||
35 | |||
36 | #define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */ | ||
37 | #define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ | ||
38 | #define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */ | ||
39 | #define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ | ||
40 | #define USB_PR_FREECOM 0xf1 /* Freecom */ | ||
41 | #define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */ | ||
42 | #define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ | ||
43 | #define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */ | ||
44 | #define USB_PR_KARMA 0xf5 /* Rio Karma */ | ||
45 | |||
46 | #define USB_PR_DEVICE 0xff /* Use device's value */ | ||
47 | |||
48 | #endif | ||
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index a4b947e470a5..71693d4a4fe1 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h | |||
@@ -58,7 +58,11 @@ | |||
58 | US_FLAG(CAPACITY_OK, 0x00010000) \ | 58 | US_FLAG(CAPACITY_OK, 0x00010000) \ |
59 | /* READ CAPACITY response is correct */ \ | 59 | /* READ CAPACITY response is correct */ \ |
60 | US_FLAG(BAD_SENSE, 0x00020000) \ | 60 | US_FLAG(BAD_SENSE, 0x00020000) \ |
61 | /* Bad Sense (never more than 18 bytes) */ | 61 | /* Bad Sense (never more than 18 bytes) */ \ |
62 | US_FLAG(NO_READ_DISC_INFO, 0x00040000) \ | ||
63 | /* cannot handle READ_DISC_INFO */ \ | ||
64 | US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \ | ||
65 | /* cannot handle READ_CAPACITY_16 */ | ||
62 | 66 | ||
63 | #define US_FLAG(name, value) US_FL_##name = value , | 67 | #define US_FLAG(name, value) US_FL_##name = value , |
64 | enum { US_DO_ALL_FLAGS }; | 68 | enum { US_DO_ALL_FLAGS }; |
@@ -74,42 +78,7 @@ enum { US_DO_ALL_FLAGS }; | |||
74 | #define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) | 78 | #define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) |
75 | #define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) | 79 | #define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) |
76 | 80 | ||
77 | /* | 81 | #include <linux/usb/storage.h> |
78 | * This is probably not the best place to keep these constants, conceptually. | ||
79 | * But it's the only header included into all places which need them. | ||
80 | */ | ||
81 | |||
82 | /* Sub Classes */ | ||
83 | |||
84 | #define US_SC_RBC 0x01 /* Typically, flash devices */ | ||
85 | #define US_SC_8020 0x02 /* CD-ROM */ | ||
86 | #define US_SC_QIC 0x03 /* QIC-157 Tapes */ | ||
87 | #define US_SC_UFI 0x04 /* Floppy */ | ||
88 | #define US_SC_8070 0x05 /* Removable media */ | ||
89 | #define US_SC_SCSI 0x06 /* Transparent */ | ||
90 | #define US_SC_LOCKABLE 0x07 /* Password-protected */ | ||
91 | |||
92 | #define US_SC_ISD200 0xf0 /* ISD200 ATA */ | ||
93 | #define US_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ | ||
94 | #define US_SC_DEVICE 0xff /* Use device's value */ | ||
95 | |||
96 | /* Protocols */ | ||
97 | |||
98 | #define US_PR_CBI 0x00 /* Control/Bulk/Interrupt */ | ||
99 | #define US_PR_CB 0x01 /* Control/Bulk w/o interrupt */ | ||
100 | #define US_PR_BULK 0x50 /* bulk only */ | ||
101 | |||
102 | #define US_PR_USBAT 0x80 /* SCM-ATAPI bridge */ | ||
103 | #define US_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ | ||
104 | #define US_PR_SDDR55 0x82 /* SDDR-55 (made up) */ | ||
105 | #define US_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ | ||
106 | #define US_PR_FREECOM 0xf1 /* Freecom */ | ||
107 | #define US_PR_DATAFAB 0xf2 /* Datafab chipsets */ | ||
108 | #define US_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ | ||
109 | #define US_PR_ALAUDA 0xf4 /* Alauda chipsets */ | ||
110 | #define US_PR_KARMA 0xf5 /* Rio Karma */ | ||
111 | |||
112 | #define US_PR_DEVICE 0xff /* Use device's value */ | ||
113 | 82 | ||
114 | /* | 83 | /* |
115 | */ | 84 | */ |
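The US_FLAG list above is an X-macro: usb_usual.h expands it once into the US_FL_* enum, and after including the header a driver can reuse the same list. The flag-name table below is only an illustration of that pattern, not something this patch adds:

#include <linux/usb_usual.h>

/* Reuse the flag list to build a flag-name table, e.g. for debug output. */
#undef US_FLAG
#define US_FLAG(name, value) { US_FL_##name, #name },
static const struct {
	unsigned int flag;
	const char *name;
} us_flag_names[] = {
	US_DO_ALL_FLAGS
};
#undef US_FLAG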
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 01c2145118dc..63a4fe6d51bd 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -117,10 +117,12 @@ extern rwlock_t vmlist_lock; | |||
117 | extern struct vm_struct *vmlist; | 117 | extern struct vm_struct *vmlist; |
118 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | 118 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
119 | 119 | ||
120 | #ifdef CONFIG_SMP | ||
120 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, | 121 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, |
121 | const size_t *sizes, int nr_vms, | 122 | const size_t *sizes, int nr_vms, |
122 | size_t align, gfp_t gfp_mask); | 123 | size_t align, gfp_t gfp_mask); |
123 | 124 | ||
124 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); | 125 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); |
126 | #endif | ||
125 | 127 | ||
126 | #endif /* _LINUX_VMALLOC_H */ | 128 | #endif /* _LINUX_VMALLOC_H */ |
diff --git a/include/linux/wireless.h b/include/linux/wireless.h index e6827eedf18b..4395b28bb86c 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h | |||
@@ -1157,6 +1157,6 @@ struct __compat_iw_event { | |||
1157 | #define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) | 1157 | #define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) |
1158 | #define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) | 1158 | #define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) |
1159 | #define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) | 1159 | #define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) |
1160 | #define IW_EV_POINT_PK_LEN (IW_EV_LCP_LEN + 4) | 1160 | #define IW_EV_POINT_PK_LEN (IW_EV_LCP_PK_LEN + 4) |
1161 | 1161 | ||
1162 | #endif /* _LINUX_WIRELESS_H */ | 1162 | #endif /* _LINUX_WIRELESS_H */ |
diff --git a/include/linux/spi/wl12xx.h b/include/linux/wl12xx.h index a223ecbc71ef..4f902e1908aa 100644 --- a/include/linux/spi/wl12xx.h +++ b/include/linux/wl12xx.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2009 Nokia Corporation | 4 | * Copyright (C) 2009 Nokia Corporation |
5 | * | 5 | * |
6 | * Contact: Kalle Valo <kalle.valo@nokia.com> | 6 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
@@ -21,14 +21,31 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _LINUX_SPI_WL12XX_H | 24 | #ifndef _LINUX_WL12XX_H |
25 | #define _LINUX_SPI_WL12XX_H | 25 | #define _LINUX_WL12XX_H |
26 | 26 | ||
27 | struct wl12xx_platform_data { | 27 | struct wl12xx_platform_data { |
28 | void (*set_power)(bool enable); | 28 | void (*set_power)(bool enable); |
29 | /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ | 29 | /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ |
30 | int irq; | 30 | int irq; |
31 | bool use_eeprom; | 31 | bool use_eeprom; |
32 | int board_ref_clock; | ||
32 | }; | 33 | }; |
33 | 34 | ||
35 | #ifdef CONFIG_WL12XX_PLATFORM_DATA | ||
36 | |||
37 | int wl12xx_set_platform_data(const struct wl12xx_platform_data *data); | ||
38 | |||
39 | #else | ||
40 | |||
41 | static inline | ||
42 | int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) | ||
43 | { | ||
44 | return -ENOSYS; | ||
45 | } | ||
46 | |||
47 | #endif | ||
48 | |||
49 | const struct wl12xx_platform_data *wl12xx_get_platform_data(void); | ||
50 | |||
34 | #endif | 51 | #endif |
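A hedged sketch of the intended use from board code (all values are invented; only the structure fields and wl12xx_set_platform_data() come from the header):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/wl12xx.h>

static struct wl12xx_platform_data my_board_wlan_data = {
	.irq		 = 42,		/* invented: IRQ wired to WLAN_IRQ */
	.use_eeprom	 = false,
	.board_ref_clock = 2,		/* invented: board reference clock index */
};

static int __init my_board_wlan_init(void)
{
	int ret = wl12xx_set_platform_data(&my_board_wlan_data);

	/* the stub returns -ENOSYS when CONFIG_WL12XX_PLATFORM_DATA is off */
	if (ret)
		pr_warning("wl12xx platform data not registered: %d\n", ret);
	return ret;
}

The wl12xx driver itself would then pick these values up through the wl12xx_get_platform_data() accessor declared above.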
diff --git a/include/linux/wlp.h b/include/linux/wlp.h deleted file mode 100644 index c76fe2392506..000000000000 --- a/include/linux/wlp.h +++ /dev/null | |||
@@ -1,736 +0,0 @@ | |||
1 | /* | ||
2 | * WiMedia Logical Link Control Protocol (WLP) | ||
3 | * | ||
4 | * Copyright (C) 2005-2006 Intel Corporation | ||
5 | * Reinette Chatre <reinette.chatre@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
19 | * 02110-1301, USA. | ||
20 | * | ||
21 | * | ||
22 | * FIXME: docs | ||
23 | * | ||
24 | * - Does not (yet) include support for WLP control frames | ||
25 | * WLP Draft 0.99 [6.5]. | ||
26 | * | ||
27 | * A visual representation of the data structures. | ||
28 | * | ||
29 | * wssidB wssidB | ||
30 | * ^ ^ | ||
31 | * | | | ||
32 | * wssidA wssidA | ||
33 | * wlp interface { ^ ^ | ||
34 | * ... | | | ||
35 | * ... ... wssid wssid ... | ||
36 | * wlp --- ... | | | ||
37 | * }; neighbors --> neighbA --> neighbB | ||
38 | * ... | ||
39 | * wss | ||
40 | * ... | ||
41 | * eda cache --> neighborA --> neighborB --> neighborC ... | ||
42 | */ | ||
43 | |||
44 | #ifndef __LINUX__WLP_H_ | ||
45 | #define __LINUX__WLP_H_ | ||
46 | |||
47 | #include <linux/netdevice.h> | ||
48 | #include <linux/skbuff.h> | ||
49 | #include <linux/list.h> | ||
50 | #include <linux/uwb.h> | ||
51 | |||
52 | /** | ||
53 | * WLP Protocol ID | ||
54 | * WLP Draft 0.99 [6.2] | ||
55 | * | ||
56 | * The MUX header for all WLP frames | ||
57 | */ | ||
58 | #define WLP_PROTOCOL_ID 0x0100 | ||
59 | |||
60 | /** | ||
61 | * WLP Version | ||
62 | * WLP version placed in the association frames (WLP 0.99 [6.6]) | ||
63 | */ | ||
64 | #define WLP_VERSION 0x10 | ||
65 | |||
66 | /** | ||
67 | * Bytes needed to print UUID as string | ||
68 | */ | ||
69 | #define WLP_WSS_UUID_STRSIZE 48 | ||
70 | |||
71 | /** | ||
72 | * Bytes needed to print nonce as string | ||
73 | */ | ||
74 | #define WLP_WSS_NONCE_STRSIZE 48 | ||
75 | |||
76 | |||
77 | /** | ||
78 | * Size used for WLP name size | ||
79 | * | ||
80 | * The WSS name is set to 65 bytes, 1 byte larger than the maximum | ||
81 | * allowed by the WLP spec. This is to have a null terminated string | ||
82 | * for display to the user. A maximum of 64 bytes will still be used | ||
83 | * when placing the WSS name field in association frames. | ||
84 | */ | ||
85 | #define WLP_WSS_NAME_SIZE 65 | ||
86 | |||
87 | /** | ||
88 | * Number of bytes added by WLP to data frame | ||
89 | * | ||
90 | * A data frame transmitted from a host will be placed in a Standard or | ||
91 | * Abbreviated WLP frame. These have an extra 4 bytes of header (struct | ||
92 | * wlp_frame_std_abbrv_hdr). | ||
93 | * When the stack sends this data frame for transmission it needs to ensure | ||
94 | * there is enough headroom for this header. | ||
95 | */ | ||
96 | #define WLP_DATA_HLEN 4 | ||
97 | |||
98 | /** | ||
99 | * State of device regarding WLP Service Set | ||
100 | * | ||
101 | * WLP_WSS_STATE_NONE: the host does not participate in any WSS | ||
102 | * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence | ||
103 | * ("Partial Enroll"). This state is used to | ||
104 | * indicate the first part of enrollment that is | ||
105 | * unsecure. If the WSS is unsecure then the | ||
106 | * state will promptly go to WLP_WSS_STATE_ENROLLED, | ||
107 | * if the WSS is not secure then the enrollment | ||
108 | * procedure is a few more steps before we are | ||
109 | * enrolled. | ||
110 | * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS | ||
111 | * WLP_WSS_STATE_ACTIVE: WSS is activated | ||
112 | * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS | ||
113 | * | ||
114 | */ | ||
115 | enum wlp_wss_state { | ||
116 | WLP_WSS_STATE_NONE = 0, | ||
117 | WLP_WSS_STATE_PART_ENROLLED, | ||
118 | WLP_WSS_STATE_ENROLLED, | ||
119 | WLP_WSS_STATE_ACTIVE, | ||
120 | WLP_WSS_STATE_CONNECTED, | ||
121 | }; | ||
122 | |||
123 | /** | ||
124 | * WSS Secure status | ||
125 | * WLP 0.99 Table 6 | ||
126 | * | ||
127 | * Set to one if the WSS is secure, zero if it is not secure | ||
128 | */ | ||
129 | enum wlp_wss_sec_status { | ||
130 | WLP_WSS_UNSECURE = 0, | ||
131 | WLP_WSS_SECURE, | ||
132 | }; | ||
133 | |||
134 | /** | ||
135 | * WLP frame type | ||
136 | * WLP Draft 0.99 [6.2 Table 1] | ||
137 | */ | ||
138 | enum wlp_frame_type { | ||
139 | WLP_FRAME_STANDARD = 0, | ||
140 | WLP_FRAME_ABBREVIATED, | ||
141 | WLP_FRAME_CONTROL, | ||
142 | WLP_FRAME_ASSOCIATION, | ||
143 | }; | ||
144 | |||
145 | /** | ||
146 | * WLP Association Message Type | ||
147 | * WLP Draft 0.99 [6.6.1.2 Table 8] | ||
148 | */ | ||
149 | enum wlp_assoc_type { | ||
150 | WLP_ASSOC_D1 = 2, | ||
151 | WLP_ASSOC_D2 = 3, | ||
152 | WLP_ASSOC_M1 = 4, | ||
153 | WLP_ASSOC_M2 = 5, | ||
154 | WLP_ASSOC_M3 = 7, | ||
155 | WLP_ASSOC_M4 = 8, | ||
156 | WLP_ASSOC_M5 = 9, | ||
157 | WLP_ASSOC_M6 = 10, | ||
158 | WLP_ASSOC_M7 = 11, | ||
159 | WLP_ASSOC_M8 = 12, | ||
160 | WLP_ASSOC_F0 = 14, | ||
161 | WLP_ASSOC_E1 = 32, | ||
162 | WLP_ASSOC_E2 = 33, | ||
163 | WLP_ASSOC_C1 = 34, | ||
164 | WLP_ASSOC_C2 = 35, | ||
165 | WLP_ASSOC_C3 = 36, | ||
166 | WLP_ASSOC_C4 = 37, | ||
167 | }; | ||
168 | |||
169 | /** | ||
170 | * WLP Attribute Type | ||
171 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
172 | */ | ||
173 | enum wlp_attr_type { | ||
174 | WLP_ATTR_AUTH = 0x1005, /* Authenticator */ | ||
175 | WLP_ATTR_DEV_NAME = 0x1011, /* Device Name */ | ||
176 | WLP_ATTR_DEV_PWD_ID = 0x1012, /* Device Password ID */ | ||
177 | WLP_ATTR_E_HASH1 = 0x1014, /* E-Hash1 */ | ||
178 | WLP_ATTR_E_HASH2 = 0x1015, /* E-Hash2 */ | ||
179 | WLP_ATTR_E_SNONCE1 = 0x1016, /* E-SNonce1 */ | ||
180 | WLP_ATTR_E_SNONCE2 = 0x1017, /* E-SNonce2 */ | ||
181 | WLP_ATTR_ENCR_SET = 0x1018, /* Encrypted Settings */ | ||
182 | WLP_ATTR_ENRL_NONCE = 0x101A, /* Enrollee Nonce */ | ||
183 | WLP_ATTR_KEYWRAP_AUTH = 0x101E, /* Key Wrap Authenticator */ | ||
184 | WLP_ATTR_MANUF = 0x1021, /* Manufacturer */ | ||
185 | WLP_ATTR_MSG_TYPE = 0x1022, /* Message Type */ | ||
186 | WLP_ATTR_MODEL_NAME = 0x1023, /* Model Name */ | ||
187 | WLP_ATTR_MODEL_NR = 0x1024, /* Model Number */ | ||
188 | WLP_ATTR_PUB_KEY = 0x1032, /* Public Key */ | ||
189 | WLP_ATTR_REG_NONCE = 0x1039, /* Registrar Nonce */ | ||
190 | WLP_ATTR_R_HASH1 = 0x103D, /* R-Hash1 */ | ||
191 | WLP_ATTR_R_HASH2 = 0x103E, /* R-Hash2 */ | ||
192 | WLP_ATTR_R_SNONCE1 = 0x103F, /* R-SNonce1 */ | ||
193 | WLP_ATTR_R_SNONCE2 = 0x1040, /* R-SNonce2 */ | ||
194 | WLP_ATTR_SERIAL = 0x1042, /* Serial number */ | ||
195 | WLP_ATTR_UUID_E = 0x1047, /* UUID-E */ | ||
196 | WLP_ATTR_UUID_R = 0x1048, /* UUID-R */ | ||
197 | WLP_ATTR_PRI_DEV_TYPE = 0x1054, /* Primary Device Type */ | ||
198 | WLP_ATTR_SEC_DEV_TYPE = 0x1055, /* Secondary Device Type */ | ||
199 | WLP_ATTR_PORT_DEV = 0x1056, /* Portable Device */ | ||
200 | WLP_ATTR_APP_EXT = 0x1058, /* Application Extension */ | ||
201 | WLP_ATTR_WLP_VER = 0x2000, /* WLP Version */ | ||
202 | WLP_ATTR_WSSID = 0x2001, /* WSSID */ | ||
203 | WLP_ATTR_WSS_NAME = 0x2002, /* WSS Name */ | ||
204 | WLP_ATTR_WSS_SEC_STAT = 0x2003, /* WSS Secure Status */ | ||
205 | WLP_ATTR_WSS_BCAST = 0x2004, /* WSS Broadcast Address */ | ||
206 | WLP_ATTR_WSS_M_KEY = 0x2005, /* WSS Master Key */ | ||
207 | WLP_ATTR_ACC_ENRL = 0x2006, /* Accepting Enrollment */ | ||
208 | WLP_ATTR_WSS_INFO = 0x2007, /* WSS Information */ | ||
209 | WLP_ATTR_WSS_SEL_MTHD = 0x2008, /* WSS Selection Method */ | ||
210 | WLP_ATTR_ASSC_MTHD_LIST = 0x2009, /* Association Methods List */ | ||
211 | WLP_ATTR_SEL_ASSC_MTHD = 0x200A, /* Selected Association Method */ | ||
212 | WLP_ATTR_ENRL_HASH_COMM = 0x200B, /* Enrollee Hash Commitment */ | ||
213 | WLP_ATTR_WSS_TAG = 0x200C, /* WSS Tag */ | ||
214 | WLP_ATTR_WSS_VIRT = 0x200D, /* WSS Virtual EUI-48 */ | ||
215 | WLP_ATTR_WLP_ASSC_ERR = 0x200E, /* WLP Association Error */ | ||
216 | WLP_ATTR_VNDR_EXT = 0x200F, /* Vendor Extension */ | ||
217 | }; | ||
218 | |||
219 | /** | ||
220 | * WLP Category ID of primary/secondary device | ||
221 | * WLP Draft 0.99 [6.6.1.8 Table 12] | ||
222 | */ | ||
223 | enum wlp_dev_category_id { | ||
224 | WLP_DEV_CAT_COMPUTER = 1, | ||
225 | WLP_DEV_CAT_INPUT, | ||
226 | WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER, | ||
227 | WLP_DEV_CAT_CAMERA, | ||
228 | WLP_DEV_CAT_STORAGE, | ||
229 | WLP_DEV_CAT_INFRASTRUCTURE, | ||
230 | WLP_DEV_CAT_DISPLAY, | ||
231 | WLP_DEV_CAT_MULTIM, | ||
232 | WLP_DEV_CAT_GAMING, | ||
233 | WLP_DEV_CAT_TELEPHONE, | ||
234 | WLP_DEV_CAT_OTHER = 65535, | ||
235 | }; | ||
236 | |||
237 | /** | ||
238 | * WLP WSS selection method | ||
239 | * WLP Draft 0.99 [6.6.1.6 Table 10] | ||
240 | */ | ||
241 | enum wlp_wss_sel_mthd { | ||
242 | WLP_WSS_ENRL_SELECT = 1, /* Enrollee selects */ | ||
243 | WLP_WSS_REG_SELECT, /* Registrar selects */ | ||
244 | }; | ||
245 | |||
246 | /** | ||
247 | * WLP association error values | ||
248 | * WLP Draft 0.99 [6.6.1.5 Table 9] | ||
249 | */ | ||
250 | enum wlp_assc_error { | ||
251 | WLP_ASSOC_ERROR_NONE, | ||
252 | WLP_ASSOC_ERROR_AUTH, /* Authenticator Failure */ | ||
253 | WLP_ASSOC_ERROR_ROGUE, /* Rogue activity suspected */ | ||
254 | WLP_ASSOC_ERROR_BUSY, /* Device busy */ | ||
255 | WLP_ASSOC_ERROR_LOCK, /* Setup Locked */ | ||
256 | WLP_ASSOC_ERROR_NOT_READY, /* Registrar not ready */ | ||
257 | WLP_ASSOC_ERROR_INV, /* Invalid WSS selection */ | ||
258 | WLP_ASSOC_ERROR_MSG_TIME, /* Message timeout */ | ||
259 | WLP_ASSOC_ERROR_ENR_TIME, /* Enrollment session timeout */ | ||
260 | WLP_ASSOC_ERROR_PW, /* Device password invalid */ | ||
261 | WLP_ASSOC_ERROR_VER, /* Unsupported version */ | ||
262 | WLP_ASSOC_ERROR_INT, /* Internal error */ | ||
263 | WLP_ASSOC_ERROR_UNDEF, /* Undefined error */ | ||
264 | WLP_ASSOC_ERROR_NUM, /* Numeric comparison failure */ | ||
265 | WLP_ASSOC_ERROR_WAIT, /* Waiting for user input */ | ||
266 | }; | ||
267 | |||
268 | /** | ||
269 | * WLP Parameters | ||
270 | * WLP 0.99 [7.7] | ||
271 | */ | ||
272 | enum wlp_parameters { | ||
273 | WLP_PER_MSG_TIMEOUT = 15, /* Seconds to wait for response to | ||
274 | association message. */ | ||
275 | }; | ||
276 | |||
277 | /** | ||
278 | * WLP IE | ||
279 | * | ||
280 | * The WLP IE should be included in beacons by all devices. | ||
281 | * | ||
282 | * The driver can set only a few of the fields in this information element, | ||
283 | * most fields are managed by the device self. When the driver needs to set | ||
284 | * a field it will only provide values for the fields of interest, the rest | ||
285 | * will be filled with zeroes. The fields of interest are: | ||
286 | * | ||
287 | * Element ID | ||
288 | * Length | ||
289 | * Capabilities (only to include WSSID Hash list length) | ||
290 | * WSSID Hash List fields | ||
291 | * | ||
292 | * WLP 0.99 [6.7] | ||
293 | * | ||
294 | * Only the fields that will be used are detailed in this structure, rest | ||
295 | * are not detailed or marked as "notused". | ||
296 | */ | ||
297 | struct wlp_ie { | ||
298 | struct uwb_ie_hdr hdr; | ||
299 | __le16 capabilities; | ||
300 | __le16 cycle_param; | ||
301 | __le16 acw_anchor_addr; | ||
302 | u8 wssid_hash_list[]; | ||
303 | } __packed; | ||
304 | |||
305 | static inline int wlp_ie_hash_length(struct wlp_ie *ie) | ||
306 | { | ||
307 | return (le16_to_cpu(ie->capabilities) >> 12) & 0xf; | ||
308 | } | ||
309 | |||
310 | static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) | ||
311 | { | ||
312 | u16 caps = le16_to_cpu(ie->capabilities); | ||
313 | caps = (caps & ~(0xf << 12)) | (hash_length << 12); | ||
314 | ie->capabilities = cpu_to_le16(caps); | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * WLP nonce | ||
319 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
320 | * | ||
321 | * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee | ||
322 | * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so | ||
323 | * it is packed. | ||
324 | */ | ||
325 | struct wlp_nonce { | ||
326 | u8 data[16]; | ||
327 | } __packed; | ||
328 | |||
329 | /** | ||
330 | * WLP UUID | ||
331 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
332 | * | ||
333 | * Universally Unique Identifier (UUID) encoded as an octet string in the | ||
334 | * order the octets are shown in string representation in RFC4122. A UUID | ||
335 | * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed. | ||
336 | */ | ||
337 | struct wlp_uuid { | ||
338 | u8 data[16]; | ||
339 | } __packed; | ||
340 | |||
341 | |||
342 | /** | ||
343 | * Primary and secondary device type attributes | ||
344 | * WLP Draft 0.99 [6.6.1.8] | ||
345 | */ | ||
346 | struct wlp_dev_type { | ||
347 | enum wlp_dev_category_id category:16; | ||
348 | u8 OUI[3]; | ||
349 | u8 OUIsubdiv; | ||
350 | __le16 subID; | ||
351 | } __packed; | ||
352 | |||
353 | /** | ||
354 | * WLP frame header | ||
355 | * WLP Draft 0.99 [6.2] | ||
356 | */ | ||
357 | struct wlp_frame_hdr { | ||
358 | __le16 mux_hdr; /* WLP_PROTOCOL_ID */ | ||
359 | enum wlp_frame_type type:8; | ||
360 | } __packed; | ||
361 | |||
362 | /** | ||
363 | * WLP attribute field header | ||
364 | * WLP Draft 0.99 [6.6.1] | ||
365 | * | ||
366 | * Header of each attribute found in an association frame | ||
367 | */ | ||
368 | struct wlp_attr_hdr { | ||
369 | __le16 type; | ||
370 | __le16 length; | ||
371 | } __packed; | ||
372 | |||
373 | /** | ||
374 | * Device information commonly used together | ||
375 | * | ||
376 | * Each of these device information elements has a specified range in which it | ||
377 | * should fit (WLP 0.99 [Table 6]). This range provided in the spec does not | ||
378 | * include the termination null '\0' character (when used in the | ||
379 | * association protocol the attribute fields are accompanied | ||
380 | * with a "length" field so the full range from the spec can be used for | ||
381 | * the value). We thus allocate an extra byte to be able to store a string | ||
382 | * of max length with a terminating '\0'. | ||
383 | */ | ||
384 | struct wlp_device_info { | ||
385 | char name[33]; | ||
386 | char model_name[33]; | ||
387 | char manufacturer[65]; | ||
388 | char model_nr[33]; | ||
389 | char serial[33]; | ||
390 | struct wlp_dev_type prim_dev_type; | ||
391 | }; | ||
392 | |||
393 | /** | ||
394 | * Macros for the WLP attributes | ||
395 | * | ||
396 | * There are quite a few attributes (total is 43). The attribute layout can be | ||
397 | * in one of three categories: one value, an array, an enum forced to 8 bits. | ||
398 | * These macros help with their definitions. | ||
399 | */ | ||
400 | #define wlp_attr(type, name) \ | ||
401 | struct wlp_attr_##name { \ | ||
402 | struct wlp_attr_hdr hdr; \ | ||
403 | type name; \ | ||
404 | } __packed; | ||
405 | |||
406 | #define wlp_attr_array(type, name) \ | ||
407 | struct wlp_attr_##name { \ | ||
408 | struct wlp_attr_hdr hdr; \ | ||
409 | type name[]; \ | ||
410 | } __packed; | ||
411 | |||
412 | /** | ||
413 | * WLP association attribute fields | ||
414 | * WLP Draft 0.99 [6.6.1 Table 6] | ||
415 | * | ||
416 | * Attributes appear in same order as the Table in the spec | ||
417 | * FIXME Does not define all attributes yet | ||
418 | */ | ||
419 | |||
420 | /* Device name: Friendly name of sending device */ | ||
421 | wlp_attr_array(u8, dev_name) | ||
422 | |||
423 | /* Enrollee Nonce: Random number generated by enrollee for an enrollment | ||
424 | * session */ | ||
425 | wlp_attr(struct wlp_nonce, enonce) | ||
426 | |||
427 | /* Manufacturer name: Name of manufacturer of the sending device */ | ||
428 | wlp_attr_array(u8, manufacturer) | ||
429 | |||
430 | /* WLP Message Type */ | ||
431 | wlp_attr(u8, msg_type) | ||
432 | |||
433 | /* WLP Model name: Model name of sending device */ | ||
434 | wlp_attr_array(u8, model_name) | ||
435 | |||
436 | /* WLP Model number: Model number of sending device */ | ||
437 | wlp_attr_array(u8, model_nr) | ||
438 | |||
439 | /* Registrar Nonce: Random number generated by registrar for an enrollment | ||
440 | * session */ | ||
441 | wlp_attr(struct wlp_nonce, rnonce) | ||
442 | |||
443 | /* Serial number of device */ | ||
444 | wlp_attr_array(u8, serial) | ||
445 | |||
446 | /* UUID of enrollee */ | ||
447 | wlp_attr(struct wlp_uuid, uuid_e) | ||
448 | |||
449 | /* UUID of registrar */ | ||
450 | wlp_attr(struct wlp_uuid, uuid_r) | ||
451 | |||
452 | /* WLP Primary device type */ | ||
453 | wlp_attr(struct wlp_dev_type, prim_dev_type) | ||
454 | |||
455 | /* WLP Secondary device type */ | ||
456 | wlp_attr(struct wlp_dev_type, sec_dev_type) | ||
457 | |||
458 | /* WLP protocol version */ | ||
459 | wlp_attr(u8, version) | ||
460 | |||
461 | /* WLP service set identifier */ | ||
462 | wlp_attr(struct wlp_uuid, wssid) | ||
463 | |||
464 | /* WLP WSS name */ | ||
465 | wlp_attr_array(u8, wss_name) | ||
466 | |||
467 | /* WLP WSS Secure Status */ | ||
468 | wlp_attr(u8, wss_sec_status) | ||
469 | |||
470 | /* WSS Broadcast Address */ | ||
471 | wlp_attr(struct uwb_mac_addr, wss_bcast) | ||
472 | |||
473 | /* WLP Accepting Enrollment */ | ||
474 | wlp_attr(u8, accept_enrl) | ||
475 | |||
476 | /** | ||
477 | * WSS information attributes | ||
478 | * WLP Draft 0.99 [6.6.3 Table 15] | ||
479 | */ | ||
480 | struct wlp_wss_info { | ||
481 | struct wlp_attr_wssid wssid; | ||
482 | struct wlp_attr_wss_name name; | ||
483 | struct wlp_attr_accept_enrl accept; | ||
484 | struct wlp_attr_wss_sec_status sec_stat; | ||
485 | struct wlp_attr_wss_bcast bcast; | ||
486 | } __packed; | ||
487 | |||
488 | /* WLP WSS Information */ | ||
489 | wlp_attr_array(struct wlp_wss_info, wss_info) | ||
490 | |||
491 | /* WLP WSS Selection method */ | ||
492 | wlp_attr(u8, wss_sel_mthd) | ||
493 | |||
494 | /* WLP WSS tag */ | ||
495 | wlp_attr(u8, wss_tag) | ||
496 | |||
497 | /* WSS Virtual Address */ | ||
498 | wlp_attr(struct uwb_mac_addr, wss_virt) | ||
499 | |||
500 | /* WLP association error */ | ||
501 | wlp_attr(u8, wlp_assc_err) | ||
502 | |||
503 | /** | ||
504 | * WLP standard and abbreviated frames | ||
505 | * | ||
506 | * WLP Draft 0.99 [6.3] and [6.4] | ||
507 | * | ||
508 | * The difference between the WLP standard frame and the WLP | ||
509 | * abbreviated frame is that the standard frame includes the src | ||
510 | * and dest addresses from the Ethernet header, the abbreviated frame does | ||
511 | * not. | ||
512 | * The src/dest (as well as the type/length and client data) are already | ||
513 | * defined as part of the Ethernet header, we do not do this here. | ||
514 | * From this perspective the standard and abbreviated frames appear the | ||
515 | * same - they will be treated differently though. | ||
516 | * | ||
517 | * The size of this header is also captured in WLP_DATA_HLEN to enable | ||
518 | * interfaces to prepare their headroom. | ||
519 | */ | ||
520 | struct wlp_frame_std_abbrv_hdr { | ||
521 | struct wlp_frame_hdr hdr; | ||
522 | u8 tag; | ||
523 | } __packed; | ||
524 | |||
525 | /** | ||
526 | * WLP association frames | ||
527 | * | ||
528 | * WLP Draft 0.99 [6.6] | ||
529 | */ | ||
530 | struct wlp_frame_assoc { | ||
531 | struct wlp_frame_hdr hdr; | ||
532 | enum wlp_assoc_type type:8; | ||
533 | struct wlp_attr_version version; | ||
534 | struct wlp_attr_msg_type msg_type; | ||
535 | u8 attr[]; | ||
536 | } __packed; | ||
537 | |||
538 | /* Ethernet to dev address mapping */ | ||
539 | struct wlp_eda { | ||
540 | spinlock_t lock; | ||
541 | struct list_head cache; /* Eth<->Dev Addr cache */ | ||
542 | }; | ||
543 | |||
544 | /** | ||
545 | * WSS information temporary storage | ||
546 | * | ||
547 | * This information is only stored temporarily during discovery. It should | ||
548 | * not be stored unless the device is enrolled in the advertised WSS. This | ||
549 | * is done mainly because we follow the letter of the spec in this regard. | ||
550 | * See WLP 0.99 [7.2.3]. | ||
551 | * When the device does become enrolled in a WSS the WSS information will | ||
552 | * be stored as part of the more comprehensive struct wlp_wss. | ||
553 | */ | ||
554 | struct wlp_wss_tmp_info { | ||
555 | char name[WLP_WSS_NAME_SIZE]; | ||
556 | u8 accept_enroll; | ||
557 | u8 sec_status; | ||
558 | struct uwb_mac_addr bcast; | ||
559 | }; | ||
560 | |||
561 | struct wlp_wssid_e { | ||
562 | struct list_head node; | ||
563 | struct wlp_uuid wssid; | ||
564 | struct wlp_wss_tmp_info *info; | ||
565 | }; | ||
566 | |||
567 | /** | ||
568 | * A cache entry of WLP neighborhood | ||
569 | * | ||
570 | * @node: head of list is wlp->neighbors | ||
571 | * @wssid: list of wssids of this neighbor, element is wlp_wssid_e | ||
572 | * @info: temporary storage for information learned during discovery. This | ||
573 | * storage is used together with the wssid_e temporary storage | ||
574 | * during discovery. | ||
575 | */ | ||
576 | struct wlp_neighbor_e { | ||
577 | struct list_head node; | ||
578 | struct wlp_uuid uuid; | ||
579 | struct uwb_dev *uwb_dev; | ||
580 | struct list_head wssid; /* Elements are wlp_wssid_e */ | ||
581 | struct wlp_device_info *info; | ||
582 | }; | ||
583 | |||
584 | struct wlp; | ||
585 | /** | ||
586 | * Information for an association session in progress. | ||
587 | * | ||
588 | * @exp_message: The type of the expected message. Both this message and a | ||
589 | * F0 message (which can be sent in response to any | ||
590 | * association frame) will be accepted as a valid message for | ||
591 | * this session. | ||
592 | * @cb: The function that will be called upon receipt of this | ||
593 | * message. | ||
594 | * @cb_priv: Private data of callback | ||
595 | * @data: Data used in association process (always a sk_buff?) | ||
596 | * @neighbor: Address of neighbor with which association session is in | ||
597 | * progress. | ||
598 | */ | ||
599 | struct wlp_session { | ||
600 | enum wlp_assoc_type exp_message; | ||
601 | void (*cb)(struct wlp *); | ||
602 | void *cb_priv; | ||
603 | void *data; | ||
604 | struct uwb_dev_addr neighbor_addr; | ||
605 | }; | ||
606 | |||
607 | /** | ||
608 | * WLP Service Set | ||
609 | * | ||
610 | * @mutex: used to protect entire WSS structure. | ||
611 | * | ||
612 | * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum | ||
613 | * allowed by the WLP spec. This is to have a null terminated string | ||
614 | * for display to the user. A maximum of 64 bytes will still be used | ||
615 | * when placing the WSS name field in association frames. | ||
616 | * | ||
617 | * @accept_enroll: Accepting enrollment: Set to one if registrar is | ||
618 | * accepting enrollment in WSS, or zero otherwise. | ||
619 | * | ||
620 | * Global and local information for each WSS in which we are enrolled. | ||
621 | * WLP 0.99 Section 7.2.1 and Section 7.2.2 | ||
622 | */ | ||
623 | struct wlp_wss { | ||
624 | struct mutex mutex; | ||
625 | struct kobject kobj; | ||
626 | /* Global properties. */ | ||
627 | struct wlp_uuid wssid; | ||
628 | u8 hash; | ||
629 | char name[WLP_WSS_NAME_SIZE]; | ||
630 | struct uwb_mac_addr bcast; | ||
631 | u8 secure_status:1; | ||
632 | u8 master_key[16]; | ||
633 | /* Local properties. */ | ||
634 | u8 tag; | ||
635 | struct uwb_mac_addr virtual_addr; | ||
636 | /* Extra */ | ||
637 | u8 accept_enroll:1; | ||
638 | enum wlp_wss_state state; | ||
639 | }; | ||
640 | |||
641 | /** | ||
642 | * WLP main structure | ||
643 | * @mutex: protect changes to WLP structure. We only allow changes to the | ||
644 | * uuid, so currently this mutex only protects this field. | ||
645 | */ | ||
646 | struct wlp { | ||
647 | struct mutex mutex; | ||
648 | struct uwb_rc *rc; /* UWB radio controller */ | ||
649 | struct net_device *ndev; | ||
650 | struct uwb_pal pal; | ||
651 | struct wlp_eda eda; | ||
652 | struct wlp_uuid uuid; | ||
653 | struct wlp_session *session; | ||
654 | struct wlp_wss wss; | ||
655 | struct mutex nbmutex; /* Neighbor mutex protects neighbors list */ | ||
656 | struct list_head neighbors; /* Elements are wlp_neighbor_e */ | ||
657 | struct uwb_notifs_handler uwb_notifs_handler; | ||
658 | struct wlp_device_info *dev_info; | ||
659 | void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info); | ||
660 | int (*xmit_frame)(struct wlp *, struct sk_buff *, | ||
661 | struct uwb_dev_addr *); | ||
662 | void (*stop_queue)(struct wlp *); | ||
663 | void (*start_queue)(struct wlp *); | ||
664 | }; | ||
665 | |||
666 | /* sysfs */ | ||
667 | |||
668 | |||
669 | struct wlp_wss_attribute { | ||
670 | struct attribute attr; | ||
671 | ssize_t (*show)(struct wlp_wss *wss, char *buf); | ||
672 | ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count); | ||
673 | }; | ||
674 | |||
675 | #define WSS_ATTR(_name, _mode, _show, _store) \ | ||
676 | static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ | ||
677 | _show, _store) | ||
678 | |||
679 | extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); | ||
680 | extern void wlp_remove(struct wlp *); | ||
681 | extern ssize_t wlp_neighborhood_show(struct wlp *, char *); | ||
682 | extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); | ||
683 | extern void wlp_wss_remove(struct wlp_wss *); | ||
684 | extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *); | ||
685 | extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t); | ||
686 | extern ssize_t wlp_eda_show(struct wlp *, char *); | ||
687 | extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t); | ||
688 | extern ssize_t wlp_uuid_show(struct wlp *, char *); | ||
689 | extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t); | ||
690 | extern ssize_t wlp_dev_name_show(struct wlp *, char *); | ||
691 | extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t); | ||
692 | extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *); | ||
693 | extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t); | ||
694 | extern ssize_t wlp_dev_model_name_show(struct wlp *, char *); | ||
695 | extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t); | ||
696 | extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *); | ||
697 | extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t); | ||
698 | extern ssize_t wlp_dev_serial_show(struct wlp *, char *); | ||
699 | extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t); | ||
700 | extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *); | ||
701 | extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *, | ||
702 | size_t); | ||
703 | extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *); | ||
704 | extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t); | ||
705 | extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *); | ||
706 | extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *, | ||
707 | size_t); | ||
708 | extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *); | ||
709 | extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *, | ||
710 | size_t); | ||
711 | extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *, | ||
712 | struct uwb_dev_addr *); | ||
713 | extern int wlp_prepare_tx_frame(struct device *, struct wlp *, | ||
714 | struct sk_buff *, struct uwb_dev_addr *); | ||
715 | void wlp_reset_all(struct wlp *wlp); | ||
716 | |||
717 | /** | ||
718 | * Initialize WSS | ||
719 | */ | ||
720 | static inline | ||
721 | void wlp_wss_init(struct wlp_wss *wss) | ||
722 | { | ||
723 | mutex_init(&wss->mutex); | ||
724 | } | ||
725 | |||
726 | static inline | ||
727 | void wlp_init(struct wlp *wlp) | ||
728 | { | ||
729 | INIT_LIST_HEAD(&wlp->neighbors); | ||
730 | mutex_init(&wlp->mutex); | ||
731 | mutex_init(&wlp->nbmutex); | ||
732 | wlp_wss_init(&wlp->wss); | ||
733 | } | ||
734 | |||
735 | |||
736 | #endif /* #ifndef __LINUX__WLP_H_ */ | ||
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 25e02c941bac..070bb7a88936 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -243,11 +243,12 @@ enum { | |||
243 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 243 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
244 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 244 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
245 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | 245 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ |
246 | WQ_RESCUER = 1 << 3, /* has an rescue worker */ | 246 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
247 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 247 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
248 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ | 248 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ |
249 | 249 | ||
250 | WQ_DYING = 1 << 6, /* internal: workqueue is dying */ | 250 | WQ_DYING = 1 << 6, /* internal: workqueue is dying */ |
251 | WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ | ||
251 | 252 | ||
252 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ | 253 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
253 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ | 254 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
@@ -306,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | |||
306 | __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) | 307 | __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) |
307 | #endif | 308 | #endif |
308 | 309 | ||
310 | /** | ||
311 | * alloc_ordered_workqueue - allocate an ordered workqueue | ||
312 | * @name: name of the workqueue | ||
313 | * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) | ||
314 | * | ||
315 | * Allocate an ordered workqueue. An ordered workqueue executes at | ||
316 | * most one work item at any given time in the queued order. It is | ||
317 | * implemented as an unbound workqueue with @max_active of one. | ||
318 | * | ||
319 | * RETURNS: | ||
320 | * Pointer to the allocated workqueue on success, %NULL on failure. | ||
321 | */ | ||
322 | static inline struct workqueue_struct * | ||
323 | alloc_ordered_workqueue(const char *name, unsigned int flags) | ||
324 | { | ||
325 | return alloc_workqueue(name, WQ_UNBOUND | flags, 1); | ||
326 | } | ||
327 | |||
309 | #define create_workqueue(name) \ | 328 | #define create_workqueue(name) \ |
310 | alloc_workqueue((name), WQ_RESCUER, 1) | 329 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) |
311 | #define create_freezeable_workqueue(name) \ | 330 | #define create_freezeable_workqueue(name) \ |
312 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) | 331 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
313 | #define create_singlethread_workqueue(name) \ | 332 | #define create_singlethread_workqueue(name) \ |
314 | alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) | 333 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
315 | 334 | ||
316 | extern void destroy_workqueue(struct workqueue_struct *wq); | 335 | extern void destroy_workqueue(struct workqueue_struct *wq); |
317 | 336 | ||
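The new helper and the reworked create_*_workqueue() macros change how callers spell their intent: strictly ordered execution now has a first-class constructor, and reclaim-path users ask for WQ_MEM_RECLAIM by name instead of WQ_RESCUER. A minimal usage sketch against this API — the demo_* names are placeholders, not taken from the patch:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void demo_work_fn(struct work_struct *work)
	{
		/* ... handle exactly one queued item ... */
	}

	static DECLARE_WORK(demo_work, demo_work_fn);
	static struct workqueue_struct *demo_wq;

	static int __init demo_init(void)
	{
		/* Ordered: at most one item executes at a time, in queueing order;
		 * WQ_MEM_RECLAIM keeps a rescuer so work can progress under reclaim. */
		demo_wq = alloc_ordered_workqueue("demo", WQ_MEM_RECLAIM);
		if (!demo_wq)
			return -ENOMEM;

		queue_work(demo_wq, &demo_work);
		return 0;
	}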
@@ -325,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
325 | 344 | ||
326 | extern void flush_workqueue(struct workqueue_struct *wq); | 345 | extern void flush_workqueue(struct workqueue_struct *wq); |
327 | extern void flush_scheduled_work(void); | 346 | extern void flush_scheduled_work(void); |
328 | extern void flush_delayed_work(struct delayed_work *work); | ||
329 | 347 | ||
330 | extern int schedule_work(struct work_struct *work); | 348 | extern int schedule_work(struct work_struct *work); |
331 | extern int schedule_work_on(int cpu, struct work_struct *work); | 349 | extern int schedule_work_on(int cpu, struct work_struct *work); |
@@ -337,8 +355,13 @@ extern int keventd_up(void); | |||
337 | 355 | ||
338 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 356 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
339 | 357 | ||
340 | extern int flush_work(struct work_struct *work); | 358 | extern bool flush_work(struct work_struct *work); |
341 | extern int cancel_work_sync(struct work_struct *work); | 359 | extern bool flush_work_sync(struct work_struct *work); |
360 | extern bool cancel_work_sync(struct work_struct *work); | ||
361 | |||
362 | extern bool flush_delayed_work(struct delayed_work *dwork); | ||
363 | extern bool flush_delayed_work_sync(struct delayed_work *work); | ||
364 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); | ||
342 | 365 | ||
343 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | 366 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
344 | int max_active); | 367 | int max_active); |
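With flush_work(), cancel_work_sync() and the new *_sync() variants now returning bool, teardown paths can report whether anything was actually pending or running. A hedged sketch of a driver shutdown built on these declarations — the demo_* work items are illustrative only:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void demo_teardown(struct delayed_work *poll_work,
				  struct work_struct *irq_work)
	{
		/* true: the delayed work was pending or running and is now gone. */
		if (cancel_delayed_work_sync(poll_work))
			pr_debug("demo: cancelled a pending poll\n");

		/* true: we had to wait for the last queued instance to finish. */
		if (!flush_work_sync(irq_work))
			pr_debug("demo: irq work was already idle\n");
	}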
@@ -352,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work); | |||
352 | * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or | 375 | * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or |
353 | * cancel_work_sync() to wait on it. | 376 | * cancel_work_sync() to wait on it. |
354 | */ | 377 | */ |
355 | static inline int cancel_delayed_work(struct delayed_work *work) | 378 | static inline bool cancel_delayed_work(struct delayed_work *work) |
356 | { | 379 | { |
357 | int ret; | 380 | bool ret; |
358 | 381 | ||
359 | ret = del_timer_sync(&work->timer); | 382 | ret = del_timer_sync(&work->timer); |
360 | if (ret) | 383 | if (ret) |
@@ -367,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work) | |||
367 | * if it returns 0 the timer function may be running and the queueing is in | 390 | * if it returns 0 the timer function may be running and the queueing is in |
368 | * progress. | 391 | * progress. |
369 | */ | 392 | */ |
370 | static inline int __cancel_delayed_work(struct delayed_work *work) | 393 | static inline bool __cancel_delayed_work(struct delayed_work *work) |
371 | { | 394 | { |
372 | int ret; | 395 | bool ret; |
373 | 396 | ||
374 | ret = del_timer(&work->timer); | 397 | ret = del_timer(&work->timer); |
375 | if (ret) | 398 | if (ret) |
@@ -377,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work) | |||
377 | return ret; | 400 | return ret; |
378 | } | 401 | } |
379 | 402 | ||
380 | extern int cancel_delayed_work_sync(struct delayed_work *work); | ||
381 | |||
382 | /* Obsolete. use cancel_delayed_work_sync() */ | 403 | /* Obsolete. use cancel_delayed_work_sync() */ |
383 | static inline | 404 | static inline |
384 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | 405 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, |
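The inline cancel_delayed_work()/__cancel_delayed_work() helpers above only kill the pending timer; they do not wait for a callback that is already executing, which is what the out-of-line cancel_delayed_work_sync() is for. A short illustrative contrast, with demo_poll and its handler as hypothetical stand-ins:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	static void demo_poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(demo_poll, demo_poll_fn);

	static void demo_poll_fn(struct work_struct *work)
	{
		/* ... poll the hardware ... */
		schedule_delayed_work(&demo_poll, HZ);	/* re-arm in one second */
	}

	static void demo_stop_quick(void)
	{
		/* Stops the pending timer; a callback already running keeps going. */
		cancel_delayed_work(&demo_poll);
	}

	static void demo_stop_final(void)
	{
		/* Sleeps until the work is neither pending nor executing. */
		cancel_delayed_work_sync(&demo_poll);
	}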
@@ -409,8 +430,4 @@ extern bool freeze_workqueues_busy(void); | |||
409 | extern void thaw_workqueues(void); | 430 | extern void thaw_workqueues(void); |
410 | #endif /* CONFIG_FREEZER */ | 431 | #endif /* CONFIG_FREEZER */ |
411 | 432 | ||
412 | #ifdef CONFIG_LOCKDEP | ||
413 | int in_workqueue_context(struct workqueue_struct *wq); | ||
414 | #endif | ||
415 | |||
416 | #endif | 433 | #endif |