diff options
Diffstat (limited to 'include/linux')
163 files changed, 3283 insertions, 1997 deletions
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 1f16d502600c..09a947e8bc87 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h | |||
@@ -43,9 +43,7 @@ struct ahci_host_priv *ahci_platform_get_resources( | |||
43 | struct platform_device *pdev); | 43 | struct platform_device *pdev); |
44 | int ahci_platform_init_host(struct platform_device *pdev, | 44 | int ahci_platform_init_host(struct platform_device *pdev, |
45 | struct ahci_host_priv *hpriv, | 45 | struct ahci_host_priv *hpriv, |
46 | const struct ata_port_info *pi_template, | 46 | const struct ata_port_info *pi_template); |
47 | unsigned int force_port_map, | ||
48 | unsigned int mask_port_map); | ||
49 | 47 | ||
50 | int ahci_platform_suspend_host(struct device *dev); | 48 | int ahci_platform_suspend_host(struct device *dev); |
51 | int ahci_platform_resume_host(struct device *dev); | 49 | int ahci_platform_resume_host(struct device *dev); |
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/amba/xilinx_dma.h new file mode 100644 index 000000000000..34b98f276ed0 --- /dev/null +++ b/include/linux/amba/xilinx_dma.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Xilinx DMA Engine drivers support header file | ||
3 | * | ||
4 | * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. | ||
5 | * | ||
6 | * This is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef __DMA_XILINX_DMA_H | ||
13 | #define __DMA_XILINX_DMA_H | ||
14 | |||
15 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/dmaengine.h> | ||
17 | |||
18 | /** | ||
19 | * struct xilinx_vdma_config - VDMA Configuration structure | ||
20 | * @frm_dly: Frame delay | ||
21 | * @gen_lock: Whether in gen-lock mode | ||
22 | * @master: Master that it syncs to | ||
23 | * @frm_cnt_en: Enable frame count enable | ||
24 | * @park: Whether wants to park | ||
25 | * @park_frm: Frame to park on | ||
26 | * @coalesc: Interrupt coalescing threshold | ||
27 | * @delay: Delay counter | ||
28 | * @reset: Reset Channel | ||
29 | * @ext_fsync: External Frame Sync source | ||
30 | */ | ||
31 | struct xilinx_vdma_config { | ||
32 | int frm_dly; | ||
33 | int gen_lock; | ||
34 | int master; | ||
35 | int frm_cnt_en; | ||
36 | int park; | ||
37 | int park_frm; | ||
38 | int coalesc; | ||
39 | int delay; | ||
40 | int reset; | ||
41 | int ext_fsync; | ||
42 | }; | ||
43 | |||
44 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | ||
45 | struct xilinx_vdma_config *cfg); | ||
46 | |||
47 | #endif | ||
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index 8598f8eacb20..a495a959e8a7 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h | |||
@@ -36,6 +36,8 @@ struct ath9k_platform_data { | |||
36 | 36 | ||
37 | int (*get_mac_revision)(void); | 37 | int (*get_mac_revision)(void); |
38 | int (*external_reset)(void); | 38 | int (*external_reset)(void); |
39 | |||
40 | bool use_eeprom; | ||
39 | }; | 41 | }; |
40 | 42 | ||
41 | #endif /* _LINUX_ATH9K_PLATFORM_H */ | 43 | #endif /* _LINUX_ATH9K_PLATFORM_H */ |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 5a645769f020..d2633ee099d9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio) | |||
186 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ | 186 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ |
187 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) | 187 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) |
188 | 188 | ||
189 | /* | ||
190 | * Check if adding a bio_vec after bprv with offset would create a gap in | ||
191 | * the SG list. Most drivers don't care about this, but some do. | ||
192 | */ | ||
193 | static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset) | ||
194 | { | ||
195 | return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1)); | ||
196 | } | ||
197 | |||
189 | #define bio_io_error(bio) bio_endio((bio), -EIO) | 198 | #define bio_io_error(bio) bio_endio((bio), -EIO) |
190 | 199 | ||
191 | /* | 200 | /* |
@@ -644,10 +653,6 @@ struct biovec_slab { | |||
644 | 653 | ||
645 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 654 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
646 | 655 | ||
647 | |||
648 | |||
649 | #define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) | ||
650 | |||
651 | #define bip_for_each_vec(bvl, bip, iter) \ | 656 | #define bip_for_each_vec(bvl, bip, iter) \ |
652 | for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) | 657 | for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) |
653 | 658 | ||
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0feedebfde48..eb726b9c5762 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx { | |||
42 | unsigned int nr_ctx; | 42 | unsigned int nr_ctx; |
43 | struct blk_mq_ctx **ctxs; | 43 | struct blk_mq_ctx **ctxs; |
44 | 44 | ||
45 | unsigned int wait_index; | 45 | atomic_t wait_index; |
46 | 46 | ||
47 | struct blk_mq_tags *tags; | 47 | struct blk_mq_tags *tags; |
48 | 48 | ||
@@ -135,7 +135,7 @@ enum { | |||
135 | BLK_MQ_S_STOPPED = 0, | 135 | BLK_MQ_S_STOPPED = 0, |
136 | BLK_MQ_S_TAG_ACTIVE = 1, | 136 | BLK_MQ_S_TAG_ACTIVE = 1, |
137 | 137 | ||
138 | BLK_MQ_MAX_DEPTH = 2048, | 138 | BLK_MQ_MAX_DEPTH = 10240, |
139 | 139 | ||
140 | BLK_MQ_CPU_WORK_BATCH = 8, | 140 | BLK_MQ_CPU_WORK_BATCH = 8, |
141 | }; | 141 | }; |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d8e4cea23a25..66c2167f04a9 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __LINUX_BLK_TYPES_H | 5 | #ifndef __LINUX_BLK_TYPES_H |
6 | #define __LINUX_BLK_TYPES_H | 6 | #define __LINUX_BLK_TYPES_H |
7 | 7 | ||
8 | #ifdef CONFIG_BLOCK | ||
9 | |||
10 | #include <linux/types.h> | 8 | #include <linux/types.h> |
11 | 9 | ||
12 | struct bio_set; | 10 | struct bio_set; |
@@ -28,6 +26,8 @@ struct bio_vec { | |||
28 | unsigned int bv_offset; | 26 | unsigned int bv_offset; |
29 | }; | 27 | }; |
30 | 28 | ||
29 | #ifdef CONFIG_BLOCK | ||
30 | |||
31 | struct bvec_iter { | 31 | struct bvec_iter { |
32 | sector_t bi_sector; /* device address in 512 byte | 32 | sector_t bi_sector; /* device address in 512 byte |
33 | sectors */ | 33 | sectors */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3cd426e971db..8699bcf5f099 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -280,6 +280,7 @@ struct queue_limits { | |||
280 | unsigned long seg_boundary_mask; | 280 | unsigned long seg_boundary_mask; |
281 | 281 | ||
282 | unsigned int max_hw_sectors; | 282 | unsigned int max_hw_sectors; |
283 | unsigned int chunk_sectors; | ||
283 | unsigned int max_sectors; | 284 | unsigned int max_sectors; |
284 | unsigned int max_segment_size; | 285 | unsigned int max_segment_size; |
285 | unsigned int physical_block_size; | 286 | unsigned int physical_block_size; |
@@ -511,6 +512,7 @@ struct request_queue { | |||
511 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ | 512 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ |
512 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ | 513 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ |
513 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ | 514 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ |
515 | #define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */ | ||
514 | 516 | ||
515 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 517 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
516 | (1 << QUEUE_FLAG_STACKABLE) | \ | 518 | (1 << QUEUE_FLAG_STACKABLE) | \ |
@@ -795,6 +797,7 @@ extern void __blk_put_request(struct request_queue *, struct request *); | |||
795 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 797 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
796 | extern struct request *blk_make_request(struct request_queue *, struct bio *, | 798 | extern struct request *blk_make_request(struct request_queue *, struct bio *, |
797 | gfp_t); | 799 | gfp_t); |
800 | extern void blk_rq_set_block_pc(struct request *); | ||
798 | extern void blk_requeue_request(struct request_queue *, struct request *); | 801 | extern void blk_requeue_request(struct request_queue *, struct request *); |
799 | extern void blk_add_request_payload(struct request *rq, struct page *page, | 802 | extern void blk_add_request_payload(struct request *rq, struct page *page, |
800 | unsigned int len); | 803 | unsigned int len); |
@@ -910,6 +913,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | |||
910 | return q->limits.max_sectors; | 913 | return q->limits.max_sectors; |
911 | } | 914 | } |
912 | 915 | ||
916 | /* | ||
917 | * Return maximum size of a request at given offset. Only valid for | ||
918 | * file system requests. | ||
919 | */ | ||
920 | static inline unsigned int blk_max_size_offset(struct request_queue *q, | ||
921 | sector_t offset) | ||
922 | { | ||
923 | if (!q->limits.chunk_sectors) | ||
924 | return q->limits.max_sectors; | ||
925 | |||
926 | return q->limits.chunk_sectors - | ||
927 | (offset & (q->limits.chunk_sectors - 1)); | ||
928 | } | ||
929 | |||
913 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | 930 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) |
914 | { | 931 | { |
915 | struct request_queue *q = rq->q; | 932 | struct request_queue *q = rq->q; |
@@ -917,7 +934,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | |||
917 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) | 934 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) |
918 | return q->limits.max_hw_sectors; | 935 | return q->limits.max_hw_sectors; |
919 | 936 | ||
920 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | 937 | if (!q->limits.chunk_sectors) |
938 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | ||
939 | |||
940 | return min(blk_max_size_offset(q, blk_rq_pos(rq)), | ||
941 | blk_queue_get_max_sectors(q, rq->cmd_flags)); | ||
921 | } | 942 | } |
922 | 943 | ||
923 | static inline unsigned int blk_rq_count_bios(struct request *rq) | 944 | static inline unsigned int blk_rq_count_bios(struct request *rq) |
@@ -983,6 +1004,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | |||
983 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 1004 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
984 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | 1005 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); |
985 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 1006 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
1007 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); | ||
986 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 1008 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
987 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 1009 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
988 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 1010 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 78c6c52073ad..a0875001b13c 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h | |||
@@ -10,8 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef CAN_CORE_H | 13 | #ifndef _CAN_CORE_H |
14 | #define CAN_CORE_H | 14 | #define _CAN_CORE_H |
15 | 15 | ||
16 | #include <linux/can.h> | 16 | #include <linux/can.h> |
17 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
@@ -58,4 +58,4 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id, | |||
58 | extern int can_send(struct sk_buff *skb, int loop); | 58 | extern int can_send(struct sk_buff *skb, int loop); |
59 | extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | 59 | extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
60 | 60 | ||
61 | #endif /* CAN_CORE_H */ | 61 | #endif /* !_CAN_CORE_H */ |
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 3ce5e526525f..6992afc6ba7f 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h | |||
@@ -10,8 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef CAN_DEV_H | 13 | #ifndef _CAN_DEV_H |
14 | #define CAN_DEV_H | 14 | #define _CAN_DEV_H |
15 | 15 | ||
16 | #include <linux/can.h> | 16 | #include <linux/can.h> |
17 | #include <linux/can/netlink.h> | 17 | #include <linux/can/netlink.h> |
@@ -132,4 +132,4 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
132 | struct sk_buff *alloc_can_err_skb(struct net_device *dev, | 132 | struct sk_buff *alloc_can_err_skb(struct net_device *dev, |
133 | struct can_frame **cf); | 133 | struct can_frame **cf); |
134 | 134 | ||
135 | #endif /* CAN_DEV_H */ | 135 | #endif /* !_CAN_DEV_H */ |
diff --git a/include/linux/can/led.h b/include/linux/can/led.h index 9c1167baf273..e0475c5cbb92 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h | |||
@@ -6,8 +6,8 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef CAN_LED_H | 9 | #ifndef _CAN_LED_H |
10 | #define CAN_LED_H | 10 | #define _CAN_LED_H |
11 | 11 | ||
12 | #include <linux/if.h> | 12 | #include <linux/if.h> |
13 | #include <linux/leds.h> | 13 | #include <linux/leds.h> |
@@ -48,4 +48,4 @@ static inline void can_led_notifier_exit(void) | |||
48 | 48 | ||
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #endif | 51 | #endif /* !_CAN_LED_H */ |
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h index 7702641f87ee..78b2d44f04cf 100644 --- a/include/linux/can/platform/cc770.h +++ b/include/linux/can/platform/cc770.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _CAN_PLATFORM_CC770_H_ | 1 | #ifndef _CAN_PLATFORM_CC770_H |
2 | #define _CAN_PLATFORM_CC770_H_ | 2 | #define _CAN_PLATFORM_CC770_H |
3 | 3 | ||
4 | /* CPU Interface Register (0x02) */ | 4 | /* CPU Interface Register (0x02) */ |
5 | #define CPUIF_CEN 0x01 /* Clock Out Enable */ | 5 | #define CPUIF_CEN 0x01 /* Clock Out Enable */ |
@@ -30,4 +30,4 @@ struct cc770_platform_data { | |||
30 | u8 bcr; /* Bus Configuration Register */ | 30 | u8 bcr; /* Bus Configuration Register */ |
31 | }; | 31 | }; |
32 | 32 | ||
33 | #endif /* !_CAN_PLATFORM_CC770_H_ */ | 33 | #endif /* !_CAN_PLATFORM_CC770_H */ |
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index dc029dba7a03..d44fcae274ff 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __CAN_PLATFORM_MCP251X_H__ | 1 | #ifndef _CAN_PLATFORM_MCP251X_H |
2 | #define __CAN_PLATFORM_MCP251X_H__ | 2 | #define _CAN_PLATFORM_MCP251X_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * | 5 | * |
@@ -18,4 +18,4 @@ struct mcp251x_platform_data { | |||
18 | unsigned long oscillator_frequency; | 18 | unsigned long oscillator_frequency; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | #endif /* __CAN_PLATFORM_MCP251X_H__ */ | 21 | #endif /* !_CAN_PLATFORM_MCP251X_H */ |
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h new file mode 100644 index 000000000000..0f4a2f3df504 --- /dev/null +++ b/include/linux/can/platform/rcar_can.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef _CAN_PLATFORM_RCAR_CAN_H_ | ||
2 | #define _CAN_PLATFORM_RCAR_CAN_H_ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* Clock Select Register settings */ | ||
7 | enum CLKR { | ||
8 | CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ | ||
9 | CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ | ||
10 | CLKR_CLKEXT = 3 /* Externally input clock */ | ||
11 | }; | ||
12 | |||
13 | struct rcar_can_platform_data { | ||
14 | enum CLKR clock_select; /* Clock source select */ | ||
15 | }; | ||
16 | |||
17 | #endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */ | ||
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h index 96f8fcc78d78..93570b61ec6c 100644 --- a/include/linux/can/platform/sja1000.h +++ b/include/linux/can/platform/sja1000.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _CAN_PLATFORM_SJA1000_H_ | 1 | #ifndef _CAN_PLATFORM_SJA1000_H |
2 | #define _CAN_PLATFORM_SJA1000_H_ | 2 | #define _CAN_PLATFORM_SJA1000_H |
3 | 3 | ||
4 | /* clock divider register */ | 4 | /* clock divider register */ |
5 | #define CDR_CLKOUT_MASK 0x07 | 5 | #define CDR_CLKOUT_MASK 0x07 |
@@ -32,4 +32,4 @@ struct sja1000_platform_data { | |||
32 | u8 cdr; /* clock divider register */ | 32 | u8 cdr; /* clock divider register */ |
33 | }; | 33 | }; |
34 | 34 | ||
35 | #endif /* !_CAN_PLATFORM_SJA1000_H_ */ | 35 | #endif /* !_CAN_PLATFORM_SJA1000_H */ |
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h index af17cb3f7a84..a52f47ca6c8a 100644 --- a/include/linux/can/platform/ti_hecc.h +++ b/include/linux/can/platform/ti_hecc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __CAN_PLATFORM_TI_HECC_H__ | 1 | #ifndef _CAN_PLATFORM_TI_HECC_H |
2 | #define __CAN_PLATFORM_TI_HECC_H__ | 2 | #define _CAN_PLATFORM_TI_HECC_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * TI HECC (High End CAN Controller) driver platform header | 5 | * TI HECC (High End CAN Controller) driver platform header |
@@ -41,4 +41,4 @@ struct ti_hecc_platform_data { | |||
41 | u32 version; | 41 | u32 version; |
42 | void (*transceiver_switch) (int); | 42 | void (*transceiver_switch) (int); |
43 | }; | 43 | }; |
44 | #endif | 44 | #endif /* !_CAN_PLATFORM_TI_HECC_H */ |
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index f9bbbb472663..cc00d15c6107 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h | |||
@@ -7,8 +7,8 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef CAN_SKB_H | 10 | #ifndef _CAN_SKB_H |
11 | #define CAN_SKB_H | 11 | #define _CAN_SKB_H |
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -80,4 +80,4 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) | |||
80 | return skb; | 80 | return skb; |
81 | } | 81 | } |
82 | 82 | ||
83 | #endif /* CAN_SKB_H */ | 83 | #endif /* !_CAN_SKB_H */ |
diff --git a/include/linux/capability.h b/include/linux/capability.h index a6ee1f9a5018..84b13ad67c1c 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -210,7 +210,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, | |||
210 | struct user_namespace *ns, int cap); | 210 | struct user_namespace *ns, int cap); |
211 | extern bool capable(int cap); | 211 | extern bool capable(int cap); |
212 | extern bool ns_capable(struct user_namespace *ns, int cap); | 212 | extern bool ns_capable(struct user_namespace *ns, int cap); |
213 | extern bool inode_capable(const struct inode *inode, int cap); | 213 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); |
214 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); | 214 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); |
215 | 215 | ||
216 | /* audit system wants to get cap info from files as well */ | 216 | /* audit system wants to get cap info from files as well */ |
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 5f6db18d72e8..3c97d5e9b951 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h | |||
@@ -625,6 +625,8 @@ int ceph_flags_to_mode(int flags); | |||
625 | CEPH_CAP_LINK_EXCL | \ | 625 | CEPH_CAP_LINK_EXCL | \ |
626 | CEPH_CAP_XATTR_EXCL | \ | 626 | CEPH_CAP_XATTR_EXCL | \ |
627 | CEPH_CAP_FILE_EXCL) | 627 | CEPH_CAP_FILE_EXCL) |
628 | #define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \ | ||
629 | CEPH_CAP_FILE_SHARED) | ||
628 | #define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ | 630 | #define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ |
629 | CEPH_CAP_FILE_EXCL) | 631 | CEPH_CAP_FILE_EXCL) |
630 | #define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) | 632 | #define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 2f49aa4c4f7f..279b0afac1c1 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
@@ -222,8 +222,6 @@ extern void ceph_copy_to_page_vector(struct page **pages, | |||
222 | extern void ceph_copy_from_page_vector(struct page **pages, | 222 | extern void ceph_copy_from_page_vector(struct page **pages, |
223 | void *data, | 223 | void *data, |
224 | loff_t off, size_t len); | 224 | loff_t off, size_t len); |
225 | extern int ceph_copy_page_vector_to_user(struct page **pages, void __user *data, | ||
226 | loff_t off, size_t len); | ||
227 | extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); | 225 | extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); |
228 | 226 | ||
229 | 227 | ||
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index a486f390dfbe..deb47e45ac7c 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h | |||
@@ -40,9 +40,9 @@ struct ceph_mon_request { | |||
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * ceph_mon_generic_request is being used for the statfs and poolop requests | 43 | * ceph_mon_generic_request is being used for the statfs, poolop and |
44 | * which are bening done a bit differently because we need to get data back | 44 | * mon_get_version requests which are being done a bit differently |
45 | * to the caller | 45 | * because we need to get data back to the caller |
46 | */ | 46 | */ |
47 | struct ceph_mon_generic_request { | 47 | struct ceph_mon_generic_request { |
48 | struct kref kref; | 48 | struct kref kref; |
@@ -104,10 +104,15 @@ extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); | |||
104 | extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); | 104 | extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); |
105 | 105 | ||
106 | extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); | 106 | extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); |
107 | extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, | ||
108 | unsigned long timeout); | ||
107 | 109 | ||
108 | extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, | 110 | extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, |
109 | struct ceph_statfs *buf); | 111 | struct ceph_statfs *buf); |
110 | 112 | ||
113 | extern int ceph_monc_do_get_version(struct ceph_mon_client *monc, | ||
114 | const char *what, u64 *newest); | ||
115 | |||
111 | extern int ceph_monc_open_session(struct ceph_mon_client *monc); | 116 | extern int ceph_monc_open_session(struct ceph_mon_client *monc); |
112 | 117 | ||
113 | extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); | 118 | extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index d60904b9e505..b5223c570eba 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/percpu-refcount.h> | 21 | #include <linux/percpu-refcount.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/kernfs.h> | 23 | #include <linux/kernfs.h> |
24 | #include <linux/wait.h> | ||
24 | 25 | ||
25 | #ifdef CONFIG_CGROUPS | 26 | #ifdef CONFIG_CGROUPS |
26 | 27 | ||
@@ -47,21 +48,45 @@ enum cgroup_subsys_id { | |||
47 | }; | 48 | }; |
48 | #undef SUBSYS | 49 | #undef SUBSYS |
49 | 50 | ||
50 | /* Per-subsystem/per-cgroup state maintained by the system. */ | 51 | /* |
52 | * Per-subsystem/per-cgroup state maintained by the system. This is the | ||
53 | * fundamental structural building block that controllers deal with. | ||
54 | * | ||
55 | * Fields marked with "PI:" are public and immutable and may be accessed | ||
56 | * directly without synchronization. | ||
57 | */ | ||
51 | struct cgroup_subsys_state { | 58 | struct cgroup_subsys_state { |
52 | /* the cgroup that this css is attached to */ | 59 | /* PI: the cgroup that this css is attached to */ |
53 | struct cgroup *cgroup; | 60 | struct cgroup *cgroup; |
54 | 61 | ||
55 | /* the cgroup subsystem that this css is attached to */ | 62 | /* PI: the cgroup subsystem that this css is attached to */ |
56 | struct cgroup_subsys *ss; | 63 | struct cgroup_subsys *ss; |
57 | 64 | ||
58 | /* reference count - access via css_[try]get() and css_put() */ | 65 | /* reference count - access via css_[try]get() and css_put() */ |
59 | struct percpu_ref refcnt; | 66 | struct percpu_ref refcnt; |
60 | 67 | ||
61 | /* the parent css */ | 68 | /* PI: the parent css */ |
62 | struct cgroup_subsys_state *parent; | 69 | struct cgroup_subsys_state *parent; |
63 | 70 | ||
64 | unsigned long flags; | 71 | /* siblings list anchored at the parent's ->children */ |
72 | struct list_head sibling; | ||
73 | struct list_head children; | ||
74 | |||
75 | /* | ||
76 | * PI: Subsys-unique ID. 0 is unused and root is always 1. The | ||
77 | * matching css can be looked up using css_from_id(). | ||
78 | */ | ||
79 | int id; | ||
80 | |||
81 | unsigned int flags; | ||
82 | |||
83 | /* | ||
84 | * Monotonically increasing unique serial number which defines a | ||
85 | * uniform order among all csses. It's guaranteed that all | ||
86 | * ->children lists are in the ascending order of ->serial_nr and | ||
87 | * used to allow interrupting and resuming iterations. | ||
88 | */ | ||
89 | u64 serial_nr; | ||
65 | 90 | ||
66 | /* percpu_ref killing and RCU release */ | 91 | /* percpu_ref killing and RCU release */ |
67 | struct rcu_head rcu_head; | 92 | struct rcu_head rcu_head; |
@@ -70,8 +95,9 @@ struct cgroup_subsys_state { | |||
70 | 95 | ||
71 | /* bits in struct cgroup_subsys_state flags field */ | 96 | /* bits in struct cgroup_subsys_state flags field */ |
72 | enum { | 97 | enum { |
73 | CSS_ROOT = (1 << 0), /* this CSS is the root of the subsystem */ | 98 | CSS_NO_REF = (1 << 0), /* no reference counting for this css */ |
74 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ | 99 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ |
100 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ | ||
75 | }; | 101 | }; |
76 | 102 | ||
77 | /** | 103 | /** |
@@ -82,8 +108,7 @@ enum { | |||
82 | */ | 108 | */ |
83 | static inline void css_get(struct cgroup_subsys_state *css) | 109 | static inline void css_get(struct cgroup_subsys_state *css) |
84 | { | 110 | { |
85 | /* We don't need to reference count the root state */ | 111 | if (!(css->flags & CSS_NO_REF)) |
86 | if (!(css->flags & CSS_ROOT)) | ||
87 | percpu_ref_get(&css->refcnt); | 112 | percpu_ref_get(&css->refcnt); |
88 | } | 113 | } |
89 | 114 | ||
@@ -91,35 +116,51 @@ static inline void css_get(struct cgroup_subsys_state *css) | |||
91 | * css_tryget - try to obtain a reference on the specified css | 116 | * css_tryget - try to obtain a reference on the specified css |
92 | * @css: target css | 117 | * @css: target css |
93 | * | 118 | * |
94 | * Obtain a reference on @css if it's alive. The caller naturally needs to | 119 | * Obtain a reference on @css unless it already has reached zero and is |
95 | * ensure that @css is accessible but doesn't have to be holding a | 120 | * being released. This function doesn't care whether @css is on or |
121 | * offline. The caller naturally needs to ensure that @css is accessible | ||
122 | * but doesn't have to be holding a reference on it - IOW, RCU protected | ||
123 | * access is good enough for this function. Returns %true if a reference | ||
124 | * count was successfully obtained; %false otherwise. | ||
125 | */ | ||
126 | static inline bool css_tryget(struct cgroup_subsys_state *css) | ||
127 | { | ||
128 | if (!(css->flags & CSS_NO_REF)) | ||
129 | return percpu_ref_tryget(&css->refcnt); | ||
130 | return true; | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * css_tryget_online - try to obtain a reference on the specified css if online | ||
135 | * @css: target css | ||
136 | * | ||
137 | * Obtain a reference on @css if it's online. The caller naturally needs | ||
138 | * to ensure that @css is accessible but doesn't have to be holding a | ||
96 | * reference on it - IOW, RCU protected access is good enough for this | 139 | * reference on it - IOW, RCU protected access is good enough for this |
97 | * function. Returns %true if a reference count was successfully obtained; | 140 | * function. Returns %true if a reference count was successfully obtained; |
98 | * %false otherwise. | 141 | * %false otherwise. |
99 | */ | 142 | */ |
100 | static inline bool css_tryget(struct cgroup_subsys_state *css) | 143 | static inline bool css_tryget_online(struct cgroup_subsys_state *css) |
101 | { | 144 | { |
102 | if (css->flags & CSS_ROOT) | 145 | if (!(css->flags & CSS_NO_REF)) |
103 | return true; | 146 | return percpu_ref_tryget_live(&css->refcnt); |
104 | return percpu_ref_tryget(&css->refcnt); | 147 | return true; |
105 | } | 148 | } |
106 | 149 | ||
107 | /** | 150 | /** |
108 | * css_put - put a css reference | 151 | * css_put - put a css reference |
109 | * @css: target css | 152 | * @css: target css |
110 | * | 153 | * |
111 | * Put a reference obtained via css_get() and css_tryget(). | 154 | * Put a reference obtained via css_get() and css_tryget_online(). |
112 | */ | 155 | */ |
113 | static inline void css_put(struct cgroup_subsys_state *css) | 156 | static inline void css_put(struct cgroup_subsys_state *css) |
114 | { | 157 | { |
115 | if (!(css->flags & CSS_ROOT)) | 158 | if (!(css->flags & CSS_NO_REF)) |
116 | percpu_ref_put(&css->refcnt); | 159 | percpu_ref_put(&css->refcnt); |
117 | } | 160 | } |
118 | 161 | ||
119 | /* bits in struct cgroup flags field */ | 162 | /* bits in struct cgroup flags field */ |
120 | enum { | 163 | enum { |
121 | /* Control Group is dead */ | ||
122 | CGRP_DEAD, | ||
123 | /* | 164 | /* |
124 | * Control Group has previously had a child cgroup or a task, | 165 | * Control Group has previously had a child cgroup or a task, |
125 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) | 166 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) |
@@ -133,48 +174,45 @@ enum { | |||
133 | * specified at mount time and thus is implemented here. | 174 | * specified at mount time and thus is implemented here. |
134 | */ | 175 | */ |
135 | CGRP_CPUSET_CLONE_CHILDREN, | 176 | CGRP_CPUSET_CLONE_CHILDREN, |
136 | /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */ | ||
137 | CGRP_SANE_BEHAVIOR, | ||
138 | }; | 177 | }; |
139 | 178 | ||
140 | struct cgroup { | 179 | struct cgroup { |
180 | /* self css with NULL ->ss, points back to this cgroup */ | ||
181 | struct cgroup_subsys_state self; | ||
182 | |||
141 | unsigned long flags; /* "unsigned long" so bitops work */ | 183 | unsigned long flags; /* "unsigned long" so bitops work */ |
142 | 184 | ||
143 | /* | 185 | /* |
144 | * idr allocated in-hierarchy ID. | 186 | * idr allocated in-hierarchy ID. |
145 | * | 187 | * |
146 | * The ID of the root cgroup is always 0, and a new cgroup | 188 | * ID 0 is not used, the ID of the root cgroup is always 1, and a |
147 | * will be assigned with a smallest available ID. | 189 | * new cgroup will be assigned with a smallest available ID. |
148 | * | 190 | * |
149 | * Allocating/Removing ID must be protected by cgroup_mutex. | 191 | * Allocating/Removing ID must be protected by cgroup_mutex. |
150 | */ | 192 | */ |
151 | int id; | 193 | int id; |
152 | 194 | ||
153 | /* the number of attached css's */ | ||
154 | int nr_css; | ||
155 | |||
156 | atomic_t refcnt; | ||
157 | |||
158 | /* | 195 | /* |
159 | * We link our 'sibling' struct into our parent's 'children'. | 196 | * If this cgroup contains any tasks, it contributes one to |
160 | * Our children link their 'sibling' into our 'children'. | 197 | * populated_cnt. All children with non-zero populated_cnt of |
198 | * their own contribute one. The count is zero iff there's no task | ||
199 | * in this cgroup or its subtree. | ||
161 | */ | 200 | */ |
162 | struct list_head sibling; /* my parent's children */ | 201 | int populated_cnt; |
163 | struct list_head children; /* my children */ | ||
164 | 202 | ||
165 | struct cgroup *parent; /* my parent */ | ||
166 | struct kernfs_node *kn; /* cgroup kernfs entry */ | 203 | struct kernfs_node *kn; /* cgroup kernfs entry */ |
204 | struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ | ||
167 | 205 | ||
168 | /* | 206 | /* |
169 | * Monotonically increasing unique serial number which defines a | 207 | * The bitmask of subsystems enabled on the child cgroups. |
170 | * uniform order among all cgroups. It's guaranteed that all | 208 | * ->subtree_control is the one configured through |
171 | * ->children lists are in the ascending order of ->serial_nr. | 209 | * "cgroup.subtree_control" while ->child_subsys_mask is the |
172 | * It's used to allow interrupting and resuming iterations. | 210 | * effective one which may have more subsystems enabled. |
211 | * Controller knobs are made available iff it's enabled in | ||
212 | * ->subtree_control. | ||
173 | */ | 213 | */ |
174 | u64 serial_nr; | 214 | unsigned int subtree_control; |
175 | 215 | unsigned int child_subsys_mask; | |
176 | /* The bitmask of subsystems attached to this cgroup */ | ||
177 | unsigned long subsys_mask; | ||
178 | 216 | ||
179 | /* Private pointers for each registered subsystem */ | 217 | /* Private pointers for each registered subsystem */ |
180 | struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; | 218 | struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; |
@@ -188,6 +226,15 @@ struct cgroup { | |||
188 | struct list_head cset_links; | 226 | struct list_head cset_links; |
189 | 227 | ||
190 | /* | 228 | /* |
229 | * On the default hierarchy, a css_set for a cgroup with some | ||
230 | * subsys disabled will point to css's which are associated with | ||
231 | * the closest ancestor which has the subsys enabled. The | ||
232 | * following lists all css_sets which point to this cgroup's css | ||
233 | * for the given subsystem. | ||
234 | */ | ||
235 | struct list_head e_csets[CGROUP_SUBSYS_COUNT]; | ||
236 | |||
237 | /* | ||
191 | * Linked list running through all cgroups that can | 238 | * Linked list running through all cgroups that can |
192 | * potentially be reaped by the release agent. Protected by | 239 | * potentially be reaped by the release agent. Protected by |
193 | * release_list_lock | 240 | * release_list_lock |
@@ -201,77 +248,17 @@ struct cgroup { | |||
201 | struct list_head pidlists; | 248 | struct list_head pidlists; |
202 | struct mutex pidlist_mutex; | 249 | struct mutex pidlist_mutex; |
203 | 250 | ||
204 | /* dummy css with NULL ->ss, points back to this cgroup */ | 251 | /* used to wait for offlining of csses */ |
205 | struct cgroup_subsys_state dummy_css; | 252 | wait_queue_head_t offline_waitq; |
206 | |||
207 | /* For css percpu_ref killing and RCU-protected deletion */ | ||
208 | struct rcu_head rcu_head; | ||
209 | struct work_struct destroy_work; | ||
210 | }; | 253 | }; |
211 | 254 | ||
212 | #define MAX_CGROUP_ROOT_NAMELEN 64 | 255 | #define MAX_CGROUP_ROOT_NAMELEN 64 |
213 | 256 | ||
214 | /* cgroup_root->flags */ | 257 | /* cgroup_root->flags */ |
215 | enum { | 258 | enum { |
216 | /* | 259 | CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ |
217 | * Unfortunately, cgroup core and various controllers are riddled | ||
218 | * with idiosyncrasies and pointless options. The following flag, | ||
219 | * when set, will force sane behavior - some options are forced on, | ||
220 | * others are disallowed, and some controllers will change their | ||
221 | * hierarchical or other behaviors. | ||
222 | * | ||
223 | * The set of behaviors affected by this flag are still being | ||
224 | * determined and developed and the mount option for this flag is | ||
225 | * prefixed with __DEVEL__. The prefix will be dropped once we | ||
226 | * reach the point where all behaviors are compatible with the | ||
227 | * planned unified hierarchy, which will automatically turn on this | ||
228 | * flag. | ||
229 | * | ||
230 | * The followings are the behaviors currently affected this flag. | ||
231 | * | ||
232 | * - Mount options "noprefix", "xattr", "clone_children", | ||
233 | * "release_agent" and "name" are disallowed. | ||
234 | * | ||
235 | * - When mounting an existing superblock, mount options should | ||
236 | * match. | ||
237 | * | ||
238 | * - Remount is disallowed. | ||
239 | * | ||
240 | * - rename(2) is disallowed. | ||
241 | * | ||
242 | * - "tasks" is removed. Everything should be at process | ||
243 | * granularity. Use "cgroup.procs" instead. | ||
244 | * | ||
245 | * - "cgroup.procs" is not sorted. pids will be unique unless they | ||
246 | * got recycled in between reads. | ||
247 | * | ||
248 | * - "release_agent" and "notify_on_release" are removed. | ||
249 | * Replacement notification mechanism will be implemented. | ||
250 | * | ||
251 | * - "cgroup.clone_children" is removed. | ||
252 | * | ||
253 | * - If mount is requested with sane_behavior but without any | ||
254 | * subsystem, the default unified hierarchy is mounted. | ||
255 | * | ||
256 | * - cpuset: tasks will be kept in empty cpusets when hotplug happens | ||
257 | * and take masks of ancestors with non-empty cpus/mems, instead of | ||
258 | * being moved to an ancestor. | ||
259 | * | ||
260 | * - cpuset: a task can be moved into an empty cpuset, and again it | ||
261 | * takes masks of ancestors. | ||
262 | * | ||
263 | * - memcg: use_hierarchy is on by default and the cgroup file for | ||
264 | * the flag is not created. | ||
265 | * | ||
266 | * - blkcg: blk-throttle becomes properly hierarchical. | ||
267 | */ | ||
268 | CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), | ||
269 | |||
270 | CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ | 260 | CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ |
271 | CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ | 261 | CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ |
272 | |||
273 | /* mount options live below bit 16 */ | ||
274 | CGRP_ROOT_OPTION_MASK = (1 << 16) - 1, | ||
275 | }; | 262 | }; |
276 | 263 | ||
277 | /* | 264 | /* |
@@ -282,6 +269,9 @@ enum { | |||
282 | struct cgroup_root { | 269 | struct cgroup_root { |
283 | struct kernfs_root *kf_root; | 270 | struct kernfs_root *kf_root; |
284 | 271 | ||
272 | /* The bitmask of subsystems attached to this hierarchy */ | ||
273 | unsigned int subsys_mask; | ||
274 | |||
285 | /* Unique id for this hierarchy. */ | 275 | /* Unique id for this hierarchy. */ |
286 | int hierarchy_id; | 276 | int hierarchy_id; |
287 | 277 | ||
@@ -295,7 +285,7 @@ struct cgroup_root { | |||
295 | struct list_head root_list; | 285 | struct list_head root_list; |
296 | 286 | ||
297 | /* Hierarchy-specific flags */ | 287 | /* Hierarchy-specific flags */ |
298 | unsigned long flags; | 288 | unsigned int flags; |
299 | 289 | ||
300 | /* IDs for cgroups in this hierarchy */ | 290 | /* IDs for cgroups in this hierarchy */ |
301 | struct idr cgroup_idr; | 291 | struct idr cgroup_idr; |
@@ -342,6 +332,9 @@ struct css_set { | |||
342 | */ | 332 | */ |
343 | struct list_head cgrp_links; | 333 | struct list_head cgrp_links; |
344 | 334 | ||
335 | /* the default cgroup associated with this css_set */ | ||
336 | struct cgroup *dfl_cgrp; | ||
337 | |||
345 | /* | 338 | /* |
346 | * Set of subsystem states, one for each subsystem. This array is | 339 | * Set of subsystem states, one for each subsystem. This array is |
347 | * immutable after creation apart from the init_css_set during | 340 | * immutable after creation apart from the init_css_set during |
@@ -366,6 +359,15 @@ struct css_set { | |||
366 | struct cgroup *mg_src_cgrp; | 359 | struct cgroup *mg_src_cgrp; |
367 | struct css_set *mg_dst_cset; | 360 | struct css_set *mg_dst_cset; |
368 | 361 | ||
362 | /* | ||
363 | * On the default hierarchy, ->subsys[ssid] may point to a css | ||
364 | * attached to an ancestor instead of the cgroup this css_set is | ||
365 | * associated with. The following node is anchored at | ||
366 | * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to | ||
367 | * iterate through all css's attached to a given cgroup. | ||
368 | */ | ||
369 | struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; | ||
370 | |||
369 | /* For RCU-protected deletion */ | 371 | /* For RCU-protected deletion */ |
370 | struct rcu_head rcu_head; | 372 | struct rcu_head rcu_head; |
371 | }; | 373 | }; |
@@ -382,9 +384,11 @@ struct css_set { | |||
382 | enum { | 384 | enum { |
383 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ | 385 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ |
384 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ | 386 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ |
385 | CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */ | ||
386 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ | 387 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ |
387 | CFTYPE_ONLY_ON_DFL = (1 << 4), /* only on default hierarchy */ | 388 | |
389 | /* internal flags, do not use outside cgroup core proper */ | ||
390 | __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ | ||
391 | __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ | ||
388 | }; | 392 | }; |
389 | 393 | ||
390 | #define MAX_CFTYPE_NAME 64 | 394 | #define MAX_CFTYPE_NAME 64 |
@@ -405,8 +409,7 @@ struct cftype { | |||
405 | 409 | ||
406 | /* | 410 | /* |
407 | * The maximum length of string, excluding trailing nul, that can | 411 | * The maximum length of string, excluding trailing nul, that can |
408 | * be passed to write_string. If < PAGE_SIZE-1, PAGE_SIZE-1 is | 412 | * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. |
409 | * assumed. | ||
410 | */ | 413 | */ |
411 | size_t max_write_len; | 414 | size_t max_write_len; |
412 | 415 | ||
@@ -453,19 +456,13 @@ struct cftype { | |||
453 | s64 val); | 456 | s64 val); |
454 | 457 | ||
455 | /* | 458 | /* |
456 | * write_string() is passed a nul-terminated kernelspace | 459 | * write() is the generic write callback which maps directly to |
457 | * buffer of maximum length determined by max_write_len. | 460 | * kernfs write operation and overrides all other operations. |
458 | * Returns 0 or -ve error code. | 461 | * Maximum write size is determined by ->max_write_len. Use |
462 | * of_css/cft() to access the associated css and cft. | ||
459 | */ | 463 | */ |
460 | int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft, | 464 | ssize_t (*write)(struct kernfs_open_file *of, |
461 | char *buffer); | 465 | char *buf, size_t nbytes, loff_t off); |
462 | /* | ||
463 | * trigger() callback can be used to get some kick from the | ||
464 | * userspace, when the actual string written is not important | ||
465 | * at all. The private field can be used to determine the | ||
466 | * kick type for multiplexing. | ||
467 | */ | ||
468 | int (*trigger)(struct cgroup_subsys_state *css, unsigned int event); | ||
469 | 466 | ||
470 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 467 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
471 | struct lock_class_key lockdep_key; | 468 | struct lock_class_key lockdep_key; |
@@ -475,20 +472,64 @@ struct cftype { | |||
475 | extern struct cgroup_root cgrp_dfl_root; | 472 | extern struct cgroup_root cgrp_dfl_root; |
476 | extern struct css_set init_css_set; | 473 | extern struct css_set init_css_set; |
477 | 474 | ||
475 | /** | ||
476 | * cgroup_on_dfl - test whether a cgroup is on the default hierarchy | ||
477 | * @cgrp: the cgroup of interest | ||
478 | * | ||
479 | * The default hierarchy is the v2 interface of cgroup and this function | ||
480 | * can be used to test whether a cgroup is on the default hierarchy for | ||
481 | * cases where a subsystem should behave differently depending on the | ||
482 | * interface version. | ||
483 | * | ||
484 | * The set of behaviors which change on the default hierarchy are still | ||
485 | * being determined and the mount option is prefixed with __DEVEL__. | ||
486 | * | ||
487 | * List of changed behaviors: | ||
488 | * | ||
489 | * - Mount options "noprefix", "xattr", "clone_children", "release_agent" | ||
490 | * and "name" are disallowed. | ||
491 | * | ||
492 | * - When mounting an existing superblock, mount options should match. | ||
493 | * | ||
494 | * - Remount is disallowed. | ||
495 | * | ||
496 | * - rename(2) is disallowed. | ||
497 | * | ||
498 | * - "tasks" is removed. Everything should be at process granularity. Use | ||
499 | * "cgroup.procs" instead. | ||
500 | * | ||
501 | * - "cgroup.procs" is not sorted. pids will be unique unless they got | ||
502 | * recycled in between reads. | ||
503 | * | ||
504 | * - "release_agent" and "notify_on_release" are removed. Replacement | ||
505 | * notification mechanism will be implemented. | ||
506 | * | ||
507 | * - "cgroup.clone_children" is removed. | ||
508 | * | ||
509 | * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup | ||
510 | * and its descendants contain no task; otherwise, 1. The file also | ||
511 | * generates kernfs notification which can be monitored through poll and | ||
512 | * [di]notify when the value of the file changes. | ||
513 | * | ||
514 | * - cpuset: tasks will be kept in empty cpusets when hotplug happens and | ||
515 | * take masks of ancestors with non-empty cpus/mems, instead of being | ||
516 | * moved to an ancestor. | ||
517 | * | ||
518 | * - cpuset: a task can be moved into an empty cpuset, and again it takes | ||
519 | * masks of ancestors. | ||
520 | * | ||
521 | * - memcg: use_hierarchy is on by default and the cgroup file for the flag | ||
522 | * is not created. | ||
523 | * | ||
524 | * - blkcg: blk-throttle becomes properly hierarchical. | ||
525 | * | ||
526 | * - debug: disallowed on the default hierarchy. | ||
527 | */ | ||
478 | static inline bool cgroup_on_dfl(const struct cgroup *cgrp) | 528 | static inline bool cgroup_on_dfl(const struct cgroup *cgrp) |
479 | { | 529 | { |
480 | return cgrp->root == &cgrp_dfl_root; | 530 | return cgrp->root == &cgrp_dfl_root; |
481 | } | 531 | } |
482 | 532 | ||
483 | /* | ||
484 | * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This | ||
485 | * function can be called as long as @cgrp is accessible. | ||
486 | */ | ||
487 | static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) | ||
488 | { | ||
489 | return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; | ||
490 | } | ||
491 | |||
492 | /* no synchronization, the result can only be used as a hint */ | 533 | /* no synchronization, the result can only be used as a hint */ |
493 | static inline bool cgroup_has_tasks(struct cgroup *cgrp) | 534 | static inline bool cgroup_has_tasks(struct cgroup *cgrp) |
494 | { | 535 | { |
@@ -504,14 +545,24 @@ static inline ino_t cgroup_ino(struct cgroup *cgrp) | |||
504 | return 0; | 545 | return 0; |
505 | } | 546 | } |
506 | 547 | ||
507 | static inline struct cftype *seq_cft(struct seq_file *seq) | 548 | /* cft/css accessors for cftype->write() operation */ |
549 | static inline struct cftype *of_cft(struct kernfs_open_file *of) | ||
508 | { | 550 | { |
509 | struct kernfs_open_file *of = seq->private; | ||
510 | |||
511 | return of->kn->priv; | 551 | return of->kn->priv; |
512 | } | 552 | } |
513 | 553 | ||
514 | struct cgroup_subsys_state *seq_css(struct seq_file *seq); | 554 | struct cgroup_subsys_state *of_css(struct kernfs_open_file *of); |
555 | |||
556 | /* cft/css accessors for cftype->seq_*() operations */ | ||
557 | static inline struct cftype *seq_cft(struct seq_file *seq) | ||
558 | { | ||
559 | return of_cft(seq->private); | ||
560 | } | ||
561 | |||
562 | static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq) | ||
563 | { | ||
564 | return of_css(seq->private); | ||
565 | } | ||
515 | 566 | ||
516 | /* | 567 | /* |
517 | * Name / path handling functions. All are thin wrappers around the kernfs | 568 | * Name / path handling functions. All are thin wrappers around the kernfs |
@@ -541,7 +592,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) | |||
541 | 592 | ||
542 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); | 593 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); |
543 | 594 | ||
544 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | 595 | int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); |
596 | int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | ||
545 | int cgroup_rm_cftypes(struct cftype *cfts); | 597 | int cgroup_rm_cftypes(struct cftype *cfts); |
546 | 598 | ||
547 | bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); | 599 | bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); |
@@ -573,6 +625,7 @@ struct cgroup_subsys { | |||
573 | int (*css_online)(struct cgroup_subsys_state *css); | 625 | int (*css_online)(struct cgroup_subsys_state *css); |
574 | void (*css_offline)(struct cgroup_subsys_state *css); | 626 | void (*css_offline)(struct cgroup_subsys_state *css); |
575 | void (*css_free)(struct cgroup_subsys_state *css); | 627 | void (*css_free)(struct cgroup_subsys_state *css); |
628 | void (*css_reset)(struct cgroup_subsys_state *css); | ||
576 | 629 | ||
577 | int (*can_attach)(struct cgroup_subsys_state *css, | 630 | int (*can_attach)(struct cgroup_subsys_state *css, |
578 | struct cgroup_taskset *tset); | 631 | struct cgroup_taskset *tset); |
@@ -612,14 +665,30 @@ struct cgroup_subsys { | |||
612 | /* link to parent, protected by cgroup_lock() */ | 665 | /* link to parent, protected by cgroup_lock() */ |
613 | struct cgroup_root *root; | 666 | struct cgroup_root *root; |
614 | 667 | ||
668 | /* idr for css->id */ | ||
669 | struct idr css_idr; | ||
670 | |||
615 | /* | 671 | /* |
616 | * List of cftypes. Each entry is the first entry of an array | 672 | * List of cftypes. Each entry is the first entry of an array |
617 | * terminated by zero length name. | 673 | * terminated by zero length name. |
618 | */ | 674 | */ |
619 | struct list_head cfts; | 675 | struct list_head cfts; |
620 | 676 | ||
621 | /* base cftypes, automatically registered with subsys itself */ | 677 | /* |
622 | struct cftype *base_cftypes; | 678 | * Base cftypes which are automatically registered. The two can |
679 | * point to the same array. | ||
680 | */ | ||
681 | struct cftype *dfl_cftypes; /* for the default hierarchy */ | ||
682 | struct cftype *legacy_cftypes; /* for the legacy hierarchies */ | ||
683 | |||
684 | /* | ||
685 | * A subsystem may depend on other subsystems. When such subsystem | ||
686 | * is enabled on a cgroup, the depended-upon subsystems are enabled | ||
687 | * together if available. Subsystems enabled due to dependency are | ||
688 | * not visible to userland until explicitly enabled. The following | ||
689 | * specifies the mask of subsystems that this one depends on. | ||
690 | */ | ||
691 | unsigned int depends_on; | ||
623 | }; | 692 | }; |
624 | 693 | ||
625 | #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; | 694 | #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; |
@@ -627,19 +696,6 @@ struct cgroup_subsys { | |||
627 | #undef SUBSYS | 696 | #undef SUBSYS |
628 | 697 | ||
629 | /** | 698 | /** |
630 | * css_parent - find the parent css | ||
631 | * @css: the target cgroup_subsys_state | ||
632 | * | ||
633 | * Return the parent css of @css. This function is guaranteed to return | ||
634 | * non-NULL parent as long as @css isn't the root. | ||
635 | */ | ||
636 | static inline | ||
637 | struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css) | ||
638 | { | ||
639 | return css->parent; | ||
640 | } | ||
641 | |||
642 | /** | ||
643 | * task_css_set_check - obtain a task's css_set with extra access conditions | 699 | * task_css_set_check - obtain a task's css_set with extra access conditions |
644 | * @task: the task to obtain css_set for | 700 | * @task: the task to obtain css_set for |
645 | * @__c: extra condition expression to be passed to rcu_dereference_check() | 701 | * @__c: extra condition expression to be passed to rcu_dereference_check() |
@@ -731,14 +787,14 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); | |||
731 | * @pos: the css * to use as the loop cursor | 787 | * @pos: the css * to use as the loop cursor |
732 | * @parent: css whose children to walk | 788 | * @parent: css whose children to walk |
733 | * | 789 | * |
734 | * Walk @parent's children. Must be called under rcu_read_lock(). A child | 790 | * Walk @parent's children. Must be called under rcu_read_lock(). |
735 | * css which hasn't finished ->css_online() or already has finished | ||
736 | * ->css_offline() may show up during traversal and it's each subsystem's | ||
737 | * responsibility to verify that each @pos is alive. | ||
738 | * | 791 | * |
739 | * If a subsystem synchronizes against the parent in its ->css_online() and | 792 | * If a subsystem synchronizes ->css_online() and the start of iteration, a |
740 | * before starting iterating, a css which finished ->css_online() is | 793 | * css which finished ->css_online() is guaranteed to be visible in the |
741 | * guaranteed to be visible in the future iterations. | 794 | * future iterations and will stay visible until the last reference is put. |
795 | * A css which hasn't finished ->css_online() or already finished | ||
796 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
797 | * responsibility to synchronize against on/offlining. | ||
742 | * | 798 | * |
743 | * It is allowed to temporarily drop RCU read lock during iteration. The | 799 | * It is allowed to temporarily drop RCU read lock during iteration. The |
744 | * caller is responsible for ensuring that @pos remains accessible until | 800 | * caller is responsible for ensuring that @pos remains accessible until |
@@ -761,17 +817,16 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos); | |||
761 | * @root: css whose descendants to walk | 817 | * @root: css whose descendants to walk |
762 | * | 818 | * |
763 | * Walk @root's descendants. @root is included in the iteration and the | 819 | * Walk @root's descendants. @root is included in the iteration and the |
764 | * first node to be visited. Must be called under rcu_read_lock(). A | 820 | * first node to be visited. Must be called under rcu_read_lock(). |
765 | * descendant css which hasn't finished ->css_online() or already has | ||
766 | * finished ->css_offline() may show up during traversal and it's each | ||
767 | * subsystem's responsibility to verify that each @pos is alive. | ||
768 | * | 821 | * |
769 | * If a subsystem synchronizes against the parent in its ->css_online() and | 822 | * If a subsystem synchronizes ->css_online() and the start of iteration, a |
770 | * before starting iterating, and synchronizes against @pos on each | 823 | * css which finished ->css_online() is guaranteed to be visible in the |
771 | * iteration, any descendant css which finished ->css_online() is | 824 | * future iterations and will stay visible until the last reference is put. |
772 | * guaranteed to be visible in the future iterations. | 825 | * A css which hasn't finished ->css_online() or already finished |
826 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
827 | * responsibility to synchronize against on/offlining. | ||
773 | * | 828 | * |
774 | * In other words, the following guarantees that a descendant can't escape | 829 | * For example, the following guarantees that a descendant can't escape |
775 | * state updates of its ancestors. | 830 | * state updates of its ancestors. |
776 | * | 831 | * |
777 | * my_online(@css) | 832 | * my_online(@css) |
@@ -827,18 +882,34 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, | |||
827 | * | 882 | * |
828 | * Similar to css_for_each_descendant_pre() but performs post-order | 883 | * Similar to css_for_each_descendant_pre() but performs post-order |
829 | * traversal instead. @root is included in the iteration and the last | 884 | * traversal instead. @root is included in the iteration and the last |
830 | * node to be visited. Note that the walk visibility guarantee described | 885 | * node to be visited. |
831 | * in pre-order walk doesn't apply the same to post-order walks. | 886 | * |
887 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
888 | * css which finished ->css_online() is guaranteed to be visible in the | ||
889 | * future iterations and will stay visible until the last reference is put. | ||
890 | * A css which hasn't finished ->css_online() or already finished | ||
891 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
892 | * responsibility to synchronize against on/offlining. | ||
893 | * | ||
894 | * Note that the walk visibility guarantee example described in pre-order | ||
895 | * walk doesn't apply the same to post-order walks. | ||
832 | */ | 896 | */ |
833 | #define css_for_each_descendant_post(pos, css) \ | 897 | #define css_for_each_descendant_post(pos, css) \ |
834 | for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ | 898 | for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ |
835 | (pos) = css_next_descendant_post((pos), (css))) | 899 | (pos) = css_next_descendant_post((pos), (css))) |
836 | 900 | ||
901 | bool css_has_online_children(struct cgroup_subsys_state *css); | ||
902 | |||
837 | /* A css_task_iter should be treated as an opaque object */ | 903 | /* A css_task_iter should be treated as an opaque object */ |
838 | struct css_task_iter { | 904 | struct css_task_iter { |
839 | struct cgroup_subsys_state *origin_css; | 905 | struct cgroup_subsys *ss; |
840 | struct list_head *cset_link; | 906 | |
841 | struct list_head *task; | 907 | struct list_head *cset_pos; |
908 | struct list_head *cset_head; | ||
909 | |||
910 | struct list_head *task_pos; | ||
911 | struct list_head *tasks_head; | ||
912 | struct list_head *mg_tasks_head; | ||
842 | }; | 913 | }; |
843 | 914 | ||
844 | void css_task_iter_start(struct cgroup_subsys_state *css, | 915 | void css_task_iter_start(struct cgroup_subsys_state *css, |
@@ -849,8 +920,8 @@ void css_task_iter_end(struct css_task_iter *it); | |||
849 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); | 920 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
850 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); | 921 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); |
851 | 922 | ||
852 | struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry, | 923 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, |
853 | struct cgroup_subsys *ss); | 924 | struct cgroup_subsys *ss); |
854 | 925 | ||
855 | #else /* !CONFIG_CGROUPS */ | 926 | #else /* !CONFIG_CGROUPS */ |
856 | 927 | ||
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 768fe44e19f0..98c4f9b12b03 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h | |||
@@ -7,10 +7,6 @@ | |||
7 | SUBSYS(cpuset) | 7 | SUBSYS(cpuset) |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | #if IS_ENABLED(CONFIG_CGROUP_DEBUG) | ||
11 | SUBSYS(debug) | ||
12 | #endif | ||
13 | |||
14 | #if IS_ENABLED(CONFIG_CGROUP_SCHED) | 10 | #if IS_ENABLED(CONFIG_CGROUP_SCHED) |
15 | SUBSYS(cpu) | 11 | SUBSYS(cpu) |
16 | #endif | 12 | #endif |
@@ -50,6 +46,13 @@ SUBSYS(net_prio) | |||
50 | #if IS_ENABLED(CONFIG_CGROUP_HUGETLB) | 46 | #if IS_ENABLED(CONFIG_CGROUP_HUGETLB) |
51 | SUBSYS(hugetlb) | 47 | SUBSYS(hugetlb) |
52 | #endif | 48 | #endif |
49 | |||
50 | /* | ||
51 | * The following subsystems are not supported on the default hierarchy. | ||
52 | */ | ||
53 | #if IS_ENABLED(CONFIG_CGROUP_DEBUG) | ||
54 | SUBSYS(debug) | ||
55 | #endif | ||
53 | /* | 56 | /* |
54 | * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. | 57 | * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. |
55 | */ | 58 | */ |
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 0c287dbbb144..411dd7eb2653 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
@@ -619,5 +619,10 @@ static inline void clk_writel(u32 val, u32 __iomem *reg) | |||
619 | 619 | ||
620 | #endif /* platform dependent I/O accessors */ | 620 | #endif /* platform dependent I/O accessors */ |
621 | 621 | ||
622 | #ifdef CONFIG_DEBUG_FS | ||
623 | struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, | ||
624 | void *data, const struct file_operations *fops); | ||
625 | #endif | ||
626 | |||
622 | #endif /* CONFIG_COMMON_CLK */ | 627 | #endif /* CONFIG_COMMON_CLK */ |
623 | #endif /* CLK_PROVIDER_H */ | 628 | #endif /* CLK_PROVIDER_H */ |
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h new file mode 100644 index 000000000000..f3050e15f833 --- /dev/null +++ b/include/linux/clk/clk-conf.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Samsung Electronics Co., Ltd. | ||
3 | * Sylwester Nawrocki <s.nawrocki@samsung.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | struct device_node; | ||
11 | |||
12 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) | ||
13 | int of_clk_set_defaults(struct device_node *node, bool clk_supplier); | ||
14 | #else | ||
15 | static inline int of_clk_set_defaults(struct device_node *node, | ||
16 | bool clk_supplier) | ||
17 | { | ||
18 | return 0; | ||
19 | } | ||
20 | #endif | ||
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 4a21a872dbbd..e8d8a35034a5 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h | |||
@@ -41,6 +41,8 @@ | |||
41 | * @idlest_reg: register containing the DPLL idle status bitfield | 41 | * @idlest_reg: register containing the DPLL idle status bitfield |
42 | * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg | 42 | * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg |
43 | * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg | 43 | * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg |
44 | * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg | ||
45 | * @dcc_rate: rate atleast which DCC @dcc_mask must be set | ||
44 | * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg | 46 | * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg |
45 | * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg | 47 | * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg |
46 | * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg | 48 | * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg |
@@ -86,6 +88,8 @@ struct dpll_data { | |||
86 | u32 idlest_mask; | 88 | u32 idlest_mask; |
87 | u32 dco_mask; | 89 | u32 dco_mask; |
88 | u32 sddiv_mask; | 90 | u32 sddiv_mask; |
91 | u32 dcc_mask; | ||
92 | unsigned long dcc_rate; | ||
89 | u32 lpmode_mask; | 93 | u32 lpmode_mask; |
90 | u32 m4xen_mask; | 94 | u32 m4xen_mask; |
91 | u8 auto_recal_bit; | 95 | u8 auto_recal_bit; |
@@ -94,7 +98,26 @@ struct dpll_data { | |||
94 | u8 flags; | 98 | u8 flags; |
95 | }; | 99 | }; |
96 | 100 | ||
97 | struct clk_hw_omap_ops; | 101 | struct clk_hw_omap; |
102 | |||
103 | /** | ||
104 | * struct clk_hw_omap_ops - OMAP clk ops | ||
105 | * @find_idlest: find idlest register information for a clock | ||
106 | * @find_companion: find companion clock register information for a clock, | ||
107 | * basically converts CM_ICLKEN* <-> CM_FCLKEN* | ||
108 | * @allow_idle: enables autoidle hardware functionality for a clock | ||
109 | * @deny_idle: prevent autoidle hardware functionality for a clock | ||
110 | */ | ||
111 | struct clk_hw_omap_ops { | ||
112 | void (*find_idlest)(struct clk_hw_omap *oclk, | ||
113 | void __iomem **idlest_reg, | ||
114 | u8 *idlest_bit, u8 *idlest_val); | ||
115 | void (*find_companion)(struct clk_hw_omap *oclk, | ||
116 | void __iomem **other_reg, | ||
117 | u8 *other_bit); | ||
118 | void (*allow_idle)(struct clk_hw_omap *oclk); | ||
119 | void (*deny_idle)(struct clk_hw_omap *oclk); | ||
120 | }; | ||
98 | 121 | ||
99 | /** | 122 | /** |
100 | * struct clk_hw_omap - OMAP struct clk | 123 | * struct clk_hw_omap - OMAP struct clk |
@@ -259,6 +282,12 @@ int omap2_dflt_clk_enable(struct clk_hw *hw); | |||
259 | void omap2_dflt_clk_disable(struct clk_hw *hw); | 282 | void omap2_dflt_clk_disable(struct clk_hw *hw); |
260 | int omap2_dflt_clk_is_enabled(struct clk_hw *hw); | 283 | int omap2_dflt_clk_is_enabled(struct clk_hw *hw); |
261 | void omap3_clk_lock_dpll5(void); | 284 | void omap3_clk_lock_dpll5(void); |
285 | unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, | ||
286 | unsigned long parent_rate); | ||
287 | int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, | ||
288 | unsigned long parent_rate); | ||
289 | void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); | ||
290 | void omap2xxx_clkt_vps_init(void); | ||
262 | 291 | ||
263 | void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); | 292 | void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); |
264 | void ti_dt_clocks_register(struct ti_dt_clk *oclks); | 293 | void ti_dt_clocks_register(struct ti_dt_clk *oclks); |
@@ -278,6 +307,8 @@ int omap5xxx_dt_clk_init(void); | |||
278 | int dra7xx_dt_clk_init(void); | 307 | int dra7xx_dt_clk_init(void); |
279 | int am33xx_dt_clk_init(void); | 308 | int am33xx_dt_clk_init(void); |
280 | int am43xx_dt_clk_init(void); | 309 | int am43xx_dt_clk_init(void); |
310 | int omap2420_dt_clk_init(void); | ||
311 | int omap2430_dt_clk_init(void); | ||
281 | 312 | ||
282 | #ifdef CONFIG_OF | 313 | #ifdef CONFIG_OF |
283 | void of_ti_clk_allow_autoidle_all(void); | 314 | void of_ti_clk_allow_autoidle_all(void); |
@@ -287,6 +318,8 @@ static inline void of_ti_clk_allow_autoidle_all(void) { } | |||
287 | static inline void of_ti_clk_deny_autoidle_all(void) { } | 318 | static inline void of_ti_clk_deny_autoidle_all(void) { } |
288 | #endif | 319 | #endif |
289 | 320 | ||
321 | extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; | ||
322 | extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait; | ||
290 | extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; | 323 | extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; |
291 | extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; | 324 | extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; |
292 | extern const struct clk_hw_omap_ops clkhwops_wait; | 325 | extern const struct clk_hw_omap_ops clkhwops_wait; |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 64fdfe1cfcf0..d5ad7b1118fc 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -383,7 +383,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
383 | /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ | 383 | /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ |
384 | #ifdef CONFIG_KPROBES | 384 | #ifdef CONFIG_KPROBES |
385 | # define __kprobes __attribute__((__section__(".kprobes.text"))) | 385 | # define __kprobes __attribute__((__section__(".kprobes.text"))) |
386 | # define nokprobe_inline __always_inline | ||
386 | #else | 387 | #else |
387 | # define __kprobes | 388 | # define __kprobes |
389 | # define nokprobe_inline inline | ||
388 | #endif | 390 | #endif |
389 | #endif /* __LINUX_COMPILER_H */ | 391 | #endif /* __LINUX_COMPILER_H */ |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 3f458896d45c..8f8ae95c6e27 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -75,6 +75,7 @@ struct cpufreq_policy { | |||
75 | unsigned int max; /* in kHz */ | 75 | unsigned int max; /* in kHz */ |
76 | unsigned int cur; /* in kHz, only needed if cpufreq | 76 | unsigned int cur; /* in kHz, only needed if cpufreq |
77 | * governors are used */ | 77 | * governors are used */ |
78 | unsigned int restore_freq; /* = policy->cur before transition */ | ||
78 | unsigned int suspend_freq; /* freq to set during suspend */ | 79 | unsigned int suspend_freq; /* freq to set during suspend */ |
79 | 80 | ||
80 | unsigned int policy; /* see above */ | 81 | unsigned int policy; /* see above */ |
@@ -221,11 +222,35 @@ struct cpufreq_driver { | |||
221 | 222 | ||
222 | /* define one out of two */ | 223 | /* define one out of two */ |
223 | int (*setpolicy) (struct cpufreq_policy *policy); | 224 | int (*setpolicy) (struct cpufreq_policy *policy); |
225 | |||
226 | /* | ||
227 | * On failure, should always restore frequency to policy->restore_freq | ||
228 | * (i.e. old freq). | ||
229 | */ | ||
224 | int (*target) (struct cpufreq_policy *policy, /* Deprecated */ | 230 | int (*target) (struct cpufreq_policy *policy, /* Deprecated */ |
225 | unsigned int target_freq, | 231 | unsigned int target_freq, |
226 | unsigned int relation); | 232 | unsigned int relation); |
227 | int (*target_index) (struct cpufreq_policy *policy, | 233 | int (*target_index) (struct cpufreq_policy *policy, |
228 | unsigned int index); | 234 | unsigned int index); |
235 | /* | ||
236 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION | ||
237 | * unset. | ||
238 | * | ||
239 | * get_intermediate should return a stable intermediate frequency | ||
240 | * platform wants to switch to and target_intermediate() should set CPU | ||
241 | * to to that frequency, before jumping to the frequency corresponding | ||
242 | * to 'index'. Core will take care of sending notifications and driver | ||
243 | * doesn't have to handle them in target_intermediate() or | ||
244 | * target_index(). | ||
245 | * | ||
246 | * Drivers can return '0' from get_intermediate() in case they don't | ||
247 | * wish to switch to intermediate frequency for some target frequency. | ||
248 | * In that case core will directly call ->target_index(). | ||
249 | */ | ||
250 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, | ||
251 | unsigned int index); | ||
252 | int (*target_intermediate)(struct cpufreq_policy *policy, | ||
253 | unsigned int index); | ||
229 | 254 | ||
230 | /* should be defined, if possible */ | 255 | /* should be defined, if possible */ |
231 | unsigned int (*get) (unsigned int cpu); | 256 | unsigned int (*get) (unsigned int cpu); |
@@ -457,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative; | |||
457 | *********************************************************************/ | 482 | *********************************************************************/ |
458 | 483 | ||
459 | /* Special Values of .frequency field */ | 484 | /* Special Values of .frequency field */ |
460 | #define CPUFREQ_ENTRY_INVALID ~0 | 485 | #define CPUFREQ_ENTRY_INVALID ~0u |
461 | #define CPUFREQ_TABLE_END ~1 | 486 | #define CPUFREQ_TABLE_END ~1u |
462 | /* Special Values of .flags field */ | 487 | /* Special Values of .flags field */ |
463 | #define CPUFREQ_BOOST_FREQ (1 << 0) | 488 | #define CPUFREQ_BOOST_FREQ (1 << 0) |
464 | 489 | ||
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index c51a436135c4..25e0df6155a4 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -84,6 +84,7 @@ struct cpuidle_device { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 86 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
87 | DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); | ||
87 | 88 | ||
88 | /** | 89 | /** |
89 | * cpuidle_get_last_residency - retrieves the last state's residency time | 90 | * cpuidle_get_last_residency - retrieves the last state's residency time |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index d08e4d2a9b92..2997af6d2ccd 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -142,6 +142,13 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, | |||
142 | return 1; | 142 | return 1; |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) | ||
146 | { | ||
147 | set_bit(0, cpumask_bits(dstp)); | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
145 | #define for_each_cpu(cpu, mask) \ | 152 | #define for_each_cpu(cpu, mask) \ |
146 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) | 153 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
147 | #define for_each_cpu_not(cpu, mask) \ | 154 | #define for_each_cpu_not(cpu, mask) \ |
@@ -192,6 +199,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) | |||
192 | 199 | ||
193 | int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); | 200 | int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); |
194 | int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); | 201 | int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); |
202 | int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); | ||
195 | 203 | ||
196 | /** | 204 | /** |
197 | * for_each_cpu - iterate over every cpu in a mask | 205 | * for_each_cpu - iterate over every cpu in a mask |
@@ -600,7 +608,7 @@ static inline int cpulist_scnprintf(char *buf, int len, | |||
600 | static inline int cpumask_parse(const char *buf, struct cpumask *dstp) | 608 | static inline int cpumask_parse(const char *buf, struct cpumask *dstp) |
601 | { | 609 | { |
602 | char *nl = strchr(buf, '\n'); | 610 | char *nl = strchr(buf, '\n'); |
603 | int len = nl ? nl - buf : strlen(buf); | 611 | unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
604 | 612 | ||
605 | return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); | 613 | return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
606 | } | 614 | } |
diff --git a/include/linux/crc7.h b/include/linux/crc7.h index 1786e772d5c6..d590765106f3 100644 --- a/include/linux/crc7.h +++ b/include/linux/crc7.h | |||
@@ -2,13 +2,13 @@ | |||
2 | #define _LINUX_CRC7_H | 2 | #define _LINUX_CRC7_H |
3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
4 | 4 | ||
5 | extern const u8 crc7_syndrome_table[256]; | 5 | extern const u8 crc7_be_syndrome_table[256]; |
6 | 6 | ||
7 | static inline u8 crc7_byte(u8 crc, u8 data) | 7 | static inline u8 crc7_be_byte(u8 crc, u8 data) |
8 | { | 8 | { |
9 | return crc7_syndrome_table[(crc << 1) ^ data]; | 9 | return crc7_be_syndrome_table[crc ^ data]; |
10 | } | 10 | } |
11 | 11 | ||
12 | extern u8 crc7(u8 crc, const u8 *buffer, size_t len); | 12 | extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len); |
13 | 13 | ||
14 | #endif | 14 | #endif |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b92eadf92d72..d45e949699ea 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) | |||
710 | 710 | ||
711 | static inline void ablkcipher_request_set_callback( | 711 | static inline void ablkcipher_request_set_callback( |
712 | struct ablkcipher_request *req, | 712 | struct ablkcipher_request *req, |
713 | u32 flags, crypto_completion_t complete, void *data) | 713 | u32 flags, crypto_completion_t compl, void *data) |
714 | { | 714 | { |
715 | req->base.complete = complete; | 715 | req->base.complete = compl; |
716 | req->base.data = data; | 716 | req->base.data = data; |
717 | req->base.flags = flags; | 717 | req->base.flags = flags; |
718 | } | 718 | } |
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req) | |||
841 | 841 | ||
842 | static inline void aead_request_set_callback(struct aead_request *req, | 842 | static inline void aead_request_set_callback(struct aead_request *req, |
843 | u32 flags, | 843 | u32 flags, |
844 | crypto_completion_t complete, | 844 | crypto_completion_t compl, |
845 | void *data) | 845 | void *data) |
846 | { | 846 | { |
847 | req->base.complete = complete; | 847 | req->base.complete = compl; |
848 | req->base.data = data; | 848 | req->base.data = data; |
849 | req->base.flags = flags; | 849 | req->base.flags = flags; |
850 | } | 850 | } |
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h new file mode 100644 index 000000000000..7009b8bec77b --- /dev/null +++ b/include/linux/dell-led.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __DELL_LED_H__ | ||
2 | #define __DELL_LED_H__ | ||
3 | |||
4 | enum { | ||
5 | DELL_LED_MICMUTE, | ||
6 | }; | ||
7 | |||
8 | int dell_app_wmi_led_set(int whichled, int on); | ||
9 | |||
10 | #endif | ||
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 63da56ed9796..e1707de043ae 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -115,12 +115,6 @@ typedef int (*dm_busy_fn) (struct dm_target *ti); | |||
115 | 115 | ||
116 | void dm_error(const char *message); | 116 | void dm_error(const char *message); |
117 | 117 | ||
118 | /* | ||
119 | * Combine device limits. | ||
120 | */ | ||
121 | int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | ||
122 | sector_t start, sector_t len, void *data); | ||
123 | |||
124 | struct dm_dev { | 118 | struct dm_dev { |
125 | struct block_device *bdev; | 119 | struct block_device *bdev; |
126 | fmode_t mode; | 120 | fmode_t mode; |
@@ -132,7 +126,7 @@ struct dm_dev { | |||
132 | * are opened/closed correctly. | 126 | * are opened/closed correctly. |
133 | */ | 127 | */ |
134 | int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, | 128 | int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, |
135 | struct dm_dev **result); | 129 | struct dm_dev **result); |
136 | void dm_put_device(struct dm_target *ti, struct dm_dev *d); | 130 | void dm_put_device(struct dm_target *ti, struct dm_dev *d); |
137 | 131 | ||
138 | /* | 132 | /* |
@@ -291,6 +285,7 @@ struct dm_target_io { | |||
291 | struct dm_io *io; | 285 | struct dm_io *io; |
292 | struct dm_target *ti; | 286 | struct dm_target *ti; |
293 | unsigned target_bio_nr; | 287 | unsigned target_bio_nr; |
288 | unsigned *len_ptr; | ||
294 | struct bio clone; | 289 | struct bio clone; |
295 | }; | 290 | }; |
296 | 291 | ||
@@ -401,6 +396,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); | |||
401 | struct gendisk *dm_disk(struct mapped_device *md); | 396 | struct gendisk *dm_disk(struct mapped_device *md); |
402 | int dm_suspended(struct dm_target *ti); | 397 | int dm_suspended(struct dm_target *ti); |
403 | int dm_noflush_suspending(struct dm_target *ti); | 398 | int dm_noflush_suspending(struct dm_target *ti); |
399 | void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); | ||
404 | union map_info *dm_get_rq_mapinfo(struct request *rq); | 400 | union map_info *dm_get_rq_mapinfo(struct request *rq); |
405 | 401 | ||
406 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md); | 402 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md); |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 72cb0ddb9678..d2c5cc7c583c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -292,7 +292,7 @@ struct dma_chan_dev { | |||
292 | }; | 292 | }; |
293 | 293 | ||
294 | /** | 294 | /** |
295 | * enum dma_slave_buswidth - defines bus with of the DMA slave | 295 | * enum dma_slave_buswidth - defines bus width of the DMA slave |
296 | * device, source or target buses | 296 | * device, source or target buses |
297 | */ | 297 | */ |
298 | enum dma_slave_buswidth { | 298 | enum dma_slave_buswidth { |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 41bbf8ba4ba8..efc681fd5895 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ioport.h> | 20 | #include <linux/ioport.h> |
21 | #include <linux/pfn.h> | 21 | #include <linux/pfn.h> |
22 | #include <linux/pstore.h> | 22 | #include <linux/pstore.h> |
23 | #include <linux/reboot.h> | ||
23 | 24 | ||
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
25 | 26 | ||
@@ -521,6 +522,8 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, | |||
521 | int *reset_type); | 522 | int *reset_type); |
522 | typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); | 523 | typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); |
523 | 524 | ||
525 | void efi_native_runtime_setup(void); | ||
526 | |||
524 | /* | 527 | /* |
525 | * EFI Configuration Table and GUID definitions | 528 | * EFI Configuration Table and GUID definitions |
526 | */ | 529 | */ |
@@ -870,11 +873,13 @@ extern int __init efi_uart_console_only (void); | |||
870 | extern void efi_initialize_iomem_resources(struct resource *code_resource, | 873 | extern void efi_initialize_iomem_resources(struct resource *code_resource, |
871 | struct resource *data_resource, struct resource *bss_resource); | 874 | struct resource *data_resource, struct resource *bss_resource); |
872 | extern void efi_get_time(struct timespec *now); | 875 | extern void efi_get_time(struct timespec *now); |
873 | extern int efi_set_rtc_mmss(const struct timespec *now); | ||
874 | extern void efi_reserve_boot_services(void); | 876 | extern void efi_reserve_boot_services(void); |
875 | extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); | 877 | extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); |
876 | extern struct efi_memory_map memmap; | 878 | extern struct efi_memory_map memmap; |
877 | 879 | ||
880 | extern int efi_reboot_quirk_mode; | ||
881 | extern bool efi_poweroff_required(void); | ||
882 | |||
878 | /* Iterate through an efi_memory_map */ | 883 | /* Iterate through an efi_memory_map */ |
879 | #define for_each_efi_memory_desc(m, md) \ | 884 | #define for_each_efi_memory_desc(m, md) \ |
880 | for ((md) = (m)->map; \ | 885 | for ((md) = (m)->map; \ |
@@ -916,7 +921,8 @@ extern int __init efi_setup_pcdp_console(char *); | |||
916 | #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ | 921 | #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ |
917 | #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ | 922 | #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ |
918 | #define EFI_64BIT 5 /* Is the firmware 64-bit? */ | 923 | #define EFI_64BIT 5 /* Is the firmware 64-bit? */ |
919 | #define EFI_ARCH_1 6 /* First arch-specific bit */ | 924 | #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ |
925 | #define EFI_ARCH_1 7 /* First arch-specific bit */ | ||
920 | 926 | ||
921 | #ifdef CONFIG_EFI | 927 | #ifdef CONFIG_EFI |
922 | /* | 928 | /* |
@@ -926,11 +932,14 @@ static inline bool efi_enabled(int feature) | |||
926 | { | 932 | { |
927 | return test_bit(feature, &efi.flags) != 0; | 933 | return test_bit(feature, &efi.flags) != 0; |
928 | } | 934 | } |
935 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); | ||
929 | #else | 936 | #else |
930 | static inline bool efi_enabled(int feature) | 937 | static inline bool efi_enabled(int feature) |
931 | { | 938 | { |
932 | return false; | 939 | return false; |
933 | } | 940 | } |
941 | static inline void | ||
942 | efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} | ||
934 | #endif | 943 | #endif |
935 | 944 | ||
936 | /* | 945 | /* |
@@ -1031,12 +1040,8 @@ struct efivar_operations { | |||
1031 | struct efivars { | 1040 | struct efivars { |
1032 | /* | 1041 | /* |
1033 | * ->lock protects two things: | 1042 | * ->lock protects two things: |
1034 | * 1) ->list - adds, removals, reads, writes | 1043 | * 1) efivarfs_list and efivars_sysfs_list |
1035 | * 2) ops.[gs]et_variable() calls. | 1044 | * 2) ->ops calls |
1036 | * It must not be held when creating sysfs entries or calling kmalloc. | ||
1037 | * ops.get_next_variable() is only called from register_efivars() | ||
1038 | * or efivar_update_sysfs_entries(), | ||
1039 | * which is protected by the BKL, so that path is safe. | ||
1040 | */ | 1045 | */ |
1041 | spinlock_t lock; | 1046 | spinlock_t lock; |
1042 | struct kset *kset; | 1047 | struct kset *kset; |
@@ -1161,4 +1166,46 @@ static inline void | |||
1161 | efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} | 1166 | efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} |
1162 | #endif | 1167 | #endif |
1163 | 1168 | ||
1169 | /* prototypes shared between arch specific and generic stub code */ | ||
1170 | |||
1171 | #define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg) | ||
1172 | #define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) | ||
1173 | |||
1174 | void efi_printk(efi_system_table_t *sys_table_arg, char *str); | ||
1175 | |||
1176 | void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, | ||
1177 | unsigned long addr); | ||
1178 | |||
1179 | char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, | ||
1180 | efi_loaded_image_t *image, int *cmd_line_len); | ||
1181 | |||
1182 | efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, | ||
1183 | efi_memory_desc_t **map, | ||
1184 | unsigned long *map_size, | ||
1185 | unsigned long *desc_size, | ||
1186 | u32 *desc_ver, | ||
1187 | unsigned long *key_ptr); | ||
1188 | |||
1189 | efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, | ||
1190 | unsigned long size, unsigned long align, | ||
1191 | unsigned long *addr); | ||
1192 | |||
1193 | efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, | ||
1194 | unsigned long size, unsigned long align, | ||
1195 | unsigned long *addr, unsigned long max); | ||
1196 | |||
1197 | efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, | ||
1198 | unsigned long *image_addr, | ||
1199 | unsigned long image_size, | ||
1200 | unsigned long alloc_size, | ||
1201 | unsigned long preferred_addr, | ||
1202 | unsigned long alignment); | ||
1203 | |||
1204 | efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, | ||
1205 | efi_loaded_image_t *image, | ||
1206 | char *cmd_line, char *option_string, | ||
1207 | unsigned long max_addr, | ||
1208 | unsigned long *load_addr, | ||
1209 | unsigned long *load_size); | ||
1210 | |||
1164 | #endif /* _LINUX_EFI_H */ | 1211 | #endif /* _LINUX_EFI_H */ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index df63bd3a8cf1..45a91474487d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request | |||
133 | extern int elv_register_queue(struct request_queue *q); | 133 | extern int elv_register_queue(struct request_queue *q); |
134 | extern void elv_unregister_queue(struct request_queue *q); | 134 | extern void elv_unregister_queue(struct request_queue *q); |
135 | extern int elv_may_queue(struct request_queue *, int); | 135 | extern int elv_may_queue(struct request_queue *, int); |
136 | extern void elv_abort_queue(struct request_queue *); | ||
137 | extern void elv_completed_request(struct request_queue *, struct request *); | 136 | extern void elv_completed_request(struct request_queue *, struct request *); |
138 | extern int elv_set_request(struct request_queue *q, struct request *rq, | 137 | extern int elv_set_request(struct request_queue *q, struct request *rq, |
139 | struct bio *bio, gfp_t gfp_mask); | 138 | struct bio *bio, gfp_t gfp_mask); |
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 0a114d05f68d..e658229fee39 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -154,13 +154,20 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) | |||
154 | * @reset: Reset (part of) the device, as specified by a bitmask of | 154 | * @reset: Reset (part of) the device, as specified by a bitmask of |
155 | * flags from &enum ethtool_reset_flags. Returns a negative | 155 | * flags from &enum ethtool_reset_flags. Returns a negative |
156 | * error code or zero. | 156 | * error code or zero. |
157 | * @get_rxfh_key_size: Get the size of the RX flow hash key. | ||
158 | * Returns zero if not supported for this specific device. | ||
157 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. | 159 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. |
158 | * Returns zero if not supported for this specific device. | 160 | * Returns zero if not supported for this specific device. |
159 | * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. | 161 | * @get_rxfh: Get the contents of the RX flow hash indirection table and hash |
160 | * Will not be called if @get_rxfh_indir_size returns zero. | 162 | * key. |
163 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
164 | * @get_rxfh_key_size are implemented and return non-zero. | ||
161 | * Returns a negative error code or zero. | 165 | * Returns a negative error code or zero. |
162 | * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. | 166 | * @set_rxfh: Set the contents of the RX flow hash indirection table and/or |
163 | * Will not be called if @get_rxfh_indir_size returns zero. | 167 | * hash key. In case only the indirection table or hash key is to be |
168 | * changed, the other argument will be %NULL. | ||
169 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
170 | * @get_rxfh_key_size are implemented and return non-zero. | ||
164 | * Returns a negative error code or zero. | 171 | * Returns a negative error code or zero. |
165 | * @get_channels: Get number of channels. | 172 | * @get_channels: Get number of channels. |
166 | * @set_channels: Set number of channels. Returns a negative error code or | 173 | * @set_channels: Set number of channels. Returns a negative error code or |
@@ -232,9 +239,11 @@ struct ethtool_ops { | |||
232 | int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); | 239 | int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); |
233 | int (*flash_device)(struct net_device *, struct ethtool_flash *); | 240 | int (*flash_device)(struct net_device *, struct ethtool_flash *); |
234 | int (*reset)(struct net_device *, u32 *); | 241 | int (*reset)(struct net_device *, u32 *); |
242 | u32 (*get_rxfh_key_size)(struct net_device *); | ||
235 | u32 (*get_rxfh_indir_size)(struct net_device *); | 243 | u32 (*get_rxfh_indir_size)(struct net_device *); |
236 | int (*get_rxfh_indir)(struct net_device *, u32 *); | 244 | int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); |
237 | int (*set_rxfh_indir)(struct net_device *, const u32 *); | 245 | int (*set_rxfh)(struct net_device *, const u32 *indir, |
246 | const u8 *key); | ||
238 | void (*get_channels)(struct net_device *, struct ethtool_channels *); | 247 | void (*get_channels)(struct net_device *, struct ethtool_channels *); |
239 | int (*set_channels)(struct net_device *, struct ethtool_channels *); | 248 | int (*set_channels)(struct net_device *, struct ethtool_channels *); |
240 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); | 249 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); |
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index df53e1753a76..6ff0b0b42d47 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ | 19 | #define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ |
20 | #define F2FS_BLKSIZE 4096 /* support only 4KB block */ | 20 | #define F2FS_BLKSIZE 4096 /* support only 4KB block */ |
21 | #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ | 21 | #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ |
22 | #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) | ||
22 | 23 | ||
23 | #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ | 24 | #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ |
24 | #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ | 25 | #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ |
@@ -75,6 +76,7 @@ struct f2fs_super_block { | |||
75 | __le16 volume_name[512]; /* volume name */ | 76 | __le16 volume_name[512]; /* volume name */ |
76 | __le32 extension_count; /* # of extensions below */ | 77 | __le32 extension_count; /* # of extensions below */ |
77 | __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ | 78 | __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ |
79 | __le32 cp_payload; | ||
78 | } __packed; | 80 | } __packed; |
79 | 81 | ||
80 | /* | 82 | /* |
@@ -146,6 +148,9 @@ struct f2fs_extent { | |||
146 | #define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ | 148 | #define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ |
147 | #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ | 149 | #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ |
148 | 150 | ||
151 | #define ADDRS_PER_PAGE(page, fi) \ | ||
152 | (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK) | ||
153 | |||
149 | #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) | 154 | #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) |
150 | #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) | 155 | #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) |
151 | #define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3) | 156 | #define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3) |
@@ -391,6 +396,9 @@ typedef __le32 f2fs_hash_t; | |||
391 | /* MAX level for dir lookup */ | 396 | /* MAX level for dir lookup */ |
392 | #define MAX_DIR_HASH_DEPTH 63 | 397 | #define MAX_DIR_HASH_DEPTH 63 |
393 | 398 | ||
399 | /* MAX buckets in one level of dir */ | ||
400 | #define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) | ||
401 | |||
394 | #define SIZE_OF_DIR_ENTRY 11 /* by byte */ | 402 | #define SIZE_OF_DIR_ENTRY 11 /* by byte */ |
395 | #define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ | 403 | #define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ |
396 | BITS_PER_BYTE) | 404 | BITS_PER_BYTE) |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 024fd03e5d18..a7e3c48d73a7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -37,21 +37,270 @@ | |||
37 | #define BPF_CALL 0x80 /* function call */ | 37 | #define BPF_CALL 0x80 /* function call */ |
38 | #define BPF_EXIT 0x90 /* function return */ | 38 | #define BPF_EXIT 0x90 /* function return */ |
39 | 39 | ||
40 | /* Register numbers */ | ||
41 | enum { | ||
42 | BPF_REG_0 = 0, | ||
43 | BPF_REG_1, | ||
44 | BPF_REG_2, | ||
45 | BPF_REG_3, | ||
46 | BPF_REG_4, | ||
47 | BPF_REG_5, | ||
48 | BPF_REG_6, | ||
49 | BPF_REG_7, | ||
50 | BPF_REG_8, | ||
51 | BPF_REG_9, | ||
52 | BPF_REG_10, | ||
53 | __MAX_BPF_REG, | ||
54 | }; | ||
55 | |||
40 | /* BPF has 10 general purpose 64-bit registers and stack frame. */ | 56 | /* BPF has 10 general purpose 64-bit registers and stack frame. */ |
41 | #define MAX_BPF_REG 11 | 57 | #define MAX_BPF_REG __MAX_BPF_REG |
58 | |||
59 | /* ArgX, context and stack frame pointer register positions. Note, | ||
60 | * Arg1, Arg2, Arg3, etc are used as argument mappings of function | ||
61 | * calls in BPF_CALL instruction. | ||
62 | */ | ||
63 | #define BPF_REG_ARG1 BPF_REG_1 | ||
64 | #define BPF_REG_ARG2 BPF_REG_2 | ||
65 | #define BPF_REG_ARG3 BPF_REG_3 | ||
66 | #define BPF_REG_ARG4 BPF_REG_4 | ||
67 | #define BPF_REG_ARG5 BPF_REG_5 | ||
68 | #define BPF_REG_CTX BPF_REG_6 | ||
69 | #define BPF_REG_FP BPF_REG_10 | ||
70 | |||
71 | /* Additional register mappings for converted user programs. */ | ||
72 | #define BPF_REG_A BPF_REG_0 | ||
73 | #define BPF_REG_X BPF_REG_7 | ||
74 | #define BPF_REG_TMP BPF_REG_8 | ||
42 | 75 | ||
43 | /* BPF program can access up to 512 bytes of stack space. */ | 76 | /* BPF program can access up to 512 bytes of stack space. */ |
44 | #define MAX_BPF_STACK 512 | 77 | #define MAX_BPF_STACK 512 |
45 | 78 | ||
46 | /* Arg1, context and stack frame pointer register positions. */ | 79 | /* Helper macros for filter block array initializers. */ |
47 | #define ARG1_REG 1 | 80 | |
48 | #define CTX_REG 6 | 81 | /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ |
49 | #define FP_REG 10 | 82 | |
83 | #define BPF_ALU64_REG(OP, DST, SRC) \ | ||
84 | ((struct sock_filter_int) { \ | ||
85 | .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ | ||
86 | .dst_reg = DST, \ | ||
87 | .src_reg = SRC, \ | ||
88 | .off = 0, \ | ||
89 | .imm = 0 }) | ||
90 | |||
91 | #define BPF_ALU32_REG(OP, DST, SRC) \ | ||
92 | ((struct sock_filter_int) { \ | ||
93 | .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ | ||
94 | .dst_reg = DST, \ | ||
95 | .src_reg = SRC, \ | ||
96 | .off = 0, \ | ||
97 | .imm = 0 }) | ||
98 | |||
99 | /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ | ||
100 | |||
101 | #define BPF_ALU64_IMM(OP, DST, IMM) \ | ||
102 | ((struct sock_filter_int) { \ | ||
103 | .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ | ||
104 | .dst_reg = DST, \ | ||
105 | .src_reg = 0, \ | ||
106 | .off = 0, \ | ||
107 | .imm = IMM }) | ||
108 | |||
109 | #define BPF_ALU32_IMM(OP, DST, IMM) \ | ||
110 | ((struct sock_filter_int) { \ | ||
111 | .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ | ||
112 | .dst_reg = DST, \ | ||
113 | .src_reg = 0, \ | ||
114 | .off = 0, \ | ||
115 | .imm = IMM }) | ||
116 | |||
117 | /* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ | ||
118 | |||
119 | #define BPF_ENDIAN(TYPE, DST, LEN) \ | ||
120 | ((struct sock_filter_int) { \ | ||
121 | .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ | ||
122 | .dst_reg = DST, \ | ||
123 | .src_reg = 0, \ | ||
124 | .off = 0, \ | ||
125 | .imm = LEN }) | ||
126 | |||
127 | /* Short form of mov, dst_reg = src_reg */ | ||
128 | |||
129 | #define BPF_MOV64_REG(DST, SRC) \ | ||
130 | ((struct sock_filter_int) { \ | ||
131 | .code = BPF_ALU64 | BPF_MOV | BPF_X, \ | ||
132 | .dst_reg = DST, \ | ||
133 | .src_reg = SRC, \ | ||
134 | .off = 0, \ | ||
135 | .imm = 0 }) | ||
136 | |||
137 | #define BPF_MOV32_REG(DST, SRC) \ | ||
138 | ((struct sock_filter_int) { \ | ||
139 | .code = BPF_ALU | BPF_MOV | BPF_X, \ | ||
140 | .dst_reg = DST, \ | ||
141 | .src_reg = SRC, \ | ||
142 | .off = 0, \ | ||
143 | .imm = 0 }) | ||
144 | |||
145 | /* Short form of mov, dst_reg = imm32 */ | ||
146 | |||
147 | #define BPF_MOV64_IMM(DST, IMM) \ | ||
148 | ((struct sock_filter_int) { \ | ||
149 | .code = BPF_ALU64 | BPF_MOV | BPF_K, \ | ||
150 | .dst_reg = DST, \ | ||
151 | .src_reg = 0, \ | ||
152 | .off = 0, \ | ||
153 | .imm = IMM }) | ||
154 | |||
155 | #define BPF_MOV32_IMM(DST, IMM) \ | ||
156 | ((struct sock_filter_int) { \ | ||
157 | .code = BPF_ALU | BPF_MOV | BPF_K, \ | ||
158 | .dst_reg = DST, \ | ||
159 | .src_reg = 0, \ | ||
160 | .off = 0, \ | ||
161 | .imm = IMM }) | ||
162 | |||
163 | /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ | ||
164 | |||
165 | #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ | ||
166 | ((struct sock_filter_int) { \ | ||
167 | .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ | ||
168 | .dst_reg = DST, \ | ||
169 | .src_reg = SRC, \ | ||
170 | .off = 0, \ | ||
171 | .imm = IMM }) | ||
172 | |||
173 | #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ | ||
174 | ((struct sock_filter_int) { \ | ||
175 | .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ | ||
176 | .dst_reg = DST, \ | ||
177 | .src_reg = SRC, \ | ||
178 | .off = 0, \ | ||
179 | .imm = IMM }) | ||
180 | |||
181 | /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ | ||
182 | |||
183 | #define BPF_LD_ABS(SIZE, IMM) \ | ||
184 | ((struct sock_filter_int) { \ | ||
185 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ | ||
186 | .dst_reg = 0, \ | ||
187 | .src_reg = 0, \ | ||
188 | .off = 0, \ | ||
189 | .imm = IMM }) | ||
190 | |||
191 | /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ | ||
192 | |||
193 | #define BPF_LD_IND(SIZE, SRC, IMM) \ | ||
194 | ((struct sock_filter_int) { \ | ||
195 | .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ | ||
196 | .dst_reg = 0, \ | ||
197 | .src_reg = SRC, \ | ||
198 | .off = 0, \ | ||
199 | .imm = IMM }) | ||
200 | |||
201 | /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ | ||
202 | |||
203 | #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ | ||
204 | ((struct sock_filter_int) { \ | ||
205 | .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ | ||
206 | .dst_reg = DST, \ | ||
207 | .src_reg = SRC, \ | ||
208 | .off = OFF, \ | ||
209 | .imm = 0 }) | ||
210 | |||
211 | /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ | ||
212 | |||
213 | #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ | ||
214 | ((struct sock_filter_int) { \ | ||
215 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ | ||
216 | .dst_reg = DST, \ | ||
217 | .src_reg = SRC, \ | ||
218 | .off = OFF, \ | ||
219 | .imm = 0 }) | ||
220 | |||
221 | /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ | ||
222 | |||
223 | #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ | ||
224 | ((struct sock_filter_int) { \ | ||
225 | .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ | ||
226 | .dst_reg = DST, \ | ||
227 | .src_reg = 0, \ | ||
228 | .off = OFF, \ | ||
229 | .imm = IMM }) | ||
230 | |||
231 | /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ | ||
232 | |||
233 | #define BPF_JMP_REG(OP, DST, SRC, OFF) \ | ||
234 | ((struct sock_filter_int) { \ | ||
235 | .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ | ||
236 | .dst_reg = DST, \ | ||
237 | .src_reg = SRC, \ | ||
238 | .off = OFF, \ | ||
239 | .imm = 0 }) | ||
240 | |||
241 | /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ | ||
242 | |||
243 | #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ | ||
244 | ((struct sock_filter_int) { \ | ||
245 | .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ | ||
246 | .dst_reg = DST, \ | ||
247 | .src_reg = 0, \ | ||
248 | .off = OFF, \ | ||
249 | .imm = IMM }) | ||
250 | |||
251 | /* Function call */ | ||
252 | |||
253 | #define BPF_EMIT_CALL(FUNC) \ | ||
254 | ((struct sock_filter_int) { \ | ||
255 | .code = BPF_JMP | BPF_CALL, \ | ||
256 | .dst_reg = 0, \ | ||
257 | .src_reg = 0, \ | ||
258 | .off = 0, \ | ||
259 | .imm = ((FUNC) - __bpf_call_base) }) | ||
260 | |||
261 | /* Raw code statement block */ | ||
262 | |||
263 | #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ | ||
264 | ((struct sock_filter_int) { \ | ||
265 | .code = CODE, \ | ||
266 | .dst_reg = DST, \ | ||
267 | .src_reg = SRC, \ | ||
268 | .off = OFF, \ | ||
269 | .imm = IMM }) | ||
270 | |||
271 | /* Program exit */ | ||
272 | |||
273 | #define BPF_EXIT_INSN() \ | ||
274 | ((struct sock_filter_int) { \ | ||
275 | .code = BPF_JMP | BPF_EXIT, \ | ||
276 | .dst_reg = 0, \ | ||
277 | .src_reg = 0, \ | ||
278 | .off = 0, \ | ||
279 | .imm = 0 }) | ||
280 | |||
281 | #define bytes_to_bpf_size(bytes) \ | ||
282 | ({ \ | ||
283 | int bpf_size = -EINVAL; \ | ||
284 | \ | ||
285 | if (bytes == sizeof(u8)) \ | ||
286 | bpf_size = BPF_B; \ | ||
287 | else if (bytes == sizeof(u16)) \ | ||
288 | bpf_size = BPF_H; \ | ||
289 | else if (bytes == sizeof(u32)) \ | ||
290 | bpf_size = BPF_W; \ | ||
291 | else if (bytes == sizeof(u64)) \ | ||
292 | bpf_size = BPF_DW; \ | ||
293 | \ | ||
294 | bpf_size; \ | ||
295 | }) | ||
296 | |||
297 | /* Macro to invoke filter function. */ | ||
298 | #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) | ||
50 | 299 | ||
51 | struct sock_filter_int { | 300 | struct sock_filter_int { |
52 | __u8 code; /* opcode */ | 301 | __u8 code; /* opcode */ |
53 | __u8 a_reg:4; /* dest register */ | 302 | __u8 dst_reg:4; /* dest register */ |
54 | __u8 x_reg:4; /* source register */ | 303 | __u8 src_reg:4; /* source register */ |
55 | __s16 off; /* signed offset */ | 304 | __s16 off; /* signed offset */ |
56 | __s32 imm; /* signed immediate constant */ | 305 | __s32 imm; /* signed immediate constant */ |
57 | }; | 306 | }; |
@@ -97,21 +346,16 @@ static inline unsigned int sk_filter_size(unsigned int proglen) | |||
97 | #define sk_filter_proglen(fprog) \ | 346 | #define sk_filter_proglen(fprog) \ |
98 | (fprog->len * sizeof(fprog->filter[0])) | 347 | (fprog->len * sizeof(fprog->filter[0])) |
99 | 348 | ||
100 | #define SK_RUN_FILTER(filter, ctx) \ | ||
101 | (*filter->bpf_func)(ctx, filter->insnsi) | ||
102 | |||
103 | int sk_filter(struct sock *sk, struct sk_buff *skb); | 349 | int sk_filter(struct sock *sk, struct sk_buff *skb); |
104 | 350 | ||
105 | u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, | 351 | void sk_filter_select_runtime(struct sk_filter *fp); |
106 | const struct sock_filter_int *insni); | 352 | void sk_filter_free(struct sk_filter *fp); |
107 | u32 sk_run_filter_int_skb(const struct sk_buff *ctx, | ||
108 | const struct sock_filter_int *insni); | ||
109 | 353 | ||
110 | int sk_convert_filter(struct sock_filter *prog, int len, | 354 | int sk_convert_filter(struct sock_filter *prog, int len, |
111 | struct sock_filter_int *new_prog, int *new_len); | 355 | struct sock_filter_int *new_prog, int *new_len); |
112 | 356 | ||
113 | int sk_unattached_filter_create(struct sk_filter **pfp, | 357 | int sk_unattached_filter_create(struct sk_filter **pfp, |
114 | struct sock_fprog *fprog); | 358 | struct sock_fprog_kern *fprog); |
115 | void sk_unattached_filter_destroy(struct sk_filter *fp); | 359 | void sk_unattached_filter_destroy(struct sk_filter *fp); |
116 | 360 | ||
117 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 361 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
@@ -120,11 +364,48 @@ int sk_detach_filter(struct sock *sk); | |||
120 | int sk_chk_filter(struct sock_filter *filter, unsigned int flen); | 364 | int sk_chk_filter(struct sock_filter *filter, unsigned int flen); |
121 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, | 365 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, |
122 | unsigned int len); | 366 | unsigned int len); |
123 | void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); | ||
124 | 367 | ||
125 | void sk_filter_charge(struct sock *sk, struct sk_filter *fp); | 368 | void sk_filter_charge(struct sock *sk, struct sk_filter *fp); |
126 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); | 369 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); |
127 | 370 | ||
371 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | ||
372 | void bpf_int_jit_compile(struct sk_filter *fp); | ||
373 | |||
374 | #define BPF_ANC BIT(15) | ||
375 | |||
376 | static inline u16 bpf_anc_helper(const struct sock_filter *ftest) | ||
377 | { | ||
378 | BUG_ON(ftest->code & BPF_ANC); | ||
379 | |||
380 | switch (ftest->code) { | ||
381 | case BPF_LD | BPF_W | BPF_ABS: | ||
382 | case BPF_LD | BPF_H | BPF_ABS: | ||
383 | case BPF_LD | BPF_B | BPF_ABS: | ||
384 | #define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ | ||
385 | return BPF_ANC | SKF_AD_##CODE | ||
386 | switch (ftest->k) { | ||
387 | BPF_ANCILLARY(PROTOCOL); | ||
388 | BPF_ANCILLARY(PKTTYPE); | ||
389 | BPF_ANCILLARY(IFINDEX); | ||
390 | BPF_ANCILLARY(NLATTR); | ||
391 | BPF_ANCILLARY(NLATTR_NEST); | ||
392 | BPF_ANCILLARY(MARK); | ||
393 | BPF_ANCILLARY(QUEUE); | ||
394 | BPF_ANCILLARY(HATYPE); | ||
395 | BPF_ANCILLARY(RXHASH); | ||
396 | BPF_ANCILLARY(CPU); | ||
397 | BPF_ANCILLARY(ALU_XOR_X); | ||
398 | BPF_ANCILLARY(VLAN_TAG); | ||
399 | BPF_ANCILLARY(VLAN_TAG_PRESENT); | ||
400 | BPF_ANCILLARY(PAY_OFFSET); | ||
401 | BPF_ANCILLARY(RANDOM); | ||
402 | } | ||
403 | /* Fallthrough. */ | ||
404 | default: | ||
405 | return ftest->code; | ||
406 | } | ||
407 | } | ||
408 | |||
128 | #ifdef CONFIG_BPF_JIT | 409 | #ifdef CONFIG_BPF_JIT |
129 | #include <stdarg.h> | 410 | #include <stdarg.h> |
130 | #include <linux/linkage.h> | 411 | #include <linux/linkage.h> |
@@ -144,85 +425,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, | |||
144 | } | 425 | } |
145 | #else | 426 | #else |
146 | #include <linux/slab.h> | 427 | #include <linux/slab.h> |
428 | |||
147 | static inline void bpf_jit_compile(struct sk_filter *fp) | 429 | static inline void bpf_jit_compile(struct sk_filter *fp) |
148 | { | 430 | { |
149 | } | 431 | } |
432 | |||
150 | static inline void bpf_jit_free(struct sk_filter *fp) | 433 | static inline void bpf_jit_free(struct sk_filter *fp) |
151 | { | 434 | { |
152 | kfree(fp); | 435 | kfree(fp); |
153 | } | 436 | } |
154 | #endif | 437 | #endif /* CONFIG_BPF_JIT */ |
155 | 438 | ||
156 | static inline int bpf_tell_extensions(void) | 439 | static inline int bpf_tell_extensions(void) |
157 | { | 440 | { |
158 | return SKF_AD_MAX; | 441 | return SKF_AD_MAX; |
159 | } | 442 | } |
160 | 443 | ||
161 | enum { | ||
162 | BPF_S_RET_K = 1, | ||
163 | BPF_S_RET_A, | ||
164 | BPF_S_ALU_ADD_K, | ||
165 | BPF_S_ALU_ADD_X, | ||
166 | BPF_S_ALU_SUB_K, | ||
167 | BPF_S_ALU_SUB_X, | ||
168 | BPF_S_ALU_MUL_K, | ||
169 | BPF_S_ALU_MUL_X, | ||
170 | BPF_S_ALU_DIV_X, | ||
171 | BPF_S_ALU_MOD_K, | ||
172 | BPF_S_ALU_MOD_X, | ||
173 | BPF_S_ALU_AND_K, | ||
174 | BPF_S_ALU_AND_X, | ||
175 | BPF_S_ALU_OR_K, | ||
176 | BPF_S_ALU_OR_X, | ||
177 | BPF_S_ALU_XOR_K, | ||
178 | BPF_S_ALU_XOR_X, | ||
179 | BPF_S_ALU_LSH_K, | ||
180 | BPF_S_ALU_LSH_X, | ||
181 | BPF_S_ALU_RSH_K, | ||
182 | BPF_S_ALU_RSH_X, | ||
183 | BPF_S_ALU_NEG, | ||
184 | BPF_S_LD_W_ABS, | ||
185 | BPF_S_LD_H_ABS, | ||
186 | BPF_S_LD_B_ABS, | ||
187 | BPF_S_LD_W_LEN, | ||
188 | BPF_S_LD_W_IND, | ||
189 | BPF_S_LD_H_IND, | ||
190 | BPF_S_LD_B_IND, | ||
191 | BPF_S_LD_IMM, | ||
192 | BPF_S_LDX_W_LEN, | ||
193 | BPF_S_LDX_B_MSH, | ||
194 | BPF_S_LDX_IMM, | ||
195 | BPF_S_MISC_TAX, | ||
196 | BPF_S_MISC_TXA, | ||
197 | BPF_S_ALU_DIV_K, | ||
198 | BPF_S_LD_MEM, | ||
199 | BPF_S_LDX_MEM, | ||
200 | BPF_S_ST, | ||
201 | BPF_S_STX, | ||
202 | BPF_S_JMP_JA, | ||
203 | BPF_S_JMP_JEQ_K, | ||
204 | BPF_S_JMP_JEQ_X, | ||
205 | BPF_S_JMP_JGE_K, | ||
206 | BPF_S_JMP_JGE_X, | ||
207 | BPF_S_JMP_JGT_K, | ||
208 | BPF_S_JMP_JGT_X, | ||
209 | BPF_S_JMP_JSET_K, | ||
210 | BPF_S_JMP_JSET_X, | ||
211 | /* Ancillary data */ | ||
212 | BPF_S_ANC_PROTOCOL, | ||
213 | BPF_S_ANC_PKTTYPE, | ||
214 | BPF_S_ANC_IFINDEX, | ||
215 | BPF_S_ANC_NLATTR, | ||
216 | BPF_S_ANC_NLATTR_NEST, | ||
217 | BPF_S_ANC_MARK, | ||
218 | BPF_S_ANC_QUEUE, | ||
219 | BPF_S_ANC_HATYPE, | ||
220 | BPF_S_ANC_RXHASH, | ||
221 | BPF_S_ANC_CPU, | ||
222 | BPF_S_ANC_ALU_XOR_X, | ||
223 | BPF_S_ANC_VLAN_TAG, | ||
224 | BPF_S_ANC_VLAN_TAG_PRESENT, | ||
225 | BPF_S_ANC_PAY_OFFSET, | ||
226 | }; | ||
227 | |||
228 | #endif /* __LINUX_FILTER_H__ */ | 444 | #endif /* __LINUX_FILTER_H__ */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index c3f46e499dd0..2daccaf4b547 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -128,6 +128,10 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
128 | #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) | 128 | #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) |
129 | /* Write access to underlying fs */ | 129 | /* Write access to underlying fs */ |
130 | #define FMODE_WRITER ((__force fmode_t)0x10000) | 130 | #define FMODE_WRITER ((__force fmode_t)0x10000) |
131 | /* Has read method(s) */ | ||
132 | #define FMODE_CAN_READ ((__force fmode_t)0x20000) | ||
133 | /* Has write method(s) */ | ||
134 | #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) | ||
131 | 135 | ||
132 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 136 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
133 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | 137 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) |
@@ -343,8 +347,7 @@ struct address_space_operations { | |||
343 | void (*invalidatepage) (struct page *, unsigned int, unsigned int); | 347 | void (*invalidatepage) (struct page *, unsigned int, unsigned int); |
344 | int (*releasepage) (struct page *, gfp_t); | 348 | int (*releasepage) (struct page *, gfp_t); |
345 | void (*freepage)(struct page *); | 349 | void (*freepage)(struct page *); |
346 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, | 350 | ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); |
347 | loff_t offset, unsigned long nr_segs); | ||
348 | int (*get_xip_mem)(struct address_space *, pgoff_t, int, | 351 | int (*get_xip_mem)(struct address_space *, pgoff_t, int, |
349 | void **, unsigned long *); | 352 | void **, unsigned long *); |
350 | /* | 353 | /* |
@@ -830,7 +833,7 @@ static inline struct file *get_file(struct file *f) | |||
830 | * | 833 | * |
831 | * Lockd stuffs a "host" pointer into this. | 834 | * Lockd stuffs a "host" pointer into this. |
832 | */ | 835 | */ |
833 | typedef struct files_struct *fl_owner_t; | 836 | typedef void *fl_owner_t; |
834 | 837 | ||
835 | struct file_lock_operations { | 838 | struct file_lock_operations { |
836 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); | 839 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); |
@@ -1448,6 +1451,8 @@ struct block_device_operations; | |||
1448 | #define HAVE_COMPAT_IOCTL 1 | 1451 | #define HAVE_COMPAT_IOCTL 1 |
1449 | #define HAVE_UNLOCKED_IOCTL 1 | 1452 | #define HAVE_UNLOCKED_IOCTL 1 |
1450 | 1453 | ||
1454 | struct iov_iter; | ||
1455 | |||
1451 | struct file_operations { | 1456 | struct file_operations { |
1452 | struct module *owner; | 1457 | struct module *owner; |
1453 | loff_t (*llseek) (struct file *, loff_t, int); | 1458 | loff_t (*llseek) (struct file *, loff_t, int); |
@@ -1455,6 +1460,8 @@ struct file_operations { | |||
1455 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); | 1460 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); |
1456 | ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1461 | ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1457 | ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1462 | ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1463 | ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); | ||
1464 | ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); | ||
1458 | int (*iterate) (struct file *, struct dir_context *); | 1465 | int (*iterate) (struct file *, struct dir_context *); |
1459 | unsigned int (*poll) (struct file *, struct poll_table_struct *); | 1466 | unsigned int (*poll) (struct file *, struct poll_table_struct *); |
1460 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1467 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
@@ -1914,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode) | |||
1914 | 1921 | ||
1915 | static inline int break_deleg(struct inode *inode, unsigned int mode) | 1922 | static inline int break_deleg(struct inode *inode, unsigned int mode) |
1916 | { | 1923 | { |
1924 | /* | ||
1925 | * Since this check is lockless, we must ensure that any refcounts | ||
1926 | * taken are done before checking inode->i_flock. Otherwise, we could | ||
1927 | * end up racing with tasks trying to set a new lease on this file. | ||
1928 | */ | ||
1929 | smp_mb(); | ||
1917 | if (inode->i_flock) | 1930 | if (inode->i_flock) |
1918 | return __break_lease(inode, mode, FL_DELEG); | 1931 | return __break_lease(inode, mode, FL_DELEG); |
1919 | return 0; | 1932 | return 0; |
@@ -2404,20 +2417,18 @@ extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); | |||
2404 | extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr, | 2417 | extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr, |
2405 | unsigned long size, pgoff_t pgoff); | 2418 | unsigned long size, pgoff_t pgoff); |
2406 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); | 2419 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); |
2407 | extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); | 2420 | extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); |
2408 | extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long); | 2421 | extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); |
2409 | extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); | 2422 | extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); |
2410 | extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, | 2423 | extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t); |
2411 | unsigned long *, loff_t, size_t, size_t); | ||
2412 | extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); | 2424 | extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); |
2413 | extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); | 2425 | extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); |
2414 | extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); | 2426 | extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); |
2415 | extern int generic_segment_checks(const struct iovec *iov, | 2427 | extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); |
2416 | unsigned long *nr_segs, size_t *count, int access_flags); | 2428 | extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); |
2417 | 2429 | ||
2418 | /* fs/block_dev.c */ | 2430 | /* fs/block_dev.c */ |
2419 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, | 2431 | extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); |
2420 | unsigned long nr_segs, loff_t pos); | ||
2421 | extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, | 2432 | extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, |
2422 | int datasync); | 2433 | int datasync); |
2423 | extern void block_sync_page(struct page *page); | 2434 | extern void block_sync_page(struct page *page); |
@@ -2427,7 +2438,7 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *, | |||
2427 | struct pipe_inode_info *, size_t, unsigned int); | 2438 | struct pipe_inode_info *, size_t, unsigned int); |
2428 | extern ssize_t default_file_splice_read(struct file *, loff_t *, | 2439 | extern ssize_t default_file_splice_read(struct file *, loff_t *, |
2429 | struct pipe_inode_info *, size_t, unsigned int); | 2440 | struct pipe_inode_info *, size_t, unsigned int); |
2430 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | 2441 | extern ssize_t iter_file_splice_write(struct pipe_inode_info *, |
2431 | struct file *, loff_t *, size_t, unsigned int); | 2442 | struct file *, loff_t *, size_t, unsigned int); |
2432 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 2443 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
2433 | struct file *out, loff_t *, size_t len, unsigned int flags); | 2444 | struct file *out, loff_t *, size_t len, unsigned int flags); |
@@ -2477,16 +2488,16 @@ enum { | |||
2477 | void dio_end_io(struct bio *bio, int error); | 2488 | void dio_end_io(struct bio *bio, int error); |
2478 | 2489 | ||
2479 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 2490 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
2480 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | 2491 | struct block_device *bdev, struct iov_iter *iter, loff_t offset, |
2481 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, | 2492 | get_block_t get_block, dio_iodone_t end_io, |
2482 | dio_submit_t submit_io, int flags); | 2493 | dio_submit_t submit_io, int flags); |
2483 | 2494 | ||
2484 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 2495 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
2485 | struct inode *inode, const struct iovec *iov, loff_t offset, | 2496 | struct inode *inode, struct iov_iter *iter, loff_t offset, |
2486 | unsigned long nr_segs, get_block_t get_block) | 2497 | get_block_t get_block) |
2487 | { | 2498 | { |
2488 | return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 2499 | return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter, |
2489 | offset, nr_segs, get_block, NULL, NULL, | 2500 | offset, get_block, NULL, NULL, |
2490 | DIO_LOCKING | DIO_SKIP_HOLES); | 2501 | DIO_LOCKING | DIO_SKIP_HOLES); |
2491 | } | 2502 | } |
2492 | #endif | 2503 | #endif |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 2018751cad9e..6bb5e3f2a3b4 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -33,8 +33,7 @@ | |||
33 | * features, then it must call an indirect function that | 33 | * features, then it must call an indirect function that |
34 | * does. Or at least does enough to prevent any unwelcome side effects. | 34 | * does. Or at least does enough to prevent any unwelcome side effects. |
35 | */ | 35 | */ |
36 | #if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ | 36 | #if !ARCH_SUPPORTS_FTRACE_OPS |
37 | !ARCH_SUPPORTS_FTRACE_OPS | ||
38 | # define FTRACE_FORCE_LIST_FUNC 1 | 37 | # define FTRACE_FORCE_LIST_FUNC 1 |
39 | #else | 38 | #else |
40 | # define FTRACE_FORCE_LIST_FUNC 0 | 39 | # define FTRACE_FORCE_LIST_FUNC 0 |
@@ -62,9 +61,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | |||
62 | * set in the flags member. | 61 | * set in the flags member. |
63 | * | 62 | * |
64 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | 63 | * ENABLED - set/unset when ftrace_ops is registered/unregistered |
65 | * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops | ||
66 | * is part of the global tracers sharing the same filter | ||
67 | * via set_ftrace_* debugfs files. | ||
68 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically | 64 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
69 | * allocated ftrace_ops which need special care | 65 | * allocated ftrace_ops which need special care |
70 | * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops | 66 | * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops |
@@ -96,15 +92,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | |||
96 | */ | 92 | */ |
97 | enum { | 93 | enum { |
98 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 94 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
99 | FTRACE_OPS_FL_GLOBAL = 1 << 1, | 95 | FTRACE_OPS_FL_DYNAMIC = 1 << 1, |
100 | FTRACE_OPS_FL_DYNAMIC = 1 << 2, | 96 | FTRACE_OPS_FL_CONTROL = 1 << 2, |
101 | FTRACE_OPS_FL_CONTROL = 1 << 3, | 97 | FTRACE_OPS_FL_SAVE_REGS = 1 << 3, |
102 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, | 98 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, |
103 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, | 99 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, |
104 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, | 100 | FTRACE_OPS_FL_STUB = 1 << 6, |
105 | FTRACE_OPS_FL_STUB = 1 << 7, | 101 | FTRACE_OPS_FL_INITIALIZED = 1 << 7, |
106 | FTRACE_OPS_FL_INITIALIZED = 1 << 8, | 102 | FTRACE_OPS_FL_DELETED = 1 << 8, |
107 | FTRACE_OPS_FL_DELETED = 1 << 9, | ||
108 | }; | 103 | }; |
109 | 104 | ||
110 | /* | 105 | /* |
@@ -122,17 +117,18 @@ struct ftrace_ops { | |||
122 | ftrace_func_t func; | 117 | ftrace_func_t func; |
123 | struct ftrace_ops *next; | 118 | struct ftrace_ops *next; |
124 | unsigned long flags; | 119 | unsigned long flags; |
125 | int __percpu *disabled; | ||
126 | void *private; | 120 | void *private; |
121 | int __percpu *disabled; | ||
127 | #ifdef CONFIG_DYNAMIC_FTRACE | 122 | #ifdef CONFIG_DYNAMIC_FTRACE |
123 | int nr_trampolines; | ||
128 | struct ftrace_hash *notrace_hash; | 124 | struct ftrace_hash *notrace_hash; |
129 | struct ftrace_hash *filter_hash; | 125 | struct ftrace_hash *filter_hash; |
126 | struct ftrace_hash *tramp_hash; | ||
130 | struct mutex regex_lock; | 127 | struct mutex regex_lock; |
128 | unsigned long trampoline; | ||
131 | #endif | 129 | #endif |
132 | }; | 130 | }; |
133 | 131 | ||
134 | extern int function_trace_stop; | ||
135 | |||
136 | /* | 132 | /* |
137 | * Type of the current tracing. | 133 | * Type of the current tracing. |
138 | */ | 134 | */ |
@@ -144,32 +140,6 @@ enum ftrace_tracing_type_t { | |||
144 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ | 140 | /* Current tracing type, default is FTRACE_TYPE_ENTER */ |
145 | extern enum ftrace_tracing_type_t ftrace_tracing_type; | 141 | extern enum ftrace_tracing_type_t ftrace_tracing_type; |
146 | 142 | ||
147 | /** | ||
148 | * ftrace_stop - stop function tracer. | ||
149 | * | ||
150 | * A quick way to stop the function tracer. Note this an on off switch, | ||
151 | * it is not something that is recursive like preempt_disable. | ||
152 | * This does not disable the calling of mcount, it only stops the | ||
153 | * calling of functions from mcount. | ||
154 | */ | ||
155 | static inline void ftrace_stop(void) | ||
156 | { | ||
157 | function_trace_stop = 1; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * ftrace_start - start the function tracer. | ||
162 | * | ||
163 | * This function is the inverse of ftrace_stop. This does not enable | ||
164 | * the function tracing if the function tracer is disabled. This only | ||
165 | * sets the function tracer flag to continue calling the functions | ||
166 | * from mcount. | ||
167 | */ | ||
168 | static inline void ftrace_start(void) | ||
169 | { | ||
170 | function_trace_stop = 0; | ||
171 | } | ||
172 | |||
173 | /* | 143 | /* |
174 | * The ftrace_ops must be a static and should also | 144 | * The ftrace_ops must be a static and should also |
175 | * be read_mostly. These functions do modify read_mostly variables | 145 | * be read_mostly. These functions do modify read_mostly variables |
@@ -246,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void) | |||
246 | } | 216 | } |
247 | static inline void clear_ftrace_function(void) { } | 217 | static inline void clear_ftrace_function(void) { } |
248 | static inline void ftrace_kill(void) { } | 218 | static inline void ftrace_kill(void) { } |
249 | static inline void ftrace_stop(void) { } | ||
250 | static inline void ftrace_start(void) { } | ||
251 | #endif /* CONFIG_FUNCTION_TRACER */ | 219 | #endif /* CONFIG_FUNCTION_TRACER */ |
252 | 220 | ||
253 | #ifdef CONFIG_STACK_TRACER | 221 | #ifdef CONFIG_STACK_TRACER |
@@ -321,13 +289,20 @@ extern int ftrace_nr_registered_ops(void); | |||
321 | * from tracing that function. | 289 | * from tracing that function. |
322 | */ | 290 | */ |
323 | enum { | 291 | enum { |
324 | FTRACE_FL_ENABLED = (1UL << 29), | 292 | FTRACE_FL_ENABLED = (1UL << 31), |
325 | FTRACE_FL_REGS = (1UL << 30), | 293 | FTRACE_FL_REGS = (1UL << 30), |
326 | FTRACE_FL_REGS_EN = (1UL << 31) | 294 | FTRACE_FL_REGS_EN = (1UL << 29), |
295 | FTRACE_FL_TRAMP = (1UL << 28), | ||
296 | FTRACE_FL_TRAMP_EN = (1UL << 27), | ||
327 | }; | 297 | }; |
328 | 298 | ||
329 | #define FTRACE_FL_MASK (0x7UL << 29) | 299 | #define FTRACE_REF_MAX_SHIFT 27 |
330 | #define FTRACE_REF_MAX ((1UL << 29) - 1) | 300 | #define FTRACE_FL_BITS 5 |
301 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) | ||
302 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | ||
303 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | ||
304 | |||
305 | #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) | ||
331 | 306 | ||
332 | struct dyn_ftrace { | 307 | struct dyn_ftrace { |
333 | unsigned long ip; /* address of mcount call-site */ | 308 | unsigned long ip; /* address of mcount call-site */ |
@@ -366,14 +341,12 @@ enum { | |||
366 | * IGNORE - The function is already what we want it to be | 341 | * IGNORE - The function is already what we want it to be |
367 | * MAKE_CALL - Start tracing the function | 342 | * MAKE_CALL - Start tracing the function |
368 | * MODIFY_CALL - Stop saving regs for the function | 343 | * MODIFY_CALL - Stop saving regs for the function |
369 | * MODIFY_CALL_REGS - Start saving regs for the function | ||
370 | * MAKE_NOP - Stop tracing the function | 344 | * MAKE_NOP - Stop tracing the function |
371 | */ | 345 | */ |
372 | enum { | 346 | enum { |
373 | FTRACE_UPDATE_IGNORE, | 347 | FTRACE_UPDATE_IGNORE, |
374 | FTRACE_UPDATE_MAKE_CALL, | 348 | FTRACE_UPDATE_MAKE_CALL, |
375 | FTRACE_UPDATE_MODIFY_CALL, | 349 | FTRACE_UPDATE_MODIFY_CALL, |
376 | FTRACE_UPDATE_MODIFY_CALL_REGS, | ||
377 | FTRACE_UPDATE_MAKE_NOP, | 350 | FTRACE_UPDATE_MAKE_NOP, |
378 | }; | 351 | }; |
379 | 352 | ||
@@ -404,6 +377,8 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable); | |||
404 | int ftrace_test_record(struct dyn_ftrace *rec, int enable); | 377 | int ftrace_test_record(struct dyn_ftrace *rec, int enable); |
405 | void ftrace_run_stop_machine(int command); | 378 | void ftrace_run_stop_machine(int command); |
406 | unsigned long ftrace_location(unsigned long ip); | 379 | unsigned long ftrace_location(unsigned long ip); |
380 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); | ||
381 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); | ||
407 | 382 | ||
408 | extern ftrace_func_t ftrace_trace_function; | 383 | extern ftrace_func_t ftrace_trace_function; |
409 | 384 | ||
@@ -435,6 +410,10 @@ void ftrace_modify_all_code(int command); | |||
435 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | 410 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) |
436 | #endif | 411 | #endif |
437 | 412 | ||
413 | #ifndef FTRACE_GRAPH_ADDR | ||
414 | #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) | ||
415 | #endif | ||
416 | |||
438 | #ifndef FTRACE_REGS_ADDR | 417 | #ifndef FTRACE_REGS_ADDR |
439 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | 418 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
440 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) | 419 | # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) |
@@ -443,6 +422,16 @@ void ftrace_modify_all_code(int command); | |||
443 | #endif | 422 | #endif |
444 | #endif | 423 | #endif |
445 | 424 | ||
425 | /* | ||
426 | * If an arch would like functions that are only traced | ||
427 | * by the function graph tracer to jump directly to its own | ||
428 | * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR | ||
429 | * to be that address to jump to. | ||
430 | */ | ||
431 | #ifndef FTRACE_GRAPH_TRAMP_ADDR | ||
432 | #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) | ||
433 | #endif | ||
434 | |||
446 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 435 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
447 | extern void ftrace_graph_caller(void); | 436 | extern void ftrace_graph_caller(void); |
448 | extern int ftrace_enable_ftrace_graph_caller(void); | 437 | extern int ftrace_enable_ftrace_graph_caller(void); |
@@ -740,6 +729,7 @@ extern char __irqentry_text_end[]; | |||
740 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, | 729 | extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
741 | trace_func_graph_ent_t entryfunc); | 730 | trace_func_graph_ent_t entryfunc); |
742 | 731 | ||
732 | extern bool ftrace_graph_is_dead(void); | ||
743 | extern void ftrace_graph_stop(void); | 733 | extern void ftrace_graph_stop(void); |
744 | 734 | ||
745 | /* The current handlers in use */ | 735 | /* The current handlers in use */ |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index d16da3e53bc7..06c6faa9e5cc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -38,6 +38,9 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, | |||
38 | *symbol_array); | 38 | *symbol_array); |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, | ||
42 | unsigned int bitmask_size); | ||
43 | |||
41 | const char *ftrace_print_hex_seq(struct trace_seq *p, | 44 | const char *ftrace_print_hex_seq(struct trace_seq *p, |
42 | const unsigned char *buf, int len); | 45 | const unsigned char *buf, int len); |
43 | 46 | ||
@@ -269,7 +272,6 @@ struct ftrace_event_call { | |||
269 | struct trace_event event; | 272 | struct trace_event event; |
270 | const char *print_fmt; | 273 | const char *print_fmt; |
271 | struct event_filter *filter; | 274 | struct event_filter *filter; |
272 | struct list_head *files; | ||
273 | void *mod; | 275 | void *mod; |
274 | void *data; | 276 | void *data; |
275 | /* | 277 | /* |
@@ -401,8 +403,6 @@ enum event_trigger_type { | |||
401 | ETT_EVENT_ENABLE = (1 << 3), | 403 | ETT_EVENT_ENABLE = (1 << 3), |
402 | }; | 404 | }; |
403 | 405 | ||
404 | extern void destroy_preds(struct ftrace_event_file *file); | ||
405 | extern void destroy_call_preds(struct ftrace_event_call *call); | ||
406 | extern int filter_match_preds(struct event_filter *filter, void *rec); | 406 | extern int filter_match_preds(struct event_filter *filter, void *rec); |
407 | 407 | ||
408 | extern int filter_check_discard(struct ftrace_event_file *file, void *rec, | 408 | extern int filter_check_discard(struct ftrace_event_file *file, void *rec, |
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index a7e977ff4abf..8b622468952c 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
@@ -3,29 +3,53 @@ | |||
3 | 3 | ||
4 | struct device; | 4 | struct device; |
5 | 5 | ||
6 | /** | ||
7 | * struct gpio_keys_button - configuration parameters | ||
8 | * @code: input event code (KEY_*, SW_*) | ||
9 | * @gpio: %-1 if this key does not support gpio | ||
10 | * @active_low: %true indicates that button is considered | ||
11 | * depressed when gpio is low | ||
12 | * @desc: label that will be attached to button's gpio | ||
13 | * @type: input event type (%EV_KEY, %EV_SW, %EV_ABS) | ||
14 | * @wakeup: configure the button as a wake-up source | ||
15 | * @debounce_interval: debounce ticks interval in msecs | ||
16 | * @can_disable: %true indicates that userspace is allowed to | ||
17 | * disable button via sysfs | ||
18 | * @value: axis value for %EV_ABS | ||
19 | * @irq: Irq number in case of interrupt keys | ||
20 | */ | ||
6 | struct gpio_keys_button { | 21 | struct gpio_keys_button { |
7 | /* Configuration parameters */ | 22 | unsigned int code; |
8 | unsigned int code; /* input event code (KEY_*, SW_*) */ | 23 | int gpio; |
9 | int gpio; /* -1 if this key does not support gpio */ | ||
10 | int active_low; | 24 | int active_low; |
11 | const char *desc; | 25 | const char *desc; |
12 | unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */ | 26 | unsigned int type; |
13 | int wakeup; /* configure the button as a wake-up source */ | 27 | int wakeup; |
14 | int debounce_interval; /* debounce ticks interval in msecs */ | 28 | int debounce_interval; |
15 | bool can_disable; | 29 | bool can_disable; |
16 | int value; /* axis value for EV_ABS */ | 30 | int value; |
17 | unsigned int irq; /* Irq number in case of interrupt keys */ | 31 | unsigned int irq; |
18 | }; | 32 | }; |
19 | 33 | ||
34 | /** | ||
35 | * struct gpio_keys_platform_data - platform data for gpio_keys driver | ||
36 | * @buttons: pointer to array of &gpio_keys_button structures | ||
37 | * describing buttons attached to the device | ||
38 | * @nbuttons: number of elements in @buttons array | ||
39 | * @poll_interval: polling interval in msecs - for polling driver only | ||
40 | * @rep: enable input subsystem auto repeat | ||
41 | * @enable: platform hook for enabling the device | ||
42 | * @disable: platform hook for disabling the device | ||
43 | * @name: input device name | ||
44 | */ | ||
20 | struct gpio_keys_platform_data { | 45 | struct gpio_keys_platform_data { |
21 | struct gpio_keys_button *buttons; | 46 | struct gpio_keys_button *buttons; |
22 | int nbuttons; | 47 | int nbuttons; |
23 | unsigned int poll_interval; /* polling interval in msecs - | 48 | unsigned int poll_interval; |
24 | for polling driver only */ | 49 | unsigned int rep:1; |
25 | unsigned int rep:1; /* enable input subsystem auto repeat */ | ||
26 | int (*enable)(struct device *dev); | 50 | int (*enable)(struct device *dev); |
27 | void (*disable)(struct device *dev); | 51 | void (*disable)(struct device *dev); |
28 | const char *name; /* input device name */ | 52 | const char *name; |
29 | }; | 53 | }; |
30 | 54 | ||
31 | #endif | 55 | #endif |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 255cd5cc0754..a23c096b3080 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page); | |||
80 | bool isolate_huge_page(struct page *page, struct list_head *list); | 80 | bool isolate_huge_page(struct page *page, struct list_head *list); |
81 | void putback_active_hugepage(struct page *page); | 81 | void putback_active_hugepage(struct page *page); |
82 | bool is_hugepage_active(struct page *page); | 82 | bool is_hugepage_active(struct page *page); |
83 | void free_huge_page(struct page *page); | ||
83 | 84 | ||
84 | #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE | 85 | #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE |
85 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); | 86 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); |
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h index 99e379b74398..3891dc1de21c 100644 --- a/include/linux/i2c/atmel_mxt_ts.h +++ b/include/linux/i2c/atmel_mxt_ts.h | |||
@@ -15,35 +15,14 @@ | |||
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | /* For key_map array */ | ||
19 | #define MXT_NUM_GPIO 4 | ||
20 | |||
21 | /* Orient */ | ||
22 | #define MXT_NORMAL 0x0 | ||
23 | #define MXT_DIAGONAL 0x1 | ||
24 | #define MXT_HORIZONTAL_FLIP 0x2 | ||
25 | #define MXT_ROTATED_90_COUNTER 0x3 | ||
26 | #define MXT_VERTICAL_FLIP 0x4 | ||
27 | #define MXT_ROTATED_90 0x5 | ||
28 | #define MXT_ROTATED_180 0x6 | ||
29 | #define MXT_DIAGONAL_COUNTER 0x7 | ||
30 | |||
31 | /* The platform data for the Atmel maXTouch touchscreen driver */ | 18 | /* The platform data for the Atmel maXTouch touchscreen driver */ |
32 | struct mxt_platform_data { | 19 | struct mxt_platform_data { |
33 | const u8 *config; | 20 | const u8 *config; |
34 | size_t config_length; | 21 | size_t config_length; |
35 | 22 | u32 config_crc; | |
36 | unsigned int x_line; | ||
37 | unsigned int y_line; | ||
38 | unsigned int x_size; | ||
39 | unsigned int y_size; | ||
40 | unsigned int blen; | ||
41 | unsigned int threshold; | ||
42 | unsigned int voltage; | ||
43 | unsigned char orient; | ||
44 | unsigned long irqflags; | 23 | unsigned long irqflags; |
45 | bool is_tp; | 24 | u8 t19_num_keys; |
46 | const unsigned int key_map[MXT_NUM_GPIO]; | 25 | const unsigned int *t19_keymap; |
47 | }; | 26 | }; |
48 | 27 | ||
49 | #endif /* __LINUX_ATMEL_MXT_TS_H */ | 28 | #endif /* __LINUX_ATMEL_MXT_TS_H */ |
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index d2b16704624c..8cfb50f38529 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
@@ -498,7 +498,10 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot) | |||
498 | #define RES_GRP_ALL 0x7 /* All resource groups */ | 498 | #define RES_GRP_ALL 0x7 /* All resource groups */ |
499 | 499 | ||
500 | #define RES_TYPE2_R0 0x0 | 500 | #define RES_TYPE2_R0 0x0 |
501 | #define RES_TYPE2_R1 0x1 | ||
502 | #define RES_TYPE2_R2 0x2 | ||
501 | 503 | ||
504 | #define RES_TYPE_R0 0x0 | ||
502 | #define RES_TYPE_ALL 0x7 | 505 | #define RES_TYPE_ALL 0x7 |
503 | 506 | ||
504 | /* Resource states */ | 507 | /* Resource states */ |
@@ -671,6 +674,7 @@ struct twl4030_power_data { | |||
671 | struct twl4030_script **scripts; | 674 | struct twl4030_script **scripts; |
672 | unsigned num; | 675 | unsigned num; |
673 | struct twl4030_resconfig *resource_config; | 676 | struct twl4030_resconfig *resource_config; |
677 | struct twl4030_resconfig *board_config; | ||
674 | #define TWL4030_RESCONFIG_UNDEF ((u8)-1) | 678 | #define TWL4030_RESCONFIG_UNDEF ((u8)-1) |
675 | bool use_poweroff; /* Board is wired for TWL poweroff */ | 679 | bool use_poweroff; /* Board is wired for TWL poweroff */ |
676 | }; | 680 | }; |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index f194ccb8539c..6bff13f74050 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -1711,6 +1711,7 @@ enum ieee80211_eid { | |||
1711 | WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, | 1711 | WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, |
1712 | WLAN_EID_MULTIPLE_BSSID = 71, | 1712 | WLAN_EID_MULTIPLE_BSSID = 71, |
1713 | WLAN_EID_BSS_COEX_2040 = 72, | 1713 | WLAN_EID_BSS_COEX_2040 = 72, |
1714 | WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73, | ||
1714 | WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, | 1715 | WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74, |
1715 | WLAN_EID_RIC_DESCRIPTOR = 75, | 1716 | WLAN_EID_RIC_DESCRIPTOR = 75, |
1716 | WLAN_EID_MMIE = 76, | 1717 | WLAN_EID_MMIE = 76, |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 1085ffeef956..fd22789d7b2e 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
@@ -16,9 +16,28 @@ | |||
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <uapi/linux/if_bridge.h> | 17 | #include <uapi/linux/if_bridge.h> |
18 | 18 | ||
19 | struct br_ip { | ||
20 | union { | ||
21 | __be32 ip4; | ||
22 | #if IS_ENABLED(CONFIG_IPV6) | ||
23 | struct in6_addr ip6; | ||
24 | #endif | ||
25 | } u; | ||
26 | __be16 proto; | ||
27 | __u16 vid; | ||
28 | }; | ||
29 | |||
30 | struct br_ip_list { | ||
31 | struct list_head list; | ||
32 | struct br_ip addr; | ||
33 | }; | ||
34 | |||
19 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 35 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
20 | 36 | ||
21 | typedef int br_should_route_hook_t(struct sk_buff *skb); | 37 | typedef int br_should_route_hook_t(struct sk_buff *skb); |
22 | extern br_should_route_hook_t __rcu *br_should_route_hook; | 38 | extern br_should_route_hook_t __rcu *br_should_route_hook; |
39 | int br_multicast_list_adjacent(struct net_device *dev, | ||
40 | struct list_head *br_ip_list); | ||
41 | bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); | ||
23 | 42 | ||
24 | #endif | 43 | #endif |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index a86784dec3d3..119130e9298b 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -10,8 +10,9 @@ struct ifla_vf_info { | |||
10 | __u8 mac[32]; | 10 | __u8 mac[32]; |
11 | __u32 vlan; | 11 | __u32 vlan; |
12 | __u32 qos; | 12 | __u32 qos; |
13 | __u32 tx_rate; | ||
14 | __u32 spoofchk; | 13 | __u32 spoofchk; |
15 | __u32 linkstate; | 14 | __u32 linkstate; |
15 | __u32 min_tx_rate; | ||
16 | __u32 max_tx_rate; | ||
16 | }; | 17 | }; |
17 | #endif /* _LINUX_IF_LINK_H */ | 18 | #endif /* _LINUX_IF_LINK_H */ |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index a9a53b12397b..6b2c7cf352a5 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -57,6 +57,9 @@ struct macvlan_dev { | |||
57 | netdev_features_t tap_features; | 57 | netdev_features_t tap_features; |
58 | int minor; | 58 | int minor; |
59 | int nest_level; | 59 | int nest_level; |
60 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
61 | struct netpoll *netpoll; | ||
62 | #endif | ||
60 | }; | 63 | }; |
61 | 64 | ||
62 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | 65 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b2acc4a1b13c..4967916fe4ac 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -106,7 +106,7 @@ struct vlan_pcpu_stats { | |||
106 | 106 | ||
107 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 107 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
108 | 108 | ||
109 | extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, | 109 | extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, |
110 | __be16 vlan_proto, u16 vlan_id); | 110 | __be16 vlan_proto, u16 vlan_id); |
111 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); | 111 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); |
112 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); | 112 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); |
@@ -206,7 +206,7 @@ static inline int vlan_get_encap_level(struct net_device *dev) | |||
206 | } | 206 | } |
207 | #else | 207 | #else |
208 | static inline struct net_device * | 208 | static inline struct net_device * |
209 | __vlan_find_dev_deep(struct net_device *real_dev, | 209 | __vlan_find_dev_deep_rcu(struct net_device *real_dev, |
210 | __be16 vlan_proto, u16 vlan_id) | 210 | __be16 vlan_proto, u16 vlan_id) |
211 | { | 211 | { |
212 | return NULL; | 212 | return NULL; |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6df7f9fe0d01..2bb4c4f3531a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -102,12 +102,6 @@ extern struct group_info init_groups; | |||
102 | #define INIT_IDS | 102 | #define INIT_IDS |
103 | #endif | 103 | #endif |
104 | 104 | ||
105 | #ifdef CONFIG_RCU_BOOST | ||
106 | #define INIT_TASK_RCU_BOOST() \ | ||
107 | .rcu_boost_mutex = NULL, | ||
108 | #else | ||
109 | #define INIT_TASK_RCU_BOOST() | ||
110 | #endif | ||
111 | #ifdef CONFIG_TREE_PREEMPT_RCU | 105 | #ifdef CONFIG_TREE_PREEMPT_RCU |
112 | #define INIT_TASK_RCU_TREE_PREEMPT() \ | 106 | #define INIT_TASK_RCU_TREE_PREEMPT() \ |
113 | .rcu_blocked_node = NULL, | 107 | .rcu_blocked_node = NULL, |
@@ -119,8 +113,7 @@ extern struct group_info init_groups; | |||
119 | .rcu_read_lock_nesting = 0, \ | 113 | .rcu_read_lock_nesting = 0, \ |
120 | .rcu_read_unlock_special = 0, \ | 114 | .rcu_read_unlock_special = 0, \ |
121 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ | 115 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ |
122 | INIT_TASK_RCU_TREE_PREEMPT() \ | 116 | INIT_TASK_RCU_TREE_PREEMPT() |
123 | INIT_TASK_RCU_BOOST() | ||
124 | #else | 117 | #else |
125 | #define INIT_TASK_RCU_PREEMPT(tsk) | 118 | #define INIT_TASK_RCU_PREEMPT(tsk) |
126 | #endif | 119 | #endif |
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h index ce0b72464eb8..2465182670db 100644 --- a/include/linux/input-polldev.h +++ b/include/linux/input-polldev.h | |||
@@ -48,9 +48,12 @@ struct input_polled_dev { | |||
48 | 48 | ||
49 | /* private: */ | 49 | /* private: */ |
50 | struct delayed_work work; | 50 | struct delayed_work work; |
51 | |||
52 | bool devres_managed; | ||
51 | }; | 53 | }; |
52 | 54 | ||
53 | struct input_polled_dev *input_allocate_polled_device(void); | 55 | struct input_polled_dev *input_allocate_polled_device(void); |
56 | struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev); | ||
54 | void input_free_polled_device(struct input_polled_dev *dev); | 57 | void input_free_polled_device(struct input_polled_dev *dev); |
55 | int input_register_polled_device(struct input_polled_dev *dev); | 58 | int input_register_polled_device(struct input_polled_dev *dev); |
56 | void input_unregister_polled_device(struct input_polled_dev *dev); | 59 | void input_unregister_polled_device(struct input_polled_dev *dev); |
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h index 7163d91c0373..160cf353aa39 100644 --- a/include/linux/input/pixcir_ts.h +++ b/include/linux/input/pixcir_ts.h | |||
@@ -1,10 +1,52 @@ | |||
1 | #ifndef _PIXCIR_I2C_TS_H | 1 | #ifndef _PIXCIR_I2C_TS_H |
2 | #define _PIXCIR_I2C_TS_H | 2 | #define _PIXCIR_I2C_TS_H |
3 | 3 | ||
4 | /* | ||
5 | * Register map | ||
6 | */ | ||
7 | #define PIXCIR_REG_POWER_MODE 51 | ||
8 | #define PIXCIR_REG_INT_MODE 52 | ||
9 | |||
10 | /* | ||
11 | * Power modes: | ||
12 | * active: max scan speed | ||
13 | * idle: lower scan speed with automatic transition to active on touch | ||
14 | * halt: datasheet says sleep but this is more like halt as the chip | ||
15 | * clocks are cut and it can only be brought out of this mode | ||
16 | * using the RESET pin. | ||
17 | */ | ||
18 | enum pixcir_power_mode { | ||
19 | PIXCIR_POWER_ACTIVE, | ||
20 | PIXCIR_POWER_IDLE, | ||
21 | PIXCIR_POWER_HALT, | ||
22 | }; | ||
23 | |||
24 | #define PIXCIR_POWER_MODE_MASK 0x03 | ||
25 | #define PIXCIR_POWER_ALLOW_IDLE (1UL << 2) | ||
26 | |||
27 | /* | ||
28 | * Interrupt modes: | ||
29 | * periodical: interrupt is asserted periodicaly | ||
30 | * diff coordinates: interrupt is asserted when coordinates change | ||
31 | * level on touch: interrupt level asserted during touch | ||
32 | * pulse on touch: interrupt pulse asserted druing touch | ||
33 | * | ||
34 | */ | ||
35 | enum pixcir_int_mode { | ||
36 | PIXCIR_INT_PERIODICAL, | ||
37 | PIXCIR_INT_DIFF_COORD, | ||
38 | PIXCIR_INT_LEVEL_TOUCH, | ||
39 | PIXCIR_INT_PULSE_TOUCH, | ||
40 | }; | ||
41 | |||
42 | #define PIXCIR_INT_MODE_MASK 0x03 | ||
43 | #define PIXCIR_INT_ENABLE (1UL << 3) | ||
44 | #define PIXCIR_INT_POL_HIGH (1UL << 2) | ||
45 | |||
4 | struct pixcir_ts_platform_data { | 46 | struct pixcir_ts_platform_data { |
5 | int (*attb_read_val)(void); | ||
6 | int x_max; | 47 | int x_max; |
7 | int y_max; | 48 | int y_max; |
49 | int gpio_attb; /* GPIO connected to ATTB line */ | ||
8 | }; | 50 | }; |
9 | 51 | ||
10 | #endif | 52 | #endif |
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h new file mode 100644 index 000000000000..08a5ef6e8f25 --- /dev/null +++ b/include/linux/input/touchscreen.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Sebastian Reichel <sre@kernel.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License version 2 as published by | ||
6 | * the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef _TOUCHSCREEN_H | ||
10 | #define _TOUCHSCREEN_H | ||
11 | |||
12 | #include <linux/input.h> | ||
13 | |||
14 | #ifdef CONFIG_OF | ||
15 | void touchscreen_parse_of_params(struct input_dev *dev); | ||
16 | #else | ||
17 | static inline void touchscreen_parse_of_params(struct input_dev *dev) | ||
18 | { | ||
19 | } | ||
20 | #endif | ||
21 | |||
22 | #endif | ||
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 19ae05d4b8ec..bf9422c3aefe 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h | |||
@@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) | |||
33 | #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } | 33 | #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } |
34 | 34 | ||
35 | bool irq_work_queue(struct irq_work *work); | 35 | bool irq_work_queue(struct irq_work *work); |
36 | |||
37 | #ifdef CONFIG_SMP | ||
38 | bool irq_work_queue_on(struct irq_work *work, int cpu); | ||
39 | #endif | ||
40 | |||
36 | void irq_work_run(void); | 41 | void irq_work_run(void); |
37 | void irq_work_sync(struct irq_work *work); | 42 | void irq_work_sync(struct irq_work *work); |
38 | 43 | ||
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h new file mode 100644 index 000000000000..03a4ea37ba86 --- /dev/null +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H | ||
19 | #define __LINUX_IRQCHIP_ARM_GIC_V3_H | ||
20 | |||
21 | #include <asm/sysreg.h> | ||
22 | |||
23 | /* | ||
24 | * Distributor registers. We assume we're running non-secure, with ARE | ||
25 | * being set. Secure-only and non-ARE registers are not described. | ||
26 | */ | ||
27 | #define GICD_CTLR 0x0000 | ||
28 | #define GICD_TYPER 0x0004 | ||
29 | #define GICD_IIDR 0x0008 | ||
30 | #define GICD_STATUSR 0x0010 | ||
31 | #define GICD_SETSPI_NSR 0x0040 | ||
32 | #define GICD_CLRSPI_NSR 0x0048 | ||
33 | #define GICD_SETSPI_SR 0x0050 | ||
34 | #define GICD_CLRSPI_SR 0x0058 | ||
35 | #define GICD_SEIR 0x0068 | ||
36 | #define GICD_ISENABLER 0x0100 | ||
37 | #define GICD_ICENABLER 0x0180 | ||
38 | #define GICD_ISPENDR 0x0200 | ||
39 | #define GICD_ICPENDR 0x0280 | ||
40 | #define GICD_ISACTIVER 0x0300 | ||
41 | #define GICD_ICACTIVER 0x0380 | ||
42 | #define GICD_IPRIORITYR 0x0400 | ||
43 | #define GICD_ICFGR 0x0C00 | ||
44 | #define GICD_IROUTER 0x6000 | ||
45 | #define GICD_PIDR2 0xFFE8 | ||
46 | |||
47 | #define GICD_CTLR_RWP (1U << 31) | ||
48 | #define GICD_CTLR_ARE_NS (1U << 4) | ||
49 | #define GICD_CTLR_ENABLE_G1A (1U << 1) | ||
50 | #define GICD_CTLR_ENABLE_G1 (1U << 0) | ||
51 | |||
52 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) | ||
53 | #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) | ||
54 | |||
55 | #define GIC_PIDR2_ARCH_MASK 0xf0 | ||
56 | #define GIC_PIDR2_ARCH_GICv3 0x30 | ||
57 | #define GIC_PIDR2_ARCH_GICv4 0x40 | ||
58 | |||
59 | /* | ||
60 | * Re-Distributor registers, offsets from RD_base | ||
61 | */ | ||
62 | #define GICR_CTLR GICD_CTLR | ||
63 | #define GICR_IIDR 0x0004 | ||
64 | #define GICR_TYPER 0x0008 | ||
65 | #define GICR_STATUSR GICD_STATUSR | ||
66 | #define GICR_WAKER 0x0014 | ||
67 | #define GICR_SETLPIR 0x0040 | ||
68 | #define GICR_CLRLPIR 0x0048 | ||
69 | #define GICR_SEIR GICD_SEIR | ||
70 | #define GICR_PROPBASER 0x0070 | ||
71 | #define GICR_PENDBASER 0x0078 | ||
72 | #define GICR_INVLPIR 0x00A0 | ||
73 | #define GICR_INVALLR 0x00B0 | ||
74 | #define GICR_SYNCR 0x00C0 | ||
75 | #define GICR_MOVLPIR 0x0100 | ||
76 | #define GICR_MOVALLR 0x0110 | ||
77 | #define GICR_PIDR2 GICD_PIDR2 | ||
78 | |||
79 | #define GICR_WAKER_ProcessorSleep (1U << 1) | ||
80 | #define GICR_WAKER_ChildrenAsleep (1U << 2) | ||
81 | |||
82 | /* | ||
83 | * Re-Distributor registers, offsets from SGI_base | ||
84 | */ | ||
85 | #define GICR_ISENABLER0 GICD_ISENABLER | ||
86 | #define GICR_ICENABLER0 GICD_ICENABLER | ||
87 | #define GICR_ISPENDR0 GICD_ISPENDR | ||
88 | #define GICR_ICPENDR0 GICD_ICPENDR | ||
89 | #define GICR_ISACTIVER0 GICD_ISACTIVER | ||
90 | #define GICR_ICACTIVER0 GICD_ICACTIVER | ||
91 | #define GICR_IPRIORITYR0 GICD_IPRIORITYR | ||
92 | #define GICR_ICFGR0 GICD_ICFGR | ||
93 | |||
94 | #define GICR_TYPER_VLPIS (1U << 1) | ||
95 | #define GICR_TYPER_LAST (1U << 4) | ||
96 | |||
97 | /* | ||
98 | * CPU interface registers | ||
99 | */ | ||
100 | #define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) | ||
101 | #define ICC_CTLR_EL1_EOImode_drop (1U << 1) | ||
102 | #define ICC_SRE_EL1_SRE (1U << 0) | ||
103 | |||
104 | /* | ||
105 | * Hypervisor interface registers (SRE only) | ||
106 | */ | ||
107 | #define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) | ||
108 | |||
109 | #define ICH_LR_EOI (1UL << 41) | ||
110 | #define ICH_LR_GROUP (1UL << 60) | ||
111 | #define ICH_LR_STATE (3UL << 62) | ||
112 | #define ICH_LR_PENDING_BIT (1UL << 62) | ||
113 | #define ICH_LR_ACTIVE_BIT (1UL << 63) | ||
114 | |||
115 | #define ICH_MISR_EOI (1 << 0) | ||
116 | #define ICH_MISR_U (1 << 1) | ||
117 | |||
118 | #define ICH_HCR_EN (1 << 0) | ||
119 | #define ICH_HCR_UIE (1 << 1) | ||
120 | |||
121 | #define ICH_VMCR_CTLR_SHIFT 0 | ||
122 | #define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) | ||
123 | #define ICH_VMCR_BPR1_SHIFT 18 | ||
124 | #define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) | ||
125 | #define ICH_VMCR_BPR0_SHIFT 21 | ||
126 | #define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) | ||
127 | #define ICH_VMCR_PMR_SHIFT 24 | ||
128 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) | ||
129 | |||
130 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) | ||
131 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) | ||
132 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) | ||
133 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) | ||
134 | #define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) | ||
135 | #define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) | ||
136 | #define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) | ||
137 | |||
138 | #define ICC_IAR1_EL1_SPURIOUS 0x3ff | ||
139 | |||
140 | #define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) | ||
141 | |||
142 | #define ICC_SRE_EL2_SRE (1 << 0) | ||
143 | #define ICC_SRE_EL2_ENABLE (1 << 3) | ||
144 | |||
145 | /* | ||
146 | * System register definitions | ||
147 | */ | ||
148 | #define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) | ||
149 | #define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) | ||
150 | #define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) | ||
151 | #define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) | ||
152 | #define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) | ||
153 | #define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) | ||
154 | #define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) | ||
155 | |||
156 | #define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) | ||
157 | #define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) | ||
158 | |||
159 | #define ICH_LR0_EL2 __LR0_EL2(0) | ||
160 | #define ICH_LR1_EL2 __LR0_EL2(1) | ||
161 | #define ICH_LR2_EL2 __LR0_EL2(2) | ||
162 | #define ICH_LR3_EL2 __LR0_EL2(3) | ||
163 | #define ICH_LR4_EL2 __LR0_EL2(4) | ||
164 | #define ICH_LR5_EL2 __LR0_EL2(5) | ||
165 | #define ICH_LR6_EL2 __LR0_EL2(6) | ||
166 | #define ICH_LR7_EL2 __LR0_EL2(7) | ||
167 | #define ICH_LR8_EL2 __LR8_EL2(0) | ||
168 | #define ICH_LR9_EL2 __LR8_EL2(1) | ||
169 | #define ICH_LR10_EL2 __LR8_EL2(2) | ||
170 | #define ICH_LR11_EL2 __LR8_EL2(3) | ||
171 | #define ICH_LR12_EL2 __LR8_EL2(4) | ||
172 | #define ICH_LR13_EL2 __LR8_EL2(5) | ||
173 | #define ICH_LR14_EL2 __LR8_EL2(6) | ||
174 | #define ICH_LR15_EL2 __LR8_EL2(7) | ||
175 | |||
176 | #define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) | ||
177 | #define ICH_AP0R0_EL2 __AP0Rx_EL2(0) | ||
178 | #define ICH_AP0R1_EL2 __AP0Rx_EL2(1) | ||
179 | #define ICH_AP0R2_EL2 __AP0Rx_EL2(2) | ||
180 | #define ICH_AP0R3_EL2 __AP0Rx_EL2(3) | ||
181 | |||
182 | #define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) | ||
183 | #define ICH_AP1R0_EL2 __AP1Rx_EL2(0) | ||
184 | #define ICH_AP1R1_EL2 __AP1Rx_EL2(1) | ||
185 | #define ICH_AP1R2_EL2 __AP1Rx_EL2(2) | ||
186 | #define ICH_AP1R3_EL2 __AP1Rx_EL2(3) | ||
187 | |||
188 | #ifndef __ASSEMBLY__ | ||
189 | |||
190 | #include <linux/stringify.h> | ||
191 | |||
192 | static inline void gic_write_eoir(u64 irq) | ||
193 | { | ||
194 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); | ||
195 | isb(); | ||
196 | } | ||
197 | |||
198 | #endif | ||
199 | |||
200 | #endif | ||
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h index 5a52f2c94f3f..44bd6046e6e2 100644 --- a/include/linux/isdn/capiutil.h +++ b/include/linux/isdn/capiutil.h | |||
@@ -164,11 +164,6 @@ unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId, | |||
164 | __u8 _Command, __u8 _Subcommand, | 164 | __u8 _Command, __u8 _Subcommand, |
165 | __u16 _Messagenumber, __u32 _Controller); | 165 | __u16 _Messagenumber, __u32 _Controller); |
166 | 166 | ||
167 | /* | ||
168 | * capi_info2str generated a readable string for Capi2.0 reasons. | ||
169 | */ | ||
170 | char *capi_info2str(__u16 reason); | ||
171 | |||
172 | /*-----------------------------------------------------------------------*/ | 167 | /*-----------------------------------------------------------------------*/ |
173 | 168 | ||
174 | /* | 169 | /* |
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 17aa1cce6f8e..30faf797c2c3 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
@@ -91,6 +91,7 @@ struct kernfs_elem_attr { | |||
91 | const struct kernfs_ops *ops; | 91 | const struct kernfs_ops *ops; |
92 | struct kernfs_open_node *open; | 92 | struct kernfs_open_node *open; |
93 | loff_t size; | 93 | loff_t size; |
94 | struct kernfs_node *notify_next; /* for kernfs_notify() */ | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | /* | 97 | /* |
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, | |||
304 | struct kernfs_root *root, unsigned long magic, | 305 | struct kernfs_root *root, unsigned long magic, |
305 | bool *new_sb_created, const void *ns); | 306 | bool *new_sb_created, const void *ns); |
306 | void kernfs_kill_sb(struct super_block *sb); | 307 | void kernfs_kill_sb(struct super_block *sb); |
308 | struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); | ||
307 | 309 | ||
308 | void kernfs_init(void); | 310 | void kernfs_init(void); |
309 | 311 | ||
diff --git a/include/linux/key.h b/include/linux/key.h index 3ae45f09589b..017b0826642f 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -309,6 +309,17 @@ static inline key_serial_t key_serial(const struct key *key) | |||
309 | 309 | ||
310 | extern void key_set_timeout(struct key *, unsigned); | 310 | extern void key_set_timeout(struct key *, unsigned); |
311 | 311 | ||
312 | /* | ||
313 | * The permissions required on a key that we're looking up. | ||
314 | */ | ||
315 | #define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ | ||
316 | #define KEY_NEED_READ 0x02 /* Require permission to read content */ | ||
317 | #define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ | ||
318 | #define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ | ||
319 | #define KEY_NEED_LINK 0x10 /* Require permission to link */ | ||
320 | #define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ | ||
321 | #define KEY_NEED_ALL 0x3f /* All the above permissions */ | ||
322 | |||
312 | /** | 323 | /** |
313 | * key_is_instantiated - Determine if a key has been positively instantiated | 324 | * key_is_instantiated - Determine if a key has been positively instantiated |
314 | * @key: The key to check. | 325 | * @key: The key to check. |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 925eaf28fca9..f7296e57d614 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -205,10 +205,10 @@ struct kretprobe_blackpoint { | |||
205 | void *addr; | 205 | void *addr; |
206 | }; | 206 | }; |
207 | 207 | ||
208 | struct kprobe_blackpoint { | 208 | struct kprobe_blacklist_entry { |
209 | const char *name; | 209 | struct list_head list; |
210 | unsigned long start_addr; | 210 | unsigned long start_addr; |
211 | unsigned long range; | 211 | unsigned long end_addr; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | #ifdef CONFIG_KPROBES | 214 | #ifdef CONFIG_KPROBES |
@@ -265,6 +265,7 @@ extern void arch_disarm_kprobe(struct kprobe *p); | |||
265 | extern int arch_init_kprobes(void); | 265 | extern int arch_init_kprobes(void); |
266 | extern void show_registers(struct pt_regs *regs); | 266 | extern void show_registers(struct pt_regs *regs); |
267 | extern void kprobes_inc_nmissed_count(struct kprobe *p); | 267 | extern void kprobes_inc_nmissed_count(struct kprobe *p); |
268 | extern bool arch_within_kprobe_blacklist(unsigned long addr); | ||
268 | 269 | ||
269 | struct kprobe_insn_cache { | 270 | struct kprobe_insn_cache { |
270 | struct mutex mutex; | 271 | struct mutex mutex; |
@@ -355,7 +356,7 @@ static inline void reset_current_kprobe(void) | |||
355 | 356 | ||
356 | static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) | 357 | static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) |
357 | { | 358 | { |
358 | return (&__get_cpu_var(kprobe_ctlblk)); | 359 | return this_cpu_ptr(&kprobe_ctlblk); |
359 | } | 360 | } |
360 | 361 | ||
361 | int register_kprobe(struct kprobe *p); | 362 | int register_kprobe(struct kprobe *p); |
@@ -476,4 +477,18 @@ static inline int enable_jprobe(struct jprobe *jp) | |||
476 | return enable_kprobe(&jp->kp); | 477 | return enable_kprobe(&jp->kp); |
477 | } | 478 | } |
478 | 479 | ||
480 | #ifdef CONFIG_KPROBES | ||
481 | /* | ||
482 | * Blacklist ganerating macro. Specify functions which is not probed | ||
483 | * by using this macro. | ||
484 | */ | ||
485 | #define __NOKPROBE_SYMBOL(fname) \ | ||
486 | static unsigned long __used \ | ||
487 | __attribute__((section("_kprobe_blacklist"))) \ | ||
488 | _kbl_addr_##fname = (unsigned long)fname; | ||
489 | #define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) | ||
490 | #else | ||
491 | #define NOKPROBE_SYMBOL(fname) | ||
492 | #endif | ||
493 | |||
479 | #endif /* _LINUX_KPROBES_H */ | 494 | #endif /* _LINUX_KPROBES_H */ |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 7dcef3317689..13d55206ccf6 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
@@ -73,7 +73,6 @@ struct kthread_worker { | |||
73 | struct kthread_work { | 73 | struct kthread_work { |
74 | struct list_head node; | 74 | struct list_head node; |
75 | kthread_work_func_t func; | 75 | kthread_work_func_t func; |
76 | wait_queue_head_t done; | ||
77 | struct kthread_worker *worker; | 76 | struct kthread_worker *worker; |
78 | }; | 77 | }; |
79 | 78 | ||
@@ -85,7 +84,6 @@ struct kthread_work { | |||
85 | #define KTHREAD_WORK_INIT(work, fn) { \ | 84 | #define KTHREAD_WORK_INIT(work, fn) { \ |
86 | .node = LIST_HEAD_INIT((work).node), \ | 85 | .node = LIST_HEAD_INIT((work).node), \ |
87 | .func = (fn), \ | 86 | .func = (fn), \ |
88 | .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ | ||
89 | } | 87 | } |
90 | 88 | ||
91 | #define DEFINE_KTHREAD_WORKER(worker) \ | 89 | #define DEFINE_KTHREAD_WORKER(worker) \ |
@@ -95,22 +93,16 @@ struct kthread_work { | |||
95 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) | 93 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) |
96 | 94 | ||
97 | /* | 95 | /* |
98 | * kthread_worker.lock and kthread_work.done need their own lockdep class | 96 | * kthread_worker.lock needs its own lockdep class key when defined on |
99 | * keys if they are defined on stack with lockdep enabled. Use the | 97 | * stack with lockdep enabled. Use the following macros in such cases. |
100 | * following macros when defining them on stack. | ||
101 | */ | 98 | */ |
102 | #ifdef CONFIG_LOCKDEP | 99 | #ifdef CONFIG_LOCKDEP |
103 | # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ | 100 | # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ |
104 | ({ init_kthread_worker(&worker); worker; }) | 101 | ({ init_kthread_worker(&worker); worker; }) |
105 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ | 102 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ |
106 | struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) | 103 | struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) |
107 | # define KTHREAD_WORK_INIT_ONSTACK(work, fn) \ | ||
108 | ({ init_kthread_work((&work), fn); work; }) | ||
109 | # define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \ | ||
110 | struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn) | ||
111 | #else | 104 | #else |
112 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) | 105 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) |
113 | # define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn) | ||
114 | #endif | 106 | #endif |
115 | 107 | ||
116 | extern void __init_kthread_worker(struct kthread_worker *worker, | 108 | extern void __init_kthread_worker(struct kthread_worker *worker, |
@@ -127,7 +119,6 @@ extern void __init_kthread_worker(struct kthread_worker *worker, | |||
127 | memset((work), 0, sizeof(struct kthread_work)); \ | 119 | memset((work), 0, sizeof(struct kthread_work)); \ |
128 | INIT_LIST_HEAD(&(work)->node); \ | 120 | INIT_LIST_HEAD(&(work)->node); \ |
129 | (work)->func = (fn); \ | 121 | (work)->func = (fn); \ |
130 | init_waitqueue_head(&(work)->done); \ | ||
131 | } while (0) | 122 | } while (0) |
132 | 123 | ||
133 | int kthread_worker_fn(void *worker_ptr); | 124 | int kthread_worker_fn(void *worker_ptr); |
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 31c0cd1c941a..de9e46e6bcc9 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
@@ -304,6 +304,30 @@ static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | /** | ||
308 | * ktime_after - Compare if a ktime_t value is bigger than another one. | ||
309 | * @cmp1: comparable1 | ||
310 | * @cmp2: comparable2 | ||
311 | * | ||
312 | * Return: true if cmp1 happened after cmp2. | ||
313 | */ | ||
314 | static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2) | ||
315 | { | ||
316 | return ktime_compare(cmp1, cmp2) > 0; | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * ktime_before - Compare if a ktime_t value is smaller than another one. | ||
321 | * @cmp1: comparable1 | ||
322 | * @cmp2: comparable2 | ||
323 | * | ||
324 | * Return: true if cmp1 happened before cmp2. | ||
325 | */ | ||
326 | static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) | ||
327 | { | ||
328 | return ktime_compare(cmp1, cmp2) < 0; | ||
329 | } | ||
330 | |||
307 | static inline s64 ktime_to_us(const ktime_t kt) | 331 | static inline s64 ktime_to_us(const ktime_t kt) |
308 | { | 332 | { |
309 | struct timeval tv = ktime_to_timeval(kt); | 333 | struct timeval tv = ktime_to_timeval(kt); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 970c68197c69..ec4e3bd83d47 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -586,7 +586,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn); | |||
586 | 586 | ||
587 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); | 587 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
588 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 588 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
589 | bool kvm_vcpu_yield_to(struct kvm_vcpu *target); | 589 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
590 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); | 590 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); |
591 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); | 591 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); |
592 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | 592 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 5ab4e3a76721..92abb497ab14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -593,6 +593,7 @@ struct ata_host { | |||
593 | struct device *dev; | 593 | struct device *dev; |
594 | void __iomem * const *iomap; | 594 | void __iomem * const *iomap; |
595 | unsigned int n_ports; | 595 | unsigned int n_ports; |
596 | unsigned int n_tags; /* nr of NCQ tags */ | ||
596 | void *private_data; | 597 | void *private_data; |
597 | struct ata_port_operations *ops; | 598 | struct ata_port_operations *ops; |
598 | unsigned long flags; | 599 | unsigned long flags; |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index dcaad79f54ed..219d79627c05 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -17,13 +17,13 @@ | |||
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/kref.h> | 18 | #include <linux/kref.h> |
19 | #include <linux/utsname.h> | 19 | #include <linux/utsname.h> |
20 | #include <linux/nfsd/nfsfh.h> | ||
21 | #include <linux/lockd/bind.h> | 20 | #include <linux/lockd/bind.h> |
22 | #include <linux/lockd/xdr.h> | 21 | #include <linux/lockd/xdr.h> |
23 | #ifdef CONFIG_LOCKD_V4 | 22 | #ifdef CONFIG_LOCKD_V4 |
24 | #include <linux/lockd/xdr4.h> | 23 | #include <linux/lockd/xdr4.h> |
25 | #endif | 24 | #endif |
26 | #include <linux/lockd/debug.h> | 25 | #include <linux/lockd/debug.h> |
26 | #include <linux/sunrpc/svc.h> | ||
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Version string | 29 | * Version string |
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 3e02b768d537..b6401e7661c7 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h | |||
@@ -18,38 +18,38 @@ | |||
18 | #ifndef __LINUX_MFD_SEC_RTC_H | 18 | #ifndef __LINUX_MFD_SEC_RTC_H |
19 | #define __LINUX_MFD_SEC_RTC_H | 19 | #define __LINUX_MFD_SEC_RTC_H |
20 | 20 | ||
21 | enum sec_rtc_reg { | 21 | enum s5m_rtc_reg { |
22 | SEC_RTC_SEC, | 22 | S5M_RTC_SEC, |
23 | SEC_RTC_MIN, | 23 | S5M_RTC_MIN, |
24 | SEC_RTC_HOUR, | 24 | S5M_RTC_HOUR, |
25 | SEC_RTC_WEEKDAY, | 25 | S5M_RTC_WEEKDAY, |
26 | SEC_RTC_DATE, | 26 | S5M_RTC_DATE, |
27 | SEC_RTC_MONTH, | 27 | S5M_RTC_MONTH, |
28 | SEC_RTC_YEAR1, | 28 | S5M_RTC_YEAR1, |
29 | SEC_RTC_YEAR2, | 29 | S5M_RTC_YEAR2, |
30 | SEC_ALARM0_SEC, | 30 | S5M_ALARM0_SEC, |
31 | SEC_ALARM0_MIN, | 31 | S5M_ALARM0_MIN, |
32 | SEC_ALARM0_HOUR, | 32 | S5M_ALARM0_HOUR, |
33 | SEC_ALARM0_WEEKDAY, | 33 | S5M_ALARM0_WEEKDAY, |
34 | SEC_ALARM0_DATE, | 34 | S5M_ALARM0_DATE, |
35 | SEC_ALARM0_MONTH, | 35 | S5M_ALARM0_MONTH, |
36 | SEC_ALARM0_YEAR1, | 36 | S5M_ALARM0_YEAR1, |
37 | SEC_ALARM0_YEAR2, | 37 | S5M_ALARM0_YEAR2, |
38 | SEC_ALARM1_SEC, | 38 | S5M_ALARM1_SEC, |
39 | SEC_ALARM1_MIN, | 39 | S5M_ALARM1_MIN, |
40 | SEC_ALARM1_HOUR, | 40 | S5M_ALARM1_HOUR, |
41 | SEC_ALARM1_WEEKDAY, | 41 | S5M_ALARM1_WEEKDAY, |
42 | SEC_ALARM1_DATE, | 42 | S5M_ALARM1_DATE, |
43 | SEC_ALARM1_MONTH, | 43 | S5M_ALARM1_MONTH, |
44 | SEC_ALARM1_YEAR1, | 44 | S5M_ALARM1_YEAR1, |
45 | SEC_ALARM1_YEAR2, | 45 | S5M_ALARM1_YEAR2, |
46 | SEC_ALARM0_CONF, | 46 | S5M_ALARM0_CONF, |
47 | SEC_ALARM1_CONF, | 47 | S5M_ALARM1_CONF, |
48 | SEC_RTC_STATUS, | 48 | S5M_RTC_STATUS, |
49 | SEC_WTSR_SMPL_CNTL, | 49 | S5M_WTSR_SMPL_CNTL, |
50 | SEC_RTC_UDR_CON, | 50 | S5M_RTC_UDR_CON, |
51 | 51 | ||
52 | SEC_RTC_REG_MAX, | 52 | S5M_RTC_REG_MAX, |
53 | }; | 53 | }; |
54 | 54 | ||
55 | enum s2mps_rtc_reg { | 55 | enum s2mps_rtc_reg { |
@@ -88,9 +88,9 @@ enum s2mps_rtc_reg { | |||
88 | #define HOUR_12 (1 << 7) | 88 | #define HOUR_12 (1 << 7) |
89 | #define HOUR_AMPM (1 << 6) | 89 | #define HOUR_AMPM (1 << 6) |
90 | #define HOUR_PM (1 << 5) | 90 | #define HOUR_PM (1 << 5) |
91 | #define ALARM0_STATUS (1 << 1) | 91 | #define S5M_ALARM0_STATUS (1 << 1) |
92 | #define ALARM1_STATUS (1 << 2) | 92 | #define S5M_ALARM1_STATUS (1 << 2) |
93 | #define UPDATE_AD (1 << 0) | 93 | #define S5M_UPDATE_AD (1 << 0) |
94 | 94 | ||
95 | #define S2MPS_ALARM0_STATUS (1 << 2) | 95 | #define S2MPS_ALARM0_STATUS (1 << 2) |
96 | #define S2MPS_ALARM1_STATUS (1 << 1) | 96 | #define S2MPS_ALARM1_STATUS (1 << 1) |
@@ -101,16 +101,26 @@ enum s2mps_rtc_reg { | |||
101 | #define MODEL24_SHIFT 1 | 101 | #define MODEL24_SHIFT 1 |
102 | #define MODEL24_MASK (1 << MODEL24_SHIFT) | 102 | #define MODEL24_MASK (1 << MODEL24_SHIFT) |
103 | /* RTC Update Register1 */ | 103 | /* RTC Update Register1 */ |
104 | #define RTC_UDR_SHIFT 0 | 104 | #define S5M_RTC_UDR_SHIFT 0 |
105 | #define RTC_UDR_MASK (1 << RTC_UDR_SHIFT) | 105 | #define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT) |
106 | #define S2MPS_RTC_WUDR_SHIFT 4 | 106 | #define S2MPS_RTC_WUDR_SHIFT 4 |
107 | #define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) | 107 | #define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) |
108 | #define S2MPS_RTC_RUDR_SHIFT 0 | 108 | #define S2MPS_RTC_RUDR_SHIFT 0 |
109 | #define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) | 109 | #define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) |
110 | #define RTC_TCON_SHIFT 1 | 110 | #define RTC_TCON_SHIFT 1 |
111 | #define RTC_TCON_MASK (1 << RTC_TCON_SHIFT) | 111 | #define RTC_TCON_MASK (1 << RTC_TCON_SHIFT) |
112 | #define RTC_TIME_EN_SHIFT 3 | 112 | #define S5M_RTC_TIME_EN_SHIFT 3 |
113 | #define RTC_TIME_EN_MASK (1 << RTC_TIME_EN_SHIFT) | 113 | #define S5M_RTC_TIME_EN_MASK (1 << S5M_RTC_TIME_EN_SHIFT) |
114 | /* | ||
115 | * UDR_T field in S5M_RTC_UDR_CON register determines the time needed | ||
116 | * for updating alarm and time registers. Default is 7.32 ms. | ||
117 | */ | ||
118 | #define S5M_RTC_UDR_T_SHIFT 6 | ||
119 | #define S5M_RTC_UDR_T_MASK (0x3 << S5M_RTC_UDR_T_SHIFT) | ||
120 | #define S5M_RTC_UDR_T_7320_US (0x0 << S5M_RTC_UDR_T_SHIFT) | ||
121 | #define S5M_RTC_UDR_T_1830_US (0x1 << S5M_RTC_UDR_T_SHIFT) | ||
122 | #define S5M_RTC_UDR_T_3660_US (0x2 << S5M_RTC_UDR_T_SHIFT) | ||
123 | #define S5M_RTC_UDR_T_450_US (0x3 << S5M_RTC_UDR_T_SHIFT) | ||
114 | 124 | ||
115 | /* RTC Hour register */ | 125 | /* RTC Hour register */ |
116 | #define HOUR_PM_SHIFT 6 | 126 | #define HOUR_PM_SHIFT 6 |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ba87bd21295a..35b51e7af886 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -401,6 +401,7 @@ struct mlx4_caps { | |||
401 | int max_rq_desc_sz; | 401 | int max_rq_desc_sz; |
402 | int max_qp_init_rdma; | 402 | int max_qp_init_rdma; |
403 | int max_qp_dest_rdma; | 403 | int max_qp_dest_rdma; |
404 | u32 *qp0_qkey; | ||
404 | u32 *qp0_proxy; | 405 | u32 *qp0_proxy; |
405 | u32 *qp1_proxy; | 406 | u32 *qp1_proxy; |
406 | u32 *qp0_tunnel; | 407 | u32 *qp0_tunnel; |
@@ -449,7 +450,6 @@ struct mlx4_caps { | |||
449 | int reserved_qps_base[MLX4_NUM_QP_REGION]; | 450 | int reserved_qps_base[MLX4_NUM_QP_REGION]; |
450 | int log_num_macs; | 451 | int log_num_macs; |
451 | int log_num_vlans; | 452 | int log_num_vlans; |
452 | int log_num_prios; | ||
453 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; | 453 | enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; |
454 | u8 supported_type[MLX4_MAX_PORTS + 1]; | 454 | u8 supported_type[MLX4_MAX_PORTS + 1]; |
455 | u8 suggested_type[MLX4_MAX_PORTS + 1]; | 455 | u8 suggested_type[MLX4_MAX_PORTS + 1]; |
@@ -577,6 +577,7 @@ struct mlx4_cq { | |||
577 | 577 | ||
578 | u32 cons_index; | 578 | u32 cons_index; |
579 | 579 | ||
580 | u16 irq; | ||
580 | __be32 *set_ci_db; | 581 | __be32 *set_ci_db; |
581 | __be32 *arm_db; | 582 | __be32 *arm_db; |
582 | int arm_sn; | 583 | int arm_sn; |
@@ -837,7 +838,7 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev) | |||
837 | } | 838 | } |
838 | 839 | ||
839 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 840 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
840 | struct mlx4_buf *buf); | 841 | struct mlx4_buf *buf, gfp_t gfp); |
841 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 842 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); |
842 | static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) | 843 | static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) |
843 | { | 844 | { |
@@ -874,9 +875,10 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); | |||
874 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 875 | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
875 | int start_index, int npages, u64 *page_list); | 876 | int start_index, int npages, u64 *page_list); |
876 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 877 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
877 | struct mlx4_buf *buf); | 878 | struct mlx4_buf *buf, gfp_t gfp); |
878 | 879 | ||
879 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); | 880 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, |
881 | gfp_t gfp); | ||
880 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); | 882 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); |
881 | 883 | ||
882 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | 884 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, |
@@ -892,7 +894,8 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | |||
892 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 894 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); |
893 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 895 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
894 | 896 | ||
895 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | 897 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, |
898 | gfp_t gfp); | ||
896 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 899 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); |
897 | 900 | ||
898 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, | 901 | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, |
@@ -1162,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, | |||
1162 | int *vector); | 1165 | int *vector); |
1163 | void mlx4_release_eq(struct mlx4_dev *dev, int vec); | 1166 | void mlx4_release_eq(struct mlx4_dev *dev, int vec); |
1164 | 1167 | ||
1168 | int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); | ||
1169 | |||
1165 | int mlx4_get_phys_port_id(struct mlx4_dev *dev); | 1170 | int mlx4_get_phys_port_id(struct mlx4_dev *dev); |
1166 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); | 1171 | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); |
1167 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); | 1172 | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); |
@@ -1234,4 +1239,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port); | |||
1234 | int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); | 1239 | int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); |
1235 | 1240 | ||
1236 | int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); | 1241 | int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); |
1242 | int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); | ||
1243 | int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); | ||
1244 | int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, | ||
1245 | int enable); | ||
1237 | #endif /* MLX4_DEVICE_H */ | 1246 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 93cef6313e72..2bce4aad2570 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -427,7 +427,6 @@ struct mlx5_core_mr { | |||
427 | u64 size; | 427 | u64 size; |
428 | u32 key; | 428 | u32 key; |
429 | u32 pd; | 429 | u32 pd; |
430 | u32 access; | ||
431 | }; | 430 | }; |
432 | 431 | ||
433 | struct mlx5_core_srq { | 432 | struct mlx5_core_srq { |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index b73027298b3a..d424b9de3aff 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -63,12 +63,12 @@ struct mmc_ext_csd { | |||
63 | unsigned int power_off_longtime; /* Units: ms */ | 63 | unsigned int power_off_longtime; /* Units: ms */ |
64 | u8 power_off_notification; /* state */ | 64 | u8 power_off_notification; /* state */ |
65 | unsigned int hs_max_dtr; | 65 | unsigned int hs_max_dtr; |
66 | unsigned int hs200_max_dtr; | ||
66 | #define MMC_HIGH_26_MAX_DTR 26000000 | 67 | #define MMC_HIGH_26_MAX_DTR 26000000 |
67 | #define MMC_HIGH_52_MAX_DTR 52000000 | 68 | #define MMC_HIGH_52_MAX_DTR 52000000 |
68 | #define MMC_HIGH_DDR_MAX_DTR 52000000 | 69 | #define MMC_HIGH_DDR_MAX_DTR 52000000 |
69 | #define MMC_HS200_MAX_DTR 200000000 | 70 | #define MMC_HS200_MAX_DTR 200000000 |
70 | unsigned int sectors; | 71 | unsigned int sectors; |
71 | unsigned int card_type; | ||
72 | unsigned int hc_erase_size; /* In sectors */ | 72 | unsigned int hc_erase_size; /* In sectors */ |
73 | unsigned int hc_erase_timeout; /* In milliseconds */ | 73 | unsigned int hc_erase_timeout; /* In milliseconds */ |
74 | unsigned int sec_trim_mult; /* Secure trim multiplier */ | 74 | unsigned int sec_trim_mult; /* Secure trim multiplier */ |
@@ -110,6 +110,7 @@ struct mmc_ext_csd { | |||
110 | u8 raw_pwr_cl_200_360; /* 237 */ | 110 | u8 raw_pwr_cl_200_360; /* 237 */ |
111 | u8 raw_pwr_cl_ddr_52_195; /* 238 */ | 111 | u8 raw_pwr_cl_ddr_52_195; /* 238 */ |
112 | u8 raw_pwr_cl_ddr_52_360; /* 239 */ | 112 | u8 raw_pwr_cl_ddr_52_360; /* 239 */ |
113 | u8 raw_pwr_cl_ddr_200_360; /* 253 */ | ||
113 | u8 raw_bkops_status; /* 246 */ | 114 | u8 raw_bkops_status; /* 246 */ |
114 | u8 raw_sectors[4]; /* 212 - 4 bytes */ | 115 | u8 raw_sectors[4]; /* 212 - 4 bytes */ |
115 | 116 | ||
@@ -194,6 +195,7 @@ struct sdio_cis { | |||
194 | }; | 195 | }; |
195 | 196 | ||
196 | struct mmc_host; | 197 | struct mmc_host; |
198 | struct mmc_ios; | ||
197 | struct sdio_func; | 199 | struct sdio_func; |
198 | struct sdio_func_tuple; | 200 | struct sdio_func_tuple; |
199 | 201 | ||
@@ -250,15 +252,11 @@ struct mmc_card { | |||
250 | unsigned int state; /* (our) card state */ | 252 | unsigned int state; /* (our) card state */ |
251 | #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ | 253 | #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ |
252 | #define MMC_STATE_READONLY (1<<1) /* card is read-only */ | 254 | #define MMC_STATE_READONLY (1<<1) /* card is read-only */ |
253 | #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ | 255 | #define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */ |
254 | #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ | 256 | #define MMC_CARD_SDXC (1<<3) /* card is SDXC */ |
255 | #define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */ | 257 | #define MMC_CARD_REMOVED (1<<4) /* card has been removed */ |
256 | #define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */ | 258 | #define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */ |
257 | #define MMC_CARD_SDXC (1<<6) /* card is SDXC */ | 259 | #define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */ |
258 | #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ | ||
259 | #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ | ||
260 | #define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ | ||
261 | #define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */ | ||
262 | unsigned int quirks; /* card quirks */ | 260 | unsigned int quirks; /* card quirks */ |
263 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ | 261 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ |
264 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ | 262 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ |
@@ -301,6 +299,7 @@ struct mmc_card { | |||
301 | struct sdio_func_tuple *tuples; /* unknown common tuples */ | 299 | struct sdio_func_tuple *tuples; /* unknown common tuples */ |
302 | 300 | ||
303 | unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ | 301 | unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ |
302 | unsigned int mmc_avail_type; /* supported device type by both host and card */ | ||
304 | 303 | ||
305 | struct dentry *debugfs_root; | 304 | struct dentry *debugfs_root; |
306 | struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ | 305 | struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ |
@@ -353,7 +352,7 @@ struct mmc_fixup { | |||
353 | #define CID_OEMID_ANY ((unsigned short) -1) | 352 | #define CID_OEMID_ANY ((unsigned short) -1) |
354 | #define CID_NAME_ANY (NULL) | 353 | #define CID_NAME_ANY (NULL) |
355 | 354 | ||
356 | #define END_FIXUP { 0 } | 355 | #define END_FIXUP { NULL } |
357 | 356 | ||
358 | #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ | 357 | #define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ |
359 | _cis_vendor, _cis_device, \ | 358 | _cis_vendor, _cis_device, \ |
@@ -418,11 +417,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
418 | 417 | ||
419 | #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) | 418 | #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) |
420 | #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) | 419 | #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) |
421 | #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) | ||
422 | #define mmc_card_hs200(c) ((c)->state & MMC_STATE_HIGHSPEED_200) | ||
423 | #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) | 420 | #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) |
424 | #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) | ||
425 | #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) | ||
426 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) | 421 | #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) |
427 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) | 422 | #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) |
428 | #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) | 423 | #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) |
@@ -430,11 +425,7 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) | |||
430 | 425 | ||
431 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) | 426 | #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) |
432 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) | 427 | #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) |
433 | #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) | ||
434 | #define mmc_card_set_hs200(c) ((c)->state |= MMC_STATE_HIGHSPEED_200) | ||
435 | #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) | 428 | #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) |
436 | #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) | ||
437 | #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) | ||
438 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) | 429 | #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) |
439 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) | 430 | #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) |
440 | #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) | 431 | #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) |
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 6ce7d2cd3c7a..babaea93bca6 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
@@ -248,20 +248,6 @@ struct dw_mci_board { | |||
248 | /* delay in mS before detecting cards after interrupt */ | 248 | /* delay in mS before detecting cards after interrupt */ |
249 | u32 detect_delay_ms; | 249 | u32 detect_delay_ms; |
250 | 250 | ||
251 | int (*init)(u32 slot_id, irq_handler_t , void *); | ||
252 | int (*get_ro)(u32 slot_id); | ||
253 | int (*get_cd)(u32 slot_id); | ||
254 | int (*get_ocr)(u32 slot_id); | ||
255 | int (*get_bus_wd)(u32 slot_id); | ||
256 | /* | ||
257 | * Enable power to selected slot and set voltage to desired level. | ||
258 | * Voltage levels are specified using MMC_VDD_xxx defines defined | ||
259 | * in linux/mmc/host.h file. | ||
260 | */ | ||
261 | void (*setpower)(u32 slot_id, u32 volt); | ||
262 | void (*exit)(u32 slot_id); | ||
263 | void (*select_slot)(u32 slot_id); | ||
264 | |||
265 | struct dw_mci_dma_ops *dma_ops; | 251 | struct dw_mci_dma_ops *dma_ops; |
266 | struct dma_pdata *data; | 252 | struct dma_pdata *data; |
267 | struct block_settings *blk_settings; | 253 | struct block_settings *blk_settings; |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index cb61ea4d6945..7960424d0bc0 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/fault-inject.h> | 17 | #include <linux/fault-inject.h> |
18 | 18 | ||
19 | #include <linux/mmc/core.h> | 19 | #include <linux/mmc/core.h> |
20 | #include <linux/mmc/card.h> | ||
20 | #include <linux/mmc/pm.h> | 21 | #include <linux/mmc/pm.h> |
21 | 22 | ||
22 | struct mmc_ios { | 23 | struct mmc_ios { |
@@ -58,13 +59,9 @@ struct mmc_ios { | |||
58 | #define MMC_TIMING_UHS_SDR50 5 | 59 | #define MMC_TIMING_UHS_SDR50 5 |
59 | #define MMC_TIMING_UHS_SDR104 6 | 60 | #define MMC_TIMING_UHS_SDR104 6 |
60 | #define MMC_TIMING_UHS_DDR50 7 | 61 | #define MMC_TIMING_UHS_DDR50 7 |
61 | #define MMC_TIMING_MMC_HS200 8 | 62 | #define MMC_TIMING_MMC_DDR52 8 |
62 | 63 | #define MMC_TIMING_MMC_HS200 9 | |
63 | #define MMC_SDR_MODE 0 | 64 | #define MMC_TIMING_MMC_HS400 10 |
64 | #define MMC_1_2V_DDR_MODE 1 | ||
65 | #define MMC_1_8V_DDR_MODE 2 | ||
66 | #define MMC_1_2V_SDR_MODE 3 | ||
67 | #define MMC_1_8V_SDR_MODE 4 | ||
68 | 65 | ||
69 | unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ | 66 | unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ |
70 | 67 | ||
@@ -136,6 +133,9 @@ struct mmc_host_ops { | |||
136 | 133 | ||
137 | /* The tuning command opcode value is different for SD and eMMC cards */ | 134 | /* The tuning command opcode value is different for SD and eMMC cards */ |
138 | int (*execute_tuning)(struct mmc_host *host, u32 opcode); | 135 | int (*execute_tuning)(struct mmc_host *host, u32 opcode); |
136 | |||
137 | /* Prepare HS400 target operating frequency depending host driver */ | ||
138 | int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); | ||
139 | int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); | 139 | int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); |
140 | void (*hw_reset)(struct mmc_host *host); | 140 | void (*hw_reset)(struct mmc_host *host); |
141 | void (*card_event)(struct mmc_host *host); | 141 | void (*card_event)(struct mmc_host *host); |
@@ -278,6 +278,11 @@ struct mmc_host { | |||
278 | #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \ | 278 | #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \ |
279 | MMC_CAP2_PACKED_WR) | 279 | MMC_CAP2_PACKED_WR) |
280 | #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ | 280 | #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ |
281 | #define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ | ||
282 | #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ | ||
283 | #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ | ||
284 | MMC_CAP2_HS400_1_2V) | ||
285 | #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) | ||
281 | 286 | ||
282 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 287 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
283 | 288 | ||
@@ -318,6 +323,8 @@ struct mmc_host { | |||
318 | int rescan_disable; /* disable card detection */ | 323 | int rescan_disable; /* disable card detection */ |
319 | int rescan_entered; /* used with nonremovable devices */ | 324 | int rescan_entered; /* used with nonremovable devices */ |
320 | 325 | ||
326 | bool trigger_card_event; /* card_event necessary */ | ||
327 | |||
321 | struct mmc_card *card; /* device attached to this host */ | 328 | struct mmc_card *card; /* device attached to this host */ |
322 | 329 | ||
323 | wait_queue_head_t wq; | 330 | wait_queue_head_t wq; |
@@ -391,12 +398,13 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) | |||
391 | wake_up_process(host->sdio_irq_thread); | 398 | wake_up_process(host->sdio_irq_thread); |
392 | } | 399 | } |
393 | 400 | ||
401 | void sdio_run_irqs(struct mmc_host *host); | ||
402 | |||
394 | #ifdef CONFIG_REGULATOR | 403 | #ifdef CONFIG_REGULATOR |
395 | int mmc_regulator_get_ocrmask(struct regulator *supply); | 404 | int mmc_regulator_get_ocrmask(struct regulator *supply); |
396 | int mmc_regulator_set_ocr(struct mmc_host *mmc, | 405 | int mmc_regulator_set_ocr(struct mmc_host *mmc, |
397 | struct regulator *supply, | 406 | struct regulator *supply, |
398 | unsigned short vdd_bit); | 407 | unsigned short vdd_bit); |
399 | int mmc_regulator_get_supply(struct mmc_host *mmc); | ||
400 | #else | 408 | #else |
401 | static inline int mmc_regulator_get_ocrmask(struct regulator *supply) | 409 | static inline int mmc_regulator_get_ocrmask(struct regulator *supply) |
402 | { | 410 | { |
@@ -409,13 +417,10 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
409 | { | 417 | { |
410 | return 0; | 418 | return 0; |
411 | } | 419 | } |
412 | |||
413 | static inline int mmc_regulator_get_supply(struct mmc_host *mmc) | ||
414 | { | ||
415 | return 0; | ||
416 | } | ||
417 | #endif | 420 | #endif |
418 | 421 | ||
422 | int mmc_regulator_get_supply(struct mmc_host *mmc); | ||
423 | |||
419 | int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); | 424 | int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); |
420 | 425 | ||
421 | static inline int mmc_card_is_removable(struct mmc_host *host) | 426 | static inline int mmc_card_is_removable(struct mmc_host *host) |
@@ -475,4 +480,32 @@ static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) | |||
475 | return host->ios.clock; | 480 | return host->ios.clock; |
476 | } | 481 | } |
477 | #endif | 482 | #endif |
483 | |||
484 | static inline int mmc_card_hs(struct mmc_card *card) | ||
485 | { | ||
486 | return card->host->ios.timing == MMC_TIMING_SD_HS || | ||
487 | card->host->ios.timing == MMC_TIMING_MMC_HS; | ||
488 | } | ||
489 | |||
490 | static inline int mmc_card_uhs(struct mmc_card *card) | ||
491 | { | ||
492 | return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && | ||
493 | card->host->ios.timing <= MMC_TIMING_UHS_DDR50; | ||
494 | } | ||
495 | |||
496 | static inline bool mmc_card_hs200(struct mmc_card *card) | ||
497 | { | ||
498 | return card->host->ios.timing == MMC_TIMING_MMC_HS200; | ||
499 | } | ||
500 | |||
501 | static inline bool mmc_card_ddr52(struct mmc_card *card) | ||
502 | { | ||
503 | return card->host->ios.timing == MMC_TIMING_MMC_DDR52; | ||
504 | } | ||
505 | |||
506 | static inline bool mmc_card_hs400(struct mmc_card *card) | ||
507 | { | ||
508 | return card->host->ios.timing == MMC_TIMING_MMC_HS400; | ||
509 | } | ||
510 | |||
478 | #endif /* LINUX_MMC_HOST_H */ | 511 | #endif /* LINUX_MMC_HOST_H */ |
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 50bcde3677ca..64ec963ed347 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
@@ -325,6 +325,7 @@ struct _mmc_csd { | |||
325 | #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ | 325 | #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ |
326 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ | 326 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ |
327 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ | 327 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ |
328 | #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ | ||
328 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ | 329 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ |
329 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ | 330 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ |
330 | #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ | 331 | #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ |
@@ -354,18 +355,25 @@ struct _mmc_csd { | |||
354 | #define EXT_CSD_CMD_SET_SECURE (1<<1) | 355 | #define EXT_CSD_CMD_SET_SECURE (1<<1) |
355 | #define EXT_CSD_CMD_SET_CPSECURE (1<<2) | 356 | #define EXT_CSD_CMD_SET_CPSECURE (1<<2) |
356 | 357 | ||
357 | #define EXT_CSD_CARD_TYPE_26 (1<<0) /* Card can run at 26MHz */ | 358 | #define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */ |
358 | #define EXT_CSD_CARD_TYPE_52 (1<<1) /* Card can run at 52MHz */ | 359 | #define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */ |
359 | #define EXT_CSD_CARD_TYPE_MASK 0x3F /* Mask out reserved bits */ | 360 | #define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \ |
361 | EXT_CSD_CARD_TYPE_HS_52) | ||
360 | #define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ | 362 | #define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ |
361 | /* DDR mode @1.8V or 3V I/O */ | 363 | /* DDR mode @1.8V or 3V I/O */ |
362 | #define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ | 364 | #define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ |
363 | /* DDR mode @1.2V I/O */ | 365 | /* DDR mode @1.2V I/O */ |
364 | #define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ | 366 | #define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ |
365 | | EXT_CSD_CARD_TYPE_DDR_1_2V) | 367 | | EXT_CSD_CARD_TYPE_DDR_1_2V) |
366 | #define EXT_CSD_CARD_TYPE_SDR_1_8V (1<<4) /* Card can run at 200MHz */ | 368 | #define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */ |
367 | #define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */ | 369 | #define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */ |
368 | /* SDR mode @1.2V I/O */ | 370 | /* SDR mode @1.2V I/O */ |
371 | #define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \ | ||
372 | EXT_CSD_CARD_TYPE_HS200_1_2V) | ||
373 | #define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */ | ||
374 | #define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */ | ||
375 | #define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \ | ||
376 | EXT_CSD_CARD_TYPE_HS400_1_2V) | ||
369 | 377 | ||
370 | #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ | 378 | #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ |
371 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ | 379 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ |
@@ -373,6 +381,11 @@ struct _mmc_csd { | |||
373 | #define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ | 381 | #define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ |
374 | #define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ | 382 | #define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ |
375 | 383 | ||
384 | #define EXT_CSD_TIMING_BC 0 /* Backwards compatility */ | ||
385 | #define EXT_CSD_TIMING_HS 1 /* High speed */ | ||
386 | #define EXT_CSD_TIMING_HS200 2 /* HS200 */ | ||
387 | #define EXT_CSD_TIMING_HS400 3 /* HS400 */ | ||
388 | |||
376 | #define EXT_CSD_SEC_ER_EN BIT(0) | 389 | #define EXT_CSD_SEC_ER_EN BIT(0) |
377 | #define EXT_CSD_SEC_BD_BLK_EN BIT(2) | 390 | #define EXT_CSD_SEC_BD_BLK_EN BIT(2) |
378 | #define EXT_CSD_SEC_GB_CL_EN BIT(4) | 391 | #define EXT_CSD_SEC_GB_CL_EN BIT(4) |
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 7be12b883485..08abe9941884 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h | |||
@@ -57,12 +57,8 @@ struct sdhci_host { | |||
57 | #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) | 57 | #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) |
58 | /* Controller reports inverted write-protect state */ | 58 | /* Controller reports inverted write-protect state */ |
59 | #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) | 59 | #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) |
60 | /* Controller has nonstandard clock management */ | ||
61 | #define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17) | ||
62 | /* Controller does not like fast PIO transfers */ | 60 | /* Controller does not like fast PIO transfers */ |
63 | #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) | 61 | #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) |
64 | /* Controller losing signal/interrupt enable states after reset */ | ||
65 | #define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19) | ||
66 | /* Controller has to be forced to use block size of 2048 bytes */ | 62 | /* Controller has to be forced to use block size of 2048 bytes */ |
67 | #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) | 63 | #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20) |
68 | /* Controller cannot do multi-block transfers */ | 64 | /* Controller cannot do multi-block transfers */ |
@@ -147,6 +143,7 @@ struct sdhci_host { | |||
147 | 143 | ||
148 | bool runtime_suspended; /* Host is runtime suspended */ | 144 | bool runtime_suspended; /* Host is runtime suspended */ |
149 | bool bus_on; /* Bus power prevents runtime suspend */ | 145 | bool bus_on; /* Bus power prevents runtime suspend */ |
146 | bool preset_enabled; /* Preset is enabled */ | ||
150 | 147 | ||
151 | struct mmc_request *mrq; /* Current request */ | 148 | struct mmc_request *mrq; /* Current request */ |
152 | struct mmc_command *cmd; /* Current command */ | 149 | struct mmc_command *cmd; /* Current command */ |
@@ -164,8 +161,7 @@ struct sdhci_host { | |||
164 | dma_addr_t adma_addr; /* Mapped ADMA descr. table */ | 161 | dma_addr_t adma_addr; /* Mapped ADMA descr. table */ |
165 | dma_addr_t align_addr; /* Mapped bounce buffer */ | 162 | dma_addr_t align_addr; /* Mapped bounce buffer */ |
166 | 163 | ||
167 | struct tasklet_struct card_tasklet; /* Tasklet structures */ | 164 | struct tasklet_struct finish_tasklet; /* Tasklet structures */ |
168 | struct tasklet_struct finish_tasklet; | ||
169 | 165 | ||
170 | struct timer_list timer; /* Timer for timeouts */ | 166 | struct timer_list timer; /* Timer for timeouts */ |
171 | 167 | ||
@@ -177,6 +173,13 @@ struct sdhci_host { | |||
177 | unsigned int ocr_avail_mmc; | 173 | unsigned int ocr_avail_mmc; |
178 | u32 ocr_mask; /* available voltages */ | 174 | u32 ocr_mask; /* available voltages */ |
179 | 175 | ||
176 | unsigned timing; /* Current timing */ | ||
177 | |||
178 | u32 thread_isr; | ||
179 | |||
180 | /* cached registers */ | ||
181 | u32 ier; | ||
182 | |||
180 | wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ | 183 | wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */ |
181 | unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ | 184 | unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */ |
182 | 185 | ||
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 204a67743804..b1990c5524e1 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -321,7 +321,7 @@ extern bool parameq(const char *name1, const char *name2); | |||
321 | extern bool parameqn(const char *name1, const char *name2, size_t n); | 321 | extern bool parameqn(const char *name1, const char *name2, size_t n); |
322 | 322 | ||
323 | /* Called on module insert or kernel boot */ | 323 | /* Called on module insert or kernel boot */ |
324 | extern int parse_args(const char *name, | 324 | extern char *parse_args(const char *name, |
325 | char *args, | 325 | char *args, |
326 | const struct kernel_param *params, | 326 | const struct kernel_param *params, |
327 | unsigned num, | 327 | unsigned num, |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 92a2f991262a..8103f32f6d87 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg); | |||
25 | struct msi_desc { | 25 | struct msi_desc { |
26 | struct { | 26 | struct { |
27 | __u8 is_msix : 1; | 27 | __u8 is_msix : 1; |
28 | __u8 multiple: 3; /* log2 number of messages */ | 28 | __u8 multiple: 3; /* log2 num of messages allocated */ |
29 | __u8 multi_cap : 3; /* log2 num of messages supported */ | ||
29 | __u8 maskbit : 1; /* mask-pending bit supported ? */ | 30 | __u8 maskbit : 1; /* mask-pending bit supported ? */ |
30 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ | 31 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ |
31 | __u8 pos; /* Location of the msi capability */ | 32 | __u8 pos; /* Location of the msi capability */ |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 450d61ec7f06..2f0af2891f0f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -176,6 +176,11 @@ typedef enum { | |||
176 | /* Chip may not exist, so silence any errors in scan */ | 176 | /* Chip may not exist, so silence any errors in scan */ |
177 | #define NAND_SCAN_SILENT_NODEV 0x00040000 | 177 | #define NAND_SCAN_SILENT_NODEV 0x00040000 |
178 | /* | 178 | /* |
179 | * This option could be defined by controller drivers to protect against | ||
180 | * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers | ||
181 | */ | ||
182 | #define NAND_USE_BOUNCE_BUFFER 0x00080000 | ||
183 | /* | ||
179 | * Autodetect nand buswidth with readid/onfi. | 184 | * Autodetect nand buswidth with readid/onfi. |
180 | * This suppose the driver will configure the hardware in 8 bits mode | 185 | * This suppose the driver will configure the hardware in 8 bits mode |
181 | * when calling nand_scan_ident, and update its configuration | 186 | * when calling nand_scan_ident, and update its configuration |
@@ -552,8 +557,7 @@ struct nand_buffers { | |||
552 | * @ecc: [BOARDSPECIFIC] ECC control structure | 557 | * @ecc: [BOARDSPECIFIC] ECC control structure |
553 | * @buffers: buffer structure for read/write | 558 | * @buffers: buffer structure for read/write |
554 | * @hwcontrol: platform-specific hardware control structure | 559 | * @hwcontrol: platform-specific hardware control structure |
555 | * @erase_cmd: [INTERN] erase command write function, selectable due | 560 | * @erase: [REPLACEABLE] erase function |
556 | * to AND support. | ||
557 | * @scan_bbt: [REPLACEABLE] function to scan bad block table | 561 | * @scan_bbt: [REPLACEABLE] function to scan bad block table |
558 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring | 562 | * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring |
559 | * data from array to read regs (tR). | 563 | * data from array to read regs (tR). |
@@ -637,7 +641,7 @@ struct nand_chip { | |||
637 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, | 641 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, |
638 | int page_addr); | 642 | int page_addr); |
639 | int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); | 643 | int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); |
640 | void (*erase_cmd)(struct mtd_info *mtd, int page); | 644 | int (*erase)(struct mtd_info *mtd, int page); |
641 | int (*scan_bbt)(struct mtd_info *mtd); | 645 | int (*scan_bbt)(struct mtd_info *mtd); |
642 | int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, | 646 | int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, |
643 | int status, int page); | 647 | int status, int page); |
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index b730d4f84655..42ff7ff09bf5 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h | |||
@@ -101,9 +101,6 @@ static inline void send_pfow_command(struct map_info *map, | |||
101 | unsigned long len, map_word *datum) | 101 | unsigned long len, map_word *datum) |
102 | { | 102 | { |
103 | int bits_per_chip = map_bankwidth(map) * 8; | 103 | int bits_per_chip = map_bankwidth(map) * 8; |
104 | int chipnum; | ||
105 | struct lpddr_private *lpddr = map->fldrv_priv; | ||
106 | chipnum = adr >> lpddr->chipshift; | ||
107 | 104 | ||
108 | map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); | 105 | map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); |
109 | map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), | 106 | map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), |
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h new file mode 100644 index 000000000000..53241842a7ab --- /dev/null +++ b/include/linux/mtd/spi-nor.h | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef __LINUX_MTD_SPI_NOR_H | ||
11 | #define __LINUX_MTD_SPI_NOR_H | ||
12 | |||
13 | /* | ||
14 | * Note on opcode nomenclature: some opcodes have a format like | ||
15 | * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number | ||
16 | * of I/O lines used for the opcode, address, and data (respectively). The | ||
17 | * FUNCTION has an optional suffix of '4', to represent an opcode which | ||
18 | * requires a 4-byte (32-bit) address. | ||
19 | */ | ||
20 | |||
21 | /* Flash opcodes. */ | ||
22 | #define SPINOR_OP_WREN 0x06 /* Write enable */ | ||
23 | #define SPINOR_OP_RDSR 0x05 /* Read status register */ | ||
24 | #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ | ||
25 | #define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ | ||
26 | #define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ | ||
27 | #define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */ | ||
28 | #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */ | ||
29 | #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ | ||
30 | #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ | ||
31 | #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ | ||
32 | #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ | ||
33 | #define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ | ||
34 | #define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ | ||
35 | #define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ | ||
36 | #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ | ||
37 | |||
38 | /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ | ||
39 | #define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */ | ||
40 | #define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ | ||
41 | #define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */ | ||
42 | #define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ | ||
43 | #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ | ||
44 | #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ | ||
45 | |||
46 | /* Used for SST flashes only. */ | ||
47 | #define SPINOR_OP_BP 0x02 /* Byte program */ | ||
48 | #define SPINOR_OP_WRDI 0x04 /* Write disable */ | ||
49 | #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ | ||
50 | |||
51 | /* Used for Macronix and Winbond flashes. */ | ||
52 | #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ | ||
53 | #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ | ||
54 | |||
55 | /* Used for Spansion flashes only. */ | ||
56 | #define SPINOR_OP_BRWR 0x17 /* Bank register write */ | ||
57 | |||
58 | /* Status Register bits. */ | ||
59 | #define SR_WIP 1 /* Write in progress */ | ||
60 | #define SR_WEL 2 /* Write enable latch */ | ||
61 | /* meaning of other SR_* bits may differ between vendors */ | ||
62 | #define SR_BP0 4 /* Block protect 0 */ | ||
63 | #define SR_BP1 8 /* Block protect 1 */ | ||
64 | #define SR_BP2 0x10 /* Block protect 2 */ | ||
65 | #define SR_SRWD 0x80 /* SR write protect */ | ||
66 | |||
67 | #define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ | ||
68 | |||
69 | /* Configuration Register bits. */ | ||
70 | #define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */ | ||
71 | |||
72 | enum read_mode { | ||
73 | SPI_NOR_NORMAL = 0, | ||
74 | SPI_NOR_FAST, | ||
75 | SPI_NOR_DUAL, | ||
76 | SPI_NOR_QUAD, | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer | ||
81 | * @wren: command for "Write Enable", or 0x00 for not required | ||
82 | * @cmd: command for operation | ||
83 | * @cmd_pins: number of pins to send @cmd (1, 2, 4) | ||
84 | * @addr: address for operation | ||
85 | * @addr_pins: number of pins to send @addr (1, 2, 4) | ||
86 | * @addr_width: number of address bytes | ||
87 | * (3,4, or 0 for address not required) | ||
88 | * @mode: mode data | ||
89 | * @mode_pins: number of pins to send @mode (1, 2, 4) | ||
90 | * @mode_cycles: number of mode cycles (0 for mode not required) | ||
91 | * @dummy_cycles: number of dummy cycles (0 for dummy not required) | ||
92 | */ | ||
93 | struct spi_nor_xfer_cfg { | ||
94 | u8 wren; | ||
95 | u8 cmd; | ||
96 | u8 cmd_pins; | ||
97 | u32 addr; | ||
98 | u8 addr_pins; | ||
99 | u8 addr_width; | ||
100 | u8 mode; | ||
101 | u8 mode_pins; | ||
102 | u8 mode_cycles; | ||
103 | u8 dummy_cycles; | ||
104 | }; | ||
105 | |||
106 | #define SPI_NOR_MAX_CMD_SIZE 8 | ||
107 | enum spi_nor_ops { | ||
108 | SPI_NOR_OPS_READ = 0, | ||
109 | SPI_NOR_OPS_WRITE, | ||
110 | SPI_NOR_OPS_ERASE, | ||
111 | SPI_NOR_OPS_LOCK, | ||
112 | SPI_NOR_OPS_UNLOCK, | ||
113 | }; | ||
114 | |||
115 | /** | ||
116 | * struct spi_nor - Structure for defining a the SPI NOR layer | ||
117 | * @mtd: point to a mtd_info structure | ||
118 | * @lock: the lock for the read/write/erase/lock/unlock operations | ||
119 | * @dev: point to a spi device, or a spi nor controller device. | ||
120 | * @page_size: the page size of the SPI NOR | ||
121 | * @addr_width: number of address bytes | ||
122 | * @erase_opcode: the opcode for erasing a sector | ||
123 | * @read_opcode: the read opcode | ||
124 | * @read_dummy: the dummy needed by the read operation | ||
125 | * @program_opcode: the program opcode | ||
126 | * @flash_read: the mode of the read | ||
127 | * @sst_write_second: used by the SST write operation | ||
128 | * @cfg: used by the read_xfer/write_xfer | ||
129 | * @cmd_buf: used by the write_reg | ||
130 | * @prepare: [OPTIONAL] do some preparations for the | ||
131 | * read/write/erase/lock/unlock operations | ||
132 | * @unprepare: [OPTIONAL] do some post work after the | ||
133 | * read/write/erase/lock/unlock operations | ||
134 | * @read_xfer: [OPTIONAL] the read fundamental primitive | ||
135 | * @write_xfer: [OPTIONAL] the writefundamental primitive | ||
136 | * @read_reg: [DRIVER-SPECIFIC] read out the register | ||
137 | * @write_reg: [DRIVER-SPECIFIC] write data to the register | ||
138 | * @read_id: [REPLACEABLE] read out the ID data, and find | ||
139 | * the proper spi_device_id | ||
140 | * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready | ||
141 | * @read: [DRIVER-SPECIFIC] read data from the SPI NOR | ||
142 | * @write: [DRIVER-SPECIFIC] write data to the SPI NOR | ||
143 | * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR | ||
144 | * at the offset @offs | ||
145 | * @priv: the private data | ||
146 | */ | ||
147 | struct spi_nor { | ||
148 | struct mtd_info *mtd; | ||
149 | struct mutex lock; | ||
150 | struct device *dev; | ||
151 | u32 page_size; | ||
152 | u8 addr_width; | ||
153 | u8 erase_opcode; | ||
154 | u8 read_opcode; | ||
155 | u8 read_dummy; | ||
156 | u8 program_opcode; | ||
157 | enum read_mode flash_read; | ||
158 | bool sst_write_second; | ||
159 | struct spi_nor_xfer_cfg cfg; | ||
160 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; | ||
161 | |||
162 | int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); | ||
163 | void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); | ||
164 | int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, | ||
165 | u8 *buf, size_t len); | ||
166 | int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, | ||
167 | u8 *buf, size_t len); | ||
168 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); | ||
169 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, | ||
170 | int write_enable); | ||
171 | const struct spi_device_id *(*read_id)(struct spi_nor *nor); | ||
172 | int (*wait_till_ready)(struct spi_nor *nor); | ||
173 | |||
174 | int (*read)(struct spi_nor *nor, loff_t from, | ||
175 | size_t len, size_t *retlen, u_char *read_buf); | ||
176 | void (*write)(struct spi_nor *nor, loff_t to, | ||
177 | size_t len, size_t *retlen, const u_char *write_buf); | ||
178 | int (*erase)(struct spi_nor *nor, loff_t offs); | ||
179 | |||
180 | void *priv; | ||
181 | }; | ||
182 | |||
183 | /** | ||
184 | * spi_nor_scan() - scan the SPI NOR | ||
185 | * @nor: the spi_nor structure | ||
186 | * @id: the spi_device_id provided by the driver | ||
187 | * @mode: the read mode supported by the driver | ||
188 | * | ||
189 | * The drivers can use this fuction to scan the SPI NOR. | ||
190 | * In the scanning, it will try to get all the necessary information to | ||
191 | * fill the mtd_info{} and the spi_nor{}. | ||
192 | * | ||
193 | * The board may assigns a spi_device_id with @id which be used to compared with | ||
194 | * the spi_device_id detected by the scanning. | ||
195 | * | ||
196 | * Return: 0 for success, others for failure. | ||
197 | */ | ||
198 | int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, | ||
199 | enum read_mode mode); | ||
200 | extern const struct spi_device_id spi_nor_ids[]; | ||
201 | |||
202 | /** | ||
203 | * spi_nor_match_id() - find the spi_device_id by the name | ||
204 | * @name: the name of the spi_device_id | ||
205 | * | ||
206 | * The drivers use this function to find the spi_device_id | ||
207 | * specified by the @name. | ||
208 | * | ||
209 | * Return: returns the right spi_device_id pointer on success, | ||
210 | * and returns NULL on failure. | ||
211 | */ | ||
212 | const struct spi_device_id *spi_nor_match_id(char *name); | ||
213 | |||
214 | #endif | ||
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 11692dea18aa..8d5535c58cc2 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/lockdep.h> | 17 | #include <linux/lockdep.h> |
18 | #include <linux/atomic.h> | 18 | #include <linux/atomic.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <linux/osq_lock.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * Simple, straightforward mutexes with strict semantics: | 23 | * Simple, straightforward mutexes with strict semantics: |
@@ -46,7 +47,6 @@ | |||
46 | * - detects multi-task circular deadlocks and prints out all affected | 47 | * - detects multi-task circular deadlocks and prints out all affected |
47 | * locks and tasks (and only those tasks) | 48 | * locks and tasks (and only those tasks) |
48 | */ | 49 | */ |
49 | struct optimistic_spin_queue; | ||
50 | struct mutex { | 50 | struct mutex { |
51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ | 51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ |
52 | atomic_t count; | 52 | atomic_t count; |
@@ -56,7 +56,7 @@ struct mutex { | |||
56 | struct task_struct *owner; | 56 | struct task_struct *owner; |
57 | #endif | 57 | #endif |
58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
59 | struct optimistic_spin_queue *osq; /* Spinner MCS lock */ | 59 | struct optimistic_spin_queue osq; /* Spinner MCS lock */ |
60 | #endif | 60 | #endif |
61 | #ifdef CONFIG_DEBUG_MUTEXES | 61 | #ifdef CONFIG_DEBUG_MUTEXES |
62 | const char *name; | 62 | const char *name; |
@@ -176,8 +176,4 @@ extern void mutex_unlock(struct mutex *lock); | |||
176 | 176 | ||
177 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | 177 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); |
178 | 178 | ||
179 | #ifndef arch_mutex_cpu_relax | ||
180 | # define arch_mutex_cpu_relax() cpu_relax() | ||
181 | #endif | ||
182 | |||
183 | #endif /* __LINUX_MUTEX_H */ | 179 | #endif /* __LINUX_MUTEX_H */ |
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index c26d0ec2ef3a..d99800cbdcf3 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -42,9 +42,11 @@ enum { | |||
42 | NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ | 42 | NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ |
43 | NETIF_F_FSO_BIT, /* ... FCoE segmentation */ | 43 | NETIF_F_FSO_BIT, /* ... FCoE segmentation */ |
44 | NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ | 44 | NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ |
45 | NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ | ||
45 | NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */ | 46 | NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */ |
46 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ | 47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ |
47 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ | 48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ |
49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ | ||
48 | NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ | 50 | NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ |
49 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ | 51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ |
50 | NETIF_F_GSO_MPLS_BIT, | 52 | NETIF_F_GSO_MPLS_BIT, |
@@ -111,9 +113,11 @@ enum { | |||
111 | #define NETIF_F_RXFCS __NETIF_F(RXFCS) | 113 | #define NETIF_F_RXFCS __NETIF_F(RXFCS) |
112 | #define NETIF_F_RXALL __NETIF_F(RXALL) | 114 | #define NETIF_F_RXALL __NETIF_F(RXALL) |
113 | #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) | 115 | #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) |
116 | #define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) | ||
114 | #define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) | 117 | #define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP) |
115 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) | 118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) |
116 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) | 119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) |
120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) | ||
117 | #define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) | 121 | #define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) |
118 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) | 122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) |
119 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) | 123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6c1ae9fd9505..66f9a04ec270 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -56,9 +56,6 @@ struct device; | |||
56 | struct phy_device; | 56 | struct phy_device; |
57 | /* 802.11 specific */ | 57 | /* 802.11 specific */ |
58 | struct wireless_dev; | 58 | struct wireless_dev; |
59 | /* source back-compat hooks */ | ||
60 | #define SET_ETHTOOL_OPS(netdev,ops) \ | ||
61 | ( (netdev)->ethtool_ops = (ops) ) | ||
62 | 59 | ||
63 | void netdev_set_default_ethtool_ops(struct net_device *dev, | 60 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
64 | const struct ethtool_ops *ops); | 61 | const struct ethtool_ops *ops); |
@@ -853,7 +850,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
853 | * SR-IOV management functions. | 850 | * SR-IOV management functions. |
854 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | 851 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); |
855 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | 852 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); |
856 | * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); | 853 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
854 | * int max_tx_rate); | ||
857 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); | 855 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
858 | * int (*ndo_get_vf_config)(struct net_device *dev, | 856 | * int (*ndo_get_vf_config)(struct net_device *dev, |
859 | * int vf, struct ifla_vf_info *ivf); | 857 | * int vf, struct ifla_vf_info *ivf); |
@@ -1047,8 +1045,9 @@ struct net_device_ops { | |||
1047 | int queue, u8 *mac); | 1045 | int queue, u8 *mac); |
1048 | int (*ndo_set_vf_vlan)(struct net_device *dev, | 1046 | int (*ndo_set_vf_vlan)(struct net_device *dev, |
1049 | int queue, u16 vlan, u8 qos); | 1047 | int queue, u16 vlan, u8 qos); |
1050 | int (*ndo_set_vf_tx_rate)(struct net_device *dev, | 1048 | int (*ndo_set_vf_rate)(struct net_device *dev, |
1051 | int vf, int rate); | 1049 | int vf, int min_tx_rate, |
1050 | int max_tx_rate); | ||
1052 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, | 1051 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1053 | int vf, bool setting); | 1052 | int vf, bool setting); |
1054 | int (*ndo_get_vf_config)(struct net_device *dev, | 1053 | int (*ndo_get_vf_config)(struct net_device *dev, |
@@ -2634,6 +2633,7 @@ int dev_get_phys_port_id(struct net_device *dev, | |||
2634 | struct netdev_phys_port_id *ppid); | 2633 | struct netdev_phys_port_id *ppid); |
2635 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2634 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2636 | struct netdev_queue *txq); | 2635 | struct netdev_queue *txq); |
2636 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | ||
2637 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 2637 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
2638 | bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); | 2638 | bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); |
2639 | 2639 | ||
@@ -3003,6 +3003,15 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | |||
3003 | struct netdev_hw_addr_list *from_list, int addr_len); | 3003 | struct netdev_hw_addr_list *from_list, int addr_len); |
3004 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | 3004 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, |
3005 | struct netdev_hw_addr_list *from_list, int addr_len); | 3005 | struct netdev_hw_addr_list *from_list, int addr_len); |
3006 | int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, | ||
3007 | struct net_device *dev, | ||
3008 | int (*sync)(struct net_device *, const unsigned char *), | ||
3009 | int (*unsync)(struct net_device *, | ||
3010 | const unsigned char *)); | ||
3011 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, | ||
3012 | struct net_device *dev, | ||
3013 | int (*unsync)(struct net_device *, | ||
3014 | const unsigned char *)); | ||
3006 | void __hw_addr_init(struct netdev_hw_addr_list *list); | 3015 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
3007 | 3016 | ||
3008 | /* Functions used for device addresses handling */ | 3017 | /* Functions used for device addresses handling */ |
@@ -3023,6 +3032,38 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from); | |||
3023 | void dev_uc_flush(struct net_device *dev); | 3032 | void dev_uc_flush(struct net_device *dev); |
3024 | void dev_uc_init(struct net_device *dev); | 3033 | void dev_uc_init(struct net_device *dev); |
3025 | 3034 | ||
3035 | /** | ||
3036 | * __dev_uc_sync - Synchonize device's unicast list | ||
3037 | * @dev: device to sync | ||
3038 | * @sync: function to call if address should be added | ||
3039 | * @unsync: function to call if address should be removed | ||
3040 | * | ||
3041 | * Add newly added addresses to the interface, and release | ||
3042 | * addresses that have been deleted. | ||
3043 | **/ | ||
3044 | static inline int __dev_uc_sync(struct net_device *dev, | ||
3045 | int (*sync)(struct net_device *, | ||
3046 | const unsigned char *), | ||
3047 | int (*unsync)(struct net_device *, | ||
3048 | const unsigned char *)) | ||
3049 | { | ||
3050 | return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); | ||
3051 | } | ||
3052 | |||
3053 | /** | ||
3054 | * __dev_uc_unsync - Remove synchonized addresses from device | ||
3055 | * @dev: device to sync | ||
3056 | * @unsync: function to call if address should be removed | ||
3057 | * | ||
3058 | * Remove all addresses that were added to the device by dev_uc_sync(). | ||
3059 | **/ | ||
3060 | static inline void __dev_uc_unsync(struct net_device *dev, | ||
3061 | int (*unsync)(struct net_device *, | ||
3062 | const unsigned char *)) | ||
3063 | { | ||
3064 | __hw_addr_unsync_dev(&dev->uc, dev, unsync); | ||
3065 | } | ||
3066 | |||
3026 | /* Functions used for multicast addresses handling */ | 3067 | /* Functions used for multicast addresses handling */ |
3027 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); | 3068 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
3028 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | 3069 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); |
@@ -3035,6 +3076,38 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from); | |||
3035 | void dev_mc_flush(struct net_device *dev); | 3076 | void dev_mc_flush(struct net_device *dev); |
3036 | void dev_mc_init(struct net_device *dev); | 3077 | void dev_mc_init(struct net_device *dev); |
3037 | 3078 | ||
3079 | /** | ||
3080 | * __dev_mc_sync - Synchonize device's multicast list | ||
3081 | * @dev: device to sync | ||
3082 | * @sync: function to call if address should be added | ||
3083 | * @unsync: function to call if address should be removed | ||
3084 | * | ||
3085 | * Add newly added addresses to the interface, and release | ||
3086 | * addresses that have been deleted. | ||
3087 | **/ | ||
3088 | static inline int __dev_mc_sync(struct net_device *dev, | ||
3089 | int (*sync)(struct net_device *, | ||
3090 | const unsigned char *), | ||
3091 | int (*unsync)(struct net_device *, | ||
3092 | const unsigned char *)) | ||
3093 | { | ||
3094 | return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); | ||
3095 | } | ||
3096 | |||
3097 | /** | ||
3098 | * __dev_mc_unsync - Remove synchonized addresses from device | ||
3099 | * @dev: device to sync | ||
3100 | * @unsync: function to call if address should be removed | ||
3101 | * | ||
3102 | * Remove all addresses that were added to the device by dev_mc_sync(). | ||
3103 | **/ | ||
3104 | static inline void __dev_mc_unsync(struct net_device *dev, | ||
3105 | int (*unsync)(struct net_device *, | ||
3106 | const unsigned char *)) | ||
3107 | { | ||
3108 | __hw_addr_unsync_dev(&dev->mc, dev, unsync); | ||
3109 | } | ||
3110 | |||
3038 | /* Functions used for secondary unicast and multicast support */ | 3111 | /* Functions used for secondary unicast and multicast support */ |
3039 | void dev_set_rx_mode(struct net_device *dev); | 3112 | void dev_set_rx_mode(struct net_device *dev); |
3040 | void __dev_set_rx_mode(struct net_device *dev); | 3113 | void __dev_set_rx_mode(struct net_device *dev); |
@@ -3180,6 +3253,20 @@ const char *netdev_drivername(const struct net_device *dev); | |||
3180 | 3253 | ||
3181 | void linkwatch_run_queue(void); | 3254 | void linkwatch_run_queue(void); |
3182 | 3255 | ||
3256 | static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, | ||
3257 | netdev_features_t f2) | ||
3258 | { | ||
3259 | if (f1 & NETIF_F_GEN_CSUM) | ||
3260 | f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); | ||
3261 | if (f2 & NETIF_F_GEN_CSUM) | ||
3262 | f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); | ||
3263 | f1 &= f2; | ||
3264 | if (f1 & NETIF_F_GEN_CSUM) | ||
3265 | f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); | ||
3266 | |||
3267 | return f1; | ||
3268 | } | ||
3269 | |||
3183 | static inline netdev_features_t netdev_get_wanted_features( | 3270 | static inline netdev_features_t netdev_get_wanted_features( |
3184 | struct net_device *dev) | 3271 | struct net_device *dev) |
3185 | { | 3272 | { |
@@ -3218,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) | |||
3218 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | 3305 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); |
3219 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); | 3306 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); |
3220 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | 3307 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); |
3308 | BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); | ||
3309 | BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); | ||
3310 | BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT)); | ||
3311 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); | ||
3312 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); | ||
3313 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | ||
3314 | BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); | ||
3221 | 3315 | ||
3222 | return (features & feature) == feature; | 3316 | return (features & feature) == feature; |
3223 | } | 3317 | } |
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h index b2e85e59f760..6ec975748742 100644 --- a/include/linux/netfilter/nfnetlink_acct.h +++ b/include/linux/netfilter/nfnetlink_acct.h | |||
@@ -3,11 +3,17 @@ | |||
3 | 3 | ||
4 | #include <uapi/linux/netfilter/nfnetlink_acct.h> | 4 | #include <uapi/linux/netfilter/nfnetlink_acct.h> |
5 | 5 | ||
6 | enum { | ||
7 | NFACCT_NO_QUOTA = -1, | ||
8 | NFACCT_UNDERQUOTA, | ||
9 | NFACCT_OVERQUOTA, | ||
10 | }; | ||
6 | 11 | ||
7 | struct nf_acct; | 12 | struct nf_acct; |
8 | 13 | ||
9 | struct nf_acct *nfnl_acct_find_get(const char *filter_name); | 14 | struct nf_acct *nfnl_acct_find_get(const char *filter_name); |
10 | void nfnl_acct_put(struct nf_acct *acct); | 15 | void nfnl_acct_put(struct nf_acct *acct); |
11 | void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); | 16 | void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); |
12 | 17 | extern int nfnl_acct_overquota(const struct sk_buff *skb, | |
18 | struct nf_acct *nfacct); | ||
13 | #endif /* _NFNL_ACCT_H */ | 19 | #endif /* _NFNL_ACCT_H */ |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 034cda789a15..9e572daa15d5 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -46,7 +46,8 @@ struct netlink_kernel_cfg { | |||
46 | unsigned int flags; | 46 | unsigned int flags; |
47 | void (*input)(struct sk_buff *skb); | 47 | void (*input)(struct sk_buff *skb); |
48 | struct mutex *cb_mutex; | 48 | struct mutex *cb_mutex; |
49 | void (*bind)(int group); | 49 | int (*bind)(int group); |
50 | void (*unbind)(int group); | ||
50 | bool (*compare)(struct net *net, struct sock *sk); | 51 | bool (*compare)(struct net *net, struct sock *sk); |
51 | }; | 52 | }; |
52 | 53 | ||
diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 3e794c12e90a..610af5155ef2 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h | |||
@@ -46,6 +46,9 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc | |||
46 | enum nfs3_stable_how { | 46 | enum nfs3_stable_how { |
47 | NFS_UNSTABLE = 0, | 47 | NFS_UNSTABLE = 0, |
48 | NFS_DATA_SYNC = 1, | 48 | NFS_DATA_SYNC = 1, |
49 | NFS_FILE_SYNC = 2 | 49 | NFS_FILE_SYNC = 2, |
50 | |||
51 | /* used by direct.c to mark verf as invalid */ | ||
52 | NFS_INVALID_STABLE_HOW = -1 | ||
50 | }; | 53 | }; |
51 | #endif /* _LINUX_NFS_H */ | 54 | #endif /* _LINUX_NFS_H */ |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 12c2cb947df5..a1e3064a8d99 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -399,8 +399,6 @@ enum lock_type4 { | |||
399 | #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) | 399 | #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) |
400 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) | 400 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) |
401 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) | 401 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) |
402 | #define FATTR4_WORD2_CHANGE_SECURITY_LABEL \ | ||
403 | (1UL << 17) | ||
404 | 402 | ||
405 | /* MDS threshold bitmap bits */ | 403 | /* MDS threshold bitmap bits */ |
406 | #define THRESHOLD_RD (1UL << 0) | 404 | #define THRESHOLD_RD (1UL << 0) |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index fa6918b0f829..e30f6059ecd6 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -459,13 +459,12 @@ extern int nfs3_removexattr (struct dentry *, const char *name); | |||
459 | /* | 459 | /* |
460 | * linux/fs/nfs/direct.c | 460 | * linux/fs/nfs/direct.c |
461 | */ | 461 | */ |
462 | extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t, | 462 | extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); |
463 | unsigned long); | ||
464 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, | 463 | extern ssize_t nfs_file_direct_read(struct kiocb *iocb, |
465 | const struct iovec *iov, unsigned long nr_segs, | 464 | struct iov_iter *iter, |
466 | loff_t pos, bool uio); | 465 | loff_t pos, bool uio); |
467 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, | 466 | extern ssize_t nfs_file_direct_write(struct kiocb *iocb, |
468 | const struct iovec *iov, unsigned long nr_segs, | 467 | struct iov_iter *iter, |
469 | loff_t pos, bool uio); | 468 | loff_t pos, bool uio); |
470 | 469 | ||
471 | /* | 470 | /* |
@@ -520,7 +519,6 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc); | |||
520 | extern int nfs_writepages(struct address_space *, struct writeback_control *); | 519 | extern int nfs_writepages(struct address_space *, struct writeback_control *); |
521 | extern int nfs_flush_incompatible(struct file *file, struct page *page); | 520 | extern int nfs_flush_incompatible(struct file *file, struct page *page); |
522 | extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); | 521 | extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); |
523 | extern void nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); | ||
524 | 522 | ||
525 | /* | 523 | /* |
526 | * Try to write back everything synchronously (but check the | 524 | * Try to write back everything synchronously (but check the |
@@ -553,7 +551,6 @@ nfs_have_writebacks(struct inode *inode) | |||
553 | extern int nfs_readpage(struct file *, struct page *); | 551 | extern int nfs_readpage(struct file *, struct page *); |
554 | extern int nfs_readpages(struct file *, struct address_space *, | 552 | extern int nfs_readpages(struct file *, struct address_space *, |
555 | struct list_head *, unsigned); | 553 | struct list_head *, unsigned); |
556 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); | ||
557 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, | 554 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, |
558 | struct page *); | 555 | struct page *); |
559 | 556 | ||
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 92ce5783b707..7d9096d95d4a 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
@@ -22,12 +22,17 @@ | |||
22 | * Valid flags for a dirty buffer | 22 | * Valid flags for a dirty buffer |
23 | */ | 23 | */ |
24 | enum { | 24 | enum { |
25 | PG_BUSY = 0, | 25 | PG_BUSY = 0, /* nfs_{un}lock_request */ |
26 | PG_MAPPED, | 26 | PG_MAPPED, /* page private set for buffered io */ |
27 | PG_CLEAN, | 27 | PG_CLEAN, /* write succeeded */ |
28 | PG_NEED_COMMIT, | 28 | PG_COMMIT_TO_DS, /* used by pnfs layouts */ |
29 | PG_NEED_RESCHED, | 29 | PG_INODE_REF, /* extra ref held by inode (head req only) */ |
30 | PG_COMMIT_TO_DS, | 30 | PG_HEADLOCK, /* page group lock of wb_head */ |
31 | PG_TEARDOWN, /* page group sync for destroy */ | ||
32 | PG_UNLOCKPAGE, /* page group sync bit in read path */ | ||
33 | PG_UPTODATE, /* page group sync bit in read path */ | ||
34 | PG_WB_END, /* page group sync bit in write path */ | ||
35 | PG_REMOVE, /* page group sync bit in write path */ | ||
31 | }; | 36 | }; |
32 | 37 | ||
33 | struct nfs_inode; | 38 | struct nfs_inode; |
@@ -43,15 +48,29 @@ struct nfs_page { | |||
43 | struct kref wb_kref; /* reference count */ | 48 | struct kref wb_kref; /* reference count */ |
44 | unsigned long wb_flags; | 49 | unsigned long wb_flags; |
45 | struct nfs_write_verifier wb_verf; /* Commit cookie */ | 50 | struct nfs_write_verifier wb_verf; /* Commit cookie */ |
51 | struct nfs_page *wb_this_page; /* list of reqs for this page */ | ||
52 | struct nfs_page *wb_head; /* head pointer for req list */ | ||
46 | }; | 53 | }; |
47 | 54 | ||
48 | struct nfs_pageio_descriptor; | 55 | struct nfs_pageio_descriptor; |
49 | struct nfs_pageio_ops { | 56 | struct nfs_pageio_ops { |
50 | void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); | 57 | void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); |
51 | bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); | 58 | size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, |
59 | struct nfs_page *); | ||
52 | int (*pg_doio)(struct nfs_pageio_descriptor *); | 60 | int (*pg_doio)(struct nfs_pageio_descriptor *); |
53 | }; | 61 | }; |
54 | 62 | ||
63 | struct nfs_rw_ops { | ||
64 | const fmode_t rw_mode; | ||
65 | struct nfs_rw_header *(*rw_alloc_header)(void); | ||
66 | void (*rw_free_header)(struct nfs_rw_header *); | ||
67 | void (*rw_release)(struct nfs_pgio_data *); | ||
68 | int (*rw_done)(struct rpc_task *, struct nfs_pgio_data *, struct inode *); | ||
69 | void (*rw_result)(struct rpc_task *, struct nfs_pgio_data *); | ||
70 | void (*rw_initiate)(struct nfs_pgio_data *, struct rpc_message *, | ||
71 | struct rpc_task_setup *, int); | ||
72 | }; | ||
73 | |||
55 | struct nfs_pageio_descriptor { | 74 | struct nfs_pageio_descriptor { |
56 | struct list_head pg_list; | 75 | struct list_head pg_list; |
57 | unsigned long pg_bytes_written; | 76 | unsigned long pg_bytes_written; |
@@ -63,6 +82,7 @@ struct nfs_pageio_descriptor { | |||
63 | 82 | ||
64 | struct inode *pg_inode; | 83 | struct inode *pg_inode; |
65 | const struct nfs_pageio_ops *pg_ops; | 84 | const struct nfs_pageio_ops *pg_ops; |
85 | const struct nfs_rw_ops *pg_rw_ops; | ||
66 | int pg_ioflags; | 86 | int pg_ioflags; |
67 | int pg_error; | 87 | int pg_error; |
68 | const struct rpc_call_ops *pg_rpc_callops; | 88 | const struct rpc_call_ops *pg_rpc_callops; |
@@ -75,29 +95,33 @@ struct nfs_pageio_descriptor { | |||
75 | #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) | 95 | #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) |
76 | 96 | ||
77 | extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, | 97 | extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, |
78 | struct inode *inode, | ||
79 | struct page *page, | 98 | struct page *page, |
99 | struct nfs_page *last, | ||
80 | unsigned int offset, | 100 | unsigned int offset, |
81 | unsigned int count); | 101 | unsigned int count); |
82 | extern void nfs_release_request(struct nfs_page *req); | 102 | extern void nfs_release_request(struct nfs_page *); |
83 | 103 | ||
84 | 104 | ||
85 | extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, | 105 | extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, |
86 | struct inode *inode, | 106 | struct inode *inode, |
87 | const struct nfs_pageio_ops *pg_ops, | 107 | const struct nfs_pageio_ops *pg_ops, |
88 | const struct nfs_pgio_completion_ops *compl_ops, | 108 | const struct nfs_pgio_completion_ops *compl_ops, |
109 | const struct nfs_rw_ops *rw_ops, | ||
89 | size_t bsize, | 110 | size_t bsize, |
90 | int how); | 111 | int how); |
91 | extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, | 112 | extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, |
92 | struct nfs_page *); | 113 | struct nfs_page *); |
93 | extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); | 114 | extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); |
94 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); | 115 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); |
95 | extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, | 116 | extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, |
96 | struct nfs_page *prev, | 117 | struct nfs_page *prev, |
97 | struct nfs_page *req); | 118 | struct nfs_page *req); |
98 | extern int nfs_wait_on_request(struct nfs_page *); | 119 | extern int nfs_wait_on_request(struct nfs_page *); |
99 | extern void nfs_unlock_request(struct nfs_page *req); | 120 | extern void nfs_unlock_request(struct nfs_page *req); |
100 | extern void nfs_unlock_and_release_request(struct nfs_page *req); | 121 | extern void nfs_unlock_and_release_request(struct nfs_page *); |
122 | extern void nfs_page_group_lock(struct nfs_page *); | ||
123 | extern void nfs_page_group_unlock(struct nfs_page *); | ||
124 | extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); | ||
101 | 125 | ||
102 | /* | 126 | /* |
103 | * Lock the page of an asynchronous request | 127 | * Lock the page of an asynchronous request |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 6fb5b2335b59..9a1396e70310 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -489,31 +489,21 @@ struct nfs4_delegreturnres { | |||
489 | }; | 489 | }; |
490 | 490 | ||
491 | /* | 491 | /* |
492 | * Arguments to the read call. | 492 | * Arguments to the write call. |
493 | */ | 493 | */ |
494 | struct nfs_readargs { | 494 | struct nfs_write_verifier { |
495 | struct nfs4_sequence_args seq_args; | 495 | char data[8]; |
496 | struct nfs_fh * fh; | ||
497 | struct nfs_open_context *context; | ||
498 | struct nfs_lock_context *lock_context; | ||
499 | nfs4_stateid stateid; | ||
500 | __u64 offset; | ||
501 | __u32 count; | ||
502 | unsigned int pgbase; | ||
503 | struct page ** pages; | ||
504 | }; | 496 | }; |
505 | 497 | ||
506 | struct nfs_readres { | 498 | struct nfs_writeverf { |
507 | struct nfs4_sequence_res seq_res; | 499 | struct nfs_write_verifier verifier; |
508 | struct nfs_fattr * fattr; | 500 | enum nfs3_stable_how committed; |
509 | __u32 count; | ||
510 | int eof; | ||
511 | }; | 501 | }; |
512 | 502 | ||
513 | /* | 503 | /* |
514 | * Arguments to the write call. | 504 | * Arguments shared by the read and write call. |
515 | */ | 505 | */ |
516 | struct nfs_writeargs { | 506 | struct nfs_pgio_args { |
517 | struct nfs4_sequence_args seq_args; | 507 | struct nfs4_sequence_args seq_args; |
518 | struct nfs_fh * fh; | 508 | struct nfs_fh * fh; |
519 | struct nfs_open_context *context; | 509 | struct nfs_open_context *context; |
@@ -521,27 +511,20 @@ struct nfs_writeargs { | |||
521 | nfs4_stateid stateid; | 511 | nfs4_stateid stateid; |
522 | __u64 offset; | 512 | __u64 offset; |
523 | __u32 count; | 513 | __u32 count; |
524 | enum nfs3_stable_how stable; | ||
525 | unsigned int pgbase; | 514 | unsigned int pgbase; |
526 | struct page ** pages; | 515 | struct page ** pages; |
527 | const u32 * bitmask; | 516 | const u32 * bitmask; /* used by write */ |
528 | }; | 517 | enum nfs3_stable_how stable; /* used by write */ |
529 | |||
530 | struct nfs_write_verifier { | ||
531 | char data[8]; | ||
532 | }; | 518 | }; |
533 | 519 | ||
534 | struct nfs_writeverf { | 520 | struct nfs_pgio_res { |
535 | struct nfs_write_verifier verifier; | ||
536 | enum nfs3_stable_how committed; | ||
537 | }; | ||
538 | |||
539 | struct nfs_writeres { | ||
540 | struct nfs4_sequence_res seq_res; | 521 | struct nfs4_sequence_res seq_res; |
541 | struct nfs_fattr * fattr; | 522 | struct nfs_fattr * fattr; |
542 | struct nfs_writeverf * verf; | ||
543 | __u32 count; | 523 | __u32 count; |
544 | const struct nfs_server *server; | 524 | int eof; /* used by read */ |
525 | struct nfs_writeverf * verf; /* used by write */ | ||
526 | const struct nfs_server *server; /* used by write */ | ||
527 | |||
545 | }; | 528 | }; |
546 | 529 | ||
547 | /* | 530 | /* |
@@ -1129,6 +1112,7 @@ struct pnfs_commit_bucket { | |||
1129 | struct list_head committing; | 1112 | struct list_head committing; |
1130 | struct pnfs_layout_segment *wlseg; | 1113 | struct pnfs_layout_segment *wlseg; |
1131 | struct pnfs_layout_segment *clseg; | 1114 | struct pnfs_layout_segment *clseg; |
1115 | struct nfs_writeverf direct_verf; | ||
1132 | }; | 1116 | }; |
1133 | 1117 | ||
1134 | struct pnfs_ds_commit_info { | 1118 | struct pnfs_ds_commit_info { |
@@ -1264,20 +1248,6 @@ struct nfs_page_array { | |||
1264 | struct page *page_array[NFS_PAGEVEC_SIZE]; | 1248 | struct page *page_array[NFS_PAGEVEC_SIZE]; |
1265 | }; | 1249 | }; |
1266 | 1250 | ||
1267 | struct nfs_read_data { | ||
1268 | struct nfs_pgio_header *header; | ||
1269 | struct list_head list; | ||
1270 | struct rpc_task task; | ||
1271 | struct nfs_fattr fattr; /* fattr storage */ | ||
1272 | struct nfs_readargs args; | ||
1273 | struct nfs_readres res; | ||
1274 | unsigned long timestamp; /* For lease renewal */ | ||
1275 | int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data); | ||
1276 | __u64 mds_offset; | ||
1277 | struct nfs_page_array pages; | ||
1278 | struct nfs_client *ds_clp; /* pNFS data server */ | ||
1279 | }; | ||
1280 | |||
1281 | /* used as flag bits in nfs_pgio_header */ | 1251 | /* used as flag bits in nfs_pgio_header */ |
1282 | enum { | 1252 | enum { |
1283 | NFS_IOHDR_ERROR = 0, | 1253 | NFS_IOHDR_ERROR = 0, |
@@ -1287,19 +1257,22 @@ enum { | |||
1287 | NFS_IOHDR_NEED_RESCHED, | 1257 | NFS_IOHDR_NEED_RESCHED, |
1288 | }; | 1258 | }; |
1289 | 1259 | ||
1260 | struct nfs_pgio_data; | ||
1261 | |||
1290 | struct nfs_pgio_header { | 1262 | struct nfs_pgio_header { |
1291 | struct inode *inode; | 1263 | struct inode *inode; |
1292 | struct rpc_cred *cred; | 1264 | struct rpc_cred *cred; |
1293 | struct list_head pages; | 1265 | struct list_head pages; |
1294 | struct list_head rpc_list; | 1266 | struct nfs_pgio_data *data; |
1295 | atomic_t refcnt; | 1267 | atomic_t refcnt; |
1296 | struct nfs_page *req; | 1268 | struct nfs_page *req; |
1297 | struct nfs_writeverf *verf; | 1269 | struct nfs_writeverf verf; /* Used for writes */ |
1298 | struct pnfs_layout_segment *lseg; | 1270 | struct pnfs_layout_segment *lseg; |
1299 | loff_t io_start; | 1271 | loff_t io_start; |
1300 | const struct rpc_call_ops *mds_ops; | 1272 | const struct rpc_call_ops *mds_ops; |
1301 | void (*release) (struct nfs_pgio_header *hdr); | 1273 | void (*release) (struct nfs_pgio_header *hdr); |
1302 | const struct nfs_pgio_completion_ops *completion_ops; | 1274 | const struct nfs_pgio_completion_ops *completion_ops; |
1275 | const struct nfs_rw_ops *rw_ops; | ||
1303 | struct nfs_direct_req *dreq; | 1276 | struct nfs_direct_req *dreq; |
1304 | void *layout_private; | 1277 | void *layout_private; |
1305 | spinlock_t lock; | 1278 | spinlock_t lock; |
@@ -1310,30 +1283,24 @@ struct nfs_pgio_header { | |||
1310 | unsigned long flags; | 1283 | unsigned long flags; |
1311 | }; | 1284 | }; |
1312 | 1285 | ||
1313 | struct nfs_read_header { | 1286 | struct nfs_pgio_data { |
1314 | struct nfs_pgio_header header; | ||
1315 | struct nfs_read_data rpc_data; | ||
1316 | }; | ||
1317 | |||
1318 | struct nfs_write_data { | ||
1319 | struct nfs_pgio_header *header; | 1287 | struct nfs_pgio_header *header; |
1320 | struct list_head list; | ||
1321 | struct rpc_task task; | 1288 | struct rpc_task task; |
1322 | struct nfs_fattr fattr; | 1289 | struct nfs_fattr fattr; |
1323 | struct nfs_writeverf verf; | 1290 | struct nfs_writeverf verf; /* Used for writes */ |
1324 | struct nfs_writeargs args; /* argument struct */ | 1291 | struct nfs_pgio_args args; /* argument struct */ |
1325 | struct nfs_writeres res; /* result struct */ | 1292 | struct nfs_pgio_res res; /* result struct */ |
1326 | unsigned long timestamp; /* For lease renewal */ | 1293 | unsigned long timestamp; /* For lease renewal */ |
1327 | int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data); | 1294 | int (*pgio_done_cb) (struct rpc_task *task, struct nfs_pgio_data *data); |
1328 | __u64 mds_offset; /* Filelayout dense stripe */ | 1295 | __u64 mds_offset; /* Filelayout dense stripe */ |
1329 | struct nfs_page_array pages; | 1296 | struct nfs_page_array pages; |
1330 | struct nfs_client *ds_clp; /* pNFS data server */ | 1297 | struct nfs_client *ds_clp; /* pNFS data server */ |
1298 | int ds_idx; /* ds index if ds_clp is set */ | ||
1331 | }; | 1299 | }; |
1332 | 1300 | ||
1333 | struct nfs_write_header { | 1301 | struct nfs_rw_header { |
1334 | struct nfs_pgio_header header; | 1302 | struct nfs_pgio_header header; |
1335 | struct nfs_write_data rpc_data; | 1303 | struct nfs_pgio_data rpc_data; |
1336 | struct nfs_writeverf verf; | ||
1337 | }; | 1304 | }; |
1338 | 1305 | ||
1339 | struct nfs_mds_commit_info { | 1306 | struct nfs_mds_commit_info { |
@@ -1465,16 +1432,11 @@ struct nfs_rpc_ops { | |||
1465 | struct nfs_pathconf *); | 1432 | struct nfs_pathconf *); |
1466 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); | 1433 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); |
1467 | int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int); | 1434 | int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int); |
1468 | void (*read_setup) (struct nfs_read_data *, struct rpc_message *); | 1435 | int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_data *); |
1469 | void (*read_pageio_init)(struct nfs_pageio_descriptor *, struct inode *, | 1436 | void (*read_setup) (struct nfs_pgio_data *, struct rpc_message *); |
1470 | const struct nfs_pgio_completion_ops *); | 1437 | int (*read_done) (struct rpc_task *, struct nfs_pgio_data *); |
1471 | int (*read_rpc_prepare)(struct rpc_task *, struct nfs_read_data *); | 1438 | void (*write_setup) (struct nfs_pgio_data *, struct rpc_message *); |
1472 | int (*read_done) (struct rpc_task *, struct nfs_read_data *); | 1439 | int (*write_done) (struct rpc_task *, struct nfs_pgio_data *); |
1473 | void (*write_setup) (struct nfs_write_data *, struct rpc_message *); | ||
1474 | void (*write_pageio_init)(struct nfs_pageio_descriptor *, struct inode *, int, | ||
1475 | const struct nfs_pgio_completion_ops *); | ||
1476 | int (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *); | ||
1477 | int (*write_done) (struct rpc_task *, struct nfs_write_data *); | ||
1478 | void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *); | 1440 | void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *); |
1479 | void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); | 1441 | void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); |
1480 | int (*commit_done) (struct rpc_task *, struct nfs_commit_data *); | 1442 | int (*commit_done) (struct rpc_task *, struct nfs_commit_data *); |
diff --git a/include/linux/nfsd/debug.h b/include/linux/nfsd/debug.h deleted file mode 100644 index 19ef8375b577..000000000000 --- a/include/linux/nfsd/debug.h +++ /dev/null | |||
@@ -1,19 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/nfsd/debug.h | ||
3 | * | ||
4 | * Debugging-related stuff for nfsd | ||
5 | * | ||
6 | * Copyright (C) 1995 Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | #ifndef LINUX_NFSD_DEBUG_H | ||
9 | #define LINUX_NFSD_DEBUG_H | ||
10 | |||
11 | #include <uapi/linux/nfsd/debug.h> | ||
12 | |||
13 | # undef ifdebug | ||
14 | # ifdef NFSD_DEBUG | ||
15 | # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag) | ||
16 | # else | ||
17 | # define ifdebug(flag) if (0) | ||
18 | # endif | ||
19 | #endif /* LINUX_NFSD_DEBUG_H */ | ||
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h deleted file mode 100644 index 7898c997dfea..000000000000 --- a/include/linux/nfsd/export.h +++ /dev/null | |||
@@ -1,110 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/nfsd/export.h | ||
3 | * | ||
4 | * Public declarations for NFS exports. The definitions for the | ||
5 | * syscall interface are in nfsctl.h | ||
6 | * | ||
7 | * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> | ||
8 | */ | ||
9 | #ifndef NFSD_EXPORT_H | ||
10 | #define NFSD_EXPORT_H | ||
11 | |||
12 | # include <linux/nfsd/nfsfh.h> | ||
13 | #include <uapi/linux/nfsd/export.h> | ||
14 | |||
15 | /* | ||
16 | * FS Locations | ||
17 | */ | ||
18 | |||
19 | #define MAX_FS_LOCATIONS 128 | ||
20 | |||
21 | struct nfsd4_fs_location { | ||
22 | char *hosts; /* colon separated list of hosts */ | ||
23 | char *path; /* slash separated list of path components */ | ||
24 | }; | ||
25 | |||
26 | struct nfsd4_fs_locations { | ||
27 | uint32_t locations_count; | ||
28 | struct nfsd4_fs_location *locations; | ||
29 | /* If we're not actually serving this data ourselves (only providing a | ||
30 | * list of replicas that do serve it) then we set "migrated": */ | ||
31 | int migrated; | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * We keep an array of pseudoflavors with the export, in order from most | ||
36 | * to least preferred. For the foreseeable future, we don't expect more | ||
37 | * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, skpm3, | ||
38 | * spkm3i, and spkm3p (and using all 8 at once should be rare). | ||
39 | */ | ||
40 | #define MAX_SECINFO_LIST 8 | ||
41 | |||
42 | struct exp_flavor_info { | ||
43 | u32 pseudoflavor; | ||
44 | u32 flags; | ||
45 | }; | ||
46 | |||
47 | struct svc_export { | ||
48 | struct cache_head h; | ||
49 | struct auth_domain * ex_client; | ||
50 | int ex_flags; | ||
51 | struct path ex_path; | ||
52 | kuid_t ex_anon_uid; | ||
53 | kgid_t ex_anon_gid; | ||
54 | int ex_fsid; | ||
55 | unsigned char * ex_uuid; /* 16 byte fsid */ | ||
56 | struct nfsd4_fs_locations ex_fslocs; | ||
57 | int ex_nflavors; | ||
58 | struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST]; | ||
59 | struct cache_detail *cd; | ||
60 | }; | ||
61 | |||
62 | /* an "export key" (expkey) maps a filehandlefragement to an | ||
63 | * svc_export for a given client. There can be several per export, | ||
64 | * for the different fsid types. | ||
65 | */ | ||
66 | struct svc_expkey { | ||
67 | struct cache_head h; | ||
68 | |||
69 | struct auth_domain * ek_client; | ||
70 | int ek_fsidtype; | ||
71 | u32 ek_fsid[6]; | ||
72 | |||
73 | struct path ek_path; | ||
74 | }; | ||
75 | |||
76 | #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) | ||
77 | #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) | ||
78 | #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) | ||
79 | |||
80 | int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp); | ||
81 | __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp); | ||
82 | |||
83 | /* | ||
84 | * Function declarations | ||
85 | */ | ||
86 | int nfsd_export_init(struct net *); | ||
87 | void nfsd_export_shutdown(struct net *); | ||
88 | void nfsd_export_flush(struct net *); | ||
89 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, | ||
90 | struct path *); | ||
91 | struct svc_export * rqst_exp_parent(struct svc_rqst *, | ||
92 | struct path *); | ||
93 | struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *); | ||
94 | int exp_rootfh(struct net *, struct auth_domain *, | ||
95 | char *path, struct knfsd_fh *, int maxsize); | ||
96 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); | ||
97 | __be32 nfserrno(int errno); | ||
98 | |||
99 | static inline void exp_put(struct svc_export *exp) | ||
100 | { | ||
101 | cache_put(&exp->h, exp->cd); | ||
102 | } | ||
103 | |||
104 | static inline void exp_get(struct svc_export *exp) | ||
105 | { | ||
106 | cache_get(&exp->h); | ||
107 | } | ||
108 | struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *); | ||
109 | |||
110 | #endif /* NFSD_EXPORT_H */ | ||
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h deleted file mode 100644 index a93593f1fa4e..000000000000 --- a/include/linux/nfsd/nfsfh.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/nfsd/nfsfh.h | ||
3 | * | ||
4 | * This file describes the layout of the file handles as passed | ||
5 | * over the wire. | ||
6 | * | ||
7 | * Earlier versions of knfsd used to sign file handles using keyed MD5 | ||
8 | * or SHA. I've removed this code, because it doesn't give you more | ||
9 | * security than blocking external access to port 2049 on your firewall. | ||
10 | * | ||
11 | * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> | ||
12 | */ | ||
13 | #ifndef _LINUX_NFSD_FH_H | ||
14 | #define _LINUX_NFSD_FH_H | ||
15 | |||
16 | # include <linux/sunrpc/svc.h> | ||
17 | #include <uapi/linux/nfsd/nfsfh.h> | ||
18 | |||
19 | static inline __u32 ino_t_to_u32(ino_t ino) | ||
20 | { | ||
21 | return (__u32) ino; | ||
22 | } | ||
23 | |||
24 | static inline ino_t u32_to_ino_t(__u32 uino) | ||
25 | { | ||
26 | return (ino_t) uino; | ||
27 | } | ||
28 | |||
29 | /* | ||
30 | * This is the internal representation of an NFS handle used in knfsd. | ||
31 | * pre_mtime/post_version will be used to support wcc_attr's in NFSv3. | ||
32 | */ | ||
33 | typedef struct svc_fh { | ||
34 | struct knfsd_fh fh_handle; /* FH data */ | ||
35 | struct dentry * fh_dentry; /* validated dentry */ | ||
36 | struct svc_export * fh_export; /* export pointer */ | ||
37 | int fh_maxsize; /* max size for fh_handle */ | ||
38 | |||
39 | unsigned char fh_locked; /* inode locked by us */ | ||
40 | unsigned char fh_want_write; /* remount protection taken */ | ||
41 | |||
42 | #ifdef CONFIG_NFSD_V3 | ||
43 | unsigned char fh_post_saved; /* post-op attrs saved */ | ||
44 | unsigned char fh_pre_saved; /* pre-op attrs saved */ | ||
45 | |||
46 | /* Pre-op attributes saved during fh_lock */ | ||
47 | __u64 fh_pre_size; /* size before operation */ | ||
48 | struct timespec fh_pre_mtime; /* mtime before oper */ | ||
49 | struct timespec fh_pre_ctime; /* ctime before oper */ | ||
50 | /* | ||
51 | * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode) | ||
52 | * to find out if it is valid. | ||
53 | */ | ||
54 | u64 fh_pre_change; | ||
55 | |||
56 | /* Post-op attributes saved in fh_unlock */ | ||
57 | struct kstat fh_post_attr; /* full attrs after operation */ | ||
58 | u64 fh_post_change; /* nfsv4 change; see above */ | ||
59 | #endif /* CONFIG_NFSD_V3 */ | ||
60 | |||
61 | } svc_fh; | ||
62 | |||
63 | #endif /* _LINUX_NFSD_FH_H */ | ||
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h deleted file mode 100644 index e75b2544ff12..000000000000 --- a/include/linux/nfsd/stats.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/nfsd/stats.h | ||
3 | * | ||
4 | * Statistics for NFS server. | ||
5 | * | ||
6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | #ifndef LINUX_NFSD_STATS_H | ||
9 | #define LINUX_NFSD_STATS_H | ||
10 | |||
11 | #include <uapi/linux/nfsd/stats.h> | ||
12 | |||
13 | |||
14 | struct nfsd_stats { | ||
15 | unsigned int rchits; /* repcache hits */ | ||
16 | unsigned int rcmisses; /* repcache hits */ | ||
17 | unsigned int rcnocache; /* uncached reqs */ | ||
18 | unsigned int fh_stale; /* FH stale error */ | ||
19 | unsigned int fh_lookup; /* dentry cached */ | ||
20 | unsigned int fh_anon; /* anon file dentry returned */ | ||
21 | unsigned int fh_nocache_dir; /* filehandle not found in dcache */ | ||
22 | unsigned int fh_nocache_nondir; /* filehandle not found in dcache */ | ||
23 | unsigned int io_read; /* bytes returned to read requests */ | ||
24 | unsigned int io_write; /* bytes passed in write requests */ | ||
25 | unsigned int th_cnt; /* number of available threads */ | ||
26 | unsigned int th_usage[10]; /* number of ticks during which n perdeciles | ||
27 | * of available threads were in use */ | ||
28 | unsigned int th_fullcnt; /* number of times last free thread was used */ | ||
29 | unsigned int ra_size; /* size of ra cache */ | ||
30 | unsigned int ra_depth[11]; /* number of times ra entry was found that deep | ||
31 | * in the cache (10percentiles). [10] = not found */ | ||
32 | #ifdef CONFIG_NFSD_V4 | ||
33 | unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */ | ||
34 | #endif | ||
35 | |||
36 | }; | ||
37 | |||
38 | |||
39 | extern struct nfsd_stats nfsdstats; | ||
40 | extern struct svc_stat nfsd_svcstats; | ||
41 | |||
42 | void nfsd_stat_init(void); | ||
43 | void nfsd_stat_shutdown(void); | ||
44 | |||
45 | #endif /* LINUX_NFSD_STATS_H */ | ||
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index c8d7f3965fff..20163b9a0eae 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h | |||
@@ -80,6 +80,22 @@ enum { | |||
80 | 80 | ||
81 | IEEE802154_ATTR_FRAME_RETRIES, | 81 | IEEE802154_ATTR_FRAME_RETRIES, |
82 | 82 | ||
83 | IEEE802154_ATTR_LLSEC_ENABLED, | ||
84 | IEEE802154_ATTR_LLSEC_SECLEVEL, | ||
85 | IEEE802154_ATTR_LLSEC_KEY_MODE, | ||
86 | IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT, | ||
87 | IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, | ||
88 | IEEE802154_ATTR_LLSEC_KEY_ID, | ||
89 | IEEE802154_ATTR_LLSEC_FRAME_COUNTER, | ||
90 | IEEE802154_ATTR_LLSEC_KEY_BYTES, | ||
91 | IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES, | ||
92 | IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS, | ||
93 | IEEE802154_ATTR_LLSEC_FRAME_TYPE, | ||
94 | IEEE802154_ATTR_LLSEC_CMD_FRAME_ID, | ||
95 | IEEE802154_ATTR_LLSEC_SECLEVELS, | ||
96 | IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, | ||
97 | IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, | ||
98 | |||
83 | __IEEE802154_ATTR_MAX, | 99 | __IEEE802154_ATTR_MAX, |
84 | }; | 100 | }; |
85 | 101 | ||
@@ -134,6 +150,21 @@ enum { | |||
134 | 150 | ||
135 | IEEE802154_SET_MACPARAMS, | 151 | IEEE802154_SET_MACPARAMS, |
136 | 152 | ||
153 | IEEE802154_LLSEC_GETPARAMS, | ||
154 | IEEE802154_LLSEC_SETPARAMS, | ||
155 | IEEE802154_LLSEC_LIST_KEY, | ||
156 | IEEE802154_LLSEC_ADD_KEY, | ||
157 | IEEE802154_LLSEC_DEL_KEY, | ||
158 | IEEE802154_LLSEC_LIST_DEV, | ||
159 | IEEE802154_LLSEC_ADD_DEV, | ||
160 | IEEE802154_LLSEC_DEL_DEV, | ||
161 | IEEE802154_LLSEC_LIST_DEVKEY, | ||
162 | IEEE802154_LLSEC_ADD_DEVKEY, | ||
163 | IEEE802154_LLSEC_DEL_DEVKEY, | ||
164 | IEEE802154_LLSEC_LIST_SECLEVEL, | ||
165 | IEEE802154_LLSEC_ADD_SECLEVEL, | ||
166 | IEEE802154_LLSEC_DEL_SECLEVEL, | ||
167 | |||
137 | __IEEE802154_CMD_MAX, | 168 | __IEEE802154_CMD_MAX, |
138 | }; | 169 | }; |
139 | 170 | ||
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 6a45fb583ff1..447775ee2c4b 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void) | |||
32 | #ifdef arch_trigger_all_cpu_backtrace | 32 | #ifdef arch_trigger_all_cpu_backtrace |
33 | static inline bool trigger_all_cpu_backtrace(void) | 33 | static inline bool trigger_all_cpu_backtrace(void) |
34 | { | 34 | { |
35 | arch_trigger_all_cpu_backtrace(); | 35 | arch_trigger_all_cpu_backtrace(true); |
36 | 36 | ||
37 | return true; | 37 | return true; |
38 | } | 38 | } |
39 | static inline bool trigger_allbutself_cpu_backtrace(void) | ||
40 | { | ||
41 | arch_trigger_all_cpu_backtrace(false); | ||
42 | return true; | ||
43 | } | ||
39 | #else | 44 | #else |
40 | static inline bool trigger_all_cpu_backtrace(void) | 45 | static inline bool trigger_all_cpu_backtrace(void) |
41 | { | 46 | { |
42 | return false; | 47 | return false; |
43 | } | 48 | } |
49 | static inline bool trigger_allbutself_cpu_backtrace(void) | ||
50 | { | ||
51 | return false; | ||
52 | } | ||
44 | #endif | 53 | #endif |
45 | 54 | ||
46 | #ifdef CONFIG_LOCKUP_DETECTOR | 55 | #ifdef CONFIG_LOCKUP_DETECTOR |
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *); | |||
48 | u64 hw_nmi_get_sample_period(int watchdog_thresh); | 57 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
49 | extern int watchdog_user_enabled; | 58 | extern int watchdog_user_enabled; |
50 | extern int watchdog_thresh; | 59 | extern int watchdog_thresh; |
60 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
51 | struct ctl_table; | 61 | struct ctl_table; |
52 | extern int proc_dowatchdog(struct ctl_table *, int , | 62 | extern int proc_dowatchdog(struct ctl_table *, int , |
53 | void __user *, size_t *, loff_t *); | 63 | void __user *, size_t *, loff_t *); |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index a50173ca1d72..2bf403195c09 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Definitions for the NVM Express interface | 2 | * Definitions for the NVM Express interface |
3 | * Copyright (c) 2011-2013, Intel Corporation. | 3 | * Copyright (c) 2011-2014, Intel Corporation. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms and conditions of the GNU General Public License, | 6 | * under the terms and conditions of the GNU General Public License, |
@@ -10,10 +10,6 @@ | |||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | */ | 13 | */ |
18 | 14 | ||
19 | #ifndef _LINUX_NVME_H | 15 | #ifndef _LINUX_NVME_H |
@@ -66,8 +62,8 @@ enum { | |||
66 | 62 | ||
67 | #define NVME_VS(major, minor) (major << 16 | minor) | 63 | #define NVME_VS(major, minor) (major << 16 | minor) |
68 | 64 | ||
69 | extern unsigned char io_timeout; | 65 | extern unsigned char nvme_io_timeout; |
70 | #define NVME_IO_TIMEOUT (io_timeout * HZ) | 66 | #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) |
71 | 67 | ||
72 | /* | 68 | /* |
73 | * Represents an NVM Express device. Each nvme_dev is a PCI function. | 69 | * Represents an NVM Express device. Each nvme_dev is a PCI function. |
@@ -94,7 +90,7 @@ struct nvme_dev { | |||
94 | struct miscdevice miscdev; | 90 | struct miscdevice miscdev; |
95 | work_func_t reset_workfn; | 91 | work_func_t reset_workfn; |
96 | struct work_struct reset_work; | 92 | struct work_struct reset_work; |
97 | struct notifier_block nb; | 93 | struct work_struct cpu_work; |
98 | char name[12]; | 94 | char name[12]; |
99 | char serial[20]; | 95 | char serial[20]; |
100 | char model[40]; | 96 | char model[40]; |
@@ -103,6 +99,7 @@ struct nvme_dev { | |||
103 | u32 stripe_size; | 99 | u32 stripe_size; |
104 | u16 oncs; | 100 | u16 oncs; |
105 | u16 abort_limit; | 101 | u16 abort_limit; |
102 | u8 vwc; | ||
106 | u8 initialized; | 103 | u8 initialized; |
107 | }; | 104 | }; |
108 | 105 | ||
@@ -159,7 +156,6 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, | |||
159 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, | 156 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, |
160 | struct nvme_iod *iod); | 157 | struct nvme_iod *iod); |
161 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); | 158 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); |
162 | int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); | ||
163 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, | 159 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, |
164 | u32 *result); | 160 | u32 *result); |
165 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, | 161 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, |
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 05117899fcb4..0ff360d5b3b3 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h | |||
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname, | |||
73 | int depth, void *data); | 73 | int depth, void *data); |
74 | 74 | ||
75 | extern bool early_init_dt_scan(void *params); | 75 | extern bool early_init_dt_scan(void *params); |
76 | extern bool early_init_dt_verify(void *params); | ||
77 | extern void early_init_dt_scan_nodes(void); | ||
76 | 78 | ||
77 | extern const char *of_flat_dt_get_machine_name(void); | 79 | extern const char *of_flat_dt_get_machine_name(void); |
78 | extern const void *of_flat_dt_match_machine(const void *default_match, | 80 | extern const void *of_flat_dt_match_machine(const void *default_match, |
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void); | |||
84 | extern void early_init_devtree(void *); | 86 | extern void early_init_devtree(void *); |
85 | extern void early_get_first_memblock_info(void *, phys_addr_t *); | 87 | extern void early_get_first_memblock_info(void *, phys_addr_t *); |
86 | extern u64 fdt_translate_address(const void *blob, int node_offset); | 88 | extern u64 fdt_translate_address(const void *blob, int node_offset); |
89 | extern void of_fdt_limit_memory(int limit); | ||
87 | #else /* CONFIG_OF_FLATTREE */ | 90 | #else /* CONFIG_OF_FLATTREE */ |
88 | static inline void early_init_fdt_scan_reserved_mem(void) {} | 91 | static inline void early_init_fdt_scan_reserved_mem(void) {} |
89 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } | 92 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } |
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 881a7c3571f4..d449018d0726 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h | |||
@@ -22,9 +22,6 @@ extern struct phy_device *of_phy_connect(struct net_device *dev, | |||
22 | struct phy_device *of_phy_attach(struct net_device *dev, | 22 | struct phy_device *of_phy_attach(struct net_device *dev, |
23 | struct device_node *phy_np, u32 flags, | 23 | struct device_node *phy_np, u32 flags, |
24 | phy_interface_t iface); | 24 | phy_interface_t iface); |
25 | extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | ||
26 | void (*hndlr)(struct net_device *), | ||
27 | phy_interface_t iface); | ||
28 | 25 | ||
29 | extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); | 26 | extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); |
30 | 27 | ||
@@ -59,17 +56,25 @@ static inline struct phy_device *of_phy_attach(struct net_device *dev, | |||
59 | return NULL; | 56 | return NULL; |
60 | } | 57 | } |
61 | 58 | ||
62 | static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, | 59 | static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) |
63 | void (*hndlr)(struct net_device *), | ||
64 | phy_interface_t iface) | ||
65 | { | 60 | { |
66 | return NULL; | 61 | return NULL; |
67 | } | 62 | } |
63 | #endif /* CONFIG_OF */ | ||
68 | 64 | ||
69 | static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) | 65 | #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) |
66 | extern int of_phy_register_fixed_link(struct device_node *np); | ||
67 | extern bool of_phy_is_fixed_link(struct device_node *np); | ||
68 | #else | ||
69 | static inline int of_phy_register_fixed_link(struct device_node *np) | ||
70 | { | 70 | { |
71 | return NULL; | 71 | return -ENOSYS; |
72 | } | 72 | } |
73 | #endif /* CONFIG_OF */ | 73 | static inline bool of_phy_is_fixed_link(struct device_node *np) |
74 | { | ||
75 | return false; | ||
76 | } | ||
77 | #endif | ||
78 | |||
74 | 79 | ||
75 | #endif /* __LINUX_OF_MDIO_H */ | 80 | #endif /* __LINUX_OF_MDIO_H */ |
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index c29a6dee6bec..88e6ea4a5d36 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h | |||
@@ -1,23 +1,6 @@ | |||
1 | /* | ||
2 | * OMAP DMA Engine support | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef __LINUX_OMAP_DMA_H | 1 | #ifndef __LINUX_OMAP_DMA_H |
9 | #define __LINUX_OMAP_DMA_H | 2 | #define __LINUX_OMAP_DMA_H |
10 | 3 | #include <linux/omap-dmaengine.h> | |
11 | struct dma_chan; | ||
12 | |||
13 | #if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) | ||
14 | bool omap_dma_filter_fn(struct dma_chan *, void *); | ||
15 | #else | ||
16 | static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) | ||
17 | { | ||
18 | return false; | ||
19 | } | ||
20 | #endif | ||
21 | 4 | ||
22 | /* | 5 | /* |
23 | * Legacy OMAP DMA handling defines and functions | 6 | * Legacy OMAP DMA handling defines and functions |
diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h new file mode 100644 index 000000000000..8e6906c72e90 --- /dev/null +++ b/include/linux/omap-dmaengine.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * OMAP DMA Engine support | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef __LINUX_OMAP_DMAENGINE_H | ||
9 | #define __LINUX_OMAP_DMAENGINE_H | ||
10 | |||
11 | struct dma_chan; | ||
12 | |||
13 | #if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) | ||
14 | bool omap_dma_filter_fn(struct dma_chan *, void *); | ||
15 | #else | ||
16 | static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) | ||
17 | { | ||
18 | return false; | ||
19 | } | ||
20 | #endif | ||
21 | #endif /* __LINUX_OMAP_DMAENGINE_H */ | ||
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h new file mode 100644 index 000000000000..90230d5811c5 --- /dev/null +++ b/include/linux/osq_lock.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef __LINUX_OSQ_LOCK_H | ||
2 | #define __LINUX_OSQ_LOCK_H | ||
3 | |||
4 | /* | ||
5 | * An MCS like lock especially tailored for optimistic spinning for sleeping | ||
6 | * lock implementations (mutex, rwsem, etc). | ||
7 | */ | ||
8 | |||
9 | #define OSQ_UNLOCKED_VAL (0) | ||
10 | |||
11 | struct optimistic_spin_queue { | ||
12 | /* | ||
13 | * Stores an encoded value of the CPU # of the tail node in the queue. | ||
14 | * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL. | ||
15 | */ | ||
16 | atomic_t tail; | ||
17 | }; | ||
18 | |||
19 | /* Init macro and function. */ | ||
20 | #define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } | ||
21 | |||
22 | static inline void osq_lock_init(struct optimistic_spin_queue *lock) | ||
23 | { | ||
24 | atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); | ||
25 | } | ||
26 | |||
27 | #endif | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2093eb72785e..8304959ad336 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -319,13 +319,23 @@ CLEARPAGEFLAG(Uptodate, uptodate) | |||
319 | extern void cancel_dirty_page(struct page *page, unsigned int account_size); | 319 | extern void cancel_dirty_page(struct page *page, unsigned int account_size); |
320 | 320 | ||
321 | int test_clear_page_writeback(struct page *page); | 321 | int test_clear_page_writeback(struct page *page); |
322 | int test_set_page_writeback(struct page *page); | 322 | int __test_set_page_writeback(struct page *page, bool keep_write); |
323 | |||
324 | #define test_set_page_writeback(page) \ | ||
325 | __test_set_page_writeback(page, false) | ||
326 | #define test_set_page_writeback_keepwrite(page) \ | ||
327 | __test_set_page_writeback(page, true) | ||
323 | 328 | ||
324 | static inline void set_page_writeback(struct page *page) | 329 | static inline void set_page_writeback(struct page *page) |
325 | { | 330 | { |
326 | test_set_page_writeback(page); | 331 | test_set_page_writeback(page); |
327 | } | 332 | } |
328 | 333 | ||
334 | static inline void set_page_writeback_keepwrite(struct page *page) | ||
335 | { | ||
336 | test_set_page_writeback_keepwrite(page); | ||
337 | } | ||
338 | |||
329 | #ifdef CONFIG_PAGEFLAGS_EXTENDED | 339 | #ifdef CONFIG_PAGEFLAGS_EXTENDED |
330 | /* | 340 | /* |
331 | * System with lots of page flags available. This allows separate | 341 | * System with lots of page flags available. This allows separate |
@@ -350,6 +360,9 @@ static inline void ClearPageCompound(struct page *page) | |||
350 | ClearPageHead(page); | 360 | ClearPageHead(page); |
351 | } | 361 | } |
352 | #endif | 362 | #endif |
363 | |||
364 | #define PG_head_mask ((1L << PG_head)) | ||
365 | |||
353 | #else | 366 | #else |
354 | /* | 367 | /* |
355 | * Reduce page flag use as much as possible by overlapping | 368 | * Reduce page flag use as much as possible by overlapping |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0a97b583ee8d..e1474ae18c88 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping, | |||
399 | } | 399 | } |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * Get the offset in PAGE_SIZE. | ||
403 | * (TODO: hugepage should have ->index in PAGE_SIZE) | ||
404 | */ | ||
405 | static inline pgoff_t page_to_pgoff(struct page *page) | ||
406 | { | ||
407 | if (unlikely(PageHeadHuge(page))) | ||
408 | return page->index << compound_order(page); | ||
409 | else | ||
410 | return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | ||
411 | } | ||
412 | |||
413 | /* | ||
402 | * Return byte-offset into filesystem object for page. | 414 | * Return byte-offset into filesystem object for page. |
403 | */ | 415 | */ |
404 | static inline loff_t page_offset(struct page *page) | 416 | static inline loff_t page_offset(struct page *page) |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 71d9673c1b2c..6ed3647b38df 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -164,13 +164,17 @@ enum pci_dev_flags { | |||
164 | /* INTX_DISABLE in PCI_COMMAND register disables MSI | 164 | /* INTX_DISABLE in PCI_COMMAND register disables MSI |
165 | * generation too. | 165 | * generation too. |
166 | */ | 166 | */ |
167 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, | 167 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), |
168 | /* Device configuration is irrevocably lost if disabled into D3 */ | 168 | /* Device configuration is irrevocably lost if disabled into D3 */ |
169 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, | 169 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), |
170 | /* Provide indication device is assigned by a Virtual Machine Manager */ | 170 | /* Provide indication device is assigned by a Virtual Machine Manager */ |
171 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, | 171 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), |
172 | /* Flag for quirk use to store if quirk-specific ACS is enabled */ | 172 | /* Flag for quirk use to store if quirk-specific ACS is enabled */ |
173 | PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) 8, | 173 | PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), |
174 | /* Flag to indicate the device uses dma_alias_devfn */ | ||
175 | PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), | ||
176 | /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ | ||
177 | PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), | ||
174 | }; | 178 | }; |
175 | 179 | ||
176 | enum pci_irq_reroute_variant { | 180 | enum pci_irq_reroute_variant { |
@@ -268,6 +272,7 @@ struct pci_dev { | |||
268 | u8 rom_base_reg; /* which config register controls the ROM */ | 272 | u8 rom_base_reg; /* which config register controls the ROM */ |
269 | u8 pin; /* which interrupt pin this device uses */ | 273 | u8 pin; /* which interrupt pin this device uses */ |
270 | u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ | 274 | u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ |
275 | u8 dma_alias_devfn;/* devfn of DMA alias, if any */ | ||
271 | 276 | ||
272 | struct pci_driver *driver; /* which driver has allocated this device */ | 277 | struct pci_driver *driver; /* which driver has allocated this device */ |
273 | u64 dma_mask; /* Mask of the bits of bus address this | 278 | u64 dma_mask; /* Mask of the bits of bus address this |
@@ -973,6 +978,8 @@ int pci_try_reset_slot(struct pci_slot *slot); | |||
973 | int pci_probe_reset_bus(struct pci_bus *bus); | 978 | int pci_probe_reset_bus(struct pci_bus *bus); |
974 | int pci_reset_bus(struct pci_bus *bus); | 979 | int pci_reset_bus(struct pci_bus *bus); |
975 | int pci_try_reset_bus(struct pci_bus *bus); | 980 | int pci_try_reset_bus(struct pci_bus *bus); |
981 | void pci_reset_secondary_bus(struct pci_dev *dev); | ||
982 | void pcibios_reset_secondary_bus(struct pci_dev *dev); | ||
976 | void pci_reset_bridge_secondary_bus(struct pci_dev *dev); | 983 | void pci_reset_bridge_secondary_bus(struct pci_dev *dev); |
977 | void pci_update_resource(struct pci_dev *dev, int resno); | 984 | void pci_update_resource(struct pci_dev *dev, int resno); |
978 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 985 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
@@ -1181,7 +1188,6 @@ int pci_msix_vec_count(struct pci_dev *dev); | |||
1181 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); | 1188 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); |
1182 | void pci_msix_shutdown(struct pci_dev *dev); | 1189 | void pci_msix_shutdown(struct pci_dev *dev); |
1183 | void pci_disable_msix(struct pci_dev *dev); | 1190 | void pci_disable_msix(struct pci_dev *dev); |
1184 | void msi_remove_pci_irq_vectors(struct pci_dev *dev); | ||
1185 | void pci_restore_msi_state(struct pci_dev *dev); | 1191 | void pci_restore_msi_state(struct pci_dev *dev); |
1186 | int pci_msi_enabled(void); | 1192 | int pci_msi_enabled(void); |
1187 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); | 1193 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); |
@@ -1212,7 +1218,6 @@ static inline int pci_enable_msix(struct pci_dev *dev, | |||
1212 | { return -ENOSYS; } | 1218 | { return -ENOSYS; } |
1213 | static inline void pci_msix_shutdown(struct pci_dev *dev) { } | 1219 | static inline void pci_msix_shutdown(struct pci_dev *dev) { } |
1214 | static inline void pci_disable_msix(struct pci_dev *dev) { } | 1220 | static inline void pci_disable_msix(struct pci_dev *dev) { } |
1215 | static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { } | ||
1216 | static inline void pci_restore_msi_state(struct pci_dev *dev) { } | 1221 | static inline void pci_restore_msi_state(struct pci_dev *dev) { } |
1217 | static inline int pci_msi_enabled(void) { return 0; } | 1222 | static inline int pci_msi_enabled(void) { return 0; } |
1218 | static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, | 1223 | static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, |
@@ -1809,6 +1814,10 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) | |||
1809 | } | 1814 | } |
1810 | #endif | 1815 | #endif |
1811 | 1816 | ||
1817 | int pci_for_each_dma_alias(struct pci_dev *pdev, | ||
1818 | int (*fn)(struct pci_dev *pdev, | ||
1819 | u16 alias, void *data), void *data); | ||
1820 | |||
1812 | /** | 1821 | /** |
1813 | * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device | 1822 | * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device |
1814 | * @pdev: the PCI device | 1823 | * @pdev: the PCI device |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7fa31731c854..6ed0bb73a864 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -6,6 +6,8 @@ | |||
6 | * Do not add new entries to this file unless the definitions | 6 | * Do not add new entries to this file unless the definitions |
7 | * are shared between multiple drivers. | 7 | * are shared between multiple drivers. |
8 | */ | 8 | */ |
9 | #ifndef _LINUX_PCI_IDS_H | ||
10 | #define _LINUX_PCI_IDS_H | ||
9 | 11 | ||
10 | /* Device classes and subclasses */ | 12 | /* Device classes and subclasses */ |
11 | 13 | ||
@@ -2968,3 +2970,5 @@ | |||
2968 | #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 | 2970 | #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 |
2969 | 2971 | ||
2970 | #define PCI_VENDOR_ID_OCZ 0x1b85 | 2972 | #define PCI_VENDOR_ID_OCZ 0x1b85 |
2973 | |||
2974 | #endif /* _LINUX_PCI_IDS_H */ | ||
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index a5fc7d01aad6..cfd56046ecec 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -1,6 +1,40 @@ | |||
1 | /* | ||
2 | * linux/percpu-defs.h - basic definitions for percpu areas | ||
3 | * | ||
4 | * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. | ||
5 | * | ||
6 | * This file is separate from linux/percpu.h to avoid cyclic inclusion | ||
7 | * dependency from arch header files. Only to be included from | ||
8 | * asm/percpu.h. | ||
9 | * | ||
10 | * This file includes macros necessary to declare percpu sections and | ||
11 | * variables, and definitions of percpu accessors and operations. It | ||
12 | * should provide enough percpu features to arch header files even when | ||
13 | * they can only include asm/percpu.h to avoid cyclic inclusion dependency. | ||
14 | */ | ||
15 | |||
1 | #ifndef _LINUX_PERCPU_DEFS_H | 16 | #ifndef _LINUX_PERCPU_DEFS_H |
2 | #define _LINUX_PERCPU_DEFS_H | 17 | #define _LINUX_PERCPU_DEFS_H |
3 | 18 | ||
19 | #ifdef CONFIG_SMP | ||
20 | |||
21 | #ifdef MODULE | ||
22 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | ||
23 | #define PER_CPU_ALIGNED_SECTION "" | ||
24 | #else | ||
25 | #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" | ||
26 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" | ||
27 | #endif | ||
28 | #define PER_CPU_FIRST_SECTION "..first" | ||
29 | |||
30 | #else | ||
31 | |||
32 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | ||
33 | #define PER_CPU_ALIGNED_SECTION "..shared_aligned" | ||
34 | #define PER_CPU_FIRST_SECTION "" | ||
35 | |||
36 | #endif | ||
37 | |||
4 | /* | 38 | /* |
5 | * Base implementations of per-CPU variable declarations and definitions, where | 39 | * Base implementations of per-CPU variable declarations and definitions, where |
6 | * the section in which the variable is to be placed is provided by the | 40 | * the section in which the variable is to be placed is provided by the |
@@ -19,19 +53,6 @@ | |||
19 | __attribute__((section(".discard"), unused)) | 53 | __attribute__((section(".discard"), unused)) |
20 | 54 | ||
21 | /* | 55 | /* |
22 | * Macro which verifies @ptr is a percpu pointer without evaluating | ||
23 | * @ptr. This is to be used in percpu accessors to verify that the | ||
24 | * input parameter is a percpu pointer. | ||
25 | * | ||
26 | * + 0 is required in order to convert the pointer type from a | ||
27 | * potential array type to a pointer to a single item of the array. | ||
28 | */ | ||
29 | #define __verify_pcpu_ptr(ptr) do { \ | ||
30 | const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ | ||
31 | (void)__vpp_verify; \ | ||
32 | } while (0) | ||
33 | |||
34 | /* | ||
35 | * s390 and alpha modules require percpu variables to be defined as | 56 | * s390 and alpha modules require percpu variables to be defined as |
36 | * weak to force the compiler to generate GOT based external | 57 | * weak to force the compiler to generate GOT based external |
37 | * references for them. This is necessary because percpu sections | 58 | * references for them. This is necessary because percpu sections |
@@ -146,10 +167,10 @@ | |||
146 | * Declaration/definition used for per-CPU variables that must be read mostly. | 167 | * Declaration/definition used for per-CPU variables that must be read mostly. |
147 | */ | 168 | */ |
148 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ | 169 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ |
149 | DECLARE_PER_CPU_SECTION(type, name, "..readmostly") | 170 | DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") |
150 | 171 | ||
151 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ | 172 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ |
152 | DEFINE_PER_CPU_SECTION(type, name, "..readmostly") | 173 | DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") |
153 | 174 | ||
154 | /* | 175 | /* |
155 | * Intermodule exports for per-CPU variables. sparse forgets about | 176 | * Intermodule exports for per-CPU variables. sparse forgets about |
@@ -164,4 +185,337 @@ | |||
164 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) | 185 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) |
165 | #endif | 186 | #endif |
166 | 187 | ||
188 | /* | ||
189 | * Accessors and operations. | ||
190 | */ | ||
191 | #ifndef __ASSEMBLY__ | ||
192 | |||
193 | /* | ||
194 | * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating | ||
195 | * @ptr and is invoked once before a percpu area is accessed by all | ||
196 | * accessors and operations. This is performed in the generic part of | ||
197 | * percpu and arch overrides don't need to worry about it; however, if an | ||
198 | * arch wants to implement an arch-specific percpu accessor or operation, | ||
199 | * it may use __verify_pcpu_ptr() to verify the parameters. | ||
200 | * | ||
201 | * + 0 is required in order to convert the pointer type from a | ||
202 | * potential array type to a pointer to a single item of the array. | ||
203 | */ | ||
204 | #define __verify_pcpu_ptr(ptr) \ | ||
205 | do { \ | ||
206 | const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ | ||
207 | (void)__vpp_verify; \ | ||
208 | } while (0) | ||
209 | |||
210 | #ifdef CONFIG_SMP | ||
211 | |||
212 | /* | ||
213 | * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() | ||
214 | * to prevent the compiler from making incorrect assumptions about the | ||
215 | * pointer value. The weird cast keeps both GCC and sparse happy. | ||
216 | */ | ||
217 | #define SHIFT_PERCPU_PTR(__p, __offset) \ | ||
218 | RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) | ||
219 | |||
220 | #define per_cpu_ptr(ptr, cpu) \ | ||
221 | ({ \ | ||
222 | __verify_pcpu_ptr(ptr); \ | ||
223 | SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ | ||
224 | }) | ||
225 | |||
226 | #define raw_cpu_ptr(ptr) \ | ||
227 | ({ \ | ||
228 | __verify_pcpu_ptr(ptr); \ | ||
229 | arch_raw_cpu_ptr(ptr); \ | ||
230 | }) | ||
231 | |||
232 | #ifdef CONFIG_DEBUG_PREEMPT | ||
233 | #define this_cpu_ptr(ptr) \ | ||
234 | ({ \ | ||
235 | __verify_pcpu_ptr(ptr); \ | ||
236 | SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ | ||
237 | }) | ||
238 | #else | ||
239 | #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) | ||
240 | #endif | ||
241 | |||
242 | #else /* CONFIG_SMP */ | ||
243 | |||
244 | #define VERIFY_PERCPU_PTR(__p) \ | ||
245 | ({ \ | ||
246 | __verify_pcpu_ptr(__p); \ | ||
247 | (typeof(*(__p)) __kernel __force *)(__p); \ | ||
248 | }) | ||
249 | |||
250 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) | ||
251 | #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
252 | #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) | ||
253 | |||
254 | #endif /* CONFIG_SMP */ | ||
255 | |||
256 | #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) | ||
257 | #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) | ||
258 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | ||
259 | |||
260 | /* keep until we have removed all uses of __this_cpu_ptr */ | ||
261 | #define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr) | ||
262 | |||
263 | /* | ||
264 | * Must be an lvalue. Since @var must be a simple identifier, | ||
265 | * we force a syntax error here if it isn't. | ||
266 | */ | ||
267 | #define get_cpu_var(var) \ | ||
268 | (*({ \ | ||
269 | preempt_disable(); \ | ||
270 | this_cpu_ptr(&var); \ | ||
271 | })) | ||
272 | |||
273 | /* | ||
274 | * The weird & is necessary because sparse considers (void)(var) to be | ||
275 | * a direct dereference of percpu variable (var). | ||
276 | */ | ||
277 | #define put_cpu_var(var) \ | ||
278 | do { \ | ||
279 | (void)&(var); \ | ||
280 | preempt_enable(); \ | ||
281 | } while (0) | ||
282 | |||
283 | #define get_cpu_ptr(var) \ | ||
284 | ({ \ | ||
285 | preempt_disable(); \ | ||
286 | this_cpu_ptr(var); \ | ||
287 | }) | ||
288 | |||
289 | #define put_cpu_ptr(var) \ | ||
290 | do { \ | ||
291 | (void)(var); \ | ||
292 | preempt_enable(); \ | ||
293 | } while (0) | ||
294 | |||
295 | /* | ||
296 | * Branching function to split up a function into a set of functions that | ||
297 | * are called for different scalar sizes of the objects handled. | ||
298 | */ | ||
299 | |||
300 | extern void __bad_size_call_parameter(void); | ||
301 | |||
302 | #ifdef CONFIG_DEBUG_PREEMPT | ||
303 | extern void __this_cpu_preempt_check(const char *op); | ||
304 | #else | ||
305 | static inline void __this_cpu_preempt_check(const char *op) { } | ||
306 | #endif | ||
307 | |||
308 | #define __pcpu_size_call_return(stem, variable) \ | ||
309 | ({ \ | ||
310 | typeof(variable) pscr_ret__; \ | ||
311 | __verify_pcpu_ptr(&(variable)); \ | ||
312 | switch(sizeof(variable)) { \ | ||
313 | case 1: pscr_ret__ = stem##1(variable); break; \ | ||
314 | case 2: pscr_ret__ = stem##2(variable); break; \ | ||
315 | case 4: pscr_ret__ = stem##4(variable); break; \ | ||
316 | case 8: pscr_ret__ = stem##8(variable); break; \ | ||
317 | default: \ | ||
318 | __bad_size_call_parameter(); break; \ | ||
319 | } \ | ||
320 | pscr_ret__; \ | ||
321 | }) | ||
322 | |||
323 | #define __pcpu_size_call_return2(stem, variable, ...) \ | ||
324 | ({ \ | ||
325 | typeof(variable) pscr2_ret__; \ | ||
326 | __verify_pcpu_ptr(&(variable)); \ | ||
327 | switch(sizeof(variable)) { \ | ||
328 | case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ | ||
329 | case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ | ||
330 | case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ | ||
331 | case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ | ||
332 | default: \ | ||
333 | __bad_size_call_parameter(); break; \ | ||
334 | } \ | ||
335 | pscr2_ret__; \ | ||
336 | }) | ||
337 | |||
338 | /* | ||
339 | * Special handling for cmpxchg_double. cmpxchg_double is passed two | ||
340 | * percpu variables. The first has to be aligned to a double word | ||
341 | * boundary and the second has to follow directly thereafter. | ||
342 | * We enforce this on all architectures even if they don't support | ||
343 | * a double cmpxchg instruction, since it's a cheap requirement, and it | ||
344 | * avoids breaking the requirement for architectures with the instruction. | ||
345 | */ | ||
346 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ | ||
347 | ({ \ | ||
348 | bool pdcrb_ret__; \ | ||
349 | __verify_pcpu_ptr(&(pcp1)); \ | ||
350 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ | ||
351 | VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ | ||
352 | VM_BUG_ON((unsigned long)(&(pcp2)) != \ | ||
353 | (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ | ||
354 | switch(sizeof(pcp1)) { \ | ||
355 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ | ||
356 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ | ||
357 | case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ | ||
358 | case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ | ||
359 | default: \ | ||
360 | __bad_size_call_parameter(); break; \ | ||
361 | } \ | ||
362 | pdcrb_ret__; \ | ||
363 | }) | ||
364 | |||
365 | #define __pcpu_size_call(stem, variable, ...) \ | ||
366 | do { \ | ||
367 | __verify_pcpu_ptr(&(variable)); \ | ||
368 | switch(sizeof(variable)) { \ | ||
369 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
370 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
371 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
372 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
373 | default: \ | ||
374 | __bad_size_call_parameter();break; \ | ||
375 | } \ | ||
376 | } while (0) | ||
377 | |||
378 | /* | ||
379 | * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> | ||
380 | * | ||
381 | * Optimized manipulation for memory allocated through the per cpu | ||
382 | * allocator or for addresses of per cpu variables. | ||
383 | * | ||
384 | * These operations guarantee exclusivity of access for other operations | ||
385 | * on the *same* processor. The assumption is that per cpu data is only | ||
386 | * accessed by a single processor instance (the current one). | ||
387 | * | ||
388 | * The arch code can provide optimized implementation by defining macros | ||
389 | * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per | ||
390 | * cpu atomic operations for 2 byte sized RMW actions. If arch code does | ||
391 | * not provide operations for a scalar size then the fallback in the | ||
392 | * generic code will be used. | ||
393 | * | ||
394 | * cmpxchg_double replaces two adjacent scalars at once. The first two | ||
395 | * parameters are per cpu variables which have to be of the same size. A | ||
396 | * truth value is returned to indicate success or failure (since a double | ||
397 | * register result is difficult to handle). There is very limited hardware | ||
398 | * support for these operations, so only certain sizes may work. | ||
399 | */ | ||
400 | |||
401 | /* | ||
402 | * Operations for contexts where we do not want to do any checks for | ||
403 | * preemptions. Unless strictly necessary, always use [__]this_cpu_*() | ||
404 | * instead. | ||
405 | * | ||
406 | * If there is no other protection through preempt disable and/or disabling | ||
407 | * interrupts then one of these RMW operations can show unexpected behavior | ||
408 | * because the execution thread was rescheduled on another processor or an | ||
409 | * interrupt occurred and the same percpu variable was modified from the | ||
410 | * interrupt context. | ||
411 | */ | ||
412 | #define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) | ||
413 | #define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) | ||
414 | #define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) | ||
415 | #define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) | ||
416 | #define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) | ||
417 | #define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) | ||
418 | #define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) | ||
419 | #define raw_cpu_cmpxchg(pcp, oval, nval) \ | ||
420 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) | ||
421 | #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
422 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
423 | |||
424 | #define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) | ||
425 | #define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) | ||
426 | #define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) | ||
427 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
428 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
429 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
430 | |||
431 | /* | ||
432 | * Operations for contexts that are safe from preemption/interrupts. These | ||
433 | * operations verify that preemption is disabled. | ||
434 | */ | ||
435 | #define __this_cpu_read(pcp) \ | ||
436 | ({ \ | ||
437 | __this_cpu_preempt_check("read"); \ | ||
438 | raw_cpu_read(pcp); \ | ||
439 | }) | ||
440 | |||
441 | #define __this_cpu_write(pcp, val) \ | ||
442 | ({ \ | ||
443 | __this_cpu_preempt_check("write"); \ | ||
444 | raw_cpu_write(pcp, val); \ | ||
445 | }) | ||
446 | |||
447 | #define __this_cpu_add(pcp, val) \ | ||
448 | ({ \ | ||
449 | __this_cpu_preempt_check("add"); \ | ||
450 | raw_cpu_add(pcp, val); \ | ||
451 | }) | ||
452 | |||
453 | #define __this_cpu_and(pcp, val) \ | ||
454 | ({ \ | ||
455 | __this_cpu_preempt_check("and"); \ | ||
456 | raw_cpu_and(pcp, val); \ | ||
457 | }) | ||
458 | |||
459 | #define __this_cpu_or(pcp, val) \ | ||
460 | ({ \ | ||
461 | __this_cpu_preempt_check("or"); \ | ||
462 | raw_cpu_or(pcp, val); \ | ||
463 | }) | ||
464 | |||
465 | #define __this_cpu_add_return(pcp, val) \ | ||
466 | ({ \ | ||
467 | __this_cpu_preempt_check("add_return"); \ | ||
468 | raw_cpu_add_return(pcp, val); \ | ||
469 | }) | ||
470 | |||
471 | #define __this_cpu_xchg(pcp, nval) \ | ||
472 | ({ \ | ||
473 | __this_cpu_preempt_check("xchg"); \ | ||
474 | raw_cpu_xchg(pcp, nval); \ | ||
475 | }) | ||
476 | |||
477 | #define __this_cpu_cmpxchg(pcp, oval, nval) \ | ||
478 | ({ \ | ||
479 | __this_cpu_preempt_check("cmpxchg"); \ | ||
480 | raw_cpu_cmpxchg(pcp, oval, nval); \ | ||
481 | }) | ||
482 | |||
483 | #define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
484 | ({ __this_cpu_preempt_check("cmpxchg_double"); \ | ||
485 | raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ | ||
486 | }) | ||
487 | |||
488 | #define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) | ||
489 | #define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) | ||
490 | #define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) | ||
491 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
492 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
493 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
494 | |||
495 | /* | ||
496 | * Operations with implied preemption protection. These operations can be | ||
497 | * used without worrying about preemption. Note that interrupts may still | ||
498 | * occur while an operation is in progress and if the interrupt modifies | ||
499 | * the variable too then RMW actions may not be reliable. | ||
500 | */ | ||
501 | #define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) | ||
502 | #define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) | ||
503 | #define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) | ||
504 | #define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) | ||
505 | #define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) | ||
506 | #define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | ||
507 | #define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) | ||
508 | #define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
509 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
510 | #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
511 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
512 | |||
513 | #define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) | ||
514 | #define this_cpu_inc(pcp) this_cpu_add(pcp, 1) | ||
515 | #define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) | ||
516 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
517 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | ||
518 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | ||
519 | |||
520 | #endif /* __ASSEMBLY__ */ | ||
167 | #endif /* _LINUX_PERCPU_DEFS_H */ | 521 | #endif /* _LINUX_PERCPU_DEFS_H */ |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 0afb48fd449d..3dfbf237cd8f 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -57,11 +57,9 @@ struct percpu_ref { | |||
57 | atomic_t count; | 57 | atomic_t count; |
58 | /* | 58 | /* |
59 | * The low bit of the pointer indicates whether the ref is in percpu | 59 | * The low bit of the pointer indicates whether the ref is in percpu |
60 | * mode; if set, then get/put will manipulate the atomic_t (this is a | 60 | * mode; if set, then get/put will manipulate the atomic_t. |
61 | * hack because we need to keep the pointer around for | ||
62 | * percpu_ref_kill_rcu()) | ||
63 | */ | 61 | */ |
64 | unsigned __percpu *pcpu_count; | 62 | unsigned long pcpu_count_ptr; |
65 | percpu_ref_func_t *release; | 63 | percpu_ref_func_t *release; |
66 | percpu_ref_func_t *confirm_kill; | 64 | percpu_ref_func_t *confirm_kill; |
67 | struct rcu_head rcu; | 65 | struct rcu_head rcu; |
@@ -69,7 +67,8 @@ struct percpu_ref { | |||
69 | 67 | ||
70 | int __must_check percpu_ref_init(struct percpu_ref *ref, | 68 | int __must_check percpu_ref_init(struct percpu_ref *ref, |
71 | percpu_ref_func_t *release); | 69 | percpu_ref_func_t *release); |
72 | void percpu_ref_cancel_init(struct percpu_ref *ref); | 70 | void percpu_ref_reinit(struct percpu_ref *ref); |
71 | void percpu_ref_exit(struct percpu_ref *ref); | ||
73 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | 72 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, |
74 | percpu_ref_func_t *confirm_kill); | 73 | percpu_ref_func_t *confirm_kill); |
75 | 74 | ||
@@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) | |||
88 | return percpu_ref_kill_and_confirm(ref, NULL); | 87 | return percpu_ref_kill_and_confirm(ref, NULL); |
89 | } | 88 | } |
90 | 89 | ||
91 | #define PCPU_STATUS_BITS 2 | ||
92 | #define PCPU_STATUS_MASK ((1 << PCPU_STATUS_BITS) - 1) | ||
93 | #define PCPU_REF_PTR 0 | ||
94 | #define PCPU_REF_DEAD 1 | 90 | #define PCPU_REF_DEAD 1 |
95 | 91 | ||
96 | #define REF_STATUS(count) (((unsigned long) count) & PCPU_STATUS_MASK) | 92 | /* |
93 | * Internal helper. Don't use outside percpu-refcount proper. The | ||
94 | * function doesn't return the pointer and let the caller test it for NULL | ||
95 | * because doing so forces the compiler to generate two conditional | ||
96 | * branches as it can't assume that @ref->pcpu_count is not NULL. | ||
97 | */ | ||
98 | static inline bool __pcpu_ref_alive(struct percpu_ref *ref, | ||
99 | unsigned __percpu **pcpu_countp) | ||
100 | { | ||
101 | unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); | ||
102 | |||
103 | /* paired with smp_store_release() in percpu_ref_reinit() */ | ||
104 | smp_read_barrier_depends(); | ||
105 | |||
106 | if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) | ||
107 | return false; | ||
108 | |||
109 | *pcpu_countp = (unsigned __percpu *)pcpu_ptr; | ||
110 | return true; | ||
111 | } | ||
97 | 112 | ||
98 | /** | 113 | /** |
99 | * percpu_ref_get - increment a percpu refcount | 114 | * percpu_ref_get - increment a percpu refcount |
@@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref) | |||
107 | 122 | ||
108 | rcu_read_lock_sched(); | 123 | rcu_read_lock_sched(); |
109 | 124 | ||
110 | pcpu_count = ACCESS_ONCE(ref->pcpu_count); | 125 | if (__pcpu_ref_alive(ref, &pcpu_count)) |
111 | |||
112 | if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) | ||
113 | this_cpu_inc(*pcpu_count); | 126 | this_cpu_inc(*pcpu_count); |
114 | else | 127 | else |
115 | atomic_inc(&ref->count); | 128 | atomic_inc(&ref->count); |
@@ -121,6 +134,34 @@ static inline void percpu_ref_get(struct percpu_ref *ref) | |||
121 | * percpu_ref_tryget - try to increment a percpu refcount | 134 | * percpu_ref_tryget - try to increment a percpu refcount |
122 | * @ref: percpu_ref to try-get | 135 | * @ref: percpu_ref to try-get |
123 | * | 136 | * |
137 | * Increment a percpu refcount unless its count already reached zero. | ||
138 | * Returns %true on success; %false on failure. | ||
139 | * | ||
140 | * The caller is responsible for ensuring that @ref stays accessible. | ||
141 | */ | ||
142 | static inline bool percpu_ref_tryget(struct percpu_ref *ref) | ||
143 | { | ||
144 | unsigned __percpu *pcpu_count; | ||
145 | int ret = false; | ||
146 | |||
147 | rcu_read_lock_sched(); | ||
148 | |||
149 | if (__pcpu_ref_alive(ref, &pcpu_count)) { | ||
150 | this_cpu_inc(*pcpu_count); | ||
151 | ret = true; | ||
152 | } else { | ||
153 | ret = atomic_inc_not_zero(&ref->count); | ||
154 | } | ||
155 | |||
156 | rcu_read_unlock_sched(); | ||
157 | |||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * percpu_ref_tryget_live - try to increment a live percpu refcount | ||
163 | * @ref: percpu_ref to try-get | ||
164 | * | ||
124 | * Increment a percpu refcount unless it has already been killed. Returns | 165 | * Increment a percpu refcount unless it has already been killed. Returns |
125 | * %true on success; %false on failure. | 166 | * %true on success; %false on failure. |
126 | * | 167 | * |
@@ -128,17 +169,17 @@ static inline void percpu_ref_get(struct percpu_ref *ref) | |||
128 | * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be | 169 | * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be |
129 | * used. After the confirm_kill callback is invoked, it's guaranteed that | 170 | * used. After the confirm_kill callback is invoked, it's guaranteed that |
130 | * no new reference will be given out by percpu_ref_tryget(). | 171 | * no new reference will be given out by percpu_ref_tryget(). |
172 | * | ||
173 | * The caller is responsible for ensuring that @ref stays accessible. | ||
131 | */ | 174 | */ |
132 | static inline bool percpu_ref_tryget(struct percpu_ref *ref) | 175 | static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) |
133 | { | 176 | { |
134 | unsigned __percpu *pcpu_count; | 177 | unsigned __percpu *pcpu_count; |
135 | int ret = false; | 178 | int ret = false; |
136 | 179 | ||
137 | rcu_read_lock_sched(); | 180 | rcu_read_lock_sched(); |
138 | 181 | ||
139 | pcpu_count = ACCESS_ONCE(ref->pcpu_count); | 182 | if (__pcpu_ref_alive(ref, &pcpu_count)) { |
140 | |||
141 | if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { | ||
142 | this_cpu_inc(*pcpu_count); | 183 | this_cpu_inc(*pcpu_count); |
143 | ret = true; | 184 | ret = true; |
144 | } | 185 | } |
@@ -161,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref) | |||
161 | 202 | ||
162 | rcu_read_lock_sched(); | 203 | rcu_read_lock_sched(); |
163 | 204 | ||
164 | pcpu_count = ACCESS_ONCE(ref->pcpu_count); | 205 | if (__pcpu_ref_alive(ref, &pcpu_count)) |
165 | |||
166 | if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) | ||
167 | this_cpu_dec(*pcpu_count); | 206 | this_cpu_dec(*pcpu_count); |
168 | else if (unlikely(atomic_dec_and_test(&ref->count))) | 207 | else if (unlikely(atomic_dec_and_test(&ref->count))) |
169 | ref->release(ref); | 208 | ref->release(ref); |
@@ -171,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref) | |||
171 | rcu_read_unlock_sched(); | 210 | rcu_read_unlock_sched(); |
172 | } | 211 | } |
173 | 212 | ||
213 | /** | ||
214 | * percpu_ref_is_zero - test whether a percpu refcount reached zero | ||
215 | * @ref: percpu_ref to test | ||
216 | * | ||
217 | * Returns %true if @ref reached zero. | ||
218 | */ | ||
219 | static inline bool percpu_ref_is_zero(struct percpu_ref *ref) | ||
220 | { | ||
221 | unsigned __percpu *pcpu_count; | ||
222 | |||
223 | if (__pcpu_ref_alive(ref, &pcpu_count)) | ||
224 | return false; | ||
225 | return !atomic_read(&ref->count); | ||
226 | } | ||
227 | |||
174 | #endif | 228 | #endif |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 495c6543a8f2..6f61b61b7996 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -23,32 +23,6 @@ | |||
23 | PERCPU_MODULE_RESERVE) | 23 | PERCPU_MODULE_RESERVE) |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* | ||
27 | * Must be an lvalue. Since @var must be a simple identifier, | ||
28 | * we force a syntax error here if it isn't. | ||
29 | */ | ||
30 | #define get_cpu_var(var) (*({ \ | ||
31 | preempt_disable(); \ | ||
32 | &__get_cpu_var(var); })) | ||
33 | |||
34 | /* | ||
35 | * The weird & is necessary because sparse considers (void)(var) to be | ||
36 | * a direct dereference of percpu variable (var). | ||
37 | */ | ||
38 | #define put_cpu_var(var) do { \ | ||
39 | (void)&(var); \ | ||
40 | preempt_enable(); \ | ||
41 | } while (0) | ||
42 | |||
43 | #define get_cpu_ptr(var) ({ \ | ||
44 | preempt_disable(); \ | ||
45 | this_cpu_ptr(var); }) | ||
46 | |||
47 | #define put_cpu_ptr(var) do { \ | ||
48 | (void)(var); \ | ||
49 | preempt_enable(); \ | ||
50 | } while (0) | ||
51 | |||
52 | /* minimum unit size, also is the maximum supported allocation size */ | 26 | /* minimum unit size, also is the maximum supported allocation size */ |
53 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) | 27 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) |
54 | 28 | ||
@@ -140,17 +114,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, | |||
140 | pcpu_fc_populate_pte_fn_t populate_pte_fn); | 114 | pcpu_fc_populate_pte_fn_t populate_pte_fn); |
141 | #endif | 115 | #endif |
142 | 116 | ||
143 | /* | ||
144 | * Use this to get to a cpu's version of the per-cpu object | ||
145 | * dynamically allocated. Non-atomic access to the current CPU's | ||
146 | * version should probably be combined with get_cpu()/put_cpu(). | ||
147 | */ | ||
148 | #ifdef CONFIG_SMP | ||
149 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | ||
150 | #else | ||
151 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) | ||
152 | #endif | ||
153 | |||
154 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); | 117 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); |
155 | extern bool is_kernel_percpu_address(unsigned long addr); | 118 | extern bool is_kernel_percpu_address(unsigned long addr); |
156 | 119 | ||
@@ -166,640 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | |||
166 | #define alloc_percpu(type) \ | 129 | #define alloc_percpu(type) \ |
167 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) | 130 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) |
168 | 131 | ||
169 | /* | ||
170 | * Branching function to split up a function into a set of functions that | ||
171 | * are called for different scalar sizes of the objects handled. | ||
172 | */ | ||
173 | |||
174 | extern void __bad_size_call_parameter(void); | ||
175 | |||
176 | #ifdef CONFIG_DEBUG_PREEMPT | ||
177 | extern void __this_cpu_preempt_check(const char *op); | ||
178 | #else | ||
179 | static inline void __this_cpu_preempt_check(const char *op) { } | ||
180 | #endif | ||
181 | |||
182 | #define __pcpu_size_call_return(stem, variable) \ | ||
183 | ({ typeof(variable) pscr_ret__; \ | ||
184 | __verify_pcpu_ptr(&(variable)); \ | ||
185 | switch(sizeof(variable)) { \ | ||
186 | case 1: pscr_ret__ = stem##1(variable);break; \ | ||
187 | case 2: pscr_ret__ = stem##2(variable);break; \ | ||
188 | case 4: pscr_ret__ = stem##4(variable);break; \ | ||
189 | case 8: pscr_ret__ = stem##8(variable);break; \ | ||
190 | default: \ | ||
191 | __bad_size_call_parameter();break; \ | ||
192 | } \ | ||
193 | pscr_ret__; \ | ||
194 | }) | ||
195 | |||
196 | #define __pcpu_size_call_return2(stem, variable, ...) \ | ||
197 | ({ \ | ||
198 | typeof(variable) pscr2_ret__; \ | ||
199 | __verify_pcpu_ptr(&(variable)); \ | ||
200 | switch(sizeof(variable)) { \ | ||
201 | case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ | ||
202 | case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ | ||
203 | case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ | ||
204 | case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ | ||
205 | default: \ | ||
206 | __bad_size_call_parameter(); break; \ | ||
207 | } \ | ||
208 | pscr2_ret__; \ | ||
209 | }) | ||
210 | |||
211 | /* | ||
212 | * Special handling for cmpxchg_double. cmpxchg_double is passed two | ||
213 | * percpu variables. The first has to be aligned to a double word | ||
214 | * boundary and the second has to follow directly thereafter. | ||
215 | * We enforce this on all architectures even if they don't support | ||
216 | * a double cmpxchg instruction, since it's a cheap requirement, and it | ||
217 | * avoids breaking the requirement for architectures with the instruction. | ||
218 | */ | ||
219 | #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ | ||
220 | ({ \ | ||
221 | bool pdcrb_ret__; \ | ||
222 | __verify_pcpu_ptr(&pcp1); \ | ||
223 | BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ | ||
224 | VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ | ||
225 | VM_BUG_ON((unsigned long)(&pcp2) != \ | ||
226 | (unsigned long)(&pcp1) + sizeof(pcp1)); \ | ||
227 | switch(sizeof(pcp1)) { \ | ||
228 | case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ | ||
229 | case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ | ||
230 | case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ | ||
231 | case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ | ||
232 | default: \ | ||
233 | __bad_size_call_parameter(); break; \ | ||
234 | } \ | ||
235 | pdcrb_ret__; \ | ||
236 | }) | ||
237 | |||
238 | #define __pcpu_size_call(stem, variable, ...) \ | ||
239 | do { \ | ||
240 | __verify_pcpu_ptr(&(variable)); \ | ||
241 | switch(sizeof(variable)) { \ | ||
242 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
243 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
244 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
245 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
246 | default: \ | ||
247 | __bad_size_call_parameter();break; \ | ||
248 | } \ | ||
249 | } while (0) | ||
250 | |||
251 | /* | ||
252 | * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> | ||
253 | * | ||
254 | * Optimized manipulation for memory allocated through the per cpu | ||
255 | * allocator or for addresses of per cpu variables. | ||
256 | * | ||
257 | * These operation guarantee exclusivity of access for other operations | ||
258 | * on the *same* processor. The assumption is that per cpu data is only | ||
259 | * accessed by a single processor instance (the current one). | ||
260 | * | ||
261 | * The first group is used for accesses that must be done in a | ||
262 | * preemption safe way since we know that the context is not preempt | ||
263 | * safe. Interrupts may occur. If the interrupt modifies the variable | ||
264 | * too then RMW actions will not be reliable. | ||
265 | * | ||
266 | * The arch code can provide optimized functions in two ways: | ||
267 | * | ||
268 | * 1. Override the function completely. F.e. define this_cpu_add(). | ||
269 | * The arch must then ensure that the various scalar format passed | ||
270 | * are handled correctly. | ||
271 | * | ||
272 | * 2. Provide functions for certain scalar sizes. F.e. provide | ||
273 | * this_cpu_add_2() to provide per cpu atomic operations for 2 byte | ||
274 | * sized RMW actions. If arch code does not provide operations for | ||
275 | * a scalar size then the fallback in the generic code will be | ||
276 | * used. | ||
277 | */ | ||
278 | |||
279 | #define _this_cpu_generic_read(pcp) \ | ||
280 | ({ typeof(pcp) ret__; \ | ||
281 | preempt_disable(); \ | ||
282 | ret__ = *this_cpu_ptr(&(pcp)); \ | ||
283 | preempt_enable(); \ | ||
284 | ret__; \ | ||
285 | }) | ||
286 | |||
287 | #ifndef this_cpu_read | ||
288 | # ifndef this_cpu_read_1 | ||
289 | # define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) | ||
290 | # endif | ||
291 | # ifndef this_cpu_read_2 | ||
292 | # define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) | ||
293 | # endif | ||
294 | # ifndef this_cpu_read_4 | ||
295 | # define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) | ||
296 | # endif | ||
297 | # ifndef this_cpu_read_8 | ||
298 | # define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) | ||
299 | # endif | ||
300 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | ||
301 | #endif | ||
302 | |||
303 | #define _this_cpu_generic_to_op(pcp, val, op) \ | ||
304 | do { \ | ||
305 | unsigned long flags; \ | ||
306 | raw_local_irq_save(flags); \ | ||
307 | *raw_cpu_ptr(&(pcp)) op val; \ | ||
308 | raw_local_irq_restore(flags); \ | ||
309 | } while (0) | ||
310 | |||
311 | #ifndef this_cpu_write | ||
312 | # ifndef this_cpu_write_1 | ||
313 | # define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
314 | # endif | ||
315 | # ifndef this_cpu_write_2 | ||
316 | # define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
317 | # endif | ||
318 | # ifndef this_cpu_write_4 | ||
319 | # define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
320 | # endif | ||
321 | # ifndef this_cpu_write_8 | ||
322 | # define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
323 | # endif | ||
324 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | ||
325 | #endif | ||
326 | |||
327 | #ifndef this_cpu_add | ||
328 | # ifndef this_cpu_add_1 | ||
329 | # define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
330 | # endif | ||
331 | # ifndef this_cpu_add_2 | ||
332 | # define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
333 | # endif | ||
334 | # ifndef this_cpu_add_4 | ||
335 | # define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
336 | # endif | ||
337 | # ifndef this_cpu_add_8 | ||
338 | # define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
339 | # endif | ||
340 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | ||
341 | #endif | ||
342 | |||
343 | #ifndef this_cpu_sub | ||
344 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
345 | #endif | ||
346 | |||
347 | #ifndef this_cpu_inc | ||
348 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | ||
349 | #endif | ||
350 | |||
351 | #ifndef this_cpu_dec | ||
352 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | ||
353 | #endif | ||
354 | |||
355 | #ifndef this_cpu_and | ||
356 | # ifndef this_cpu_and_1 | ||
357 | # define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
358 | # endif | ||
359 | # ifndef this_cpu_and_2 | ||
360 | # define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
361 | # endif | ||
362 | # ifndef this_cpu_and_4 | ||
363 | # define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
364 | # endif | ||
365 | # ifndef this_cpu_and_8 | ||
366 | # define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
367 | # endif | ||
368 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | ||
369 | #endif | ||
370 | |||
371 | #ifndef this_cpu_or | ||
372 | # ifndef this_cpu_or_1 | ||
373 | # define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
374 | # endif | ||
375 | # ifndef this_cpu_or_2 | ||
376 | # define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
377 | # endif | ||
378 | # ifndef this_cpu_or_4 | ||
379 | # define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
380 | # endif | ||
381 | # ifndef this_cpu_or_8 | ||
382 | # define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
383 | # endif | ||
384 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | ||
385 | #endif | ||
386 | |||
387 | #define _this_cpu_generic_add_return(pcp, val) \ | ||
388 | ({ \ | ||
389 | typeof(pcp) ret__; \ | ||
390 | unsigned long flags; \ | ||
391 | raw_local_irq_save(flags); \ | ||
392 | raw_cpu_add(pcp, val); \ | ||
393 | ret__ = raw_cpu_read(pcp); \ | ||
394 | raw_local_irq_restore(flags); \ | ||
395 | ret__; \ | ||
396 | }) | ||
397 | |||
398 | #ifndef this_cpu_add_return | ||
399 | # ifndef this_cpu_add_return_1 | ||
400 | # define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
401 | # endif | ||
402 | # ifndef this_cpu_add_return_2 | ||
403 | # define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
404 | # endif | ||
405 | # ifndef this_cpu_add_return_4 | ||
406 | # define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
407 | # endif | ||
408 | # ifndef this_cpu_add_return_8 | ||
409 | # define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
410 | # endif | ||
411 | # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | ||
412 | #endif | ||
413 | |||
414 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
415 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | ||
416 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | ||
417 | |||
418 | #define _this_cpu_generic_xchg(pcp, nval) \ | ||
419 | ({ typeof(pcp) ret__; \ | ||
420 | unsigned long flags; \ | ||
421 | raw_local_irq_save(flags); \ | ||
422 | ret__ = raw_cpu_read(pcp); \ | ||
423 | raw_cpu_write(pcp, nval); \ | ||
424 | raw_local_irq_restore(flags); \ | ||
425 | ret__; \ | ||
426 | }) | ||
427 | |||
428 | #ifndef this_cpu_xchg | ||
429 | # ifndef this_cpu_xchg_1 | ||
430 | # define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
431 | # endif | ||
432 | # ifndef this_cpu_xchg_2 | ||
433 | # define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
434 | # endif | ||
435 | # ifndef this_cpu_xchg_4 | ||
436 | # define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
437 | # endif | ||
438 | # ifndef this_cpu_xchg_8 | ||
439 | # define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
440 | # endif | ||
441 | # define this_cpu_xchg(pcp, nval) \ | ||
442 | __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) | ||
443 | #endif | ||
444 | |||
445 | #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ | ||
446 | ({ \ | ||
447 | typeof(pcp) ret__; \ | ||
448 | unsigned long flags; \ | ||
449 | raw_local_irq_save(flags); \ | ||
450 | ret__ = raw_cpu_read(pcp); \ | ||
451 | if (ret__ == (oval)) \ | ||
452 | raw_cpu_write(pcp, nval); \ | ||
453 | raw_local_irq_restore(flags); \ | ||
454 | ret__; \ | ||
455 | }) | ||
456 | |||
457 | #ifndef this_cpu_cmpxchg | ||
458 | # ifndef this_cpu_cmpxchg_1 | ||
459 | # define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
460 | # endif | ||
461 | # ifndef this_cpu_cmpxchg_2 | ||
462 | # define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
463 | # endif | ||
464 | # ifndef this_cpu_cmpxchg_4 | ||
465 | # define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
466 | # endif | ||
467 | # ifndef this_cpu_cmpxchg_8 | ||
468 | # define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
469 | # endif | ||
470 | # define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
471 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
472 | #endif | ||
473 | |||
474 | /* | ||
475 | * cmpxchg_double replaces two adjacent scalars at once. The first | ||
476 | * two parameters are per cpu variables which have to be of the same | ||
477 | * size. A truth value is returned to indicate success or failure | ||
478 | * (since a double register result is difficult to handle). There is | ||
479 | * very limited hardware support for these operations, so only certain | ||
480 | * sizes may work. | ||
481 | */ | ||
482 | #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
483 | ({ \ | ||
484 | int ret__; \ | ||
485 | unsigned long flags; \ | ||
486 | raw_local_irq_save(flags); \ | ||
487 | ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ | ||
488 | oval1, oval2, nval1, nval2); \ | ||
489 | raw_local_irq_restore(flags); \ | ||
490 | ret__; \ | ||
491 | }) | ||
492 | |||
493 | #ifndef this_cpu_cmpxchg_double | ||
494 | # ifndef this_cpu_cmpxchg_double_1 | ||
495 | # define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
496 | _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
497 | # endif | ||
498 | # ifndef this_cpu_cmpxchg_double_2 | ||
499 | # define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
500 | _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
501 | # endif | ||
502 | # ifndef this_cpu_cmpxchg_double_4 | ||
503 | # define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
504 | _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
505 | # endif | ||
506 | # ifndef this_cpu_cmpxchg_double_8 | ||
507 | # define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
508 | _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
509 | # endif | ||
510 | # define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
511 | __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
512 | #endif | ||
513 | |||
514 | /* | ||
515 | * Generic percpu operations for contexts where we do not want to do | ||
516 | * any checks for preemptiosn. | ||
517 | * | ||
518 | * If there is no other protection through preempt disable and/or | ||
519 | * disabling interupts then one of these RMW operations can show unexpected | ||
520 | * behavior because the execution thread was rescheduled on another processor | ||
521 | * or an interrupt occurred and the same percpu variable was modified from | ||
522 | * the interrupt context. | ||
523 | */ | ||
524 | #ifndef raw_cpu_read | ||
525 | # ifndef raw_cpu_read_1 | ||
526 | # define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) | ||
527 | # endif | ||
528 | # ifndef raw_cpu_read_2 | ||
529 | # define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) | ||
530 | # endif | ||
531 | # ifndef raw_cpu_read_4 | ||
532 | # define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) | ||
533 | # endif | ||
534 | # ifndef raw_cpu_read_8 | ||
535 | # define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) | ||
536 | # endif | ||
537 | # define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) | ||
538 | #endif | ||
539 | |||
540 | #define raw_cpu_generic_to_op(pcp, val, op) \ | ||
541 | do { \ | ||
542 | *raw_cpu_ptr(&(pcp)) op val; \ | ||
543 | } while (0) | ||
544 | |||
545 | |||
546 | #ifndef raw_cpu_write | ||
547 | # ifndef raw_cpu_write_1 | ||
548 | # define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) | ||
549 | # endif | ||
550 | # ifndef raw_cpu_write_2 | ||
551 | # define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) | ||
552 | # endif | ||
553 | # ifndef raw_cpu_write_4 | ||
554 | # define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) | ||
555 | # endif | ||
556 | # ifndef raw_cpu_write_8 | ||
557 | # define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) | ||
558 | # endif | ||
559 | # define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) | ||
560 | #endif | ||
561 | |||
562 | #ifndef raw_cpu_add | ||
563 | # ifndef raw_cpu_add_1 | ||
564 | # define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) | ||
565 | # endif | ||
566 | # ifndef raw_cpu_add_2 | ||
567 | # define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) | ||
568 | # endif | ||
569 | # ifndef raw_cpu_add_4 | ||
570 | # define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) | ||
571 | # endif | ||
572 | # ifndef raw_cpu_add_8 | ||
573 | # define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) | ||
574 | # endif | ||
575 | # define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) | ||
576 | #endif | ||
577 | |||
578 | #ifndef raw_cpu_sub | ||
579 | # define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) | ||
580 | #endif | ||
581 | |||
582 | #ifndef raw_cpu_inc | ||
583 | # define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) | ||
584 | #endif | ||
585 | |||
586 | #ifndef raw_cpu_dec | ||
587 | # define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) | ||
588 | #endif | ||
589 | |||
590 | #ifndef raw_cpu_and | ||
591 | # ifndef raw_cpu_and_1 | ||
592 | # define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) | ||
593 | # endif | ||
594 | # ifndef raw_cpu_and_2 | ||
595 | # define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) | ||
596 | # endif | ||
597 | # ifndef raw_cpu_and_4 | ||
598 | # define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) | ||
599 | # endif | ||
600 | # ifndef raw_cpu_and_8 | ||
601 | # define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) | ||
602 | # endif | ||
603 | # define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) | ||
604 | #endif | ||
605 | |||
606 | #ifndef raw_cpu_or | ||
607 | # ifndef raw_cpu_or_1 | ||
608 | # define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) | ||
609 | # endif | ||
610 | # ifndef raw_cpu_or_2 | ||
611 | # define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) | ||
612 | # endif | ||
613 | # ifndef raw_cpu_or_4 | ||
614 | # define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) | ||
615 | # endif | ||
616 | # ifndef raw_cpu_or_8 | ||
617 | # define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) | ||
618 | # endif | ||
619 | # define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) | ||
620 | #endif | ||
621 | |||
622 | #define raw_cpu_generic_add_return(pcp, val) \ | ||
623 | ({ \ | ||
624 | raw_cpu_add(pcp, val); \ | ||
625 | raw_cpu_read(pcp); \ | ||
626 | }) | ||
627 | |||
628 | #ifndef raw_cpu_add_return | ||
629 | # ifndef raw_cpu_add_return_1 | ||
630 | # define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) | ||
631 | # endif | ||
632 | # ifndef raw_cpu_add_return_2 | ||
633 | # define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) | ||
634 | # endif | ||
635 | # ifndef raw_cpu_add_return_4 | ||
636 | # define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) | ||
637 | # endif | ||
638 | # ifndef raw_cpu_add_return_8 | ||
639 | # define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) | ||
640 | # endif | ||
641 | # define raw_cpu_add_return(pcp, val) \ | ||
642 | __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) | ||
643 | #endif | ||
644 | |||
645 | #define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
646 | #define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) | ||
647 | #define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) | ||
648 | |||
649 | #define raw_cpu_generic_xchg(pcp, nval) \ | ||
650 | ({ typeof(pcp) ret__; \ | ||
651 | ret__ = raw_cpu_read(pcp); \ | ||
652 | raw_cpu_write(pcp, nval); \ | ||
653 | ret__; \ | ||
654 | }) | ||
655 | |||
656 | #ifndef raw_cpu_xchg | ||
657 | # ifndef raw_cpu_xchg_1 | ||
658 | # define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) | ||
659 | # endif | ||
660 | # ifndef raw_cpu_xchg_2 | ||
661 | # define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) | ||
662 | # endif | ||
663 | # ifndef raw_cpu_xchg_4 | ||
664 | # define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) | ||
665 | # endif | ||
666 | # ifndef raw_cpu_xchg_8 | ||
667 | # define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) | ||
668 | # endif | ||
669 | # define raw_cpu_xchg(pcp, nval) \ | ||
670 | __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) | ||
671 | #endif | ||
672 | |||
673 | #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ | ||
674 | ({ \ | ||
675 | typeof(pcp) ret__; \ | ||
676 | ret__ = raw_cpu_read(pcp); \ | ||
677 | if (ret__ == (oval)) \ | ||
678 | raw_cpu_write(pcp, nval); \ | ||
679 | ret__; \ | ||
680 | }) | ||
681 | |||
682 | #ifndef raw_cpu_cmpxchg | ||
683 | # ifndef raw_cpu_cmpxchg_1 | ||
684 | # define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) | ||
685 | # endif | ||
686 | # ifndef raw_cpu_cmpxchg_2 | ||
687 | # define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) | ||
688 | # endif | ||
689 | # ifndef raw_cpu_cmpxchg_4 | ||
690 | # define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) | ||
691 | # endif | ||
692 | # ifndef raw_cpu_cmpxchg_8 | ||
693 | # define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) | ||
694 | # endif | ||
695 | # define raw_cpu_cmpxchg(pcp, oval, nval) \ | ||
696 | __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) | ||
697 | #endif | ||
698 | |||
699 | #define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
700 | ({ \ | ||
701 | int __ret = 0; \ | ||
702 | if (raw_cpu_read(pcp1) == (oval1) && \ | ||
703 | raw_cpu_read(pcp2) == (oval2)) { \ | ||
704 | raw_cpu_write(pcp1, (nval1)); \ | ||
705 | raw_cpu_write(pcp2, (nval2)); \ | ||
706 | __ret = 1; \ | ||
707 | } \ | ||
708 | (__ret); \ | ||
709 | }) | ||
710 | |||
711 | #ifndef raw_cpu_cmpxchg_double | ||
712 | # ifndef raw_cpu_cmpxchg_double_1 | ||
713 | # define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
714 | raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
715 | # endif | ||
716 | # ifndef raw_cpu_cmpxchg_double_2 | ||
717 | # define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
718 | raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
719 | # endif | ||
720 | # ifndef raw_cpu_cmpxchg_double_4 | ||
721 | # define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
722 | raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
723 | # endif | ||
724 | # ifndef raw_cpu_cmpxchg_double_8 | ||
725 | # define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
726 | raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) | ||
727 | # endif | ||
728 | # define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
729 | __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) | ||
730 | #endif | ||
731 | |||
732 | /* | ||
733 | * Generic percpu operations for context that are safe from preemption/interrupts. | ||
734 | */ | ||
735 | #ifndef __this_cpu_read | ||
736 | # define __this_cpu_read(pcp) \ | ||
737 | (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) | ||
738 | #endif | ||
739 | |||
740 | #ifndef __this_cpu_write | ||
741 | # define __this_cpu_write(pcp, val) \ | ||
742 | do { __this_cpu_preempt_check("write"); \ | ||
743 | __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ | ||
744 | } while (0) | ||
745 | #endif | ||
746 | |||
747 | #ifndef __this_cpu_add | ||
748 | # define __this_cpu_add(pcp, val) \ | ||
749 | do { __this_cpu_preempt_check("add"); \ | ||
750 | __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ | ||
751 | } while (0) | ||
752 | #endif | ||
753 | |||
754 | #ifndef __this_cpu_sub | ||
755 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) | ||
756 | #endif | ||
757 | |||
758 | #ifndef __this_cpu_inc | ||
759 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
760 | #endif | ||
761 | |||
762 | #ifndef __this_cpu_dec | ||
763 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
764 | #endif | ||
765 | |||
766 | #ifndef __this_cpu_and | ||
767 | # define __this_cpu_and(pcp, val) \ | ||
768 | do { __this_cpu_preempt_check("and"); \ | ||
769 | __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ | ||
770 | } while (0) | ||
771 | |||
772 | #endif | ||
773 | |||
774 | #ifndef __this_cpu_or | ||
775 | # define __this_cpu_or(pcp, val) \ | ||
776 | do { __this_cpu_preempt_check("or"); \ | ||
777 | __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ | ||
778 | } while (0) | ||
779 | #endif | ||
780 | |||
781 | #ifndef __this_cpu_add_return | ||
782 | # define __this_cpu_add_return(pcp, val) \ | ||
783 | (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) | ||
784 | #endif | ||
785 | |||
786 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) | ||
787 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
788 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
789 | |||
790 | #ifndef __this_cpu_xchg | ||
791 | # define __this_cpu_xchg(pcp, nval) \ | ||
792 | (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) | ||
793 | #endif | ||
794 | |||
795 | #ifndef __this_cpu_cmpxchg | ||
796 | # define __this_cpu_cmpxchg(pcp, oval, nval) \ | ||
797 | (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) | ||
798 | #endif | ||
799 | |||
800 | #ifndef __this_cpu_cmpxchg_double | ||
801 | # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ | ||
802 | (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) | ||
803 | #endif | ||
804 | |||
805 | #endif /* __LINUX_PERCPU_H */ | 132 | #endif /* __LINUX_PERCPU_H */ |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a9209118d80f..707617a8c0f6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -167,6 +167,11 @@ struct perf_event; | |||
167 | #define PERF_EVENT_TXN 0x1 | 167 | #define PERF_EVENT_TXN 0x1 |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * pmu::capabilities flags | ||
171 | */ | ||
172 | #define PERF_PMU_CAP_NO_INTERRUPT 0x01 | ||
173 | |||
174 | /** | ||
170 | * struct pmu - generic performance monitoring unit | 175 | * struct pmu - generic performance monitoring unit |
171 | */ | 176 | */ |
172 | struct pmu { | 177 | struct pmu { |
@@ -178,6 +183,11 @@ struct pmu { | |||
178 | const char *name; | 183 | const char *name; |
179 | int type; | 184 | int type; |
180 | 185 | ||
186 | /* | ||
187 | * various common per-pmu feature flags | ||
188 | */ | ||
189 | int capabilities; | ||
190 | |||
181 | int * __percpu pmu_disable_count; | 191 | int * __percpu pmu_disable_count; |
182 | struct perf_cpu_context * __percpu pmu_cpu_context; | 192 | struct perf_cpu_context * __percpu pmu_cpu_context; |
183 | int task_ctx_nr; | 193 | int task_ctx_nr; |
@@ -696,7 +706,8 @@ extern struct perf_guest_info_callbacks *perf_guest_cbs; | |||
696 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 706 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
697 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 707 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
698 | 708 | ||
699 | extern void perf_event_comm(struct task_struct *tsk); | 709 | extern void perf_event_exec(void); |
710 | extern void perf_event_comm(struct task_struct *tsk, bool exec); | ||
700 | extern void perf_event_fork(struct task_struct *tsk); | 711 | extern void perf_event_fork(struct task_struct *tsk); |
701 | 712 | ||
702 | /* Callchains */ | 713 | /* Callchains */ |
@@ -773,7 +784,7 @@ extern void perf_event_enable(struct perf_event *event); | |||
773 | extern void perf_event_disable(struct perf_event *event); | 784 | extern void perf_event_disable(struct perf_event *event); |
774 | extern int __perf_event_disable(void *info); | 785 | extern int __perf_event_disable(void *info); |
775 | extern void perf_event_task_tick(void); | 786 | extern void perf_event_task_tick(void); |
776 | #else | 787 | #else /* !CONFIG_PERF_EVENTS: */ |
777 | static inline void | 788 | static inline void |
778 | perf_event_task_sched_in(struct task_struct *prev, | 789 | perf_event_task_sched_in(struct task_struct *prev, |
779 | struct task_struct *task) { } | 790 | struct task_struct *task) { } |
@@ -803,7 +814,8 @@ static inline int perf_unregister_guest_info_callbacks | |||
803 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 814 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
804 | 815 | ||
805 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 816 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
806 | static inline void perf_event_comm(struct task_struct *tsk) { } | 817 | static inline void perf_event_exec(void) { } |
818 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } | ||
807 | static inline void perf_event_fork(struct task_struct *tsk) { } | 819 | static inline void perf_event_fork(struct task_struct *tsk) { } |
808 | static inline void perf_event_init(void) { } | 820 | static inline void perf_event_init(void) { } |
809 | static inline int perf_swevent_get_recursion_context(void) { return -1; } | 821 | static inline int perf_swevent_get_recursion_context(void) { return -1; } |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 4d0221fd0688..68041446c450 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -198,6 +198,13 @@ static inline struct mii_bus *mdiobus_alloc(void) | |||
198 | int mdiobus_register(struct mii_bus *bus); | 198 | int mdiobus_register(struct mii_bus *bus); |
199 | void mdiobus_unregister(struct mii_bus *bus); | 199 | void mdiobus_unregister(struct mii_bus *bus); |
200 | void mdiobus_free(struct mii_bus *bus); | 200 | void mdiobus_free(struct mii_bus *bus); |
201 | struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); | ||
202 | static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) | ||
203 | { | ||
204 | return devm_mdiobus_alloc_size(dev, 0); | ||
205 | } | ||
206 | |||
207 | void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); | ||
201 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); | 208 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); |
202 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); | 209 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); |
203 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); | 210 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); |
@@ -529,6 +536,15 @@ struct phy_driver { | |||
529 | /* See set_wol, but for checking whether Wake on LAN is enabled. */ | 536 | /* See set_wol, but for checking whether Wake on LAN is enabled. */ |
530 | void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); | 537 | void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); |
531 | 538 | ||
539 | /* | ||
540 | * Called to inform a PHY device driver when the core is about to | ||
541 | * change the link state. This callback is supposed to be used as | ||
542 | * fixup hook for drivers that need to take action when the link | ||
543 | * state changes. Drivers are by no means allowed to mess with the | ||
544 | * PHY device structure in their implementations. | ||
545 | */ | ||
546 | void (*link_change_notify)(struct phy_device *dev); | ||
547 | |||
532 | struct device_driver driver; | 548 | struct device_driver driver; |
533 | }; | 549 | }; |
534 | #define to_phy_driver(d) container_of(d, struct phy_driver, driver) | 550 | #define to_phy_driver(d) container_of(d, struct phy_driver, driver) |
@@ -666,6 +682,7 @@ static inline int phy_read_status(struct phy_device *phydev) | |||
666 | return phydev->drv->read_status(phydev); | 682 | return phydev->drv->read_status(phydev); |
667 | } | 683 | } |
668 | 684 | ||
685 | int genphy_config_init(struct phy_device *phydev); | ||
669 | int genphy_setup_forced(struct phy_device *phydev); | 686 | int genphy_setup_forced(struct phy_device *phydev); |
670 | int genphy_restart_aneg(struct phy_device *phydev); | 687 | int genphy_restart_aneg(struct phy_device *phydev); |
671 | int genphy_config_aneg(struct phy_device *phydev); | 688 | int genphy_config_aneg(struct phy_device *phydev); |
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 509d8f5f984e..ae612acebb53 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h | |||
@@ -9,15 +9,31 @@ struct fixed_phy_status { | |||
9 | int asym_pause; | 9 | int asym_pause; |
10 | }; | 10 | }; |
11 | 11 | ||
12 | struct device_node; | ||
13 | |||
12 | #ifdef CONFIG_FIXED_PHY | 14 | #ifdef CONFIG_FIXED_PHY |
13 | extern int fixed_phy_add(unsigned int irq, int phy_id, | 15 | extern int fixed_phy_add(unsigned int irq, int phy_id, |
14 | struct fixed_phy_status *status); | 16 | struct fixed_phy_status *status); |
17 | extern int fixed_phy_register(unsigned int irq, | ||
18 | struct fixed_phy_status *status, | ||
19 | struct device_node *np); | ||
20 | extern void fixed_phy_del(int phy_addr); | ||
15 | #else | 21 | #else |
16 | static inline int fixed_phy_add(unsigned int irq, int phy_id, | 22 | static inline int fixed_phy_add(unsigned int irq, int phy_id, |
17 | struct fixed_phy_status *status) | 23 | struct fixed_phy_status *status) |
18 | { | 24 | { |
19 | return -ENODEV; | 25 | return -ENODEV; |
20 | } | 26 | } |
27 | static inline int fixed_phy_register(unsigned int irq, | ||
28 | struct fixed_phy_status *status, | ||
29 | struct device_node *np) | ||
30 | { | ||
31 | return -ENODEV; | ||
32 | } | ||
33 | static inline int fixed_phy_del(int phy_addr) | ||
34 | { | ||
35 | return -ENODEV; | ||
36 | } | ||
21 | #endif /* CONFIG_FIXED_PHY */ | 37 | #endif /* CONFIG_FIXED_PHY */ |
22 | 38 | ||
23 | /* | 39 | /* |
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h index c2049e3d7444..748e71642c4a 100644 --- a/include/linux/platform_data/ata-samsung_cf.h +++ b/include/linux/platform_data/ata-samsung_cf.h | |||
@@ -29,7 +29,6 @@ extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata); | |||
29 | 29 | ||
30 | /* architecture-specific IDE configuration */ | 30 | /* architecture-specific IDE configuration */ |
31 | extern void s3c64xx_ide_setup_gpio(void); | 31 | extern void s3c64xx_ide_setup_gpio(void); |
32 | extern void s5pc100_ide_setup_gpio(void); | ||
33 | extern void s5pv210_ide_setup_gpio(void); | 32 | extern void s5pv210_ide_setup_gpio(void); |
34 | 33 | ||
35 | #endif /*__ATA_SAMSUNG_CF_H */ | 34 | #endif /*__ATA_SAMSUNG_CF_H */ |
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 4edb40676b3f..780d1e97f620 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h | |||
@@ -21,6 +21,7 @@ | |||
21 | enum bch_ecc { | 21 | enum bch_ecc { |
22 | BCH4_ECC = 0, | 22 | BCH4_ECC = 0, |
23 | BCH8_ECC, | 23 | BCH8_ECC, |
24 | BCH16_ECC, | ||
24 | }; | 25 | }; |
25 | 26 | ||
26 | /* ELM support 8 error syndrome process */ | 27 | /* ELM support 8 error syndrome process */ |
@@ -38,7 +39,7 @@ struct elm_errorvec { | |||
38 | bool error_reported; | 39 | bool error_reported; |
39 | bool error_uncorrectable; | 40 | bool error_uncorrectable; |
40 | int error_count; | 41 | int error_count; |
41 | int error_loc[ERROR_VECTOR_MAX]; | 42 | int error_loc[16]; |
42 | }; | 43 | }; |
43 | 44 | ||
44 | void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, | 45 | void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, |
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h new file mode 100644 index 000000000000..b98253466ace --- /dev/null +++ b/include/linux/platform_data/intel-mid_wdt.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * intel-mid_wdt: generic Intel MID SCU watchdog driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Intel Corporation. All rights reserved. | ||
5 | * Contact: David Cohen <david.a.cohen@linux.intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of version 2 of the GNU General | ||
9 | * Public License as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __INTEL_MID_WDT_H__ | ||
13 | #define __INTEL_MID_WDT_H__ | ||
14 | |||
15 | #include <linux/platform_device.h> | ||
16 | |||
17 | struct intel_mid_wdt_pdata { | ||
18 | int irq; | ||
19 | int (*probe)(struct platform_device *pdev); | ||
20 | }; | ||
21 | |||
22 | #endif /*__INTEL_MID_WDT_H__*/ | ||
diff --git a/include/linux/platform_data/leds-pca9685.h b/include/linux/platform_data/leds-pca9685.h deleted file mode 100644 index 778e9e4249cc..000000000000 --- a/include/linux/platform_data/leds-pca9685.h +++ /dev/null | |||
@@ -1,35 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com> | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of version 2 of | ||
5 | * the GNU General Public License. See the file COPYING in the main | ||
6 | * directory of this archive for more details. | ||
7 | * | ||
8 | * Based on leds-pca963x.h by Peter Meerwald <p.meerwald@bct-electronic.com> | ||
9 | * | ||
10 | * LED driver for the NXP PCA9685 PWM chip | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __LINUX_PCA9685_H | ||
15 | #define __LINUX_PCA9685_H | ||
16 | |||
17 | #include <linux/leds.h> | ||
18 | |||
19 | enum pca9685_outdrv { | ||
20 | PCA9685_OPEN_DRAIN, | ||
21 | PCA9685_TOTEM_POLE, | ||
22 | }; | ||
23 | |||
24 | enum pca9685_inverted { | ||
25 | PCA9685_NOT_INVERTED, | ||
26 | PCA9685_INVERTED, | ||
27 | }; | ||
28 | |||
29 | struct pca9685_platform_data { | ||
30 | struct led_platform_data leds; | ||
31 | enum pca9685_outdrv outdrv; | ||
32 | enum pca9685_inverted inverted; | ||
33 | }; | ||
34 | |||
35 | #endif /* __LINUX_PCA9685_H */ | ||
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 3e9dd6676b97..660c029d694f 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h | |||
@@ -31,6 +31,8 @@ enum omap_ecc { | |||
31 | OMAP_ECC_BCH8_CODE_HW_DETECTION_SW, | 31 | OMAP_ECC_BCH8_CODE_HW_DETECTION_SW, |
32 | /* 8-bit ECC calculation by GPMC, Error detection by ELM */ | 32 | /* 8-bit ECC calculation by GPMC, Error detection by ELM */ |
33 | OMAP_ECC_BCH8_CODE_HW, | 33 | OMAP_ECC_BCH8_CODE_HW, |
34 | /* 16-bit ECC calculation by GPMC, Error detection by ELM */ | ||
35 | OMAP_ECC_BCH16_CODE_HW, | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct gpmc_nand_regs { | 38 | struct gpmc_nand_regs { |
@@ -50,6 +52,9 @@ struct gpmc_nand_regs { | |||
50 | void __iomem *gpmc_bch_result1[GPMC_BCH_NUM_REMAINDER]; | 52 | void __iomem *gpmc_bch_result1[GPMC_BCH_NUM_REMAINDER]; |
51 | void __iomem *gpmc_bch_result2[GPMC_BCH_NUM_REMAINDER]; | 53 | void __iomem *gpmc_bch_result2[GPMC_BCH_NUM_REMAINDER]; |
52 | void __iomem *gpmc_bch_result3[GPMC_BCH_NUM_REMAINDER]; | 54 | void __iomem *gpmc_bch_result3[GPMC_BCH_NUM_REMAINDER]; |
55 | void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; | ||
56 | void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; | ||
57 | void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; | ||
53 | }; | 58 | }; |
54 | 59 | ||
55 | struct omap_nand_platform_data { | 60 | struct omap_nand_platform_data { |
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index a94147124929..ac4ea2e641c7 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h | |||
@@ -58,6 +58,9 @@ struct pxa3xx_nand_platform_data { | |||
58 | /* use an flash-based bad block table */ | 58 | /* use an flash-based bad block table */ |
59 | bool flash_bbt; | 59 | bool flash_bbt; |
60 | 60 | ||
61 | /* requested ECC strength and ECC step size */ | ||
62 | int ecc_strength, ecc_step_size; | ||
63 | |||
61 | const struct mtd_partition *parts[NUM_CHIP_SELECT]; | 64 | const struct mtd_partition *parts[NUM_CHIP_SELECT]; |
62 | unsigned int nr_parts[NUM_CHIP_SELECT]; | 65 | unsigned int nr_parts[NUM_CHIP_SELECT]; |
63 | 66 | ||
diff --git a/include/linux/platform_data/omap4-keypad.h b/include/linux/platform_data/omap4-keypad.h deleted file mode 100644 index 4eef5fb05a17..000000000000 --- a/include/linux/platform_data/omap4-keypad.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef __LINUX_INPUT_OMAP4_KEYPAD_H | ||
2 | #define __LINUX_INPUT_OMAP4_KEYPAD_H | ||
3 | |||
4 | #include <linux/input/matrix_keypad.h> | ||
5 | |||
6 | struct omap4_keypad_platform_data { | ||
7 | const struct matrix_keymap_data *keymap_data; | ||
8 | |||
9 | u8 rows; | ||
10 | u8 cols; | ||
11 | }; | ||
12 | |||
13 | #endif /* __LINUX_INPUT_OMAP4_KEYPAD_H */ | ||
diff --git a/include/linux/platform_data/pwm-renesas-tpu.h b/include/linux/platform_data/pwm-renesas-tpu.h deleted file mode 100644 index a7220b10ddab..000000000000 --- a/include/linux/platform_data/pwm-renesas-tpu.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef __PWM_RENESAS_TPU_H__ | ||
2 | #define __PWM_RENESAS_TPU_H__ | ||
3 | |||
4 | #include <linux/pwm.h> | ||
5 | |||
6 | #define TPU_CHANNEL_MAX 4 | ||
7 | |||
8 | struct tpu_pwm_channel_data { | ||
9 | enum pwm_polarity polarity; | ||
10 | }; | ||
11 | |||
12 | struct tpu_pwm_platform_data { | ||
13 | struct tpu_pwm_channel_data channels[TPU_CHANNEL_MAX]; | ||
14 | }; | ||
15 | |||
16 | #endif /* __PWM_RENESAS_TPU_H__ */ | ||
diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h new file mode 100644 index 000000000000..7b8c353f7dc8 --- /dev/null +++ b/include/linux/platform_data/shtc1.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Sensirion AG, Switzerland | ||
3 | * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com> | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef __SHTC1_H_ | ||
17 | #define __SHTC1_H_ | ||
18 | |||
19 | struct shtc1_platform_data { | ||
20 | bool blocking_io; | ||
21 | bool high_precision; | ||
22 | }; | ||
23 | #endif /* __SHTC1_H_ */ | ||
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h new file mode 100644 index 000000000000..1730312398ff --- /dev/null +++ b/include/linux/platform_data/st21nfca.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Driver include for the ST21NFCA NFC chip. | ||
3 | * | ||
4 | * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #ifndef _ST21NFCA_HCI_H_ | ||
20 | #define _ST21NFCA_HCI_H_ | ||
21 | |||
22 | #include <linux/i2c.h> | ||
23 | |||
24 | #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" | ||
25 | |||
26 | struct st21nfca_nfc_platform_data { | ||
27 | unsigned int gpio_irq; | ||
28 | unsigned int gpio_ena; | ||
29 | unsigned int irq_polarity; | ||
30 | }; | ||
31 | |||
32 | #endif /* _ST21NFCA_HCI_H_ */ | ||
diff --git a/include/linux/profile.h b/include/linux/profile.h index aaad3861beb8..b537a25ffa17 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h | |||
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly; | |||
44 | int profile_init(void); | 44 | int profile_init(void); |
45 | int profile_setup(char *str); | 45 | int profile_setup(char *str); |
46 | void profile_tick(int type); | 46 | void profile_tick(int type); |
47 | int setup_profiling_timer(unsigned int multiplier); | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * Add multiple profiler hits to a given address: | 50 | * Add multiple profiler hits to a given address: |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 077904c8b70d..cc79eff4a1ad 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk, | |||
334 | * calling arch_ptrace_stop() when it would be superfluous. For example, | 334 | * calling arch_ptrace_stop() when it would be superfluous. For example, |
335 | * if the thread has not been back to user mode since the last stop, the | 335 | * if the thread has not been back to user mode since the last stop, the |
336 | * thread state might indicate that nothing needs to be done. | 336 | * thread state might indicate that nothing needs to be done. |
337 | * | ||
338 | * This is guaranteed to be invoked once before a task stops for ptrace and | ||
339 | * may include arch-specific operations necessary prior to a ptrace stop. | ||
337 | */ | 340 | */ |
338 | #define arch_ptrace_stop_needed(code, info) (0) | 341 | #define arch_ptrace_stop_needed(code, info) (0) |
339 | #endif | 342 | #endif |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 4717f54051cb..e90628cac8fa 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
@@ -274,14 +274,18 @@ struct pwm_lookup { | |||
274 | unsigned int index; | 274 | unsigned int index; |
275 | const char *dev_id; | 275 | const char *dev_id; |
276 | const char *con_id; | 276 | const char *con_id; |
277 | unsigned int period; | ||
278 | enum pwm_polarity polarity; | ||
277 | }; | 279 | }; |
278 | 280 | ||
279 | #define PWM_LOOKUP(_provider, _index, _dev_id, _con_id) \ | 281 | #define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ |
280 | { \ | 282 | { \ |
281 | .provider = _provider, \ | 283 | .provider = _provider, \ |
282 | .index = _index, \ | 284 | .index = _index, \ |
283 | .dev_id = _dev_id, \ | 285 | .dev_id = _dev_id, \ |
284 | .con_id = _con_id, \ | 286 | .con_id = _con_id, \ |
287 | .period = _period, \ | ||
288 | .polarity = _polarity \ | ||
285 | } | 289 | } |
286 | 290 | ||
287 | #if IS_ENABLED(CONFIG_PWM) | 291 | #if IS_ENABLED(CONFIG_PWM) |
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 2de2e275b2cb..efdd9227a49c 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h | |||
@@ -6,9 +6,6 @@ | |||
6 | 6 | ||
7 | #include <linux/backlight.h> | 7 | #include <linux/backlight.h> |
8 | 8 | ||
9 | /* TODO: convert to gpiod_*() API once it has been merged */ | ||
10 | #define PWM_BACKLIGHT_GPIO_ACTIVE_LOW (1 << 0) | ||
11 | |||
12 | struct platform_pwm_backlight_data { | 9 | struct platform_pwm_backlight_data { |
13 | int pwm_id; | 10 | int pwm_id; |
14 | unsigned int max_brightness; | 11 | unsigned int max_brightness; |
@@ -16,8 +13,8 @@ struct platform_pwm_backlight_data { | |||
16 | unsigned int lth_brightness; | 13 | unsigned int lth_brightness; |
17 | unsigned int pwm_period_ns; | 14 | unsigned int pwm_period_ns; |
18 | unsigned int *levels; | 15 | unsigned int *levels; |
16 | /* TODO remove once all users are switched to gpiod_* API */ | ||
19 | int enable_gpio; | 17 | int enable_gpio; |
20 | unsigned long enable_gpio_flags; | ||
21 | int (*init)(struct device *dev); | 18 | int (*init)(struct device *dev); |
22 | int (*notify)(struct device *dev, int brightness); | 19 | int (*notify)(struct device *dev, int brightness); |
23 | void (*notify_after)(struct device *dev, int brightness); | 20 | void (*notify_after)(struct device *dev, int brightness); |
diff --git a/include/linux/quota.h b/include/linux/quota.h index cc7494a35429..0f3c5d38da1f 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -329,6 +329,7 @@ struct quotactl_ops { | |||
329 | int (*get_xstate)(struct super_block *, struct fs_quota_stat *); | 329 | int (*get_xstate)(struct super_block *, struct fs_quota_stat *); |
330 | int (*set_xstate)(struct super_block *, unsigned int, int); | 330 | int (*set_xstate)(struct super_block *, unsigned int, int); |
331 | int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); | 331 | int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); |
332 | int (*rm_xquota)(struct super_block *, unsigned int); | ||
332 | }; | 333 | }; |
333 | 334 | ||
334 | struct quota_format_type { | 335 | struct quota_format_type { |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5a75d19aa661..d231aa17b1d7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/debugobjects.h> | 44 | #include <linux/debugobjects.h> |
45 | #include <linux/bug.h> | 45 | #include <linux/bug.h> |
46 | #include <linux/compiler.h> | 46 | #include <linux/compiler.h> |
47 | #include <linux/percpu.h> | ||
48 | #include <asm/barrier.h> | 47 | #include <asm/barrier.h> |
49 | 48 | ||
50 | extern int rcu_expedited; /* for sysctl */ | 49 | extern int rcu_expedited; /* for sysctl */ |
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void); | |||
300 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ | 299 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ |
301 | 300 | ||
302 | /* | 301 | /* |
303 | * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings. | ||
304 | */ | ||
305 | |||
306 | #define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */ | ||
307 | DECLARE_PER_CPU(int, rcu_cond_resched_count); | ||
308 | void rcu_resched(void); | ||
309 | |||
310 | /* | ||
311 | * Is it time to report RCU quiescent states? | ||
312 | * | ||
313 | * Note unsynchronized access to rcu_cond_resched_count. Yes, we might | ||
314 | * increment some random CPU's count, and possibly also load the result from | ||
315 | * yet another CPU's count. We might even clobber some other CPU's attempt | ||
316 | * to zero its counter. This is all OK because the goal is not precision, | ||
317 | * but rather reasonable amortization of rcu_note_context_switch() overhead | ||
318 | * and extremely high probability of avoiding RCU CPU stall warnings. | ||
319 | * Note that this function has to be preempted in just the wrong place, | ||
320 | * many thousands of times in a row, for anything bad to happen. | ||
321 | */ | ||
322 | static inline bool rcu_should_resched(void) | ||
323 | { | ||
324 | return raw_cpu_inc_return(rcu_cond_resched_count) >= | ||
325 | RCU_COND_RESCHED_LIM; | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Report quiscent states to RCU if it is time to do so. | ||
330 | */ | ||
331 | static inline void rcu_cond_resched(void) | ||
332 | { | ||
333 | if (unlikely(rcu_should_resched())) | ||
334 | rcu_resched(); | ||
335 | } | ||
336 | |||
337 | /* | ||
338 | * Infrastructure to implement the synchronize_() primitives in | 302 | * Infrastructure to implement the synchronize_() primitives in |
339 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. | 303 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. |
340 | */ | 304 | */ |
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf); | |||
358 | * initialization. | 322 | * initialization. |
359 | */ | 323 | */ |
360 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | 324 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
325 | void init_rcu_head(struct rcu_head *head); | ||
326 | void destroy_rcu_head(struct rcu_head *head); | ||
361 | void init_rcu_head_on_stack(struct rcu_head *head); | 327 | void init_rcu_head_on_stack(struct rcu_head *head); |
362 | void destroy_rcu_head_on_stack(struct rcu_head *head); | 328 | void destroy_rcu_head_on_stack(struct rcu_head *head); |
363 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 329 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
330 | static inline void init_rcu_head(struct rcu_head *head) | ||
331 | { | ||
332 | } | ||
333 | |||
334 | static inline void destroy_rcu_head(struct rcu_head *head) | ||
335 | { | ||
336 | } | ||
337 | |||
364 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | 338 | static inline void init_rcu_head_on_stack(struct rcu_head *head) |
365 | { | 339 | { |
366 | } | 340 | } |
@@ -852,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void) | |||
852 | * read-side critical section that would block in a !PREEMPT kernel. | 826 | * read-side critical section that would block in a !PREEMPT kernel. |
853 | * But if you want the full story, read on! | 827 | * But if you want the full story, read on! |
854 | * | 828 | * |
855 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it | 829 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), |
856 | * is illegal to block while in an RCU read-side critical section. In | 830 | * it is illegal to block while in an RCU read-side critical section. |
857 | * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) | 831 | * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT |
858 | * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may | 832 | * kernel builds, RCU read-side critical sections may be preempted, |
859 | * be preempted, but explicit blocking is illegal. Finally, in preemptible | 833 | * but explicit blocking is illegal. Finally, in preemptible RCU |
860 | * RCU implementations in real-time (with -rt patchset) kernel builds, | 834 | * implementations in real-time (with -rt patchset) kernel builds, RCU |
861 | * RCU read-side critical sections may be preempted and they may also | 835 | * read-side critical sections may be preempted and they may also block, but |
862 | * block, but only when acquiring spinlocks that are subject to priority | 836 | * only when acquiring spinlocks that are subject to priority inheritance. |
863 | * inheritance. | ||
864 | */ | 837 | */ |
865 | static inline void rcu_read_lock(void) | 838 | static inline void rcu_read_lock(void) |
866 | { | 839 | { |
@@ -884,6 +857,34 @@ static inline void rcu_read_lock(void) | |||
884 | /** | 857 | /** |
885 | * rcu_read_unlock() - marks the end of an RCU read-side critical section. | 858 | * rcu_read_unlock() - marks the end of an RCU read-side critical section. |
886 | * | 859 | * |
860 | * In most situations, rcu_read_unlock() is immune from deadlock. | ||
861 | * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() | ||
862 | * is responsible for deboosting, which it does via rt_mutex_unlock(). | ||
863 | * Unfortunately, this function acquires the scheduler's runqueue and | ||
864 | * priority-inheritance spinlocks. This means that deadlock could result | ||
865 | * if the caller of rcu_read_unlock() already holds one of these locks or | ||
866 | * any lock that is ever acquired while holding them. | ||
867 | * | ||
868 | * That said, RCU readers are never priority boosted unless they were | ||
869 | * preempted. Therefore, one way to avoid deadlock is to make sure | ||
870 | * that preemption never happens within any RCU read-side critical | ||
871 | * section whose outermost rcu_read_unlock() is called with one of | ||
872 | * rt_mutex_unlock()'s locks held. Such preemption can be avoided in | ||
873 | * a number of ways, for example, by invoking preempt_disable() before | ||
874 | * critical section's outermost rcu_read_lock(). | ||
875 | * | ||
876 | * Given that the set of locks acquired by rt_mutex_unlock() might change | ||
877 | * at any time, a somewhat more future-proofed approach is to make sure | ||
878 | * that that preemption never happens within any RCU read-side critical | ||
879 | * section whose outermost rcu_read_unlock() is called with irqs disabled. | ||
880 | * This approach relies on the fact that rt_mutex_unlock() currently only | ||
881 | * acquires irq-disabled locks. | ||
882 | * | ||
883 | * The second of these two approaches is best in most situations, | ||
884 | * however, the first approach can also be useful, at least to those | ||
885 | * developers willing to keep abreast of the set of locks acquired by | ||
886 | * rt_mutex_unlock(). | ||
887 | * | ||
887 | * See rcu_read_lock() for more information. | 888 | * See rcu_read_lock() for more information. |
888 | */ | 889 | */ |
889 | static inline void rcu_read_unlock(void) | 890 | static inline void rcu_read_unlock(void) |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index a2d9d81038d1..14ec18d5e18b 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers, | |||
395 | { | 395 | { |
396 | } | 396 | } |
397 | 397 | ||
398 | static inline int regulator_can_change_voltage(struct regulator *regulator) | ||
399 | { | ||
400 | return 0; | ||
401 | } | ||
402 | |||
398 | static inline int regulator_set_voltage(struct regulator *regulator, | 403 | static inline int regulator_set_voltage(struct regulator *regulator, |
399 | int min_uV, int max_uV) | 404 | int min_uV, int max_uV) |
400 | { | 405 | { |
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h index 4d09f6eab359..20bcb55498cd 100644 --- a/include/linux/rfkill-gpio.h +++ b/include/linux/rfkill-gpio.h | |||
@@ -27,21 +27,11 @@ | |||
27 | * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. | 27 | * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. |
28 | * for unused gpio's, the expected value is -1. | 28 | * for unused gpio's, the expected value is -1. |
29 | * @name: name for the gpio rf kill instance | 29 | * @name: name for the gpio rf kill instance |
30 | * @reset_gpio: GPIO which is used for reseting rfkill switch | ||
31 | * @shutdown_gpio: GPIO which is used for shutdown of rfkill switch | ||
32 | * @power_clk_name: [optional] name of clk to turn off while blocked | ||
33 | * @gpio_runtime_close: clean up platform specific gpio configuration | ||
34 | * @gpio_runtime_setup: set up platform specific gpio configuration | ||
35 | */ | 30 | */ |
36 | 31 | ||
37 | struct rfkill_gpio_platform_data { | 32 | struct rfkill_gpio_platform_data { |
38 | char *name; | 33 | char *name; |
39 | int reset_gpio; | ||
40 | int shutdown_gpio; | ||
41 | const char *power_clk_name; | ||
42 | enum rfkill_type type; | 34 | enum rfkill_type type; |
43 | void (*gpio_runtime_close)(struct platform_device *); | ||
44 | int (*gpio_runtime_setup)(struct platform_device *); | ||
45 | }; | 35 | }; |
46 | 36 | ||
47 | #endif /* __RFKILL_GPIO_H */ | 37 | #endif /* __RFKILL_GPIO_H */ |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index d69cf637a15a..49a4d6f59108 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k | |||
97 | __ring_buffer_alloc((size), (flags), &__key); \ | 97 | __ring_buffer_alloc((size), (flags), &__key); \ |
98 | }) | 98 | }) |
99 | 99 | ||
100 | void ring_buffer_wait(struct ring_buffer *buffer, int cpu); | 100 | int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
101 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, | 101 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
102 | struct file *filp, poll_table *poll_table); | 102 | struct file *filp, poll_table *poll_table); |
103 | 103 | ||
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 3aed8d737e1a..1abba5ce2a2f 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h | |||
@@ -90,11 +90,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); | |||
90 | extern void rt_mutex_destroy(struct rt_mutex *lock); | 90 | extern void rt_mutex_destroy(struct rt_mutex *lock); |
91 | 91 | ||
92 | extern void rt_mutex_lock(struct rt_mutex *lock); | 92 | extern void rt_mutex_lock(struct rt_mutex *lock); |
93 | extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, | 93 | extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); |
94 | int detect_deadlock); | ||
95 | extern int rt_mutex_timed_lock(struct rt_mutex *lock, | 94 | extern int rt_mutex_timed_lock(struct rt_mutex *lock, |
96 | struct hrtimer_sleeper *timeout, | 95 | struct hrtimer_sleeper *timeout); |
97 | int detect_deadlock); | ||
98 | 96 | ||
99 | extern int rt_mutex_trylock(struct rt_mutex *lock); | 97 | extern int rt_mutex_trylock(struct rt_mutex *lock); |
100 | 98 | ||
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index d5b13bc07a0b..561e8615528d 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h | |||
@@ -15,13 +15,13 @@ | |||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | /* | 16 | /* |
17 | * the rw-semaphore definition | 17 | * the rw-semaphore definition |
18 | * - if activity is 0 then there are no active readers or writers | 18 | * - if count is 0 then there are no active readers or writers |
19 | * - if activity is +ve then that is the number of active readers | 19 | * - if count is +ve then that is the number of active readers |
20 | * - if activity is -1 then there is one active writer | 20 | * - if count is -1 then there is one active writer |
21 | * - if wait_list is not empty, then there are processes waiting for the semaphore | 21 | * - if wait_list is not empty, then there are processes waiting for the semaphore |
22 | */ | 22 | */ |
23 | struct rw_semaphore { | 23 | struct rw_semaphore { |
24 | __s32 activity; | 24 | __s32 count; |
25 | raw_spinlock_t wait_lock; | 25 | raw_spinlock_t wait_lock; |
26 | struct list_head wait_list; | 26 | struct list_head wait_list; |
27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 03f3b05e8ec1..035d3c57fc8a 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
@@ -13,8 +13,10 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | |||
17 | #include <linux/atomic.h> | 16 | #include <linux/atomic.h> |
17 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER | ||
18 | #include <linux/osq_lock.h> | ||
19 | #endif | ||
18 | 20 | ||
19 | struct rw_semaphore; | 21 | struct rw_semaphore; |
20 | 22 | ||
@@ -23,9 +25,17 @@ struct rw_semaphore; | |||
23 | #else | 25 | #else |
24 | /* All arch specific implementations share the same struct */ | 26 | /* All arch specific implementations share the same struct */ |
25 | struct rw_semaphore { | 27 | struct rw_semaphore { |
26 | long count; | 28 | long count; |
27 | raw_spinlock_t wait_lock; | 29 | struct list_head wait_list; |
28 | struct list_head wait_list; | 30 | raw_spinlock_t wait_lock; |
31 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER | ||
32 | struct optimistic_spin_queue osq; /* spinner MCS lock */ | ||
33 | /* | ||
34 | * Write owner. Used as a speculative check to see | ||
35 | * if the owner is running on the cpu. | ||
36 | */ | ||
37 | struct task_struct *owner; | ||
38 | #endif | ||
29 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 39 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
30 | struct lockdep_map dep_map; | 40 | struct lockdep_map dep_map; |
31 | #endif | 41 | #endif |
@@ -55,10 +65,17 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) | |||
55 | # define __RWSEM_DEP_MAP_INIT(lockname) | 65 | # define __RWSEM_DEP_MAP_INIT(lockname) |
56 | #endif | 66 | #endif |
57 | 67 | ||
58 | #define __RWSEM_INITIALIZER(name) \ | 68 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
59 | { RWSEM_UNLOCKED_VALUE, \ | 69 | #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL |
60 | __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ | 70 | #else |
61 | LIST_HEAD_INIT((name).wait_list) \ | 71 | #define __RWSEM_OPT_INIT(lockname) |
72 | #endif | ||
73 | |||
74 | #define __RWSEM_INITIALIZER(name) \ | ||
75 | { .count = RWSEM_UNLOCKED_VALUE, \ | ||
76 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | ||
77 | .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ | ||
78 | __RWSEM_OPT_INIT(name) \ | ||
62 | __RWSEM_DEP_MAP_INIT(name) } | 79 | __RWSEM_DEP_MAP_INIT(name) } |
63 | 80 | ||
64 | #define DECLARE_RWSEM(name) \ | 81 | #define DECLARE_RWSEM(name) \ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index ea74596014a2..42cac4dc2157 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -847,10 +847,10 @@ enum cpu_idle_type { | |||
847 | }; | 847 | }; |
848 | 848 | ||
849 | /* | 849 | /* |
850 | * Increase resolution of cpu_power calculations | 850 | * Increase resolution of cpu_capacity calculations |
851 | */ | 851 | */ |
852 | #define SCHED_POWER_SHIFT 10 | 852 | #define SCHED_CAPACITY_SHIFT 10 |
853 | #define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) | 853 | #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) |
854 | 854 | ||
855 | /* | 855 | /* |
856 | * sched-domains (multiprocessor balancing) declarations: | 856 | * sched-domains (multiprocessor balancing) declarations: |
@@ -862,7 +862,7 @@ enum cpu_idle_type { | |||
862 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ | 862 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ |
863 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ | 863 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ |
864 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ | 864 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
865 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ | 865 | #define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */ |
866 | #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ | 866 | #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ |
867 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 867 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
868 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 868 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
@@ -872,21 +872,21 @@ enum cpu_idle_type { | |||
872 | #define SD_NUMA 0x4000 /* cross-node balancing */ | 872 | #define SD_NUMA 0x4000 /* cross-node balancing */ |
873 | 873 | ||
874 | #ifdef CONFIG_SCHED_SMT | 874 | #ifdef CONFIG_SCHED_SMT |
875 | static inline const int cpu_smt_flags(void) | 875 | static inline int cpu_smt_flags(void) |
876 | { | 876 | { |
877 | return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; | 877 | return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; |
878 | } | 878 | } |
879 | #endif | 879 | #endif |
880 | 880 | ||
881 | #ifdef CONFIG_SCHED_MC | 881 | #ifdef CONFIG_SCHED_MC |
882 | static inline const int cpu_core_flags(void) | 882 | static inline int cpu_core_flags(void) |
883 | { | 883 | { |
884 | return SD_SHARE_PKG_RESOURCES; | 884 | return SD_SHARE_PKG_RESOURCES; |
885 | } | 885 | } |
886 | #endif | 886 | #endif |
887 | 887 | ||
888 | #ifdef CONFIG_NUMA | 888 | #ifdef CONFIG_NUMA |
889 | static inline const int cpu_numa_flags(void) | 889 | static inline int cpu_numa_flags(void) |
890 | { | 890 | { |
891 | return SD_NUMA; | 891 | return SD_NUMA; |
892 | } | 892 | } |
@@ -999,14 +999,14 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); | |||
999 | bool cpus_share_cache(int this_cpu, int that_cpu); | 999 | bool cpus_share_cache(int this_cpu, int that_cpu); |
1000 | 1000 | ||
1001 | typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); | 1001 | typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); |
1002 | typedef const int (*sched_domain_flags_f)(void); | 1002 | typedef int (*sched_domain_flags_f)(void); |
1003 | 1003 | ||
1004 | #define SDTL_OVERLAP 0x01 | 1004 | #define SDTL_OVERLAP 0x01 |
1005 | 1005 | ||
1006 | struct sd_data { | 1006 | struct sd_data { |
1007 | struct sched_domain **__percpu sd; | 1007 | struct sched_domain **__percpu sd; |
1008 | struct sched_group **__percpu sg; | 1008 | struct sched_group **__percpu sg; |
1009 | struct sched_group_power **__percpu sgp; | 1009 | struct sched_group_capacity **__percpu sgc; |
1010 | }; | 1010 | }; |
1011 | 1011 | ||
1012 | struct sched_domain_topology_level { | 1012 | struct sched_domain_topology_level { |
@@ -1270,9 +1270,6 @@ struct task_struct { | |||
1270 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1270 | #ifdef CONFIG_TREE_PREEMPT_RCU |
1271 | struct rcu_node *rcu_blocked_node; | 1271 | struct rcu_node *rcu_blocked_node; |
1272 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1272 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1273 | #ifdef CONFIG_RCU_BOOST | ||
1274 | struct rt_mutex *rcu_boost_mutex; | ||
1275 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
1276 | 1273 | ||
1277 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1274 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
1278 | struct sched_info sched_info; | 1275 | struct sched_info sched_info; |
@@ -1440,8 +1437,6 @@ struct task_struct { | |||
1440 | struct rb_node *pi_waiters_leftmost; | 1437 | struct rb_node *pi_waiters_leftmost; |
1441 | /* Deadlock detection and priority inheritance handling */ | 1438 | /* Deadlock detection and priority inheritance handling */ |
1442 | struct rt_mutex_waiter *pi_blocked_on; | 1439 | struct rt_mutex_waiter *pi_blocked_on; |
1443 | /* Top pi_waiters task */ | ||
1444 | struct task_struct *pi_top_task; | ||
1445 | #endif | 1440 | #endif |
1446 | 1441 | ||
1447 | #ifdef CONFIG_DEBUG_MUTEXES | 1442 | #ifdef CONFIG_DEBUG_MUTEXES |
@@ -2009,9 +2004,6 @@ static inline void rcu_copy_process(struct task_struct *p) | |||
2009 | #ifdef CONFIG_TREE_PREEMPT_RCU | 2004 | #ifdef CONFIG_TREE_PREEMPT_RCU |
2010 | p->rcu_blocked_node = NULL; | 2005 | p->rcu_blocked_node = NULL; |
2011 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 2006 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
2012 | #ifdef CONFIG_RCU_BOOST | ||
2013 | p->rcu_boost_mutex = NULL; | ||
2014 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
2015 | INIT_LIST_HEAD(&p->rcu_node_entry); | 2007 | INIT_LIST_HEAD(&p->rcu_node_entry); |
2016 | } | 2008 | } |
2017 | 2009 | ||
@@ -2173,7 +2165,7 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { } | |||
2173 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } | 2165 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } |
2174 | #endif | 2166 | #endif |
2175 | 2167 | ||
2176 | extern bool yield_to(struct task_struct *p, bool preempt); | 2168 | extern int yield_to(struct task_struct *p, bool preempt); |
2177 | extern void set_user_nice(struct task_struct *p, long nice); | 2169 | extern void set_user_nice(struct task_struct *p, long nice); |
2178 | extern int task_prio(const struct task_struct *p); | 2170 | extern int task_prio(const struct task_struct *p); |
2179 | /** | 2171 | /** |
@@ -2421,7 +2413,11 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i | |||
2421 | struct task_struct *fork_idle(int); | 2413 | struct task_struct *fork_idle(int); |
2422 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 2414 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
2423 | 2415 | ||
2424 | extern void set_task_comm(struct task_struct *tsk, const char *from); | 2416 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
2417 | static inline void set_task_comm(struct task_struct *tsk, const char *from) | ||
2418 | { | ||
2419 | __set_task_comm(tsk, from, false); | ||
2420 | } | ||
2425 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2421 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2426 | 2422 | ||
2427 | #ifdef CONFIG_SMP | 2423 | #ifdef CONFIG_SMP |
@@ -2784,7 +2780,7 @@ static inline bool __must_check current_set_polling_and_test(void) | |||
2784 | 2780 | ||
2785 | /* | 2781 | /* |
2786 | * Polling state must be visible before we test NEED_RESCHED, | 2782 | * Polling state must be visible before we test NEED_RESCHED, |
2787 | * paired by resched_task() | 2783 | * paired by resched_curr() |
2788 | */ | 2784 | */ |
2789 | smp_mb__after_atomic(); | 2785 | smp_mb__after_atomic(); |
2790 | 2786 | ||
@@ -2802,7 +2798,7 @@ static inline bool __must_check current_clr_polling_and_test(void) | |||
2802 | 2798 | ||
2803 | /* | 2799 | /* |
2804 | * Polling state must be visible before we test NEED_RESCHED, | 2800 | * Polling state must be visible before we test NEED_RESCHED, |
2805 | * paired by resched_task() | 2801 | * paired by resched_curr() |
2806 | */ | 2802 | */ |
2807 | smp_mb__after_atomic(); | 2803 | smp_mb__after_atomic(); |
2808 | 2804 | ||
@@ -2834,7 +2830,7 @@ static inline void current_clr_polling(void) | |||
2834 | * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also | 2830 | * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also |
2835 | * fold. | 2831 | * fold. |
2836 | */ | 2832 | */ |
2837 | smp_mb(); /* paired with resched_task() */ | 2833 | smp_mb(); /* paired with resched_curr() */ |
2838 | 2834 | ||
2839 | preempt_fold_need_resched(); | 2835 | preempt_fold_need_resched(); |
2840 | } | 2836 | } |
diff --git a/include/linux/security.h b/include/linux/security.h index 6478ce3252c7..9c6b9722ff48 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1708,7 +1708,7 @@ struct security_operations { | |||
1708 | void (*key_free) (struct key *key); | 1708 | void (*key_free) (struct key *key); |
1709 | int (*key_permission) (key_ref_t key_ref, | 1709 | int (*key_permission) (key_ref_t key_ref, |
1710 | const struct cred *cred, | 1710 | const struct cred *cred, |
1711 | key_perm_t perm); | 1711 | unsigned perm); |
1712 | int (*key_getsecurity)(struct key *key, char **_buffer); | 1712 | int (*key_getsecurity)(struct key *key, char **_buffer); |
1713 | #endif /* CONFIG_KEYS */ | 1713 | #endif /* CONFIG_KEYS */ |
1714 | 1714 | ||
@@ -3034,7 +3034,7 @@ static inline int security_path_chroot(struct path *path) | |||
3034 | int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); | 3034 | int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); |
3035 | void security_key_free(struct key *key); | 3035 | void security_key_free(struct key *key); |
3036 | int security_key_permission(key_ref_t key_ref, | 3036 | int security_key_permission(key_ref_t key_ref, |
3037 | const struct cred *cred, key_perm_t perm); | 3037 | const struct cred *cred, unsigned perm); |
3038 | int security_key_getsecurity(struct key *key, char **_buffer); | 3038 | int security_key_getsecurity(struct key *key, char **_buffer); |
3039 | 3039 | ||
3040 | #else | 3040 | #else |
@@ -3052,7 +3052,7 @@ static inline void security_key_free(struct key *key) | |||
3052 | 3052 | ||
3053 | static inline int security_key_permission(key_ref_t key_ref, | 3053 | static inline int security_key_permission(key_ref_t key_ref, |
3054 | const struct cred *cred, | 3054 | const struct cred *cred, |
3055 | key_perm_t perm) | 3055 | unsigned perm) |
3056 | { | 3056 | { |
3057 | return 0; | 3057 | return 0; |
3058 | } | 3058 | } |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 535f158977b9..8cf350325dc6 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) | |||
164 | static inline unsigned raw_seqcount_begin(const seqcount_t *s) | 164 | static inline unsigned raw_seqcount_begin(const seqcount_t *s) |
165 | { | 165 | { |
166 | unsigned ret = ACCESS_ONCE(s->sequence); | 166 | unsigned ret = ACCESS_ONCE(s->sequence); |
167 | |||
168 | seqcount_lockdep_reader_access(s); | ||
169 | smp_rmb(); | 167 | smp_rmb(); |
170 | return ret & ~1; | 168 | return ret & ~1; |
171 | } | 169 | } |
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index f92c0a43c54c..abdf1f229dc3 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h | |||
@@ -54,6 +54,7 @@ struct shdma_desc { | |||
54 | dma_cookie_t cookie; | 54 | dma_cookie_t cookie; |
55 | int chunks; | 55 | int chunks; |
56 | int mark; | 56 | int mark; |
57 | bool cyclic; /* used as cyclic transfer */ | ||
57 | }; | 58 | }; |
58 | 59 | ||
59 | struct shdma_chan { | 60 | struct shdma_chan { |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 08074a810164..ec89301ada41 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -338,13 +338,18 @@ enum { | |||
338 | 338 | ||
339 | SKB_GSO_GRE = 1 << 6, | 339 | SKB_GSO_GRE = 1 << 6, |
340 | 340 | ||
341 | SKB_GSO_IPIP = 1 << 7, | 341 | SKB_GSO_GRE_CSUM = 1 << 7, |
342 | 342 | ||
343 | SKB_GSO_SIT = 1 << 8, | 343 | SKB_GSO_IPIP = 1 << 8, |
344 | 344 | ||
345 | SKB_GSO_UDP_TUNNEL = 1 << 9, | 345 | SKB_GSO_SIT = 1 << 9, |
346 | |||
347 | SKB_GSO_UDP_TUNNEL = 1 << 10, | ||
348 | |||
349 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, | ||
350 | |||
351 | SKB_GSO_MPLS = 1 << 12, | ||
346 | 352 | ||
347 | SKB_GSO_MPLS = 1 << 10, | ||
348 | }; | 353 | }; |
349 | 354 | ||
350 | #if BITS_PER_LONG > 32 | 355 | #if BITS_PER_LONG > 32 |
@@ -426,7 +431,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
426 | * @csum_start: Offset from skb->head where checksumming should start | 431 | * @csum_start: Offset from skb->head where checksumming should start |
427 | * @csum_offset: Offset from csum_start where checksum should be stored | 432 | * @csum_offset: Offset from csum_start where checksum should be stored |
428 | * @priority: Packet queueing priority | 433 | * @priority: Packet queueing priority |
429 | * @local_df: allow local fragmentation | 434 | * @ignore_df: allow local fragmentation |
430 | * @cloned: Head may be cloned (check refcnt to be sure) | 435 | * @cloned: Head may be cloned (check refcnt to be sure) |
431 | * @ip_summed: Driver fed us an IP checksum | 436 | * @ip_summed: Driver fed us an IP checksum |
432 | * @nohdr: Payload reference only, must not modify header | 437 | * @nohdr: Payload reference only, must not modify header |
@@ -514,7 +519,7 @@ struct sk_buff { | |||
514 | }; | 519 | }; |
515 | __u32 priority; | 520 | __u32 priority; |
516 | kmemcheck_bitfield_begin(flags1); | 521 | kmemcheck_bitfield_begin(flags1); |
517 | __u8 local_df:1, | 522 | __u8 ignore_df:1, |
518 | cloned:1, | 523 | cloned:1, |
519 | ip_summed:2, | 524 | ip_summed:2, |
520 | nohdr:1, | 525 | nohdr:1, |
@@ -567,7 +572,10 @@ struct sk_buff { | |||
567 | * headers if needed | 572 | * headers if needed |
568 | */ | 573 | */ |
569 | __u8 encapsulation:1; | 574 | __u8 encapsulation:1; |
570 | /* 6/8 bit hole (depending on ndisc_nodetype presence) */ | 575 | __u8 encap_hdr_csum:1; |
576 | __u8 csum_valid:1; | ||
577 | __u8 csum_complete_sw:1; | ||
578 | /* 3/5 bit hole (depending on ndisc_nodetype presence) */ | ||
571 | kmemcheck_bitfield_end(flags2); | 579 | kmemcheck_bitfield_end(flags2); |
572 | 580 | ||
573 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL | 581 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL |
@@ -739,7 +747,13 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); | |||
739 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); | 747 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); |
740 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); | 748 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); |
741 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); | 749 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); |
742 | struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); | 750 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
751 | gfp_t gfp_mask, bool fclone); | ||
752 | static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, | ||
753 | gfp_t gfp_mask) | ||
754 | { | ||
755 | return __pskb_copy_fclone(skb, headroom, gfp_mask, false); | ||
756 | } | ||
743 | 757 | ||
744 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); | 758 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); |
745 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, | 759 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, |
@@ -1840,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) | |||
1840 | return pskb_may_pull(skb, skb_network_offset(skb) + len); | 1854 | return pskb_may_pull(skb, skb_network_offset(skb) + len); |
1841 | } | 1855 | } |
1842 | 1856 | ||
1857 | static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb) | ||
1858 | { | ||
1859 | /* Only continue with checksum unnecessary if device indicated | ||
1860 | * it is valid across encapsulation (skb->encapsulation was set). | ||
1861 | */ | ||
1862 | if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation) | ||
1863 | skb->ip_summed = CHECKSUM_NONE; | ||
1864 | |||
1865 | skb->encapsulation = 0; | ||
1866 | skb->csum_valid = 0; | ||
1867 | } | ||
1868 | |||
1843 | /* | 1869 | /* |
1844 | * CPUs often take a performance hit when accessing unaligned memory | 1870 | * CPUs often take a performance hit when accessing unaligned memory |
1845 | * locations. The actual performance hit varies, it can be small if the | 1871 | * locations. The actual performance hit varies, it can be small if the |
@@ -2233,6 +2259,14 @@ static inline struct sk_buff *pskb_copy(struct sk_buff *skb, | |||
2233 | return __pskb_copy(skb, skb_headroom(skb), gfp_mask); | 2259 | return __pskb_copy(skb, skb_headroom(skb), gfp_mask); |
2234 | } | 2260 | } |
2235 | 2261 | ||
2262 | |||
2263 | static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, | ||
2264 | gfp_t gfp_mask) | ||
2265 | { | ||
2266 | return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); | ||
2267 | } | ||
2268 | |||
2269 | |||
2236 | /** | 2270 | /** |
2237 | * skb_clone_writable - is the header of a clone writable | 2271 | * skb_clone_writable - is the header of a clone writable |
2238 | * @skb: buffer to check | 2272 | * @skb: buffer to check |
@@ -2716,7 +2750,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb); | |||
2716 | 2750 | ||
2717 | static inline int skb_csum_unnecessary(const struct sk_buff *skb) | 2751 | static inline int skb_csum_unnecessary(const struct sk_buff *skb) |
2718 | { | 2752 | { |
2719 | return skb->ip_summed & CHECKSUM_UNNECESSARY; | 2753 | return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid); |
2720 | } | 2754 | } |
2721 | 2755 | ||
2722 | /** | 2756 | /** |
@@ -2741,6 +2775,103 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb) | |||
2741 | 0 : __skb_checksum_complete(skb); | 2775 | 0 : __skb_checksum_complete(skb); |
2742 | } | 2776 | } |
2743 | 2777 | ||
2778 | /* Check if we need to perform checksum complete validation. | ||
2779 | * | ||
2780 | * Returns true if checksum complete is needed, false otherwise | ||
2781 | * (either checksum is unnecessary or zero checksum is allowed). | ||
2782 | */ | ||
2783 | static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, | ||
2784 | bool zero_okay, | ||
2785 | __sum16 check) | ||
2786 | { | ||
2787 | if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { | ||
2788 | skb->csum_valid = 1; | ||
2789 | return false; | ||
2790 | } | ||
2791 | |||
2792 | return true; | ||
2793 | } | ||
2794 | |||
2795 | /* For small packets <= CHECKSUM_BREAK peform checksum complete directly | ||
2796 | * in checksum_init. | ||
2797 | */ | ||
2798 | #define CHECKSUM_BREAK 76 | ||
2799 | |||
2800 | /* Validate (init) checksum based on checksum complete. | ||
2801 | * | ||
2802 | * Return values: | ||
2803 | * 0: checksum is validated or try to in skb_checksum_complete. In the latter | ||
2804 | * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo | ||
2805 | * checksum is stored in skb->csum for use in __skb_checksum_complete | ||
2806 | * non-zero: value of invalid checksum | ||
2807 | * | ||
2808 | */ | ||
2809 | static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, | ||
2810 | bool complete, | ||
2811 | __wsum psum) | ||
2812 | { | ||
2813 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
2814 | if (!csum_fold(csum_add(psum, skb->csum))) { | ||
2815 | skb->csum_valid = 1; | ||
2816 | return 0; | ||
2817 | } | ||
2818 | } | ||
2819 | |||
2820 | skb->csum = psum; | ||
2821 | |||
2822 | if (complete || skb->len <= CHECKSUM_BREAK) { | ||
2823 | __sum16 csum; | ||
2824 | |||
2825 | csum = __skb_checksum_complete(skb); | ||
2826 | skb->csum_valid = !csum; | ||
2827 | return csum; | ||
2828 | } | ||
2829 | |||
2830 | return 0; | ||
2831 | } | ||
2832 | |||
2833 | static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) | ||
2834 | { | ||
2835 | return 0; | ||
2836 | } | ||
2837 | |||
2838 | /* Perform checksum validate (init). Note that this is a macro since we only | ||
2839 | * want to calculate the pseudo header which is an input function if necessary. | ||
2840 | * First we try to validate without any computation (checksum unnecessary) and | ||
2841 | * then calculate based on checksum complete calling the function to compute | ||
2842 | * pseudo header. | ||
2843 | * | ||
2844 | * Return values: | ||
2845 | * 0: checksum is validated or try to in skb_checksum_complete | ||
2846 | * non-zero: value of invalid checksum | ||
2847 | */ | ||
2848 | #define __skb_checksum_validate(skb, proto, complete, \ | ||
2849 | zero_okay, check, compute_pseudo) \ | ||
2850 | ({ \ | ||
2851 | __sum16 __ret = 0; \ | ||
2852 | skb->csum_valid = 0; \ | ||
2853 | if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ | ||
2854 | __ret = __skb_checksum_validate_complete(skb, \ | ||
2855 | complete, compute_pseudo(skb, proto)); \ | ||
2856 | __ret; \ | ||
2857 | }) | ||
2858 | |||
2859 | #define skb_checksum_init(skb, proto, compute_pseudo) \ | ||
2860 | __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) | ||
2861 | |||
2862 | #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ | ||
2863 | __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) | ||
2864 | |||
2865 | #define skb_checksum_validate(skb, proto, compute_pseudo) \ | ||
2866 | __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) | ||
2867 | |||
2868 | #define skb_checksum_validate_zero_check(skb, proto, check, \ | ||
2869 | compute_pseudo) \ | ||
2870 | __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo) | ||
2871 | |||
2872 | #define skb_checksum_simple_validate(skb) \ | ||
2873 | __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) | ||
2874 | |||
2744 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 2875 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
2745 | void nf_conntrack_destroy(struct nf_conntrack *nfct); | 2876 | void nf_conntrack_destroy(struct nf_conntrack *nfct); |
2746 | static inline void nf_conntrack_put(struct nf_conntrack *nfct) | 2877 | static inline void nf_conntrack_put(struct nf_conntrack *nfct) |
@@ -2895,6 +3026,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) | |||
2895 | struct skb_gso_cb { | 3026 | struct skb_gso_cb { |
2896 | int mac_offset; | 3027 | int mac_offset; |
2897 | int encap_level; | 3028 | int encap_level; |
3029 | __u16 csum_start; | ||
2898 | }; | 3030 | }; |
2899 | #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) | 3031 | #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) |
2900 | 3032 | ||
@@ -2919,6 +3051,28 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) | |||
2919 | return 0; | 3051 | return 0; |
2920 | } | 3052 | } |
2921 | 3053 | ||
3054 | /* Compute the checksum for a gso segment. First compute the checksum value | ||
3055 | * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and | ||
3056 | * then add in skb->csum (checksum from csum_start to end of packet). | ||
3057 | * skb->csum and csum_start are then updated to reflect the checksum of the | ||
3058 | * resultant packet starting from the transport header-- the resultant checksum | ||
3059 | * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo | ||
3060 | * header. | ||
3061 | */ | ||
3062 | static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) | ||
3063 | { | ||
3064 | int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - | ||
3065 | skb_transport_offset(skb); | ||
3066 | __u16 csum; | ||
3067 | |||
3068 | csum = csum_fold(csum_partial(skb_transport_header(skb), | ||
3069 | plen, skb->csum)); | ||
3070 | skb->csum = res; | ||
3071 | SKB_GSO_CB(skb)->csum_start -= plen; | ||
3072 | |||
3073 | return csum; | ||
3074 | } | ||
3075 | |||
2922 | static inline bool skb_is_gso(const struct sk_buff *skb) | 3076 | static inline bool skb_is_gso(const struct sk_buff *skb) |
2923 | { | 3077 | { |
2924 | return skb_shinfo(skb)->gso_size; | 3078 | return skb_shinfo(skb)->gso_size; |
diff --git a/include/linux/socket.h b/include/linux/socket.h index 8e98297f1388..ec538fc287a6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -305,8 +305,6 @@ struct ucred { | |||
305 | /* IPX options */ | 305 | /* IPX options */ |
306 | #define IPX_TYPE 1 | 306 | #define IPX_TYPE 1 |
307 | 307 | ||
308 | extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | ||
309 | int offset, int len); | ||
310 | extern int csum_partial_copy_fromiovecend(unsigned char *kdata, | 308 | extern int csum_partial_copy_fromiovecend(unsigned char *kdata, |
311 | struct iovec *iov, | 309 | struct iovec *iov, |
312 | int offset, | 310 | int offset, |
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset, | |||
315 | unsigned long nr_segs); | 313 | unsigned long nr_segs); |
316 | 314 | ||
317 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); | 315 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); |
318 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | ||
319 | int offset, int len); | ||
320 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); | 316 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); |
321 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 317 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
322 | 318 | ||
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h index aa327a8105ad..b2b1afbb3202 100644 --- a/include/linux/spi/at86rf230.h +++ b/include/linux/spi/at86rf230.h | |||
@@ -26,20 +26,6 @@ struct at86rf230_platform_data { | |||
26 | int rstn; | 26 | int rstn; |
27 | int slp_tr; | 27 | int slp_tr; |
28 | int dig2; | 28 | int dig2; |
29 | |||
30 | /* Setting the irq_type will configure the driver to request | ||
31 | * the platform irq trigger type according to the given value | ||
32 | * and configure the interrupt polarity of the device to the | ||
33 | * corresponding polarity. | ||
34 | * | ||
35 | * Allowed values are: IRQF_TRIGGER_RISING, IRQF_TRIGGER_FALLING, | ||
36 | * IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW | ||
37 | * | ||
38 | * Setting it to 0, the driver does not touch the trigger type | ||
39 | * configuration of the interrupt and sets the interrupt polarity | ||
40 | * of the device to high active (the default value). | ||
41 | */ | ||
42 | int irq_type; | ||
43 | }; | 29 | }; |
44 | 30 | ||
45 | #endif | 31 | #endif |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 0e43906d2fda..da2751d3b93d 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
@@ -70,16 +70,6 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, | |||
70 | splice_actor *); | 70 | splice_actor *); |
71 | extern ssize_t __splice_from_pipe(struct pipe_inode_info *, | 71 | extern ssize_t __splice_from_pipe(struct pipe_inode_info *, |
72 | struct splice_desc *, splice_actor *); | 72 | struct splice_desc *, splice_actor *); |
73 | extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *, | ||
74 | splice_actor *); | ||
75 | extern int splice_from_pipe_next(struct pipe_inode_info *, | ||
76 | struct splice_desc *); | ||
77 | extern void splice_from_pipe_begin(struct splice_desc *); | ||
78 | extern void splice_from_pipe_end(struct pipe_inode_info *, | ||
79 | struct splice_desc *); | ||
80 | extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *, | ||
81 | struct splice_desc *); | ||
82 | |||
83 | extern ssize_t splice_to_pipe(struct pipe_inode_info *, | 73 | extern ssize_t splice_to_pipe(struct pipe_inode_info *, |
84 | struct splice_pipe_desc *); | 74 | struct splice_pipe_desc *); |
85 | extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, | 75 | extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 07ef9b82b66d..4568a5cc9ab8 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -33,6 +33,7 @@ struct ssb_sprom { | |||
33 | u8 et1phyaddr; /* MII address for enet1 */ | 33 | u8 et1phyaddr; /* MII address for enet1 */ |
34 | u8 et0mdcport; /* MDIO for enet0 */ | 34 | u8 et0mdcport; /* MDIO for enet0 */ |
35 | u8 et1mdcport; /* MDIO for enet1 */ | 35 | u8 et1mdcport; /* MDIO for enet1 */ |
36 | u16 dev_id; /* Device ID overriding e.g. PCI ID */ | ||
36 | u16 board_rev; /* Board revision number from SPROM. */ | 37 | u16 board_rev; /* Board revision number from SPROM. */ |
37 | u16 board_num; /* Board number from SPROM. */ | 38 | u16 board_num; /* Board number from SPROM. */ |
38 | u16 board_type; /* Board type from SPROM. */ | 39 | u16 board_type; /* Board type from SPROM. */ |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index ad7dbe2cfecd..1a8959944c5f 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -236,7 +236,7 @@ void * rpc_malloc(struct rpc_task *, size_t); | |||
236 | void rpc_free(void *); | 236 | void rpc_free(void *); |
237 | int rpciod_up(void); | 237 | int rpciod_up(void); |
238 | void rpciod_down(void); | 238 | void rpciod_down(void); |
239 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *)); | 239 | int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); |
240 | #ifdef RPC_DEBUG | 240 | #ifdef RPC_DEBUG |
241 | struct net; | 241 | struct net; |
242 | void rpc_show_tasks(struct net *); | 242 | void rpc_show_tasks(struct net *); |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 04e763221246..1bc7cd05b22e 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -244,6 +244,7 @@ struct svc_rqst { | |||
244 | struct page * rq_pages[RPCSVC_MAXPAGES]; | 244 | struct page * rq_pages[RPCSVC_MAXPAGES]; |
245 | struct page * *rq_respages; /* points into rq_pages */ | 245 | struct page * *rq_respages; /* points into rq_pages */ |
246 | struct page * *rq_next_page; /* next reply page to use */ | 246 | struct page * *rq_next_page; /* next reply page to use */ |
247 | struct page * *rq_page_end; /* one past the last page */ | ||
247 | 248 | ||
248 | struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ | 249 | struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ |
249 | 250 | ||
@@ -254,11 +255,15 @@ struct svc_rqst { | |||
254 | u32 rq_prot; /* IP protocol */ | 255 | u32 rq_prot; /* IP protocol */ |
255 | unsigned short | 256 | unsigned short |
256 | rq_secure : 1; /* secure port */ | 257 | rq_secure : 1; /* secure port */ |
258 | unsigned short rq_local : 1; /* local request */ | ||
257 | 259 | ||
258 | void * rq_argp; /* decoded arguments */ | 260 | void * rq_argp; /* decoded arguments */ |
259 | void * rq_resp; /* xdr'd results */ | 261 | void * rq_resp; /* xdr'd results */ |
260 | void * rq_auth_data; /* flavor-specific data */ | 262 | void * rq_auth_data; /* flavor-specific data */ |
261 | 263 | int rq_auth_slack; /* extra space xdr code | |
264 | * should leave in head | ||
265 | * for krb5i, krb5p. | ||
266 | */ | ||
262 | int rq_reserved; /* space on socket outq | 267 | int rq_reserved; /* space on socket outq |
263 | * reserved for this request | 268 | * reserved for this request |
264 | */ | 269 | */ |
@@ -454,11 +459,7 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t); | |||
454 | */ | 459 | */ |
455 | static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) | 460 | static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) |
456 | { | 461 | { |
457 | int added_space = 0; | 462 | svc_reserve(rqstp, space + rqstp->rq_auth_slack); |
458 | |||
459 | if (rqstp->rq_authop->flavour) | ||
460 | added_space = RPC_MAX_AUTH_SIZE; | ||
461 | svc_reserve(rqstp, space + added_space); | ||
462 | } | 463 | } |
463 | 464 | ||
464 | #endif /* SUNRPC_SVC_H */ | 465 | #endif /* SUNRPC_SVC_H */ |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 0b8e3e6bdacf..5cf99a016368 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
@@ -115,14 +115,13 @@ struct svc_rdma_fastreg_mr { | |||
115 | struct list_head frmr_list; | 115 | struct list_head frmr_list; |
116 | }; | 116 | }; |
117 | struct svc_rdma_req_map { | 117 | struct svc_rdma_req_map { |
118 | struct svc_rdma_fastreg_mr *frmr; | ||
119 | unsigned long count; | 118 | unsigned long count; |
120 | union { | 119 | union { |
121 | struct kvec sge[RPCSVC_MAXPAGES]; | 120 | struct kvec sge[RPCSVC_MAXPAGES]; |
122 | struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; | 121 | struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; |
122 | unsigned long lkey[RPCSVC_MAXPAGES]; | ||
123 | }; | 123 | }; |
124 | }; | 124 | }; |
125 | #define RDMACTXT_F_FAST_UNREG 1 | ||
126 | #define RDMACTXT_F_LAST_CTXT 2 | 125 | #define RDMACTXT_F_LAST_CTXT 2 |
127 | 126 | ||
128 | #define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */ | 127 | #define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */ |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index b05963f09ebf..7235040a19b2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -24,6 +24,7 @@ struct svc_xprt_ops { | |||
24 | void (*xpo_release_rqst)(struct svc_rqst *); | 24 | void (*xpo_release_rqst)(struct svc_rqst *); |
25 | void (*xpo_detach)(struct svc_xprt *); | 25 | void (*xpo_detach)(struct svc_xprt *); |
26 | void (*xpo_free)(struct svc_xprt *); | 26 | void (*xpo_free)(struct svc_xprt *); |
27 | int (*xpo_secure_port)(struct svc_rqst *); | ||
27 | }; | 28 | }; |
28 | 29 | ||
29 | struct svc_xprt_class { | 30 | struct svc_xprt_class { |
@@ -63,6 +64,7 @@ struct svc_xprt { | |||
63 | #define XPT_DETACHED 10 /* detached from tempsocks list */ | 64 | #define XPT_DETACHED 10 /* detached from tempsocks list */ |
64 | #define XPT_LISTENER 11 /* listening endpoint */ | 65 | #define XPT_LISTENER 11 /* listening endpoint */ |
65 | #define XPT_CACHE_AUTH 12 /* cache auth info */ | 66 | #define XPT_CACHE_AUTH 12 /* cache auth info */ |
67 | #define XPT_LOCAL 13 /* connection from loopback interface */ | ||
66 | 68 | ||
67 | struct svc_serv *xpt_server; /* service for transport */ | 69 | struct svc_serv *xpt_server; /* service for transport */ |
68 | atomic_t xpt_reserved; /* space on outq that is rsvd */ | 70 | atomic_t xpt_reserved; /* space on outq that is rsvd */ |
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 15f9204ee70b..70c6b92e15a7 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
@@ -215,6 +215,9 @@ typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj); | |||
215 | 215 | ||
216 | extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); | 216 | extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); |
217 | extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); | 217 | extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); |
218 | extern void xdr_commit_encode(struct xdr_stream *xdr); | ||
219 | extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); | ||
220 | extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); | ||
218 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, | 221 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, |
219 | unsigned int base, unsigned int len); | 222 | unsigned int base, unsigned int len); |
220 | extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); | 223 | extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 3876f0f1dfd3..fcbfe8783243 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -24,6 +24,12 @@ | |||
24 | #define RPC_MAX_SLOT_TABLE_LIMIT (65536U) | 24 | #define RPC_MAX_SLOT_TABLE_LIMIT (65536U) |
25 | #define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT | 25 | #define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT |
26 | 26 | ||
27 | #define RPC_CWNDSHIFT (8U) | ||
28 | #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) | ||
29 | #define RPC_INITCWND RPC_CWNDSCALE | ||
30 | #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) | ||
31 | #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) | ||
32 | |||
27 | /* | 33 | /* |
28 | * This describes a timeout strategy | 34 | * This describes a timeout strategy |
29 | */ | 35 | */ |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f76994b9396c..519064e0c943 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); | |||
327 | extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); | 327 | extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); |
328 | extern int hibernate(void); | 328 | extern int hibernate(void); |
329 | extern bool system_entering_hibernation(void); | 329 | extern bool system_entering_hibernation(void); |
330 | extern bool hibernation_available(void); | ||
330 | asmlinkage int swsusp_save(void); | 331 | asmlinkage int swsusp_save(void); |
331 | extern struct pbe *restore_pblist; | 332 | extern struct pbe *restore_pblist; |
332 | #else /* CONFIG_HIBERNATION */ | 333 | #else /* CONFIG_HIBERNATION */ |
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {} | |||
339 | static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} | 340 | static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} |
340 | static inline int hibernate(void) { return -ENOSYS; } | 341 | static inline int hibernate(void) { return -ENOSYS; } |
341 | static inline bool system_entering_hibernation(void) { return false; } | 342 | static inline bool system_entering_hibernation(void) { return false; } |
343 | static inline bool hibernation_available(void) { return false; } | ||
342 | #endif /* CONFIG_HIBERNATION */ | 344 | #endif /* CONFIG_HIBERNATION */ |
343 | 345 | ||
344 | /* Hibernation and suspend events */ | 346 | /* Hibernation and suspend events */ |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 239946868142..a0513210798f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -197,7 +197,8 @@ struct tcp_sock { | |||
197 | u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ | 197 | u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ |
198 | syn_data:1, /* SYN includes data */ | 198 | syn_data:1, /* SYN includes data */ |
199 | syn_fastopen:1, /* SYN includes Fast Open option */ | 199 | syn_fastopen:1, /* SYN includes Fast Open option */ |
200 | syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ | 200 | syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ |
201 | is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ | ||
201 | u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ | 202 | u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ |
202 | 203 | ||
203 | /* RTT measurement */ | 204 | /* RTT measurement */ |
@@ -209,6 +210,8 @@ struct tcp_sock { | |||
209 | 210 | ||
210 | u32 packets_out; /* Packets which are "in flight" */ | 211 | u32 packets_out; /* Packets which are "in flight" */ |
211 | u32 retrans_out; /* Retransmitted packets out */ | 212 | u32 retrans_out; /* Retransmitted packets out */ |
213 | u32 max_packets_out; /* max packets_out in last window */ | ||
214 | u32 max_packets_seq; /* right edge of max_packets_out flight */ | ||
212 | 215 | ||
213 | u16 urg_data; /* Saved octet of OOB data and control flags */ | 216 | u16 urg_data; /* Saved octet of OOB data and control flags */ |
214 | u8 ecn_flags; /* ECN status bits. */ | 217 | u8 ecn_flags; /* ECN status bits. */ |
@@ -365,11 +368,6 @@ static inline bool tcp_passive_fastopen(const struct sock *sk) | |||
365 | tcp_sk(sk)->fastopen_rsk != NULL); | 368 | tcp_sk(sk)->fastopen_rsk != NULL); |
366 | } | 369 | } |
367 | 370 | ||
368 | static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc) | ||
369 | { | ||
370 | return foc->len != -1; | ||
371 | } | ||
372 | |||
373 | extern void tcp_sock_destruct(struct sock *sk); | 371 | extern void tcp_sock_destruct(struct sock *sk); |
374 | 372 | ||
375 | static inline int fastopen_init_queue(struct sock *sk, int backlog) | 373 | static inline int fastopen_init_queue(struct sock *sk, int backlog) |
diff --git a/include/linux/tick.h b/include/linux/tick.h index b84773cb9f4c..059052306831 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/hrtimer.h> | 12 | #include <linux/hrtimer.h> |
13 | #include <linux/context_tracking_state.h> | 13 | #include <linux/context_tracking_state.h> |
14 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
15 | #include <linux/sched.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 17 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
17 | 18 | ||
@@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } | |||
162 | #ifdef CONFIG_NO_HZ_FULL | 163 | #ifdef CONFIG_NO_HZ_FULL |
163 | extern bool tick_nohz_full_running; | 164 | extern bool tick_nohz_full_running; |
164 | extern cpumask_var_t tick_nohz_full_mask; | 165 | extern cpumask_var_t tick_nohz_full_mask; |
166 | extern cpumask_var_t housekeeping_mask; | ||
165 | 167 | ||
166 | static inline bool tick_nohz_full_enabled(void) | 168 | static inline bool tick_nohz_full_enabled(void) |
167 | { | 169 | { |
@@ -181,7 +183,13 @@ static inline bool tick_nohz_full_cpu(int cpu) | |||
181 | 183 | ||
182 | extern void tick_nohz_init(void); | 184 | extern void tick_nohz_init(void); |
183 | extern void __tick_nohz_full_check(void); | 185 | extern void __tick_nohz_full_check(void); |
184 | extern void tick_nohz_full_kick(void); | 186 | extern void tick_nohz_full_kick_cpu(int cpu); |
187 | |||
188 | static inline void tick_nohz_full_kick(void) | ||
189 | { | ||
190 | tick_nohz_full_kick_cpu(smp_processor_id()); | ||
191 | } | ||
192 | |||
185 | extern void tick_nohz_full_kick_all(void); | 193 | extern void tick_nohz_full_kick_all(void); |
186 | extern void __tick_nohz_task_switch(struct task_struct *tsk); | 194 | extern void __tick_nohz_task_switch(struct task_struct *tsk); |
187 | #else | 195 | #else |
@@ -189,11 +197,30 @@ static inline void tick_nohz_init(void) { } | |||
189 | static inline bool tick_nohz_full_enabled(void) { return false; } | 197 | static inline bool tick_nohz_full_enabled(void) { return false; } |
190 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } | 198 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } |
191 | static inline void __tick_nohz_full_check(void) { } | 199 | static inline void __tick_nohz_full_check(void) { } |
200 | static inline void tick_nohz_full_kick_cpu(int cpu) { } | ||
192 | static inline void tick_nohz_full_kick(void) { } | 201 | static inline void tick_nohz_full_kick(void) { } |
193 | static inline void tick_nohz_full_kick_all(void) { } | 202 | static inline void tick_nohz_full_kick_all(void) { } |
194 | static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } | 203 | static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } |
195 | #endif | 204 | #endif |
196 | 205 | ||
206 | static inline bool is_housekeeping_cpu(int cpu) | ||
207 | { | ||
208 | #ifdef CONFIG_NO_HZ_FULL | ||
209 | if (tick_nohz_full_enabled()) | ||
210 | return cpumask_test_cpu(cpu, housekeeping_mask); | ||
211 | #endif | ||
212 | return true; | ||
213 | } | ||
214 | |||
215 | static inline void housekeeping_affine(struct task_struct *t) | ||
216 | { | ||
217 | #ifdef CONFIG_NO_HZ_FULL | ||
218 | if (tick_nohz_full_enabled()) | ||
219 | set_cpus_allowed_ptr(t, housekeeping_mask); | ||
220 | |||
221 | #endif | ||
222 | } | ||
223 | |||
197 | static inline void tick_nohz_full_check(void) | 224 | static inline void tick_nohz_full_check(void) |
198 | { | 225 | { |
199 | if (tick_nohz_full_enabled()) | 226 | if (tick_nohz_full_enabled()) |
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index a32d86ec8bf2..ea6c9dea79e3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
@@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s) | |||
25 | s->full = 0; | 25 | s->full = 0; |
26 | } | 26 | } |
27 | 27 | ||
28 | /** | ||
29 | * trace_seq_buffer_ptr - return pointer to next location in buffer | ||
30 | * @s: trace sequence descriptor | ||
31 | * | ||
32 | * Returns the pointer to the buffer where the next write to | ||
33 | * the buffer will happen. This is useful to save the location | ||
34 | * that is about to be written to and then return the result | ||
35 | * of that write. | ||
36 | */ | ||
37 | static inline unsigned char * | ||
38 | trace_seq_buffer_ptr(struct trace_seq *s) | ||
39 | { | ||
40 | return s->buffer + s->len; | ||
41 | } | ||
42 | |||
28 | /* | 43 | /* |
29 | * Currently only defined when tracing is enabled. | 44 | * Currently only defined when tracing is enabled. |
30 | */ | 45 | */ |
@@ -36,16 +51,18 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); | |||
36 | extern int | 51 | extern int |
37 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 52 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
38 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); | 53 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
39 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 54 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
40 | size_t cnt); | 55 | int cnt); |
41 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 56 | extern int trace_seq_puts(struct trace_seq *s, const char *str); |
42 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | 57 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); |
43 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); | 58 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); |
44 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 59 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
45 | size_t len); | 60 | unsigned int len); |
46 | extern void *trace_seq_reserve(struct trace_seq *s, size_t len); | ||
47 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); | 61 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); |
48 | 62 | ||
63 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | ||
64 | int nmaskbits); | ||
65 | |||
49 | #else /* CONFIG_TRACING */ | 66 | #else /* CONFIG_TRACING */ |
50 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 67 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
51 | { | 68 | { |
@@ -57,12 +74,19 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | |||
57 | return 0; | 74 | return 0; |
58 | } | 75 | } |
59 | 76 | ||
77 | static inline int | ||
78 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | ||
79 | int nmaskbits) | ||
80 | { | ||
81 | return 0; | ||
82 | } | ||
83 | |||
60 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) | 84 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
61 | { | 85 | { |
62 | return 0; | 86 | return 0; |
63 | } | 87 | } |
64 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 88 | static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
65 | size_t cnt) | 89 | int cnt) |
66 | { | 90 | { |
67 | return 0; | 91 | return 0; |
68 | } | 92 | } |
@@ -75,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | |||
75 | return 0; | 99 | return 0; |
76 | } | 100 | } |
77 | static inline int | 101 | static inline int |
78 | trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | 102 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
79 | { | 103 | { |
80 | return 0; | 104 | return 0; |
81 | } | 105 | } |
82 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 106 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
83 | size_t len) | 107 | unsigned int len) |
84 | { | 108 | { |
85 | return 0; | 109 | return 0; |
86 | } | 110 | } |
87 | static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
88 | { | ||
89 | return NULL; | ||
90 | } | ||
91 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) | 111 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) |
92 | { | 112 | { |
93 | return 0; | 113 | return 0; |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 9d30ee469c2a..2e2a5f7717e5 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -185,6 +185,11 @@ extern void syscall_unregfunc(void); | |||
185 | static inline void \ | 185 | static inline void \ |
186 | check_trace_callback_type_##name(void (*cb)(data_proto)) \ | 186 | check_trace_callback_type_##name(void (*cb)(data_proto)) \ |
187 | { \ | 187 | { \ |
188 | } \ | ||
189 | static inline bool \ | ||
190 | trace_##name##_enabled(void) \ | ||
191 | { \ | ||
192 | return static_key_false(&__tracepoint_##name.key); \ | ||
188 | } | 193 | } |
189 | 194 | ||
190 | /* | 195 | /* |
@@ -230,6 +235,11 @@ extern void syscall_unregfunc(void); | |||
230 | } \ | 235 | } \ |
231 | static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ | 236 | static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ |
232 | { \ | 237 | { \ |
238 | } \ | ||
239 | static inline bool \ | ||
240 | trace_##name##_enabled(void) \ | ||
241 | { \ | ||
242 | return false; \ | ||
233 | } | 243 | } |
234 | 244 | ||
235 | #define DEFINE_TRACE_FN(name, reg, unreg) | 245 | #define DEFINE_TRACE_FN(name, reg, unreg) |
diff --git a/include/linux/udp.h b/include/linux/udp.h index 42278bbf7a88..247cfdcc4b08 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h | |||
@@ -47,7 +47,9 @@ struct udp_sock { | |||
47 | #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node | 47 | #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node |
48 | int pending; /* Any pending frames ? */ | 48 | int pending; /* Any pending frames ? */ |
49 | unsigned int corkflag; /* Cork is required */ | 49 | unsigned int corkflag; /* Cork is required */ |
50 | __u16 encap_type; /* Is this an Encapsulation socket? */ | 50 | __u8 encap_type; /* Is this an Encapsulation socket? */ |
51 | unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ | ||
52 | no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ | ||
51 | /* | 53 | /* |
52 | * Following member retains the information to create a UDP header | 54 | * Following member retains the information to create a UDP header |
53 | * when the socket is uncorked. | 55 | * when the socket is uncorked. |
@@ -76,6 +78,26 @@ static inline struct udp_sock *udp_sk(const struct sock *sk) | |||
76 | return (struct udp_sock *)sk; | 78 | return (struct udp_sock *)sk; |
77 | } | 79 | } |
78 | 80 | ||
81 | static inline void udp_set_no_check6_tx(struct sock *sk, bool val) | ||
82 | { | ||
83 | udp_sk(sk)->no_check6_tx = val; | ||
84 | } | ||
85 | |||
86 | static inline void udp_set_no_check6_rx(struct sock *sk, bool val) | ||
87 | { | ||
88 | udp_sk(sk)->no_check6_rx = val; | ||
89 | } | ||
90 | |||
91 | static inline bool udp_get_no_check6_tx(struct sock *sk) | ||
92 | { | ||
93 | return udp_sk(sk)->no_check6_tx; | ||
94 | } | ||
95 | |||
96 | static inline bool udp_get_no_check6_rx(struct sock *sk) | ||
97 | { | ||
98 | return udp_sk(sk)->no_check6_rx; | ||
99 | } | ||
100 | |||
79 | #define udp_portaddr_for_each_entry(__sk, node, list) \ | 101 | #define udp_portaddr_for_each_entry(__sk, node, list) \ |
80 | hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) | 102 | hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) |
81 | 103 | ||
diff --git a/include/linux/uio.h b/include/linux/uio.h index 199bcc34241b..09a7cffc224e 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -19,11 +19,21 @@ struct kvec { | |||
19 | size_t iov_len; | 19 | size_t iov_len; |
20 | }; | 20 | }; |
21 | 21 | ||
22 | enum { | ||
23 | ITER_IOVEC = 0, | ||
24 | ITER_KVEC = 2, | ||
25 | ITER_BVEC = 4, | ||
26 | }; | ||
27 | |||
22 | struct iov_iter { | 28 | struct iov_iter { |
23 | const struct iovec *iov; | 29 | int type; |
24 | unsigned long nr_segs; | ||
25 | size_t iov_offset; | 30 | size_t iov_offset; |
26 | size_t count; | 31 | size_t count; |
32 | union { | ||
33 | const struct iovec *iov; | ||
34 | const struct bio_vec *bvec; | ||
35 | }; | ||
36 | unsigned long nr_segs; | ||
27 | }; | 37 | }; |
28 | 38 | ||
29 | /* | 39 | /* |
@@ -53,6 +63,7 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) | |||
53 | } | 63 | } |
54 | 64 | ||
55 | #define iov_for_each(iov, iter, start) \ | 65 | #define iov_for_each(iov, iter, start) \ |
66 | if (!((start).type & ITER_BVEC)) \ | ||
56 | for (iter = (start); \ | 67 | for (iter = (start); \ |
57 | (iter).count && \ | 68 | (iter).count && \ |
58 | ((iov = iov_iter_iovec(&(iter))), 1); \ | 69 | ((iov = iov_iter_iovec(&(iter))), 1); \ |
@@ -62,32 +73,59 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); | |||
62 | 73 | ||
63 | size_t iov_iter_copy_from_user_atomic(struct page *page, | 74 | size_t iov_iter_copy_from_user_atomic(struct page *page, |
64 | struct iov_iter *i, unsigned long offset, size_t bytes); | 75 | struct iov_iter *i, unsigned long offset, size_t bytes); |
65 | size_t iov_iter_copy_from_user(struct page *page, | ||
66 | struct iov_iter *i, unsigned long offset, size_t bytes); | ||
67 | void iov_iter_advance(struct iov_iter *i, size_t bytes); | 76 | void iov_iter_advance(struct iov_iter *i, size_t bytes); |
68 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); | 77 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); |
69 | size_t iov_iter_single_seg_count(const struct iov_iter *i); | 78 | size_t iov_iter_single_seg_count(const struct iov_iter *i); |
70 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | 79 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
71 | struct iov_iter *i); | 80 | struct iov_iter *i); |
81 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | ||
82 | struct iov_iter *i); | ||
83 | unsigned long iov_iter_alignment(const struct iov_iter *i); | ||
84 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, | ||
85 | unsigned long nr_segs, size_t count); | ||
86 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, | ||
87 | size_t maxsize, size_t *start); | ||
88 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | ||
89 | size_t maxsize, size_t *start); | ||
90 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | ||
72 | 91 | ||
73 | static inline void iov_iter_init(struct iov_iter *i, | 92 | static inline size_t iov_iter_count(struct iov_iter *i) |
74 | const struct iovec *iov, unsigned long nr_segs, | ||
75 | size_t count, size_t written) | ||
76 | { | 93 | { |
77 | i->iov = iov; | 94 | return i->count; |
78 | i->nr_segs = nr_segs; | 95 | } |
79 | i->iov_offset = 0; | ||
80 | i->count = count + written; | ||
81 | 96 | ||
82 | iov_iter_advance(i, written); | 97 | /* |
98 | * Cap the iov_iter by given limit; note that the second argument is | ||
99 | * *not* the new size - it's upper limit for such. Passing it a value | ||
100 | * greater than the amount of data in iov_iter is fine - it'll just do | ||
101 | * nothing in that case. | ||
102 | */ | ||
103 | static inline void iov_iter_truncate(struct iov_iter *i, u64 count) | ||
104 | { | ||
105 | /* | ||
106 | * count doesn't have to fit in size_t - comparison extends both | ||
107 | * operands to u64 here and any value that would be truncated by | ||
108 | * conversion in assignement is by definition greater than all | ||
109 | * values of size_t, including old i->count. | ||
110 | */ | ||
111 | if (i->count > count) | ||
112 | i->count = count; | ||
83 | } | 113 | } |
84 | 114 | ||
85 | static inline size_t iov_iter_count(struct iov_iter *i) | 115 | /* |
116 | * reexpand a previously truncated iterator; count must be no more than how much | ||
117 | * we had shrunk it. | ||
118 | */ | ||
119 | static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) | ||
86 | { | 120 | { |
87 | return i->count; | 121 | i->count = count; |
88 | } | 122 | } |
89 | 123 | ||
90 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); | 124 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); |
91 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); | 125 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); |
126 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | ||
127 | int offset, int len); | ||
128 | int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | ||
129 | int offset, int len); | ||
92 | 130 | ||
93 | #endif | 131 | #endif |
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index c52f827ba6ce..4f844c6b03ee 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h | |||
@@ -103,6 +103,7 @@ extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, u | |||
103 | extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); | 103 | extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); |
104 | extern bool __weak is_trap_insn(uprobe_opcode_t *insn); | 104 | extern bool __weak is_trap_insn(uprobe_opcode_t *insn); |
105 | extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); | 105 | extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); |
106 | extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); | ||
106 | extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); | 107 | extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); |
107 | extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); | 108 | extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); |
108 | extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); | 109 | extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); |
@@ -133,6 +134,9 @@ extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, | |||
133 | #else /* !CONFIG_UPROBES */ | 134 | #else /* !CONFIG_UPROBES */ |
134 | struct uprobes_state { | 135 | struct uprobes_state { |
135 | }; | 136 | }; |
137 | |||
138 | #define uprobe_get_trap_addr(regs) instruction_pointer(regs) | ||
139 | |||
136 | static inline int | 140 | static inline int |
137 | uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) | 141 | uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) |
138 | { | 142 | { |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 44b38b92236a..7c9b484735c5 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
@@ -52,6 +52,10 @@ | |||
52 | #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ | 52 | #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ |
53 | #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ | 53 | #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ |
54 | 54 | ||
55 | /* Initial NTB length */ | ||
56 | #define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */ | ||
57 | #define CDC_NCM_NTB_DEF_SIZE_RX 16384 /* bytes */ | ||
58 | |||
55 | /* Minimum value for MaxDatagramSize, ch. 6.2.9 */ | 59 | /* Minimum value for MaxDatagramSize, ch. 6.2.9 */ |
56 | #define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */ | 60 | #define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */ |
57 | 61 | ||
@@ -72,16 +76,9 @@ | |||
72 | /* Restart the timer, if amount of datagrams is less than given value */ | 76 | /* Restart the timer, if amount of datagrams is less than given value */ |
73 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 | 77 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 |
74 | #define CDC_NCM_TIMER_PENDING_CNT 2 | 78 | #define CDC_NCM_TIMER_PENDING_CNT 2 |
75 | #define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC) | 79 | #define CDC_NCM_TIMER_INTERVAL_USEC 400UL |
76 | 80 | #define CDC_NCM_TIMER_INTERVAL_MIN 5UL | |
77 | /* The following macro defines the minimum header space */ | 81 | #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) |
78 | #define CDC_NCM_MIN_HDR_SIZE \ | ||
79 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ | ||
80 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) | ||
81 | |||
82 | #define CDC_NCM_NDP_SIZE \ | ||
83 | (sizeof(struct usb_cdc_ncm_ndp16) + \ | ||
84 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) | ||
85 | 82 | ||
86 | #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ | 83 | #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ |
87 | (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) | 84 | (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) |
@@ -107,6 +104,9 @@ struct cdc_ncm_ctx { | |||
107 | spinlock_t mtx; | 104 | spinlock_t mtx; |
108 | atomic_t stop; | 105 | atomic_t stop; |
109 | 106 | ||
107 | u32 timer_interval; | ||
108 | u32 max_ndp_size; | ||
109 | |||
110 | u32 tx_timer_pending; | 110 | u32 tx_timer_pending; |
111 | u32 tx_curr_frame_num; | 111 | u32 tx_curr_frame_num; |
112 | u32 rx_max; | 112 | u32 rx_max; |
@@ -118,10 +118,21 @@ struct cdc_ncm_ctx { | |||
118 | u16 tx_ndp_modulus; | 118 | u16 tx_ndp_modulus; |
119 | u16 tx_seq; | 119 | u16 tx_seq; |
120 | u16 rx_seq; | 120 | u16 rx_seq; |
121 | u16 connected; | 121 | u16 min_tx_pkt; |
122 | |||
123 | /* statistics */ | ||
124 | u32 tx_curr_frame_payload; | ||
125 | u32 tx_reason_ntb_full; | ||
126 | u32 tx_reason_ndp_full; | ||
127 | u32 tx_reason_timeout; | ||
128 | u32 tx_reason_max_datagram; | ||
129 | u64 tx_overhead; | ||
130 | u64 tx_ntbs; | ||
131 | u64 rx_overhead; | ||
132 | u64 rx_ntbs; | ||
122 | }; | 133 | }; |
123 | 134 | ||
124 | u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); | 135 | u8 cdc_ncm_select_altsetting(struct usb_interface *intf); |
125 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); | 136 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); |
126 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); | 137 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); |
127 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); | 138 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); |
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 1a64b26046ed..9b7de1b46437 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h | |||
@@ -70,7 +70,9 @@ | |||
70 | US_FLAG(NEEDS_CAP16, 0x00400000) \ | 70 | US_FLAG(NEEDS_CAP16, 0x00400000) \ |
71 | /* cannot handle READ_CAPACITY_10 */ \ | 71 | /* cannot handle READ_CAPACITY_10 */ \ |
72 | US_FLAG(IGNORE_UAS, 0x00800000) \ | 72 | US_FLAG(IGNORE_UAS, 0x00800000) \ |
73 | /* Device advertises UAS but it is broken */ | 73 | /* Device advertises UAS but it is broken */ \ |
74 | US_FLAG(BROKEN_FUA, 0x01000000) \ | ||
75 | /* Cannot handle FUA in WRITE or READ CDBs */ \ | ||
74 | 76 | ||
75 | #define US_FLAG(name, value) US_FL_##name = value , | 77 | #define US_FLAG(name, value) US_FL_##name = value , |
76 | enum { US_DO_ALL_FLAGS }; | 78 | enum { US_DO_ALL_FLAGS }; |
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index e4abb84199be..b46671e28de2 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
@@ -106,6 +106,8 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev) | |||
106 | int register_virtio_device(struct virtio_device *dev); | 106 | int register_virtio_device(struct virtio_device *dev); |
107 | void unregister_virtio_device(struct virtio_device *dev); | 107 | void unregister_virtio_device(struct virtio_device *dev); |
108 | 108 | ||
109 | void virtio_break_device(struct virtio_device *dev); | ||
110 | |||
109 | /** | 111 | /** |
110 | * virtio_driver - operations for a virtio I/O driver | 112 | * virtio_driver - operations for a virtio I/O driver |
111 | * @driver: underlying device driver (populate name and owner). | 113 | * @driver: underlying device driver (populate name and owner). |
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h index 4195b97a3def..de429d1f4357 100644 --- a/include/linux/virtio_scsi.h +++ b/include/linux/virtio_scsi.h | |||
@@ -35,11 +35,23 @@ struct virtio_scsi_cmd_req { | |||
35 | u8 lun[8]; /* Logical Unit Number */ | 35 | u8 lun[8]; /* Logical Unit Number */ |
36 | u64 tag; /* Command identifier */ | 36 | u64 tag; /* Command identifier */ |
37 | u8 task_attr; /* Task attribute */ | 37 | u8 task_attr; /* Task attribute */ |
38 | u8 prio; | 38 | u8 prio; /* SAM command priority field */ |
39 | u8 crn; | 39 | u8 crn; |
40 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | 40 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; |
41 | } __packed; | 41 | } __packed; |
42 | 42 | ||
43 | /* SCSI command request, followed by protection information */ | ||
44 | struct virtio_scsi_cmd_req_pi { | ||
45 | u8 lun[8]; /* Logical Unit Number */ | ||
46 | u64 tag; /* Command identifier */ | ||
47 | u8 task_attr; /* Task attribute */ | ||
48 | u8 prio; /* SAM command priority field */ | ||
49 | u8 crn; | ||
50 | u32 pi_bytesout; /* DataOUT PI Number of bytes */ | ||
51 | u32 pi_bytesin; /* DataIN PI Number of bytes */ | ||
52 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | ||
53 | } __packed; | ||
54 | |||
43 | /* Response, followed by sense data and data-in */ | 55 | /* Response, followed by sense data and data-in */ |
44 | struct virtio_scsi_cmd_resp { | 56 | struct virtio_scsi_cmd_resp { |
45 | u32 sense_len; /* Sense data length */ | 57 | u32 sense_len; /* Sense data length */ |
@@ -97,6 +109,7 @@ struct virtio_scsi_config { | |||
97 | #define VIRTIO_SCSI_F_INOUT 0 | 109 | #define VIRTIO_SCSI_F_INOUT 0 |
98 | #define VIRTIO_SCSI_F_HOTPLUG 1 | 110 | #define VIRTIO_SCSI_F_HOTPLUG 1 |
99 | #define VIRTIO_SCSI_F_CHANGE 2 | 111 | #define VIRTIO_SCSI_F_CHANGE 2 |
112 | #define VIRTIO_SCSI_F_T10_PI 3 | ||
100 | 113 | ||
101 | /* Response codes */ | 114 | /* Response codes */ |
102 | #define VIRTIO_SCSI_S_OK 0 | 115 | #define VIRTIO_SCSI_S_OK 0 |
diff --git a/include/linux/wait.h b/include/linux/wait.h index bd68819f0815..6fb1ba5f9b2f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -25,6 +25,7 @@ struct wait_bit_key { | |||
25 | void *flags; | 25 | void *flags; |
26 | int bit_nr; | 26 | int bit_nr; |
27 | #define WAIT_ATOMIC_T_BIT_NR -1 | 27 | #define WAIT_ATOMIC_T_BIT_NR -1 |
28 | unsigned long private; | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | struct wait_bit_queue { | 31 | struct wait_bit_queue { |
@@ -141,18 +142,19 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) | |||
141 | list_del(&old->task_list); | 142 | list_del(&old->task_list); |
142 | } | 143 | } |
143 | 144 | ||
145 | typedef int wait_bit_action_f(struct wait_bit_key *); | ||
144 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 146 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
145 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 147 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
146 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 148 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
147 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); | 149 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); |
148 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | 150 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); |
149 | void __wake_up_bit(wait_queue_head_t *, void *, int); | 151 | void __wake_up_bit(wait_queue_head_t *, void *, int); |
150 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 152 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); |
151 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 153 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); |
152 | void wake_up_bit(void *, int); | 154 | void wake_up_bit(void *, int); |
153 | void wake_up_atomic_t(atomic_t *); | 155 | void wake_up_atomic_t(atomic_t *); |
154 | int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); | 156 | int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); |
155 | int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); | 157 | int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); |
156 | int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); | 158 | int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); |
157 | wait_queue_head_t *bit_waitqueue(void *, int); | 159 | wait_queue_head_t *bit_waitqueue(void *, int); |
158 | 160 | ||
@@ -854,11 +856,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
854 | (wait)->flags = 0; \ | 856 | (wait)->flags = 0; \ |
855 | } while (0) | 857 | } while (0) |
856 | 858 | ||
859 | |||
860 | extern int bit_wait(struct wait_bit_key *); | ||
861 | extern int bit_wait_io(struct wait_bit_key *); | ||
862 | |||
857 | /** | 863 | /** |
858 | * wait_on_bit - wait for a bit to be cleared | 864 | * wait_on_bit - wait for a bit to be cleared |
859 | * @word: the word being waited on, a kernel virtual address | 865 | * @word: the word being waited on, a kernel virtual address |
860 | * @bit: the bit of the word being waited on | 866 | * @bit: the bit of the word being waited on |
861 | * @action: the function used to sleep, which may take special actions | ||
862 | * @mode: the task state to sleep in | 867 | * @mode: the task state to sleep in |
863 | * | 868 | * |
864 | * There is a standard hashed waitqueue table for generic use. This | 869 | * There is a standard hashed waitqueue table for generic use. This |
@@ -867,9 +872,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
867 | * call wait_on_bit() in threads waiting for the bit to clear. | 872 | * call wait_on_bit() in threads waiting for the bit to clear. |
868 | * One uses wait_on_bit() where one is waiting for the bit to clear, | 873 | * One uses wait_on_bit() where one is waiting for the bit to clear, |
869 | * but has no intention of setting it. | 874 | * but has no intention of setting it. |
875 | * Returned value will be zero if the bit was cleared, or non-zero | ||
876 | * if the process received a signal and the mode permitted wakeup | ||
877 | * on that signal. | ||
878 | */ | ||
879 | static inline int | ||
880 | wait_on_bit(void *word, int bit, unsigned mode) | ||
881 | { | ||
882 | if (!test_bit(bit, word)) | ||
883 | return 0; | ||
884 | return out_of_line_wait_on_bit(word, bit, | ||
885 | bit_wait, | ||
886 | mode); | ||
887 | } | ||
888 | |||
889 | /** | ||
890 | * wait_on_bit_io - wait for a bit to be cleared | ||
891 | * @word: the word being waited on, a kernel virtual address | ||
892 | * @bit: the bit of the word being waited on | ||
893 | * @mode: the task state to sleep in | ||
894 | * | ||
895 | * Use the standard hashed waitqueue table to wait for a bit | ||
896 | * to be cleared. This is similar to wait_on_bit(), but calls | ||
897 | * io_schedule() instead of schedule() for the actual waiting. | ||
898 | * | ||
899 | * Returned value will be zero if the bit was cleared, or non-zero | ||
900 | * if the process received a signal and the mode permitted wakeup | ||
901 | * on that signal. | ||
902 | */ | ||
903 | static inline int | ||
904 | wait_on_bit_io(void *word, int bit, unsigned mode) | ||
905 | { | ||
906 | if (!test_bit(bit, word)) | ||
907 | return 0; | ||
908 | return out_of_line_wait_on_bit(word, bit, | ||
909 | bit_wait_io, | ||
910 | mode); | ||
911 | } | ||
912 | |||
913 | /** | ||
914 | * wait_on_bit_action - wait for a bit to be cleared | ||
915 | * @word: the word being waited on, a kernel virtual address | ||
916 | * @bit: the bit of the word being waited on | ||
917 | * @action: the function used to sleep, which may take special actions | ||
918 | * @mode: the task state to sleep in | ||
919 | * | ||
920 | * Use the standard hashed waitqueue table to wait for a bit | ||
921 | * to be cleared, and allow the waiting action to be specified. | ||
922 | * This is like wait_on_bit() but allows fine control of how the waiting | ||
923 | * is done. | ||
924 | * | ||
925 | * Returned value will be zero if the bit was cleared, or non-zero | ||
926 | * if the process received a signal and the mode permitted wakeup | ||
927 | * on that signal. | ||
870 | */ | 928 | */ |
871 | static inline int | 929 | static inline int |
872 | wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) | 930 | wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) |
873 | { | 931 | { |
874 | if (!test_bit(bit, word)) | 932 | if (!test_bit(bit, word)) |
875 | return 0; | 933 | return 0; |
@@ -880,7 +938,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) | |||
880 | * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it | 938 | * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it |
881 | * @word: the word being waited on, a kernel virtual address | 939 | * @word: the word being waited on, a kernel virtual address |
882 | * @bit: the bit of the word being waited on | 940 | * @bit: the bit of the word being waited on |
883 | * @action: the function used to sleep, which may take special actions | ||
884 | * @mode: the task state to sleep in | 941 | * @mode: the task state to sleep in |
885 | * | 942 | * |
886 | * There is a standard hashed waitqueue table for generic use. This | 943 | * There is a standard hashed waitqueue table for generic use. This |
@@ -891,9 +948,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) | |||
891 | * wait_on_bit() in threads waiting to be able to set the bit. | 948 | * wait_on_bit() in threads waiting to be able to set the bit. |
892 | * One uses wait_on_bit_lock() where one is waiting for the bit to | 949 | * One uses wait_on_bit_lock() where one is waiting for the bit to |
893 | * clear with the intention of setting it, and when done, clearing it. | 950 | * clear with the intention of setting it, and when done, clearing it. |
951 | * | ||
952 | * Returns zero if the bit was (eventually) found to be clear and was | ||
953 | * set. Returns non-zero if a signal was delivered to the process and | ||
954 | * the @mode allows that signal to wake the process. | ||
955 | */ | ||
956 | static inline int | ||
957 | wait_on_bit_lock(void *word, int bit, unsigned mode) | ||
958 | { | ||
959 | if (!test_and_set_bit(bit, word)) | ||
960 | return 0; | ||
961 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); | ||
962 | } | ||
963 | |||
964 | /** | ||
965 | * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it | ||
966 | * @word: the word being waited on, a kernel virtual address | ||
967 | * @bit: the bit of the word being waited on | ||
968 | * @mode: the task state to sleep in | ||
969 | * | ||
970 | * Use the standard hashed waitqueue table to wait for a bit | ||
971 | * to be cleared and then to atomically set it. This is similar | ||
972 | * to wait_on_bit(), but calls io_schedule() instead of schedule() | ||
973 | * for the actual waiting. | ||
974 | * | ||
975 | * Returns zero if the bit was (eventually) found to be clear and was | ||
976 | * set. Returns non-zero if a signal was delivered to the process and | ||
977 | * the @mode allows that signal to wake the process. | ||
978 | */ | ||
979 | static inline int | ||
980 | wait_on_bit_lock_io(void *word, int bit, unsigned mode) | ||
981 | { | ||
982 | if (!test_and_set_bit(bit, word)) | ||
983 | return 0; | ||
984 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); | ||
985 | } | ||
986 | |||
987 | /** | ||
988 | * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it | ||
989 | * @word: the word being waited on, a kernel virtual address | ||
990 | * @bit: the bit of the word being waited on | ||
991 | * @action: the function used to sleep, which may take special actions | ||
992 | * @mode: the task state to sleep in | ||
993 | * | ||
994 | * Use the standard hashed waitqueue table to wait for a bit | ||
995 | * to be cleared and then to set it, and allow the waiting action | ||
996 | * to be specified. | ||
997 | * This is like wait_on_bit() but allows fine control of how the waiting | ||
998 | * is done. | ||
999 | * | ||
1000 | * Returns zero if the bit was (eventually) found to be clear and was | ||
1001 | * set. Returns non-zero if a signal was delivered to the process and | ||
1002 | * the @mode allows that signal to wake the process. | ||
894 | */ | 1003 | */ |
895 | static inline int | 1004 | static inline int |
896 | wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode) | 1005 | wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) |
897 | { | 1006 | { |
898 | if (!test_and_set_bit(bit, word)) | 1007 | if (!test_and_set_bit(bit, word)) |
899 | return 0; | 1008 | return 0; |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1b22c42e9c2d..a0cc2e95ed1b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -56,9 +56,8 @@ enum { | |||
56 | WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, | 56 | WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, |
57 | WORK_NO_COLOR = WORK_NR_COLORS, | 57 | WORK_NO_COLOR = WORK_NR_COLORS, |
58 | 58 | ||
59 | /* special cpu IDs */ | 59 | /* not bound to any CPU, prefer the local CPU */ |
60 | WORK_CPU_UNBOUND = NR_CPUS, | 60 | WORK_CPU_UNBOUND = NR_CPUS, |
61 | WORK_CPU_END = NR_CPUS + 1, | ||
62 | 61 | ||
63 | /* | 62 | /* |
64 | * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. | 63 | * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. |
@@ -274,13 +273,6 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
274 | #define delayed_work_pending(w) \ | 273 | #define delayed_work_pending(w) \ |
275 | work_pending(&(w)->work) | 274 | work_pending(&(w)->work) |
276 | 275 | ||
277 | /** | ||
278 | * work_clear_pending - for internal use only, mark a work item as not pending | ||
279 | * @work: The work item in question | ||
280 | */ | ||
281 | #define work_clear_pending(work) \ | ||
282 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) | ||
283 | |||
284 | /* | 276 | /* |
285 | * Workqueue flags and constants. For details, please refer to | 277 | * Workqueue flags and constants. For details, please refer to |
286 | * Documentation/workqueue.txt. | 278 | * Documentation/workqueue.txt. |
@@ -340,6 +332,9 @@ enum { | |||
340 | * short queue flush time. Don't queue works which can run for too | 332 | * short queue flush time. Don't queue works which can run for too |
341 | * long. | 333 | * long. |
342 | * | 334 | * |
335 | * system_highpri_wq is similar to system_wq but for work items which | ||
336 | * require WQ_HIGHPRI. | ||
337 | * | ||
343 | * system_long_wq is similar to system_wq but may host long running | 338 | * system_long_wq is similar to system_wq but may host long running |
344 | * works. Queue flushing might take relatively long. | 339 | * works. Queue flushing might take relatively long. |
345 | * | 340 | * |
@@ -358,26 +353,13 @@ enum { | |||
358 | * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. | 353 | * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. |
359 | */ | 354 | */ |
360 | extern struct workqueue_struct *system_wq; | 355 | extern struct workqueue_struct *system_wq; |
356 | extern struct workqueue_struct *system_highpri_wq; | ||
361 | extern struct workqueue_struct *system_long_wq; | 357 | extern struct workqueue_struct *system_long_wq; |
362 | extern struct workqueue_struct *system_unbound_wq; | 358 | extern struct workqueue_struct *system_unbound_wq; |
363 | extern struct workqueue_struct *system_freezable_wq; | 359 | extern struct workqueue_struct *system_freezable_wq; |
364 | extern struct workqueue_struct *system_power_efficient_wq; | 360 | extern struct workqueue_struct *system_power_efficient_wq; |
365 | extern struct workqueue_struct *system_freezable_power_efficient_wq; | 361 | extern struct workqueue_struct *system_freezable_power_efficient_wq; |
366 | 362 | ||
367 | static inline struct workqueue_struct * __deprecated __system_nrt_wq(void) | ||
368 | { | ||
369 | return system_wq; | ||
370 | } | ||
371 | |||
372 | static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void) | ||
373 | { | ||
374 | return system_freezable_wq; | ||
375 | } | ||
376 | |||
377 | /* equivlalent to system_wq and system_freezable_wq, deprecated */ | ||
378 | #define system_nrt_wq __system_nrt_wq() | ||
379 | #define system_nrt_freezable_wq __system_nrt_freezable_wq() | ||
380 | |||
381 | extern struct workqueue_struct * | 363 | extern struct workqueue_struct * |
382 | __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | 364 | __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, |
383 | struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); | 365 | struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); |
@@ -587,18 +569,6 @@ static inline bool keventd_up(void) | |||
587 | return system_wq != NULL; | 569 | return system_wq != NULL; |
588 | } | 570 | } |
589 | 571 | ||
590 | /* used to be different but now identical to flush_work(), deprecated */ | ||
591 | static inline bool __deprecated flush_work_sync(struct work_struct *work) | ||
592 | { | ||
593 | return flush_work(work); | ||
594 | } | ||
595 | |||
596 | /* used to be different but now identical to flush_delayed_work(), deprecated */ | ||
597 | static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork) | ||
598 | { | ||
599 | return flush_delayed_work(dwork); | ||
600 | } | ||
601 | |||
602 | #ifndef CONFIG_SMP | 572 | #ifndef CONFIG_SMP |
603 | static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | 573 | static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) |
604 | { | 574 | { |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5777c13849ba..a219be961c0a 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -90,7 +90,6 @@ struct writeback_control { | |||
90 | * fs/fs-writeback.c | 90 | * fs/fs-writeback.c |
91 | */ | 91 | */ |
92 | struct bdi_writeback; | 92 | struct bdi_writeback; |
93 | int inode_wait(void *); | ||
94 | void writeback_inodes_sb(struct super_block *, enum wb_reason reason); | 93 | void writeback_inodes_sb(struct super_block *, enum wb_reason reason); |
95 | void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, | 94 | void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, |
96 | enum wb_reason reason); | 95 | enum wb_reason reason); |
@@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode); | |||
105 | static inline void wait_on_inode(struct inode *inode) | 104 | static inline void wait_on_inode(struct inode *inode) |
106 | { | 105 | { |
107 | might_sleep(); | 106 | might_sleep(); |
108 | wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); | 107 | wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); |
109 | } | 108 | } |
110 | 109 | ||
111 | /* | 110 | /* |