diff options
Diffstat (limited to 'include/linux')
304 files changed, 7617 insertions, 5869 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 61a3d90f32b3..5b36974ed60a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | |||
| 56 | acpi_fwnode_handle(adev) : NULL) | 56 | acpi_fwnode_handle(adev) : NULL) |
| 57 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) | 57 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
| 58 | 58 | ||
| 59 | static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) | ||
| 60 | { | ||
| 61 | struct fwnode_handle *fwnode; | ||
| 62 | |||
| 63 | fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); | ||
| 64 | if (!fwnode) | ||
| 65 | return NULL; | ||
| 66 | |||
| 67 | fwnode->type = FWNODE_ACPI_STATIC; | ||
| 68 | |||
| 69 | return fwnode; | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) | ||
| 73 | { | ||
| 74 | if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC)) | ||
| 75 | return; | ||
| 76 | |||
| 77 | kfree(fwnode); | ||
| 78 | } | ||
| 79 | |||
| 59 | /** | 80 | /** |
| 60 | * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with | 81 | * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with |
| 61 | * the PCI-defined class-code information | 82 | * the PCI-defined class-code information |
| @@ -220,10 +241,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size, | |||
| 220 | int entry_id, | 241 | int entry_id, |
| 221 | acpi_tbl_entry_handler handler, | 242 | acpi_tbl_entry_handler handler, |
| 222 | unsigned int max_entries); | 243 | unsigned int max_entries); |
| 223 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | ||
| 224 | int entry_id, | ||
| 225 | acpi_tbl_entry_handler handler, | ||
| 226 | unsigned int max_entries); | ||
| 227 | int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, | 244 | int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, |
| 228 | struct acpi_subtable_proc *proc, int proc_num, | 245 | struct acpi_subtable_proc *proc, int proc_num, |
| 229 | unsigned int max_entries); | 246 | unsigned int max_entries); |
| @@ -420,6 +437,8 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, | |||
| 420 | return acpi_dev_filter_resource_type(ares, (unsigned long)arg); | 437 | return acpi_dev_filter_resource_type(ares, (unsigned long)arg); |
| 421 | } | 438 | } |
| 422 | 439 | ||
| 440 | struct acpi_device *acpi_resource_consumer(struct resource *res); | ||
| 441 | |||
| 423 | int acpi_check_resource_conflict(const struct resource *res); | 442 | int acpi_check_resource_conflict(const struct resource *res); |
| 424 | 443 | ||
| 425 | int acpi_check_region(resource_size_t start, resource_size_t n, | 444 | int acpi_check_region(resource_size_t start, resource_size_t n, |
| @@ -469,6 +488,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | |||
| 469 | #define OSC_SB_CPCV2_SUPPORT 0x00000040 | 488 | #define OSC_SB_CPCV2_SUPPORT 0x00000040 |
| 470 | #define OSC_SB_PCLPI_SUPPORT 0x00000080 | 489 | #define OSC_SB_PCLPI_SUPPORT 0x00000080 |
| 471 | #define OSC_SB_OSLPI_SUPPORT 0x00000100 | 490 | #define OSC_SB_OSLPI_SUPPORT 0x00000100 |
| 491 | #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 | ||
| 472 | 492 | ||
| 473 | extern bool osc_sb_apei_support_acked; | 493 | extern bool osc_sb_apei_support_acked; |
| 474 | extern bool osc_pc_lpi_support_confirmed; | 494 | extern bool osc_pc_lpi_support_confirmed; |
| @@ -744,6 +764,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) | |||
| 744 | return DEV_DMA_NOT_SUPPORTED; | 764 | return DEV_DMA_NOT_SUPPORTED; |
| 745 | } | 765 | } |
| 746 | 766 | ||
| 767 | static inline void acpi_dma_configure(struct device *dev, | ||
| 768 | enum dev_dma_attr attr) { } | ||
| 769 | |||
| 770 | static inline void acpi_dma_deconfigure(struct device *dev) { } | ||
| 771 | |||
| 747 | #define ACPI_PTR(_ptr) (NULL) | 772 | #define ACPI_PTR(_ptr) (NULL) |
| 748 | 773 | ||
| 749 | static inline void acpi_device_set_enumerated(struct acpi_device *adev) | 774 | static inline void acpi_device_set_enumerated(struct acpi_device *adev) |
| @@ -764,6 +789,11 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) | |||
| 764 | return -EINVAL; | 789 | return -EINVAL; |
| 765 | } | 790 | } |
| 766 | 791 | ||
| 792 | static inline struct acpi_device *acpi_resource_consumer(struct resource *res) | ||
| 793 | { | ||
| 794 | return NULL; | ||
| 795 | } | ||
| 796 | |||
| 767 | #endif /* !CONFIG_ACPI */ | 797 | #endif /* !CONFIG_ACPI */ |
| 768 | 798 | ||
| 769 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | 799 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 0e32dac8fd03..77e08099e554 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h | |||
| @@ -23,20 +23,36 @@ | |||
| 23 | #include <linux/fwnode.h> | 23 | #include <linux/fwnode.h> |
| 24 | #include <linux/irqdomain.h> | 24 | #include <linux/irqdomain.h> |
| 25 | 25 | ||
| 26 | #define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) | ||
| 27 | #define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) | ||
| 28 | |||
| 26 | int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); | 29 | int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); |
| 27 | void iort_deregister_domain_token(int trans_id); | 30 | void iort_deregister_domain_token(int trans_id); |
| 28 | struct fwnode_handle *iort_find_domain_token(int trans_id); | 31 | struct fwnode_handle *iort_find_domain_token(int trans_id); |
| 29 | #ifdef CONFIG_ACPI_IORT | 32 | #ifdef CONFIG_ACPI_IORT |
| 30 | void acpi_iort_init(void); | 33 | void acpi_iort_init(void); |
| 34 | bool iort_node_match(u8 type); | ||
| 31 | u32 iort_msi_map_rid(struct device *dev, u32 req_id); | 35 | u32 iort_msi_map_rid(struct device *dev, u32 req_id); |
| 32 | struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); | 36 | struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); |
| 37 | /* IOMMU interface */ | ||
| 38 | void iort_set_dma_mask(struct device *dev); | ||
| 39 | const struct iommu_ops *iort_iommu_configure(struct device *dev); | ||
| 33 | #else | 40 | #else |
| 34 | static inline void acpi_iort_init(void) { } | 41 | static inline void acpi_iort_init(void) { } |
| 42 | static inline bool iort_node_match(u8 type) { return false; } | ||
| 35 | static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) | 43 | static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) |
| 36 | { return req_id; } | 44 | { return req_id; } |
| 37 | static inline struct irq_domain *iort_get_device_domain(struct device *dev, | 45 | static inline struct irq_domain *iort_get_device_domain(struct device *dev, |
| 38 | u32 req_id) | 46 | u32 req_id) |
| 39 | { return NULL; } | 47 | { return NULL; } |
| 48 | /* IOMMU interface */ | ||
| 49 | static inline void iort_set_dma_mask(struct device *dev) { } | ||
| 50 | static inline | ||
| 51 | const struct iommu_ops *iort_iommu_configure(struct device *dev) | ||
| 52 | { return NULL; } | ||
| 40 | #endif | 53 | #endif |
| 41 | 54 | ||
| 55 | #define IORT_ACPI_DECLARE(name, table_id, fn) \ | ||
| 56 | ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn) | ||
| 57 | |||
| 42 | #endif /* __ACPI_IORT_H__ */ | 58 | #endif /* __ACPI_IORT_H__ */ |
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h new file mode 100644 index 000000000000..62be3a40239d --- /dev/null +++ b/include/linux/ahci-remap.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | #ifndef _LINUX_AHCI_REMAP_H | ||
| 2 | #define _LINUX_AHCI_REMAP_H | ||
| 3 | |||
| 4 | #include <linux/sizes.h> | ||
| 5 | |||
| 6 | #define AHCI_VSCAP 0xa4 | ||
| 7 | #define AHCI_REMAP_CAP 0x800 | ||
| 8 | |||
| 9 | /* device class code */ | ||
| 10 | #define AHCI_REMAP_N_DCC 0x880 | ||
| 11 | |||
| 12 | /* remap-device base relative to ahci-bar */ | ||
| 13 | #define AHCI_REMAP_N_OFFSET SZ_16K | ||
| 14 | #define AHCI_REMAP_N_SIZE SZ_16K | ||
| 15 | |||
| 16 | #define AHCI_MAX_REMAP 3 | ||
| 17 | |||
| 18 | static inline unsigned int ahci_remap_dcc(int i) | ||
| 19 | { | ||
| 20 | return AHCI_REMAP_N_DCC + i * 0x80; | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline unsigned int ahci_remap_base(int i) | ||
| 24 | { | ||
| 25 | return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE; | ||
| 26 | } | ||
| 27 | |||
| 28 | #endif /* _LINUX_AHCI_REMAP_H */ | ||
diff --git a/include/linux/aio.h b/include/linux/aio.h index 9eb42dbc5582..fdd0a343f455 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
| @@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *); | |||
| 14 | /* prototypes */ | 14 | /* prototypes */ |
| 15 | #ifdef CONFIG_AIO | 15 | #ifdef CONFIG_AIO |
| 16 | extern void exit_aio(struct mm_struct *mm); | 16 | extern void exit_aio(struct mm_struct *mm); |
| 17 | extern long do_io_submit(aio_context_t ctx_id, long nr, | ||
| 18 | struct iocb __user *__user *iocbpp, bool compat); | ||
| 19 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); | 17 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); |
| 20 | #else | 18 | #else |
| 21 | static inline void exit_aio(struct mm_struct *mm) { } | 19 | static inline void exit_aio(struct mm_struct *mm) { } |
| 22 | static inline long do_io_submit(aio_context_t ctx_id, long nr, | ||
| 23 | struct iocb __user * __user *iocbpp, | ||
| 24 | bool compat) { return 0; } | ||
| 25 | static inline void kiocb_set_cancel_fn(struct kiocb *req, | 20 | static inline void kiocb_set_cancel_fn(struct kiocb *req, |
| 26 | kiocb_cancel_fn *cancel) { } | 21 | kiocb_cancel_fn *cancel) { } |
| 27 | #endif /* CONFIG_AIO */ | 22 | #endif /* CONFIG_AIO */ |
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9d8031257a90..c70aac13244a 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h | |||
| @@ -10,7 +10,12 @@ enum alarmtimer_type { | |||
| 10 | ALARM_REALTIME, | 10 | ALARM_REALTIME, |
| 11 | ALARM_BOOTTIME, | 11 | ALARM_BOOTTIME, |
| 12 | 12 | ||
| 13 | /* Supported types end here */ | ||
| 13 | ALARM_NUMTYPE, | 14 | ALARM_NUMTYPE, |
| 15 | |||
| 16 | /* Used for tracing information. No usable types. */ | ||
| 17 | ALARM_REALTIME_FREEZER, | ||
| 18 | ALARM_BOOTTIME_FREEZER, | ||
| 14 | }; | 19 | }; |
| 15 | 20 | ||
| 16 | enum alarmtimer_restart { | 21 | enum alarmtimer_restart { |
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h deleted file mode 100644 index fb83c0453489..000000000000 --- a/include/linux/amba/pl061.h +++ /dev/null | |||
| @@ -1,16 +0,0 @@ | |||
| 1 | #include <linux/types.h> | ||
| 2 | |||
| 3 | /* platform data for the PL061 GPIO driver */ | ||
| 4 | |||
| 5 | struct pl061_platform_data { | ||
| 6 | /* number of the first GPIO */ | ||
| 7 | unsigned gpio_base; | ||
| 8 | |||
| 9 | /* number of the first IRQ. | ||
| 10 | * If the IRQ functionality in not desired this must be set to 0. | ||
| 11 | */ | ||
| 12 | unsigned irq_base; | ||
| 13 | |||
| 14 | u8 directions; /* startup directions, 1: out, 0: in */ | ||
| 15 | u8 values; /* startup values */ | ||
| 16 | }; | ||
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 27e9ec8778eb..5308eae9ce35 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
| @@ -84,6 +84,8 @@ struct pl08x_channel_data { | |||
| 84 | * running any DMA transfer and multiplexing can be recycled | 84 | * running any DMA transfer and multiplexing can be recycled |
| 85 | * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 | 85 | * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 |
| 86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 | 86 | * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 |
| 87 | * @slave_map: DMA slave matching table | ||
| 88 | * @slave_map_len: number of elements in @slave_map | ||
| 87 | */ | 89 | */ |
| 88 | struct pl08x_platform_data { | 90 | struct pl08x_platform_data { |
| 89 | struct pl08x_channel_data *slave_channels; | 91 | struct pl08x_channel_data *slave_channels; |
| @@ -93,6 +95,8 @@ struct pl08x_platform_data { | |||
| 93 | void (*put_xfer_signal)(const struct pl08x_channel_data *, int); | 95 | void (*put_xfer_signal)(const struct pl08x_channel_data *, int); |
| 94 | u8 lli_buses; | 96 | u8 lli_buses; |
| 95 | u8 mem_buses; | 97 | u8 mem_buses; |
| 98 | const struct dma_slave_map *slave_map; | ||
| 99 | int slave_map_len; | ||
| 96 | }; | 100 | }; |
| 97 | 101 | ||
| 98 | #ifdef CONFIG_AMBA_PL08X | 102 | #ifdef CONFIG_AMBA_PL08X |
diff --git a/include/linux/ata.h b/include/linux/ata.h index fdb180367ba1..af6859b3a93d 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
| @@ -348,6 +348,7 @@ enum { | |||
| 348 | ATA_LOG_DEVSLP_DETO = 0x01, | 348 | ATA_LOG_DEVSLP_DETO = 0x01, |
| 349 | ATA_LOG_DEVSLP_VALID = 0x07, | 349 | ATA_LOG_DEVSLP_VALID = 0x07, |
| 350 | ATA_LOG_DEVSLP_VALID_MASK = 0x80, | 350 | ATA_LOG_DEVSLP_VALID_MASK = 0x80, |
| 351 | ATA_LOG_NCQ_PRIO_OFFSET = 0x09, | ||
| 351 | 352 | ||
| 352 | /* NCQ send and receive log */ | 353 | /* NCQ send and receive log */ |
| 353 | ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, | 354 | ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, |
| @@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id) | |||
| 940 | return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); | 941 | return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); |
| 941 | } | 942 | } |
| 942 | 943 | ||
| 944 | static inline bool ata_id_has_ncq_prio(const u16 *id) | ||
| 945 | { | ||
| 946 | return id[ATA_ID_SATA_CAPABILITY] & BIT(12); | ||
| 947 | } | ||
| 948 | |||
| 943 | static inline bool ata_id_has_trim(const u16 *id) | 949 | static inline bool ata_id_has_trim(const u16 *id) |
| 944 | { | 950 | { |
| 945 | if (ata_id_major_version(id) >= 7 && | 951 | if (ata_id_major_version(id) >= 7 && |
diff --git a/include/linux/audit.h b/include/linux/audit.h index 9d4443f93db6..f51fca8d0b6f 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -147,7 +147,7 @@ extern void audit_log_d_path(struct audit_buffer *ab, | |||
| 147 | extern void audit_log_key(struct audit_buffer *ab, | 147 | extern void audit_log_key(struct audit_buffer *ab, |
| 148 | char *key); | 148 | char *key); |
| 149 | extern void audit_log_link_denied(const char *operation, | 149 | extern void audit_log_link_denied(const char *operation, |
| 150 | struct path *link); | 150 | const struct path *link); |
| 151 | extern void audit_log_lost(const char *message); | 151 | extern void audit_log_lost(const char *message); |
| 152 | #ifdef CONFIG_SECURITY | 152 | #ifdef CONFIG_SECURITY |
| 153 | extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); | 153 | extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); |
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c357f27d5483..e850e76acaaf 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -116,6 +116,8 @@ struct bdi_writeback { | |||
| 116 | struct list_head work_list; | 116 | struct list_head work_list; |
| 117 | struct delayed_work dwork; /* work item used for writeback */ | 117 | struct delayed_work dwork; /* work item used for writeback */ |
| 118 | 118 | ||
| 119 | unsigned long dirty_sleep; /* last wait */ | ||
| 120 | |||
| 119 | struct list_head bdi_node; /* anchored at bdi->wb_list */ | 121 | struct list_head bdi_node; /* anchored at bdi->wb_list */ |
| 120 | 122 | ||
| 121 | #ifdef CONFIG_CGROUP_WRITEBACK | 123 | #ifdef CONFIG_CGROUP_WRITEBACK |
| @@ -136,12 +138,13 @@ struct bdi_writeback { | |||
| 136 | struct backing_dev_info { | 138 | struct backing_dev_info { |
| 137 | struct list_head bdi_list; | 139 | struct list_head bdi_list; |
| 138 | unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ | 140 | unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ |
| 139 | unsigned int capabilities; /* Device capabilities */ | 141 | unsigned long io_pages; /* max allowed IO size */ |
| 140 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ | 142 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ |
| 141 | void *congested_data; /* Pointer to aux data for congested func */ | 143 | void *congested_data; /* Pointer to aux data for congested func */ |
| 142 | 144 | ||
| 143 | char *name; | 145 | char *name; |
| 144 | 146 | ||
| 147 | unsigned int capabilities; /* Device capabilities */ | ||
| 145 | unsigned int min_ratio; | 148 | unsigned int min_ratio; |
| 146 | unsigned int max_ratio, max_prop_frac; | 149 | unsigned int max_ratio, max_prop_frac; |
| 147 | 150 | ||
diff --git a/include/linux/bio.h b/include/linux/bio.h index 97cb48f03dc7..7cf8a6c70a3f 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -63,6 +63,12 @@ | |||
| 63 | #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) | 63 | #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * Return the data direction, READ or WRITE. | ||
| 67 | */ | ||
| 68 | #define bio_data_dir(bio) \ | ||
| 69 | (op_is_write(bio_op(bio)) ? WRITE : READ) | ||
| 70 | |||
| 71 | /* | ||
| 66 | * Check whether this bio carries any data or not. A NULL bio is allowed. | 72 | * Check whether this bio carries any data or not. A NULL bio is allowed. |
| 67 | */ | 73 | */ |
| 68 | static inline bool bio_has_data(struct bio *bio) | 74 | static inline bool bio_has_data(struct bio *bio) |
| @@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio) | |||
| 70 | if (bio && | 76 | if (bio && |
| 71 | bio->bi_iter.bi_size && | 77 | bio->bi_iter.bi_size && |
| 72 | bio_op(bio) != REQ_OP_DISCARD && | 78 | bio_op(bio) != REQ_OP_DISCARD && |
| 73 | bio_op(bio) != REQ_OP_SECURE_ERASE) | 79 | bio_op(bio) != REQ_OP_SECURE_ERASE && |
| 80 | bio_op(bio) != REQ_OP_WRITE_ZEROES) | ||
| 74 | return true; | 81 | return true; |
| 75 | 82 | ||
| 76 | return false; | 83 | return false; |
| @@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio) | |||
| 80 | { | 87 | { |
| 81 | return bio_op(bio) == REQ_OP_DISCARD || | 88 | return bio_op(bio) == REQ_OP_DISCARD || |
| 82 | bio_op(bio) == REQ_OP_SECURE_ERASE || | 89 | bio_op(bio) == REQ_OP_SECURE_ERASE || |
| 83 | bio_op(bio) == REQ_OP_WRITE_SAME; | 90 | bio_op(bio) == REQ_OP_WRITE_SAME || |
| 84 | } | 91 | bio_op(bio) == REQ_OP_WRITE_ZEROES; |
| 85 | |||
| 86 | static inline bool bio_is_rw(struct bio *bio) | ||
| 87 | { | ||
| 88 | if (!bio_has_data(bio)) | ||
| 89 | return false; | ||
| 90 | |||
| 91 | if (bio_no_advance_iter(bio)) | ||
| 92 | return false; | ||
| 93 | |||
| 94 | return true; | ||
| 95 | } | 92 | } |
| 96 | 93 | ||
| 97 | static inline bool bio_mergeable(struct bio *bio) | 94 | static inline bool bio_mergeable(struct bio *bio) |
| @@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio) | |||
| 193 | struct bvec_iter iter; | 190 | struct bvec_iter iter; |
| 194 | 191 | ||
| 195 | /* | 192 | /* |
| 196 | * We special case discard/write same, because they interpret bi_size | 193 | * We special case discard/write same/write zeroes, because they |
| 197 | * differently: | 194 | * interpret bi_size differently: |
| 198 | */ | 195 | */ |
| 199 | 196 | ||
| 200 | if (bio_op(bio) == REQ_OP_DISCARD) | 197 | switch (bio_op(bio)) { |
| 201 | return 1; | 198 | case REQ_OP_DISCARD: |
| 202 | 199 | case REQ_OP_SECURE_ERASE: | |
| 203 | if (bio_op(bio) == REQ_OP_SECURE_ERASE) | 200 | case REQ_OP_WRITE_ZEROES: |
| 204 | return 1; | 201 | return 0; |
| 205 | 202 | case REQ_OP_WRITE_SAME: | |
| 206 | if (bio_op(bio) == REQ_OP_WRITE_SAME) | ||
| 207 | return 1; | 203 | return 1; |
| 204 | default: | ||
| 205 | break; | ||
| 206 | } | ||
| 208 | 207 | ||
| 209 | bio_for_each_segment(bv, bio, iter) | 208 | bio_for_each_segment(bv, bio, iter) |
| 210 | segs++; | 209 | segs++; |
| @@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) | |||
| 409 | 408 | ||
| 410 | } | 409 | } |
| 411 | 410 | ||
| 411 | extern blk_qc_t submit_bio(struct bio *); | ||
| 412 | |||
| 412 | extern void bio_endio(struct bio *); | 413 | extern void bio_endio(struct bio *); |
| 413 | 414 | ||
| 414 | static inline void bio_io_error(struct bio *bio) | 415 | static inline void bio_io_error(struct bio *bio) |
| @@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *); | |||
| 423 | extern int submit_bio_wait(struct bio *bio); | 424 | extern int submit_bio_wait(struct bio *bio); |
| 424 | extern void bio_advance(struct bio *, unsigned); | 425 | extern void bio_advance(struct bio *, unsigned); |
| 425 | 426 | ||
| 426 | extern void bio_init(struct bio *); | 427 | extern void bio_init(struct bio *bio, struct bio_vec *table, |
| 428 | unsigned short max_vecs); | ||
| 427 | extern void bio_reset(struct bio *); | 429 | extern void bio_reset(struct bio *); |
| 428 | void bio_chain(struct bio *, struct bio *); | 430 | void bio_chain(struct bio *, struct bio *); |
| 429 | 431 | ||
| 430 | extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); | 432 | extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); |
| 431 | extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, | 433 | extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, |
| 432 | unsigned int, unsigned int); | 434 | unsigned int, unsigned int); |
| 435 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); | ||
| 433 | struct rq_map_data; | 436 | struct rq_map_data; |
| 434 | extern struct bio *bio_map_user_iov(struct request_queue *, | 437 | extern struct bio *bio_map_user_iov(struct request_queue *, |
| 435 | const struct iov_iter *, gfp_t); | 438 | const struct iov_iter *, gfp_t); |
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 3bf5d33800ab..01b62e7bac74 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
| @@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) | |||
| 581 | /** | 581 | /** |
| 582 | * blkg_rwstat_add - add a value to a blkg_rwstat | 582 | * blkg_rwstat_add - add a value to a blkg_rwstat |
| 583 | * @rwstat: target blkg_rwstat | 583 | * @rwstat: target blkg_rwstat |
| 584 | * @op: REQ_OP | 584 | * @op: REQ_OP and flags |
| 585 | * @op_flags: rq_flag_bits | ||
| 586 | * @val: value to add | 585 | * @val: value to add |
| 587 | * | 586 | * |
| 588 | * Add @val to @rwstat. The counters are chosen according to @rw. The | 587 | * Add @val to @rwstat. The counters are chosen according to @rw. The |
| 589 | * caller is responsible for synchronizing calls to this function. | 588 | * caller is responsible for synchronizing calls to this function. |
| 590 | */ | 589 | */ |
| 591 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, | 590 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, |
| 592 | int op, int op_flags, uint64_t val) | 591 | unsigned int op, uint64_t val) |
| 593 | { | 592 | { |
| 594 | struct percpu_counter *cnt; | 593 | struct percpu_counter *cnt; |
| 595 | 594 | ||
| @@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, | |||
| 600 | 599 | ||
| 601 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); | 600 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); |
| 602 | 601 | ||
| 603 | if (op_flags & REQ_SYNC) | 602 | if (op_is_sync(op)) |
| 604 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; | 603 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; |
| 605 | else | 604 | else |
| 606 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; | 605 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; |
| @@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, | |||
| 705 | 704 | ||
| 706 | if (!throtl) { | 705 | if (!throtl) { |
| 707 | blkg = blkg ?: q->root_blkg; | 706 | blkg = blkg ?: q->root_blkg; |
| 708 | blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, | 707 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, |
| 709 | bio->bi_iter.bi_size); | 708 | bio->bi_iter.bi_size); |
| 710 | blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); | 709 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); |
| 711 | } | 710 | } |
| 712 | 711 | ||
| 713 | rcu_read_unlock(); | 712 | rcu_read_unlock(); |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 535ab2e13d2e..4a2ab5d99ff7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/blkdev.h> | 4 | #include <linux/blkdev.h> |
| 5 | #include <linux/sbitmap.h> | 5 | #include <linux/sbitmap.h> |
| 6 | #include <linux/srcu.h> | ||
| 6 | 7 | ||
| 7 | struct blk_mq_tags; | 8 | struct blk_mq_tags; |
| 8 | struct blk_flush_queue; | 9 | struct blk_flush_queue; |
| @@ -35,6 +36,8 @@ struct blk_mq_hw_ctx { | |||
| 35 | 36 | ||
| 36 | struct blk_mq_tags *tags; | 37 | struct blk_mq_tags *tags; |
| 37 | 38 | ||
| 39 | struct srcu_struct queue_rq_srcu; | ||
| 40 | |||
| 38 | unsigned long queued; | 41 | unsigned long queued; |
| 39 | unsigned long run; | 42 | unsigned long run; |
| 40 | #define BLK_MQ_MAX_DISPATCH_ORDER 7 | 43 | #define BLK_MQ_MAX_DISPATCH_ORDER 7 |
| @@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq); | |||
| 215 | void blk_mq_end_request(struct request *rq, int error); | 218 | void blk_mq_end_request(struct request *rq, int error); |
| 216 | void __blk_mq_end_request(struct request *rq, int error); | 219 | void __blk_mq_end_request(struct request *rq, int error); |
| 217 | 220 | ||
| 218 | void blk_mq_requeue_request(struct request *rq); | 221 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); |
| 219 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | 222 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, |
| 220 | void blk_mq_cancel_requeue_work(struct request_queue *q); | 223 | bool kick_requeue_list); |
| 221 | void blk_mq_kick_requeue_list(struct request_queue *q); | 224 | void blk_mq_kick_requeue_list(struct request_queue *q); |
| 222 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); | 225 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
| 223 | void blk_mq_abort_requeue_list(struct request_queue *q); | 226 | void blk_mq_abort_requeue_list(struct request_queue *q); |
| 224 | void blk_mq_complete_request(struct request *rq, int error); | 227 | void blk_mq_complete_request(struct request *rq, int error); |
| 225 | 228 | ||
| 229 | bool blk_mq_queue_stopped(struct request_queue *q); | ||
| 226 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 230 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
| 227 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 231 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
| 228 | void blk_mq_stop_hw_queues(struct request_queue *q); | 232 | void blk_mq_stop_hw_queues(struct request_queue *q); |
| 229 | void blk_mq_start_hw_queues(struct request_queue *q); | 233 | void blk_mq_start_hw_queues(struct request_queue *q); |
| 234 | void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); | ||
| 230 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | 235 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
| 231 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); | 236 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
| 232 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 237 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
| @@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q); | |||
| 237 | void blk_mq_freeze_queue_start(struct request_queue *q); | 242 | void blk_mq_freeze_queue_start(struct request_queue *q); |
| 238 | int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); | 243 | int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); |
| 239 | 244 | ||
| 245 | int blk_mq_map_queues(struct blk_mq_tag_set *set); | ||
| 240 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); | 246 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); |
| 241 | 247 | ||
| 242 | /* | 248 | /* |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cd395ecec99d..519ea2c9df61 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -17,7 +17,6 @@ struct io_context; | |||
| 17 | struct cgroup_subsys_state; | 17 | struct cgroup_subsys_state; |
| 18 | typedef void (bio_end_io_t) (struct bio *); | 18 | typedef void (bio_end_io_t) (struct bio *); |
| 19 | 19 | ||
| 20 | #ifdef CONFIG_BLOCK | ||
| 21 | /* | 20 | /* |
| 22 | * main unit of I/O for the block layer and lower layers (ie drivers and | 21 | * main unit of I/O for the block layer and lower layers (ie drivers and |
| 23 | * stacking drivers) | 22 | * stacking drivers) |
| @@ -88,24 +87,6 @@ struct bio { | |||
| 88 | struct bio_vec bi_inline_vecs[0]; | 87 | struct bio_vec bi_inline_vecs[0]; |
| 89 | }; | 88 | }; |
| 90 | 89 | ||
| 91 | #define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) | ||
| 92 | #define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) | ||
| 93 | #define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) | ||
| 94 | |||
| 95 | #define bio_set_op_attrs(bio, op, op_flags) do { \ | ||
| 96 | if (__builtin_constant_p(op)) \ | ||
| 97 | BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ | ||
| 98 | else \ | ||
| 99 | WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ | ||
| 100 | if (__builtin_constant_p(op_flags)) \ | ||
| 101 | BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ | ||
| 102 | else \ | ||
| 103 | WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ | ||
| 104 | (bio)->bi_opf = bio_flags(bio); \ | ||
| 105 | (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ | ||
| 106 | (bio)->bi_opf |= (op_flags); \ | ||
| 107 | } while (0) | ||
| 108 | |||
| 109 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) | 90 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) |
| 110 | 91 | ||
| 111 | /* | 92 | /* |
| @@ -119,6 +100,8 @@ struct bio { | |||
| 119 | #define BIO_QUIET 6 /* Make BIO Quiet */ | 100 | #define BIO_QUIET 6 /* Make BIO Quiet */ |
| 120 | #define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ | 101 | #define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */ |
| 121 | #define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ | 102 | #define BIO_REFFED 8 /* bio has elevated ->bi_cnt */ |
| 103 | #define BIO_THROTTLED 9 /* This bio has already been subjected to | ||
| 104 | * throttling rules. Don't do it again. */ | ||
| 122 | 105 | ||
| 123 | /* | 106 | /* |
| 124 | * Flags starting here get preserved by bio_reset() - this includes | 107 | * Flags starting here get preserved by bio_reset() - this includes |
| @@ -142,53 +125,61 @@ struct bio { | |||
| 142 | #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) | 125 | #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) |
| 143 | #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) | 126 | #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) |
| 144 | 127 | ||
| 145 | #endif /* CONFIG_BLOCK */ | ||
| 146 | |||
| 147 | /* | 128 | /* |
| 148 | * Request flags. For use in the cmd_flags field of struct request, and in | 129 | * Operations and flags common to the bio and request structures. |
| 149 | * bi_opf of struct bio. Note that some flags are only valid in either one. | 130 | * We use 8 bits for encoding the operation, and the remaining 24 for flags. |
| 131 | * | ||
| 132 | * The least significant bit of the operation number indicates the data | ||
| 133 | * transfer direction: | ||
| 134 | * | ||
| 135 | * - if the least significant bit is set transfers are TO the device | ||
| 136 | * - if the least significant bit is not set transfers are FROM the device | ||
| 137 | * | ||
| 138 | * If a operation does not transfer data the least significant bit has no | ||
| 139 | * meaning. | ||
| 150 | */ | 140 | */ |
| 151 | enum rq_flag_bits { | 141 | #define REQ_OP_BITS 8 |
| 152 | /* common flags */ | 142 | #define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) |
| 153 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ | 143 | #define REQ_FLAG_BITS 24 |
| 144 | |||
| 145 | enum req_opf { | ||
| 146 | /* read sectors from the device */ | ||
| 147 | REQ_OP_READ = 0, | ||
| 148 | /* write sectors to the device */ | ||
| 149 | REQ_OP_WRITE = 1, | ||
| 150 | /* flush the volatile write cache */ | ||
| 151 | REQ_OP_FLUSH = 2, | ||
| 152 | /* discard sectors */ | ||
| 153 | REQ_OP_DISCARD = 3, | ||
| 154 | /* get zone information */ | ||
| 155 | REQ_OP_ZONE_REPORT = 4, | ||
| 156 | /* securely erase sectors */ | ||
| 157 | REQ_OP_SECURE_ERASE = 5, | ||
| 158 | /* seset a zone write pointer */ | ||
| 159 | REQ_OP_ZONE_RESET = 6, | ||
| 160 | /* write the same sector many times */ | ||
| 161 | REQ_OP_WRITE_SAME = 7, | ||
| 162 | /* write the zero filled sector many times */ | ||
| 163 | REQ_OP_WRITE_ZEROES = 8, | ||
| 164 | |||
| 165 | REQ_OP_LAST, | ||
| 166 | }; | ||
| 167 | |||
| 168 | enum req_flag_bits { | ||
| 169 | __REQ_FAILFAST_DEV = /* no driver retries of device errors */ | ||
| 170 | REQ_OP_BITS, | ||
| 154 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | 171 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ |
| 155 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | 172 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ |
| 156 | |||
| 157 | __REQ_SYNC, /* request is sync (sync write or read) */ | 173 | __REQ_SYNC, /* request is sync (sync write or read) */ |
| 158 | __REQ_META, /* metadata io request */ | 174 | __REQ_META, /* metadata io request */ |
| 159 | __REQ_PRIO, /* boost priority in cfq */ | 175 | __REQ_PRIO, /* boost priority in cfq */ |
| 160 | 176 | __REQ_NOMERGE, /* don't touch this for merging */ | |
| 161 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ | 177 | __REQ_IDLE, /* anticipate more IO after this one */ |
| 162 | __REQ_INTEGRITY, /* I/O includes block integrity payload */ | 178 | __REQ_INTEGRITY, /* I/O includes block integrity payload */ |
| 163 | __REQ_FUA, /* forced unit access */ | 179 | __REQ_FUA, /* forced unit access */ |
| 164 | __REQ_PREFLUSH, /* request for cache flush */ | 180 | __REQ_PREFLUSH, /* request for cache flush */ |
| 165 | |||
| 166 | /* bio only flags */ | ||
| 167 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | 181 | __REQ_RAHEAD, /* read ahead, can fail anytime */ |
| 168 | __REQ_THROTTLED, /* This bio has already been subjected to | 182 | __REQ_BACKGROUND, /* background IO */ |
| 169 | * throttling rules. Don't do it again. */ | ||
| 170 | |||
| 171 | /* request only flags */ | ||
| 172 | __REQ_SORTED, /* elevator knows about this request */ | ||
| 173 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
| 174 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
| 175 | __REQ_STARTED, /* drive already may have started this one */ | ||
| 176 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
| 177 | __REQ_QUEUED, /* uses queueing */ | ||
| 178 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
| 179 | __REQ_FAILED, /* set if the request failed */ | ||
| 180 | __REQ_QUIET, /* don't worry about errors */ | ||
| 181 | __REQ_PREEMPT, /* set for "ide_preempt" requests and also | ||
| 182 | for requests for which the SCSI "quiesce" | ||
| 183 | state must be ignored. */ | ||
| 184 | __REQ_ALLOCED, /* request came from our alloc pool */ | ||
| 185 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
| 186 | __REQ_FLUSH_SEQ, /* request for flush sequence */ | ||
| 187 | __REQ_IO_STAT, /* account I/O stat */ | ||
| 188 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | ||
| 189 | __REQ_PM, /* runtime pm request */ | ||
| 190 | __REQ_HASHED, /* on IO scheduler merge hash */ | ||
| 191 | __REQ_MQ_INFLIGHT, /* track inflight for MQ */ | ||
| 192 | __REQ_NR_BITS, /* stops here */ | 183 | __REQ_NR_BITS, /* stops here */ |
| 193 | }; | 184 | }; |
| 194 | 185 | ||
| @@ -198,54 +189,47 @@ enum rq_flag_bits { | |||
| 198 | #define REQ_SYNC (1ULL << __REQ_SYNC) | 189 | #define REQ_SYNC (1ULL << __REQ_SYNC) |
| 199 | #define REQ_META (1ULL << __REQ_META) | 190 | #define REQ_META (1ULL << __REQ_META) |
| 200 | #define REQ_PRIO (1ULL << __REQ_PRIO) | 191 | #define REQ_PRIO (1ULL << __REQ_PRIO) |
| 201 | #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) | 192 | #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) |
| 193 | #define REQ_IDLE (1ULL << __REQ_IDLE) | ||
| 202 | #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) | 194 | #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) |
| 195 | #define REQ_FUA (1ULL << __REQ_FUA) | ||
| 196 | #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) | ||
| 197 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) | ||
| 198 | #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) | ||
| 203 | 199 | ||
| 204 | #define REQ_FAILFAST_MASK \ | 200 | #define REQ_FAILFAST_MASK \ |
| 205 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 201 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
| 206 | #define REQ_COMMON_MASK \ | ||
| 207 | (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ | ||
| 208 | REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE) | ||
| 209 | #define REQ_CLONE_MASK REQ_COMMON_MASK | ||
| 210 | 202 | ||
| 211 | /* This mask is used for both bio and request merge checking */ | ||
| 212 | #define REQ_NOMERGE_FLAGS \ | 203 | #define REQ_NOMERGE_FLAGS \ |
| 213 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) | 204 | (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) |
| 214 | 205 | ||
| 215 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) | 206 | #define bio_op(bio) \ |
| 216 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) | 207 | ((bio)->bi_opf & REQ_OP_MASK) |
| 208 | #define req_op(req) \ | ||
| 209 | ((req)->cmd_flags & REQ_OP_MASK) | ||
| 217 | 210 | ||
| 218 | #define REQ_SORTED (1ULL << __REQ_SORTED) | 211 | /* obsolete, don't use in new code */ |
| 219 | #define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER) | 212 | static inline void bio_set_op_attrs(struct bio *bio, unsigned op, |
| 220 | #define REQ_FUA (1ULL << __REQ_FUA) | 213 | unsigned op_flags) |
| 221 | #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) | 214 | { |
| 222 | #define REQ_STARTED (1ULL << __REQ_STARTED) | 215 | bio->bi_opf = op | op_flags; |
| 223 | #define REQ_DONTPREP (1ULL << __REQ_DONTPREP) | 216 | } |
| 224 | #define REQ_QUEUED (1ULL << __REQ_QUEUED) | ||
| 225 | #define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV) | ||
| 226 | #define REQ_FAILED (1ULL << __REQ_FAILED) | ||
| 227 | #define REQ_QUIET (1ULL << __REQ_QUIET) | ||
| 228 | #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) | ||
| 229 | #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) | ||
| 230 | #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) | ||
| 231 | #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) | ||
| 232 | #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) | ||
| 233 | #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) | ||
| 234 | #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) | ||
| 235 | #define REQ_PM (1ULL << __REQ_PM) | ||
| 236 | #define REQ_HASHED (1ULL << __REQ_HASHED) | ||
| 237 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) | ||
| 238 | |||
| 239 | enum req_op { | ||
| 240 | REQ_OP_READ, | ||
| 241 | REQ_OP_WRITE, | ||
| 242 | REQ_OP_DISCARD, /* request to discard sectors */ | ||
| 243 | REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ | ||
| 244 | REQ_OP_WRITE_SAME, /* write same block many times */ | ||
| 245 | REQ_OP_FLUSH, /* request for cache flush */ | ||
| 246 | }; | ||
| 247 | 217 | ||
| 248 | #define REQ_OP_BITS 3 | 218 | static inline bool op_is_write(unsigned int op) |
| 219 | { | ||
| 220 | return (op & 1); | ||
| 221 | } | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Reads are always treated as synchronous, as are requests with the FUA or | ||
| 225 | * PREFLUSH flag. Other operations may be marked as synchronous using the | ||
| 226 | * REQ_SYNC flag. | ||
| 227 | */ | ||
| 228 | static inline bool op_is_sync(unsigned int op) | ||
| 229 | { | ||
| 230 | return (op & REQ_OP_MASK) == REQ_OP_READ || | ||
| 231 | (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); | ||
| 232 | } | ||
| 249 | 233 | ||
| 250 | typedef unsigned int blk_qc_t; | 234 | typedef unsigned int blk_qc_t; |
| 251 | #define BLK_QC_T_NONE -1U | 235 | #define BLK_QC_T_NONE -1U |
| @@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) | |||
| 271 | return cookie & ((1u << BLK_QC_T_SHIFT) - 1); | 255 | return cookie & ((1u << BLK_QC_T_SHIFT) - 1); |
| 272 | } | 256 | } |
| 273 | 257 | ||
| 258 | struct blk_issue_stat { | ||
| 259 | u64 time; | ||
| 260 | }; | ||
| 261 | |||
| 262 | #define BLK_RQ_STAT_BATCH 64 | ||
| 263 | |||
| 264 | struct blk_rq_stat { | ||
| 265 | s64 mean; | ||
| 266 | u64 min; | ||
| 267 | u64 max; | ||
| 268 | s32 nr_samples; | ||
| 269 | s32 nr_batch; | ||
| 270 | u64 batch; | ||
| 271 | s64 time; | ||
| 272 | }; | ||
| 273 | |||
| 274 | #endif /* __LINUX_BLK_TYPES_H */ | 274 | #endif /* __LINUX_BLK_TYPES_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c47c358ba052..83695641bd5e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/rcupdate.h> | 24 | #include <linux/rcupdate.h> |
| 25 | #include <linux/percpu-refcount.h> | 25 | #include <linux/percpu-refcount.h> |
| 26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
| 27 | #include <linux/blkzoned.h> | ||
| 27 | 28 | ||
| 28 | struct module; | 29 | struct module; |
| 29 | struct scsi_ioctl_command; | 30 | struct scsi_ioctl_command; |
| @@ -37,6 +38,7 @@ struct bsg_job; | |||
| 37 | struct blkcg_gq; | 38 | struct blkcg_gq; |
| 38 | struct blk_flush_queue; | 39 | struct blk_flush_queue; |
| 39 | struct pr_ops; | 40 | struct pr_ops; |
| 41 | struct rq_wb; | ||
| 40 | 42 | ||
| 41 | #define BLKDEV_MIN_RQ 4 | 43 | #define BLKDEV_MIN_RQ 4 |
| 42 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | 44 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
| @@ -77,6 +79,55 @@ enum rq_cmd_type_bits { | |||
| 77 | REQ_TYPE_DRV_PRIV, /* driver defined types from here */ | 79 | REQ_TYPE_DRV_PRIV, /* driver defined types from here */ |
| 78 | }; | 80 | }; |
| 79 | 81 | ||
| 82 | /* | ||
| 83 | * request flags */ | ||
| 84 | typedef __u32 __bitwise req_flags_t; | ||
| 85 | |||
| 86 | /* elevator knows about this request */ | ||
| 87 | #define RQF_SORTED ((__force req_flags_t)(1 << 0)) | ||
| 88 | /* drive already may have started this one */ | ||
| 89 | #define RQF_STARTED ((__force req_flags_t)(1 << 1)) | ||
| 90 | /* uses tagged queueing */ | ||
| 91 | #define RQF_QUEUED ((__force req_flags_t)(1 << 2)) | ||
| 92 | /* may not be passed by ioscheduler */ | ||
| 93 | #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) | ||
| 94 | /* request for flush sequence */ | ||
| 95 | #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) | ||
| 96 | /* merge of different types, fail separately */ | ||
| 97 | #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) | ||
| 98 | /* track inflight for MQ */ | ||
| 99 | #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) | ||
| 100 | /* don't call prep for this one */ | ||
| 101 | #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) | ||
| 102 | /* set for "ide_preempt" requests and also for requests for which the SCSI | ||
| 103 | "quiesce" state must be ignored. */ | ||
| 104 | #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) | ||
| 105 | /* contains copies of user pages */ | ||
| 106 | #define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) | ||
| 107 | /* vaguely specified driver internal error. Ignored by the block layer */ | ||
| 108 | #define RQF_FAILED ((__force req_flags_t)(1 << 10)) | ||
| 109 | /* don't warn about errors */ | ||
| 110 | #define RQF_QUIET ((__force req_flags_t)(1 << 11)) | ||
| 111 | /* elevator private data attached */ | ||
| 112 | #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) | ||
| 113 | /* account I/O stat */ | ||
| 114 | #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) | ||
| 115 | /* request came from our alloc pool */ | ||
| 116 | #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) | ||
| 117 | /* runtime pm request */ | ||
| 118 | #define RQF_PM ((__force req_flags_t)(1 << 15)) | ||
| 119 | /* on IO scheduler merge hash */ | ||
| 120 | #define RQF_HASHED ((__force req_flags_t)(1 << 16)) | ||
| 121 | /* IO stats tracking on */ | ||
| 122 | #define RQF_STATS ((__force req_flags_t)(1 << 17)) | ||
| 123 | /* Look at ->special_vec for the actual data payload instead of the | ||
| 124 | bio chain. */ | ||
| 125 | #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) | ||
| 126 | |||
| 127 | /* flags that prevent us from merging requests: */ | ||
| 128 | #define RQF_NOMERGE_FLAGS \ | ||
| 129 | (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) | ||
| 130 | |||
| 80 | #define BLK_MAX_CDB 16 | 131 | #define BLK_MAX_CDB 16 |
| 81 | 132 | ||
| 82 | /* | 133 | /* |
| @@ -97,7 +148,8 @@ struct request { | |||
| 97 | 148 | ||
| 98 | int cpu; | 149 | int cpu; |
| 99 | unsigned cmd_type; | 150 | unsigned cmd_type; |
| 100 | u64 cmd_flags; | 151 | unsigned int cmd_flags; /* op and common flags */ |
| 152 | req_flags_t rq_flags; | ||
| 101 | unsigned long atomic_flags; | 153 | unsigned long atomic_flags; |
| 102 | 154 | ||
| 103 | /* the following two fields are internal, NEVER access directly */ | 155 | /* the following two fields are internal, NEVER access directly */ |
| @@ -126,6 +178,7 @@ struct request { | |||
| 126 | */ | 178 | */ |
| 127 | union { | 179 | union { |
| 128 | struct rb_node rb_node; /* sort/lookup */ | 180 | struct rb_node rb_node; /* sort/lookup */ |
| 181 | struct bio_vec special_vec; | ||
| 129 | void *completion_data; | 182 | void *completion_data; |
| 130 | }; | 183 | }; |
| 131 | 184 | ||
| @@ -151,6 +204,7 @@ struct request { | |||
| 151 | struct gendisk *rq_disk; | 204 | struct gendisk *rq_disk; |
| 152 | struct hd_struct *part; | 205 | struct hd_struct *part; |
| 153 | unsigned long start_time; | 206 | unsigned long start_time; |
| 207 | struct blk_issue_stat issue_stat; | ||
| 154 | #ifdef CONFIG_BLK_CGROUP | 208 | #ifdef CONFIG_BLK_CGROUP |
| 155 | struct request_list *rl; /* rl this rq is alloced from */ | 209 | struct request_list *rl; /* rl this rq is alloced from */ |
| 156 | unsigned long long start_time_ns; | 210 | unsigned long long start_time_ns; |
| @@ -198,20 +252,6 @@ struct request { | |||
| 198 | struct request *next_rq; | 252 | struct request *next_rq; |
| 199 | }; | 253 | }; |
| 200 | 254 | ||
| 201 | #define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) | ||
| 202 | #define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) | ||
| 203 | |||
| 204 | #define req_set_op(req, op) do { \ | ||
| 205 | WARN_ON(op >= (1 << REQ_OP_BITS)); \ | ||
| 206 | (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ | ||
| 207 | (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ | ||
| 208 | } while (0) | ||
| 209 | |||
| 210 | #define req_set_op_attrs(req, op, flags) do { \ | ||
| 211 | req_set_op(req, op); \ | ||
| 212 | (req)->cmd_flags |= flags; \ | ||
| 213 | } while (0) | ||
| 214 | |||
| 215 | static inline unsigned short req_get_ioprio(struct request *req) | 255 | static inline unsigned short req_get_ioprio(struct request *req) |
| 216 | { | 256 | { |
| 217 | return req->ioprio; | 257 | return req->ioprio; |
| @@ -248,7 +288,6 @@ enum blk_queue_state { | |||
| 248 | struct blk_queue_tag { | 288 | struct blk_queue_tag { |
| 249 | struct request **tag_index; /* map of busy tags */ | 289 | struct request **tag_index; /* map of busy tags */ |
| 250 | unsigned long *tag_map; /* bit map of free/busy tags */ | 290 | unsigned long *tag_map; /* bit map of free/busy tags */ |
| 251 | int busy; /* current depth */ | ||
| 252 | int max_depth; /* what we will send to device */ | 291 | int max_depth; /* what we will send to device */ |
| 253 | int real_max_depth; /* what the array can hold */ | 292 | int real_max_depth; /* what the array can hold */ |
| 254 | atomic_t refcnt; /* map can be shared */ | 293 | atomic_t refcnt; /* map can be shared */ |
| @@ -261,6 +300,15 @@ struct blk_queue_tag { | |||
| 261 | #define BLK_SCSI_MAX_CMDS (256) | 300 | #define BLK_SCSI_MAX_CMDS (256) |
| 262 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) | 301 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
| 263 | 302 | ||
| 303 | /* | ||
| 304 | * Zoned block device models (zoned limit). | ||
| 305 | */ | ||
| 306 | enum blk_zoned_model { | ||
| 307 | BLK_ZONED_NONE, /* Regular block device */ | ||
| 308 | BLK_ZONED_HA, /* Host-aware zoned block device */ | ||
| 309 | BLK_ZONED_HM, /* Host-managed zoned block device */ | ||
| 310 | }; | ||
| 311 | |||
| 264 | struct queue_limits { | 312 | struct queue_limits { |
| 265 | unsigned long bounce_pfn; | 313 | unsigned long bounce_pfn; |
| 266 | unsigned long seg_boundary_mask; | 314 | unsigned long seg_boundary_mask; |
| @@ -278,6 +326,7 @@ struct queue_limits { | |||
| 278 | unsigned int max_discard_sectors; | 326 | unsigned int max_discard_sectors; |
| 279 | unsigned int max_hw_discard_sectors; | 327 | unsigned int max_hw_discard_sectors; |
| 280 | unsigned int max_write_same_sectors; | 328 | unsigned int max_write_same_sectors; |
| 329 | unsigned int max_write_zeroes_sectors; | ||
| 281 | unsigned int discard_granularity; | 330 | unsigned int discard_granularity; |
| 282 | unsigned int discard_alignment; | 331 | unsigned int discard_alignment; |
| 283 | 332 | ||
| @@ -290,8 +339,45 @@ struct queue_limits { | |||
| 290 | unsigned char cluster; | 339 | unsigned char cluster; |
| 291 | unsigned char discard_zeroes_data; | 340 | unsigned char discard_zeroes_data; |
| 292 | unsigned char raid_partial_stripes_expensive; | 341 | unsigned char raid_partial_stripes_expensive; |
| 342 | enum blk_zoned_model zoned; | ||
| 293 | }; | 343 | }; |
| 294 | 344 | ||
| 345 | #ifdef CONFIG_BLK_DEV_ZONED | ||
| 346 | |||
| 347 | struct blk_zone_report_hdr { | ||
| 348 | unsigned int nr_zones; | ||
| 349 | u8 padding[60]; | ||
| 350 | }; | ||
| 351 | |||
| 352 | extern int blkdev_report_zones(struct block_device *bdev, | ||
| 353 | sector_t sector, struct blk_zone *zones, | ||
| 354 | unsigned int *nr_zones, gfp_t gfp_mask); | ||
| 355 | extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, | ||
| 356 | sector_t nr_sectors, gfp_t gfp_mask); | ||
| 357 | |||
| 358 | extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, | ||
| 359 | unsigned int cmd, unsigned long arg); | ||
| 360 | extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, | ||
| 361 | unsigned int cmd, unsigned long arg); | ||
| 362 | |||
| 363 | #else /* CONFIG_BLK_DEV_ZONED */ | ||
| 364 | |||
| 365 | static inline int blkdev_report_zones_ioctl(struct block_device *bdev, | ||
| 366 | fmode_t mode, unsigned int cmd, | ||
| 367 | unsigned long arg) | ||
| 368 | { | ||
| 369 | return -ENOTTY; | ||
| 370 | } | ||
| 371 | |||
| 372 | static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, | ||
| 373 | fmode_t mode, unsigned int cmd, | ||
| 374 | unsigned long arg) | ||
| 375 | { | ||
| 376 | return -ENOTTY; | ||
| 377 | } | ||
| 378 | |||
| 379 | #endif /* CONFIG_BLK_DEV_ZONED */ | ||
| 380 | |||
| 295 | struct request_queue { | 381 | struct request_queue { |
| 296 | /* | 382 | /* |
| 297 | * Together with queue_head for cacheline sharing | 383 | * Together with queue_head for cacheline sharing |
| @@ -302,6 +388,8 @@ struct request_queue { | |||
| 302 | int nr_rqs[2]; /* # allocated [a]sync rqs */ | 388 | int nr_rqs[2]; /* # allocated [a]sync rqs */ |
| 303 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ | 389 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ |
| 304 | 390 | ||
| 391 | struct rq_wb *rq_wb; | ||
| 392 | |||
| 305 | /* | 393 | /* |
| 306 | * If blkcg is not used, @q->root_rl serves all requests. If blkcg | 394 | * If blkcg is not used, @q->root_rl serves all requests. If blkcg |
| 307 | * is used, root blkg allocates from @q->root_rl and all other | 395 | * is used, root blkg allocates from @q->root_rl and all other |
| @@ -327,6 +415,8 @@ struct request_queue { | |||
| 327 | struct blk_mq_ctx __percpu *queue_ctx; | 415 | struct blk_mq_ctx __percpu *queue_ctx; |
| 328 | unsigned int nr_queues; | 416 | unsigned int nr_queues; |
| 329 | 417 | ||
| 418 | unsigned int queue_depth; | ||
| 419 | |||
| 330 | /* hw dispatch queues */ | 420 | /* hw dispatch queues */ |
| 331 | struct blk_mq_hw_ctx **queue_hw_ctx; | 421 | struct blk_mq_hw_ctx **queue_hw_ctx; |
| 332 | unsigned int nr_hw_queues; | 422 | unsigned int nr_hw_queues; |
| @@ -412,6 +502,9 @@ struct request_queue { | |||
| 412 | 502 | ||
| 413 | unsigned int nr_sorted; | 503 | unsigned int nr_sorted; |
| 414 | unsigned int in_flight[2]; | 504 | unsigned int in_flight[2]; |
| 505 | |||
| 506 | struct blk_rq_stat rq_stats[2]; | ||
| 507 | |||
| 415 | /* | 508 | /* |
| 416 | * Number of active block driver functions for which blk_drain_queue() | 509 | * Number of active block driver functions for which blk_drain_queue() |
| 417 | * must wait. Must be incremented around functions that unlock the | 510 | * must wait. Must be incremented around functions that unlock the |
| @@ -420,6 +513,7 @@ struct request_queue { | |||
| 420 | unsigned int request_fn_active; | 513 | unsigned int request_fn_active; |
| 421 | 514 | ||
| 422 | unsigned int rq_timeout; | 515 | unsigned int rq_timeout; |
| 516 | int poll_nsec; | ||
| 423 | struct timer_list timeout; | 517 | struct timer_list timeout; |
| 424 | struct work_struct timeout_work; | 518 | struct work_struct timeout_work; |
| 425 | struct list_head timeout_list; | 519 | struct list_head timeout_list; |
| @@ -505,6 +599,7 @@ struct request_queue { | |||
| 505 | #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ | 599 | #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ |
| 506 | #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ | 600 | #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ |
| 507 | #define QUEUE_FLAG_DAX 26 /* device supports DAX */ | 601 | #define QUEUE_FLAG_DAX 26 /* device supports DAX */ |
| 602 | #define QUEUE_FLAG_STATS 27 /* track rq completion times */ | ||
| 508 | 603 | ||
| 509 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 604 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 510 | (1 << QUEUE_FLAG_STACKABLE) | \ | 605 | (1 << QUEUE_FLAG_STACKABLE) | \ |
| @@ -601,7 +696,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
| 601 | REQ_FAILFAST_DRIVER)) | 696 | REQ_FAILFAST_DRIVER)) |
| 602 | 697 | ||
| 603 | #define blk_account_rq(rq) \ | 698 | #define blk_account_rq(rq) \ |
| 604 | (((rq)->cmd_flags & REQ_STARTED) && \ | 699 | (((rq)->rq_flags & RQF_STARTED) && \ |
| 605 | ((rq)->cmd_type == REQ_TYPE_FS)) | 700 | ((rq)->cmd_type == REQ_TYPE_FS)) |
| 606 | 701 | ||
| 607 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) | 702 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) |
| @@ -627,17 +722,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) | |||
| 627 | return q->limits.cluster; | 722 | return q->limits.cluster; |
| 628 | } | 723 | } |
| 629 | 724 | ||
| 630 | /* | 725 | static inline enum blk_zoned_model |
| 631 | * We regard a request as sync, if either a read or a sync write | 726 | blk_queue_zoned_model(struct request_queue *q) |
| 632 | */ | 727 | { |
| 633 | static inline bool rw_is_sync(int op, unsigned int rw_flags) | 728 | return q->limits.zoned; |
| 729 | } | ||
| 730 | |||
| 731 | static inline bool blk_queue_is_zoned(struct request_queue *q) | ||
| 732 | { | ||
| 733 | switch (blk_queue_zoned_model(q)) { | ||
| 734 | case BLK_ZONED_HA: | ||
| 735 | case BLK_ZONED_HM: | ||
| 736 | return true; | ||
| 737 | default: | ||
| 738 | return false; | ||
| 739 | } | ||
| 740 | } | ||
| 741 | |||
| 742 | static inline unsigned int blk_queue_zone_size(struct request_queue *q) | ||
| 634 | { | 743 | { |
| 635 | return op == REQ_OP_READ || (rw_flags & REQ_SYNC); | 744 | return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; |
| 636 | } | 745 | } |
| 637 | 746 | ||
| 638 | static inline bool rq_is_sync(struct request *rq) | 747 | static inline bool rq_is_sync(struct request *rq) |
| 639 | { | 748 | { |
| 640 | return rw_is_sync(req_op(rq), rq->cmd_flags); | 749 | return op_is_sync(rq->cmd_flags); |
| 641 | } | 750 | } |
| 642 | 751 | ||
| 643 | static inline bool blk_rl_full(struct request_list *rl, bool sync) | 752 | static inline bool blk_rl_full(struct request_list *rl, bool sync) |
| @@ -669,8 +778,13 @@ static inline bool rq_mergeable(struct request *rq) | |||
| 669 | if (req_op(rq) == REQ_OP_FLUSH) | 778 | if (req_op(rq) == REQ_OP_FLUSH) |
| 670 | return false; | 779 | return false; |
| 671 | 780 | ||
| 781 | if (req_op(rq) == REQ_OP_WRITE_ZEROES) | ||
| 782 | return false; | ||
| 783 | |||
| 672 | if (rq->cmd_flags & REQ_NOMERGE_FLAGS) | 784 | if (rq->cmd_flags & REQ_NOMERGE_FLAGS) |
| 673 | return false; | 785 | return false; |
| 786 | if (rq->rq_flags & RQF_NOMERGE_FLAGS) | ||
| 787 | return false; | ||
| 674 | 788 | ||
| 675 | return true; | 789 | return true; |
| 676 | } | 790 | } |
| @@ -683,6 +797,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) | |||
| 683 | return false; | 797 | return false; |
| 684 | } | 798 | } |
| 685 | 799 | ||
| 800 | static inline unsigned int blk_queue_depth(struct request_queue *q) | ||
| 801 | { | ||
| 802 | if (q->queue_depth) | ||
| 803 | return q->queue_depth; | ||
| 804 | |||
| 805 | return q->nr_requests; | ||
| 806 | } | ||
| 807 | |||
| 686 | /* | 808 | /* |
| 687 | * q->prep_rq_fn return values | 809 | * q->prep_rq_fn return values |
| 688 | */ | 810 | */ |
| @@ -790,8 +912,6 @@ extern void __blk_put_request(struct request_queue *, struct request *); | |||
| 790 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 912 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
| 791 | extern void blk_rq_set_block_pc(struct request *); | 913 | extern void blk_rq_set_block_pc(struct request *); |
| 792 | extern void blk_requeue_request(struct request_queue *, struct request *); | 914 | extern void blk_requeue_request(struct request_queue *, struct request *); |
| 793 | extern void blk_add_request_payload(struct request *rq, struct page *page, | ||
| 794 | int offset, unsigned int len); | ||
| 795 | extern int blk_lld_busy(struct request_queue *q); | 915 | extern int blk_lld_busy(struct request_queue *q); |
| 796 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 916 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
| 797 | struct bio_set *bs, gfp_t gfp_mask, | 917 | struct bio_set *bs, gfp_t gfp_mask, |
| @@ -824,6 +944,7 @@ extern void __blk_run_queue(struct request_queue *q); | |||
| 824 | extern void __blk_run_queue_uncond(struct request_queue *q); | 944 | extern void __blk_run_queue_uncond(struct request_queue *q); |
| 825 | extern void blk_run_queue(struct request_queue *); | 945 | extern void blk_run_queue(struct request_queue *); |
| 826 | extern void blk_run_queue_async(struct request_queue *q); | 946 | extern void blk_run_queue_async(struct request_queue *q); |
| 947 | extern void blk_mq_quiesce_queue(struct request_queue *q); | ||
| 827 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 948 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| 828 | struct rq_map_data *, void __user *, unsigned long, | 949 | struct rq_map_data *, void __user *, unsigned long, |
| 829 | gfp_t); | 950 | gfp_t); |
| @@ -837,7 +958,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, | |||
| 837 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 958 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
| 838 | struct request *, int, rq_end_io_fn *); | 959 | struct request *, int, rq_end_io_fn *); |
| 839 | 960 | ||
| 840 | bool blk_poll(struct request_queue *q, blk_qc_t cookie); | 961 | bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); |
| 841 | 962 | ||
| 842 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 963 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
| 843 | { | 964 | { |
| @@ -888,6 +1009,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | |||
| 888 | if (unlikely(op == REQ_OP_WRITE_SAME)) | 1009 | if (unlikely(op == REQ_OP_WRITE_SAME)) |
| 889 | return q->limits.max_write_same_sectors; | 1010 | return q->limits.max_write_same_sectors; |
| 890 | 1011 | ||
| 1012 | if (unlikely(op == REQ_OP_WRITE_ZEROES)) | ||
| 1013 | return q->limits.max_write_zeroes_sectors; | ||
| 1014 | |||
| 891 | return q->limits.max_sectors; | 1015 | return q->limits.max_sectors; |
| 892 | } | 1016 | } |
| 893 | 1017 | ||
| @@ -934,6 +1058,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq) | |||
| 934 | } | 1058 | } |
| 935 | 1059 | ||
| 936 | /* | 1060 | /* |
| 1061 | * blk_rq_set_prio - associate a request with prio from ioc | ||
| 1062 | * @rq: request of interest | ||
| 1063 | * @ioc: target iocontext | ||
| 1064 | * | ||
| 1065 | * Assocate request prio with ioc prio so request based drivers | ||
| 1066 | * can leverage priority information. | ||
| 1067 | */ | ||
| 1068 | static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc) | ||
| 1069 | { | ||
| 1070 | if (ioc) | ||
| 1071 | rq->ioprio = ioc->ioprio; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | /* | ||
| 937 | * Request issue related functions. | 1075 | * Request issue related functions. |
| 938 | */ | 1076 | */ |
| 939 | extern struct request *blk_peek_request(struct request_queue *q); | 1077 | extern struct request *blk_peek_request(struct request_queue *q); |
| @@ -991,6 +1129,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q, | |||
| 991 | unsigned int max_discard_sectors); | 1129 | unsigned int max_discard_sectors); |
| 992 | extern void blk_queue_max_write_same_sectors(struct request_queue *q, | 1130 | extern void blk_queue_max_write_same_sectors(struct request_queue *q, |
| 993 | unsigned int max_write_same_sectors); | 1131 | unsigned int max_write_same_sectors); |
| 1132 | extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, | ||
| 1133 | unsigned int max_write_same_sectors); | ||
| 994 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 1134 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
| 995 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); | 1135 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
| 996 | extern void blk_queue_alignment_offset(struct request_queue *q, | 1136 | extern void blk_queue_alignment_offset(struct request_queue *q, |
| @@ -999,6 +1139,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | |||
| 999 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | 1139 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
| 1000 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); | 1140 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); |
| 1001 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | 1141 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); |
| 1142 | extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); | ||
| 1002 | extern void blk_set_default_limits(struct queue_limits *lim); | 1143 | extern void blk_set_default_limits(struct queue_limits *lim); |
| 1003 | extern void blk_set_stacking_limits(struct queue_limits *lim); | 1144 | extern void blk_set_stacking_limits(struct queue_limits *lim); |
| 1004 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 1145 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
| @@ -1027,6 +1168,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); | |||
| 1027 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); | 1168 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); |
| 1028 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 1169 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
| 1029 | 1170 | ||
| 1171 | static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) | ||
| 1172 | { | ||
| 1173 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | ||
| 1174 | return 1; | ||
| 1175 | return rq->nr_phys_segments; | ||
| 1176 | } | ||
| 1177 | |||
| 1030 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 1178 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
| 1031 | extern void blk_dump_rq_flags(struct request *, char *); | 1179 | extern void blk_dump_rq_flags(struct request *, char *); |
| 1032 | extern long nr_blockdev_pages(void); | 1180 | extern long nr_blockdev_pages(void); |
| @@ -1057,7 +1205,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q) | |||
| 1057 | static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} | 1205 | static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} |
| 1058 | static inline void blk_pre_runtime_resume(struct request_queue *q) {} | 1206 | static inline void blk_pre_runtime_resume(struct request_queue *q) {} |
| 1059 | static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} | 1207 | static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} |
| 1060 | extern inline void blk_set_runtime_active(struct request_queue *q) {} | 1208 | static inline void blk_set_runtime_active(struct request_queue *q) {} |
| 1061 | #endif | 1209 | #endif |
| 1062 | 1210 | ||
| 1063 | /* | 1211 | /* |
| @@ -1078,6 +1226,7 @@ struct blk_plug { | |||
| 1078 | struct list_head cb_list; /* md requires an unplug callback */ | 1226 | struct list_head cb_list; /* md requires an unplug callback */ |
| 1079 | }; | 1227 | }; |
| 1080 | #define BLK_MAX_REQUEST_COUNT 16 | 1228 | #define BLK_MAX_REQUEST_COUNT 16 |
| 1229 | #define BLK_PLUG_FLUSH_SIZE (128 * 1024) | ||
| 1081 | 1230 | ||
| 1082 | struct blk_plug_cb; | 1231 | struct blk_plug_cb; |
| 1083 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); | 1232 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); |
| @@ -1151,6 +1300,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
| 1151 | struct bio **biop); | 1300 | struct bio **biop); |
| 1152 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | 1301 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, |
| 1153 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); | 1302 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); |
| 1303 | extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | ||
| 1304 | sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, | ||
| 1305 | bool discard); | ||
| 1154 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 1306 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
| 1155 | sector_t nr_sects, gfp_t gfp_mask, bool discard); | 1307 | sector_t nr_sects, gfp_t gfp_mask, bool discard); |
| 1156 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, | 1308 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, |
| @@ -1354,6 +1506,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev) | |||
| 1354 | return 0; | 1506 | return 0; |
| 1355 | } | 1507 | } |
| 1356 | 1508 | ||
| 1509 | static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) | ||
| 1510 | { | ||
| 1511 | struct request_queue *q = bdev_get_queue(bdev); | ||
| 1512 | |||
| 1513 | if (q) | ||
| 1514 | return q->limits.max_write_zeroes_sectors; | ||
| 1515 | |||
| 1516 | return 0; | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) | ||
| 1520 | { | ||
| 1521 | struct request_queue *q = bdev_get_queue(bdev); | ||
| 1522 | |||
| 1523 | if (q) | ||
| 1524 | return blk_queue_zoned_model(q); | ||
| 1525 | |||
| 1526 | return BLK_ZONED_NONE; | ||
| 1527 | } | ||
| 1528 | |||
| 1529 | static inline bool bdev_is_zoned(struct block_device *bdev) | ||
| 1530 | { | ||
| 1531 | struct request_queue *q = bdev_get_queue(bdev); | ||
| 1532 | |||
| 1533 | if (q) | ||
| 1534 | return blk_queue_is_zoned(q); | ||
| 1535 | |||
| 1536 | return false; | ||
| 1537 | } | ||
| 1538 | |||
| 1539 | static inline unsigned int bdev_zone_size(struct block_device *bdev) | ||
| 1540 | { | ||
| 1541 | struct request_queue *q = bdev_get_queue(bdev); | ||
| 1542 | |||
| 1543 | if (q) | ||
| 1544 | return blk_queue_zone_size(q); | ||
| 1545 | |||
| 1546 | return 0; | ||
| 1547 | } | ||
| 1548 | |||
| 1357 | static inline int queue_dma_alignment(struct request_queue *q) | 1549 | static inline int queue_dma_alignment(struct request_queue *q) |
| 1358 | { | 1550 | { |
| 1359 | return q ? q->dma_alignment : 511; | 1551 | return q ? q->dma_alignment : 511; |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index cceb72f9e29f..e417f080219a 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) | |||
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | extern void blk_dump_cmd(char *buf, struct request *rq); | 120 | extern void blk_dump_cmd(char *buf, struct request *rq); |
| 121 | extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); | 121 | extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); |
| 122 | 122 | ||
| 123 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | 123 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ |
| 124 | 124 | ||
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h new file mode 100644 index 000000000000..92bc89ae7e20 --- /dev/null +++ b/include/linux/bpf-cgroup.h | |||
| @@ -0,0 +1,92 @@ | |||
| 1 | #ifndef _BPF_CGROUP_H | ||
| 2 | #define _BPF_CGROUP_H | ||
| 3 | |||
| 4 | #include <linux/jump_label.h> | ||
| 5 | #include <uapi/linux/bpf.h> | ||
| 6 | |||
| 7 | struct sock; | ||
| 8 | struct cgroup; | ||
| 9 | struct sk_buff; | ||
| 10 | |||
| 11 | #ifdef CONFIG_CGROUP_BPF | ||
| 12 | |||
| 13 | extern struct static_key_false cgroup_bpf_enabled_key; | ||
| 14 | #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) | ||
| 15 | |||
| 16 | struct cgroup_bpf { | ||
| 17 | /* | ||
| 18 | * Store two sets of bpf_prog pointers, one for programs that are | ||
| 19 | * pinned directly to this cgroup, and one for those that are effective | ||
| 20 | * when this cgroup is accessed. | ||
| 21 | */ | ||
| 22 | struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; | ||
| 23 | struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; | ||
| 24 | }; | ||
| 25 | |||
| 26 | void cgroup_bpf_put(struct cgroup *cgrp); | ||
| 27 | void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); | ||
| 28 | |||
| 29 | void __cgroup_bpf_update(struct cgroup *cgrp, | ||
| 30 | struct cgroup *parent, | ||
| 31 | struct bpf_prog *prog, | ||
| 32 | enum bpf_attach_type type); | ||
| 33 | |||
| 34 | /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ | ||
| 35 | void cgroup_bpf_update(struct cgroup *cgrp, | ||
| 36 | struct bpf_prog *prog, | ||
| 37 | enum bpf_attach_type type); | ||
| 38 | |||
| 39 | int __cgroup_bpf_run_filter_skb(struct sock *sk, | ||
| 40 | struct sk_buff *skb, | ||
| 41 | enum bpf_attach_type type); | ||
| 42 | |||
| 43 | int __cgroup_bpf_run_filter_sk(struct sock *sk, | ||
| 44 | enum bpf_attach_type type); | ||
| 45 | |||
| 46 | /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ | ||
| 47 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ | ||
| 48 | ({ \ | ||
| 49 | int __ret = 0; \ | ||
| 50 | if (cgroup_bpf_enabled) \ | ||
| 51 | __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ | ||
| 52 | BPF_CGROUP_INET_INGRESS); \ | ||
| 53 | \ | ||
| 54 | __ret; \ | ||
| 55 | }) | ||
| 56 | |||
| 57 | #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ | ||
| 58 | ({ \ | ||
| 59 | int __ret = 0; \ | ||
| 60 | if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ | ||
| 61 | typeof(sk) __sk = sk_to_full_sk(sk); \ | ||
| 62 | if (sk_fullsock(__sk)) \ | ||
| 63 | __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ | ||
| 64 | BPF_CGROUP_INET_EGRESS); \ | ||
| 65 | } \ | ||
| 66 | __ret; \ | ||
| 67 | }) | ||
| 68 | |||
| 69 | #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ | ||
| 70 | ({ \ | ||
| 71 | int __ret = 0; \ | ||
| 72 | if (cgroup_bpf_enabled && sk) { \ | ||
| 73 | __ret = __cgroup_bpf_run_filter_sk(sk, \ | ||
| 74 | BPF_CGROUP_INET_SOCK_CREATE); \ | ||
| 75 | } \ | ||
| 76 | __ret; \ | ||
| 77 | }) | ||
| 78 | |||
| 79 | #else | ||
| 80 | |||
| 81 | struct cgroup_bpf {}; | ||
| 82 | static inline void cgroup_bpf_put(struct cgroup *cgrp) {} | ||
| 83 | static inline void cgroup_bpf_inherit(struct cgroup *cgrp, | ||
| 84 | struct cgroup *parent) {} | ||
| 85 | |||
| 86 | #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) | ||
| 87 | #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) | ||
| 88 | #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) | ||
| 89 | |||
| 90 | #endif /* CONFIG_CGROUP_BPF */ | ||
| 91 | |||
| 92 | #endif /* _BPF_CGROUP_H */ | ||
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c201017b5730..f74ae68086dc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); | |||
| 216 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | 216 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
| 217 | 217 | ||
| 218 | bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); | 218 | bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); |
| 219 | int bpf_prog_calc_digest(struct bpf_prog *fp); | ||
| 219 | 220 | ||
| 220 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); | 221 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); |
| 221 | 222 | ||
| @@ -233,13 +234,16 @@ void bpf_register_map_type(struct bpf_map_type_list *tl); | |||
| 233 | 234 | ||
| 234 | struct bpf_prog *bpf_prog_get(u32 ufd); | 235 | struct bpf_prog *bpf_prog_get(u32 ufd); |
| 235 | struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); | 236 | struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); |
| 236 | struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i); | 237 | struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); |
| 237 | struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); | 238 | void bpf_prog_sub(struct bpf_prog *prog, int i); |
| 239 | struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); | ||
| 238 | void bpf_prog_put(struct bpf_prog *prog); | 240 | void bpf_prog_put(struct bpf_prog *prog); |
| 241 | int __bpf_prog_charge(struct user_struct *user, u32 pages); | ||
| 242 | void __bpf_prog_uncharge(struct user_struct *user, u32 pages); | ||
| 239 | 243 | ||
| 240 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); | 244 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
| 241 | struct bpf_map *__bpf_map_get(struct fd f); | 245 | struct bpf_map *__bpf_map_get(struct fd f); |
| 242 | struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); | 246 | struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); |
| 243 | void bpf_map_put_with_uref(struct bpf_map *map); | 247 | void bpf_map_put_with_uref(struct bpf_map *map); |
| 244 | void bpf_map_put(struct bpf_map *map); | 248 | void bpf_map_put(struct bpf_map *map); |
| 245 | int bpf_map_precharge_memlock(u32 pages); | 249 | int bpf_map_precharge_memlock(u32 pages); |
| @@ -298,18 +302,33 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, | |||
| 298 | { | 302 | { |
| 299 | return ERR_PTR(-EOPNOTSUPP); | 303 | return ERR_PTR(-EOPNOTSUPP); |
| 300 | } | 304 | } |
| 301 | static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) | 305 | static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, |
| 306 | int i) | ||
| 302 | { | 307 | { |
| 303 | return ERR_PTR(-EOPNOTSUPP); | 308 | return ERR_PTR(-EOPNOTSUPP); |
| 304 | } | 309 | } |
| 305 | 310 | ||
| 311 | static inline void bpf_prog_sub(struct bpf_prog *prog, int i) | ||
| 312 | { | ||
| 313 | } | ||
| 314 | |||
| 306 | static inline void bpf_prog_put(struct bpf_prog *prog) | 315 | static inline void bpf_prog_put(struct bpf_prog *prog) |
| 307 | { | 316 | { |
| 308 | } | 317 | } |
| 309 | static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) | 318 | |
| 319 | static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) | ||
| 310 | { | 320 | { |
| 311 | return ERR_PTR(-EOPNOTSUPP); | 321 | return ERR_PTR(-EOPNOTSUPP); |
| 312 | } | 322 | } |
| 323 | |||
| 324 | static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) | ||
| 325 | { | ||
| 326 | return 0; | ||
| 327 | } | ||
| 328 | |||
| 329 | static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) | ||
| 330 | { | ||
| 331 | } | ||
| 313 | #endif /* CONFIG_BPF_SYSCALL */ | 332 | #endif /* CONFIG_BPF_SYSCALL */ |
| 314 | 333 | ||
| 315 | /* verifier prototypes for helper functions called from eBPF programs */ | 334 | /* verifier prototypes for helper functions called from eBPF programs */ |
| @@ -319,6 +338,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; | |||
| 319 | 338 | ||
| 320 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; | 339 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
| 321 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; | 340 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
| 341 | extern const struct bpf_func_proto bpf_get_numa_node_id_proto; | ||
| 322 | extern const struct bpf_func_proto bpf_tail_call_proto; | 342 | extern const struct bpf_func_proto bpf_tail_call_proto; |
| 323 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; | 343 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; |
| 324 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; | 344 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6aaf425cebc3..a13b031dc6b8 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -18,19 +18,12 @@ | |||
| 18 | 18 | ||
| 19 | struct bpf_reg_state { | 19 | struct bpf_reg_state { |
| 20 | enum bpf_reg_type type; | 20 | enum bpf_reg_type type; |
| 21 | /* | ||
| 22 | * Used to determine if any memory access using this register will | ||
| 23 | * result in a bad access. | ||
| 24 | */ | ||
| 25 | s64 min_value; | ||
| 26 | u64 max_value; | ||
| 27 | union { | 21 | union { |
| 28 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | 22 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
| 29 | s64 imm; | 23 | s64 imm; |
| 30 | 24 | ||
| 31 | /* valid when type == PTR_TO_PACKET* */ | 25 | /* valid when type == PTR_TO_PACKET* */ |
| 32 | struct { | 26 | struct { |
| 33 | u32 id; | ||
| 34 | u16 off; | 27 | u16 off; |
| 35 | u16 range; | 28 | u16 range; |
| 36 | }; | 29 | }; |
| @@ -40,6 +33,13 @@ struct bpf_reg_state { | |||
| 40 | */ | 33 | */ |
| 41 | struct bpf_map *map_ptr; | 34 | struct bpf_map *map_ptr; |
| 42 | }; | 35 | }; |
| 36 | u32 id; | ||
| 37 | /* Used to determine if any memory access using this register will | ||
| 38 | * result in a bad access. These two fields must be last. | ||
| 39 | * See states_equal() | ||
| 40 | */ | ||
| 41 | s64 min_value; | ||
| 42 | u64 max_value; | ||
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | enum bpf_stack_slot_type { | 45 | enum bpf_stack_slot_type { |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index e3354b74286c..4f7d8be9ddbf 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -13,11 +13,13 @@ | |||
| 13 | #define PHY_ID_BCM5241 0x0143bc30 | 13 | #define PHY_ID_BCM5241 0x0143bc30 |
| 14 | #define PHY_ID_BCMAC131 0x0143bc70 | 14 | #define PHY_ID_BCMAC131 0x0143bc70 |
| 15 | #define PHY_ID_BCM5481 0x0143bca0 | 15 | #define PHY_ID_BCM5481 0x0143bca0 |
| 16 | #define PHY_ID_BCM54810 0x03625d00 | ||
| 16 | #define PHY_ID_BCM5482 0x0143bcb0 | 17 | #define PHY_ID_BCM5482 0x0143bcb0 |
| 17 | #define PHY_ID_BCM5411 0x00206070 | 18 | #define PHY_ID_BCM5411 0x00206070 |
| 18 | #define PHY_ID_BCM5421 0x002060e0 | 19 | #define PHY_ID_BCM5421 0x002060e0 |
| 19 | #define PHY_ID_BCM5464 0x002060b0 | 20 | #define PHY_ID_BCM5464 0x002060b0 |
| 20 | #define PHY_ID_BCM5461 0x002060c0 | 21 | #define PHY_ID_BCM5461 0x002060c0 |
| 22 | #define PHY_ID_BCM54612E 0x03625e60 | ||
| 21 | #define PHY_ID_BCM54616S 0x03625d10 | 23 | #define PHY_ID_BCM54616S 0x03625d10 |
| 22 | #define PHY_ID_BCM57780 0x03625d90 | 24 | #define PHY_ID_BCM57780 0x03625d90 |
| 23 | 25 | ||
| @@ -55,6 +57,7 @@ | |||
| 55 | #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 | 57 | #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 |
| 56 | #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 | 58 | #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 |
| 57 | #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 | 59 | #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 |
| 60 | |||
| 58 | /* Broadcom BCM7xxx specific workarounds */ | 61 | /* Broadcom BCM7xxx specific workarounds */ |
| 59 | #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) | 62 | #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) |
| 60 | #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) | 63 | #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) |
| @@ -105,11 +108,15 @@ | |||
| 105 | #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 | 108 | #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 |
| 106 | 109 | ||
| 107 | #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 | 110 | #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 |
| 111 | #define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 | ||
| 108 | #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 | 112 | #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 |
| 109 | #define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 | 113 | #define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 |
| 110 | #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 | 114 | #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 |
| 115 | #define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 | ||
| 116 | #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8) | ||
| 117 | #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4) | ||
| 111 | 118 | ||
| 112 | #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 | 119 | #define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 |
| 113 | 120 | ||
| 114 | /* | 121 | /* |
| 115 | * Broadcom LED source encodings. These are used in BCM5461, BCM5481, | 122 | * Broadcom LED source encodings. These are used in BCM5461, BCM5481, |
| @@ -124,6 +131,7 @@ | |||
| 124 | #define BCM_LED_SRC_INTR 0x6 | 131 | #define BCM_LED_SRC_INTR 0x6 |
| 125 | #define BCM_LED_SRC_QUALITY 0x7 | 132 | #define BCM_LED_SRC_QUALITY 0x7 |
| 126 | #define BCM_LED_SRC_RCVLED 0x8 | 133 | #define BCM_LED_SRC_RCVLED 0x8 |
| 134 | #define BCM_LED_SRC_WIRESPEED 0x9 | ||
| 127 | #define BCM_LED_SRC_MULTICOLOR1 0xa | 135 | #define BCM_LED_SRC_MULTICOLOR1 0xa |
| 128 | #define BCM_LED_SRC_OPENSHORT 0xb | 136 | #define BCM_LED_SRC_OPENSHORT 0xb |
| 129 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ | 137 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ |
| @@ -135,6 +143,14 @@ | |||
| 135 | * Shadow values go into bits [14:10] of register 0x1c to select a shadow | 143 | * Shadow values go into bits [14:10] of register 0x1c to select a shadow |
| 136 | * register to access. | 144 | * register to access. |
| 137 | */ | 145 | */ |
| 146 | |||
| 147 | /* 00100: Reserved control register 2 */ | ||
| 148 | #define BCM54XX_SHD_SCR2 0x04 | ||
| 149 | #define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 | ||
| 150 | #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 | ||
| 151 | #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 | ||
| 152 | #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 | ||
| 153 | |||
| 138 | /* 00101: Spare Control Register 3 */ | 154 | /* 00101: Spare Control Register 3 */ |
| 139 | #define BCM54XX_SHD_SCR3 0x05 | 155 | #define BCM54XX_SHD_SCR3 0x05 |
| 140 | #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 | 156 | #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 |
| @@ -189,6 +205,12 @@ | |||
| 189 | #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ | 205 | #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ |
| 190 | #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ | 206 | #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ |
| 191 | 207 | ||
| 208 | /* BCM54810 Registers */ | ||
| 209 | #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) | ||
| 210 | #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) | ||
| 211 | #define BCM54810_SHD_CLK_CTL 0x3 | ||
| 212 | #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) | ||
| 213 | |||
| 192 | 214 | ||
| 193 | /*****************************************************************************/ | 215 | /*****************************************************************************/ |
| 194 | /* Fast Ethernet Transceiver definitions. */ | 216 | /* Fast Ethernet Transceiver definitions. */ |
| @@ -222,6 +244,9 @@ | |||
| 222 | #define LPI_FEATURE_EN_DIG1000X 0x4000 | 244 | #define LPI_FEATURE_EN_DIG1000X 0x4000 |
| 223 | 245 | ||
| 224 | /* Core register definitions*/ | 246 | /* Core register definitions*/ |
| 247 | #define MII_BRCM_CORE_BASE12 0x12 | ||
| 248 | #define MII_BRCM_CORE_BASE13 0x13 | ||
| 249 | #define MII_BRCM_CORE_BASE14 0x14 | ||
| 225 | #define MII_BRCM_CORE_BASE1E 0x1E | 250 | #define MII_BRCM_CORE_BASE1E 0x1E |
| 226 | #define MII_BRCM_CORE_EXPB0 0xB0 | 251 | #define MII_BRCM_CORE_EXPB0 0xB0 |
| 227 | #define MII_BRCM_CORE_EXPB1 0xB1 | 252 | #define MII_BRCM_CORE_EXPB1 0xB1 |
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index a226652a5a6c..657a718c27d2 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h | |||
| @@ -40,6 +40,8 @@ struct bsg_job { | |||
| 40 | struct device *dev; | 40 | struct device *dev; |
| 41 | struct request *req; | 41 | struct request *req; |
| 42 | 42 | ||
| 43 | struct kref kref; | ||
| 44 | |||
| 43 | /* Transport/driver specific request/reply structs */ | 45 | /* Transport/driver specific request/reply structs */ |
| 44 | void *request; | 46 | void *request; |
| 45 | void *reply; | 47 | void *reply; |
| @@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result, | |||
| 67 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, | 69 | int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, |
| 68 | bsg_job_fn *job_fn, int dd_job_size); | 70 | bsg_job_fn *job_fn, int dd_job_size); |
| 69 | void bsg_request_fn(struct request_queue *q); | 71 | void bsg_request_fn(struct request_queue *q); |
| 72 | void bsg_job_put(struct bsg_job *job); | ||
| 73 | int __must_check bsg_job_get(struct bsg_job *job); | ||
| 70 | 74 | ||
| 71 | #endif | 75 | #endif |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index ebbacd14d450..d67ab83823ad 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *); | |||
| 168 | void invalidate_inode_buffers(struct inode *); | 168 | void invalidate_inode_buffers(struct inode *); |
| 169 | int remove_inode_buffers(struct inode *inode); | 169 | int remove_inode_buffers(struct inode *inode); |
| 170 | int sync_mapping_buffers(struct address_space *mapping); | 170 | int sync_mapping_buffers(struct address_space *mapping); |
| 171 | void unmap_underlying_metadata(struct block_device *bdev, sector_t block); | 171 | void clean_bdev_aliases(struct block_device *bdev, sector_t block, |
| 172 | sector_t len); | ||
| 173 | static inline void clean_bdev_bh_alias(struct buffer_head *bh) | ||
| 174 | { | ||
| 175 | clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1); | ||
| 176 | } | ||
| 172 | 177 | ||
| 173 | void mark_buffer_async_write(struct buffer_head *bh); | 178 | void mark_buffer_async_write(struct buffer_head *bh); |
| 174 | void __wait_on_buffer(struct buffer_head *); | 179 | void __wait_on_buffer(struct buffer_head *); |
diff --git a/include/linux/bug.h b/include/linux/bug.h index 292d6a10b0c2..baff2e8fc8a8 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h | |||
| @@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, | |||
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | #endif /* CONFIG_GENERIC_BUG */ | 123 | #endif /* CONFIG_GENERIC_BUG */ |
| 124 | |||
| 125 | /* | ||
| 126 | * Since detected data corruption should stop operation on the affected | ||
| 127 | * structures, this returns false if the corruption condition is found. | ||
| 128 | */ | ||
| 129 | #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ | ||
| 130 | do { \ | ||
| 131 | if (unlikely(condition)) { \ | ||
| 132 | if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ | ||
| 133 | pr_err(fmt, ##__VA_ARGS__); \ | ||
| 134 | BUG(); \ | ||
| 135 | } else \ | ||
| 136 | WARN(1, fmt, ##__VA_ARGS__); \ | ||
| 137 | return false; \ | ||
| 138 | } \ | ||
| 139 | } while (0) | ||
| 140 | |||
| 124 | #endif /* _LINUX_BUG_H */ | 141 | #endif /* _LINUX_BUG_H */ |
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2189935075b4..6a524bf6a06d 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h | |||
| @@ -18,6 +18,7 @@ enum cache_type { | |||
| 18 | 18 | ||
| 19 | /** | 19 | /** |
| 20 | * struct cacheinfo - represent a cache leaf node | 20 | * struct cacheinfo - represent a cache leaf node |
| 21 | * @id: This cache's id. It is unique among caches with the same (type, level). | ||
| 21 | * @type: type of the cache - data, inst or unified | 22 | * @type: type of the cache - data, inst or unified |
| 22 | * @level: represents the hierarchy in the multi-level cache | 23 | * @level: represents the hierarchy in the multi-level cache |
| 23 | * @coherency_line_size: size of each cache line usually representing | 24 | * @coherency_line_size: size of each cache line usually representing |
| @@ -44,6 +45,7 @@ enum cache_type { | |||
| 44 | * keeping, the remaining members form the core properties of the cache | 45 | * keeping, the remaining members form the core properties of the cache |
| 45 | */ | 46 | */ |
| 46 | struct cacheinfo { | 47 | struct cacheinfo { |
| 48 | unsigned int id; | ||
| 47 | enum cache_type type; | 49 | enum cache_type type; |
| 48 | unsigned int level; | 50 | unsigned int level; |
| 49 | unsigned int coherency_line_size; | 51 | unsigned int coherency_line_size; |
| @@ -61,6 +63,7 @@ struct cacheinfo { | |||
| 61 | #define CACHE_WRITE_ALLOCATE BIT(3) | 63 | #define CACHE_WRITE_ALLOCATE BIT(3) |
| 62 | #define CACHE_ALLOCATE_POLICY_MASK \ | 64 | #define CACHE_ALLOCATE_POLICY_MASK \ |
| 63 | (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) | 65 | (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) |
| 66 | #define CACHE_ID BIT(4) | ||
| 64 | 67 | ||
| 65 | struct device_node *of_node; | 68 | struct device_node *of_node; |
| 66 | bool disable_sysfs; | 69 | bool disable_sysfs; |
| @@ -71,6 +74,7 @@ struct cpu_cacheinfo { | |||
| 71 | struct cacheinfo *info_list; | 74 | struct cacheinfo *info_list; |
| 72 | unsigned int num_levels; | 75 | unsigned int num_levels; |
| 73 | unsigned int num_leaves; | 76 | unsigned int num_leaves; |
| 77 | bool cpu_map_populated; | ||
| 74 | }; | 78 | }; |
| 75 | 79 | ||
| 76 | /* | 80 | /* |
diff --git a/include/linux/capability.h b/include/linux/capability.h index dbc21c719ce6..6ffb67e10c06 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
| @@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) | |||
| 240 | return true; | 240 | return true; |
| 241 | } | 241 | } |
| 242 | #endif /* CONFIG_MULTIUSER */ | 242 | #endif /* CONFIG_MULTIUSER */ |
| 243 | extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); | ||
| 243 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); | 244 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); |
| 244 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); | 245 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); |
| 246 | extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); | ||
| 245 | 247 | ||
| 246 | /* audit system wants to get cap info from files as well */ | 248 | /* audit system wants to get cap info from files as well */ |
| 247 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); | 249 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); |
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index a7653339fedb..c71dd8fa5764 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
| @@ -11,8 +11,8 @@ | |||
| 11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef __CPP_H__ | 14 | #ifndef __CCP_H__ |
| 15 | #define __CPP_H__ | 15 | #define __CCP_H__ |
| 16 | 16 | ||
| 17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
| 18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
| @@ -553,7 +553,7 @@ enum ccp_engine { | |||
| 553 | #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 | 553 | #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 |
| 554 | 554 | ||
| 555 | /** | 555 | /** |
| 556 | * struct ccp_cmd - CPP operation request | 556 | * struct ccp_cmd - CCP operation request |
| 557 | * @entry: list element (ccp driver use only) | 557 | * @entry: list element (ccp driver use only) |
| 558 | * @work: work element used for callbacks (ccp driver use only) | 558 | * @work: work element used for callbacks (ccp driver use only) |
| 559 | * @ccp: CCP device to be run on (ccp driver use only) | 559 | * @ccp: CCP device to be run on (ccp driver use only) |
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h deleted file mode 100644 index 138bbf721e70..000000000000 --- a/include/linux/cec-funcs.h +++ /dev/null | |||
| @@ -1,1971 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * cec - HDMI Consumer Electronics Control message functions | ||
| 3 | * | ||
| 4 | * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you may redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; version 2 of the License. | ||
| 9 | * | ||
| 10 | * Alternatively you can redistribute this file under the terms of the | ||
| 11 | * BSD license as stated below: | ||
| 12 | * | ||
| 13 | * Redistribution and use in source and binary forms, with or without | ||
| 14 | * modification, are permitted provided that the following conditions | ||
| 15 | * are met: | ||
| 16 | * 1. Redistributions of source code must retain the above copyright | ||
| 17 | * notice, this list of conditions and the following disclaimer. | ||
| 18 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 19 | * notice, this list of conditions and the following disclaimer in | ||
| 20 | * the documentation and/or other materials provided with the | ||
| 21 | * distribution. | ||
| 22 | * 3. The names of its contributors may not be used to endorse or promote | ||
| 23 | * products derived from this software without specific prior written | ||
| 24 | * permission. | ||
| 25 | * | ||
| 26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 33 | * SOFTWARE. | ||
| 34 | */ | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Note: this framework is still in staging and it is likely the API | ||
| 38 | * will change before it goes out of staging. | ||
| 39 | * | ||
| 40 | * Once it is moved out of staging this header will move to uapi. | ||
| 41 | */ | ||
| 42 | #ifndef _CEC_UAPI_FUNCS_H | ||
| 43 | #define _CEC_UAPI_FUNCS_H | ||
| 44 | |||
| 45 | #include <linux/cec.h> | ||
| 46 | |||
| 47 | /* One Touch Play Feature */ | ||
| 48 | static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr) | ||
| 49 | { | ||
| 50 | msg->len = 4; | ||
| 51 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 52 | msg->msg[1] = CEC_MSG_ACTIVE_SOURCE; | ||
| 53 | msg->msg[2] = phys_addr >> 8; | ||
| 54 | msg->msg[3] = phys_addr & 0xff; | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void cec_ops_active_source(const struct cec_msg *msg, | ||
| 58 | __u16 *phys_addr) | ||
| 59 | { | ||
| 60 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline void cec_msg_image_view_on(struct cec_msg *msg) | ||
| 64 | { | ||
| 65 | msg->len = 2; | ||
| 66 | msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON; | ||
| 67 | } | ||
| 68 | |||
| 69 | static inline void cec_msg_text_view_on(struct cec_msg *msg) | ||
| 70 | { | ||
| 71 | msg->len = 2; | ||
| 72 | msg->msg[1] = CEC_MSG_TEXT_VIEW_ON; | ||
| 73 | } | ||
| 74 | |||
| 75 | |||
| 76 | /* Routing Control Feature */ | ||
| 77 | static inline void cec_msg_inactive_source(struct cec_msg *msg, | ||
| 78 | __u16 phys_addr) | ||
| 79 | { | ||
| 80 | msg->len = 4; | ||
| 81 | msg->msg[1] = CEC_MSG_INACTIVE_SOURCE; | ||
| 82 | msg->msg[2] = phys_addr >> 8; | ||
| 83 | msg->msg[3] = phys_addr & 0xff; | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void cec_ops_inactive_source(const struct cec_msg *msg, | ||
| 87 | __u16 *phys_addr) | ||
| 88 | { | ||
| 89 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline void cec_msg_request_active_source(struct cec_msg *msg, | ||
| 93 | bool reply) | ||
| 94 | { | ||
| 95 | msg->len = 2; | ||
| 96 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 97 | msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE; | ||
| 98 | msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline void cec_msg_routing_information(struct cec_msg *msg, | ||
| 102 | __u16 phys_addr) | ||
| 103 | { | ||
| 104 | msg->len = 4; | ||
| 105 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 106 | msg->msg[1] = CEC_MSG_ROUTING_INFORMATION; | ||
| 107 | msg->msg[2] = phys_addr >> 8; | ||
| 108 | msg->msg[3] = phys_addr & 0xff; | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void cec_ops_routing_information(const struct cec_msg *msg, | ||
| 112 | __u16 *phys_addr) | ||
| 113 | { | ||
| 114 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline void cec_msg_routing_change(struct cec_msg *msg, | ||
| 118 | bool reply, | ||
| 119 | __u16 orig_phys_addr, | ||
| 120 | __u16 new_phys_addr) | ||
| 121 | { | ||
| 122 | msg->len = 6; | ||
| 123 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 124 | msg->msg[1] = CEC_MSG_ROUTING_CHANGE; | ||
| 125 | msg->msg[2] = orig_phys_addr >> 8; | ||
| 126 | msg->msg[3] = orig_phys_addr & 0xff; | ||
| 127 | msg->msg[4] = new_phys_addr >> 8; | ||
| 128 | msg->msg[5] = new_phys_addr & 0xff; | ||
| 129 | msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline void cec_ops_routing_change(const struct cec_msg *msg, | ||
| 133 | __u16 *orig_phys_addr, | ||
| 134 | __u16 *new_phys_addr) | ||
| 135 | { | ||
| 136 | *orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 137 | *new_phys_addr = (msg->msg[4] << 8) | msg->msg[5]; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr) | ||
| 141 | { | ||
| 142 | msg->len = 4; | ||
| 143 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 144 | msg->msg[1] = CEC_MSG_SET_STREAM_PATH; | ||
| 145 | msg->msg[2] = phys_addr >> 8; | ||
| 146 | msg->msg[3] = phys_addr & 0xff; | ||
| 147 | } | ||
| 148 | |||
| 149 | static inline void cec_ops_set_stream_path(const struct cec_msg *msg, | ||
| 150 | __u16 *phys_addr) | ||
| 151 | { | ||
| 152 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 153 | } | ||
| 154 | |||
| 155 | |||
| 156 | /* Standby Feature */ | ||
| 157 | static inline void cec_msg_standby(struct cec_msg *msg) | ||
| 158 | { | ||
| 159 | msg->len = 2; | ||
| 160 | msg->msg[1] = CEC_MSG_STANDBY; | ||
| 161 | } | ||
| 162 | |||
| 163 | |||
| 164 | /* One Touch Record Feature */ | ||
| 165 | static inline void cec_msg_record_off(struct cec_msg *msg, bool reply) | ||
| 166 | { | ||
| 167 | msg->len = 2; | ||
| 168 | msg->msg[1] = CEC_MSG_RECORD_OFF; | ||
| 169 | msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; | ||
| 170 | } | ||
| 171 | |||
| 172 | struct cec_op_arib_data { | ||
| 173 | __u16 transport_id; | ||
| 174 | __u16 service_id; | ||
| 175 | __u16 orig_network_id; | ||
| 176 | }; | ||
| 177 | |||
| 178 | struct cec_op_atsc_data { | ||
| 179 | __u16 transport_id; | ||
| 180 | __u16 program_number; | ||
| 181 | }; | ||
| 182 | |||
| 183 | struct cec_op_dvb_data { | ||
| 184 | __u16 transport_id; | ||
| 185 | __u16 service_id; | ||
| 186 | __u16 orig_network_id; | ||
| 187 | }; | ||
| 188 | |||
| 189 | struct cec_op_channel_data { | ||
| 190 | __u8 channel_number_fmt; | ||
| 191 | __u16 major; | ||
| 192 | __u16 minor; | ||
| 193 | }; | ||
| 194 | |||
| 195 | struct cec_op_digital_service_id { | ||
| 196 | __u8 service_id_method; | ||
| 197 | __u8 dig_bcast_system; | ||
| 198 | union { | ||
| 199 | struct cec_op_arib_data arib; | ||
| 200 | struct cec_op_atsc_data atsc; | ||
| 201 | struct cec_op_dvb_data dvb; | ||
| 202 | struct cec_op_channel_data channel; | ||
| 203 | }; | ||
| 204 | }; | ||
| 205 | |||
| 206 | struct cec_op_record_src { | ||
| 207 | __u8 type; | ||
| 208 | union { | ||
| 209 | struct cec_op_digital_service_id digital; | ||
| 210 | struct { | ||
| 211 | __u8 ana_bcast_type; | ||
| 212 | __u16 ana_freq; | ||
| 213 | __u8 bcast_system; | ||
| 214 | } analog; | ||
| 215 | struct { | ||
| 216 | __u8 plug; | ||
| 217 | } ext_plug; | ||
| 218 | struct { | ||
| 219 | __u16 phys_addr; | ||
| 220 | } ext_phys_addr; | ||
| 221 | }; | ||
| 222 | }; | ||
| 223 | |||
| 224 | static inline void cec_set_digital_service_id(__u8 *msg, | ||
| 225 | const struct cec_op_digital_service_id *digital) | ||
| 226 | { | ||
| 227 | *msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system; | ||
| 228 | if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { | ||
| 229 | *msg++ = (digital->channel.channel_number_fmt << 2) | | ||
| 230 | (digital->channel.major >> 8); | ||
| 231 | *msg++ = digital->channel.major & 0xff; | ||
| 232 | *msg++ = digital->channel.minor >> 8; | ||
| 233 | *msg++ = digital->channel.minor & 0xff; | ||
| 234 | *msg++ = 0; | ||
| 235 | *msg++ = 0; | ||
| 236 | return; | ||
| 237 | } | ||
| 238 | switch (digital->dig_bcast_system) { | ||
| 239 | case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN: | ||
| 240 | case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE: | ||
| 241 | case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT: | ||
| 242 | case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T: | ||
| 243 | *msg++ = digital->atsc.transport_id >> 8; | ||
| 244 | *msg++ = digital->atsc.transport_id & 0xff; | ||
| 245 | *msg++ = digital->atsc.program_number >> 8; | ||
| 246 | *msg++ = digital->atsc.program_number & 0xff; | ||
| 247 | *msg++ = 0; | ||
| 248 | *msg++ = 0; | ||
| 249 | break; | ||
| 250 | default: | ||
| 251 | *msg++ = digital->dvb.transport_id >> 8; | ||
| 252 | *msg++ = digital->dvb.transport_id & 0xff; | ||
| 253 | *msg++ = digital->dvb.service_id >> 8; | ||
| 254 | *msg++ = digital->dvb.service_id & 0xff; | ||
| 255 | *msg++ = digital->dvb.orig_network_id >> 8; | ||
| 256 | *msg++ = digital->dvb.orig_network_id & 0xff; | ||
| 257 | break; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline void cec_get_digital_service_id(const __u8 *msg, | ||
| 262 | struct cec_op_digital_service_id *digital) | ||
| 263 | { | ||
| 264 | digital->service_id_method = msg[0] >> 7; | ||
| 265 | digital->dig_bcast_system = msg[0] & 0x7f; | ||
| 266 | if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { | ||
| 267 | digital->channel.channel_number_fmt = msg[1] >> 2; | ||
| 268 | digital->channel.major = ((msg[1] & 3) << 6) | msg[2]; | ||
| 269 | digital->channel.minor = (msg[3] << 8) | msg[4]; | ||
| 270 | return; | ||
| 271 | } | ||
| 272 | digital->dvb.transport_id = (msg[1] << 8) | msg[2]; | ||
| 273 | digital->dvb.service_id = (msg[3] << 8) | msg[4]; | ||
| 274 | digital->dvb.orig_network_id = (msg[5] << 8) | msg[6]; | ||
| 275 | } | ||
| 276 | |||
| 277 | static inline void cec_msg_record_on_own(struct cec_msg *msg) | ||
| 278 | { | ||
| 279 | msg->len = 3; | ||
| 280 | msg->msg[1] = CEC_MSG_RECORD_ON; | ||
| 281 | msg->msg[2] = CEC_OP_RECORD_SRC_OWN; | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline void cec_msg_record_on_digital(struct cec_msg *msg, | ||
| 285 | const struct cec_op_digital_service_id *digital) | ||
| 286 | { | ||
| 287 | msg->len = 10; | ||
| 288 | msg->msg[1] = CEC_MSG_RECORD_ON; | ||
| 289 | msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL; | ||
| 290 | cec_set_digital_service_id(msg->msg + 3, digital); | ||
| 291 | } | ||
| 292 | |||
| 293 | static inline void cec_msg_record_on_analog(struct cec_msg *msg, | ||
| 294 | __u8 ana_bcast_type, | ||
| 295 | __u16 ana_freq, | ||
| 296 | __u8 bcast_system) | ||
| 297 | { | ||
| 298 | msg->len = 7; | ||
| 299 | msg->msg[1] = CEC_MSG_RECORD_ON; | ||
| 300 | msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG; | ||
| 301 | msg->msg[3] = ana_bcast_type; | ||
| 302 | msg->msg[4] = ana_freq >> 8; | ||
| 303 | msg->msg[5] = ana_freq & 0xff; | ||
| 304 | msg->msg[6] = bcast_system; | ||
| 305 | } | ||
| 306 | |||
| 307 | static inline void cec_msg_record_on_plug(struct cec_msg *msg, | ||
| 308 | __u8 plug) | ||
| 309 | { | ||
| 310 | msg->len = 4; | ||
| 311 | msg->msg[1] = CEC_MSG_RECORD_ON; | ||
| 312 | msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG; | ||
| 313 | msg->msg[3] = plug; | ||
| 314 | } | ||
| 315 | |||
| 316 | static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, | ||
| 317 | __u16 phys_addr) | ||
| 318 | { | ||
| 319 | msg->len = 5; | ||
| 320 | msg->msg[1] = CEC_MSG_RECORD_ON; | ||
| 321 | msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR; | ||
| 322 | msg->msg[3] = phys_addr >> 8; | ||
| 323 | msg->msg[4] = phys_addr & 0xff; | ||
| 324 | } | ||
| 325 | |||
| 326 | static inline void cec_msg_record_on(struct cec_msg *msg, | ||
| 327 | bool reply, | ||
| 328 | const struct cec_op_record_src *rec_src) | ||
| 329 | { | ||
| 330 | switch (rec_src->type) { | ||
| 331 | case CEC_OP_RECORD_SRC_OWN: | ||
| 332 | cec_msg_record_on_own(msg); | ||
| 333 | break; | ||
| 334 | case CEC_OP_RECORD_SRC_DIGITAL: | ||
| 335 | cec_msg_record_on_digital(msg, &rec_src->digital); | ||
| 336 | break; | ||
| 337 | case CEC_OP_RECORD_SRC_ANALOG: | ||
| 338 | cec_msg_record_on_analog(msg, | ||
| 339 | rec_src->analog.ana_bcast_type, | ||
| 340 | rec_src->analog.ana_freq, | ||
| 341 | rec_src->analog.bcast_system); | ||
| 342 | break; | ||
| 343 | case CEC_OP_RECORD_SRC_EXT_PLUG: | ||
| 344 | cec_msg_record_on_plug(msg, rec_src->ext_plug.plug); | ||
| 345 | break; | ||
| 346 | case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: | ||
| 347 | cec_msg_record_on_phys_addr(msg, | ||
| 348 | rec_src->ext_phys_addr.phys_addr); | ||
| 349 | break; | ||
| 350 | } | ||
| 351 | msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; | ||
| 352 | } | ||
| 353 | |||
| 354 | static inline void cec_ops_record_on(const struct cec_msg *msg, | ||
| 355 | struct cec_op_record_src *rec_src) | ||
| 356 | { | ||
| 357 | rec_src->type = msg->msg[2]; | ||
| 358 | switch (rec_src->type) { | ||
| 359 | case CEC_OP_RECORD_SRC_OWN: | ||
| 360 | break; | ||
| 361 | case CEC_OP_RECORD_SRC_DIGITAL: | ||
| 362 | cec_get_digital_service_id(msg->msg + 3, &rec_src->digital); | ||
| 363 | break; | ||
| 364 | case CEC_OP_RECORD_SRC_ANALOG: | ||
| 365 | rec_src->analog.ana_bcast_type = msg->msg[3]; | ||
| 366 | rec_src->analog.ana_freq = | ||
| 367 | (msg->msg[4] << 8) | msg->msg[5]; | ||
| 368 | rec_src->analog.bcast_system = msg->msg[6]; | ||
| 369 | break; | ||
| 370 | case CEC_OP_RECORD_SRC_EXT_PLUG: | ||
| 371 | rec_src->ext_plug.plug = msg->msg[3]; | ||
| 372 | break; | ||
| 373 | case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR: | ||
| 374 | rec_src->ext_phys_addr.phys_addr = | ||
| 375 | (msg->msg[3] << 8) | msg->msg[4]; | ||
| 376 | break; | ||
| 377 | } | ||
| 378 | } | ||
| 379 | |||
| 380 | static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status) | ||
| 381 | { | ||
| 382 | msg->len = 3; | ||
| 383 | msg->msg[1] = CEC_MSG_RECORD_STATUS; | ||
| 384 | msg->msg[2] = rec_status; | ||
| 385 | } | ||
| 386 | |||
| 387 | static inline void cec_ops_record_status(const struct cec_msg *msg, | ||
| 388 | __u8 *rec_status) | ||
| 389 | { | ||
| 390 | *rec_status = msg->msg[2]; | ||
| 391 | } | ||
| 392 | |||
| 393 | static inline void cec_msg_record_tv_screen(struct cec_msg *msg, | ||
| 394 | bool reply) | ||
| 395 | { | ||
| 396 | msg->len = 2; | ||
| 397 | msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN; | ||
| 398 | msg->reply = reply ? CEC_MSG_RECORD_ON : 0; | ||
| 399 | } | ||
| 400 | |||
| 401 | |||
| 402 | /* Timer Programming Feature */ | ||
| 403 | static inline void cec_msg_timer_status(struct cec_msg *msg, | ||
| 404 | __u8 timer_overlap_warning, | ||
| 405 | __u8 media_info, | ||
| 406 | __u8 prog_info, | ||
| 407 | __u8 prog_error, | ||
| 408 | __u8 duration_hr, | ||
| 409 | __u8 duration_min) | ||
| 410 | { | ||
| 411 | msg->len = 3; | ||
| 412 | msg->msg[1] = CEC_MSG_TIMER_STATUS; | ||
| 413 | msg->msg[2] = (timer_overlap_warning << 7) | | ||
| 414 | (media_info << 5) | | ||
| 415 | (prog_info ? 0x10 : 0) | | ||
| 416 | (prog_info ? prog_info : prog_error); | ||
| 417 | if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || | ||
| 418 | prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || | ||
| 419 | prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { | ||
| 420 | msg->len += 2; | ||
| 421 | msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 422 | msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | static inline void cec_ops_timer_status(const struct cec_msg *msg, | ||
| 427 | __u8 *timer_overlap_warning, | ||
| 428 | __u8 *media_info, | ||
| 429 | __u8 *prog_info, | ||
| 430 | __u8 *prog_error, | ||
| 431 | __u8 *duration_hr, | ||
| 432 | __u8 *duration_min) | ||
| 433 | { | ||
| 434 | *timer_overlap_warning = msg->msg[2] >> 7; | ||
| 435 | *media_info = (msg->msg[2] >> 5) & 3; | ||
| 436 | if (msg->msg[2] & 0x10) { | ||
| 437 | *prog_info = msg->msg[2] & 0xf; | ||
| 438 | *prog_error = 0; | ||
| 439 | } else { | ||
| 440 | *prog_info = 0; | ||
| 441 | *prog_error = msg->msg[2] & 0xf; | ||
| 442 | } | ||
| 443 | if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE || | ||
| 444 | *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE || | ||
| 445 | *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) { | ||
| 446 | *duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf); | ||
| 447 | *duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 448 | } else { | ||
| 449 | *duration_hr = *duration_min = 0; | ||
| 450 | } | ||
| 451 | } | ||
| 452 | |||
| 453 | static inline void cec_msg_timer_cleared_status(struct cec_msg *msg, | ||
| 454 | __u8 timer_cleared_status) | ||
| 455 | { | ||
| 456 | msg->len = 3; | ||
| 457 | msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS; | ||
| 458 | msg->msg[2] = timer_cleared_status; | ||
| 459 | } | ||
| 460 | |||
| 461 | static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg, | ||
| 462 | __u8 *timer_cleared_status) | ||
| 463 | { | ||
| 464 | *timer_cleared_status = msg->msg[2]; | ||
| 465 | } | ||
| 466 | |||
| 467 | static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg, | ||
| 468 | bool reply, | ||
| 469 | __u8 day, | ||
| 470 | __u8 month, | ||
| 471 | __u8 start_hr, | ||
| 472 | __u8 start_min, | ||
| 473 | __u8 duration_hr, | ||
| 474 | __u8 duration_min, | ||
| 475 | __u8 recording_seq, | ||
| 476 | __u8 ana_bcast_type, | ||
| 477 | __u16 ana_freq, | ||
| 478 | __u8 bcast_system) | ||
| 479 | { | ||
| 480 | msg->len = 13; | ||
| 481 | msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER; | ||
| 482 | msg->msg[2] = day; | ||
| 483 | msg->msg[3] = month; | ||
| 484 | /* Hours and minutes are in BCD format */ | ||
| 485 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 486 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 487 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 488 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 489 | msg->msg[8] = recording_seq; | ||
| 490 | msg->msg[9] = ana_bcast_type; | ||
| 491 | msg->msg[10] = ana_freq >> 8; | ||
| 492 | msg->msg[11] = ana_freq & 0xff; | ||
| 493 | msg->msg[12] = bcast_system; | ||
| 494 | msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; | ||
| 495 | } | ||
| 496 | |||
| 497 | static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg, | ||
| 498 | __u8 *day, | ||
| 499 | __u8 *month, | ||
| 500 | __u8 *start_hr, | ||
| 501 | __u8 *start_min, | ||
| 502 | __u8 *duration_hr, | ||
| 503 | __u8 *duration_min, | ||
| 504 | __u8 *recording_seq, | ||
| 505 | __u8 *ana_bcast_type, | ||
| 506 | __u16 *ana_freq, | ||
| 507 | __u8 *bcast_system) | ||
| 508 | { | ||
| 509 | *day = msg->msg[2]; | ||
| 510 | *month = msg->msg[3]; | ||
| 511 | /* Hours and minutes are in BCD format */ | ||
| 512 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 513 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 514 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 515 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 516 | *recording_seq = msg->msg[8]; | ||
| 517 | *ana_bcast_type = msg->msg[9]; | ||
| 518 | *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; | ||
| 519 | *bcast_system = msg->msg[12]; | ||
| 520 | } | ||
| 521 | |||
| 522 | static inline void cec_msg_clear_digital_timer(struct cec_msg *msg, | ||
| 523 | bool reply, | ||
| 524 | __u8 day, | ||
| 525 | __u8 month, | ||
| 526 | __u8 start_hr, | ||
| 527 | __u8 start_min, | ||
| 528 | __u8 duration_hr, | ||
| 529 | __u8 duration_min, | ||
| 530 | __u8 recording_seq, | ||
| 531 | const struct cec_op_digital_service_id *digital) | ||
| 532 | { | ||
| 533 | msg->len = 16; | ||
| 534 | msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; | ||
| 535 | msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER; | ||
| 536 | msg->msg[2] = day; | ||
| 537 | msg->msg[3] = month; | ||
| 538 | /* Hours and minutes are in BCD format */ | ||
| 539 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 540 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 541 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 542 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 543 | msg->msg[8] = recording_seq; | ||
| 544 | cec_set_digital_service_id(msg->msg + 9, digital); | ||
| 545 | } | ||
| 546 | |||
| 547 | static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg, | ||
| 548 | __u8 *day, | ||
| 549 | __u8 *month, | ||
| 550 | __u8 *start_hr, | ||
| 551 | __u8 *start_min, | ||
| 552 | __u8 *duration_hr, | ||
| 553 | __u8 *duration_min, | ||
| 554 | __u8 *recording_seq, | ||
| 555 | struct cec_op_digital_service_id *digital) | ||
| 556 | { | ||
| 557 | *day = msg->msg[2]; | ||
| 558 | *month = msg->msg[3]; | ||
| 559 | /* Hours and minutes are in BCD format */ | ||
| 560 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 561 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 562 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 563 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 564 | *recording_seq = msg->msg[8]; | ||
| 565 | cec_get_digital_service_id(msg->msg + 9, digital); | ||
| 566 | } | ||
| 567 | |||
| 568 | static inline void cec_msg_clear_ext_timer(struct cec_msg *msg, | ||
| 569 | bool reply, | ||
| 570 | __u8 day, | ||
| 571 | __u8 month, | ||
| 572 | __u8 start_hr, | ||
| 573 | __u8 start_min, | ||
| 574 | __u8 duration_hr, | ||
| 575 | __u8 duration_min, | ||
| 576 | __u8 recording_seq, | ||
| 577 | __u8 ext_src_spec, | ||
| 578 | __u8 plug, | ||
| 579 | __u16 phys_addr) | ||
| 580 | { | ||
| 581 | msg->len = 13; | ||
| 582 | msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER; | ||
| 583 | msg->msg[2] = day; | ||
| 584 | msg->msg[3] = month; | ||
| 585 | /* Hours and minutes are in BCD format */ | ||
| 586 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 587 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 588 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 589 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 590 | msg->msg[8] = recording_seq; | ||
| 591 | msg->msg[9] = ext_src_spec; | ||
| 592 | msg->msg[10] = plug; | ||
| 593 | msg->msg[11] = phys_addr >> 8; | ||
| 594 | msg->msg[12] = phys_addr & 0xff; | ||
| 595 | msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg, | ||
| 599 | __u8 *day, | ||
| 600 | __u8 *month, | ||
| 601 | __u8 *start_hr, | ||
| 602 | __u8 *start_min, | ||
| 603 | __u8 *duration_hr, | ||
| 604 | __u8 *duration_min, | ||
| 605 | __u8 *recording_seq, | ||
| 606 | __u8 *ext_src_spec, | ||
| 607 | __u8 *plug, | ||
| 608 | __u16 *phys_addr) | ||
| 609 | { | ||
| 610 | *day = msg->msg[2]; | ||
| 611 | *month = msg->msg[3]; | ||
| 612 | /* Hours and minutes are in BCD format */ | ||
| 613 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 614 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 615 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 616 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 617 | *recording_seq = msg->msg[8]; | ||
| 618 | *ext_src_spec = msg->msg[9]; | ||
| 619 | *plug = msg->msg[10]; | ||
| 620 | *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; | ||
| 621 | } | ||
| 622 | |||
| 623 | static inline void cec_msg_set_analogue_timer(struct cec_msg *msg, | ||
| 624 | bool reply, | ||
| 625 | __u8 day, | ||
| 626 | __u8 month, | ||
| 627 | __u8 start_hr, | ||
| 628 | __u8 start_min, | ||
| 629 | __u8 duration_hr, | ||
| 630 | __u8 duration_min, | ||
| 631 | __u8 recording_seq, | ||
| 632 | __u8 ana_bcast_type, | ||
| 633 | __u16 ana_freq, | ||
| 634 | __u8 bcast_system) | ||
| 635 | { | ||
| 636 | msg->len = 13; | ||
| 637 | msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER; | ||
| 638 | msg->msg[2] = day; | ||
| 639 | msg->msg[3] = month; | ||
| 640 | /* Hours and minutes are in BCD format */ | ||
| 641 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 642 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 643 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 644 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 645 | msg->msg[8] = recording_seq; | ||
| 646 | msg->msg[9] = ana_bcast_type; | ||
| 647 | msg->msg[10] = ana_freq >> 8; | ||
| 648 | msg->msg[11] = ana_freq & 0xff; | ||
| 649 | msg->msg[12] = bcast_system; | ||
| 650 | msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; | ||
| 651 | } | ||
| 652 | |||
| 653 | static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg, | ||
| 654 | __u8 *day, | ||
| 655 | __u8 *month, | ||
| 656 | __u8 *start_hr, | ||
| 657 | __u8 *start_min, | ||
| 658 | __u8 *duration_hr, | ||
| 659 | __u8 *duration_min, | ||
| 660 | __u8 *recording_seq, | ||
| 661 | __u8 *ana_bcast_type, | ||
| 662 | __u16 *ana_freq, | ||
| 663 | __u8 *bcast_system) | ||
| 664 | { | ||
| 665 | *day = msg->msg[2]; | ||
| 666 | *month = msg->msg[3]; | ||
| 667 | /* Hours and minutes are in BCD format */ | ||
| 668 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 669 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 670 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 671 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 672 | *recording_seq = msg->msg[8]; | ||
| 673 | *ana_bcast_type = msg->msg[9]; | ||
| 674 | *ana_freq = (msg->msg[10] << 8) | msg->msg[11]; | ||
| 675 | *bcast_system = msg->msg[12]; | ||
| 676 | } | ||
| 677 | |||
| 678 | static inline void cec_msg_set_digital_timer(struct cec_msg *msg, | ||
| 679 | bool reply, | ||
| 680 | __u8 day, | ||
| 681 | __u8 month, | ||
| 682 | __u8 start_hr, | ||
| 683 | __u8 start_min, | ||
| 684 | __u8 duration_hr, | ||
| 685 | __u8 duration_min, | ||
| 686 | __u8 recording_seq, | ||
| 687 | const struct cec_op_digital_service_id *digital) | ||
| 688 | { | ||
| 689 | msg->len = 16; | ||
| 690 | msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; | ||
| 691 | msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER; | ||
| 692 | msg->msg[2] = day; | ||
| 693 | msg->msg[3] = month; | ||
| 694 | /* Hours and minutes are in BCD format */ | ||
| 695 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 696 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 697 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 698 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 699 | msg->msg[8] = recording_seq; | ||
| 700 | cec_set_digital_service_id(msg->msg + 9, digital); | ||
| 701 | } | ||
| 702 | |||
| 703 | static inline void cec_ops_set_digital_timer(const struct cec_msg *msg, | ||
| 704 | __u8 *day, | ||
| 705 | __u8 *month, | ||
| 706 | __u8 *start_hr, | ||
| 707 | __u8 *start_min, | ||
| 708 | __u8 *duration_hr, | ||
| 709 | __u8 *duration_min, | ||
| 710 | __u8 *recording_seq, | ||
| 711 | struct cec_op_digital_service_id *digital) | ||
| 712 | { | ||
| 713 | *day = msg->msg[2]; | ||
| 714 | *month = msg->msg[3]; | ||
| 715 | /* Hours and minutes are in BCD format */ | ||
| 716 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 717 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 718 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 719 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 720 | *recording_seq = msg->msg[8]; | ||
| 721 | cec_get_digital_service_id(msg->msg + 9, digital); | ||
| 722 | } | ||
| 723 | |||
| 724 | static inline void cec_msg_set_ext_timer(struct cec_msg *msg, | ||
| 725 | bool reply, | ||
| 726 | __u8 day, | ||
| 727 | __u8 month, | ||
| 728 | __u8 start_hr, | ||
| 729 | __u8 start_min, | ||
| 730 | __u8 duration_hr, | ||
| 731 | __u8 duration_min, | ||
| 732 | __u8 recording_seq, | ||
| 733 | __u8 ext_src_spec, | ||
| 734 | __u8 plug, | ||
| 735 | __u16 phys_addr) | ||
| 736 | { | ||
| 737 | msg->len = 13; | ||
| 738 | msg->msg[1] = CEC_MSG_SET_EXT_TIMER; | ||
| 739 | msg->msg[2] = day; | ||
| 740 | msg->msg[3] = month; | ||
| 741 | /* Hours and minutes are in BCD format */ | ||
| 742 | msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10); | ||
| 743 | msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10); | ||
| 744 | msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10); | ||
| 745 | msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10); | ||
| 746 | msg->msg[8] = recording_seq; | ||
| 747 | msg->msg[9] = ext_src_spec; | ||
| 748 | msg->msg[10] = plug; | ||
| 749 | msg->msg[11] = phys_addr >> 8; | ||
| 750 | msg->msg[12] = phys_addr & 0xff; | ||
| 751 | msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0; | ||
| 752 | } | ||
| 753 | |||
| 754 | static inline void cec_ops_set_ext_timer(const struct cec_msg *msg, | ||
| 755 | __u8 *day, | ||
| 756 | __u8 *month, | ||
| 757 | __u8 *start_hr, | ||
| 758 | __u8 *start_min, | ||
| 759 | __u8 *duration_hr, | ||
| 760 | __u8 *duration_min, | ||
| 761 | __u8 *recording_seq, | ||
| 762 | __u8 *ext_src_spec, | ||
| 763 | __u8 *plug, | ||
| 764 | __u16 *phys_addr) | ||
| 765 | { | ||
| 766 | *day = msg->msg[2]; | ||
| 767 | *month = msg->msg[3]; | ||
| 768 | /* Hours and minutes are in BCD format */ | ||
| 769 | *start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf); | ||
| 770 | *start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf); | ||
| 771 | *duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf); | ||
| 772 | *duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf); | ||
| 773 | *recording_seq = msg->msg[8]; | ||
| 774 | *ext_src_spec = msg->msg[9]; | ||
| 775 | *plug = msg->msg[10]; | ||
| 776 | *phys_addr = (msg->msg[11] << 8) | msg->msg[12]; | ||
| 777 | } | ||
| 778 | |||
| 779 | static inline void cec_msg_set_timer_program_title(struct cec_msg *msg, | ||
| 780 | const char *prog_title) | ||
| 781 | { | ||
| 782 | unsigned int len = strlen(prog_title); | ||
| 783 | |||
| 784 | if (len > 14) | ||
| 785 | len = 14; | ||
| 786 | msg->len = 2 + len; | ||
| 787 | msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE; | ||
| 788 | memcpy(msg->msg + 2, prog_title, len); | ||
| 789 | } | ||
| 790 | |||
| 791 | static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg, | ||
| 792 | char *prog_title) | ||
| 793 | { | ||
| 794 | unsigned int len = msg->len > 2 ? msg->len - 2 : 0; | ||
| 795 | |||
| 796 | if (len > 14) | ||
| 797 | len = 14; | ||
| 798 | memcpy(prog_title, msg->msg + 2, len); | ||
| 799 | prog_title[len] = '\0'; | ||
| 800 | } | ||
| 801 | |||
/* System Information Feature */
/* Build <CEC Version>: the reply to <Get CEC Version>. */
static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_CEC_VERSION;
	msg->msg[2] = cec_version;
}

/* Decode <CEC Version>: extract the remote device's CEC version operand. */
static inline void cec_ops_cec_version(const struct cec_msg *msg,
				       __u8 *cec_version)
{
	*cec_version = msg->msg[2];
}

/* Build <Get CEC Version>; if reply is set, expect a <CEC Version> reply. */
static inline void cec_msg_get_cec_version(struct cec_msg *msg,
					   bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
	msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
}

/* Build <Report Physical Address> (always broadcast). */
static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
					__u16 phys_addr, __u8 prim_devtype)
{
	msg->len = 5;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
	msg->msg[2] = phys_addr >> 8;
	msg->msg[3] = phys_addr & 0xff;
	msg->msg[4] = prim_devtype;
}

/* Decode <Report Physical Address>: big-endian address + device type. */
static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
					__u16 *phys_addr, __u8 *prim_devtype)
{
	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
	*prim_devtype = msg->msg[4];
}

/* Build <Give Physical Address>; optionally expect the broadcast report. */
static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
					      bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
	msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
}

/*
 * Build <Set Menu Language> (broadcast). language must point to at least
 * 3 bytes (ISO 639-2 code); no NUL terminator is copied.
 */
static inline void cec_msg_set_menu_language(struct cec_msg *msg,
					     const char *language)
{
	msg->len = 5;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
	memcpy(msg->msg + 2, language, 3);
}

/* Decode <Set Menu Language>: language must have room for 4 bytes. */
static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
					     char *language)
{
	memcpy(language, msg->msg + 2, 3);
	language[3] = '\0';
}

/* Build <Get Menu Language>; optionally expect <Set Menu Language>. */
static inline void cec_msg_get_menu_language(struct cec_msg *msg,
					     bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
	msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
}
| 873 | |||
/*
 * Assumes a single RC Profile byte and a single Device Features byte,
 * i.e. no extended features are supported by this helper function.
 *
 * As of CEC 2.0 no extended features are defined, should those be added
 * in the future, then this function needs to be adapted or a new function
 * should be added.
 */
static inline void cec_msg_report_features(struct cec_msg *msg,
				__u8 cec_version, __u8 all_device_types,
				__u8 rc_profile, __u8 dev_features)
{
	msg->len = 6;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_REPORT_FEATURES;
	msg->msg[2] = cec_version;
	msg->msg[3] = all_device_types;
	/* One RC Profile byte and one Device Features byte, no extensions */
	msg->msg[4] = rc_profile;
	msg->msg[5] = dev_features;
}
| 894 | |||
| 895 | static inline void cec_ops_report_features(const struct cec_msg *msg, | ||
| 896 | __u8 *cec_version, __u8 *all_device_types, | ||
| 897 | const __u8 **rc_profile, const __u8 **dev_features) | ||
| 898 | { | ||
| 899 | const __u8 *p = &msg->msg[4]; | ||
| 900 | |||
| 901 | *cec_version = msg->msg[2]; | ||
| 902 | *all_device_types = msg->msg[3]; | ||
| 903 | *rc_profile = p; | ||
| 904 | while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT)) | ||
| 905 | p++; | ||
| 906 | if (!(*p & CEC_OP_FEAT_EXT)) { | ||
| 907 | *dev_features = p + 1; | ||
| 908 | while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT)) | ||
| 909 | p++; | ||
| 910 | } | ||
| 911 | if (*p & CEC_OP_FEAT_EXT) | ||
| 912 | *rc_profile = *dev_features = NULL; | ||
| 913 | } | ||
| 914 | |||
| 915 | static inline void cec_msg_give_features(struct cec_msg *msg, | ||
| 916 | bool reply) | ||
| 917 | { | ||
| 918 | msg->len = 2; | ||
| 919 | msg->msg[1] = CEC_MSG_GIVE_FEATURES; | ||
| 920 | msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0; | ||
| 921 | } | ||
| 922 | |||
/* Deck Control Feature */
/* Build <Deck Control> with the requested deck control mode. */
static inline void cec_msg_deck_control(struct cec_msg *msg,
					__u8 deck_control_mode)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_DECK_CONTROL;
	msg->msg[2] = deck_control_mode;
}

/* Decode <Deck Control>: extract the deck control mode operand. */
static inline void cec_ops_deck_control(const struct cec_msg *msg,
					__u8 *deck_control_mode)
{
	*deck_control_mode = msg->msg[2];
}

/* Build <Deck Status> with the current deck info. */
static inline void cec_msg_deck_status(struct cec_msg *msg,
				       __u8 deck_info)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_DECK_STATUS;
	msg->msg[2] = deck_info;
}

/* Decode <Deck Status>: extract the deck info operand. */
static inline void cec_ops_deck_status(const struct cec_msg *msg,
				       __u8 *deck_info)
{
	*deck_info = msg->msg[2];
}

/* Build <Give Deck Status>; optionally expect a <Deck Status> reply. */
static inline void cec_msg_give_deck_status(struct cec_msg *msg,
					    bool reply,
					    __u8 status_req)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
	msg->msg[2] = status_req;
	msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
}

/* Decode <Give Deck Status>: extract the status request operand. */
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
					    __u8 *status_req)
{
	*status_req = msg->msg[2];
}

/* Build <Play> with the requested play mode. */
static inline void cec_msg_play(struct cec_msg *msg,
				__u8 play_mode)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_PLAY;
	msg->msg[2] = play_mode;
}

/* Decode <Play>: extract the play mode operand. */
static inline void cec_ops_play(const struct cec_msg *msg,
				__u8 *play_mode)
{
	*play_mode = msg->msg[2];
}
| 981 | |||
| 982 | |||
/* Tuner Control Feature */
/*
 * Parsed form of a <Tuner Device Status> message: is_analog selects which
 * union member is valid.
 */
struct cec_op_tuner_device_info {
	__u8 rec_flag;			/* recording flag (1 bit on the wire) */
	__u8 tuner_display_info;	/* low 7 bits of byte 2 on the wire */
	bool is_analog;			/* true: .analog valid, false: .digital */
	union {
		struct cec_op_digital_service_id digital;
		struct {
			__u8 ana_bcast_type;
			__u16 ana_freq;		/* big-endian on the wire */
			__u8 bcast_system;
		} analog;
	};
};
| 997 | |||
/* Build <Tuner Device Status> for an analogue service. */
static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
						      __u8 rec_flag,
						      __u8 tuner_display_info,
						      __u8 ana_bcast_type,
						      __u16 ana_freq,
						      __u8 bcast_system)
{
	msg->len = 7;
	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
	/* rec_flag occupies bit 7, tuner_display_info the low 7 bits */
	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
	msg->msg[3] = ana_bcast_type;
	msg->msg[4] = ana_freq >> 8;
	msg->msg[5] = ana_freq & 0xff;
	msg->msg[6] = bcast_system;
}

/* Build <Tuner Device Status> for a digital service. */
static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
		   __u8 rec_flag, __u8 tuner_display_info,
		   const struct cec_op_digital_service_id *digital)
{
	msg->len = 10;
	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
	cec_set_digital_service_id(msg->msg + 3, digital);
}

/*
 * Build <Tuner Device Status> from a parsed tuner_dev_info, dispatching
 * on is_analog to the analogue or digital variant above.
 */
static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
			const struct cec_op_tuner_device_info *tuner_dev_info)
{
	if (tuner_dev_info->is_analog)
		cec_msg_tuner_device_status_analog(msg,
			tuner_dev_info->rec_flag,
			tuner_dev_info->tuner_display_info,
			tuner_dev_info->analog.ana_bcast_type,
			tuner_dev_info->analog.ana_freq,
			tuner_dev_info->analog.bcast_system);
	else
		cec_msg_tuner_device_status_digital(msg,
			tuner_dev_info->rec_flag,
			tuner_dev_info->tuner_display_info,
			&tuner_dev_info->digital);
}

/*
 * Decode <Tuner Device Status>. The analogue form is 7 bytes and the
 * digital form 10, so a length below 10 implies an analogue service.
 */
static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
				struct cec_op_tuner_device_info *tuner_dev_info)
{
	tuner_dev_info->is_analog = msg->len < 10;
	tuner_dev_info->rec_flag = msg->msg[2] >> 7;
	tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
	if (tuner_dev_info->is_analog) {
		tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
		tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
		tuner_dev_info->analog.bcast_system = msg->msg[6];
		return;
	}
	cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
}

/* Build <Give Tuner Device Status>; optionally expect the status reply. */
static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
						    bool reply,
						    __u8 status_req)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
	msg->msg[2] = status_req;
	msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
}

/* Decode <Give Tuner Device Status>: extract the status request operand. */
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
						    __u8 *status_req)
{
	*status_req = msg->msg[2];
}

/* Build <Select Analogue Service> (big-endian frequency). */
static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
						   __u8 ana_bcast_type,
						   __u16 ana_freq,
						   __u8 bcast_system)
{
	msg->len = 6;
	msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
	msg->msg[2] = ana_bcast_type;
	msg->msg[3] = ana_freq >> 8;
	msg->msg[4] = ana_freq & 0xff;
	msg->msg[5] = bcast_system;
}

/* Decode <Select Analogue Service>. */
static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
						   __u8 *ana_bcast_type,
						   __u16 *ana_freq,
						   __u8 *bcast_system)
{
	*ana_bcast_type = msg->msg[2];
	*ana_freq = (msg->msg[3] << 8) | msg->msg[4];
	*bcast_system = msg->msg[5];
}

/* Build <Select Digital Service> from a digital service id. */
static inline void cec_msg_select_digital_service(struct cec_msg *msg,
				const struct cec_op_digital_service_id *digital)
{
	msg->len = 9;
	msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
	cec_set_digital_service_id(msg->msg + 2, digital);
}

/* Decode <Select Digital Service> into a digital service id. */
static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
				struct cec_op_digital_service_id *digital)
{
	cec_get_digital_service_id(msg->msg + 2, digital);
}

/* Build <Tuner Step Decrement> (no operands). */
static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
}

/* Build <Tuner Step Increment> (no operands). */
static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
}
| 1120 | |||
| 1121 | |||
/* Vendor Specific Commands Feature */
/* Build <Device Vendor ID> (broadcast, 24-bit big-endian vendor id). */
static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
{
	msg->len = 5;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
	msg->msg[2] = vendor_id >> 16;
	msg->msg[3] = (vendor_id >> 8) & 0xff;
	msg->msg[4] = vendor_id & 0xff;
}

/* Decode <Device Vendor ID>: reassemble the 24-bit vendor id. */
static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
					    __u32 *vendor_id)
{
	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
}

/* Build <Give Device Vendor ID>; optionally expect the broadcast reply. */
static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
						 bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
	msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}

/* Build <Vendor Command>: payload is clamped to the 14 bytes that fit. */
static inline void cec_msg_vendor_command(struct cec_msg *msg,
					  __u8 size, const __u8 *vendor_cmd)
{
	if (size > 14)
		size = 14;
	msg->len = 2 + size;
	msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
	memcpy(msg->msg + 2, vendor_cmd, size);
}

/*
 * Decode <Vendor Command>: vendor_cmd points into msg (no copy), size is
 * derived from the message length and clamped to 14.
 */
static inline void cec_ops_vendor_command(const struct cec_msg *msg,
					  __u8 *size,
					  const __u8 **vendor_cmd)
{
	*size = msg->len - 2;

	if (*size > 14)
		*size = 14;
	*vendor_cmd = msg->msg + 2;
}

/* Build <Vendor Command With ID>: 24-bit vendor id + up to 11 data bytes. */
static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
						  __u32 vendor_id, __u8 size,
						  const __u8 *vendor_cmd)
{
	if (size > 11)
		size = 11;
	msg->len = 5 + size;
	msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
	msg->msg[2] = vendor_id >> 16;
	msg->msg[3] = (vendor_id >> 8) & 0xff;
	msg->msg[4] = vendor_id & 0xff;
	memcpy(msg->msg + 5, vendor_cmd, size);
}

/* Decode <Vendor Command With ID>: vendor_cmd points into msg (no copy). */
static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
						  __u32 *vendor_id, __u8 *size,
						  const __u8 **vendor_cmd)
{
	*size = msg->len - 5;

	if (*size > 11)
		*size = 11;
	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
	*vendor_cmd = msg->msg + 5;
}

/* Build <Vendor Remote Button Down>: rc_code clamped to 14 bytes. */
static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
						     __u8 size,
						     const __u8 *rc_code)
{
	if (size > 14)
		size = 14;
	msg->len = 2 + size;
	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
	memcpy(msg->msg + 2, rc_code, size);
}

/* Decode <Vendor Remote Button Down>: rc_code points into msg (no copy). */
static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
						     __u8 *size,
						     const __u8 **rc_code)
{
	*size = msg->len - 2;

	if (*size > 14)
		*size = 14;
	*rc_code = msg->msg + 2;
}

/* Build <Vendor Remote Button Up> (no operands). */
static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
}
| 1221 | |||
| 1222 | |||
/* OSD Display Feature */
/* Build <Set OSD String>: string truncated to the 13 bytes that fit. */
static inline void cec_msg_set_osd_string(struct cec_msg *msg,
					  __u8 disp_ctl,
					  const char *osd)
{
	unsigned int len = strlen(osd);

	if (len > 13)
		len = 13;
	msg->len = 3 + len;
	msg->msg[1] = CEC_MSG_SET_OSD_STRING;
	msg->msg[2] = disp_ctl;
	memcpy(msg->msg + 3, osd, len);
}

/*
 * Decode <Set OSD String>: osd must have room for at least 14 bytes
 * (13 payload bytes plus the NUL terminator).
 */
static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
					  __u8 *disp_ctl,
					  char *osd)
{
	unsigned int len = msg->len > 3 ? msg->len - 3 : 0;

	*disp_ctl = msg->msg[2];
	if (len > 13)
		len = 13;
	memcpy(osd, msg->msg + 3, len);
	osd[len] = '\0';
}
| 1250 | |||
| 1251 | |||
/* Device OSD Transfer Feature */
/* Build <Set OSD Name>: name truncated to the 14 bytes that fit. */
static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
{
	unsigned int len = strlen(name);

	if (len > 14)
		len = 14;
	msg->len = 2 + len;
	msg->msg[1] = CEC_MSG_SET_OSD_NAME;
	memcpy(msg->msg + 2, name, len);
}

/*
 * Decode <Set OSD Name>: name must have room for at least 15 bytes
 * (14 payload bytes plus the NUL terminator).
 */
static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
					char *name)
{
	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;

	if (len > 14)
		len = 14;
	memcpy(name, msg->msg + 2, len);
	name[len] = '\0';
}

/* Build <Give OSD Name>; optionally expect a <Set OSD Name> reply. */
static inline void cec_msg_give_osd_name(struct cec_msg *msg,
					 bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
	msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
}
| 1282 | |||
| 1283 | |||
/* Device Menu Control Feature */
/* Build <Menu Status> with the current menu state. */
static inline void cec_msg_menu_status(struct cec_msg *msg,
				       __u8 menu_state)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_MENU_STATUS;
	msg->msg[2] = menu_state;
}

/* Decode <Menu Status>: extract the menu state operand. */
static inline void cec_ops_menu_status(const struct cec_msg *msg,
				       __u8 *menu_state)
{
	*menu_state = msg->msg[2];
}

/* Build <Menu Request>; optionally expect a <Menu Status> reply. */
static inline void cec_msg_menu_request(struct cec_msg *msg,
					bool reply,
					__u8 menu_req)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_MENU_REQUEST;
	msg->msg[2] = menu_req;
	msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
}

/* Decode <Menu Request>: extract the menu request operand. */
static inline void cec_ops_menu_request(const struct cec_msg *msg,
					__u8 *menu_req)
{
	*menu_req = msg->msg[2];
}
| 1314 | |||
/*
 * Parsed form of a <User Control Pressed> message: ui_cmd selects which
 * union member carries the optional operand, and has_opt_arg tells
 * whether any operand was present at all.
 */
struct cec_op_ui_command {
	__u8 ui_cmd;		/* the UI command code (msg byte 2) */
	bool has_opt_arg;	/* true if an optional operand follows */
	union {
		struct cec_op_channel_data channel_identifier;
		__u8 ui_broadcast_type;
		__u8 ui_sound_presentation_control;
		__u8 play_mode;
		__u8 ui_function_media;
		__u8 ui_function_select_av_input;
		__u8 ui_function_select_audio_input;
	};
};
| 1328 | |||
/*
 * Build <User Control Pressed>. For UI commands that take an optional
 * operand the operand is appended: one byte for most, or a four-byte
 * channel identifier for command 0x67 (channel number format in the top
 * 6 bits of the first byte, then a 10-bit major and 16-bit minor channel).
 */
static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
					const struct cec_op_ui_command *ui_cmd)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
	msg->msg[2] = ui_cmd->ui_cmd;
	if (!ui_cmd->has_opt_arg)
		return;
	switch (ui_cmd->ui_cmd) {
	case 0x56:
	case 0x57:
	case 0x60:
	case 0x68:
	case 0x69:
	case 0x6a:
		/* The optional operand is one byte for all these ui commands */
		msg->len++;
		msg->msg[3] = ui_cmd->play_mode;
		break;
	case 0x67:
		/* Channel identifier: fmt in bits 7:2, major bits 9:8 below */
		msg->len += 4;
		msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
			      (ui_cmd->channel_identifier.major >> 8);
		msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
		msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
		msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
		break;
	}
}
| 1358 | |||
| 1359 | static inline void cec_ops_user_control_pressed(const struct cec_msg *msg, | ||
| 1360 | struct cec_op_ui_command *ui_cmd) | ||
| 1361 | { | ||
| 1362 | ui_cmd->ui_cmd = msg->msg[2]; | ||
| 1363 | ui_cmd->has_opt_arg = false; | ||
| 1364 | if (msg->len == 3) | ||
| 1365 | return; | ||
| 1366 | switch (ui_cmd->ui_cmd) { | ||
| 1367 | case 0x56: | ||
| 1368 | case 0x57: | ||
| 1369 | case 0x60: | ||
| 1370 | case 0x68: | ||
| 1371 | case 0x69: | ||
| 1372 | case 0x6a: | ||
| 1373 | /* The optional operand is one byte for all these ui commands */ | ||
| 1374 | ui_cmd->play_mode = msg->msg[3]; | ||
| 1375 | ui_cmd->has_opt_arg = true; | ||
| 1376 | break; | ||
| 1377 | case 0x67: | ||
| 1378 | if (msg->len < 7) | ||
| 1379 | break; | ||
| 1380 | ui_cmd->has_opt_arg = true; | ||
| 1381 | ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2; | ||
| 1382 | ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 6) | msg->msg[4]; | ||
| 1383 | ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1384 | break; | ||
| 1385 | } | ||
| 1386 | } | ||
| 1387 | |||
/* Build <User Control Released> (no operands). */
static inline void cec_msg_user_control_released(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
}

/* Remote Control Passthrough Feature */

/* Power Status Feature */
/* Build <Report Power Status> with the current power state. */
static inline void cec_msg_report_power_status(struct cec_msg *msg,
					       __u8 pwr_state)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
	msg->msg[2] = pwr_state;
}

/* Decode <Report Power Status>: extract the power state operand. */
static inline void cec_ops_report_power_status(const struct cec_msg *msg,
					       __u8 *pwr_state)
{
	*pwr_state = msg->msg[2];
}

/* Build <Give Device Power Status>; optionally expect the status reply. */
static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
						    bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
	msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
}
| 1418 | |||
/* General Protocol Messages */
/* Build <Feature Abort> for the given aborted opcode and reason. */
static inline void cec_msg_feature_abort(struct cec_msg *msg,
					 __u8 abort_msg, __u8 reason)
{
	msg->len = 4;
	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
	msg->msg[2] = abort_msg;
	msg->msg[3] = reason;
}

/* Decode <Feature Abort>: extract the aborted opcode and the reason. */
static inline void cec_ops_feature_abort(const struct cec_msg *msg,
					 __u8 *abort_msg, __u8 *reason)
{
	*abort_msg = msg->msg[2];
	*reason = msg->msg[3];
}

/* This changes the current message into a feature abort message */
static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
{
	cec_msg_set_reply_to(msg, msg);
	msg->len = 4;
	/* The original opcode becomes the aborted-opcode operand */
	msg->msg[2] = msg->msg[1];
	msg->msg[3] = reason;
	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
}

/* Build <Abort> (no operands). */
static inline void cec_msg_abort(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_ABORT;
}
| 1451 | |||
| 1452 | |||
/* System Audio Control Feature */
/* Build <Report Audio Status>: mute flag in bit 7, volume in bits 6:0. */
static inline void cec_msg_report_audio_status(struct cec_msg *msg,
					       __u8 aud_mute_status,
					       __u8 aud_vol_status)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
	msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
}

/* Decode <Report Audio Status>: split the mute flag and volume. */
static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
					       __u8 *aud_mute_status,
					       __u8 *aud_vol_status)
{
	*aud_mute_status = msg->msg[2] >> 7;
	*aud_vol_status = msg->msg[2] & 0x7f;
}

/* Build <Give Audio Status>; optionally expect the status reply. */
static inline void cec_msg_give_audio_status(struct cec_msg *msg,
					     bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
	msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
}

/* Build <Set System Audio Mode> with the system audio status. */
static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
						 __u8 sys_aud_status)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
	msg->msg[2] = sys_aud_status;
}

/* Decode <Set System Audio Mode>: extract the status operand. */
static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
						 __u8 *sys_aud_status)
{
	*sys_aud_status = msg->msg[2];
}
| 1492 | |||
| 1493 | static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg, | ||
| 1494 | bool reply, | ||
| 1495 | __u16 phys_addr) | ||
| 1496 | { | ||
| 1497 | msg->len = phys_addr == 0xffff ? 2 : 4; | ||
| 1498 | msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST; | ||
| 1499 | msg->msg[2] = phys_addr >> 8; | ||
| 1500 | msg->msg[3] = phys_addr & 0xff; | ||
| 1501 | msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0; | ||
| 1502 | |||
| 1503 | } | ||
| 1504 | |||
/*
 * Decode <System Audio Mode Request>: a 2-byte message (no address
 * operand) yields 0xffff.
 */
static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
						     __u16 *phys_addr)
{
	if (msg->len < 4)
		*phys_addr = 0xffff;
	else
		*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
}

/* Build <System Audio Mode Status> with the system audio status. */
static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
						    __u8 sys_aud_status)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
	msg->msg[2] = sys_aud_status;
}

/* Decode <System Audio Mode Status>: extract the status operand. */
static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
						    __u8 *sys_aud_status)
{
	*sys_aud_status = msg->msg[2];
}

/* Build <Give System Audio Mode Status>; optionally expect the reply. */
static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
							 bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
	msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
}
| 1535 | |||
/*
 * Build <Report Short Audio Descriptor>: up to 4 three-byte descriptors,
 * each stored big-endian (24 bits of the __u32 are used).
 */
static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
					__u8 num_descriptors,
					const __u32 *descriptors)
{
	unsigned int i;

	if (num_descriptors > 4)
		num_descriptors = 4;
	msg->len = 2 + num_descriptors * 3;
	msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
	for (i = 0; i < num_descriptors; i++) {
		msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
		msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
		msg->msg[4 + i * 3] = descriptors[i] & 0xff;
	}
}

/*
 * Decode <Report Short Audio Descriptor>: descriptors must have room for
 * up to 4 entries; the count is derived from the message length.
 */
static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
							 __u8 *num_descriptors,
							 __u32 *descriptors)
{
	unsigned int i;

	*num_descriptors = (msg->len - 2) / 3;
	if (*num_descriptors > 4)
		*num_descriptors = 4;
	for (i = 0; i < *num_descriptors; i++)
		descriptors[i] = (msg->msg[2 + i * 3] << 16) |
			(msg->msg[3 + i * 3] << 8) |
			msg->msg[4 + i * 3];
}

/*
 * Build <Request Short Audio Descriptor>: up to 4 one-byte requests, each
 * packing an audio format id (top 2 bits) and format code (low 6 bits).
 */
static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
					bool reply,
					__u8 num_descriptors,
					const __u8 *audio_format_id,
					const __u8 *audio_format_code)
{
	unsigned int i;

	if (num_descriptors > 4)
		num_descriptors = 4;
	msg->len = 2 + num_descriptors;
	msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
	msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
	for (i = 0; i < num_descriptors; i++)
		msg->msg[2 + i] = (audio_format_id[i] << 6) |
				  (audio_format_code[i] & 0x3f);
}

/*
 * Decode <Request Short Audio Descriptor>: audio_format_id and
 * audio_format_code must each have room for up to 4 entries.
 */
static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
					__u8 *num_descriptors,
					__u8 *audio_format_id,
					__u8 *audio_format_code)
{
	unsigned int i;

	*num_descriptors = msg->len - 2;
	if (*num_descriptors > 4)
		*num_descriptors = 4;
	for (i = 0; i < *num_descriptors; i++) {
		audio_format_id[i] = msg->msg[2 + i] >> 6;
		audio_format_code[i] = msg->msg[2 + i] & 0x3f;
	}
}
| 1601 | |||
| 1602 | |||
/* Audio Rate Control Feature */
/* Build <Set Audio Rate> with the requested audio rate. */
static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
					  __u8 audio_rate)
{
	msg->len = 3;
	msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
	msg->msg[2] = audio_rate;
}

/* Decode <Set Audio Rate>: extract the audio rate operand. */
static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
					  __u8 *audio_rate)
{
	*audio_rate = msg->msg[2];
}
| 1617 | |||
| 1618 | |||
/* Audio Return Channel Control Feature */
/* Build <Report ARC Initiated> (no operands). */
static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
}

/* Build <Initiate ARC>; optionally expect <Report ARC Initiated>. */
static inline void cec_msg_initiate_arc(struct cec_msg *msg,
					bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_INITIATE_ARC;
	msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
}

/* Build <Request ARC Initiation>; optionally expect <Initiate ARC>. */
static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
						  bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
	msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
}

/* Build <Report ARC Terminated> (no operands). */
static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
}

/* Build <Terminate ARC>; optionally expect <Report ARC Terminated>. */
static inline void cec_msg_terminate_arc(struct cec_msg *msg,
					 bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_TERMINATE_ARC;
	msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
}

/* Build <Request ARC Termination>; optionally expect <Terminate ARC>. */
static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
						   bool reply)
{
	msg->len = 2;
	msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
	msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
}
| 1663 | |||
| 1664 | |||
| 1665 | /* Dynamic Audio Lipsync Feature */ | ||
| 1666 | /* Only for CEC 2.0 and up */ | ||
/*
 * Build a broadcast <Report Current Latency> message (Dynamic Audio
 * Lipsync feature, CEC 2.0 and up).
 *
 * Operand layout: [2..3] physical address, [4] video latency,
 * [5] bit 2 = low latency mode, bits 1:0 = audio output compensated,
 * [6] audio output delay.
 */
static inline void cec_msg_report_current_latency(struct cec_msg *msg,
						  __u16 phys_addr,
						  __u8 video_latency,
						  __u8 low_latency_mode,
						  __u8 audio_out_compensated,
						  __u8 audio_out_delay)
{
	msg->len = 7;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
	msg->msg[2] = phys_addr >> 8;
	msg->msg[3] = phys_addr & 0xff;
	msg->msg[4] = video_latency;
	/* pack the 1-bit mode above the 2-bit compensation field */
	msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
	msg->msg[6] = audio_out_delay;
}
| 1683 | |||
/*
 * Unpack a <Report Current Latency> message; inverse of
 * cec_msg_report_current_latency().
 */
static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
						  __u16 *phys_addr,
						  __u8 *video_latency,
						  __u8 *low_latency_mode,
						  __u8 *audio_out_compensated,
						  __u8 *audio_out_delay)
{
	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
	*video_latency = msg->msg[4];
	/* bit 2 of msg[5] */
	*low_latency_mode = (msg->msg[5] >> 2) & 1;
	/* bits 1:0 of msg[5] */
	*audio_out_compensated = msg->msg[5] & 3;
	*audio_out_delay = msg->msg[6];
}
| 1697 | |||
| 1698 | static inline void cec_msg_request_current_latency(struct cec_msg *msg, | ||
| 1699 | bool reply, | ||
| 1700 | __u16 phys_addr) | ||
| 1701 | { | ||
| 1702 | msg->len = 4; | ||
| 1703 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1704 | msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY; | ||
| 1705 | msg->msg[2] = phys_addr >> 8; | ||
| 1706 | msg->msg[3] = phys_addr & 0xff; | ||
| 1707 | msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0; | ||
| 1708 | } | ||
| 1709 | |||
| 1710 | static inline void cec_ops_request_current_latency(const struct cec_msg *msg, | ||
| 1711 | __u16 *phys_addr) | ||
| 1712 | { | ||
| 1713 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | |||
| 1717 | /* Capability Discovery and Control Feature */ | ||
| 1718 | static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg, | ||
| 1719 | __u16 phys_addr1, | ||
| 1720 | __u16 phys_addr2) | ||
| 1721 | { | ||
| 1722 | msg->len = 9; | ||
| 1723 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1724 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1725 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1726 | msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; | ||
| 1727 | msg->msg[5] = phys_addr1 >> 8; | ||
| 1728 | msg->msg[6] = phys_addr1 & 0xff; | ||
| 1729 | msg->msg[7] = phys_addr2 >> 8; | ||
| 1730 | msg->msg[8] = phys_addr2 & 0xff; | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg, | ||
| 1734 | __u16 *phys_addr, | ||
| 1735 | __u16 *phys_addr1, | ||
| 1736 | __u16 *phys_addr2) | ||
| 1737 | { | ||
| 1738 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1739 | *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1740 | *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; | ||
| 1741 | } | ||
| 1742 | |||
/*
 * Build a CDC <HEC Report State> message.
 *
 * msg[7] packs four 2-bit fields: bits 7:6 HEC function state,
 * bits 5:4 host function state, bits 3:2 ENC function state,
 * bits 1:0 CDC error code. The optional 16-bit HEC field is appended
 * only when @has_field is non-zero (total length 10 instead of 8).
 */
static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
						__u16 target_phys_addr,
						__u8 hec_func_state,
						__u8 host_func_state,
						__u8 enc_func_state,
						__u8 cdc_errcode,
						__u8 has_field,
						__u16 hec_field)
{
	msg->len = has_field ? 10 : 8;
	msg->msg[0] |= 0xf; /* broadcast */
	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
	msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
	msg->msg[5] = target_phys_addr >> 8;
	msg->msg[6] = target_phys_addr & 0xff;
	msg->msg[7] = (hec_func_state << 6) |
		      (host_func_state << 4) |
		      (enc_func_state << 2) |
		      cdc_errcode;
	if (has_field) {
		msg->msg[8] = hec_field >> 8;
		msg->msg[9] = hec_field & 0xff;
	}
}
| 1768 | |||
| 1769 | static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg, | ||
| 1770 | __u16 *phys_addr, | ||
| 1771 | __u16 *target_phys_addr, | ||
| 1772 | __u8 *hec_func_state, | ||
| 1773 | __u8 *host_func_state, | ||
| 1774 | __u8 *enc_func_state, | ||
| 1775 | __u8 *cdc_errcode, | ||
| 1776 | __u8 *has_field, | ||
| 1777 | __u16 *hec_field) | ||
| 1778 | { | ||
| 1779 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1780 | *target_phys_addr = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1781 | *hec_func_state = msg->msg[7] >> 6; | ||
| 1782 | *host_func_state = (msg->msg[7] >> 4) & 3; | ||
| 1783 | *enc_func_state = (msg->msg[7] >> 4) & 3; | ||
| 1784 | *cdc_errcode = msg->msg[7] & 3; | ||
| 1785 | *has_field = msg->len >= 10; | ||
| 1786 | *hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg, | ||
| 1790 | __u16 phys_addr1, | ||
| 1791 | __u16 phys_addr2, | ||
| 1792 | __u8 hec_set_state, | ||
| 1793 | __u16 phys_addr3, | ||
| 1794 | __u16 phys_addr4, | ||
| 1795 | __u16 phys_addr5) | ||
| 1796 | { | ||
| 1797 | msg->len = 10; | ||
| 1798 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1799 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1800 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1801 | msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE; | ||
| 1802 | msg->msg[5] = phys_addr1 >> 8; | ||
| 1803 | msg->msg[6] = phys_addr1 & 0xff; | ||
| 1804 | msg->msg[7] = phys_addr2 >> 8; | ||
| 1805 | msg->msg[8] = phys_addr2 & 0xff; | ||
| 1806 | msg->msg[9] = hec_set_state; | ||
| 1807 | if (phys_addr3 != CEC_PHYS_ADDR_INVALID) { | ||
| 1808 | msg->msg[msg->len++] = phys_addr3 >> 8; | ||
| 1809 | msg->msg[msg->len++] = phys_addr3 & 0xff; | ||
| 1810 | if (phys_addr4 != CEC_PHYS_ADDR_INVALID) { | ||
| 1811 | msg->msg[msg->len++] = phys_addr4 >> 8; | ||
| 1812 | msg->msg[msg->len++] = phys_addr4 & 0xff; | ||
| 1813 | if (phys_addr5 != CEC_PHYS_ADDR_INVALID) { | ||
| 1814 | msg->msg[msg->len++] = phys_addr5 >> 8; | ||
| 1815 | msg->msg[msg->len++] = phys_addr5 & 0xff; | ||
| 1816 | } | ||
| 1817 | } | ||
| 1818 | } | ||
| 1819 | } | ||
| 1820 | |||
| 1821 | static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg, | ||
| 1822 | __u16 *phys_addr, | ||
| 1823 | __u16 *phys_addr1, | ||
| 1824 | __u16 *phys_addr2, | ||
| 1825 | __u8 *hec_set_state, | ||
| 1826 | __u16 *phys_addr3, | ||
| 1827 | __u16 *phys_addr4, | ||
| 1828 | __u16 *phys_addr5) | ||
| 1829 | { | ||
| 1830 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1831 | *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1832 | *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; | ||
| 1833 | *hec_set_state = msg->msg[9]; | ||
| 1834 | *phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID; | ||
| 1835 | if (msg->len >= 12) | ||
| 1836 | *phys_addr3 = (msg->msg[10] << 8) | msg->msg[11]; | ||
| 1837 | if (msg->len >= 14) | ||
| 1838 | *phys_addr4 = (msg->msg[12] << 8) | msg->msg[13]; | ||
| 1839 | if (msg->len >= 16) | ||
| 1840 | *phys_addr5 = (msg->msg[14] << 8) | msg->msg[15]; | ||
| 1841 | } | ||
| 1842 | |||
| 1843 | static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg, | ||
| 1844 | __u16 phys_addr1, | ||
| 1845 | __u8 hec_set_state) | ||
| 1846 | { | ||
| 1847 | msg->len = 8; | ||
| 1848 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1849 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1850 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1851 | msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT; | ||
| 1852 | msg->msg[5] = phys_addr1 >> 8; | ||
| 1853 | msg->msg[6] = phys_addr1 & 0xff; | ||
| 1854 | msg->msg[7] = hec_set_state; | ||
| 1855 | } | ||
| 1856 | |||
| 1857 | static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg, | ||
| 1858 | __u16 *phys_addr, | ||
| 1859 | __u16 *phys_addr1, | ||
| 1860 | __u8 *hec_set_state) | ||
| 1861 | { | ||
| 1862 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1863 | *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1864 | *hec_set_state = msg->msg[7]; | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg, | ||
| 1868 | __u16 phys_addr1, | ||
| 1869 | __u16 phys_addr2, | ||
| 1870 | __u16 phys_addr3) | ||
| 1871 | { | ||
| 1872 | msg->len = 11; | ||
| 1873 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1874 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1875 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1876 | msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION; | ||
| 1877 | msg->msg[5] = phys_addr1 >> 8; | ||
| 1878 | msg->msg[6] = phys_addr1 & 0xff; | ||
| 1879 | msg->msg[7] = phys_addr2 >> 8; | ||
| 1880 | msg->msg[8] = phys_addr2 & 0xff; | ||
| 1881 | msg->msg[9] = phys_addr3 >> 8; | ||
| 1882 | msg->msg[10] = phys_addr3 & 0xff; | ||
| 1883 | } | ||
| 1884 | |||
| 1885 | static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg, | ||
| 1886 | __u16 *phys_addr, | ||
| 1887 | __u16 *phys_addr1, | ||
| 1888 | __u16 *phys_addr2, | ||
| 1889 | __u16 *phys_addr3) | ||
| 1890 | { | ||
| 1891 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1892 | *phys_addr1 = (msg->msg[5] << 8) | msg->msg[6]; | ||
| 1893 | *phys_addr2 = (msg->msg[7] << 8) | msg->msg[8]; | ||
| 1894 | *phys_addr3 = (msg->msg[9] << 8) | msg->msg[10]; | ||
| 1895 | } | ||
| 1896 | |||
| 1897 | static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg) | ||
| 1898 | { | ||
| 1899 | msg->len = 5; | ||
| 1900 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1901 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1902 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1903 | msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE; | ||
| 1904 | } | ||
| 1905 | |||
| 1906 | static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg, | ||
| 1907 | __u16 *phys_addr) | ||
| 1908 | { | ||
| 1909 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1910 | } | ||
| 1911 | |||
| 1912 | static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg) | ||
| 1913 | { | ||
| 1914 | msg->len = 5; | ||
| 1915 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1916 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1917 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1918 | msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER; | ||
| 1919 | } | ||
| 1920 | |||
| 1921 | static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg, | ||
| 1922 | __u16 *phys_addr) | ||
| 1923 | { | ||
| 1924 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1925 | } | ||
| 1926 | |||
| 1927 | static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg, | ||
| 1928 | __u8 input_port, | ||
| 1929 | __u8 hpd_state) | ||
| 1930 | { | ||
| 1931 | msg->len = 6; | ||
| 1932 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1933 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1934 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1935 | msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE; | ||
| 1936 | msg->msg[5] = (input_port << 4) | hpd_state; | ||
| 1937 | } | ||
| 1938 | |||
| 1939 | static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg, | ||
| 1940 | __u16 *phys_addr, | ||
| 1941 | __u8 *input_port, | ||
| 1942 | __u8 *hpd_state) | ||
| 1943 | { | ||
| 1944 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1945 | *input_port = msg->msg[5] >> 4; | ||
| 1946 | *hpd_state = msg->msg[5] & 0xf; | ||
| 1947 | } | ||
| 1948 | |||
| 1949 | static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg, | ||
| 1950 | __u8 hpd_state, | ||
| 1951 | __u8 hpd_error) | ||
| 1952 | { | ||
| 1953 | msg->len = 6; | ||
| 1954 | msg->msg[0] |= 0xf; /* broadcast */ | ||
| 1955 | msg->msg[1] = CEC_MSG_CDC_MESSAGE; | ||
| 1956 | /* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */ | ||
| 1957 | msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE; | ||
| 1958 | msg->msg[5] = (hpd_state << 4) | hpd_error; | ||
| 1959 | } | ||
| 1960 | |||
| 1961 | static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg, | ||
| 1962 | __u16 *phys_addr, | ||
| 1963 | __u8 *hpd_state, | ||
| 1964 | __u8 *hpd_error) | ||
| 1965 | { | ||
| 1966 | *phys_addr = (msg->msg[2] << 8) | msg->msg[3]; | ||
| 1967 | *hpd_state = msg->msg[5] >> 4; | ||
| 1968 | *hpd_error = msg->msg[5] & 0xf; | ||
| 1969 | } | ||
| 1970 | |||
| 1971 | #endif | ||
diff --git a/include/linux/cec.h b/include/linux/cec.h deleted file mode 100644 index 851968e803fa..000000000000 --- a/include/linux/cec.h +++ /dev/null | |||
| @@ -1,1014 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * cec - HDMI Consumer Electronics Control public header | ||
| 3 | * | ||
| 4 | * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you may redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; version 2 of the License. | ||
| 9 | * | ||
| 10 | * Alternatively you can redistribute this file under the terms of the | ||
| 11 | * BSD license as stated below: | ||
| 12 | * | ||
| 13 | * Redistribution and use in source and binary forms, with or without | ||
| 14 | * modification, are permitted provided that the following conditions | ||
| 15 | * are met: | ||
| 16 | * 1. Redistributions of source code must retain the above copyright | ||
| 17 | * notice, this list of conditions and the following disclaimer. | ||
| 18 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 19 | * notice, this list of conditions and the following disclaimer in | ||
| 20 | * the documentation and/or other materials provided with the | ||
| 21 | * distribution. | ||
| 22 | * 3. The names of its contributors may not be used to endorse or promote | ||
| 23 | * products derived from this software without specific prior written | ||
| 24 | * permission. | ||
| 25 | * | ||
| 26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 33 | * SOFTWARE. | ||
| 34 | */ | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Note: this framework is still in staging and it is likely the API | ||
| 38 | * will change before it goes out of staging. | ||
| 39 | * | ||
| 40 | * Once it is moved out of staging this header will move to uapi. | ||
| 41 | */ | ||
| 42 | #ifndef _CEC_UAPI_H | ||
| 43 | #define _CEC_UAPI_H | ||
| 44 | |||
| 45 | #include <linux/types.h> | ||
| 46 | |||
| 47 | #define CEC_MAX_MSG_SIZE 16 | ||
| 48 | |||
| 49 | /** | ||
| 50 | * struct cec_msg - CEC message structure. | ||
| 51 | * @tx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the | ||
| 52 | * driver when the message transmission has finished. | ||
| 53 | * @rx_ts: Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the | ||
| 54 | * driver when the message was received. | ||
| 55 | * @len: Length in bytes of the message. | ||
| 56 | * @timeout: The timeout (in ms) that is used to timeout CEC_RECEIVE. | ||
| 57 | * Set to 0 if you want to wait forever. This timeout can also be | ||
| 58 | * used with CEC_TRANSMIT as the timeout for waiting for a reply. | ||
| 59 | * If 0, then it will use a 1 second timeout instead of waiting | ||
| 60 | * forever as is done with CEC_RECEIVE. | ||
| 61 | * @sequence: The framework assigns a sequence number to messages that are | ||
| 62 | * sent. This can be used to track replies to previously sent | ||
| 63 | * messages. | ||
| 64 | * @flags: Set to 0. | ||
| 65 | * @msg: The message payload. | ||
| 66 | * @reply: This field is ignored with CEC_RECEIVE and is only used by | ||
| 67 | * CEC_TRANSMIT. If non-zero, then wait for a reply with this | ||
| 68 | * opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for | ||
| 69 | * a possible ABORT reply. If there was an error when sending the | ||
| 70 | * msg or FeatureAbort was returned, then reply is set to 0. | ||
| 71 | * If reply is non-zero upon return, then len/msg are set to | ||
| 72 | * the received message. | ||
| 73 | * If reply is zero upon return and status has the | ||
| 74 | * CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to | ||
| 75 | * the received feature abort message. | ||
| 76 | * If reply is zero upon return and status has the | ||
| 77 | * CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at | ||
| 78 | * all. If reply is non-zero for CEC_TRANSMIT and the message is a | ||
| 79 | * broadcast, then -EINVAL is returned. | ||
| 80 | * if reply is non-zero, then timeout is set to 1000 (the required | ||
| 81 | * maximum response time). | ||
| 82 | * @rx_status: The message receive status bits. Set by the driver. | ||
| 83 | * @tx_status: The message transmit status bits. Set by the driver. | ||
| 84 | * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver. | ||
| 85 | * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver. | ||
| 86 | * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the | ||
| 87 | * driver. | ||
| 88 | * @tx_error_cnt: The number of 'Error' events. Set by the driver. | ||
| 89 | */ | ||
/* See the kernel-doc block above for full field semantics. */
struct cec_msg {
	__u64 tx_ts;		/* transmit timestamp, ns (CLOCK_MONOTONIC) */
	__u64 rx_ts;		/* receive timestamp, ns (CLOCK_MONOTONIC) */
	__u32 len;		/* message length in bytes */
	__u32 timeout;		/* receive/reply timeout in ms, 0 = default */
	__u32 sequence;		/* framework-assigned sequence number */
	__u32 flags;		/* set to 0 */
	__u8 msg[CEC_MAX_MSG_SIZE];	/* header + opcode + operands */
	__u8 reply;		/* opcode to wait for as reply (TX only) */
	__u8 rx_status;		/* CEC_RX_STATUS_* bits, set by driver */
	__u8 tx_status;		/* CEC_TX_STATUS_* bits, set by driver */
	__u8 tx_arb_lost_cnt;	/* 'Arbitration Lost' event count */
	__u8 tx_nack_cnt;	/* 'Not Acknowledged' event count */
	__u8 tx_low_drive_cnt;	/* 'Low Drive Detected' event count */
	__u8 tx_error_cnt;	/* 'Error' event count */
};
| 106 | |||
| 107 | /** | ||
| 108 | * cec_msg_initiator - return the initiator's logical address. | ||
| 109 | * @msg: the message structure | ||
| 110 | */ | ||
| 111 | static inline __u8 cec_msg_initiator(const struct cec_msg *msg) | ||
| 112 | { | ||
| 113 | return msg->msg[0] >> 4; | ||
| 114 | } | ||
| 115 | |||
| 116 | /** | ||
| 117 | * cec_msg_destination - return the destination's logical address. | ||
| 118 | * @msg: the message structure | ||
| 119 | */ | ||
| 120 | static inline __u8 cec_msg_destination(const struct cec_msg *msg) | ||
| 121 | { | ||
| 122 | return msg->msg[0] & 0xf; | ||
| 123 | } | ||
| 124 | |||
| 125 | /** | ||
| 126 | * cec_msg_opcode - return the opcode of the message, -1 for poll | ||
| 127 | * @msg: the message structure | ||
| 128 | */ | ||
| 129 | static inline int cec_msg_opcode(const struct cec_msg *msg) | ||
| 130 | { | ||
| 131 | return msg->len > 1 ? msg->msg[1] : -1; | ||
| 132 | } | ||
| 133 | |||
| 134 | /** | ||
| 135 | * cec_msg_is_broadcast - return true if this is a broadcast message. | ||
| 136 | * @msg: the message structure | ||
| 137 | */ | ||
| 138 | static inline bool cec_msg_is_broadcast(const struct cec_msg *msg) | ||
| 139 | { | ||
| 140 | return (msg->msg[0] & 0xf) == 0xf; | ||
| 141 | } | ||
| 142 | |||
| 143 | /** | ||
| 144 | * cec_msg_init - initialize the message structure. | ||
| 145 | * @msg: the message structure | ||
| 146 | * @initiator: the logical address of the initiator | ||
| 147 | * @destination:the logical address of the destination (0xf for broadcast) | ||
| 148 | * | ||
| 149 | * The whole structure is zeroed, the len field is set to 1 (i.e. a poll | ||
| 150 | * message) and the initiator and destination are filled in. | ||
| 151 | */ | ||
| 152 | static inline void cec_msg_init(struct cec_msg *msg, | ||
| 153 | __u8 initiator, __u8 destination) | ||
| 154 | { | ||
| 155 | memset(msg, 0, sizeof(*msg)); | ||
| 156 | msg->msg[0] = (initiator << 4) | destination; | ||
| 157 | msg->len = 1; | ||
| 158 | } | ||
| 159 | |||
| 160 | /** | ||
| 161 | * cec_msg_set_reply_to - fill in destination/initiator in a reply message. | ||
| 162 | * @msg: the message structure for the reply | ||
| 163 | * @orig: the original message structure | ||
| 164 | * | ||
| 165 | * Set the msg destination to the orig initiator and the msg initiator to the | ||
| 166 | * orig destination. Note that msg and orig may be the same pointer, in which | ||
| 167 | * case the change is done in place. | ||
| 168 | */ | ||
| 169 | static inline void cec_msg_set_reply_to(struct cec_msg *msg, | ||
| 170 | struct cec_msg *orig) | ||
| 171 | { | ||
| 172 | /* The destination becomes the initiator and vice versa */ | ||
| 173 | msg->msg[0] = (cec_msg_destination(orig) << 4) | | ||
| 174 | cec_msg_initiator(orig); | ||
| 175 | msg->reply = msg->timeout = 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* cec status field */ | ||
| 179 | #define CEC_TX_STATUS_OK (1 << 0) | ||
| 180 | #define CEC_TX_STATUS_ARB_LOST (1 << 1) | ||
| 181 | #define CEC_TX_STATUS_NACK (1 << 2) | ||
| 182 | #define CEC_TX_STATUS_LOW_DRIVE (1 << 3) | ||
| 183 | #define CEC_TX_STATUS_ERROR (1 << 4) | ||
| 184 | #define CEC_TX_STATUS_MAX_RETRIES (1 << 5) | ||
| 185 | |||
| 186 | #define CEC_RX_STATUS_OK (1 << 0) | ||
| 187 | #define CEC_RX_STATUS_TIMEOUT (1 << 1) | ||
| 188 | #define CEC_RX_STATUS_FEATURE_ABORT (1 << 2) | ||
| 189 | |||
| 190 | static inline bool cec_msg_status_is_ok(const struct cec_msg *msg) | ||
| 191 | { | ||
| 192 | if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK)) | ||
| 193 | return false; | ||
| 194 | if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK)) | ||
| 195 | return false; | ||
| 196 | if (!msg->tx_status && !msg->rx_status) | ||
| 197 | return false; | ||
| 198 | return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT); | ||
| 199 | } | ||
| 200 | |||
| 201 | #define CEC_LOG_ADDR_INVALID 0xff | ||
| 202 | #define CEC_PHYS_ADDR_INVALID 0xffff | ||
| 203 | |||
| 204 | /* | ||
| 205 | * The maximum number of logical addresses one device can be assigned to. | ||
| 206 | * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The | ||
| 207 | * Analog Devices CEC hardware supports 3. So let's go wild and go for 4. | ||
| 208 | */ | ||
| 209 | #define CEC_MAX_LOG_ADDRS 4 | ||
| 210 | |||
| 211 | /* The logical addresses defined by CEC 2.0 */ | ||
| 212 | #define CEC_LOG_ADDR_TV 0 | ||
| 213 | #define CEC_LOG_ADDR_RECORD_1 1 | ||
| 214 | #define CEC_LOG_ADDR_RECORD_2 2 | ||
| 215 | #define CEC_LOG_ADDR_TUNER_1 3 | ||
| 216 | #define CEC_LOG_ADDR_PLAYBACK_1 4 | ||
| 217 | #define CEC_LOG_ADDR_AUDIOSYSTEM 5 | ||
| 218 | #define CEC_LOG_ADDR_TUNER_2 6 | ||
| 219 | #define CEC_LOG_ADDR_TUNER_3 7 | ||
| 220 | #define CEC_LOG_ADDR_PLAYBACK_2 8 | ||
| 221 | #define CEC_LOG_ADDR_RECORD_3 9 | ||
| 222 | #define CEC_LOG_ADDR_TUNER_4 10 | ||
| 223 | #define CEC_LOG_ADDR_PLAYBACK_3 11 | ||
| 224 | #define CEC_LOG_ADDR_BACKUP_1 12 | ||
| 225 | #define CEC_LOG_ADDR_BACKUP_2 13 | ||
| 226 | #define CEC_LOG_ADDR_SPECIFIC 14 | ||
| 227 | #define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */ | ||
| 228 | #define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */ | ||
| 229 | |||
| 230 | /* The logical address types that the CEC device wants to claim */ | ||
| 231 | #define CEC_LOG_ADDR_TYPE_TV 0 | ||
| 232 | #define CEC_LOG_ADDR_TYPE_RECORD 1 | ||
| 233 | #define CEC_LOG_ADDR_TYPE_TUNER 2 | ||
| 234 | #define CEC_LOG_ADDR_TYPE_PLAYBACK 3 | ||
| 235 | #define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM 4 | ||
| 236 | #define CEC_LOG_ADDR_TYPE_SPECIFIC 5 | ||
| 237 | #define CEC_LOG_ADDR_TYPE_UNREGISTERED 6 | ||
| 238 | /* | ||
| 239 | * Switches should use UNREGISTERED. | ||
| 240 | * Processors should use SPECIFIC. | ||
| 241 | */ | ||
| 242 | |||
| 243 | #define CEC_LOG_ADDR_MASK_TV (1 << CEC_LOG_ADDR_TV) | ||
| 244 | #define CEC_LOG_ADDR_MASK_RECORD ((1 << CEC_LOG_ADDR_RECORD_1) | \ | ||
| 245 | (1 << CEC_LOG_ADDR_RECORD_2) | \ | ||
| 246 | (1 << CEC_LOG_ADDR_RECORD_3)) | ||
| 247 | #define CEC_LOG_ADDR_MASK_TUNER ((1 << CEC_LOG_ADDR_TUNER_1) | \ | ||
| 248 | (1 << CEC_LOG_ADDR_TUNER_2) | \ | ||
| 249 | (1 << CEC_LOG_ADDR_TUNER_3) | \ | ||
| 250 | (1 << CEC_LOG_ADDR_TUNER_4)) | ||
| 251 | #define CEC_LOG_ADDR_MASK_PLAYBACK ((1 << CEC_LOG_ADDR_PLAYBACK_1) | \ | ||
| 252 | (1 << CEC_LOG_ADDR_PLAYBACK_2) | \ | ||
| 253 | (1 << CEC_LOG_ADDR_PLAYBACK_3)) | ||
| 254 | #define CEC_LOG_ADDR_MASK_AUDIOSYSTEM (1 << CEC_LOG_ADDR_AUDIOSYSTEM) | ||
| 255 | #define CEC_LOG_ADDR_MASK_BACKUP ((1 << CEC_LOG_ADDR_BACKUP_1) | \ | ||
| 256 | (1 << CEC_LOG_ADDR_BACKUP_2)) | ||
| 257 | #define CEC_LOG_ADDR_MASK_SPECIFIC (1 << CEC_LOG_ADDR_SPECIFIC) | ||
| 258 | #define CEC_LOG_ADDR_MASK_UNREGISTERED (1 << CEC_LOG_ADDR_UNREGISTERED) | ||
| 259 | |||
| 260 | static inline bool cec_has_tv(__u16 log_addr_mask) | ||
| 261 | { | ||
| 262 | return log_addr_mask & CEC_LOG_ADDR_MASK_TV; | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline bool cec_has_record(__u16 log_addr_mask) | ||
| 266 | { | ||
| 267 | return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD; | ||
| 268 | } | ||
| 269 | |||
| 270 | static inline bool cec_has_tuner(__u16 log_addr_mask) | ||
| 271 | { | ||
| 272 | return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER; | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline bool cec_has_playback(__u16 log_addr_mask) | ||
| 276 | { | ||
| 277 | return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK; | ||
| 278 | } | ||
| 279 | |||
| 280 | static inline bool cec_has_audiosystem(__u16 log_addr_mask) | ||
| 281 | { | ||
| 282 | return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM; | ||
| 283 | } | ||
| 284 | |||
| 285 | static inline bool cec_has_backup(__u16 log_addr_mask) | ||
| 286 | { | ||
| 287 | return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP; | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline bool cec_has_specific(__u16 log_addr_mask) | ||
| 291 | { | ||
| 292 | return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC; | ||
| 293 | } | ||
| 294 | |||
| 295 | static inline bool cec_is_unregistered(__u16 log_addr_mask) | ||
| 296 | { | ||
| 297 | return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED; | ||
| 298 | } | ||
| 299 | |||
| 300 | static inline bool cec_is_unconfigured(__u16 log_addr_mask) | ||
| 301 | { | ||
| 302 | return log_addr_mask == 0; | ||
| 303 | } | ||
| 304 | |||
| 305 | /* | ||
| 306 | * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID | ||
| 307 | * should be disabled (CEC_S_VENDOR_ID) | ||
| 308 | */ | ||
| 309 | #define CEC_VENDOR_ID_NONE 0xffffffff | ||
| 310 | |||
| 311 | /* The message handling modes */ | ||
| 312 | /* Modes for initiator */ | ||
| 313 | #define CEC_MODE_NO_INITIATOR (0x0 << 0) | ||
| 314 | #define CEC_MODE_INITIATOR (0x1 << 0) | ||
| 315 | #define CEC_MODE_EXCL_INITIATOR (0x2 << 0) | ||
| 316 | #define CEC_MODE_INITIATOR_MSK 0x0f | ||
| 317 | |||
| 318 | /* Modes for follower */ | ||
| 319 | #define CEC_MODE_NO_FOLLOWER (0x0 << 4) | ||
| 320 | #define CEC_MODE_FOLLOWER (0x1 << 4) | ||
| 321 | #define CEC_MODE_EXCL_FOLLOWER (0x2 << 4) | ||
| 322 | #define CEC_MODE_EXCL_FOLLOWER_PASSTHRU (0x3 << 4) | ||
| 323 | #define CEC_MODE_MONITOR (0xe << 4) | ||
| 324 | #define CEC_MODE_MONITOR_ALL (0xf << 4) | ||
| 325 | #define CEC_MODE_FOLLOWER_MSK 0xf0 | ||
| 326 | |||
| 327 | /* Userspace has to configure the physical address */ | ||
| 328 | #define CEC_CAP_PHYS_ADDR (1 << 0) | ||
| 329 | /* Userspace has to configure the logical addresses */ | ||
| 330 | #define CEC_CAP_LOG_ADDRS (1 << 1) | ||
| 331 | /* Userspace can transmit messages (and thus become follower as well) */ | ||
| 332 | #define CEC_CAP_TRANSMIT (1 << 2) | ||
| 333 | /* | ||
| 334 | * Passthrough all messages instead of processing them. | ||
| 335 | */ | ||
| 336 | #define CEC_CAP_PASSTHROUGH (1 << 3) | ||
| 337 | /* Supports remote control */ | ||
| 338 | #define CEC_CAP_RC (1 << 4) | ||
| 339 | /* Hardware can monitor all messages, not just directed and broadcast. */ | ||
| 340 | #define CEC_CAP_MONITOR_ALL (1 << 5) | ||
| 341 | |||
| 342 | /** | ||
| 343 | * struct cec_caps - CEC capabilities structure. | ||
| 344 | * @driver: name of the CEC device driver. | ||
| 345 | * @name: name of the CEC device. @driver + @name must be unique. | ||
| 346 | * @available_log_addrs: number of available logical addresses. | ||
| 347 | * @capabilities: capabilities of the CEC adapter. | ||
| 348 | * @version: version of the CEC adapter framework. | ||
| 349 | */ | ||
/* See the kernel-doc block above for full field semantics. */
struct cec_caps {
	char driver[32];	/* CEC device driver name */
	char name[32];		/* device name; driver+name is unique */
	__u32 available_log_addrs;	/* number of available logical addrs */
	__u32 capabilities;	/* CEC_CAP_* bits */
	__u32 version;		/* CEC adapter framework version */
};
| 357 | |||
| 358 | /** | ||
| 359 | * struct cec_log_addrs - CEC logical addresses structure. | ||
| 360 | * @log_addr: the claimed logical addresses. Set by the driver. | ||
| 361 | * @log_addr_mask: current logical address mask. Set by the driver. | ||
| 362 | * @cec_version: the CEC version that the adapter should implement. Set by the | ||
| 363 | * caller. | ||
| 364 | * @num_log_addrs: how many logical addresses should be claimed. Set by the | ||
| 365 | * caller. | ||
| 366 | * @vendor_id: the vendor ID of the device. Set by the caller. | ||
| 367 | * @flags: flags. | ||
| 368 | * @osd_name: the OSD name of the device. Set by the caller. | ||
| 369 | * @primary_device_type: the primary device type for each logical address. | ||
| 370 | * Set by the caller. | ||
| 371 | * @log_addr_type: the logical address types. Set by the caller. | ||
| 372 | * @all_device_types: CEC 2.0: all device types represented by the logical | ||
| 373 | * address. Set by the caller. | ||
| 374 | * @features: CEC 2.0: The logical address features. Set by the caller. | ||
| 375 | */ | ||
/* See the kernel-doc block above for full field semantics. */
struct cec_log_addrs {
	__u8 log_addr[CEC_MAX_LOG_ADDRS];	/* claimed addrs, set by driver */
	__u16 log_addr_mask;	/* current address mask, set by driver */
	__u8 cec_version;	/* CEC version to implement, set by caller */
	__u8 num_log_addrs;	/* how many addresses to claim */
	__u32 vendor_id;	/* vendor ID, or CEC_VENDOR_ID_NONE */
	__u32 flags;		/* CEC_LOG_ADDRS_FL_* bits */
	char osd_name[15];	/* OSD name of the device */
	__u8 primary_device_type[CEC_MAX_LOG_ADDRS];	/* per logical addr */
	__u8 log_addr_type[CEC_MAX_LOG_ADDRS];	/* CEC_LOG_ADDR_TYPE_* */

	/* CEC 2.0 */
	__u8 all_device_types[CEC_MAX_LOG_ADDRS];	/* all types per addr */
	__u8 features[CEC_MAX_LOG_ADDRS][12];	/* logical address features */
};
| 391 | |||
| 392 | /* Allow a fallback to unregistered */ | ||
| 393 | #define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0) | ||
| 394 | |||
| 395 | /* Events */ | ||
| 396 | |||
| 397 | /* Event that occurs when the adapter state changes */ | ||
| 398 | #define CEC_EVENT_STATE_CHANGE 1 | ||
| 399 | /* | ||
| 400 | * This event is sent when messages are lost because the application | ||
| 401 | * didn't empty the message queue in time | ||
| 402 | */ | ||
| 403 | #define CEC_EVENT_LOST_MSGS 2 | ||
| 404 | |||
| 405 | #define CEC_EVENT_FL_INITIAL_STATE (1 << 0) | ||
| 406 | |||
| 407 | /** | ||
| 408 | * struct cec_event_state_change - used when the CEC adapter changes state. | ||
| 409 | * @phys_addr: the current physical address | ||
| 410 | * @log_addr_mask: the current logical address mask | ||
| 411 | */ | ||
/* Payload for CEC_EVENT_STATE_CHANGE (see kernel-doc above). */
struct cec_event_state_change {
	__u16 phys_addr;	/* current physical address */
	__u16 log_addr_mask;	/* current logical address mask */
};
| 416 | |||
| 417 | /** | ||
| 418 | * struct cec_event_lost_msgs - tells you how many messages were lost due. | ||
| 419 | * @lost_msgs: how many messages were lost. | ||
| 420 | */ | ||
/* Payload for CEC_EVENT_LOST_MSGS (see kernel-doc above). */
struct cec_event_lost_msgs {
	__u32 lost_msgs;	/* number of messages that were lost */
};
| 424 | |||
| 425 | /** | ||
| 426 | * struct cec_event - CEC event structure | ||
| 427 | * @ts: the timestamp of when the event was sent. | ||
| 428 | * @event: the event. | ||
| 429 | * @flags: event flags. | ||
| 430 | * @state_change: the event payload for CEC_EVENT_STATE_CHANGE. | ||
| 431 | * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS. | ||
| 432 | * @raw: array to pad the union. | ||
| 433 | */ | ||
| 434 | struct cec_event { | ||
| 435 | __u64 ts; | ||
| 436 | __u32 event; | ||
| 437 | __u32 flags; | ||
| 438 | union { | ||
| 439 | struct cec_event_state_change state_change; | ||
| 440 | struct cec_event_lost_msgs lost_msgs; | ||
| 441 | __u32 raw[16]; | ||
| 442 | }; | ||
| 443 | }; | ||
| 444 | |||
| 445 | /* ioctls */ | ||
| 446 | |||
| 447 | /* Adapter capabilities */ | ||
| 448 | #define CEC_ADAP_G_CAPS _IOWR('a', 0, struct cec_caps) | ||
| 449 | |||
| 450 | /* | ||
| 451 | * phys_addr is either 0 (if this is the CEC root device) | ||
| 452 | * or a valid physical address obtained from the sink's EDID | ||
| 453 | * as read by this CEC device (if this is a source device) | ||
| 454 | * or a physical address obtained and modified from a sink | ||
| 455 | * EDID and used for a sink CEC device. | ||
| 456 | * If nothing is connected, then phys_addr is 0xffff. | ||
| 457 | * See HDMI 1.4b, section 8.7 (Physical Address). | ||
| 458 | * | ||
| 459 | * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled | ||
| 460 | * internally. | ||
| 461 | */ | ||
| 462 | #define CEC_ADAP_G_PHYS_ADDR _IOR('a', 1, __u16) | ||
| 463 | #define CEC_ADAP_S_PHYS_ADDR _IOW('a', 2, __u16) | ||
| 464 | |||
| 465 | /* | ||
| 466 | * Configure the CEC adapter. It sets the device type and which | ||
| 467 | * logical types it will try to claim. It will return which | ||
| 468 | * logical addresses it could actually claim. | ||
| 469 | * An error is returned if the adapter is disabled or if there | ||
| 470 | * is no physical address assigned. | ||
| 471 | */ | ||
| 472 | |||
| 473 | #define CEC_ADAP_G_LOG_ADDRS _IOR('a', 3, struct cec_log_addrs) | ||
| 474 | #define CEC_ADAP_S_LOG_ADDRS _IOWR('a', 4, struct cec_log_addrs) | ||
| 475 | |||
| 476 | /* Transmit/receive a CEC command */ | ||
| 477 | #define CEC_TRANSMIT _IOWR('a', 5, struct cec_msg) | ||
| 478 | #define CEC_RECEIVE _IOWR('a', 6, struct cec_msg) | ||
| 479 | |||
| 480 | /* Dequeue CEC events */ | ||
| 481 | #define CEC_DQEVENT _IOWR('a', 7, struct cec_event) | ||
| 482 | |||
| 483 | /* | ||
| 484 | * Get and set the message handling mode for this filehandle. | ||
| 485 | */ | ||
| 486 | #define CEC_G_MODE _IOR('a', 8, __u32) | ||
| 487 | #define CEC_S_MODE _IOW('a', 9, __u32) | ||
| 488 | |||
| 489 | /* | ||
| 490 | * The remainder of this header defines all CEC messages and operands. | ||
| 491 | * The format matters since the cec-ctl utility parses it to generate | ||
| 492 | * code for implementing all these messages. | ||
| 493 | * | ||
| 494 | * Comments ending with 'Feature' group messages for each feature. | ||
| 495 | * If messages are part of multiple features, then the "Has also" | ||
| 496 | * comment is used to list the previously defined messages that are | ||
| 497 | * supported by the feature. | ||
| 498 | * | ||
| 499 | * Before operands are defined a comment is added that gives the | ||
| 500 | * name of the operand and in brackets the variable name of the | ||
| 501 | * corresponding argument in the cec-funcs.h function. | ||
| 502 | */ | ||
| 503 | |||
| 504 | /* Messages */ | ||
| 505 | |||
| 506 | /* One Touch Play Feature */ | ||
| 507 | #define CEC_MSG_ACTIVE_SOURCE 0x82 | ||
| 508 | #define CEC_MSG_IMAGE_VIEW_ON 0x04 | ||
| 509 | #define CEC_MSG_TEXT_VIEW_ON 0x0d | ||
| 510 | |||
| 511 | |||
| 512 | /* Routing Control Feature */ | ||
| 513 | |||
| 514 | /* | ||
| 515 | * Has also: | ||
| 516 | * CEC_MSG_ACTIVE_SOURCE | ||
| 517 | */ | ||
| 518 | |||
| 519 | #define CEC_MSG_INACTIVE_SOURCE 0x9d | ||
| 520 | #define CEC_MSG_REQUEST_ACTIVE_SOURCE 0x85 | ||
| 521 | #define CEC_MSG_ROUTING_CHANGE 0x80 | ||
| 522 | #define CEC_MSG_ROUTING_INFORMATION 0x81 | ||
| 523 | #define CEC_MSG_SET_STREAM_PATH 0x86 | ||
| 524 | |||
| 525 | |||
| 526 | /* Standby Feature */ | ||
| 527 | #define CEC_MSG_STANDBY 0x36 | ||
| 528 | |||
| 529 | |||
| 530 | /* One Touch Record Feature */ | ||
| 531 | #define CEC_MSG_RECORD_OFF 0x0b | ||
| 532 | #define CEC_MSG_RECORD_ON 0x09 | ||
| 533 | /* Record Source Type Operand (rec_src_type) */ | ||
| 534 | #define CEC_OP_RECORD_SRC_OWN 1 | ||
| 535 | #define CEC_OP_RECORD_SRC_DIGITAL 2 | ||
| 536 | #define CEC_OP_RECORD_SRC_ANALOG 3 | ||
| 537 | #define CEC_OP_RECORD_SRC_EXT_PLUG 4 | ||
| 538 | #define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR 5 | ||
| 539 | /* Service Identification Method Operand (service_id_method) */ | ||
| 540 | #define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID 0 | ||
| 541 | #define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL 1 | ||
| 542 | /* Digital Service Broadcast System Operand (dig_bcast_system) */ | ||
| 543 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN 0x00 | ||
| 544 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN 0x01 | ||
| 545 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN 0x02 | ||
| 546 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS 0x08 | ||
| 547 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS 0x09 | ||
| 548 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T 0x0a | ||
| 549 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE 0x10 | ||
| 550 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT 0x11 | ||
| 551 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T 0x12 | ||
| 552 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C 0x18 | ||
| 553 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S 0x19 | ||
| 554 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2 0x1a | ||
| 555 | #define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T 0x1b | ||
| 556 | /* Analogue Broadcast Type Operand (ana_bcast_type) */ | ||
| 557 | #define CEC_OP_ANA_BCAST_TYPE_CABLE 0 | ||
| 558 | #define CEC_OP_ANA_BCAST_TYPE_SATELLITE 1 | ||
| 559 | #define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL 2 | ||
| 560 | /* Broadcast System Operand (bcast_system) */ | ||
| 561 | #define CEC_OP_BCAST_SYSTEM_PAL_BG 0x00 | ||
| 562 | #define CEC_OP_BCAST_SYSTEM_SECAM_LQ 0x01 /* SECAM L' */ | ||
| 563 | #define CEC_OP_BCAST_SYSTEM_PAL_M 0x02 | ||
| 564 | #define CEC_OP_BCAST_SYSTEM_NTSC_M 0x03 | ||
| 565 | #define CEC_OP_BCAST_SYSTEM_PAL_I 0x04 | ||
| 566 | #define CEC_OP_BCAST_SYSTEM_SECAM_DK 0x05 | ||
| 567 | #define CEC_OP_BCAST_SYSTEM_SECAM_BG 0x06 | ||
| 568 | #define CEC_OP_BCAST_SYSTEM_SECAM_L 0x07 | ||
| 569 | #define CEC_OP_BCAST_SYSTEM_PAL_DK 0x08 | ||
| 570 | #define CEC_OP_BCAST_SYSTEM_OTHER 0x1f | ||
| 571 | /* Channel Number Format Operand (channel_number_fmt) */ | ||
| 572 | #define CEC_OP_CHANNEL_NUMBER_FMT_1_PART 0x01 | ||
| 573 | #define CEC_OP_CHANNEL_NUMBER_FMT_2_PART 0x02 | ||
| 574 | |||
| 575 | #define CEC_MSG_RECORD_STATUS 0x0a | ||
| 576 | /* Record Status Operand (rec_status) */ | ||
| 577 | #define CEC_OP_RECORD_STATUS_CUR_SRC 0x01 | ||
| 578 | #define CEC_OP_RECORD_STATUS_DIG_SERVICE 0x02 | ||
| 579 | #define CEC_OP_RECORD_STATUS_ANA_SERVICE 0x03 | ||
| 580 | #define CEC_OP_RECORD_STATUS_EXT_INPUT 0x04 | ||
| 581 | #define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE 0x05 | ||
| 582 | #define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE 0x06 | ||
| 583 | #define CEC_OP_RECORD_STATUS_NO_SERVICE 0x07 | ||
| 584 | #define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG 0x09 | ||
| 585 | #define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR 0x0a | ||
| 586 | #define CEC_OP_RECORD_STATUS_UNSUP_CA 0x0b | ||
| 587 | #define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS 0x0c | ||
| 588 | #define CEC_OP_RECORD_STATUS_CANT_COPY_SRC 0x0d | ||
| 589 | #define CEC_OP_RECORD_STATUS_NO_MORE_COPIES 0x0e | ||
| 590 | #define CEC_OP_RECORD_STATUS_NO_MEDIA 0x10 | ||
| 591 | #define CEC_OP_RECORD_STATUS_PLAYING 0x11 | ||
| 592 | #define CEC_OP_RECORD_STATUS_ALREADY_RECORDING 0x12 | ||
| 593 | #define CEC_OP_RECORD_STATUS_MEDIA_PROT 0x13 | ||
| 594 | #define CEC_OP_RECORD_STATUS_NO_SIGNAL 0x14 | ||
| 595 | #define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM 0x15 | ||
| 596 | #define CEC_OP_RECORD_STATUS_NO_SPACE 0x16 | ||
| 597 | #define CEC_OP_RECORD_STATUS_PARENTAL_LOCK 0x17 | ||
| 598 | #define CEC_OP_RECORD_STATUS_TERMINATED_OK 0x1a | ||
| 599 | #define CEC_OP_RECORD_STATUS_ALREADY_TERM 0x1b | ||
| 600 | #define CEC_OP_RECORD_STATUS_OTHER 0x1f | ||
| 601 | |||
| 602 | #define CEC_MSG_RECORD_TV_SCREEN 0x0f | ||
| 603 | |||
| 604 | |||
| 605 | /* Timer Programming Feature */ | ||
| 606 | #define CEC_MSG_CLEAR_ANALOGUE_TIMER 0x33 | ||
| 607 | /* Recording Sequence Operand (recording_seq) */ | ||
| 608 | #define CEC_OP_REC_SEQ_SUNDAY 0x01 | ||
| 609 | #define CEC_OP_REC_SEQ_MONDAY 0x02 | ||
| 610 | #define CEC_OP_REC_SEQ_TUESDAY 0x04 | ||
| 611 | #define CEC_OP_REC_SEQ_WEDNESDAY 0x08 | ||
| 612 | #define CEC_OP_REC_SEQ_THURSDAY 0x10 | ||
| 613 | #define CEC_OP_REC_SEQ_FRIDAY 0x20 | ||
| 614 | #define CEC_OP_REC_SEQ_SATERDAY 0x40 | ||
| 615 | #define CEC_OP_REC_SEQ_ONCE_ONLY 0x00 | ||
| 616 | |||
| 617 | #define CEC_MSG_CLEAR_DIGITAL_TIMER 0x99 | ||
| 618 | |||
| 619 | #define CEC_MSG_CLEAR_EXT_TIMER 0xa1 | ||
| 620 | /* External Source Specifier Operand (ext_src_spec) */ | ||
| 621 | #define CEC_OP_EXT_SRC_PLUG 0x04 | ||
| 622 | #define CEC_OP_EXT_SRC_PHYS_ADDR 0x05 | ||
| 623 | |||
| 624 | #define CEC_MSG_SET_ANALOGUE_TIMER 0x34 | ||
| 625 | #define CEC_MSG_SET_DIGITAL_TIMER 0x97 | ||
| 626 | #define CEC_MSG_SET_EXT_TIMER 0xa2 | ||
| 627 | |||
| 628 | #define CEC_MSG_SET_TIMER_PROGRAM_TITLE 0x67 | ||
| 629 | #define CEC_MSG_TIMER_CLEARED_STATUS 0x43 | ||
| 630 | /* Timer Cleared Status Data Operand (timer_cleared_status) */ | ||
| 631 | #define CEC_OP_TIMER_CLR_STAT_RECORDING 0x00 | ||
| 632 | #define CEC_OP_TIMER_CLR_STAT_NO_MATCHING 0x01 | ||
| 633 | #define CEC_OP_TIMER_CLR_STAT_NO_INFO 0x02 | ||
| 634 | #define CEC_OP_TIMER_CLR_STAT_CLEARED 0x80 | ||
| 635 | |||
| 636 | #define CEC_MSG_TIMER_STATUS 0x35 | ||
| 637 | /* Timer Overlap Warning Operand (timer_overlap_warning) */ | ||
| 638 | #define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP 0 | ||
| 639 | #define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP 1 | ||
| 640 | /* Media Info Operand (media_info) */ | ||
| 641 | #define CEC_OP_MEDIA_INFO_UNPROT_MEDIA 0 | ||
| 642 | #define CEC_OP_MEDIA_INFO_PROT_MEDIA 1 | ||
| 643 | #define CEC_OP_MEDIA_INFO_NO_MEDIA 2 | ||
| 644 | /* Programmed Indicator Operand (prog_indicator) */ | ||
| 645 | #define CEC_OP_PROG_IND_NOT_PROGRAMMED 0 | ||
| 646 | #define CEC_OP_PROG_IND_PROGRAMMED 1 | ||
| 647 | /* Programmed Info Operand (prog_info) */ | ||
| 648 | #define CEC_OP_PROG_INFO_ENOUGH_SPACE 0x08 | ||
| 649 | #define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE 0x09 | ||
| 650 | #define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE 0x0b | ||
| 651 | #define CEC_OP_PROG_INFO_NONE_AVAILABLE 0x0a | ||
| 652 | /* Not Programmed Error Info Operand (prog_error) */ | ||
| 653 | #define CEC_OP_PROG_ERROR_NO_FREE_TIMER 0x01 | ||
| 654 | #define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE 0x02 | ||
| 655 | #define CEC_OP_PROG_ERROR_REC_SEQ_ERROR 0x03 | ||
| 656 | #define CEC_OP_PROG_ERROR_INV_EXT_PLUG 0x04 | ||
| 657 | #define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR 0x05 | ||
| 658 | #define CEC_OP_PROG_ERROR_CA_UNSUPP 0x06 | ||
| 659 | #define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS 0x07 | ||
| 660 | #define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP 0x08 | ||
| 661 | #define CEC_OP_PROG_ERROR_PARENTAL_LOCK 0x09 | ||
| 662 | #define CEC_OP_PROG_ERROR_CLOCK_FAILURE 0x0a | ||
| 663 | #define CEC_OP_PROG_ERROR_DUPLICATE 0x0e | ||
| 664 | |||
| 665 | |||
| 666 | /* System Information Feature */ | ||
| 667 | #define CEC_MSG_CEC_VERSION 0x9e | ||
| 668 | /* CEC Version Operand (cec_version) */ | ||
| 669 | #define CEC_OP_CEC_VERSION_1_3A 4 | ||
| 670 | #define CEC_OP_CEC_VERSION_1_4 5 | ||
| 671 | #define CEC_OP_CEC_VERSION_2_0 6 | ||
| 672 | |||
| 673 | #define CEC_MSG_GET_CEC_VERSION 0x9f | ||
| 674 | #define CEC_MSG_GIVE_PHYSICAL_ADDR 0x83 | ||
| 675 | #define CEC_MSG_GET_MENU_LANGUAGE 0x91 | ||
| 676 | #define CEC_MSG_REPORT_PHYSICAL_ADDR 0x84 | ||
| 677 | /* Primary Device Type Operand (prim_devtype) */ | ||
| 678 | #define CEC_OP_PRIM_DEVTYPE_TV 0 | ||
| 679 | #define CEC_OP_PRIM_DEVTYPE_RECORD 1 | ||
| 680 | #define CEC_OP_PRIM_DEVTYPE_TUNER 3 | ||
| 681 | #define CEC_OP_PRIM_DEVTYPE_PLAYBACK 4 | ||
| 682 | #define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM 5 | ||
| 683 | #define CEC_OP_PRIM_DEVTYPE_SWITCH 6 | ||
| 684 | #define CEC_OP_PRIM_DEVTYPE_PROCESSOR 7 | ||
| 685 | |||
| 686 | #define CEC_MSG_SET_MENU_LANGUAGE 0x32 | ||
| 687 | #define CEC_MSG_REPORT_FEATURES 0xa6 /* HDMI 2.0 */ | ||
| 688 | /* All Device Types Operand (all_device_types) */ | ||
| 689 | #define CEC_OP_ALL_DEVTYPE_TV 0x80 | ||
| 690 | #define CEC_OP_ALL_DEVTYPE_RECORD 0x40 | ||
| 691 | #define CEC_OP_ALL_DEVTYPE_TUNER 0x20 | ||
| 692 | #define CEC_OP_ALL_DEVTYPE_PLAYBACK 0x10 | ||
| 693 | #define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM 0x08 | ||
| 694 | #define CEC_OP_ALL_DEVTYPE_SWITCH 0x04 | ||
| 695 | /* | ||
| 696 | * And if you are wondering what happened to PROCESSOR devices: those should | ||
| 697 | * be mapped to a SWITCH. | ||
| 698 | */ | ||
| 699 | |||
| 700 | /* Valid for RC Profile and Device Feature operands */ | ||
| 701 | #define CEC_OP_FEAT_EXT 0x80 /* Extension bit */ | ||
| 702 | /* RC Profile Operand (rc_profile) */ | ||
| 703 | #define CEC_OP_FEAT_RC_TV_PROFILE_NONE 0x00 | ||
| 704 | #define CEC_OP_FEAT_RC_TV_PROFILE_1 0x02 | ||
| 705 | #define CEC_OP_FEAT_RC_TV_PROFILE_2 0x06 | ||
| 706 | #define CEC_OP_FEAT_RC_TV_PROFILE_3 0x0a | ||
| 707 | #define CEC_OP_FEAT_RC_TV_PROFILE_4 0x0e | ||
| 708 | #define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU 0x50 | ||
| 709 | #define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU 0x48 | ||
| 710 | #define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU 0x44 | ||
| 711 | #define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU 0x42 | ||
| 712 | #define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU 0x41 | ||
| 713 | /* Device Feature Operand (dev_features) */ | ||
| 714 | #define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN 0x40 | ||
| 715 | #define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING 0x20 | ||
| 716 | #define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL 0x10 | ||
| 717 | #define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08 | ||
| 718 | #define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04 | ||
| 719 | #define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02 | ||
| 720 | |||
| 721 | #define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */ | ||
| 722 | |||
| 723 | |||
| 724 | /* Deck Control Feature */ | ||
| 725 | #define CEC_MSG_DECK_CONTROL 0x42 | ||
| 726 | /* Deck Control Mode Operand (deck_control_mode) */ | ||
| 727 | #define CEC_OP_DECK_CTL_MODE_SKIP_FWD 1 | ||
| 728 | #define CEC_OP_DECK_CTL_MODE_SKIP_REV 2 | ||
| 729 | #define CEC_OP_DECK_CTL_MODE_STOP 3 | ||
| 730 | #define CEC_OP_DECK_CTL_MODE_EJECT 4 | ||
| 731 | |||
| 732 | #define CEC_MSG_DECK_STATUS 0x1b | ||
| 733 | /* Deck Info Operand (deck_info) */ | ||
| 734 | #define CEC_OP_DECK_INFO_PLAY 0x11 | ||
| 735 | #define CEC_OP_DECK_INFO_RECORD 0x12 | ||
| 736 | #define CEC_OP_DECK_INFO_PLAY_REV 0x13 | ||
| 737 | #define CEC_OP_DECK_INFO_STILL 0x14 | ||
| 738 | #define CEC_OP_DECK_INFO_SLOW 0x15 | ||
| 739 | #define CEC_OP_DECK_INFO_SLOW_REV 0x16 | ||
| 740 | #define CEC_OP_DECK_INFO_FAST_FWD 0x17 | ||
| 741 | #define CEC_OP_DECK_INFO_FAST_REV 0x18 | ||
| 742 | #define CEC_OP_DECK_INFO_NO_MEDIA 0x19 | ||
| 743 | #define CEC_OP_DECK_INFO_STOP 0x1a | ||
| 744 | #define CEC_OP_DECK_INFO_SKIP_FWD 0x1b | ||
| 745 | #define CEC_OP_DECK_INFO_SKIP_REV 0x1c | ||
| 746 | #define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD 0x1d | ||
| 747 | #define CEC_OP_DECK_INFO_INDEX_SEARCH_REV 0x1e | ||
| 748 | #define CEC_OP_DECK_INFO_OTHER 0x1f | ||
| 749 | |||
| 750 | #define CEC_MSG_GIVE_DECK_STATUS 0x1a | ||
| 751 | /* Status Request Operand (status_req) */ | ||
| 752 | #define CEC_OP_STATUS_REQ_ON 1 | ||
| 753 | #define CEC_OP_STATUS_REQ_OFF 2 | ||
| 754 | #define CEC_OP_STATUS_REQ_ONCE 3 | ||
| 755 | |||
| 756 | #define CEC_MSG_PLAY 0x41 | ||
| 757 | /* Play Mode Operand (play_mode) */ | ||
| 758 | #define CEC_OP_PLAY_MODE_PLAY_FWD 0x24 | ||
| 759 | #define CEC_OP_PLAY_MODE_PLAY_REV 0x20 | ||
| 760 | #define CEC_OP_PLAY_MODE_PLAY_STILL 0x25 | ||
| 761 | #define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN 0x05 | ||
| 762 | #define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED 0x06 | ||
| 763 | #define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX 0x07 | ||
| 764 | #define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN 0x09 | ||
| 765 | #define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED 0x0a | ||
| 766 | #define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX 0x0b | ||
| 767 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN 0x15 | ||
| 768 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED 0x16 | ||
| 769 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX 0x17 | ||
| 770 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN 0x19 | ||
| 771 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED 0x1a | ||
| 772 | #define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX 0x1b | ||
| 773 | |||
| 774 | |||
| 775 | /* Tuner Control Feature */ | ||
| 776 | #define CEC_MSG_GIVE_TUNER_DEVICE_STATUS 0x08 | ||
| 777 | #define CEC_MSG_SELECT_ANALOGUE_SERVICE 0x92 | ||
| 778 | #define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93 | ||
| 779 | #define CEC_MSG_TUNER_DEVICE_STATUS 0x07 | ||
| 780 | /* Recording Flag Operand (rec_flag) */ | ||
| 781 | #define CEC_OP_REC_FLAG_USED 0 | ||
| 782 | #define CEC_OP_REC_FLAG_NOT_USED 1 | ||
| 783 | /* Tuner Display Info Operand (tuner_display_info) */ | ||
| 784 | #define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0 | ||
| 785 | #define CEC_OP_TUNER_DISPLAY_INFO_NONE 1 | ||
| 786 | #define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE 2 | ||
| 787 | |||
| 788 | #define CEC_MSG_TUNER_STEP_DECREMENT 0x06 | ||
| 789 | #define CEC_MSG_TUNER_STEP_INCREMENT 0x05 | ||
| 790 | |||
| 791 | |||
| 792 | /* Vendor Specific Commands Feature */ | ||
| 793 | |||
| 794 | /* | ||
| 795 | * Has also: | ||
| 796 | * CEC_MSG_CEC_VERSION | ||
| 797 | * CEC_MSG_GET_CEC_VERSION | ||
| 798 | */ | ||
| 799 | #define CEC_MSG_DEVICE_VENDOR_ID 0x87 | ||
| 800 | #define CEC_MSG_GIVE_DEVICE_VENDOR_ID 0x8c | ||
| 801 | #define CEC_MSG_VENDOR_COMMAND 0x89 | ||
| 802 | #define CEC_MSG_VENDOR_COMMAND_WITH_ID 0xa0 | ||
| 803 | #define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN 0x8a | ||
| 804 | #define CEC_MSG_VENDOR_REMOTE_BUTTON_UP 0x8b | ||
| 805 | |||
| 806 | |||
| 807 | /* OSD Display Feature */ | ||
| 808 | #define CEC_MSG_SET_OSD_STRING 0x64 | ||
| 809 | /* Display Control Operand (disp_ctl) */ | ||
| 810 | #define CEC_OP_DISP_CTL_DEFAULT 0x00 | ||
| 811 | #define CEC_OP_DISP_CTL_UNTIL_CLEARED 0x40 | ||
| 812 | #define CEC_OP_DISP_CTL_CLEAR 0x80 | ||
| 813 | |||
| 814 | |||
| 815 | /* Device OSD Transfer Feature */ | ||
| 816 | #define CEC_MSG_GIVE_OSD_NAME 0x46 | ||
| 817 | #define CEC_MSG_SET_OSD_NAME 0x47 | ||
| 818 | |||
| 819 | |||
| 820 | /* Device Menu Control Feature */ | ||
| 821 | #define CEC_MSG_MENU_REQUEST 0x8d | ||
| 822 | /* Menu Request Type Operand (menu_req) */ | ||
| 823 | #define CEC_OP_MENU_REQUEST_ACTIVATE 0x00 | ||
| 824 | #define CEC_OP_MENU_REQUEST_DEACTIVATE 0x01 | ||
| 825 | #define CEC_OP_MENU_REQUEST_QUERY 0x02 | ||
| 826 | |||
| 827 | #define CEC_MSG_MENU_STATUS 0x8e | ||
| 828 | /* Menu State Operand (menu_state) */ | ||
| 829 | #define CEC_OP_MENU_STATE_ACTIVATED 0x00 | ||
| 830 | #define CEC_OP_MENU_STATE_DEACTIVATED 0x01 | ||
| 831 | |||
| 832 | #define CEC_MSG_USER_CONTROL_PRESSED 0x44 | ||
| 833 | /* UI Broadcast Type Operand (ui_bcast_type) */ | ||
| 834 | #define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00 | ||
| 835 | #define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01 | ||
| 836 | #define CEC_OP_UI_BCAST_TYPE_ANALOGUE 0x10 | ||
| 837 | #define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T 0x20 | ||
| 838 | #define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE 0x30 | ||
| 839 | #define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT 0x40 | ||
| 840 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL 0x50 | ||
| 841 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL_T 0x60 | ||
| 842 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE 0x70 | ||
| 843 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT 0x80 | ||
| 844 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT 0x90 | ||
| 845 | #define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2 0x91 | ||
| 846 | #define CEC_OP_UI_BCAST_TYPE_IP 0xa0 | ||
| 847 | /* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */ | ||
| 848 | #define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO 0x10 | ||
| 849 | #define CEC_OP_UI_SND_PRES_CTL_KARAOKE 0x20 | ||
| 850 | #define CEC_OP_UI_SND_PRES_CTL_DOWNMIX 0x80 | ||
| 851 | #define CEC_OP_UI_SND_PRES_CTL_REVERB 0x90 | ||
| 852 | #define CEC_OP_UI_SND_PRES_CTL_EQUALIZER 0xa0 | ||
| 853 | #define CEC_OP_UI_SND_PRES_CTL_BASS_UP 0xb1 | ||
| 854 | #define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL 0xb2 | ||
| 855 | #define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN 0xb3 | ||
| 856 | #define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP 0xc1 | ||
| 857 | #define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL 0xc2 | ||
| 858 | #define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN 0xc3 | ||
| 859 | |||
| 860 | #define CEC_MSG_USER_CONTROL_RELEASED 0x45 | ||
| 861 | |||
| 862 | |||
| 863 | /* Remote Control Passthrough Feature */ | ||
| 864 | |||
| 865 | /* | ||
| 866 | * Has also: | ||
| 867 | * CEC_MSG_USER_CONTROL_PRESSED | ||
| 868 | * CEC_MSG_USER_CONTROL_RELEASED | ||
| 869 | */ | ||
| 870 | |||
| 871 | |||
| 872 | /* Power Status Feature */ | ||
| 873 | #define CEC_MSG_GIVE_DEVICE_POWER_STATUS 0x8f | ||
| 874 | #define CEC_MSG_REPORT_POWER_STATUS 0x90 | ||
| 875 | /* Power Status Operand (pwr_state) */ | ||
| 876 | #define CEC_OP_POWER_STATUS_ON 0 | ||
| 877 | #define CEC_OP_POWER_STATUS_STANDBY 1 | ||
| 878 | #define CEC_OP_POWER_STATUS_TO_ON 2 | ||
| 879 | #define CEC_OP_POWER_STATUS_TO_STANDBY 3 | ||
| 880 | |||
| 881 | |||
| 882 | /* General Protocol Messages */ | ||
| 883 | #define CEC_MSG_FEATURE_ABORT 0x00 | ||
| 884 | /* Abort Reason Operand (reason) */ | ||
| 885 | #define CEC_OP_ABORT_UNRECOGNIZED_OP 0 | ||
| 886 | #define CEC_OP_ABORT_INCORRECT_MODE 1 | ||
| 887 | #define CEC_OP_ABORT_NO_SOURCE 2 | ||
| 888 | #define CEC_OP_ABORT_INVALID_OP 3 | ||
| 889 | #define CEC_OP_ABORT_REFUSED 4 | ||
| 890 | #define CEC_OP_ABORT_UNDETERMINED 5 | ||
| 891 | |||
| 892 | #define CEC_MSG_ABORT 0xff | ||
| 893 | |||
| 894 | |||
| 895 | /* System Audio Control Feature */ | ||
| 896 | |||
| 897 | /* | ||
| 898 | * Has also: | ||
| 899 | * CEC_MSG_USER_CONTROL_PRESSED | ||
| 900 | * CEC_MSG_USER_CONTROL_RELEASED | ||
| 901 | */ | ||
| 902 | #define CEC_MSG_GIVE_AUDIO_STATUS 0x71 | ||
| 903 | #define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS 0x7d | ||
| 904 | #define CEC_MSG_REPORT_AUDIO_STATUS 0x7a | ||
| 905 | /* Audio Mute Status Operand (aud_mute_status) */ | ||
| 906 | #define CEC_OP_AUD_MUTE_STATUS_OFF 0 | ||
| 907 | #define CEC_OP_AUD_MUTE_STATUS_ON 1 | ||
| 908 | |||
| 909 | #define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR 0xa3 | ||
| 910 | #define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR 0xa4 | ||
| 911 | #define CEC_MSG_SET_SYSTEM_AUDIO_MODE 0x72 | ||
| 912 | /* System Audio Status Operand (sys_aud_status) */ | ||
| 913 | #define CEC_OP_SYS_AUD_STATUS_OFF 0 | ||
| 914 | #define CEC_OP_SYS_AUD_STATUS_ON 1 | ||
| 915 | |||
| 916 | #define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST 0x70 | ||
| 917 | #define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS 0x7e | ||
| 918 | /* Audio Format ID Operand (audio_format_id) */ | ||
| 919 | #define CEC_OP_AUD_FMT_ID_CEA861 0 | ||
| 920 | #define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 | ||
| 921 | |||
| 922 | |||
| 923 | /* Audio Rate Control Feature */ | ||
| 924 | #define CEC_MSG_SET_AUDIO_RATE 0x9a | ||
| 925 | /* Audio Rate Operand (audio_rate) */ | ||
| 926 | #define CEC_OP_AUD_RATE_OFF 0 | ||
| 927 | #define CEC_OP_AUD_RATE_WIDE_STD 1 | ||
| 928 | #define CEC_OP_AUD_RATE_WIDE_FAST 2 | ||
| 929 | #define CEC_OP_AUD_RATE_WIDE_SLOW 3 | ||
| 930 | #define CEC_OP_AUD_RATE_NARROW_STD 4 | ||
| 931 | #define CEC_OP_AUD_RATE_NARROW_FAST 5 | ||
| 932 | #define CEC_OP_AUD_RATE_NARROW_SLOW 6 | ||
| 933 | |||
| 934 | |||
| 935 | /* Audio Return Channel Control Feature */ | ||
| 936 | #define CEC_MSG_INITIATE_ARC 0xc0 | ||
| 937 | #define CEC_MSG_REPORT_ARC_INITIATED 0xc1 | ||
| 938 | #define CEC_MSG_REPORT_ARC_TERMINATED 0xc2 | ||
| 939 | #define CEC_MSG_REQUEST_ARC_INITIATION 0xc3 | ||
| 940 | #define CEC_MSG_REQUEST_ARC_TERMINATION 0xc4 | ||
| 941 | #define CEC_MSG_TERMINATE_ARC 0xc5 | ||
| 942 | |||
| 943 | |||
| 944 | /* Dynamic Audio Lipsync Feature */ | ||
| 945 | /* Only for CEC 2.0 and up */ | ||
| 946 | #define CEC_MSG_REQUEST_CURRENT_LATENCY 0xa7 | ||
| 947 | #define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8 | ||
| 948 | /* Low Latency Mode Operand (low_latency_mode) */ | ||
| 949 | #define CEC_OP_LOW_LATENCY_MODE_OFF 0 | ||
| 950 | #define CEC_OP_LOW_LATENCY_MODE_ON 1 | ||
| 951 | /* Audio Output Compensated Operand (audio_out_compensated) */ | ||
| 952 | #define CEC_OP_AUD_OUT_COMPENSATED_NA 0 | ||
| 953 | #define CEC_OP_AUD_OUT_COMPENSATED_DELAY 1 | ||
| 954 | #define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY 2 | ||
| 955 | #define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY 3 | ||
| 956 | |||
| 957 | |||
| 958 | /* Capability Discovery and Control Feature */ | ||
| 959 | #define CEC_MSG_CDC_MESSAGE 0xf8 | ||
| 960 | /* Ethernet-over-HDMI: nobody ever does this... */ | ||
| 961 | #define CEC_MSG_CDC_HEC_INQUIRE_STATE 0x00 | ||
| 962 | #define CEC_MSG_CDC_HEC_REPORT_STATE 0x01 | ||
| 963 | /* HEC Functionality State Operand (hec_func_state) */ | ||
| 964 | #define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED 0 | ||
| 965 | #define CEC_OP_HEC_FUNC_STATE_INACTIVE 1 | ||
| 966 | #define CEC_OP_HEC_FUNC_STATE_ACTIVE 2 | ||
| 967 | #define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD 3 | ||
| 968 | /* Host Functionality State Operand (host_func_state) */ | ||
| 969 | #define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED 0 | ||
| 970 | #define CEC_OP_HOST_FUNC_STATE_INACTIVE 1 | ||
| 971 | #define CEC_OP_HOST_FUNC_STATE_ACTIVE 2 | ||
| 972 | /* ENC Functionality State Operand (enc_func_state) */ | ||
| 973 | #define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED 0 | ||
| 974 | #define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE 1 | ||
| 975 | #define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE 2 | ||
| 976 | /* CDC Error Code Operand (cdc_errcode) */ | ||
| 977 | #define CEC_OP_CDC_ERROR_CODE_NONE 0 | ||
| 978 | #define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED 1 | ||
| 979 | #define CEC_OP_CDC_ERROR_CODE_WRONG_STATE 2 | ||
| 980 | #define CEC_OP_CDC_ERROR_CODE_OTHER 3 | ||
| 981 | /* HEC Support Operand (hec_support) */ | ||
| 982 | #define CEC_OP_HEC_SUPPORT_NO 0 | ||
| 983 | #define CEC_OP_HEC_SUPPORT_YES 1 | ||
| 984 | /* HEC Activation Operand (hec_activation) */ | ||
| 985 | #define CEC_OP_HEC_ACTIVATION_ON 0 | ||
| 986 | #define CEC_OP_HEC_ACTIVATION_OFF 1 | ||
| 987 | |||
| 988 | #define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT 0x02 | ||
| 989 | #define CEC_MSG_CDC_HEC_SET_STATE 0x03 | ||
| 990 | /* HEC Set State Operand (hec_set_state) */ | ||
| 991 | #define CEC_OP_HEC_SET_STATE_DEACTIVATE 0 | ||
| 992 | #define CEC_OP_HEC_SET_STATE_ACTIVATE 1 | ||
| 993 | |||
| 994 | #define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION 0x04 | ||
| 995 | #define CEC_MSG_CDC_HEC_NOTIFY_ALIVE 0x05 | ||
| 996 | #define CEC_MSG_CDC_HEC_DISCOVER 0x06 | ||
| 997 | /* Hotplug Detect messages */ | ||
| 998 | #define CEC_MSG_CDC_HPD_SET_STATE 0x10 | ||
| 999 | /* HPD State Operand (hpd_state) */ | ||
| 1000 | #define CEC_OP_HPD_STATE_CP_EDID_DISABLE 0 | ||
| 1001 | #define CEC_OP_HPD_STATE_CP_EDID_ENABLE 1 | ||
| 1002 | #define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE 2 | ||
| 1003 | #define CEC_OP_HPD_STATE_EDID_DISABLE 3 | ||
| 1004 | #define CEC_OP_HPD_STATE_EDID_ENABLE 4 | ||
| 1005 | #define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE 5 | ||
| 1006 | #define CEC_MSG_CDC_HPD_REPORT_STATE 0x11 | ||
| 1007 | /* HPD Error Code Operand (hpd_error) */ | ||
| 1008 | #define CEC_OP_HPD_ERROR_NONE 0 | ||
| 1009 | #define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE 1 | ||
| 1010 | #define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE 2 | ||
| 1011 | #define CEC_OP_HPD_ERROR_OTHER 3 | ||
| 1012 | #define CEC_OP_HPD_ERROR_NONE_NO_VIDEO 4 | ||
| 1013 | |||
| 1014 | #endif | ||
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 374bb1c4ef52..a6747789fe5c 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h | |||
| @@ -64,7 +64,7 @@ struct ceph_auth_client_ops { | |||
| 64 | int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, | 64 | int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, |
| 65 | struct ceph_auth_handshake *auth); | 65 | struct ceph_auth_handshake *auth); |
| 66 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, | 66 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, |
| 67 | struct ceph_authorizer *a, size_t len); | 67 | struct ceph_authorizer *a); |
| 68 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, | 68 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, |
| 69 | int peer_type); | 69 | int peer_type); |
| 70 | 70 | ||
| @@ -118,8 +118,7 @@ extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, | |||
| 118 | int peer_type, | 118 | int peer_type, |
| 119 | struct ceph_auth_handshake *a); | 119 | struct ceph_auth_handshake *a); |
| 120 | extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, | 120 | extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, |
| 121 | struct ceph_authorizer *a, | 121 | struct ceph_authorizer *a); |
| 122 | size_t len); | ||
| 123 | extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, | 122 | extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, |
| 124 | int peer_type); | 123 | int peer_type); |
| 125 | 124 | ||
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index f96de8de4fa7..f4b2ee18f38c 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h | |||
| @@ -653,6 +653,9 @@ enum { | |||
| 653 | 653 | ||
| 654 | extern const char *ceph_cap_op_name(int op); | 654 | extern const char *ceph_cap_op_name(int op); |
| 655 | 655 | ||
| 656 | /* flags field in client cap messages (version >= 10) */ | ||
| 657 | #define CEPH_CLIENT_CAPS_SYNC (0x1) | ||
| 658 | |||
| 656 | /* | 659 | /* |
| 657 | * caps message, used for capability callbacks, acks, requests, etc. | 660 | * caps message, used for capability callbacks, acks, requests, etc. |
| 658 | */ | 661 | */ |
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 87ed09f54800..8ed5dc505fbb 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h | |||
| @@ -31,6 +31,10 @@ struct ceph_mdsmap { | |||
| 31 | int m_num_data_pg_pools; | 31 | int m_num_data_pg_pools; |
| 32 | u64 *m_data_pg_pools; | 32 | u64 *m_data_pg_pools; |
| 33 | u64 m_cas_pg_pool; | 33 | u64 m_cas_pg_pool; |
| 34 | |||
| 35 | bool m_enabled; | ||
| 36 | bool m_damaged; | ||
| 37 | int m_num_laggy; | ||
| 34 | }; | 38 | }; |
| 35 | 39 | ||
| 36 | static inline struct ceph_entity_addr * | 40 | static inline struct ceph_entity_addr * |
| @@ -59,5 +63,6 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) | |||
| 59 | extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); | 63 | extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); |
| 60 | extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); | 64 | extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); |
| 61 | extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); | 65 | extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); |
| 66 | extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); | ||
| 62 | 67 | ||
| 63 | #endif | 68 | #endif |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 8dbd7879fdc6..c5c4c713e00f 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #ifndef __FS_CEPH_MESSENGER_H | 1 | #ifndef __FS_CEPH_MESSENGER_H |
| 2 | #define __FS_CEPH_MESSENGER_H | 2 | #define __FS_CEPH_MESSENGER_H |
| 3 | 3 | ||
| 4 | #include <linux/blk_types.h> | 4 | #include <linux/bvec.h> |
| 5 | #include <linux/kref.h> | 5 | #include <linux/kref.h> |
| 6 | #include <linux/mutex.h> | 6 | #include <linux/mutex.h> |
| 7 | #include <linux/net.h> | 7 | #include <linux/net.h> |
| @@ -30,7 +30,7 @@ struct ceph_connection_operations { | |||
| 30 | struct ceph_auth_handshake *(*get_authorizer) ( | 30 | struct ceph_auth_handshake *(*get_authorizer) ( |
| 31 | struct ceph_connection *con, | 31 | struct ceph_connection *con, |
| 32 | int *proto, int force_new); | 32 | int *proto, int force_new); |
| 33 | int (*verify_authorizer_reply) (struct ceph_connection *con, int len); | 33 | int (*verify_authorizer_reply) (struct ceph_connection *con); |
| 34 | int (*invalidate_authorizer)(struct ceph_connection *con); | 34 | int (*invalidate_authorizer)(struct ceph_connection *con); |
| 35 | 35 | ||
| 36 | /* there was some error on the socket (disconnect, whatever) */ | 36 | /* there was some error on the socket (disconnect, whatever) */ |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index a8e66344bacc..03a6653d329a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -176,7 +176,7 @@ struct ceph_osd_request { | |||
| 176 | struct kref r_kref; | 176 | struct kref r_kref; |
| 177 | bool r_mempool; | 177 | bool r_mempool; |
| 178 | struct completion r_completion; | 178 | struct completion r_completion; |
| 179 | struct completion r_safe_completion; /* fsync waiter */ | 179 | struct completion r_done_completion; /* fsync waiter */ |
| 180 | ceph_osdc_callback_t r_callback; | 180 | ceph_osdc_callback_t r_callback; |
| 181 | ceph_osdc_unsafe_callback_t r_unsafe_callback; | 181 | ceph_osdc_unsafe_callback_t r_unsafe_callback; |
| 182 | struct list_head r_unsafe_item; | 182 | struct list_head r_unsafe_item; |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 5b17de62c962..861b4677fc5b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/percpu-refcount.h> | 16 | #include <linux/percpu-refcount.h> |
| 17 | #include <linux/percpu-rwsem.h> | 17 | #include <linux/percpu-rwsem.h> |
| 18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
| 19 | #include <linux/bpf-cgroup.h> | ||
| 19 | 20 | ||
| 20 | #ifdef CONFIG_CGROUPS | 21 | #ifdef CONFIG_CGROUPS |
| 21 | 22 | ||
| @@ -300,6 +301,9 @@ struct cgroup { | |||
| 300 | /* used to schedule release agent */ | 301 | /* used to schedule release agent */ |
| 301 | struct work_struct release_agent_work; | 302 | struct work_struct release_agent_work; |
| 302 | 303 | ||
| 304 | /* used to store eBPF programs */ | ||
| 305 | struct cgroup_bpf bpf; | ||
| 306 | |||
| 303 | /* ids of the ancestors at each level including self */ | 307 | /* ids of the ancestors at each level including self */ |
| 304 | int ancestor_ids[]; | 308 | int ancestor_ids[]; |
| 305 | }; | 309 | }; |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 123c02788807..e9d36b3e49de 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
| @@ -17,8 +17,9 @@ | |||
| 17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
| 18 | 18 | ||
| 19 | struct device; | 19 | struct device; |
| 20 | |||
| 21 | struct clk; | 20 | struct clk; |
| 21 | struct device_node; | ||
| 22 | struct of_phandle_args; | ||
| 22 | 23 | ||
| 23 | /** | 24 | /** |
| 24 | * DOC: clk notifier callback types | 25 | * DOC: clk notifier callback types |
| @@ -249,6 +250,23 @@ struct clk *clk_get(struct device *dev, const char *id); | |||
| 249 | struct clk *devm_clk_get(struct device *dev, const char *id); | 250 | struct clk *devm_clk_get(struct device *dev, const char *id); |
| 250 | 251 | ||
| 251 | /** | 252 | /** |
| 253 | * devm_get_clk_from_child - lookup and obtain a managed reference to a | ||
| 254 | * clock producer from child node. | ||
| 255 | * @dev: device for clock "consumer" | ||
| 256 | * @np: pointer to clock consumer node | ||
| 257 | * @con_id: clock consumer ID | ||
| 258 | * | ||
| 259 | * This function parses the clocks, and uses them to look up the | ||
| 260 | * struct clk from the registered list of clock providers by using | ||
| 261 | * @np and @con_id | ||
| 262 | * | ||
| 263 | * The clock will automatically be freed when the device is unbound | ||
| 264 | * from the bus. | ||
| 265 | */ | ||
| 266 | struct clk *devm_get_clk_from_child(struct device *dev, | ||
| 267 | struct device_node *np, const char *con_id); | ||
| 268 | |||
| 269 | /** | ||
| 252 | * clk_enable - inform the system when the clock source should be running. | 270 | * clk_enable - inform the system when the clock source should be running. |
| 253 | * @clk: clock source | 271 | * @clk: clock source |
| 254 | * | 272 | * |
| @@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) | |||
| 432 | return NULL; | 450 | return NULL; |
| 433 | } | 451 | } |
| 434 | 452 | ||
| 453 | static inline struct clk *devm_get_clk_from_child(struct device *dev, | ||
| 454 | struct device_node *np, const char *con_id) | ||
| 455 | { | ||
| 456 | return NULL; | ||
| 457 | } | ||
| 458 | |||
| 435 | static inline void clk_put(struct clk *clk) {} | 459 | static inline void clk_put(struct clk *clk) {} |
| 436 | 460 | ||
| 437 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} | 461 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
| @@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk) | |||
| 501 | clk_unprepare(clk); | 525 | clk_unprepare(clk); |
| 502 | } | 526 | } |
| 503 | 527 | ||
| 504 | struct device_node; | ||
| 505 | struct of_phandle_args; | ||
| 506 | |||
| 507 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) | 528 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) |
| 508 | struct clk *of_clk_get(struct device_node *np, int index); | 529 | struct clk *of_clk_get(struct device_node *np, int index); |
| 509 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); | 530 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); |
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index ba6fa4148515..9ebf1f8243bb 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h | |||
| @@ -20,10 +20,6 @@ struct device; | |||
| 20 | struct device_node; | 20 | struct device_node; |
| 21 | struct generic_pm_domain; | 21 | struct generic_pm_domain; |
| 22 | 22 | ||
| 23 | void r8a7778_clocks_init(u32 mode); | ||
| 24 | void r8a7779_clocks_init(u32 mode); | ||
| 25 | void rcar_gen2_clocks_init(u32 mode); | ||
| 26 | |||
| 27 | void cpg_mstp_add_clk_domain(struct device_node *np); | 23 | void cpg_mstp_add_clk_domain(struct device_node *np); |
| 28 | #ifdef CONFIG_CLK_RENESAS_CPG_MSTP | 24 | #ifdef CONFIG_CLK_RENESAS_CPG_MSTP |
| 29 | int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); | 25 | int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 08398182f56e..e315d04a2fd9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
| @@ -75,8 +75,8 @@ struct module; | |||
| 75 | * structure. | 75 | * structure. |
| 76 | */ | 76 | */ |
| 77 | struct clocksource { | 77 | struct clocksource { |
| 78 | cycle_t (*read)(struct clocksource *cs); | 78 | u64 (*read)(struct clocksource *cs); |
| 79 | cycle_t mask; | 79 | u64 mask; |
| 80 | u32 mult; | 80 | u32 mult; |
| 81 | u32 shift; | 81 | u32 shift; |
| 82 | u64 max_idle_ns; | 82 | u64 max_idle_ns; |
| @@ -98,8 +98,8 @@ struct clocksource { | |||
| 98 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 98 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
| 99 | /* Watchdog related data, used by the framework */ | 99 | /* Watchdog related data, used by the framework */ |
| 100 | struct list_head wd_list; | 100 | struct list_head wd_list; |
| 101 | cycle_t cs_last; | 101 | u64 cs_last; |
| 102 | cycle_t wd_last; | 102 | u64 wd_last; |
| 103 | #endif | 103 | #endif |
| 104 | struct module *owner; | 104 | struct module *owner; |
| 105 | }; | 105 | }; |
| @@ -117,7 +117,7 @@ struct clocksource { | |||
| 117 | #define CLOCK_SOURCE_RESELECT 0x100 | 117 | #define CLOCK_SOURCE_RESELECT 0x100 |
| 118 | 118 | ||
| 119 | /* simplify initialization of mask field */ | 119 | /* simplify initialization of mask field */ |
| 120 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) | 120 | #define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
| 121 | 121 | ||
| 122 | static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) | 122 | static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) |
| 123 | { | 123 | { |
| @@ -169,11 +169,14 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) | |||
| 169 | * @mult: cycle to nanosecond multiplier | 169 | * @mult: cycle to nanosecond multiplier |
| 170 | * @shift: cycle to nanosecond divisor (power of two) | 170 | * @shift: cycle to nanosecond divisor (power of two) |
| 171 | * | 171 | * |
| 172 | * Converts cycles to nanoseconds, using the given mult and shift. | 172 | * Converts clocksource cycles to nanoseconds, using the given @mult and @shift. |
| 173 | * The code is optimized for performance and is not intended to work | ||
| 174 | * with absolute clocksource cycles (as those will easily overflow), | ||
| 175 | * but is only intended to be used with relative (delta) clocksource cycles. | ||
| 173 | * | 176 | * |
| 174 | * XXX - This could use some mult_lxl_ll() asm optimization | 177 | * XXX - This could use some mult_lxl_ll() asm optimization |
| 175 | */ | 178 | */ |
| 176 | static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) | 179 | static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) |
| 177 | { | 180 | { |
| 178 | return ((u64) cycles * mult) >> shift; | 181 | return ((u64) cycles * mult) >> shift; |
| 179 | } | 182 | } |
| @@ -233,13 +236,13 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz | |||
| 233 | 236 | ||
| 234 | extern int timekeeping_notify(struct clocksource *clock); | 237 | extern int timekeeping_notify(struct clocksource *clock); |
| 235 | 238 | ||
| 236 | extern cycle_t clocksource_mmio_readl_up(struct clocksource *); | 239 | extern u64 clocksource_mmio_readl_up(struct clocksource *); |
| 237 | extern cycle_t clocksource_mmio_readl_down(struct clocksource *); | 240 | extern u64 clocksource_mmio_readl_down(struct clocksource *); |
| 238 | extern cycle_t clocksource_mmio_readw_up(struct clocksource *); | 241 | extern u64 clocksource_mmio_readw_up(struct clocksource *); |
| 239 | extern cycle_t clocksource_mmio_readw_down(struct clocksource *); | 242 | extern u64 clocksource_mmio_readw_down(struct clocksource *); |
| 240 | 243 | ||
| 241 | extern int clocksource_mmio_init(void __iomem *, const char *, | 244 | extern int clocksource_mmio_init(void __iomem *, const char *, |
| 242 | unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); | 245 | unsigned long, int, unsigned, u64 (*)(struct clocksource *)); |
| 243 | 246 | ||
| 244 | extern int clocksource_i8253_init(void); | 247 | extern int clocksource_i8253_init(void); |
| 245 | 248 | ||
diff --git a/include/linux/cma.h b/include/linux/cma.h index 29f9e774ab76..6f0a91b37f68 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | #ifndef __CMA_H__ | 1 | #ifndef __CMA_H__ |
| 2 | #define __CMA_H__ | 2 | #define __CMA_H__ |
| 3 | 3 | ||
| 4 | #include <linux/init.h> | ||
| 5 | #include <linux/types.h> | ||
| 6 | |||
| 4 | /* | 7 | /* |
| 5 | * There is always at least global CMA area and a few optional | 8 | * There is always at least global CMA area and a few optional |
| 6 | * areas configured in kernel .config. | 9 | * areas configured in kernel .config. |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 928e5ca0caee..0444b1336268 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | * clobbered. The issue is as follows: while the inline asm might | 21 | * clobbered. The issue is as follows: while the inline asm might |
| 22 | * access any memory it wants, the compiler could have fit all of | 22 | * access any memory it wants, the compiler could have fit all of |
| 23 | * @ptr into memory registers instead, and since @ptr never escaped | 23 | * @ptr into memory registers instead, and since @ptr never escaped |
| 24 | * from that, it proofed that the inline asm wasn't touching any of | 24 | * from that, it proved that the inline asm wasn't touching any of |
| 25 | * it. This version works well with both compilers, i.e. we're telling | 25 | * it. This version works well with both compilers, i.e. we're telling |
| 26 | * the compiler that the inline asm absolutely may see the contents | 26 | * the compiler that the inline asm absolutely may see the contents |
| 27 | * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 | 27 | * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 |
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d9d6a9d77489..2319b8c108e8 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
| @@ -35,14 +35,11 @@ | |||
| 35 | #ifndef _CONFIGFS_H_ | 35 | #ifndef _CONFIGFS_H_ |
| 36 | #define _CONFIGFS_H_ | 36 | #define _CONFIGFS_H_ |
| 37 | 37 | ||
| 38 | #include <linux/kernel.h> | 38 | #include <linux/stat.h> /* S_IRUGO */ |
| 39 | #include <linux/types.h> | 39 | #include <linux/types.h> /* ssize_t */ |
| 40 | #include <linux/list.h> | 40 | #include <linux/list.h> /* struct list_head */ |
| 41 | #include <linux/kref.h> | 41 | #include <linux/kref.h> /* struct kref */ |
| 42 | #include <linux/mutex.h> | 42 | #include <linux/mutex.h> /* struct mutex */ |
| 43 | #include <linux/err.h> | ||
| 44 | |||
| 45 | #include <linux/atomic.h> | ||
| 46 | 43 | ||
| 47 | #define CONFIGFS_ITEM_NAME_LEN 20 | 44 | #define CONFIGFS_ITEM_NAME_LEN 20 |
| 48 | 45 | ||
| @@ -228,7 +225,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \ | |||
| 228 | struct configfs_item_operations { | 225 | struct configfs_item_operations { |
| 229 | void (*release)(struct config_item *); | 226 | void (*release)(struct config_item *); |
| 230 | int (*allow_link)(struct config_item *src, struct config_item *target); | 227 | int (*allow_link)(struct config_item *src, struct config_item *target); |
| 231 | int (*drop_link)(struct config_item *src, struct config_item *target); | 228 | void (*drop_link)(struct config_item *src, struct config_item *target); |
| 232 | }; | 229 | }; |
| 233 | 230 | ||
| 234 | struct configfs_group_operations { | 231 | struct configfs_group_operations { |
diff --git a/include/linux/console.h b/include/linux/console.h index d530c4627e54..9c26c6685587 100644 --- a/include/linux/console.h +++ b/include/linux/console.h | |||
| @@ -28,9 +28,17 @@ struct tty_struct; | |||
| 28 | #define VT100ID "\033[?1;2c" | 28 | #define VT100ID "\033[?1;2c" |
| 29 | #define VT102ID "\033[?6c" | 29 | #define VT102ID "\033[?6c" |
| 30 | 30 | ||
| 31 | enum con_scroll { | ||
| 32 | SM_UP, | ||
| 33 | SM_DOWN, | ||
| 34 | }; | ||
| 35 | |||
| 31 | /** | 36 | /** |
| 32 | * struct consw - callbacks for consoles | 37 | * struct consw - callbacks for consoles |
| 33 | * | 38 | * |
| 39 | * @con_scroll: move lines from @top to @bottom in direction @dir by @lines. | ||
| 40 | * Return true if no generic handling should be done. | ||
| 41 | * Invoked by csi_M and printing to the console. | ||
| 34 | * @con_set_palette: sets the palette of the console to @table (optional) | 42 | * @con_set_palette: sets the palette of the console to @table (optional) |
| 35 | * @con_scrolldelta: the contents of the console should be scrolled by @lines. | 43 | * @con_scrolldelta: the contents of the console should be scrolled by @lines. |
| 36 | * Invoked by user. (optional) | 44 | * Invoked by user. (optional) |
| @@ -44,7 +52,9 @@ struct consw { | |||
| 44 | void (*con_putc)(struct vc_data *, int, int, int); | 52 | void (*con_putc)(struct vc_data *, int, int, int); |
| 45 | void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); | 53 | void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int); |
| 46 | void (*con_cursor)(struct vc_data *, int); | 54 | void (*con_cursor)(struct vc_data *, int); |
| 47 | int (*con_scroll)(struct vc_data *, int, int, int, int); | 55 | bool (*con_scroll)(struct vc_data *, unsigned int top, |
| 56 | unsigned int bottom, enum con_scroll dir, | ||
| 57 | unsigned int lines); | ||
| 48 | int (*con_switch)(struct vc_data *); | 58 | int (*con_switch)(struct vc_data *); |
| 49 | int (*con_blank)(struct vc_data *, int, int); | 59 | int (*con_blank)(struct vc_data *, int, int); |
| 50 | int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); | 60 | int (*con_font_set)(struct vc_data *, struct console_font *, unsigned); |
| @@ -99,10 +109,6 @@ static inline int con_debug_leave(void) | |||
| 99 | } | 109 | } |
| 100 | #endif | 110 | #endif |
| 101 | 111 | ||
| 102 | /* scroll */ | ||
| 103 | #define SM_UP (1) | ||
| 104 | #define SM_DOWN (2) | ||
| 105 | |||
| 106 | /* cursor */ | 112 | /* cursor */ |
| 107 | #define CM_DRAW (1) | 113 | #define CM_DRAW (1) |
| 108 | #define CM_ERASE (2) | 114 | #define CM_ERASE (2) |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b886dc17f2f3..21f9c74496e7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -57,9 +57,6 @@ struct notifier_block; | |||
| 57 | 57 | ||
| 58 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ | 58 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ |
| 59 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ | 59 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ |
| 60 | #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ | ||
| 61 | #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ | ||
| 62 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ | ||
| 63 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | 60 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ |
| 64 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug | 61 | #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug |
| 65 | * lock is dropped */ | 62 | * lock is dropped */ |
| @@ -80,87 +77,14 @@ struct notifier_block; | |||
| 80 | 77 | ||
| 81 | #ifdef CONFIG_SMP | 78 | #ifdef CONFIG_SMP |
| 82 | extern bool cpuhp_tasks_frozen; | 79 | extern bool cpuhp_tasks_frozen; |
| 83 | /* Need to know about CPUs going up/down? */ | ||
| 84 | #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) | ||
| 85 | #define cpu_notifier(fn, pri) { \ | ||
| 86 | static struct notifier_block fn##_nb = \ | ||
| 87 | { .notifier_call = fn, .priority = pri }; \ | ||
| 88 | register_cpu_notifier(&fn##_nb); \ | ||
| 89 | } | ||
| 90 | |||
| 91 | #define __cpu_notifier(fn, pri) { \ | ||
| 92 | static struct notifier_block fn##_nb = \ | ||
| 93 | { .notifier_call = fn, .priority = pri }; \ | ||
| 94 | __register_cpu_notifier(&fn##_nb); \ | ||
| 95 | } | ||
| 96 | #else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ | ||
| 97 | #define cpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 98 | #define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 99 | #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ | ||
| 100 | |||
| 101 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 102 | extern int register_cpu_notifier(struct notifier_block *nb); | ||
| 103 | extern int __register_cpu_notifier(struct notifier_block *nb); | ||
| 104 | extern void unregister_cpu_notifier(struct notifier_block *nb); | ||
| 105 | extern void __unregister_cpu_notifier(struct notifier_block *nb); | ||
| 106 | #else | ||
| 107 | |||
| 108 | #ifndef MODULE | ||
| 109 | extern int register_cpu_notifier(struct notifier_block *nb); | ||
| 110 | extern int __register_cpu_notifier(struct notifier_block *nb); | ||
| 111 | #else | ||
| 112 | static inline int register_cpu_notifier(struct notifier_block *nb) | ||
| 113 | { | ||
| 114 | return 0; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline int __register_cpu_notifier(struct notifier_block *nb) | ||
| 118 | { | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | #endif | ||
| 122 | |||
| 123 | static inline void unregister_cpu_notifier(struct notifier_block *nb) | ||
| 124 | { | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline void __unregister_cpu_notifier(struct notifier_block *nb) | ||
| 128 | { | ||
| 129 | } | ||
| 130 | #endif | ||
| 131 | |||
| 132 | int cpu_up(unsigned int cpu); | 80 | int cpu_up(unsigned int cpu); |
| 133 | void notify_cpu_starting(unsigned int cpu); | 81 | void notify_cpu_starting(unsigned int cpu); |
| 134 | extern void cpu_maps_update_begin(void); | 82 | extern void cpu_maps_update_begin(void); |
| 135 | extern void cpu_maps_update_done(void); | 83 | extern void cpu_maps_update_done(void); |
| 136 | 84 | ||
| 137 | #define cpu_notifier_register_begin cpu_maps_update_begin | ||
| 138 | #define cpu_notifier_register_done cpu_maps_update_done | ||
| 139 | |||
| 140 | #else /* CONFIG_SMP */ | 85 | #else /* CONFIG_SMP */ |
| 141 | #define cpuhp_tasks_frozen 0 | 86 | #define cpuhp_tasks_frozen 0 |
| 142 | 87 | ||
| 143 | #define cpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 144 | #define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 145 | |||
| 146 | static inline int register_cpu_notifier(struct notifier_block *nb) | ||
| 147 | { | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline int __register_cpu_notifier(struct notifier_block *nb) | ||
| 152 | { | ||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | static inline void unregister_cpu_notifier(struct notifier_block *nb) | ||
| 157 | { | ||
| 158 | } | ||
| 159 | |||
| 160 | static inline void __unregister_cpu_notifier(struct notifier_block *nb) | ||
| 161 | { | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline void cpu_maps_update_begin(void) | 88 | static inline void cpu_maps_update_begin(void) |
| 165 | { | 89 | { |
| 166 | } | 90 | } |
| @@ -169,14 +93,6 @@ static inline void cpu_maps_update_done(void) | |||
| 169 | { | 93 | { |
| 170 | } | 94 | } |
| 171 | 95 | ||
| 172 | static inline void cpu_notifier_register_begin(void) | ||
| 173 | { | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline void cpu_notifier_register_done(void) | ||
| 177 | { | ||
| 178 | } | ||
| 179 | |||
| 180 | #endif /* CONFIG_SMP */ | 96 | #endif /* CONFIG_SMP */ |
| 181 | extern struct bus_type cpu_subsys; | 97 | extern struct bus_type cpu_subsys; |
| 182 | 98 | ||
| @@ -189,12 +105,6 @@ extern void get_online_cpus(void); | |||
| 189 | extern void put_online_cpus(void); | 105 | extern void put_online_cpus(void); |
| 190 | extern void cpu_hotplug_disable(void); | 106 | extern void cpu_hotplug_disable(void); |
| 191 | extern void cpu_hotplug_enable(void); | 107 | extern void cpu_hotplug_enable(void); |
| 192 | #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) | ||
| 193 | #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) | ||
| 194 | #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) | ||
| 195 | #define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb) | ||
| 196 | #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) | ||
| 197 | #define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb) | ||
| 198 | void clear_tasks_mm_cpumask(int cpu); | 108 | void clear_tasks_mm_cpumask(int cpu); |
| 199 | int cpu_down(unsigned int cpu); | 109 | int cpu_down(unsigned int cpu); |
| 200 | 110 | ||
| @@ -206,13 +116,6 @@ static inline void cpu_hotplug_done(void) {} | |||
| 206 | #define put_online_cpus() do { } while (0) | 116 | #define put_online_cpus() do { } while (0) |
| 207 | #define cpu_hotplug_disable() do { } while (0) | 117 | #define cpu_hotplug_disable() do { } while (0) |
| 208 | #define cpu_hotplug_enable() do { } while (0) | 118 | #define cpu_hotplug_enable() do { } while (0) |
| 209 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 210 | #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
| 211 | /* These aren't inline functions due to a GCC bug. */ | ||
| 212 | #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) | ||
| 213 | #define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) | ||
| 214 | #define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) | ||
| 215 | #define __unregister_hotcpu_notifier(nb) ({ (void)(nb); }) | ||
| 216 | #endif /* CONFIG_HOTPLUG_CPU */ | 119 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 217 | 120 | ||
| 218 | #ifdef CONFIG_PM_SLEEP_SMP | 121 | #ifdef CONFIG_PM_SLEEP_SMP |
| @@ -245,6 +148,8 @@ void arch_cpu_idle_dead(void); | |||
| 245 | int cpu_report_state(int cpu); | 148 | int cpu_report_state(int cpu); |
| 246 | int cpu_check_up_prepare(int cpu); | 149 | int cpu_check_up_prepare(int cpu); |
| 247 | void cpu_set_state_online(int cpu); | 150 | void cpu_set_state_online(int cpu); |
| 151 | void play_idle(unsigned long duration_ms); | ||
| 152 | |||
| 248 | #ifdef CONFIG_HOTPLUG_CPU | 153 | #ifdef CONFIG_HOTPLUG_CPU |
| 249 | bool cpu_wait_death(unsigned int cpu, int seconds); | 154 | bool cpu_wait_death(unsigned int cpu, int seconds); |
| 250 | bool cpu_report_death(void); | 155 | bool cpu_report_death(void); |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32dc0cbd51ca..7e05c5e4e45c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -175,7 +175,7 @@ void disable_cpufreq(void); | |||
| 175 | 175 | ||
| 176 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); | 176 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); |
| 177 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); | 177 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); |
| 178 | int cpufreq_update_policy(unsigned int cpu); | 178 | void cpufreq_update_policy(unsigned int cpu); |
| 179 | bool have_governor_per_policy(void); | 179 | bool have_governor_per_policy(void); |
| 180 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); | 180 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); |
| 181 | void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); | 181 | void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); |
| @@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL) | |||
| 234 | static struct freq_attr _name = \ | 234 | static struct freq_attr _name = \ |
| 235 | __ATTR(_name, 0644, show_##_name, store_##_name) | 235 | __ATTR(_name, 0644, show_##_name, store_##_name) |
| 236 | 236 | ||
| 237 | #define cpufreq_freq_attr_wo(_name) \ | ||
| 238 | static struct freq_attr _name = \ | ||
| 239 | __ATTR(_name, 0200, NULL, store_##_name) | ||
| 240 | |||
| 237 | struct global_attr { | 241 | struct global_attr { |
| 238 | struct attribute attr; | 242 | struct attribute attr; |
| 239 | ssize_t (*show)(struct kobject *kobj, | 243 | ssize_t (*show)(struct kobject *kobj, |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index afe641c02dca..20bfefbe7594 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
| @@ -16,9 +16,11 @@ enum cpuhp_state { | |||
| 16 | CPUHP_PERF_SUPERH, | 16 | CPUHP_PERF_SUPERH, |
| 17 | CPUHP_X86_HPET_DEAD, | 17 | CPUHP_X86_HPET_DEAD, |
| 18 | CPUHP_X86_APB_DEAD, | 18 | CPUHP_X86_APB_DEAD, |
| 19 | CPUHP_X86_MCE_DEAD, | ||
| 19 | CPUHP_VIRT_NET_DEAD, | 20 | CPUHP_VIRT_NET_DEAD, |
| 20 | CPUHP_SLUB_DEAD, | 21 | CPUHP_SLUB_DEAD, |
| 21 | CPUHP_MM_WRITEBACK_DEAD, | 22 | CPUHP_MM_WRITEBACK_DEAD, |
| 23 | CPUHP_MM_VMSTAT_DEAD, | ||
| 22 | CPUHP_SOFTIRQ_DEAD, | 24 | CPUHP_SOFTIRQ_DEAD, |
| 23 | CPUHP_NET_MVNETA_DEAD, | 25 | CPUHP_NET_MVNETA_DEAD, |
| 24 | CPUHP_CPUIDLE_DEAD, | 26 | CPUHP_CPUIDLE_DEAD, |
| @@ -30,6 +32,18 @@ enum cpuhp_state { | |||
| 30 | CPUHP_ACPI_CPUDRV_DEAD, | 32 | CPUHP_ACPI_CPUDRV_DEAD, |
| 31 | CPUHP_S390_PFAULT_DEAD, | 33 | CPUHP_S390_PFAULT_DEAD, |
| 32 | CPUHP_BLK_MQ_DEAD, | 34 | CPUHP_BLK_MQ_DEAD, |
| 35 | CPUHP_FS_BUFF_DEAD, | ||
| 36 | CPUHP_PRINTK_DEAD, | ||
| 37 | CPUHP_MM_MEMCQ_DEAD, | ||
| 38 | CPUHP_PERCPU_CNT_DEAD, | ||
| 39 | CPUHP_RADIX_DEAD, | ||
| 40 | CPUHP_PAGE_ALLOC_DEAD, | ||
| 41 | CPUHP_NET_DEV_DEAD, | ||
| 42 | CPUHP_PCI_XGENE_DEAD, | ||
| 43 | CPUHP_IOMMU_INTEL_DEAD, | ||
| 44 | CPUHP_LUSTRE_CFS_DEAD, | ||
| 45 | CPUHP_SCSI_BNX2FC_DEAD, | ||
| 46 | CPUHP_SCSI_BNX2I_DEAD, | ||
| 33 | CPUHP_WORKQUEUE_PREP, | 47 | CPUHP_WORKQUEUE_PREP, |
| 34 | CPUHP_POWER_NUMA_PREPARE, | 48 | CPUHP_POWER_NUMA_PREPARE, |
| 35 | CPUHP_HRTIMERS_PREPARE, | 49 | CPUHP_HRTIMERS_PREPARE, |
| @@ -45,12 +59,20 @@ enum cpuhp_state { | |||
| 45 | CPUHP_POWERPC_MMU_CTX_PREPARE, | 59 | CPUHP_POWERPC_MMU_CTX_PREPARE, |
| 46 | CPUHP_XEN_PREPARE, | 60 | CPUHP_XEN_PREPARE, |
| 47 | CPUHP_XEN_EVTCHN_PREPARE, | 61 | CPUHP_XEN_EVTCHN_PREPARE, |
| 48 | CPUHP_NOTIFY_PREPARE, | ||
| 49 | CPUHP_ARM_SHMOBILE_SCU_PREPARE, | 62 | CPUHP_ARM_SHMOBILE_SCU_PREPARE, |
| 50 | CPUHP_SH_SH3X_PREPARE, | 63 | CPUHP_SH_SH3X_PREPARE, |
| 51 | CPUHP_BLK_MQ_PREPARE, | 64 | CPUHP_BLK_MQ_PREPARE, |
| 65 | CPUHP_NET_FLOW_PREPARE, | ||
| 66 | CPUHP_TOPOLOGY_PREPARE, | ||
| 67 | CPUHP_NET_IUCV_PREPARE, | ||
| 68 | CPUHP_ARM_BL_PREPARE, | ||
| 69 | CPUHP_TRACE_RB_PREPARE, | ||
| 70 | CPUHP_MM_ZS_PREPARE, | ||
| 71 | CPUHP_MM_ZSWP_MEM_PREPARE, | ||
| 72 | CPUHP_MM_ZSWP_POOL_PREPARE, | ||
| 73 | CPUHP_KVM_PPC_BOOK3S_PREPARE, | ||
| 74 | CPUHP_ZCOMP_PREPARE, | ||
| 52 | CPUHP_TIMERS_DEAD, | 75 | CPUHP_TIMERS_DEAD, |
| 53 | CPUHP_NOTF_ERR_INJ_PREPARE, | ||
| 54 | CPUHP_MIPS_SOC_PREPARE, | 76 | CPUHP_MIPS_SOC_PREPARE, |
| 55 | CPUHP_BRINGUP_CPU, | 77 | CPUHP_BRINGUP_CPU, |
| 56 | CPUHP_AP_IDLE_DEAD, | 78 | CPUHP_AP_IDLE_DEAD, |
| @@ -58,10 +80,8 @@ enum cpuhp_state { | |||
| 58 | CPUHP_AP_SCHED_STARTING, | 80 | CPUHP_AP_SCHED_STARTING, |
| 59 | CPUHP_AP_RCUTREE_DYING, | 81 | CPUHP_AP_RCUTREE_DYING, |
| 60 | CPUHP_AP_IRQ_GIC_STARTING, | 82 | CPUHP_AP_IRQ_GIC_STARTING, |
| 61 | CPUHP_AP_IRQ_GICV3_STARTING, | ||
| 62 | CPUHP_AP_IRQ_HIP04_STARTING, | 83 | CPUHP_AP_IRQ_HIP04_STARTING, |
| 63 | CPUHP_AP_IRQ_ARMADA_XP_STARTING, | 84 | CPUHP_AP_IRQ_ARMADA_XP_STARTING, |
| 64 | CPUHP_AP_IRQ_ARMADA_CASC_STARTING, | ||
| 65 | CPUHP_AP_IRQ_BCM2836_STARTING, | 85 | CPUHP_AP_IRQ_BCM2836_STARTING, |
| 66 | CPUHP_AP_ARM_MVEBU_COHERENCY, | 86 | CPUHP_AP_ARM_MVEBU_COHERENCY, |
| 67 | CPUHP_AP_PERF_X86_UNCORE_STARTING, | 87 | CPUHP_AP_PERF_X86_UNCORE_STARTING, |
| @@ -80,7 +100,6 @@ enum cpuhp_state { | |||
| 80 | CPUHP_AP_ARM_L2X0_STARTING, | 100 | CPUHP_AP_ARM_L2X0_STARTING, |
| 81 | CPUHP_AP_ARM_ARCH_TIMER_STARTING, | 101 | CPUHP_AP_ARM_ARCH_TIMER_STARTING, |
| 82 | CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, | 102 | CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, |
| 83 | CPUHP_AP_DUMMY_TIMER_STARTING, | ||
| 84 | CPUHP_AP_JCORE_TIMER_STARTING, | 103 | CPUHP_AP_JCORE_TIMER_STARTING, |
| 85 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, | 104 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, |
| 86 | CPUHP_AP_ARM_TWD_STARTING, | 105 | CPUHP_AP_ARM_TWD_STARTING, |
| @@ -94,9 +113,10 @@ enum cpuhp_state { | |||
| 94 | CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, | 113 | CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, |
| 95 | CPUHP_AP_KVM_ARM_VGIC_STARTING, | 114 | CPUHP_AP_KVM_ARM_VGIC_STARTING, |
| 96 | CPUHP_AP_KVM_ARM_TIMER_STARTING, | 115 | CPUHP_AP_KVM_ARM_TIMER_STARTING, |
| 116 | /* Must be the last timer callback */ | ||
| 117 | CPUHP_AP_DUMMY_TIMER_STARTING, | ||
| 97 | CPUHP_AP_ARM_XEN_STARTING, | 118 | CPUHP_AP_ARM_XEN_STARTING, |
| 98 | CPUHP_AP_ARM_CORESIGHT_STARTING, | 119 | CPUHP_AP_ARM_CORESIGHT_STARTING, |
| 99 | CPUHP_AP_ARM_CORESIGHT4_STARTING, | ||
| 100 | CPUHP_AP_ARM64_ISNDEP_STARTING, | 120 | CPUHP_AP_ARM64_ISNDEP_STARTING, |
| 101 | CPUHP_AP_SMPCFD_DYING, | 121 | CPUHP_AP_SMPCFD_DYING, |
| 102 | CPUHP_AP_X86_TBOOT_DYING, | 122 | CPUHP_AP_X86_TBOOT_DYING, |
| @@ -120,7 +140,6 @@ enum cpuhp_state { | |||
| 120 | CPUHP_AP_PERF_ARM_L2X0_ONLINE, | 140 | CPUHP_AP_PERF_ARM_L2X0_ONLINE, |
| 121 | CPUHP_AP_WORKQUEUE_ONLINE, | 141 | CPUHP_AP_WORKQUEUE_ONLINE, |
| 122 | CPUHP_AP_RCUTREE_ONLINE, | 142 | CPUHP_AP_RCUTREE_ONLINE, |
| 123 | CPUHP_AP_NOTIFY_ONLINE, | ||
| 124 | CPUHP_AP_ONLINE_DYN, | 143 | CPUHP_AP_ONLINE_DYN, |
| 125 | CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, | 144 | CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, |
| 126 | CPUHP_AP_X86_HPET_ONLINE, | 145 | CPUHP_AP_X86_HPET_ONLINE, |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb31373c3478..da346f2817a8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -74,6 +74,7 @@ struct cpuidle_driver_kobj; | |||
| 74 | struct cpuidle_device { | 74 | struct cpuidle_device { |
| 75 | unsigned int registered:1; | 75 | unsigned int registered:1; |
| 76 | unsigned int enabled:1; | 76 | unsigned int enabled:1; |
| 77 | unsigned int use_deepest_state:1; | ||
| 77 | unsigned int cpu; | 78 | unsigned int cpu; |
| 78 | 79 | ||
| 79 | int last_residency; | 80 | int last_residency; |
| @@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | |||
| 192 | static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } | 193 | static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } |
| 193 | #endif | 194 | #endif |
| 194 | 195 | ||
| 195 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) | 196 | #ifdef CONFIG_CPU_IDLE |
| 196 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | 197 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
| 197 | struct cpuidle_device *dev); | 198 | struct cpuidle_device *dev); |
| 198 | extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, | 199 | extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, |
| 199 | struct cpuidle_device *dev); | 200 | struct cpuidle_device *dev); |
| 201 | extern void cpuidle_use_deepest_state(bool enable); | ||
| 200 | #else | 202 | #else |
| 201 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | 203 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
| 202 | struct cpuidle_device *dev) | 204 | struct cpuidle_device *dev) |
| @@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | |||
| 204 | static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, | 206 | static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, |
| 205 | struct cpuidle_device *dev) | 207 | struct cpuidle_device *dev) |
| 206 | {return -ENODEV; } | 208 | {return -ENODEV; } |
| 209 | static inline void cpuidle_use_deepest_state(bool enable) | ||
| 210 | { | ||
| 211 | } | ||
| 207 | #endif | 212 | #endif |
| 208 | 213 | ||
| 209 | /* kernel/sched/idle.c */ | 214 | /* kernel/sched/idle.c */ |
| @@ -235,8 +240,6 @@ struct cpuidle_governor { | |||
| 235 | int (*select) (struct cpuidle_driver *drv, | 240 | int (*select) (struct cpuidle_driver *drv, |
| 236 | struct cpuidle_device *dev); | 241 | struct cpuidle_device *dev); |
| 237 | void (*reflect) (struct cpuidle_device *dev, int index); | 242 | void (*reflect) (struct cpuidle_device *dev, int index); |
| 238 | |||
| 239 | struct module *owner; | ||
| 240 | }; | 243 | }; |
| 241 | 244 | ||
| 242 | #ifdef CONFIG_CPU_IDLE | 245 | #ifdef CONFIG_CPU_IDLE |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index da7fbf1cdd56..c717f5ea88cb 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
| @@ -722,6 +722,11 @@ void init_cpu_present(const struct cpumask *src); | |||
| 722 | void init_cpu_possible(const struct cpumask *src); | 722 | void init_cpu_possible(const struct cpumask *src); |
| 723 | void init_cpu_online(const struct cpumask *src); | 723 | void init_cpu_online(const struct cpumask *src); |
| 724 | 724 | ||
| 725 | static inline void reset_cpu_possible_mask(void) | ||
| 726 | { | ||
| 727 | bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); | ||
| 728 | } | ||
| 729 | |||
| 725 | static inline void | 730 | static inline void |
| 726 | set_cpu_possible(unsigned int cpu, bool possible) | 731 | set_cpu_possible(unsigned int cpu, bool possible) |
| 727 | { | 732 | { |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cee5551625b..c0b0cf3d2d2f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 | 50 | #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 |
| 51 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 | 51 | #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 |
| 52 | #define CRYPTO_ALG_TYPE_KPP 0x00000008 | 52 | #define CRYPTO_ALG_TYPE_KPP 0x00000008 |
| 53 | #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a | ||
| 54 | #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b | ||
| 53 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c | 55 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c |
| 54 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d | 56 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d |
| 55 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e | 57 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e |
| @@ -60,6 +62,7 @@ | |||
| 60 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e | 62 | #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e |
| 61 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e | 63 | #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e |
| 62 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c | 64 | #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c |
| 65 | #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e | ||
| 63 | 66 | ||
| 64 | #define CRYPTO_ALG_LARVAL 0x00000010 | 67 | #define CRYPTO_ALG_LARVAL 0x00000010 |
| 65 | #define CRYPTO_ALG_DEAD 0x00000020 | 68 | #define CRYPTO_ALG_DEAD 0x00000020 |
| @@ -87,7 +90,7 @@ | |||
| 87 | #define CRYPTO_ALG_TESTED 0x00000400 | 90 | #define CRYPTO_ALG_TESTED 0x00000400 |
| 88 | 91 | ||
| 89 | /* | 92 | /* |
| 90 | * Set if the algorithm is an instance that is build from templates. | 93 | * Set if the algorithm is an instance that is built from templates. |
| 91 | */ | 94 | */ |
| 92 | #define CRYPTO_ALG_INSTANCE 0x00000800 | 95 | #define CRYPTO_ALG_INSTANCE 0x00000800 |
| 93 | 96 | ||
| @@ -960,7 +963,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) | |||
| 960 | * ablkcipher_request_set_callback() - set asynchronous callback function | 963 | * ablkcipher_request_set_callback() - set asynchronous callback function |
| 961 | * @req: request handle | 964 | * @req: request handle |
| 962 | * @flags: specify zero or an ORing of the flags | 965 | * @flags: specify zero or an ORing of the flags |
| 963 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | 966 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and |
| 964 | * increase the wait queue beyond the initial maximum size; | 967 | * increase the wait queue beyond the initial maximum size; |
| 965 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | 968 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep |
| 966 | * @compl: callback function pointer to be registered with the request handle | 969 | * @compl: callback function pointer to be registered with the request handle |
| @@ -977,7 +980,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) | |||
| 977 | * cipher operation completes. | 980 | * cipher operation completes. |
| 978 | * | 981 | * |
| 979 | * The callback function is registered with the ablkcipher_request handle and | 982 | * The callback function is registered with the ablkcipher_request handle and |
| 980 | * must comply with the following template | 983 | * must comply with the following template:: |
| 981 | * | 984 | * |
| 982 | * void callback_function(struct crypto_async_request *req, int error) | 985 | * void callback_function(struct crypto_async_request *req, int error) |
| 983 | */ | 986 | */ |
diff --git a/include/linux/dax.h b/include/linux/dax.h index add6c4bc568f..f97bcfe79472 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
| @@ -8,25 +8,44 @@ | |||
| 8 | 8 | ||
| 9 | struct iomap_ops; | 9 | struct iomap_ops; |
| 10 | 10 | ||
| 11 | /* We use lowest available exceptional entry bit for locking */ | 11 | /* |
| 12 | * We use lowest available bit in exceptional entry for locking, one bit for | ||
| 13 | * the entry size (PMD) and two more to tell us if the entry is a huge zero | ||
| 14 | * page (HZP) or an empty entry that is just used for locking. In total four | ||
| 15 | * special bits. | ||
| 16 | * | ||
| 17 | * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and | ||
| 18 | * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem | ||
| 19 | * block allocation. | ||
| 20 | */ | ||
| 21 | #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) | ||
| 12 | #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) | 22 | #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) |
| 23 | #define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) | ||
| 24 | #define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) | ||
| 25 | #define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) | ||
| 13 | 26 | ||
| 14 | ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, | 27 | static inline unsigned long dax_radix_sector(void *entry) |
| 28 | { | ||
| 29 | return (unsigned long)entry >> RADIX_DAX_SHIFT; | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) | ||
| 33 | { | ||
| 34 | return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | | ||
| 35 | ((unsigned long)sector << RADIX_DAX_SHIFT) | | ||
| 36 | RADIX_DAX_ENTRY_LOCK); | ||
| 37 | } | ||
| 38 | |||
| 39 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, | ||
| 15 | struct iomap_ops *ops); | 40 | struct iomap_ops *ops); |
| 16 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, | 41 | int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, |
| 17 | get_block_t, dio_iodone_t, int flags); | ||
| 18 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | ||
| 19 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | ||
| 20 | int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | ||
| 21 | struct iomap_ops *ops); | 42 | struct iomap_ops *ops); |
| 22 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | ||
| 23 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); | 43 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
| 24 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, | 44 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, |
| 25 | pgoff_t index, bool wake_all); | 45 | pgoff_t index, void *entry, bool wake_all); |
| 26 | 46 | ||
| 27 | #ifdef CONFIG_FS_DAX | 47 | #ifdef CONFIG_FS_DAX |
| 28 | struct page *read_dax_sector(struct block_device *bdev, sector_t n); | 48 | struct page *read_dax_sector(struct block_device *bdev, sector_t n); |
| 29 | void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); | ||
| 30 | int __dax_zero_page_range(struct block_device *bdev, sector_t sector, | 49 | int __dax_zero_page_range(struct block_device *bdev, sector_t sector, |
| 31 | unsigned int offset, unsigned int length); | 50 | unsigned int offset, unsigned int length); |
| 32 | #else | 51 | #else |
| @@ -35,12 +54,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev, | |||
| 35 | { | 54 | { |
| 36 | return ERR_PTR(-ENXIO); | 55 | return ERR_PTR(-ENXIO); |
| 37 | } | 56 | } |
| 38 | /* Shouldn't ever be called when dax is disabled. */ | ||
| 39 | static inline void dax_unlock_mapping_entry(struct address_space *mapping, | ||
| 40 | pgoff_t index) | ||
| 41 | { | ||
| 42 | BUG(); | ||
| 43 | } | ||
| 44 | static inline int __dax_zero_page_range(struct block_device *bdev, | 57 | static inline int __dax_zero_page_range(struct block_device *bdev, |
| 45 | sector_t sector, unsigned int offset, unsigned int length) | 58 | sector_t sector, unsigned int offset, unsigned int length) |
| 46 | { | 59 | { |
| @@ -48,18 +61,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev, | |||
| 48 | } | 61 | } |
| 49 | #endif | 62 | #endif |
| 50 | 63 | ||
| 51 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) | 64 | #ifdef CONFIG_FS_DAX_PMD |
| 52 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | 65 | static inline unsigned int dax_radix_order(void *entry) |
| 53 | unsigned int flags, get_block_t); | 66 | { |
| 67 | if ((unsigned long)entry & RADIX_DAX_PMD) | ||
| 68 | return PMD_SHIFT - PAGE_SHIFT; | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, | ||
| 72 | pmd_t *pmd, unsigned int flags, struct iomap_ops *ops); | ||
| 54 | #else | 73 | #else |
| 55 | static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, | 74 | static inline unsigned int dax_radix_order(void *entry) |
| 56 | pmd_t *pmd, unsigned int flags, get_block_t gb) | 75 | { |
| 76 | return 0; | ||
| 77 | } | ||
| 78 | static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, | ||
| 79 | unsigned long address, pmd_t *pmd, unsigned int flags, | ||
| 80 | struct iomap_ops *ops) | ||
| 57 | { | 81 | { |
| 58 | return VM_FAULT_FALLBACK; | 82 | return VM_FAULT_FALLBACK; |
| 59 | } | 83 | } |
| 60 | #endif | 84 | #endif |
| 61 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | 85 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); |
| 62 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) | ||
| 63 | 86 | ||
| 64 | static inline bool vma_is_dax(struct vm_area_struct *vma) | 87 | static inline bool vma_is_dax(struct vm_area_struct *vma) |
| 65 | { | 88 | { |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 5beed7b30561..c965e4469499 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -139,7 +139,7 @@ struct dentry_operations { | |||
| 139 | void (*d_iput)(struct dentry *, struct inode *); | 139 | void (*d_iput)(struct dentry *, struct inode *); |
| 140 | char *(*d_dname)(struct dentry *, char *, int); | 140 | char *(*d_dname)(struct dentry *, char *, int); |
| 141 | struct vfsmount *(*d_automount)(struct path *); | 141 | struct vfsmount *(*d_automount)(struct path *); |
| 142 | int (*d_manage)(struct dentry *, bool); | 142 | int (*d_manage)(const struct path *, bool); |
| 143 | struct dentry *(*d_real)(struct dentry *, const struct inode *, | 143 | struct dentry *(*d_real)(struct dentry *, const struct inode *, |
| 144 | unsigned int); | 144 | unsigned int); |
| 145 | } ____cacheline_aligned; | 145 | } ____cacheline_aligned; |
| @@ -254,7 +254,7 @@ extern struct dentry *d_find_alias(struct inode *); | |||
| 254 | extern void d_prune_aliases(struct inode *); | 254 | extern void d_prune_aliases(struct inode *); |
| 255 | 255 | ||
| 256 | /* test whether we have any submounts in a subdir tree */ | 256 | /* test whether we have any submounts in a subdir tree */ |
| 257 | extern int have_submounts(struct dentry *); | 257 | extern int path_has_submounts(const struct path *); |
| 258 | 258 | ||
| 259 | /* | 259 | /* |
| 260 | * This adds the entry to the hash queues. | 260 | * This adds the entry to the hash queues. |
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h index 5ac3bdd5cee6..699b6c499c4f 100644 --- a/include/linux/dcookies.h +++ b/include/linux/dcookies.h | |||
| @@ -44,7 +44,7 @@ void dcookie_unregister(struct dcookie_user * user); | |||
| 44 | * | 44 | * |
| 45 | * Returns 0 on success, with *cookie filled in | 45 | * Returns 0 on success, with *cookie filled in |
| 46 | */ | 46 | */ |
| 47 | int get_dcookie(struct path *path, unsigned long *cookie); | 47 | int get_dcookie(const struct path *path, unsigned long *cookie); |
| 48 | 48 | ||
| 49 | #else | 49 | #else |
| 50 | 50 | ||
| @@ -58,7 +58,7 @@ static inline void dcookie_unregister(struct dcookie_user * user) | |||
| 58 | return; | 58 | return; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline int get_dcookie(struct path *path, unsigned long *cookie) | 61 | static inline int get_dcookie(const struct path *path, unsigned long *cookie) |
| 62 | { | 62 | { |
| 63 | return -ENOSYS; | 63 | return -ENOSYS; |
| 64 | } | 64 | } |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d3f0d1aec73..014cc564d1c4 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
| @@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu; | |||
| 52 | * Must only be called under the protection established by | 52 | * Must only be called under the protection established by |
| 53 | * debugfs_use_file_start(). | 53 | * debugfs_use_file_start(). |
| 54 | */ | 54 | */ |
| 55 | static inline const struct file_operations *debugfs_real_fops(struct file *filp) | 55 | static inline const struct file_operations * |
| 56 | debugfs_real_fops(const struct file *filp) | ||
| 56 | __must_hold(&debugfs_srcu) | 57 | __must_hold(&debugfs_srcu) |
| 57 | { | 58 | { |
| 58 | /* | 59 | /* |
| @@ -62,6 +63,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp) | |||
| 62 | return filp->f_path.dentry->d_fsdata; | 63 | return filp->f_path.dentry->d_fsdata; |
| 63 | } | 64 | } |
| 64 | 65 | ||
| 66 | #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ | ||
| 67 | static int __fops ## _open(struct inode *inode, struct file *file) \ | ||
| 68 | { \ | ||
| 69 | __simple_attr_check_format(__fmt, 0ull); \ | ||
| 70 | return simple_attr_open(inode, file, __get, __set, __fmt); \ | ||
| 71 | } \ | ||
| 72 | static const struct file_operations __fops = { \ | ||
| 73 | .owner = THIS_MODULE, \ | ||
| 74 | .open = __fops ## _open, \ | ||
| 75 | .release = simple_attr_release, \ | ||
| 76 | .read = debugfs_attr_read, \ | ||
| 77 | .write = debugfs_attr_write, \ | ||
| 78 | .llseek = generic_file_llseek, \ | ||
| 79 | } | ||
| 80 | |||
| 65 | #if defined(CONFIG_DEBUG_FS) | 81 | #if defined(CONFIG_DEBUG_FS) |
| 66 | 82 | ||
| 67 | struct dentry *debugfs_create_file(const char *name, umode_t mode, | 83 | struct dentry *debugfs_create_file(const char *name, umode_t mode, |
| @@ -99,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf, | |||
| 99 | ssize_t debugfs_attr_write(struct file *file, const char __user *buf, | 115 | ssize_t debugfs_attr_write(struct file *file, const char __user *buf, |
| 100 | size_t len, loff_t *ppos); | 116 | size_t len, loff_t *ppos); |
| 101 | 117 | ||
| 102 | #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ | ||
| 103 | static int __fops ## _open(struct inode *inode, struct file *file) \ | ||
| 104 | { \ | ||
| 105 | __simple_attr_check_format(__fmt, 0ull); \ | ||
| 106 | return simple_attr_open(inode, file, __get, __set, __fmt); \ | ||
| 107 | } \ | ||
| 108 | static const struct file_operations __fops = { \ | ||
| 109 | .owner = THIS_MODULE, \ | ||
| 110 | .open = __fops ## _open, \ | ||
| 111 | .release = simple_attr_release, \ | ||
| 112 | .read = debugfs_attr_read, \ | ||
| 113 | .write = debugfs_attr_write, \ | ||
| 114 | .llseek = generic_file_llseek, \ | ||
| 115 | } | ||
| 116 | |||
| 117 | struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, | 118 | struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
| 118 | struct dentry *new_dir, const char *new_name); | 119 | struct dentry *new_dir, const char *new_name); |
| 119 | 120 | ||
| @@ -233,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx) | |||
| 233 | __releases(&debugfs_srcu) | 234 | __releases(&debugfs_srcu) |
| 234 | { } | 235 | { } |
| 235 | 236 | ||
| 236 | #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ | 237 | static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf, |
| 237 | static const struct file_operations __fops = { 0 } | 238 | size_t len, loff_t *ppos) |
| 239 | { | ||
| 240 | return -ENODEV; | ||
| 241 | } | ||
| 242 | |||
| 243 | static inline ssize_t debugfs_attr_write(struct file *file, | ||
| 244 | const char __user *buf, | ||
| 245 | size_t len, loff_t *ppos) | ||
| 246 | { | ||
| 247 | return -ENODEV; | ||
| 248 | } | ||
| 238 | 249 | ||
| 239 | static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, | 250 | static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
| 240 | struct dentry *new_dir, char *new_name) | 251 | struct dentry *new_dir, char *new_name) |
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 7adf6cc4b305..c35d0c0e0ada 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/devfreq.h> | 20 | #include <linux/devfreq.h> |
| 21 | #include <linux/thermal.h> | 21 | #include <linux/thermal.h> |
| 22 | 22 | ||
| 23 | #ifdef CONFIG_DEVFREQ_THERMAL | ||
| 24 | 23 | ||
| 25 | /** | 24 | /** |
| 26 | * struct devfreq_cooling_power - Devfreq cooling power ops | 25 | * struct devfreq_cooling_power - Devfreq cooling power ops |
| @@ -37,12 +36,16 @@ | |||
| 37 | * @dyn_power_coeff * frequency * voltage^2 | 36 | * @dyn_power_coeff * frequency * voltage^2 |
| 38 | */ | 37 | */ |
| 39 | struct devfreq_cooling_power { | 38 | struct devfreq_cooling_power { |
| 40 | unsigned long (*get_static_power)(unsigned long voltage); | 39 | unsigned long (*get_static_power)(struct devfreq *devfreq, |
| 41 | unsigned long (*get_dynamic_power)(unsigned long freq, | 40 | unsigned long voltage); |
| 41 | unsigned long (*get_dynamic_power)(struct devfreq *devfreq, | ||
| 42 | unsigned long freq, | ||
| 42 | unsigned long voltage); | 43 | unsigned long voltage); |
| 43 | unsigned long dyn_power_coeff; | 44 | unsigned long dyn_power_coeff; |
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 47 | #ifdef CONFIG_DEVFREQ_THERMAL | ||
| 48 | |||
| 46 | struct thermal_cooling_device * | 49 | struct thermal_cooling_device * |
| 47 | of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, | 50 | of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, |
| 48 | struct devfreq_cooling_power *dfc_power); | 51 | struct devfreq_cooling_power *dfc_power); |
diff --git a/include/linux/device.h b/include/linux/device.h index bc41e87a969b..491b4c0ca633 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys, | |||
| 362 | * @name: Name of the class. | 362 | * @name: Name of the class. |
| 363 | * @owner: The module owner. | 363 | * @owner: The module owner. |
| 364 | * @class_attrs: Default attributes of this class. | 364 | * @class_attrs: Default attributes of this class. |
| 365 | * @class_groups: Default attributes of this class. | ||
| 365 | * @dev_groups: Default attributes of the devices that belong to the class. | 366 | * @dev_groups: Default attributes of the devices that belong to the class. |
| 366 | * @dev_kobj: The kobject that represents this class and links it into the hierarchy. | 367 | * @dev_kobj: The kobject that represents this class and links it into the hierarchy. |
| 367 | * @dev_uevent: Called when a device is added, removed from this class, or a | 368 | * @dev_uevent: Called when a device is added, removed from this class, or a |
| @@ -390,6 +391,7 @@ struct class { | |||
| 390 | struct module *owner; | 391 | struct module *owner; |
| 391 | 392 | ||
| 392 | struct class_attribute *class_attrs; | 393 | struct class_attribute *class_attrs; |
| 394 | const struct attribute_group **class_groups; | ||
| 393 | const struct attribute_group **dev_groups; | 395 | const struct attribute_group **dev_groups; |
| 394 | struct kobject *dev_kobj; | 396 | struct kobject *dev_kobj; |
| 395 | 397 | ||
| @@ -465,6 +467,8 @@ struct class_attribute { | |||
| 465 | struct class_attribute class_attr_##_name = __ATTR_RW(_name) | 467 | struct class_attribute class_attr_##_name = __ATTR_RW(_name) |
| 466 | #define CLASS_ATTR_RO(_name) \ | 468 | #define CLASS_ATTR_RO(_name) \ |
| 467 | struct class_attribute class_attr_##_name = __ATTR_RO(_name) | 469 | struct class_attribute class_attr_##_name = __ATTR_RO(_name) |
| 470 | #define CLASS_ATTR_WO(_name) \ | ||
| 471 | struct class_attribute class_attr_##_name = __ATTR_WO(_name) | ||
| 468 | 472 | ||
| 469 | extern int __must_check class_create_file_ns(struct class *class, | 473 | extern int __must_check class_create_file_ns(struct class *class, |
| 470 | const struct class_attribute *attr, | 474 | const struct class_attribute *attr, |
| @@ -698,6 +702,25 @@ static inline int devm_add_action_or_reset(struct device *dev, | |||
| 698 | return ret; | 702 | return ret; |
| 699 | } | 703 | } |
| 700 | 704 | ||
| 705 | /** | ||
| 706 | * devm_alloc_percpu - Resource-managed alloc_percpu | ||
| 707 | * @dev: Device to allocate per-cpu memory for | ||
| 708 | * @type: Type to allocate per-cpu memory for | ||
| 709 | * | ||
| 710 | * Managed alloc_percpu. Per-cpu memory allocated with this function is | ||
| 711 | * automatically freed on driver detach. | ||
| 712 | * | ||
| 713 | * RETURNS: | ||
| 714 | * Pointer to allocated memory on success, NULL on failure. | ||
| 715 | */ | ||
| 716 | #define devm_alloc_percpu(dev, type) \ | ||
| 717 | ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ | ||
| 718 | __alignof__(type))) | ||
| 719 | |||
| 720 | void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, | ||
| 721 | size_t align); | ||
| 722 | void devm_free_percpu(struct device *dev, void __percpu *pdata); | ||
| 723 | |||
| 701 | struct device_dma_parameters { | 724 | struct device_dma_parameters { |
| 702 | /* | 725 | /* |
| 703 | * a low level driver may set these to teach IOMMU code about | 726 | * a low level driver may set these to teach IOMMU code about |
| @@ -708,6 +731,87 @@ struct device_dma_parameters { | |||
| 708 | }; | 731 | }; |
| 709 | 732 | ||
| 710 | /** | 733 | /** |
| 734 | * enum device_link_state - Device link states. | ||
| 735 | * @DL_STATE_NONE: The presence of the drivers is not being tracked. | ||
| 736 | * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. | ||
| 737 | * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. | ||
| 738 | * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). | ||
| 739 | * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. | ||
| 740 | * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. | ||
| 741 | */ | ||
| 742 | enum device_link_state { | ||
| 743 | DL_STATE_NONE = -1, | ||
| 744 | DL_STATE_DORMANT = 0, | ||
| 745 | DL_STATE_AVAILABLE, | ||
| 746 | DL_STATE_CONSUMER_PROBE, | ||
| 747 | DL_STATE_ACTIVE, | ||
| 748 | DL_STATE_SUPPLIER_UNBIND, | ||
| 749 | }; | ||
| 750 | |||
| 751 | /* | ||
| 752 | * Device link flags. | ||
| 753 | * | ||
| 754 | * STATELESS: The core won't track the presence of supplier/consumer drivers. | ||
| 755 | * AUTOREMOVE: Remove this link automatically on consumer driver unbind. | ||
| 756 | * PM_RUNTIME: If set, the runtime PM framework will use this link. | ||
| 757 | * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. | ||
| 758 | */ | ||
| 759 | #define DL_FLAG_STATELESS BIT(0) | ||
| 760 | #define DL_FLAG_AUTOREMOVE BIT(1) | ||
| 761 | #define DL_FLAG_PM_RUNTIME BIT(2) | ||
| 762 | #define DL_FLAG_RPM_ACTIVE BIT(3) | ||
| 763 | |||
| 764 | /** | ||
| 765 | * struct device_link - Device link representation. | ||
| 766 | * @supplier: The device on the supplier end of the link. | ||
| 767 | * @s_node: Hook to the supplier device's list of links to consumers. | ||
| 768 | * @consumer: The device on the consumer end of the link. | ||
| 769 | * @c_node: Hook to the consumer device's list of links to suppliers. | ||
| 770 | * @status: The state of the link (with respect to the presence of drivers). | ||
| 771 | * @flags: Link flags. | ||
| 772 | * @rpm_active: Whether or not the consumer device is runtime-PM-active. | ||
| 773 | * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. | ||
| 774 | */ | ||
| 775 | struct device_link { | ||
| 776 | struct device *supplier; | ||
| 777 | struct list_head s_node; | ||
| 778 | struct device *consumer; | ||
| 779 | struct list_head c_node; | ||
| 780 | enum device_link_state status; | ||
| 781 | u32 flags; | ||
| 782 | bool rpm_active; | ||
| 783 | #ifdef CONFIG_SRCU | ||
| 784 | struct rcu_head rcu_head; | ||
| 785 | #endif | ||
| 786 | }; | ||
| 787 | |||
| 788 | /** | ||
| 789 | * enum dl_dev_state - Device driver presence tracking information. | ||
| 790 | * @DL_DEV_NO_DRIVER: There is no driver attached to the device. | ||
| 791 | * @DL_DEV_PROBING: A driver is probing. | ||
| 792 | * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. | ||
| 793 | * @DL_DEV_UNBINDING: The driver is unbinding from the device. | ||
| 794 | */ | ||
| 795 | enum dl_dev_state { | ||
| 796 | DL_DEV_NO_DRIVER = 0, | ||
| 797 | DL_DEV_PROBING, | ||
| 798 | DL_DEV_DRIVER_BOUND, | ||
| 799 | DL_DEV_UNBINDING, | ||
| 800 | }; | ||
| 801 | |||
| 802 | /** | ||
| 803 | * struct dev_links_info - Device data related to device links. | ||
| 804 | * @suppliers: List of links to supplier devices. | ||
| 805 | * @consumers: List of links to consumer devices. | ||
| 806 | * @status: Driver status information. | ||
| 807 | */ | ||
| 808 | struct dev_links_info { | ||
| 809 | struct list_head suppliers; | ||
| 810 | struct list_head consumers; | ||
| 811 | enum dl_dev_state status; | ||
| 812 | }; | ||
| 813 | |||
| 814 | /** | ||
| 711 | * struct device - The basic device structure | 815 | * struct device - The basic device structure |
| 712 | * @parent: The device's "parent" device, the device to which it is attached. | 816 | * @parent: The device's "parent" device, the device to which it is attached. |
| 713 | * In most cases, a parent device is some sort of bus or host | 817 | * In most cases, a parent device is some sort of bus or host |
| @@ -732,8 +836,9 @@ struct device_dma_parameters { | |||
| 732 | * on. This shrinks the "Board Support Packages" (BSPs) and | 836 | * on. This shrinks the "Board Support Packages" (BSPs) and |
| 733 | * minimizes board-specific #ifdefs in drivers. | 837 | * minimizes board-specific #ifdefs in drivers. |
| 734 | * @driver_data: Private pointer for driver specific info. | 838 | * @driver_data: Private pointer for driver specific info. |
| 839 | * @links: Links to suppliers and consumers of this device. | ||
| 735 | * @power: For device power management. | 840 | * @power: For device power management. |
| 736 | * See Documentation/power/devices.txt for details. | 841 | * See Documentation/power/admin-guide/devices.rst for details. |
| 737 | * @pm_domain: Provide callbacks that are executed during system suspend, | 842 | * @pm_domain: Provide callbacks that are executed during system suspend, |
| 738 | * hibernation, system resume and during runtime PM transitions | 843 | * hibernation, system resume and during runtime PM transitions |
| 739 | * along with subsystem-level and driver-level callbacks. | 844 | * along with subsystem-level and driver-level callbacks. |
| @@ -799,6 +904,7 @@ struct device { | |||
| 799 | core doesn't touch it */ | 904 | core doesn't touch it */ |
| 800 | void *driver_data; /* Driver data, set and get with | 905 | void *driver_data; /* Driver data, set and get with |
| 801 | dev_set/get_drvdata */ | 906 | dev_set/get_drvdata */ |
| 907 | struct dev_links_info links; | ||
| 802 | struct dev_pm_info power; | 908 | struct dev_pm_info power; |
| 803 | struct dev_pm_domain *pm_domain; | 909 | struct dev_pm_domain *pm_domain; |
| 804 | 910 | ||
| @@ -1116,6 +1222,10 @@ extern void device_shutdown(void); | |||
| 1116 | /* debugging and troubleshooting/diagnostic helpers. */ | 1222 | /* debugging and troubleshooting/diagnostic helpers. */ |
| 1117 | extern const char *dev_driver_string(const struct device *dev); | 1223 | extern const char *dev_driver_string(const struct device *dev); |
| 1118 | 1224 | ||
| 1225 | /* Device links interface. */ | ||
| 1226 | struct device_link *device_link_add(struct device *consumer, | ||
| 1227 | struct device *supplier, u32 flags); | ||
| 1228 | void device_link_del(struct device_link *link); | ||
| 1119 | 1229 | ||
| 1120 | #ifdef CONFIG_PRINTK | 1230 | #ifdef CONFIG_PRINTK |
| 1121 | 1231 | ||
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index b91b023deffb..a52c6580cc9a 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h | |||
| @@ -58,7 +58,7 @@ struct dm_io_notify { | |||
| 58 | struct dm_io_client; | 58 | struct dm_io_client; |
| 59 | struct dm_io_request { | 59 | struct dm_io_request { |
| 60 | int bi_op; /* REQ_OP */ | 60 | int bi_op; /* REQ_OP */ |
| 61 | int bi_op_flags; /* rq_flag_bits */ | 61 | int bi_op_flags; /* req_flag_bits */ |
| 62 | struct dm_io_memory mem; /* Memory to use for io */ | 62 | struct dm_io_memory mem; /* Memory to use for io */ |
| 63 | struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ | 63 | struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ |
| 64 | struct dm_io_client *client; /* Client memory handler */ | 64 | struct dm_io_client *client; /* Client memory handler */ |
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index e0b0741ae671..8daeb3ce0016 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
| 31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
| 32 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
| 33 | #include <linux/fence.h> | 33 | #include <linux/dma-fence.h> |
| 34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
| 35 | 35 | ||
| 36 | struct device; | 36 | struct device; |
| @@ -143,7 +143,7 @@ struct dma_buf { | |||
| 143 | wait_queue_head_t poll; | 143 | wait_queue_head_t poll; |
| 144 | 144 | ||
| 145 | struct dma_buf_poll_cb_t { | 145 | struct dma_buf_poll_cb_t { |
| 146 | struct fence_cb cb; | 146 | struct dma_fence_cb cb; |
| 147 | wait_queue_head_t *poll; | 147 | wait_queue_head_t *poll; |
| 148 | 148 | ||
| 149 | unsigned long active; | 149 | unsigned long active; |
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h new file mode 100644 index 000000000000..5900945f962d --- /dev/null +++ b/include/linux/dma-fence-array.h | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | /* | ||
| 2 | * fence-array: aggregates fences to be waited together | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Collabora Ltd | ||
| 5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
| 6 | * Authors: | ||
| 7 | * Gustavo Padovan <gustavo@padovan.org> | ||
| 8 | * Christian König <christian.koenig@amd.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms of the GNU General Public License version 2 as published by | ||
| 12 | * the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 17 | * more details. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifndef __LINUX_DMA_FENCE_ARRAY_H | ||
| 21 | #define __LINUX_DMA_FENCE_ARRAY_H | ||
| 22 | |||
| 23 | #include <linux/dma-fence.h> | ||
| 24 | |||
| 25 | /** | ||
| 26 | * struct dma_fence_array_cb - callback helper for fence array | ||
| 27 | * @cb: fence callback structure for signaling | ||
| 28 | * @array: reference to the parent fence array object | ||
| 29 | */ | ||
| 30 | struct dma_fence_array_cb { | ||
| 31 | struct dma_fence_cb cb; | ||
| 32 | struct dma_fence_array *array; | ||
| 33 | }; | ||
| 34 | |||
| 35 | /** | ||
| 36 | * struct dma_fence_array - fence to represent an array of fences | ||
| 37 | * @base: fence base class | ||
| 38 | * @lock: spinlock for fence handling | ||
| 39 | * @num_fences: number of fences in the array | ||
| 40 | * @num_pending: fences in the array still pending | ||
| 41 | * @fences: array of the fences | ||
| 42 | */ | ||
| 43 | struct dma_fence_array { | ||
| 44 | struct dma_fence base; | ||
| 45 | |||
| 46 | spinlock_t lock; | ||
| 47 | unsigned num_fences; | ||
| 48 | atomic_t num_pending; | ||
| 49 | struct dma_fence **fences; | ||
| 50 | }; | ||
| 51 | |||
| 52 | extern const struct dma_fence_ops dma_fence_array_ops; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * dma_fence_is_array - check if a fence is from the array subclass | ||
| 56 | * @fence: fence to test | ||
| 57 | * | ||
| 58 | * Return true if it is a dma_fence_array and false otherwise. | ||
| 59 | */ | ||
| 60 | static inline bool dma_fence_is_array(struct dma_fence *fence) | ||
| 61 | { | ||
| 62 | return fence->ops == &dma_fence_array_ops; | ||
| 63 | } | ||
| 64 | |||
| 65 | /** | ||
| 66 | * to_dma_fence_array - cast a fence to a dma_fence_array | ||
| 67 | * @fence: fence to cast to a dma_fence_array | ||
| 68 | * | ||
| 69 | * Returns NULL if the fence is not a dma_fence_array, | ||
| 70 | * or the dma_fence_array otherwise. | ||
| 71 | */ | ||
| 72 | static inline struct dma_fence_array * | ||
| 73 | to_dma_fence_array(struct dma_fence *fence) | ||
| 74 | { | ||
| 75 | if (fence->ops != &dma_fence_array_ops) | ||
| 76 | return NULL; | ||
| 77 | |||
| 78 | return container_of(fence, struct dma_fence_array, base); | ||
| 79 | } | ||
| 80 | |||
| 81 | struct dma_fence_array *dma_fence_array_create(int num_fences, | ||
| 82 | struct dma_fence **fences, | ||
| 83 | u64 context, unsigned seqno, | ||
| 84 | bool signal_on_any); | ||
| 85 | |||
| 86 | #endif /* __LINUX_DMA_FENCE_ARRAY_H */ | ||
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h new file mode 100644 index 000000000000..d51a7d23c358 --- /dev/null +++ b/include/linux/dma-fence.h | |||
| @@ -0,0 +1,438 @@ | |||
| 1 | /* | ||
| 2 | * Fence mechanism for dma-buf to allow for asynchronous dma access | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Canonical Ltd | ||
| 5 | * Copyright (C) 2012 Texas Instruments | ||
| 6 | * | ||
| 7 | * Authors: | ||
| 8 | * Rob Clark <robdclark@gmail.com> | ||
| 9 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify it | ||
| 12 | * under the terms of the GNU General Public License version 2 as published by | ||
| 13 | * the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 18 | * more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __LINUX_DMA_FENCE_H | ||
| 22 | #define __LINUX_DMA_FENCE_H | ||
| 23 | |||
| 24 | #include <linux/err.h> | ||
| 25 | #include <linux/wait.h> | ||
| 26 | #include <linux/list.h> | ||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/kref.h> | ||
| 29 | #include <linux/sched.h> | ||
| 30 | #include <linux/printk.h> | ||
| 31 | #include <linux/rcupdate.h> | ||
| 32 | |||
| 33 | struct dma_fence; | ||
| 34 | struct dma_fence_ops; | ||
| 35 | struct dma_fence_cb; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * struct dma_fence - software synchronization primitive | ||
| 39 | * @refcount: refcount for this fence | ||
| 40 | * @ops: dma_fence_ops associated with this fence | ||
| 41 | * @rcu: used for releasing fence with kfree_rcu | ||
| 42 | * @cb_list: list of all callbacks to call | ||
| 43 | * @lock: spin_lock_irqsave used for locking | ||
| 44 | * @context: execution context this fence belongs to, returned by | ||
| 45 | * dma_fence_context_alloc() | ||
| 46 | * @seqno: the sequence number of this fence inside the execution context, | ||
| 47 | * can be compared to decide which fence would be signaled later. | ||
| 48 | * @flags: A mask of DMA_FENCE_FLAG_* defined below | ||
| 49 | * @timestamp: Timestamp when the fence was signaled. | ||
| 50 | * @status: Optional, only valid if < 0, must be set before calling | ||
| 51 | * dma_fence_signal, indicates that the fence has completed with an error. | ||
| 52 | * | ||
| 53 | * the flags member must be manipulated and read using the appropriate | ||
| 54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | ||
| 55 | * of the time. | ||
| 56 | * | ||
| 57 | * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled | ||
| 58 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called | ||
| 59 | * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the | ||
| 60 | * implementer of the fence for its own purposes. Can be used in different | ||
| 61 | * ways by different fence implementers, so do not rely on this. | ||
| 62 | * | ||
| 63 | * Since atomic bitops are used, this is not guaranteed to be the case. | ||
| 64 | * Particularly, if the bit was set, but dma_fence_signal was called right | ||
| 65 | * before this bit was set, it would have been able to set the | ||
| 66 | * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. | ||
| 67 | * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting | ||
| 68 | * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that | ||
| 69 | * after dma_fence_signal was called, any enable_signaling call will have either | ||
| 70 | * been completed, or never called at all. | ||
| 71 | */ | ||
| 72 | struct dma_fence { | ||
| 73 | struct kref refcount; | ||
| 74 | const struct dma_fence_ops *ops; | ||
| 75 | struct rcu_head rcu; | ||
| 76 | struct list_head cb_list; | ||
| 77 | spinlock_t *lock; | ||
| 78 | u64 context; | ||
| 79 | unsigned seqno; | ||
| 80 | unsigned long flags; | ||
| 81 | ktime_t timestamp; | ||
| 82 | int status; | ||
| 83 | }; | ||
| 84 | |||
| 85 | enum dma_fence_flag_bits { | ||
| 86 | DMA_FENCE_FLAG_SIGNALED_BIT, | ||
| 87 | DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, | ||
| 88 | DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ | ||
| 89 | }; | ||
| 90 | |||
| 91 | typedef void (*dma_fence_func_t)(struct dma_fence *fence, | ||
| 92 | struct dma_fence_cb *cb); | ||
| 93 | |||
| 94 | /** | ||
| 95 | * struct dma_fence_cb - callback for dma_fence_add_callback | ||
| 96 | * @node: used by dma_fence_add_callback to append this struct to fence::cb_list | ||
| 97 | * @func: dma_fence_func_t to call | ||
| 98 | * | ||
| 99 | * This struct will be initialized by dma_fence_add_callback, additional | ||
| 100 | * data can be passed along by embedding dma_fence_cb in another struct. | ||
| 101 | */ | ||
| 102 | struct dma_fence_cb { | ||
| 103 | struct list_head node; | ||
| 104 | dma_fence_func_t func; | ||
| 105 | }; | ||
| 106 | |||
| 107 | /** | ||
| 108 | * struct dma_fence_ops - operations implemented for fence | ||
| 109 | * @get_driver_name: returns the driver name. | ||
| 110 | * @get_timeline_name: return the name of the context this fence belongs to. | ||
| 111 | * @enable_signaling: enable software signaling of fence. | ||
| 112 | * @signaled: [optional] peek whether the fence is signaled, can be null. | ||
| 113 | * @wait: custom wait implementation, or dma_fence_default_wait. | ||
| 114 | * @release: [optional] called on destruction of fence, can be null | ||
| 115 | * @fill_driver_data: [optional] callback to fill in free-form debug info | ||
| 116 | * Returns amount of bytes filled, or -errno. | ||
| 117 | * @fence_value_str: [optional] fills in the value of the fence as a string | ||
| 118 | * @timeline_value_str: [optional] fills in the current value of the timeline | ||
| 119 | * as a string | ||
| 120 | * | ||
| 121 | * Notes on enable_signaling: | ||
| 122 | * For fence implementations that have the capability for hw->hw | ||
| 123 | * signaling, they can implement this op to enable the necessary | ||
| 124 | * irqs, or insert commands into cmdstream, etc. This is called | ||
| 125 | * in the first wait() or add_callback() path to let the fence | ||
| 126 | * implementation know that there is another driver waiting on | ||
| 127 | * the signal (ie. hw->sw case). | ||
| 128 | * | ||
| 129 | * This function can be called from atomic context, but not | ||
| 130 | * from irq context, so normal spinlocks can be used. | ||
| 131 | * | ||
| 132 | * A return value of false indicates the fence already passed, | ||
| 133 | * or some failure occurred that made it impossible to enable | ||
| 134 | * signaling. True indicates successful enabling. | ||
| 135 | * | ||
| 136 | * fence->status may be set in enable_signaling, but only when false is | ||
| 137 | * returned. | ||
| 138 | * | ||
| 139 | * Calling dma_fence_signal before enable_signaling is called allows | ||
| 140 | * for a tiny race window in which enable_signaling is called during, | ||
| 141 | * before, or after dma_fence_signal. To fight this, it is recommended | ||
| 142 | * that before enable_signaling returns true an extra reference is | ||
| 143 | * taken on the fence, to be released when the fence is signaled. | ||
| 144 | * This will mean dma_fence_signal will still be called twice, but | ||
| 145 | * the second time will be a noop since it was already signaled. | ||
| 146 | * | ||
| 147 | * Notes on signaled: | ||
| 148 | * May set fence->status if returning true. | ||
| 149 | * | ||
| 150 | * Notes on wait: | ||
| 151 | * Must not be NULL, set to dma_fence_default_wait for default implementation. | ||
| 152 | * the dma_fence_default_wait implementation should work for any fence, as long | ||
| 153 | * as enable_signaling works correctly. | ||
| 154 | * | ||
| 155 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was | ||
| 156 | * interrupted, and remaining jiffies if fence has signaled, or 0 if wait | ||
| 157 | * timed out. Can also return other error values on custom implementations, | ||
| 158 | * which should be treated as if the fence is signaled. For example a hardware | ||
| 159 | * lockup could be reported like that. | ||
| 160 | * | ||
| 161 | * Notes on release: | ||
| 162 | * Can be NULL, this function allows additional commands to run on | ||
| 163 | * destruction of the fence. Can be called from irq context. | ||
| 164 | * If pointer is set to NULL, kfree will get called instead. | ||
| 165 | */ | ||
| 166 | |||
| 167 | struct dma_fence_ops { | ||
| 168 | const char * (*get_driver_name)(struct dma_fence *fence); | ||
| 169 | const char * (*get_timeline_name)(struct dma_fence *fence); | ||
| 170 | bool (*enable_signaling)(struct dma_fence *fence); | ||
| 171 | bool (*signaled)(struct dma_fence *fence); | ||
| 172 | signed long (*wait)(struct dma_fence *fence, | ||
| 173 | bool intr, signed long timeout); | ||
| 174 | void (*release)(struct dma_fence *fence); | ||
| 175 | |||
| 176 | int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); | ||
| 177 | void (*fence_value_str)(struct dma_fence *fence, char *str, int size); | ||
| 178 | void (*timeline_value_str)(struct dma_fence *fence, | ||
| 179 | char *str, int size); | ||
| 180 | }; | ||
| 181 | |||
| 182 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, | ||
| 183 | spinlock_t *lock, u64 context, unsigned seqno); | ||
| 184 | |||
| 185 | void dma_fence_release(struct kref *kref); | ||
| 186 | void dma_fence_free(struct dma_fence *fence); | ||
| 187 | |||
| 188 | /** | ||
| 189 | * dma_fence_put - decreases refcount of the fence | ||
| 190 | * @fence: [in] fence to reduce refcount of | ||
| 191 | */ | ||
| 192 | static inline void dma_fence_put(struct dma_fence *fence) | ||
| 193 | { | ||
| 194 | if (fence) | ||
| 195 | kref_put(&fence->refcount, dma_fence_release); | ||
| 196 | } | ||
| 197 | |||
| 198 | /** | ||
| 199 | * dma_fence_get - increases refcount of the fence | ||
| 200 | * @fence: [in] fence to increase refcount of | ||
| 201 | * | ||
| 202 | * Returns the same fence, with refcount increased by 1. | ||
| 203 | */ | ||
| 204 | static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) | ||
| 205 | { | ||
| 206 | if (fence) | ||
| 207 | kref_get(&fence->refcount); | ||
| 208 | return fence; | ||
| 209 | } | ||
| 210 | |||
| 211 | /** | ||
| 212 | * dma_fence_get_rcu - get a fence from a reservation_object_list with | ||
| 213 | * rcu read lock | ||
| 214 | * @fence: [in] fence to increase refcount of | ||
| 215 | * | ||
| 216 | * Function returns NULL if no refcount could be obtained, or the fence. | ||
| 217 | */ | ||
| 218 | static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) | ||
| 219 | { | ||
| 220 | if (kref_get_unless_zero(&fence->refcount)) | ||
| 221 | return fence; | ||
| 222 | else | ||
| 223 | return NULL; | ||
| 224 | } | ||
| 225 | |||
| 226 | /** | ||
| 227 | * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence | ||
| 228 | * @fencep: [in] pointer to fence to increase refcount of | ||
| 229 | * | ||
| 230 | * Function returns NULL if no refcount could be obtained, or the fence. | ||
| 231 | * This function handles acquiring a reference to a fence that may be | ||
| 232 | * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), | ||
| 233 | * so long as the caller is using RCU on the pointer to the fence. | ||
| 234 | * | ||
| 235 | * An alternative mechanism is to employ a seqlock to protect a bunch of | ||
| 236 | * fences, such as used by struct reservation_object. When using a seqlock, | ||
| 237 | * the seqlock must be taken before and checked after a reference to the | ||
| 238 | * fence is acquired (as shown here). | ||
| 239 | * | ||
| 240 | * The caller is required to hold the RCU read lock. | ||
| 241 | */ | ||
| 242 | static inline struct dma_fence * | ||
| 243 | dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) | ||
| 244 | { | ||
| 245 | do { | ||
| 246 | struct dma_fence *fence; | ||
| 247 | |||
| 248 | fence = rcu_dereference(*fencep); | ||
| 249 | if (!fence || !dma_fence_get_rcu(fence)) | ||
| 250 | return NULL; | ||
| 251 | |||
| 252 | /* The atomic_inc_not_zero() inside dma_fence_get_rcu() | ||
| 253 | * provides a full memory barrier upon success (such as now). | ||
| 254 | * This is paired with the write barrier from assigning | ||
| 255 | * to the __rcu protected fence pointer so that if that | ||
| 256 | * pointer still matches the current fence, we know we | ||
| 257 | * have successfully acquired a reference to it. If it no | ||
| 258 | * longer matches, we are holding a reference to some other | ||
| 259 | * reallocated pointer. This is possible if the allocator | ||
| 260 | * is using a freelist like SLAB_DESTROY_BY_RCU where the | ||
| 261 | * fence remains valid for the RCU grace period, but it | ||
| 262 | * may be reallocated. When using such allocators, we are | ||
| 263 | * responsible for ensuring the reference we get is to | ||
| 264 | * the right fence, as below. | ||
| 265 | */ | ||
| 266 | if (fence == rcu_access_pointer(*fencep)) | ||
| 267 | return rcu_pointer_handoff(fence); | ||
| 268 | |||
| 269 | dma_fence_put(fence); | ||
| 270 | } while (1); | ||
| 271 | } | ||
| 272 | |||
| 273 | int dma_fence_signal(struct dma_fence *fence); | ||
| 274 | int dma_fence_signal_locked(struct dma_fence *fence); | ||
| 275 | signed long dma_fence_default_wait(struct dma_fence *fence, | ||
| 276 | bool intr, signed long timeout); | ||
| 277 | int dma_fence_add_callback(struct dma_fence *fence, | ||
| 278 | struct dma_fence_cb *cb, | ||
| 279 | dma_fence_func_t func); | ||
| 280 | bool dma_fence_remove_callback(struct dma_fence *fence, | ||
| 281 | struct dma_fence_cb *cb); | ||
| 282 | void dma_fence_enable_sw_signaling(struct dma_fence *fence); | ||
| 283 | |||
| 284 | /** | ||
| 285 | * dma_fence_is_signaled_locked - Return an indication if the fence | ||
| 286 | * is signaled yet. | ||
| 287 | * @fence: [in] the fence to check | ||
| 288 | * | ||
| 289 | * Returns true if the fence was already signaled, false if not. Since this | ||
| 290 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
| 291 | * true if dma_fence_add_callback, dma_fence_wait or | ||
| 292 | * dma_fence_enable_sw_signaling haven't been called before. | ||
| 293 | * | ||
| 294 | * This function requires fence->lock to be held. | ||
| 295 | */ | ||
| 296 | static inline bool | ||
| 297 | dma_fence_is_signaled_locked(struct dma_fence *fence) | ||
| 298 | { | ||
| 299 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
| 300 | return true; | ||
| 301 | |||
| 302 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
| 303 | dma_fence_signal_locked(fence); | ||
| 304 | return true; | ||
| 305 | } | ||
| 306 | |||
| 307 | return false; | ||
| 308 | } | ||
| 309 | |||
| 310 | /** | ||
| 311 | * dma_fence_is_signaled - Return an indication if the fence is signaled yet. | ||
| 312 | * @fence: [in] the fence to check | ||
| 313 | * | ||
| 314 | * Returns true if the fence was already signaled, false if not. Since this | ||
| 315 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
| 316 | * true if dma_fence_add_callback, dma_fence_wait or | ||
| 317 | * dma_fence_enable_sw_signaling haven't been called before. | ||
| 318 | * | ||
| 319 | * It's recommended for seqno fences to call dma_fence_signal when the | ||
| 320 | * operation is complete, it makes it possible to prevent issues from | ||
| 321 | * wraparound between time of issue and time of use by checking the return | ||
| 322 | * value of this function before calling hardware-specific wait instructions. | ||
| 323 | */ | ||
| 324 | static inline bool | ||
| 325 | dma_fence_is_signaled(struct dma_fence *fence) | ||
| 326 | { | ||
| 327 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
| 328 | return true; | ||
| 329 | |||
| 330 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
| 331 | dma_fence_signal(fence); | ||
| 332 | return true; | ||
| 333 | } | ||
| 334 | |||
| 335 | return false; | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * dma_fence_is_later - return if f1 is chronologically later than f2 | ||
| 340 | * @f1: [in] the first fence from the same context | ||
| 341 | * @f2: [in] the second fence from the same context | ||
| 342 | * | ||
| 343 | * Returns true if f1 is chronologically later than f2. Both fences must be | ||
| 344 | * from the same context, since a seqno is not re-used across contexts. | ||
| 345 | */ | ||
| 346 | static inline bool dma_fence_is_later(struct dma_fence *f1, | ||
| 347 | struct dma_fence *f2) | ||
| 348 | { | ||
| 349 | if (WARN_ON(f1->context != f2->context)) | ||
| 350 | return false; | ||
| 351 | |||
| 352 | return (int)(f1->seqno - f2->seqno) > 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | /** | ||
| 356 | * dma_fence_later - return the chronologically later fence | ||
| 357 | * @f1: [in] the first fence from the same context | ||
| 358 | * @f2: [in] the second fence from the same context | ||
| 359 | * | ||
| 360 | * Returns NULL if both fences are signaled, otherwise the fence that would be | ||
| 361 | * signaled last. Both fences must be from the same context, since a seqno is | ||
| 362 | * not re-used across contexts. | ||
| 363 | */ | ||
| 364 | static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, | ||
| 365 | struct dma_fence *f2) | ||
| 366 | { | ||
| 367 | if (WARN_ON(f1->context != f2->context)) | ||
| 368 | return NULL; | ||
| 369 | |||
| 370 | /* | ||
| 371 | * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never | ||
| 372 | * have been set if enable_signaling wasn't called, and enabling that | ||
| 373 | * here is overkill. | ||
| 374 | */ | ||
| 375 | if (dma_fence_is_later(f1, f2)) | ||
| 376 | return dma_fence_is_signaled(f1) ? NULL : f1; | ||
| 377 | else | ||
| 378 | return dma_fence_is_signaled(f2) ? NULL : f2; | ||
| 379 | } | ||
| 380 | |||
| 381 | signed long dma_fence_wait_timeout(struct dma_fence *, | ||
| 382 | bool intr, signed long timeout); | ||
| 383 | signed long dma_fence_wait_any_timeout(struct dma_fence **fences, | ||
| 384 | uint32_t count, | ||
| 385 | bool intr, signed long timeout, | ||
| 386 | uint32_t *idx); | ||
| 387 | |||
| 388 | /** | ||
| 389 | * dma_fence_wait - sleep until the fence gets signaled | ||
| 390 | * @fence: [in] the fence to wait on | ||
| 391 | * @intr: [in] if true, do an interruptible wait | ||
| 392 | * | ||
| 393 | * This function will return -ERESTARTSYS if interrupted by a signal, | ||
| 394 | * or 0 if the fence was signaled. Other error values may be | ||
| 395 | * returned on custom implementations. | ||
| 396 | * | ||
| 397 | * Performs a synchronous wait on this fence. It is assumed the caller | ||
| 398 | * directly or indirectly holds a reference to the fence, otherwise the | ||
| 399 | * fence might be freed before return, resulting in undefined behavior. | ||
| 400 | */ | ||
| 401 | static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) | ||
| 402 | { | ||
| 403 | signed long ret; | ||
| 404 | |||
| 405 | /* Since dma_fence_wait_timeout cannot timeout with | ||
| 406 | * MAX_SCHEDULE_TIMEOUT, only valid return values are | ||
| 407 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. | ||
| 408 | */ | ||
| 409 | ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); | ||
| 410 | |||
| 411 | return ret < 0 ? ret : 0; | ||
| 412 | } | ||
| 413 | |||
| 414 | u64 dma_fence_context_alloc(unsigned num); | ||
| 415 | |||
| 416 | #define DMA_FENCE_TRACE(f, fmt, args...) \ | ||
| 417 | do { \ | ||
| 418 | struct dma_fence *__ff = (f); \ | ||
| 419 | if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ | ||
| 420 | pr_info("f %llu#%u: " fmt, \ | ||
| 421 | __ff->context, __ff->seqno, ##args); \ | ||
| 422 | } while (0) | ||
| 423 | |||
| 424 | #define DMA_FENCE_WARN(f, fmt, args...) \ | ||
| 425 | do { \ | ||
| 426 | struct dma_fence *__ff = (f); \ | ||
| 427 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | ||
| 428 | ##args); \ | ||
| 429 | } while (0) | ||
| 430 | |||
| 431 | #define DMA_FENCE_ERR(f, fmt, args...) \ | ||
| 432 | do { \ | ||
| 433 | struct dma_fence *__ff = (f); \ | ||
| 434 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | ||
| 435 | ##args); \ | ||
| 436 | } while (0) | ||
| 437 | |||
| 438 | #endif /* __LINUX_DMA_FENCE_H */ | ||
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 32c589062bd9..7f7e9a7e3839 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
| @@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, | |||
| 61 | enum dma_data_direction dir, unsigned long attrs); | 61 | enum dma_data_direction dir, unsigned long attrs); |
| 62 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 62 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
| 63 | enum dma_data_direction dir, unsigned long attrs); | 63 | enum dma_data_direction dir, unsigned long attrs); |
| 64 | dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, | ||
| 65 | size_t size, enum dma_data_direction dir, unsigned long attrs); | ||
| 66 | void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | ||
| 67 | size_t size, enum dma_data_direction dir, unsigned long attrs); | ||
| 64 | int iommu_dma_supported(struct device *dev, u64 mask); | 68 | int iommu_dma_supported(struct device *dev, u64 mask); |
| 65 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | 69 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); |
| 66 | 70 | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 08528afdf58b..10c5a17b1f51 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
| @@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg | |||
| 243 | ops->unmap_sg(dev, sg, nents, dir, attrs); | 243 | ops->unmap_sg(dev, sg, nents, dir, attrs); |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | 246 | static inline dma_addr_t dma_map_page_attrs(struct device *dev, |
| 247 | size_t offset, size_t size, | 247 | struct page *page, |
| 248 | enum dma_data_direction dir) | 248 | size_t offset, size_t size, |
| 249 | enum dma_data_direction dir, | ||
| 250 | unsigned long attrs) | ||
| 249 | { | 251 | { |
| 250 | struct dma_map_ops *ops = get_dma_ops(dev); | 252 | struct dma_map_ops *ops = get_dma_ops(dev); |
| 251 | dma_addr_t addr; | 253 | dma_addr_t addr; |
| 252 | 254 | ||
| 253 | kmemcheck_mark_initialized(page_address(page) + offset, size); | 255 | kmemcheck_mark_initialized(page_address(page) + offset, size); |
| 254 | BUG_ON(!valid_dma_direction(dir)); | 256 | BUG_ON(!valid_dma_direction(dir)); |
| 255 | addr = ops->map_page(dev, page, offset, size, dir, 0); | 257 | addr = ops->map_page(dev, page, offset, size, dir, attrs); |
| 256 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | 258 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); |
| 257 | 259 | ||
| 258 | return addr; | 260 | return addr; |
| 259 | } | 261 | } |
| 260 | 262 | ||
| 261 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 263 | static inline void dma_unmap_page_attrs(struct device *dev, |
| 262 | size_t size, enum dma_data_direction dir) | 264 | dma_addr_t addr, size_t size, |
| 265 | enum dma_data_direction dir, | ||
| 266 | unsigned long attrs) | ||
| 263 | { | 267 | { |
| 264 | struct dma_map_ops *ops = get_dma_ops(dev); | 268 | struct dma_map_ops *ops = get_dma_ops(dev); |
| 265 | 269 | ||
| 266 | BUG_ON(!valid_dma_direction(dir)); | 270 | BUG_ON(!valid_dma_direction(dir)); |
| 267 | if (ops->unmap_page) | 271 | if (ops->unmap_page) |
| 268 | ops->unmap_page(dev, addr, size, dir, 0); | 272 | ops->unmap_page(dev, addr, size, dir, attrs); |
| 269 | debug_dma_unmap_page(dev, addr, size, dir, false); | 273 | debug_dma_unmap_page(dev, addr, size, dir, false); |
| 270 | } | 274 | } |
| 271 | 275 | ||
| @@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
| 385 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) | 389 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) |
| 386 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) | 390 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) |
| 387 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) | 391 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) |
| 392 | #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) | ||
| 393 | #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) | ||
| 388 | 394 | ||
| 389 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | 395 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, |
| 390 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | 396 | void *cpu_addr, dma_addr_t dma_addr, size_t size); |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index cc535a478bae..feee6ec6a13b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -336,6 +336,12 @@ enum dma_slave_buswidth { | |||
| 336 | * may or may not be applicable on memory sources. | 336 | * may or may not be applicable on memory sources. |
| 337 | * @dst_maxburst: same as src_maxburst but for destination target | 337 | * @dst_maxburst: same as src_maxburst but for destination target |
| 338 | * mutatis mutandis. | 338 | * mutatis mutandis. |
| 339 | * @src_port_window_size: The length of the register area in words the data needs | ||
| 340 | * to be accessed on the device side. It is only used for devices which are using | ||
| 341 | * an area instead of a single register to receive the data. Typically the DMA | ||
| 342 | * loops in this area in order to transfer the data. | ||
| 343 | * @dst_port_window_size: same as src_port_window_size but for the destination | ||
| 344 | * port. | ||
| 339 | * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill | 345 | * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill |
| 340 | * with 'true' if peripheral should be flow controller. Direction will be | 346 | * with 'true' if peripheral should be flow controller. Direction will be |
| 341 | * selected at Runtime. | 347 | * selected at Runtime. |
| @@ -363,6 +369,8 @@ struct dma_slave_config { | |||
| 363 | enum dma_slave_buswidth dst_addr_width; | 369 | enum dma_slave_buswidth dst_addr_width; |
| 364 | u32 src_maxburst; | 370 | u32 src_maxburst; |
| 365 | u32 dst_maxburst; | 371 | u32 dst_maxburst; |
| 372 | u32 src_port_window_size; | ||
| 373 | u32 dst_port_window_size; | ||
| 366 | bool device_fc; | 374 | bool device_fc; |
| 367 | unsigned int slave_id; | 375 | unsigned int slave_id; |
| 368 | }; | 376 | }; |
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index c934d3a96b5e..2896f93808ae 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h | |||
| @@ -67,7 +67,7 @@ | |||
| 67 | * genl_magic_func.h | 67 | * genl_magic_func.h |
| 68 | * generates an entry in the static genl_ops array, | 68 | * generates an entry in the static genl_ops array, |
| 69 | * and static register/unregister functions to | 69 | * and static register/unregister functions to |
| 70 | * genl_register_family_with_ops(). | 70 | * genl_register_family(). |
| 71 | * | 71 | * |
| 72 | * flags and handler: | 72 | * flags and handler: |
| 73 | * GENL_op_init( .doit = x, .dumpit = y, .flags = something) | 73 | * GENL_op_init( .doit = x, .dumpit = y, .flags = something) |
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 1f79b20918b1..4334106f44c3 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h | |||
| @@ -50,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, | |||
| 50 | unsigned long freq); | 50 | unsigned long freq); |
| 51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); | 51 | void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); |
| 52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); | 52 | void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); |
| 53 | cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); | 53 | u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); |
| 54 | 54 | ||
| 55 | #endif /* __DW_APB_TIMER_H__ */ | 55 | #endif /* __DW_APB_TIMER_H__ */ |
diff --git a/include/linux/edac.h b/include/linux/edac.h index 9e0d78966552..07c52c0af62d 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include <linux/workqueue.h> | 18 | #include <linux/workqueue.h> |
| 19 | #include <linux/debugfs.h> | 19 | #include <linux/debugfs.h> |
| 20 | 20 | ||
| 21 | #define EDAC_DEVICE_NAME_LEN 31 | ||
| 22 | |||
| 21 | struct device; | 23 | struct device; |
| 22 | 24 | ||
| 23 | #define EDAC_OPSTATE_INVAL -1 | 25 | #define EDAC_OPSTATE_INVAL -1 |
| @@ -128,12 +130,21 @@ enum dev_type { | |||
| 128 | * fatal (maybe it is on an unused memory area, | 130 | * fatal (maybe it is on an unused memory area, |
| 129 | * or the memory controller could recover from | 131 | * or the memory controller could recover from |
| 130 | * it for example, by re-trying the operation). | 132 | * it for example, by re-trying the operation). |
| 133 | * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable | ||
| 134 | * error whose handling is not urgent. This could | ||
| 135 | * be due to hardware data poisoning where the | ||
| 136 | * system can continue operation until the poisoned | ||
| 137 | * data is consumed. Preemptive measures may also | ||
| 138 | * be taken, e.g. offlining pages, etc. | ||
| 131 | * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not | 139 | * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not |
| 132 | * be recovered. | 140 | * be recovered. |
| 141 | * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a fourth | ||
| 142 | * type of error: informational logs. | ||
| 133 | */ | 143 | */ |
| 134 | enum hw_event_mc_err_type { | 144 | enum hw_event_mc_err_type { |
| 135 | HW_EVENT_ERR_CORRECTED, | 145 | HW_EVENT_ERR_CORRECTED, |
| 136 | HW_EVENT_ERR_UNCORRECTED, | 146 | HW_EVENT_ERR_UNCORRECTED, |
| 147 | HW_EVENT_ERR_DEFERRED, | ||
| 137 | HW_EVENT_ERR_FATAL, | 148 | HW_EVENT_ERR_FATAL, |
| 138 | HW_EVENT_ERR_INFO, | 149 | HW_EVENT_ERR_INFO, |
| 139 | }; | 150 | }; |
| @@ -145,6 +156,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) | |||
| 145 | return "Corrected"; | 156 | return "Corrected"; |
| 146 | case HW_EVENT_ERR_UNCORRECTED: | 157 | case HW_EVENT_ERR_UNCORRECTED: |
| 147 | return "Uncorrected"; | 158 | return "Uncorrected"; |
| 159 | case HW_EVENT_ERR_DEFERRED: | ||
| 160 | return "Deferred"; | ||
| 148 | case HW_EVENT_ERR_FATAL: | 161 | case HW_EVENT_ERR_FATAL: |
| 149 | return "Fatal"; | 162 | return "Fatal"; |
| 150 | default: | 163 | default: |
| @@ -157,7 +170,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) | |||
| 157 | * enum mem_type - memory types. For a more detailed reference, please see | 170 | * enum mem_type - memory types. For a more detailed reference, please see |
| 158 | * http://en.wikipedia.org/wiki/DRAM | 171 | * http://en.wikipedia.org/wiki/DRAM |
| 159 | * | 172 | * |
| 160 | * @MEM_EMPTY Empty csrow | 173 | * @MEM_EMPTY: Empty csrow |
| 161 | * @MEM_RESERVED: Reserved csrow type | 174 | * @MEM_RESERVED: Reserved csrow type |
| 162 | * @MEM_UNKNOWN: Unknown csrow type | 175 | * @MEM_UNKNOWN: Unknown csrow type |
| 163 | * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. | 176 | * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. |
| @@ -192,10 +205,11 @@ static inline char *mc_event_error_type(const unsigned int err_type) | |||
| 192 | * @MEM_DDR3: DDR3 RAM | 205 | * @MEM_DDR3: DDR3 RAM |
| 193 | * @MEM_RDDR3: Registered DDR3 RAM | 206 | * @MEM_RDDR3: Registered DDR3 RAM |
| 194 | * This is a variant of the DDR3 memories. | 207 | * This is a variant of the DDR3 memories. |
| 195 | * @MEM_LRDDR3 Load-Reduced DDR3 memory. | 208 | * @MEM_LRDDR3: Load-Reduced DDR3 memory. |
| 196 | * @MEM_DDR4: Unbuffered DDR4 RAM | 209 | * @MEM_DDR4: Unbuffered DDR4 RAM |
| 197 | * @MEM_RDDR4: Registered DDR4 RAM | 210 | * @MEM_RDDR4: Registered DDR4 RAM |
| 198 | * This is a variant of the DDR4 memories. | 211 | * This is a variant of the DDR4 memories. |
| 212 | * @MEM_LRDDR4: Load-Reduced DDR4 memory. | ||
| 199 | */ | 213 | */ |
| 200 | enum mem_type { | 214 | enum mem_type { |
| 201 | MEM_EMPTY = 0, | 215 | MEM_EMPTY = 0, |
| @@ -218,6 +232,7 @@ enum mem_type { | |||
| 218 | MEM_LRDDR3, | 232 | MEM_LRDDR3, |
| 219 | MEM_DDR4, | 233 | MEM_DDR4, |
| 220 | MEM_RDDR4, | 234 | MEM_RDDR4, |
| 235 | MEM_LRDDR4, | ||
| 221 | }; | 236 | }; |
| 222 | 237 | ||
| 223 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) | 238 | #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) |
| @@ -239,6 +254,7 @@ enum mem_type { | |||
| 239 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) | 254 | #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) |
| 240 | #define MEM_FLAG_DDR4 BIT(MEM_DDR4) | 255 | #define MEM_FLAG_DDR4 BIT(MEM_DDR4) |
| 241 | #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) | 256 | #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) |
| 257 | #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) | ||
| 242 | 258 | ||
| 243 | /** | 259 | /** |
| 244 | * enum edac-type - Error Detection and Correction capabilities and mode | 260 | * enum edac-type - Error Detection and Correction capabilities and mode |
| @@ -278,7 +294,7 @@ enum edac_type { | |||
| 278 | 294 | ||
| 279 | /** | 295 | /** |
| 280 | * enum scrub_type - scrubbing capabilities | 296 | * enum scrub_type - scrubbing capabilities |
| 281 | * @SCRUB_UNKNOWN Unknown if scrubber is available | 297 | * @SCRUB_UNKNOWN: Unknown if scrubber is available |
| 282 | * @SCRUB_NONE: No scrubber | 298 | * @SCRUB_NONE: No scrubber |
| 283 | * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing | 299 | * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing |
| 284 | * @SCRUB_SW_SRC: Software scrub only errors | 300 | * @SCRUB_SW_SRC: Software scrub only errors |
| @@ -287,7 +303,7 @@ enum edac_type { | |||
| 287 | * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing | 303 | * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing |
| 288 | * @SCRUB_HW_SRC: Hardware scrub only errors | 304 | * @SCRUB_HW_SRC: Hardware scrub only errors |
| 289 | * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error | 305 | * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error |
| 290 | * SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable | 306 | * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable |
| 291 | */ | 307 | */ |
| 292 | enum scrub_type { | 308 | enum scrub_type { |
| 293 | SCRUB_UNKNOWN = 0, | 309 | SCRUB_UNKNOWN = 0, |
| @@ -320,114 +336,6 @@ enum scrub_type { | |||
| 320 | #define OP_RUNNING_POLL_INTR 0x203 | 336 | #define OP_RUNNING_POLL_INTR 0x203 |
| 321 | #define OP_OFFLINE 0x300 | 337 | #define OP_OFFLINE 0x300 |
| 322 | 338 | ||
| 323 | /* | ||
| 324 | * Concepts used at the EDAC subsystem | ||
| 325 | * | ||
| 326 | * There are several things to be aware of that aren't at all obvious: | ||
| 327 | * | ||
| 328 | * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc.. | ||
| 329 | * | ||
| 330 | * These are some of the many terms that are thrown about that don't always | ||
| 331 | * mean what people think they mean (Inconceivable!). In the interest of | ||
| 332 | * creating a common ground for discussion, terms and their definitions | ||
| 333 | * will be established. | ||
| 334 | * | ||
| 335 | * Memory devices: The individual DRAM chips on a memory stick. These | ||
| 336 | * devices commonly output 4 and 8 bits each (x4, x8). | ||
| 337 | * Grouping several of these in parallel provides the | ||
| 338 | * number of bits that the memory controller expects: | ||
| 339 | * typically 72 bits, in order to provide 64 bits + | ||
| 340 | * 8 bits of ECC data. | ||
| 341 | * | ||
| 342 | * Memory Stick: A printed circuit board that aggregates multiple | ||
| 343 | * memory devices in parallel. In general, this is the | ||
| 344 | * Field Replaceable Unit (FRU) which gets replaced, in | ||
| 345 | * the case of excessive errors. Most often it is also | ||
| 346 | * called DIMM (Dual Inline Memory Module). | ||
| 347 | * | ||
| 348 | * Memory Socket: A physical connector on the motherboard that accepts | ||
| 349 | * a single memory stick. Also called as "slot" on several | ||
| 350 | * datasheets. | ||
| 351 | * | ||
| 352 | * Channel: A memory controller channel, responsible to communicate | ||
| 353 | * with a group of DIMMs. Each channel has its own | ||
| 354 | * independent control (command) and data bus, and can | ||
| 355 | * be used independently or grouped with other channels. | ||
| 356 | * | ||
| 357 | * Branch: It is typically the highest hierarchy on a | ||
| 358 | * Fully-Buffered DIMM memory controller. | ||
| 359 | * Typically, it contains two channels. | ||
| 360 | * Two channels at the same branch can be used in single | ||
| 361 | * mode or in lockstep mode. | ||
| 362 | * When lockstep is enabled, the cacheline is doubled, | ||
| 363 | * but it generally brings some performance penalty. | ||
| 364 | * Also, it is generally not possible to point to just one | ||
| 365 | * memory stick when an error occurs, as the error | ||
| 366 | * correction code is calculated using two DIMMs instead | ||
| 367 | * of one. Due to that, it is capable of correcting more | ||
| 368 | * errors than on single mode. | ||
| 369 | * | ||
| 370 | * Single-channel: The data accessed by the memory controller is contained | ||
| 371 | * into one dimm only. E. g. if the data is 64 bits-wide, | ||
| 372 | * the data flows to the CPU using one 64 bits parallel | ||
| 373 | * access. | ||
| 374 | * Typically used with SDR, DDR, DDR2 and DDR3 memories. | ||
| 375 | * FB-DIMM and RAMBUS use a different concept for channel, | ||
| 376 | * so this concept doesn't apply there. | ||
| 377 | * | ||
| 378 | * Double-channel: The data size accessed by the memory controller is | ||
| 379 | * interlaced into two dimms, accessed at the same time. | ||
| 380 | * E. g. if the DIMM is 64 bits-wide (72 bits with ECC), | ||
| 381 | * the data flows to the CPU using a 128 bits parallel | ||
| 382 | * access. | ||
| 383 | * | ||
| 384 | * Chip-select row: This is the name of the DRAM signal used to select the | ||
| 385 | * DRAM ranks to be accessed. Common chip-select rows for | ||
| 386 | * single channel are 64 bits, for dual channel 128 bits. | ||
| 387 | * It may not be visible by the memory controller, as some | ||
| 388 | * DIMM types have a memory buffer that can hide direct | ||
| 389 | * access to it from the Memory Controller. | ||
| 390 | * | ||
| 391 | * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory. | ||
| 392 | * Motherboards commonly drive two chip-select pins to | ||
| 393 | * a memory stick. A single-ranked stick, will occupy | ||
| 394 | * only one of those rows. The other will be unused. | ||
| 395 | * | ||
| 396 | * Double-Ranked stick: A double-ranked stick has two chip-select rows which | ||
| 397 | * access different sets of memory devices. The two | ||
| 398 | * rows cannot be accessed concurrently. | ||
| 399 | * | ||
| 400 | * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick. | ||
| 401 | * A double-sided stick has two chip-select rows which | ||
| 402 | * access different sets of memory devices. The two | ||
| 403 | * rows cannot be accessed concurrently. "Double-sided" | ||
| 404 | * is irrespective of the memory devices being mounted | ||
| 405 | * on both sides of the memory stick. | ||
| 406 | * | ||
| 407 | * Socket set: All of the memory sticks that are required for | ||
| 408 | * a single memory access or all of the memory sticks | ||
| 409 | * spanned by a chip-select row. A single socket set | ||
| 410 | * has two chip-select rows and if double-sided sticks | ||
| 411 | * are used these will occupy those chip-select rows. | ||
| 412 | * | ||
| 413 | * Bank: This term is avoided because it is unclear when | ||
| 414 | * needing to distinguish between chip-select rows and | ||
| 415 | * socket sets. | ||
| 416 | * | ||
| 417 | * Controller pages: | ||
| 418 | * | ||
| 419 | * Physical pages: | ||
| 420 | * | ||
| 421 | * Virtual pages: | ||
| 422 | * | ||
| 423 | * | ||
| 424 | * STRUCTURE ORGANIZATION AND CHOICES | ||
| 425 | * | ||
| 426 | * | ||
| 427 | * | ||
| 428 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. | ||
| 429 | */ | ||
| 430 | |||
| 431 | /** | 339 | /** |
| 432 | * enum edac_mc_layer - memory controller hierarchy layer | 340 | * enum edac_mc_layer - memory controller hierarchy layer |
| 433 | * | 341 | * |
| @@ -452,7 +360,7 @@ enum edac_mc_layer_type { | |||
| 452 | 360 | ||
| 453 | /** | 361 | /** |
| 454 | * struct edac_mc_layer - describes the memory controller hierarchy | 362 | * struct edac_mc_layer - describes the memory controller hierarchy |
| 455 | * @layer: layer type | 363 | * @type: layer type |
| 456 | * @size: number of components per layer. For example, | 364 | * @size: number of components per layer. For example, |
| 457 | * if the channel layer has two channels, size = 2 | 365 | * if the channel layer has two channels, size = 2 |
| 458 | * @is_virt_csrow: This layer is part of the "csrow" when old API | 366 | * @is_virt_csrow: This layer is part of the "csrow" when old API |
| @@ -475,24 +383,28 @@ struct edac_mc_layer { | |||
| 475 | #define EDAC_MAX_LAYERS 3 | 383 | #define EDAC_MAX_LAYERS 3 |
| 476 | 384 | ||
| 477 | /** | 385 | /** |
| 478 | * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array | 386 | * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer |
| 479 | * for the element given by [layer0,layer1,layer2] position | 387 | * array for the element given by [layer0,layer1,layer2] |
| 388 | * position | ||
| 480 | * | 389 | * |
| 481 | * @layers: a struct edac_mc_layer array, describing how many elements | 390 | * @layers: a struct edac_mc_layer array, describing how many elements |
| 482 | * were allocated for each layer | 391 | * were allocated for each layer |
| 483 | * @n_layers: Number of layers at the @layers array | 392 | * @nlayers: Number of layers at the @layers array |
| 484 | * @layer0: layer0 position | 393 | * @layer0: layer0 position |
| 485 | * @layer1: layer1 position. Unused if n_layers < 2 | 394 | * @layer1: layer1 position. Unused if n_layers < 2 |
| 486 | * @layer2: layer2 position. Unused if n_layers < 3 | 395 | * @layer2: layer2 position. Unused if n_layers < 3 |
| 487 | * | 396 | * |
| 488 | * For 1 layer, this macro returns &var[layer0] - &var | 397 | * For 1 layer, this macro returns "var[layer0] - var"; |
| 398 | * | ||
| 489 | * For 2 layers, this macro is similar to allocate a bi-dimensional array | 399 | * For 2 layers, this macro is similar to allocate a bi-dimensional array |
| 490 | * and to return "&var[layer0][layer1] - &var" | 400 | * and to return "var[layer0][layer1] - var"; |
| 401 | * | ||
| 491 | * For 3 layers, this macro is similar to allocate a tri-dimensional array | 402 | * For 3 layers, this macro is similar to allocate a tri-dimensional array |
| 492 | * and to return "&var[layer0][layer1][layer2] - &var" | 403 | * and to return "var[layer0][layer1][layer2] - var". |
| 493 | * | 404 | * |
| 494 | * A loop could be used here to make it more generic, but, as we only have | 405 | * A loop could be used here to make it more generic, but, as we only have |
| 495 | * 3 layers, this is a little faster. | 406 | * 3 layers, this is a little faster. |
| 407 | * | ||
| 496 | * By design, layers can never be 0 or more than 3. If that ever happens, | 408 | * By design, layers can never be 0 or more than 3. If that ever happens, |
| 497 | * a NULL is returned, causing an OOPS during the memory allocation routine, | 409 | * a NULL is returned, causing an OOPS during the memory allocation routine, |
| 498 | * which would point to the developer that he's doing something wrong. | 410 |
| @@ -519,16 +431,18 @@ struct edac_mc_layer { | |||
| 519 | * were allocated for each layer | 431 | * were allocated for each layer |
| 520 | * @var: name of the var where we want to get the pointer | 432 | * @var: name of the var where we want to get the pointer |
| 521 | * (like mci->dimms) | 433 | * (like mci->dimms) |
| 522 | * @n_layers: Number of layers at the @layers array | 434 | * @nlayers: Number of layers at the @layers array |
| 523 | * @layer0: layer0 position | 435 | * @layer0: layer0 position |
| 524 | * @layer1: layer1 position. Unused if n_layers < 2 | 436 | * @layer1: layer1 position. Unused if n_layers < 2 |
| 525 | * @layer2: layer2 position. Unused if n_layers < 3 | 437 | * @layer2: layer2 position. Unused if n_layers < 3 |
| 526 | * | 438 | * |
| 527 | * For 1 layer, this macro returns &var[layer0] | 439 | * For 1 layer, this macro returns "var[layer0]"; |
| 440 | * | ||
| 528 | * For 2 layers, this macro is similar to allocate a bi-dimensional array | 441 | * For 2 layers, this macro is similar to allocate a bi-dimensional array |
| 529 | * and to return "&var[layer0][layer1]" | 442 | * and to return "var[layer0][layer1]"; |
| 443 | * | ||
| 530 | * For 3 layers, this macro is similar to allocate a tri-dimensional array | 444 | * For 3 layers, this macro is similar to allocate a tri-dimensional array |
| 531 | * and to return "&var[layer0][layer1][layer2]" | 445 | * and to return "var[layer0][layer1][layer2]"; |
| 532 | */ | 446 | */ |
| 533 | #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ | 447 | #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ |
| 534 | typeof(*var) __p; \ | 448 | typeof(*var) __p; \ |
| @@ -614,7 +528,7 @@ struct errcount_attribute_data { | |||
| 614 | }; | 528 | }; |
| 615 | 529 | ||
| 616 | /** | 530 | /** |
| 617 | * edac_raw_error_desc - Raw error report structure | 531 | * struct edac_raw_error_desc - Raw error report structure |
| 618 | * @grain: minimum granularity for an error report, in bytes | 532 | * @grain: minimum granularity for an error report, in bytes |
| 619 | * @error_count: number of errors of the same type | 533 | * @error_count: number of errors of the same type |
| 620 | * @top_layer: top layer of the error (layer[0]) | 534 | * @top_layer: top layer of the error (layer[0]) |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 2d089487d2da..a07a476178cd 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -443,6 +443,22 @@ typedef struct { | |||
| 443 | #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 | 443 | #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 |
| 444 | #define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 | 444 | #define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 |
| 445 | 445 | ||
| 446 | typedef struct { | ||
| 447 | u32 version; | ||
| 448 | u32 get; | ||
| 449 | u32 set; | ||
| 450 | u32 del; | ||
| 451 | u32 get_all; | ||
| 452 | } apple_properties_protocol_32_t; | ||
| 453 | |||
| 454 | typedef struct { | ||
| 455 | u64 version; | ||
| 456 | u64 get; | ||
| 457 | u64 set; | ||
| 458 | u64 del; | ||
| 459 | u64 get_all; | ||
| 460 | } apple_properties_protocol_64_t; | ||
| 461 | |||
| 446 | /* | 462 | /* |
| 447 | * Types and defines for EFI ResetSystem | 463 | * Types and defines for EFI ResetSystem |
| 448 | */ | 464 | */ |
| @@ -589,8 +605,10 @@ void efi_native_runtime_setup(void); | |||
| 589 | #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) | 605 | #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) |
| 590 | #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) | 606 | #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) |
| 591 | #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) | 607 | #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) |
| 608 | #define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) | ||
| 592 | #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) | 609 | #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) |
| 593 | #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) | 610 | #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) |
| 611 | #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) | ||
| 594 | 612 | ||
| 595 | /* | 613 | /* |
| 596 | * This GUID is used to pass to the kernel proper the struct screen_info | 614 | * This GUID is used to pass to the kernel proper the struct screen_info |
| @@ -599,6 +617,7 @@ void efi_native_runtime_setup(void); | |||
| 599 | */ | 617 | */ |
| 600 | #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) | 618 | #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) |
| 601 | #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) | 619 | #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) |
| 620 | #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) | ||
| 602 | 621 | ||
| 603 | typedef struct { | 622 | typedef struct { |
| 604 | efi_guid_t guid; | 623 | efi_guid_t guid; |
| @@ -872,6 +891,7 @@ extern struct efi { | |||
| 872 | unsigned long esrt; /* ESRT table */ | 891 | unsigned long esrt; /* ESRT table */ |
| 873 | unsigned long properties_table; /* properties table */ | 892 | unsigned long properties_table; /* properties table */ |
| 874 | unsigned long mem_attr_table; /* memory attributes table */ | 893 | unsigned long mem_attr_table; /* memory attributes table */ |
| 894 | unsigned long rng_seed; /* UEFI firmware random seed */ | ||
| 875 | efi_get_time_t *get_time; | 895 | efi_get_time_t *get_time; |
| 876 | efi_set_time_t *set_time; | 896 | efi_set_time_t *set_time; |
| 877 | efi_get_wakeup_time_t *get_wakeup_time; | 897 | efi_get_wakeup_time_t *get_wakeup_time; |
| @@ -1145,6 +1165,26 @@ struct efi_generic_dev_path { | |||
| 1145 | u16 length; | 1165 | u16 length; |
| 1146 | } __attribute ((packed)); | 1166 | } __attribute ((packed)); |
| 1147 | 1167 | ||
| 1168 | struct efi_dev_path { | ||
| 1169 | u8 type; /* can be replaced with unnamed */ | ||
| 1170 | u8 sub_type; /* struct efi_generic_dev_path; */ | ||
| 1171 | u16 length; /* once we've moved to -std=c11 */ | ||
| 1172 | union { | ||
| 1173 | struct { | ||
| 1174 | u32 hid; | ||
| 1175 | u32 uid; | ||
| 1176 | } acpi; | ||
| 1177 | struct { | ||
| 1178 | u8 fn; | ||
| 1179 | u8 dev; | ||
| 1180 | } pci; | ||
| 1181 | }; | ||
| 1182 | } __attribute ((packed)); | ||
| 1183 | |||
| 1184 | #if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER) | ||
| 1185 | struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len); | ||
| 1186 | #endif | ||
| 1187 | |||
| 1148 | static inline void memrange_efi_to_native(u64 *addr, u64 *npages) | 1188 | static inline void memrange_efi_to_native(u64 *addr, u64 *npages) |
| 1149 | { | 1189 | { |
| 1150 | *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); | 1190 | *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); |
| @@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, | |||
| 1493 | struct efi_boot_memmap *map, | 1533 | struct efi_boot_memmap *map, |
| 1494 | void *priv, | 1534 | void *priv, |
| 1495 | efi_exit_boot_map_processing priv_func); | 1535 | efi_exit_boot_map_processing priv_func); |
| 1536 | |||
| 1537 | struct linux_efi_random_seed { | ||
| 1538 | u32 size; | ||
| 1539 | u8 bits[]; | ||
| 1540 | }; | ||
| 1541 | |||
| 1496 | #endif /* _LINUX_EFI_H */ | 1542 | #endif /* _LINUX_EFI_H */ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index e7f358d2e5fc..b276e9ef0e0b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); | |||
| 30 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); | 30 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); |
| 31 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); | 31 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); |
| 32 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); | 32 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); |
| 33 | typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); | 33 | typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); |
| 34 | 34 | ||
| 35 | typedef void (elevator_init_icq_fn) (struct io_cq *); | 35 | typedef void (elevator_init_icq_fn) (struct io_cq *); |
| 36 | typedef void (elevator_exit_icq_fn) (struct io_cq *); | 36 | typedef void (elevator_exit_icq_fn) (struct io_cq *); |
| @@ -108,6 +108,11 @@ struct elevator_type | |||
| 108 | 108 | ||
| 109 | #define ELV_HASH_BITS 6 | 109 | #define ELV_HASH_BITS 6 |
| 110 | 110 | ||
| 111 | void elv_rqhash_del(struct request_queue *q, struct request *rq); | ||
| 112 | void elv_rqhash_add(struct request_queue *q, struct request *rq); | ||
| 113 | void elv_rqhash_reposition(struct request_queue *q, struct request *rq); | ||
| 114 | struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); | ||
| 115 | |||
| 111 | /* | 116 | /* |
| 112 | * each queue has an elevator_queue associated with it | 117 | * each queue has an elevator_queue associated with it |
| 113 | */ | 118 | */ |
| @@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request | |||
| 139 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 144 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
| 140 | extern int elv_register_queue(struct request_queue *q); | 145 | extern int elv_register_queue(struct request_queue *q); |
| 141 | extern void elv_unregister_queue(struct request_queue *q); | 146 | extern void elv_unregister_queue(struct request_queue *q); |
| 142 | extern int elv_may_queue(struct request_queue *, int, int); | 147 | extern int elv_may_queue(struct request_queue *, unsigned int); |
| 143 | extern void elv_completed_request(struct request_queue *, struct request *); | 148 | extern void elv_completed_request(struct request_queue *, struct request *); |
| 144 | extern int elv_set_request(struct request_queue *q, struct request *rq, | 149 | extern int elv_set_request(struct request_queue *q, struct request *rq, |
| 145 | struct bio *bio, gfp_t gfp_mask); | 150 | struct bio *bio, gfp_t gfp_mask); |
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 422630b8e588..cea41a124a80 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
| @@ -52,10 +52,17 @@ | |||
| 52 | 52 | ||
| 53 | #define VERSION_LEN 256 | 53 | #define VERSION_LEN 256 |
| 54 | #define MAX_VOLUME_NAME 512 | 54 | #define MAX_VOLUME_NAME 512 |
| 55 | #define MAX_PATH_LEN 64 | ||
| 56 | #define MAX_DEVICES 8 | ||
| 55 | 57 | ||
| 56 | /* | 58 | /* |
| 57 | * For superblock | 59 | * For superblock |
| 58 | */ | 60 | */ |
| 61 | struct f2fs_device { | ||
| 62 | __u8 path[MAX_PATH_LEN]; | ||
| 63 | __le32 total_segments; | ||
| 64 | } __packed; | ||
| 65 | |||
| 59 | struct f2fs_super_block { | 66 | struct f2fs_super_block { |
| 60 | __le32 magic; /* Magic Number */ | 67 | __le32 magic; /* Magic Number */ |
| 61 | __le16 major_ver; /* Major Version */ | 68 | __le16 major_ver; /* Major Version */ |
| @@ -94,7 +101,8 @@ struct f2fs_super_block { | |||
| 94 | __le32 feature; /* defined features */ | 101 | __le32 feature; /* defined features */ |
| 95 | __u8 encryption_level; /* versioning level for encryption */ | 102 | __u8 encryption_level; /* versioning level for encryption */ |
| 96 | __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ | 103 | __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ |
| 97 | __u8 reserved[871]; /* valid reserved region */ | 104 | struct f2fs_device devs[MAX_DEVICES]; /* device list */ |
| 105 | __u8 reserved[327]; /* valid reserved region */ | ||
| 98 | } __packed; | 106 | } __packed; |
| 99 | 107 | ||
| 100 | /* | 108 | /* |
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h index 9a79f0106da1..32c22cfb238b 100644 --- a/include/linux/fddidevice.h +++ b/include/linux/fddidevice.h | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | 26 | ||
| 27 | #ifdef __KERNEL__ | 27 | #ifdef __KERNEL__ |
| 28 | __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); | 28 | __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); |
| 29 | int fddi_change_mtu(struct net_device *dev, int new_mtu); | ||
| 30 | struct net_device *alloc_fddidev(int sizeof_priv); | 29 | struct net_device *alloc_fddidev(int sizeof_priv); |
| 31 | #endif | 30 | #endif |
| 32 | 31 | ||
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h deleted file mode 100644 index a44794e508df..000000000000 --- a/include/linux/fence-array.h +++ /dev/null | |||
| @@ -1,83 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * fence-array: aggregates fence to be waited together | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 Collabora Ltd | ||
| 5 | * Copyright (C) 2016 Advanced Micro Devices, Inc. | ||
| 6 | * Authors: | ||
| 7 | * Gustavo Padovan <gustavo@padovan.org> | ||
| 8 | * Christian König <christian.koenig@amd.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms of the GNU General Public License version 2 as published by | ||
| 12 | * the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 17 | * more details. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #ifndef __LINUX_FENCE_ARRAY_H | ||
| 21 | #define __LINUX_FENCE_ARRAY_H | ||
| 22 | |||
| 23 | #include <linux/fence.h> | ||
| 24 | |||
| 25 | /** | ||
| 26 | * struct fence_array_cb - callback helper for fence array | ||
| 27 | * @cb: fence callback structure for signaling | ||
| 28 | * @array: reference to the parent fence array object | ||
| 29 | */ | ||
| 30 | struct fence_array_cb { | ||
| 31 | struct fence_cb cb; | ||
| 32 | struct fence_array *array; | ||
| 33 | }; | ||
| 34 | |||
| 35 | /** | ||
| 36 | * struct fence_array - fence to represent an array of fences | ||
| 37 | * @base: fence base class | ||
| 38 | * @lock: spinlock for fence handling | ||
| 39 | * @num_fences: number of fences in the array | ||
| 40 | * @num_pending: fences in the array still pending | ||
| 41 | * @fences: array of the fences | ||
| 42 | */ | ||
| 43 | struct fence_array { | ||
| 44 | struct fence base; | ||
| 45 | |||
| 46 | spinlock_t lock; | ||
| 47 | unsigned num_fences; | ||
| 48 | atomic_t num_pending; | ||
| 49 | struct fence **fences; | ||
| 50 | }; | ||
| 51 | |||
| 52 | extern const struct fence_ops fence_array_ops; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * fence_is_array - check if a fence is from the array subsclass | ||
| 56 | * | ||
| 57 | * Return true if it is a fence_array and false otherwise. | ||
| 58 | */ | ||
| 59 | static inline bool fence_is_array(struct fence *fence) | ||
| 60 | { | ||
| 61 | return fence->ops == &fence_array_ops; | ||
| 62 | } | ||
| 63 | |||
| 64 | /** | ||
| 65 | * to_fence_array - cast a fence to a fence_array | ||
| 66 | * @fence: fence to cast to a fence_array | ||
| 67 | * | ||
| 68 | * Returns NULL if the fence is not a fence_array, | ||
| 69 | * or the fence_array otherwise. | ||
| 70 | */ | ||
| 71 | static inline struct fence_array *to_fence_array(struct fence *fence) | ||
| 72 | { | ||
| 73 | if (fence->ops != &fence_array_ops) | ||
| 74 | return NULL; | ||
| 75 | |||
| 76 | return container_of(fence, struct fence_array, base); | ||
| 77 | } | ||
| 78 | |||
| 79 | struct fence_array *fence_array_create(int num_fences, struct fence **fences, | ||
| 80 | u64 context, unsigned seqno, | ||
| 81 | bool signal_on_any); | ||
| 82 | |||
| 83 | #endif /* __LINUX_FENCE_ARRAY_H */ | ||
diff --git a/include/linux/fence.h b/include/linux/fence.h deleted file mode 100644 index 0d763053f97a..000000000000 --- a/include/linux/fence.h +++ /dev/null | |||
| @@ -1,378 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Fence mechanism for dma-buf to allow for asynchronous dma access | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Canonical Ltd | ||
| 5 | * Copyright (C) 2012 Texas Instruments | ||
| 6 | * | ||
| 7 | * Authors: | ||
| 8 | * Rob Clark <robdclark@gmail.com> | ||
| 9 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify it | ||
| 12 | * under the terms of the GNU General Public License version 2 as published by | ||
| 13 | * the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 16 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 18 | * more details. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __LINUX_FENCE_H | ||
| 22 | #define __LINUX_FENCE_H | ||
| 23 | |||
| 24 | #include <linux/err.h> | ||
| 25 | #include <linux/wait.h> | ||
| 26 | #include <linux/list.h> | ||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/kref.h> | ||
| 29 | #include <linux/sched.h> | ||
| 30 | #include <linux/printk.h> | ||
| 31 | #include <linux/rcupdate.h> | ||
| 32 | |||
| 33 | struct fence; | ||
| 34 | struct fence_ops; | ||
| 35 | struct fence_cb; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * struct fence - software synchronization primitive | ||
| 39 | * @refcount: refcount for this fence | ||
| 40 | * @ops: fence_ops associated with this fence | ||
| 41 | * @rcu: used for releasing fence with kfree_rcu | ||
| 42 | * @cb_list: list of all callbacks to call | ||
| 43 | * @lock: spin_lock_irqsave used for locking | ||
| 44 | * @context: execution context this fence belongs to, returned by | ||
| 45 | * fence_context_alloc() | ||
| 46 | * @seqno: the sequence number of this fence inside the execution context, | ||
| 47 | * can be compared to decide which fence would be signaled later. | ||
| 48 | * @flags: A mask of FENCE_FLAG_* defined below | ||
| 49 | * @timestamp: Timestamp when the fence was signaled. | ||
| 50 | * @status: Optional, only valid if < 0, must be set before calling | ||
| 51 | * fence_signal, indicates that the fence has completed with an error. | ||
| 52 | * | ||
| 53 | * the flags member must be manipulated and read using the appropriate | ||
| 54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | ||
| 55 | * of the time. | ||
| 56 | * | ||
| 57 | * FENCE_FLAG_SIGNALED_BIT - fence is already signaled | ||
| 58 | * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* | ||
| 59 | * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the | ||
| 60 | * implementer of the fence for its own purposes. Can be used in different | ||
| 61 | * ways by different fence implementers, so do not rely on this. | ||
| 62 | * | ||
| 63 | * Since atomic bitops are used, this is not guaranteed to be the case. | ||
| 64 | * Particularly, if the bit was set, but fence_signal was called right | ||
| 65 | * before this bit was set, it would have been able to set the | ||
| 66 | * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. | ||
| 67 | * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting | ||
| 68 | * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that | ||
| 69 | * after fence_signal was called, any enable_signaling call will have either | ||
| 70 | * been completed, or never called at all. | ||
| 71 | */ | ||
| 72 | struct fence { | ||
| 73 | struct kref refcount; | ||
| 74 | const struct fence_ops *ops; | ||
| 75 | struct rcu_head rcu; | ||
| 76 | struct list_head cb_list; | ||
| 77 | spinlock_t *lock; | ||
| 78 | u64 context; | ||
| 79 | unsigned seqno; | ||
| 80 | unsigned long flags; | ||
| 81 | ktime_t timestamp; | ||
| 82 | int status; | ||
| 83 | }; | ||
| 84 | |||
| 85 | enum fence_flag_bits { | ||
| 86 | FENCE_FLAG_SIGNALED_BIT, | ||
| 87 | FENCE_FLAG_ENABLE_SIGNAL_BIT, | ||
| 88 | FENCE_FLAG_USER_BITS, /* must always be last member */ | ||
| 89 | }; | ||
| 90 | |||
| 91 | typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); | ||
| 92 | |||
| 93 | /** | ||
| 94 | * struct fence_cb - callback for fence_add_callback | ||
| 95 | * @node: used by fence_add_callback to append this struct to fence::cb_list | ||
| 96 | * @func: fence_func_t to call | ||
| 97 | * | ||
| 98 | * This struct will be initialized by fence_add_callback, additional | ||
| 99 | * data can be passed along by embedding fence_cb in another struct. | ||
| 100 | */ | ||
| 101 | struct fence_cb { | ||
| 102 | struct list_head node; | ||
| 103 | fence_func_t func; | ||
| 104 | }; | ||
| 105 | |||
| 106 | /** | ||
| 107 | * struct fence_ops - operations implemented for fence | ||
| 108 | * @get_driver_name: returns the driver name. | ||
| 109 | * @get_timeline_name: return the name of the context this fence belongs to. | ||
| 110 | * @enable_signaling: enable software signaling of fence. | ||
| 111 | * @signaled: [optional] peek whether the fence is signaled, can be null. | ||
| 112 | * @wait: custom wait implementation, or fence_default_wait. | ||
| 113 | * @release: [optional] called on destruction of fence, can be null | ||
| 114 | * @fill_driver_data: [optional] callback to fill in free-form debug info | ||
| 115 | * Returns amount of bytes filled, or -errno. | ||
| 116 | * @fence_value_str: [optional] fills in the value of the fence as a string | ||
| 117 | * @timeline_value_str: [optional] fills in the current value of the timeline | ||
| 118 | * as a string | ||
| 119 | * | ||
| 120 | * Notes on enable_signaling: | ||
| 121 | * For fence implementations that have the capability for hw->hw | ||
| 122 | * signaling, they can implement this op to enable the necessary | ||
| 123 | * irqs, or insert commands into cmdstream, etc. This is called | ||
| 124 | * in the first wait() or add_callback() path to let the fence | ||
| 125 | * implementation know that there is another driver waiting on | ||
| 126 | * the signal (ie. hw->sw case). | ||
| 127 | * | ||
| 128 | * This function can be called called from atomic context, but not | ||
| 129 | * from irq context, so normal spinlocks can be used. | ||
| 130 | * | ||
| 131 | * A return value of false indicates the fence already passed, | ||
| 132 | * or some failure occurred that made it impossible to enable | ||
| 133 | * signaling. True indicates successful enabling. | ||
| 134 | * | ||
| 135 | * fence->status may be set in enable_signaling, but only when false is | ||
| 136 | * returned. | ||
| 137 | * | ||
| 138 | * Calling fence_signal before enable_signaling is called allows | ||
| 139 | * for a tiny race window in which enable_signaling is called during, | ||
| 140 | * before, or after fence_signal. To fight this, it is recommended | ||
| 141 | * that before enable_signaling returns true an extra reference is | ||
| 142 | * taken on the fence, to be released when the fence is signaled. | ||
| 143 | * This will mean fence_signal will still be called twice, but | ||
| 144 | * the second time will be a noop since it was already signaled. | ||
| 145 | * | ||
| 146 | * Notes on signaled: | ||
| 147 | * May set fence->status if returning true. | ||
| 148 | * | ||
| 149 | * Notes on wait: | ||
| 150 | * Must not be NULL, set to fence_default_wait for default implementation. | ||
| 151 | * the fence_default_wait implementation should work for any fence, as long | ||
| 152 | * as enable_signaling works correctly. | ||
| 153 | * | ||
| 154 | * Must return -ERESTARTSYS if the wait is intr = true and the wait was | ||
| 155 | * interrupted, and remaining jiffies if fence has signaled, or 0 if wait | ||
| 156 | * timed out. Can also return other error values on custom implementations, | ||
| 157 | * which should be treated as if the fence is signaled. For example a hardware | ||
| 158 | * lockup could be reported like that. | ||
| 159 | * | ||
| 160 | * Notes on release: | ||
| 161 | * Can be NULL, this function allows additional commands to run on | ||
| 162 | * destruction of the fence. Can be called from irq context. | ||
| 163 | * If pointer is set to NULL, kfree will get called instead. | ||
| 164 | */ | ||
| 165 | |||
| 166 | struct fence_ops { | ||
| 167 | const char * (*get_driver_name)(struct fence *fence); | ||
| 168 | const char * (*get_timeline_name)(struct fence *fence); | ||
| 169 | bool (*enable_signaling)(struct fence *fence); | ||
| 170 | bool (*signaled)(struct fence *fence); | ||
| 171 | signed long (*wait)(struct fence *fence, bool intr, signed long timeout); | ||
| 172 | void (*release)(struct fence *fence); | ||
| 173 | |||
| 174 | int (*fill_driver_data)(struct fence *fence, void *data, int size); | ||
| 175 | void (*fence_value_str)(struct fence *fence, char *str, int size); | ||
| 176 | void (*timeline_value_str)(struct fence *fence, char *str, int size); | ||
| 177 | }; | ||
| 178 | |||
| 179 | void fence_init(struct fence *fence, const struct fence_ops *ops, | ||
| 180 | spinlock_t *lock, u64 context, unsigned seqno); | ||
| 181 | |||
| 182 | void fence_release(struct kref *kref); | ||
| 183 | void fence_free(struct fence *fence); | ||
| 184 | |||
| 185 | /** | ||
| 186 | * fence_get - increases refcount of the fence | ||
| 187 | * @fence: [in] fence to increase refcount of | ||
| 188 | * | ||
| 189 | * Returns the same fence, with refcount increased by 1. | ||
| 190 | */ | ||
| 191 | static inline struct fence *fence_get(struct fence *fence) | ||
| 192 | { | ||
| 193 | if (fence) | ||
| 194 | kref_get(&fence->refcount); | ||
| 195 | return fence; | ||
| 196 | } | ||
| 197 | |||
| 198 | /** | ||
| 199 | * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock | ||
| 200 | * @fence: [in] fence to increase refcount of | ||
| 201 | * | ||
| 202 | * Function returns NULL if no refcount could be obtained, or the fence. | ||
| 203 | */ | ||
| 204 | static inline struct fence *fence_get_rcu(struct fence *fence) | ||
| 205 | { | ||
| 206 | if (kref_get_unless_zero(&fence->refcount)) | ||
| 207 | return fence; | ||
| 208 | else | ||
| 209 | return NULL; | ||
| 210 | } | ||
| 211 | |||
| 212 | /** | ||
| 213 | * fence_put - decreases refcount of the fence | ||
| 214 | * @fence: [in] fence to reduce refcount of | ||
| 215 | */ | ||
| 216 | static inline void fence_put(struct fence *fence) | ||
| 217 | { | ||
| 218 | if (fence) | ||
| 219 | kref_put(&fence->refcount, fence_release); | ||
| 220 | } | ||
| 221 | |||
| 222 | int fence_signal(struct fence *fence); | ||
| 223 | int fence_signal_locked(struct fence *fence); | ||
| 224 | signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); | ||
| 225 | int fence_add_callback(struct fence *fence, struct fence_cb *cb, | ||
| 226 | fence_func_t func); | ||
| 227 | bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); | ||
| 228 | void fence_enable_sw_signaling(struct fence *fence); | ||
| 229 | |||
| 230 | /** | ||
| 231 | * fence_is_signaled_locked - Return an indication if the fence is signaled yet. | ||
| 232 | * @fence: [in] the fence to check | ||
| 233 | * | ||
| 234 | * Returns true if the fence was already signaled, false if not. Since this | ||
| 235 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
| 236 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | ||
| 237 | * haven't been called before. | ||
| 238 | * | ||
| 239 | * This function requires fence->lock to be held. | ||
| 240 | */ | ||
| 241 | static inline bool | ||
| 242 | fence_is_signaled_locked(struct fence *fence) | ||
| 243 | { | ||
| 244 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
| 245 | return true; | ||
| 246 | |||
| 247 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
| 248 | fence_signal_locked(fence); | ||
| 249 | return true; | ||
| 250 | } | ||
| 251 | |||
| 252 | return false; | ||
| 253 | } | ||
| 254 | |||
| 255 | /** | ||
| 256 | * fence_is_signaled - Return an indication if the fence is signaled yet. | ||
| 257 | * @fence: [in] the fence to check | ||
| 258 | * | ||
| 259 | * Returns true if the fence was already signaled, false if not. Since this | ||
| 260 | * function doesn't enable signaling, it is not guaranteed to ever return | ||
| 261 | * true if fence_add_callback, fence_wait or fence_enable_sw_signaling | ||
| 262 | * haven't been called before. | ||
| 263 | * | ||
| 264 | * It's recommended for seqno fences to call fence_signal when the | ||
| 265 | * operation is complete, it makes it possible to prevent issues from | ||
| 266 | * wraparound between time of issue and time of use by checking the return | ||
| 267 | * value of this function before calling hardware-specific wait instructions. | ||
| 268 | */ | ||
| 269 | static inline bool | ||
| 270 | fence_is_signaled(struct fence *fence) | ||
| 271 | { | ||
| 272 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | ||
| 273 | return true; | ||
| 274 | |||
| 275 | if (fence->ops->signaled && fence->ops->signaled(fence)) { | ||
| 276 | fence_signal(fence); | ||
| 277 | return true; | ||
| 278 | } | ||
| 279 | |||
| 280 | return false; | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 284 | * fence_is_later - return if f1 is chronologically later than f2 | ||
| 285 | * @f1: [in] the first fence from the same context | ||
| 286 | * @f2: [in] the second fence from the same context | ||
| 287 | * | ||
| 288 | * Returns true if f1 is chronologically later than f2. Both fences must be | ||
| 289 | * from the same context, since a seqno is not re-used across contexts. | ||
| 290 | */ | ||
| 291 | static inline bool fence_is_later(struct fence *f1, struct fence *f2) | ||
| 292 | { | ||
| 293 | if (WARN_ON(f1->context != f2->context)) | ||
| 294 | return false; | ||
| 295 | |||
| 296 | return (int)(f1->seqno - f2->seqno) > 0; | ||
| 297 | } | ||
| 298 | |||
| 299 | /** | ||
| 300 | * fence_later - return the chronologically later fence | ||
| 301 | * @f1: [in] the first fence from the same context | ||
| 302 | * @f2: [in] the second fence from the same context | ||
| 303 | * | ||
| 304 | * Returns NULL if both fences are signaled, otherwise the fence that would be | ||
| 305 | * signaled last. Both fences must be from the same context, since a seqno is | ||
| 306 | * not re-used across contexts. | ||
| 307 | */ | ||
| 308 | static inline struct fence *fence_later(struct fence *f1, struct fence *f2) | ||
| 309 | { | ||
| 310 | if (WARN_ON(f1->context != f2->context)) | ||
| 311 | return NULL; | ||
| 312 | |||
| 313 | /* | ||
| 314 | * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been | ||
| 315 | * set if enable_signaling wasn't called, and enabling that here is | ||
| 316 | * overkill. | ||
| 317 | */ | ||
| 318 | if (fence_is_later(f1, f2)) | ||
| 319 | return fence_is_signaled(f1) ? NULL : f1; | ||
| 320 | else | ||
| 321 | return fence_is_signaled(f2) ? NULL : f2; | ||
| 322 | } | ||
| 323 | |||
| 324 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); | ||
| 325 | signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | ||
| 326 | bool intr, signed long timeout); | ||
| 327 | |||
| 328 | /** | ||
| 329 | * fence_wait - sleep until the fence gets signaled | ||
| 330 | * @fence: [in] the fence to wait on | ||
| 331 | * @intr: [in] if true, do an interruptible wait | ||
| 332 | * | ||
| 333 | * This function will return -ERESTARTSYS if interrupted by a signal, | ||
| 334 | * or 0 if the fence was signaled. Other error values may be | ||
| 335 | * returned on custom implementations. | ||
| 336 | * | ||
| 337 | * Performs a synchronous wait on this fence. It is assumed the caller | ||
| 338 | * directly or indirectly holds a reference to the fence, otherwise the | ||
| 339 | * fence might be freed before return, resulting in undefined behavior. | ||
| 340 | */ | ||
| 341 | static inline signed long fence_wait(struct fence *fence, bool intr) | ||
| 342 | { | ||
| 343 | signed long ret; | ||
| 344 | |||
| 345 | /* Since fence_wait_timeout cannot timeout with | ||
| 346 | * MAX_SCHEDULE_TIMEOUT, only valid return values are | ||
| 347 | * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. | ||
| 348 | */ | ||
| 349 | ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); | ||
| 350 | |||
| 351 | return ret < 0 ? ret : 0; | ||
| 352 | } | ||
| 353 | |||
| 354 | u64 fence_context_alloc(unsigned num); | ||
| 355 | |||
| 356 | #define FENCE_TRACE(f, fmt, args...) \ | ||
| 357 | do { \ | ||
| 358 | struct fence *__ff = (f); \ | ||
| 359 | if (IS_ENABLED(CONFIG_FENCE_TRACE)) \ | ||
| 360 | pr_info("f %llu#%u: " fmt, \ | ||
| 361 | __ff->context, __ff->seqno, ##args); \ | ||
| 362 | } while (0) | ||
| 363 | |||
| 364 | #define FENCE_WARN(f, fmt, args...) \ | ||
| 365 | do { \ | ||
| 366 | struct fence *__ff = (f); \ | ||
| 367 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | ||
| 368 | ##args); \ | ||
| 369 | } while (0) | ||
| 370 | |||
| 371 | #define FENCE_ERR(f, fmt, args...) \ | ||
| 372 | do { \ | ||
| 373 | struct fence *__ff = (f); \ | ||
| 374 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | ||
| 375 | ##args); \ | ||
| 376 | } while (0) | ||
| 377 | |||
| 378 | #endif /* __LINUX_FENCE_H */ | ||
diff --git a/include/linux/file.h b/include/linux/file.h index 7444f5feda12..61eb82cbafba 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
| @@ -17,7 +17,7 @@ struct file_operations; | |||
| 17 | struct vfsmount; | 17 | struct vfsmount; |
| 18 | struct dentry; | 18 | struct dentry; |
| 19 | struct path; | 19 | struct path; |
| 20 | extern struct file *alloc_file(struct path *, fmode_t mode, | 20 | extern struct file *alloc_file(const struct path *, fmode_t mode, |
| 21 | const struct file_operations *fop); | 21 | const struct file_operations *fop); |
| 22 | 22 | ||
| 23 | static inline void fput_light(struct file *file, int fput_needed) | 23 | static inline void fput_light(struct file *file, int fput_needed) |
diff --git a/include/linux/filter.h b/include/linux/filter.h index 1f09c521adfe..702314253797 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
| 15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
| 16 | #include <linux/capability.h> | 16 | #include <linux/capability.h> |
| 17 | #include <linux/cryptohash.h> | ||
| 17 | 18 | ||
| 18 | #include <net/sch_generic.h> | 19 | #include <net/sch_generic.h> |
| 19 | 20 | ||
| @@ -402,14 +403,16 @@ struct bpf_prog { | |||
| 402 | u16 jited:1, /* Is our filter JIT'ed? */ | 403 | u16 jited:1, /* Is our filter JIT'ed? */ |
| 403 | gpl_compatible:1, /* Is filter GPL compatible? */ | 404 | gpl_compatible:1, /* Is filter GPL compatible? */ |
| 404 | cb_access:1, /* Is control block accessed? */ | 405 | cb_access:1, /* Is control block accessed? */ |
| 405 | dst_needed:1; /* Do we need dst entry? */ | 406 | dst_needed:1, /* Do we need dst entry? */ |
| 407 | xdp_adjust_head:1; /* Adjusting pkt head? */ | ||
| 406 | kmemcheck_bitfield_end(meta); | 408 | kmemcheck_bitfield_end(meta); |
| 407 | u32 len; /* Number of filter blocks */ | ||
| 408 | enum bpf_prog_type type; /* Type of BPF program */ | 409 | enum bpf_prog_type type; /* Type of BPF program */ |
| 410 | u32 len; /* Number of filter blocks */ | ||
| 411 | u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ | ||
| 409 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | 412 | struct bpf_prog_aux *aux; /* Auxiliary fields */ |
| 410 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ | 413 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ |
| 411 | unsigned int (*bpf_func)(const struct sk_buff *skb, | 414 | unsigned int (*bpf_func)(const void *ctx, |
| 412 | const struct bpf_insn *filter); | 415 | const struct bpf_insn *insn); |
| 413 | /* Instructions for interpreter */ | 416 | /* Instructions for interpreter */ |
| 414 | union { | 417 | union { |
| 415 | struct sock_filter insns[0]; | 418 | struct sock_filter insns[0]; |
| @@ -435,10 +438,11 @@ struct bpf_skb_data_end { | |||
| 435 | struct xdp_buff { | 438 | struct xdp_buff { |
| 436 | void *data; | 439 | void *data; |
| 437 | void *data_end; | 440 | void *data_end; |
| 441 | void *data_hard_start; | ||
| 438 | }; | 442 | }; |
| 439 | 443 | ||
| 440 | /* compute the linear packet data range [data, data_end) which | 444 | /* compute the linear packet data range [data, data_end) which |
| 441 | * will be accessed by cls_bpf and act_bpf programs | 445 | * will be accessed by cls_bpf, act_bpf and lwt programs |
| 442 | */ | 446 | */ |
| 443 | static inline void bpf_compute_data_end(struct sk_buff *skb) | 447 | static inline void bpf_compute_data_end(struct sk_buff *skb) |
| 444 | { | 448 | { |
| @@ -498,16 +502,27 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | |||
| 498 | return BPF_PROG_RUN(prog, skb); | 502 | return BPF_PROG_RUN(prog, skb); |
| 499 | } | 503 | } |
| 500 | 504 | ||
| 501 | static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, | 505 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, |
| 502 | struct xdp_buff *xdp) | 506 | struct xdp_buff *xdp) |
| 503 | { | 507 | { |
| 504 | u32 ret; | 508 | /* Caller needs to hold rcu_read_lock() (!), otherwise program |
| 509 | * can be released while still running, or map elements could be | ||
| 510 | * freed early while still having concurrent users. XDP fastpath | ||
| 511 | * already takes rcu_read_lock() when fetching the program, so | ||
| 512 | * it's not necessary here anymore. | ||
| 513 | */ | ||
| 514 | return BPF_PROG_RUN(prog, xdp); | ||
| 515 | } | ||
| 505 | 516 | ||
| 506 | rcu_read_lock(); | 517 | static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) |
| 507 | ret = BPF_PROG_RUN(prog, (void *)xdp); | 518 | { |
| 508 | rcu_read_unlock(); | 519 | return prog->len * sizeof(struct bpf_insn); |
| 520 | } | ||
| 509 | 521 | ||
| 510 | return ret; | 522 | static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog) |
| 523 | { | ||
| 524 | return round_up(bpf_prog_insn_size(prog) + | ||
| 525 | sizeof(__be64) + 1, SHA_MESSAGE_BYTES); | ||
| 511 | } | 526 | } |
| 512 | 527 | ||
| 513 | static inline unsigned int bpf_prog_size(unsigned int proglen) | 528 | static inline unsigned int bpf_prog_size(unsigned int proglen) |
| @@ -590,11 +605,12 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); | |||
| 590 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | 605 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
| 591 | 606 | ||
| 592 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); | 607 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); |
| 593 | bool bpf_helper_changes_skb_data(void *func); | 608 | bool bpf_helper_changes_pkt_data(void *func); |
| 594 | 609 | ||
| 595 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 610 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, |
| 596 | const struct bpf_insn *patch, u32 len); | 611 | const struct bpf_insn *patch, u32 len); |
| 597 | void bpf_warn_invalid_xdp_action(u32 act); | 612 | void bpf_warn_invalid_xdp_action(u32 act); |
| 613 | void bpf_warn_invalid_xdp_buffer(void); | ||
| 598 | 614 | ||
| 599 | #ifdef CONFIG_BPF_JIT | 615 | #ifdef CONFIG_BPF_JIT |
| 600 | extern int bpf_jit_enable; | 616 | extern int bpf_jit_enable; |
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h new file mode 100644 index 000000000000..dba6e3c697c7 --- /dev/null +++ b/include/linux/fpga/fpga-bridge.h | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | #include <linux/device.h> | ||
| 2 | #include <linux/fpga/fpga-mgr.h> | ||
| 3 | |||
| 4 | #ifndef _LINUX_FPGA_BRIDGE_H | ||
| 5 | #define _LINUX_FPGA_BRIDGE_H | ||
| 6 | |||
| 7 | struct fpga_bridge; | ||
| 8 | |||
| 9 | /** | ||
| 10 | * struct fpga_bridge_ops - ops for low level FPGA bridge drivers | ||
| 11 | * @enable_show: returns the FPGA bridge's status | ||
| 12 | * @enable_set: set a FPGA bridge as enabled or disabled | ||
| 13 | * @fpga_bridge_remove: set FPGA into a specific state during driver remove | ||
| 14 | */ | ||
| 15 | struct fpga_bridge_ops { | ||
| 16 | int (*enable_show)(struct fpga_bridge *bridge); | ||
| 17 | int (*enable_set)(struct fpga_bridge *bridge, bool enable); | ||
| 18 | void (*fpga_bridge_remove)(struct fpga_bridge *bridge); | ||
| 19 | }; | ||
| 20 | |||
| 21 | /** | ||
| 22 | * struct fpga_bridge - FPGA bridge structure | ||
| 23 | * @name: name of low level FPGA bridge | ||
| 24 | * @dev: FPGA bridge device | ||
| 25 | * @mutex: enforces exclusive reference to bridge | ||
| 26 | * @br_ops: pointer to struct of FPGA bridge ops | ||
| 27 | * @info: fpga image specific information | ||
| 28 | * @node: FPGA bridge list node | ||
| 29 | * @priv: low level driver private date | ||
| 30 | */ | ||
| 31 | struct fpga_bridge { | ||
| 32 | const char *name; | ||
| 33 | struct device dev; | ||
| 34 | struct mutex mutex; /* for exclusive reference to bridge */ | ||
| 35 | const struct fpga_bridge_ops *br_ops; | ||
| 36 | struct fpga_image_info *info; | ||
| 37 | struct list_head node; | ||
| 38 | void *priv; | ||
| 39 | }; | ||
| 40 | |||
| 41 | #define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) | ||
| 42 | |||
| 43 | struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, | ||
| 44 | struct fpga_image_info *info); | ||
| 45 | void fpga_bridge_put(struct fpga_bridge *bridge); | ||
| 46 | int fpga_bridge_enable(struct fpga_bridge *bridge); | ||
| 47 | int fpga_bridge_disable(struct fpga_bridge *bridge); | ||
| 48 | |||
| 49 | int fpga_bridges_enable(struct list_head *bridge_list); | ||
| 50 | int fpga_bridges_disable(struct list_head *bridge_list); | ||
| 51 | void fpga_bridges_put(struct list_head *bridge_list); | ||
| 52 | int fpga_bridge_get_to_list(struct device_node *np, | ||
| 53 | struct fpga_image_info *info, | ||
| 54 | struct list_head *bridge_list); | ||
| 55 | |||
| 56 | int fpga_bridge_register(struct device *dev, const char *name, | ||
| 57 | const struct fpga_bridge_ops *br_ops, void *priv); | ||
| 58 | void fpga_bridge_unregister(struct device *dev); | ||
| 59 | |||
| 60 | #endif /* _LINUX_FPGA_BRIDGE_H */ | ||
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 0940bf45e2f2..16551d5eac36 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h | |||
| @@ -65,11 +65,26 @@ enum fpga_mgr_states { | |||
| 65 | /* | 65 | /* |
| 66 | * FPGA Manager flags | 66 | * FPGA Manager flags |
| 67 | * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported | 67 | * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported |
| 68 | * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting | ||
| 68 | */ | 69 | */ |
| 69 | #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) | 70 | #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) |
| 71 | #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) | ||
| 72 | |||
| 73 | /** | ||
| 74 | * struct fpga_image_info - information specific to a FPGA image | ||
| 75 | * @flags: boolean flags as defined above | ||
| 76 | * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) | ||
| 77 | * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) | ||
| 78 | */ | ||
| 79 | struct fpga_image_info { | ||
| 80 | u32 flags; | ||
| 81 | u32 enable_timeout_us; | ||
| 82 | u32 disable_timeout_us; | ||
| 83 | }; | ||
| 70 | 84 | ||
| 71 | /** | 85 | /** |
| 72 | * struct fpga_manager_ops - ops for low level fpga manager drivers | 86 | * struct fpga_manager_ops - ops for low level fpga manager drivers |
| 87 | * @initial_header_size: Maximum number of bytes that should be passed into write_init | ||
| 73 | * @state: returns an enum value of the FPGA's state | 88 | * @state: returns an enum value of the FPGA's state |
| 74 | * @write_init: prepare the FPGA to receive confuration data | 89 | * @write_init: prepare the FPGA to receive confuration data |
| 75 | * @write: write count bytes of configuration data to the FPGA | 90 | * @write: write count bytes of configuration data to the FPGA |
| @@ -81,11 +96,14 @@ enum fpga_mgr_states { | |||
| 81 | * called, so leaving them out is fine. | 96 | * called, so leaving them out is fine. |
| 82 | */ | 97 | */ |
| 83 | struct fpga_manager_ops { | 98 | struct fpga_manager_ops { |
| 99 | size_t initial_header_size; | ||
| 84 | enum fpga_mgr_states (*state)(struct fpga_manager *mgr); | 100 | enum fpga_mgr_states (*state)(struct fpga_manager *mgr); |
| 85 | int (*write_init)(struct fpga_manager *mgr, u32 flags, | 101 | int (*write_init)(struct fpga_manager *mgr, |
| 102 | struct fpga_image_info *info, | ||
| 86 | const char *buf, size_t count); | 103 | const char *buf, size_t count); |
| 87 | int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); | 104 | int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); |
| 88 | int (*write_complete)(struct fpga_manager *mgr, u32 flags); | 105 | int (*write_complete)(struct fpga_manager *mgr, |
| 106 | struct fpga_image_info *info); | ||
| 89 | void (*fpga_remove)(struct fpga_manager *mgr); | 107 | void (*fpga_remove)(struct fpga_manager *mgr); |
| 90 | }; | 108 | }; |
| 91 | 109 | ||
| @@ -109,14 +127,17 @@ struct fpga_manager { | |||
| 109 | 127 | ||
| 110 | #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) | 128 | #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) |
| 111 | 129 | ||
| 112 | int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, | 130 | int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, |
| 113 | const char *buf, size_t count); | 131 | const char *buf, size_t count); |
| 114 | 132 | ||
| 115 | int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, | 133 | int fpga_mgr_firmware_load(struct fpga_manager *mgr, |
| 134 | struct fpga_image_info *info, | ||
| 116 | const char *image_name); | 135 | const char *image_name); |
| 117 | 136 | ||
| 118 | struct fpga_manager *of_fpga_mgr_get(struct device_node *node); | 137 | struct fpga_manager *of_fpga_mgr_get(struct device_node *node); |
| 119 | 138 | ||
| 139 | struct fpga_manager *fpga_mgr_get(struct device *dev); | ||
| 140 | |||
| 120 | void fpga_mgr_put(struct fpga_manager *mgr); | 141 | void fpga_mgr_put(struct fpga_manager *mgr); |
| 121 | 142 | ||
| 122 | int fpga_mgr_register(struct device *dev, const char *name, | 143 | int fpga_mgr_register(struct device *dev, const char *name, |
diff --git a/include/linux/fs.h b/include/linux/fs.h index dc0478c07b2a..2ba074328894 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/uidgid.h> | 28 | #include <linux/uidgid.h> |
| 29 | #include <linux/lockdep.h> | 29 | #include <linux/lockdep.h> |
| 30 | #include <linux/percpu-rwsem.h> | 30 | #include <linux/percpu-rwsem.h> |
| 31 | #include <linux/blk_types.h> | ||
| 32 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
| 33 | #include <linux/percpu-rwsem.h> | 32 | #include <linux/percpu-rwsem.h> |
| 34 | #include <linux/delayed_call.h> | 33 | #include <linux/delayed_call.h> |
| @@ -38,6 +37,7 @@ | |||
| 38 | 37 | ||
| 39 | struct backing_dev_info; | 38 | struct backing_dev_info; |
| 40 | struct bdi_writeback; | 39 | struct bdi_writeback; |
| 40 | struct bio; | ||
| 41 | struct export_operations; | 41 | struct export_operations; |
| 42 | struct hd_geometry; | 42 | struct hd_geometry; |
| 43 | struct iovec; | 43 | struct iovec; |
| @@ -152,58 +152,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 152 | #define CHECK_IOVEC_ONLY -1 | 152 | #define CHECK_IOVEC_ONLY -1 |
| 153 | 153 | ||
| 154 | /* | 154 | /* |
| 155 | * The below are the various read and write flags that we support. Some of | ||
| 156 | * them include behavioral modifiers that send information down to the | ||
| 157 | * block layer and IO scheduler. They should be used along with a req_op. | ||
| 158 | * Terminology: | ||
| 159 | * | ||
| 160 | * The block layer uses device plugging to defer IO a little bit, in | ||
| 161 | * the hope that we will see more IO very shortly. This increases | ||
| 162 | * coalescing of adjacent IO and thus reduces the number of IOs we | ||
| 163 | * have to send to the device. It also allows for better queuing, | ||
| 164 | * if the IO isn't mergeable. If the caller is going to be waiting | ||
| 165 | * for the IO, then he must ensure that the device is unplugged so | ||
| 166 | * that the IO is dispatched to the driver. | ||
| 167 | * | ||
| 168 | * All IO is handled async in Linux. This is fine for background | ||
| 169 | * writes, but for reads or writes that someone waits for completion | ||
| 170 | * on, we want to notify the block layer and IO scheduler so that they | ||
| 171 | * know about it. That allows them to make better scheduling | ||
| 172 | * decisions. So when the below references 'sync' and 'async', it | ||
| 173 | * is referencing this priority hint. | ||
| 174 | * | ||
| 175 | * With that in mind, the available types are: | ||
| 176 | * | ||
| 177 | * READ A normal read operation. Device will be plugged. | ||
| 178 | * READ_SYNC A synchronous read. Device is not plugged, caller can | ||
| 179 | * immediately wait on this read without caring about | ||
| 180 | * unplugging. | ||
| 181 | * WRITE A normal async write. Device will be plugged. | ||
| 182 | * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down | ||
| 183 | * the hint that someone will be waiting on this IO | ||
| 184 | * shortly. The write equivalent of READ_SYNC. | ||
| 185 | * WRITE_ODIRECT Special case write for O_DIRECT only. | ||
| 186 | * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. | ||
| 187 | * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on | ||
| 188 | * non-volatile media on completion. | ||
| 189 | * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded | ||
| 190 | * by a cache flush and data is guaranteed to be on | ||
| 191 | * non-volatile media on completion. | ||
| 192 | * | ||
| 193 | */ | ||
| 194 | #define RW_MASK REQ_OP_WRITE | ||
| 195 | |||
| 196 | #define READ REQ_OP_READ | ||
| 197 | #define WRITE REQ_OP_WRITE | ||
| 198 | |||
| 199 | #define READ_SYNC REQ_SYNC | ||
| 200 | #define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) | ||
| 201 | #define WRITE_ODIRECT REQ_SYNC | ||
| 202 | #define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) | ||
| 203 | #define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) | ||
| 204 | #define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) | ||
| 205 | |||
| 206 | /* | ||
| 207 | * Attribute flags. These should be or-ed together to figure out what | 155 | * Attribute flags. These should be or-ed together to figure out what |
| 208 | * has been changed! | 156 | * has been changed! |
| 209 | */ | 157 | */ |
| @@ -595,6 +543,7 @@ is_uncached_acl(struct posix_acl *acl) | |||
| 595 | #define IOP_LOOKUP 0x0002 | 543 | #define IOP_LOOKUP 0x0002 |
| 596 | #define IOP_NOFOLLOW 0x0004 | 544 | #define IOP_NOFOLLOW 0x0004 |
| 597 | #define IOP_XATTR 0x0008 | 545 | #define IOP_XATTR 0x0008 |
| 546 | #define IOP_DEFAULT_READLINK 0x0010 | ||
| 598 | 547 | ||
| 599 | /* | 548 | /* |
| 600 | * Keep mostly read-only and often accessed (especially for | 549 | * Keep mostly read-only and often accessed (especially for |
| @@ -1778,11 +1727,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *, | |||
| 1778 | unsigned long, loff_t *, int); | 1727 | unsigned long, loff_t *, int); |
| 1779 | extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, | 1728 | extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, |
| 1780 | loff_t, size_t, unsigned int); | 1729 | loff_t, size_t, unsigned int); |
| 1730 | extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, | ||
| 1731 | struct inode *inode_out, loff_t pos_out, | ||
| 1732 | u64 *len, bool is_dedupe); | ||
| 1781 | extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, | 1733 | extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, |
| 1782 | struct file *file_out, loff_t pos_out, u64 len); | 1734 | struct file *file_out, loff_t pos_out, u64 len); |
| 1735 | extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, | ||
| 1736 | struct inode *dest, loff_t destoff, | ||
| 1737 | loff_t len, bool *is_same); | ||
| 1783 | extern int vfs_dedupe_file_range(struct file *file, | 1738 | extern int vfs_dedupe_file_range(struct file *file, |
| 1784 | struct file_dedupe_range *same); | 1739 | struct file_dedupe_range *same); |
| 1785 | 1740 | ||
| 1741 | static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, | ||
| 1742 | struct file *file_out, loff_t pos_out, | ||
| 1743 | u64 len) | ||
| 1744 | { | ||
| 1745 | int ret; | ||
| 1746 | |||
| 1747 | sb_start_write(file_inode(file_out)->i_sb); | ||
| 1748 | ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); | ||
| 1749 | sb_end_write(file_inode(file_out)->i_sb); | ||
| 1750 | |||
| 1751 | return ret; | ||
| 1752 | } | ||
| 1753 | |||
| 1786 | struct super_operations { | 1754 | struct super_operations { |
| 1787 | struct inode *(*alloc_inode)(struct super_block *sb); | 1755 | struct inode *(*alloc_inode)(struct super_block *sb); |
| 1788 | void (*destroy_inode)(struct inode *); | 1756 | void (*destroy_inode)(struct inode *); |
| @@ -2123,11 +2091,11 @@ extern int may_umount_tree(struct vfsmount *); | |||
| 2123 | extern int may_umount(struct vfsmount *); | 2091 | extern int may_umount(struct vfsmount *); |
| 2124 | extern long do_mount(const char *, const char __user *, | 2092 | extern long do_mount(const char *, const char __user *, |
| 2125 | const char *, unsigned long, void *); | 2093 | const char *, unsigned long, void *); |
| 2126 | extern struct vfsmount *collect_mounts(struct path *); | 2094 | extern struct vfsmount *collect_mounts(const struct path *); |
| 2127 | extern void drop_collected_mounts(struct vfsmount *); | 2095 | extern void drop_collected_mounts(struct vfsmount *); |
| 2128 | extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, | 2096 | extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, |
| 2129 | struct vfsmount *); | 2097 | struct vfsmount *); |
| 2130 | extern int vfs_statfs(struct path *, struct kstatfs *); | 2098 | extern int vfs_statfs(const struct path *, struct kstatfs *); |
| 2131 | extern int user_statfs(const char __user *, struct kstatfs *); | 2099 | extern int user_statfs(const char __user *, struct kstatfs *); |
| 2132 | extern int fd_statfs(int, struct kstatfs *); | 2100 | extern int fd_statfs(int, struct kstatfs *); |
| 2133 | extern int vfs_ustat(dev_t, struct kstatfs *); | 2101 | extern int vfs_ustat(dev_t, struct kstatfs *); |
| @@ -2499,19 +2467,6 @@ extern void make_bad_inode(struct inode *); | |||
| 2499 | extern bool is_bad_inode(struct inode *); | 2467 | extern bool is_bad_inode(struct inode *); |
| 2500 | 2468 | ||
| 2501 | #ifdef CONFIG_BLOCK | 2469 | #ifdef CONFIG_BLOCK |
| 2502 | static inline bool op_is_write(unsigned int op) | ||
| 2503 | { | ||
| 2504 | return op == REQ_OP_READ ? false : true; | ||
| 2505 | } | ||
| 2506 | |||
| 2507 | /* | ||
| 2508 | * return data direction, READ or WRITE | ||
| 2509 | */ | ||
| 2510 | static inline int bio_data_dir(struct bio *bio) | ||
| 2511 | { | ||
| 2512 | return op_is_write(bio_op(bio)) ? WRITE : READ; | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | extern void check_disk_size_change(struct gendisk *disk, | 2470 | extern void check_disk_size_change(struct gendisk *disk, |
| 2516 | struct block_device *bdev); | 2471 | struct block_device *bdev); |
| 2517 | extern int revalidate_disk(struct gendisk *); | 2472 | extern int revalidate_disk(struct gendisk *); |
| @@ -2709,7 +2664,7 @@ extern struct file * open_exec(const char *); | |||
| 2709 | 2664 | ||
| 2710 | /* fs/dcache.c -- generic fs support functions */ | 2665 | /* fs/dcache.c -- generic fs support functions */ |
| 2711 | extern bool is_subdir(struct dentry *, struct dentry *); | 2666 | extern bool is_subdir(struct dentry *, struct dentry *); |
| 2712 | extern bool path_is_under(struct path *, struct path *); | 2667 | extern bool path_is_under(const struct path *, const struct path *); |
| 2713 | 2668 | ||
| 2714 | extern char *file_path(struct file *, char *, int); | 2669 | extern char *file_path(struct file *, char *, int); |
| 2715 | 2670 | ||
| @@ -2782,7 +2737,6 @@ static inline void remove_inode_hash(struct inode *inode) | |||
| 2782 | extern void inode_sb_list_add(struct inode *inode); | 2737 | extern void inode_sb_list_add(struct inode *inode); |
| 2783 | 2738 | ||
| 2784 | #ifdef CONFIG_BLOCK | 2739 | #ifdef CONFIG_BLOCK |
| 2785 | extern blk_qc_t submit_bio(struct bio *); | ||
| 2786 | extern int bdev_read_only(struct block_device *); | 2740 | extern int bdev_read_only(struct block_device *); |
| 2787 | #endif | 2741 | #endif |
| 2788 | extern int set_blocksize(struct block_device *, int); | 2742 | extern int set_blocksize(struct block_device *, int); |
| @@ -2914,7 +2868,6 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len, | |||
| 2914 | extern int page_symlink(struct inode *inode, const char *symname, int len); | 2868 | extern int page_symlink(struct inode *inode, const char *symname, int len); |
| 2915 | extern const struct inode_operations page_symlink_inode_operations; | 2869 | extern const struct inode_operations page_symlink_inode_operations; |
| 2916 | extern void kfree_link(void *); | 2870 | extern void kfree_link(void *); |
| 2917 | extern int generic_readlink(struct dentry *, char __user *, int); | ||
| 2918 | extern void generic_fillattr(struct inode *, struct kstat *); | 2871 | extern void generic_fillattr(struct inode *, struct kstat *); |
| 2919 | int vfs_getattr_nosec(struct path *path, struct kstat *stat); | 2872 | int vfs_getattr_nosec(struct path *path, struct kstat *stat); |
| 2920 | extern int vfs_getattr(struct path *, struct kstat *); | 2873 | extern int vfs_getattr(struct path *, struct kstat *); |
| @@ -2935,6 +2888,7 @@ extern int vfs_lstat(const char __user *, struct kstat *); | |||
| 2935 | extern int vfs_fstat(unsigned int, struct kstat *); | 2888 | extern int vfs_fstat(unsigned int, struct kstat *); |
| 2936 | extern int vfs_fstatat(int , const char __user *, struct kstat *, int); | 2889 | extern int vfs_fstatat(int , const char __user *, struct kstat *, int); |
| 2937 | extern const char *vfs_get_link(struct dentry *, struct delayed_call *); | 2890 | extern const char *vfs_get_link(struct dentry *, struct delayed_call *); |
| 2891 | extern int vfs_readlink(struct dentry *, char __user *, int); | ||
| 2938 | 2892 | ||
| 2939 | extern int __generic_block_fiemap(struct inode *inode, | 2893 | extern int __generic_block_fiemap(struct inode *inode, |
| 2940 | struct fiemap_extent_info *fieinfo, | 2894 | struct fiemap_extent_info *fieinfo, |
| @@ -2949,8 +2903,10 @@ extern void put_filesystem(struct file_system_type *fs); | |||
| 2949 | extern struct file_system_type *get_fs_type(const char *name); | 2903 | extern struct file_system_type *get_fs_type(const char *name); |
| 2950 | extern struct super_block *get_super(struct block_device *); | 2904 | extern struct super_block *get_super(struct block_device *); |
| 2951 | extern struct super_block *get_super_thawed(struct block_device *); | 2905 | extern struct super_block *get_super_thawed(struct block_device *); |
| 2906 | extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev); | ||
| 2952 | extern struct super_block *get_active_super(struct block_device *bdev); | 2907 | extern struct super_block *get_active_super(struct block_device *bdev); |
| 2953 | extern void drop_super(struct super_block *sb); | 2908 | extern void drop_super(struct super_block *sb); |
| 2909 | extern void drop_super_exclusive(struct super_block *sb); | ||
| 2954 | extern void iterate_supers(void (*)(struct super_block *, void *), void *); | 2910 | extern void iterate_supers(void (*)(struct super_block *, void *), void *); |
| 2955 | extern void iterate_supers_type(struct file_system_type *, | 2911 | extern void iterate_supers_type(struct file_system_type *, |
| 2956 | void (*)(struct super_block *, void *), void *); | 2912 | void (*)(struct super_block *, void *), void *); |
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index ff8b11b26f31..c074b670aa99 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h | |||
| @@ -18,73 +18,9 @@ | |||
| 18 | #include <crypto/skcipher.h> | 18 | #include <crypto/skcipher.h> |
| 19 | #include <uapi/linux/fs.h> | 19 | #include <uapi/linux/fs.h> |
| 20 | 20 | ||
| 21 | #define FS_KEY_DERIVATION_NONCE_SIZE 16 | 21 | #define FS_CRYPTO_BLOCK_SIZE 16 |
| 22 | #define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 | ||
| 23 | |||
| 24 | #define FS_POLICY_FLAGS_PAD_4 0x00 | ||
| 25 | #define FS_POLICY_FLAGS_PAD_8 0x01 | ||
| 26 | #define FS_POLICY_FLAGS_PAD_16 0x02 | ||
| 27 | #define FS_POLICY_FLAGS_PAD_32 0x03 | ||
| 28 | #define FS_POLICY_FLAGS_PAD_MASK 0x03 | ||
| 29 | #define FS_POLICY_FLAGS_VALID 0x03 | ||
| 30 | |||
| 31 | /* Encryption algorithms */ | ||
| 32 | #define FS_ENCRYPTION_MODE_INVALID 0 | ||
| 33 | #define FS_ENCRYPTION_MODE_AES_256_XTS 1 | ||
| 34 | #define FS_ENCRYPTION_MODE_AES_256_GCM 2 | ||
| 35 | #define FS_ENCRYPTION_MODE_AES_256_CBC 3 | ||
| 36 | #define FS_ENCRYPTION_MODE_AES_256_CTS 4 | ||
| 37 | |||
| 38 | /** | ||
| 39 | * Encryption context for inode | ||
| 40 | * | ||
| 41 | * Protector format: | ||
| 42 | * 1 byte: Protector format (1 = this version) | ||
| 43 | * 1 byte: File contents encryption mode | ||
| 44 | * 1 byte: File names encryption mode | ||
| 45 | * 1 byte: Flags | ||
| 46 | * 8 bytes: Master Key descriptor | ||
| 47 | * 16 bytes: Encryption Key derivation nonce | ||
| 48 | */ | ||
| 49 | struct fscrypt_context { | ||
| 50 | u8 format; | ||
| 51 | u8 contents_encryption_mode; | ||
| 52 | u8 filenames_encryption_mode; | ||
| 53 | u8 flags; | ||
| 54 | u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; | ||
| 55 | u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; | ||
| 56 | } __packed; | ||
| 57 | |||
| 58 | /* Encryption parameters */ | ||
| 59 | #define FS_XTS_TWEAK_SIZE 16 | ||
| 60 | #define FS_AES_128_ECB_KEY_SIZE 16 | ||
| 61 | #define FS_AES_256_GCM_KEY_SIZE 32 | ||
| 62 | #define FS_AES_256_CBC_KEY_SIZE 32 | ||
| 63 | #define FS_AES_256_CTS_KEY_SIZE 32 | ||
| 64 | #define FS_AES_256_XTS_KEY_SIZE 64 | ||
| 65 | #define FS_MAX_KEY_SIZE 64 | ||
| 66 | |||
| 67 | #define FS_KEY_DESC_PREFIX "fscrypt:" | ||
| 68 | #define FS_KEY_DESC_PREFIX_SIZE 8 | ||
| 69 | |||
| 70 | /* This is passed in from userspace into the kernel keyring */ | ||
| 71 | struct fscrypt_key { | ||
| 72 | u32 mode; | ||
| 73 | u8 raw[FS_MAX_KEY_SIZE]; | ||
| 74 | u32 size; | ||
| 75 | } __packed; | ||
| 76 | |||
| 77 | struct fscrypt_info { | ||
| 78 | u8 ci_data_mode; | ||
| 79 | u8 ci_filename_mode; | ||
| 80 | u8 ci_flags; | ||
| 81 | struct crypto_skcipher *ci_ctfm; | ||
| 82 | struct key *ci_keyring_key; | ||
| 83 | u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; | ||
| 84 | }; | ||
| 85 | 22 | ||
| 86 | #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 | 23 | struct fscrypt_info; |
| 87 | #define FS_WRITE_PATH_FL 0x00000002 | ||
| 88 | 24 | ||
| 89 | struct fscrypt_ctx { | 25 | struct fscrypt_ctx { |
| 90 | union { | 26 | union { |
| @@ -102,19 +38,6 @@ struct fscrypt_ctx { | |||
| 102 | u8 mode; /* Encryption mode for tfm */ | 38 | u8 mode; /* Encryption mode for tfm */ |
| 103 | }; | 39 | }; |
| 104 | 40 | ||
| 105 | struct fscrypt_completion_result { | ||
| 106 | struct completion completion; | ||
| 107 | int res; | ||
| 108 | }; | ||
| 109 | |||
| 110 | #define DECLARE_FS_COMPLETION_RESULT(ecr) \ | ||
| 111 | struct fscrypt_completion_result ecr = { \ | ||
| 112 | COMPLETION_INITIALIZER((ecr).completion), 0 } | ||
| 113 | |||
| 114 | #define FS_FNAME_NUM_SCATTER_ENTRIES 4 | ||
| 115 | #define FS_CRYPTO_BLOCK_SIZE 16 | ||
| 116 | #define FS_FNAME_CRYPTO_DIGEST_SIZE 32 | ||
| 117 | |||
| 118 | /** | 41 | /** |
| 119 | * For encrypted symlinks, the ciphertext length is stored at the beginning | 42 | * For encrypted symlinks, the ciphertext length is stored at the beginning |
| 120 | * of the string in little-endian format. | 43 | * of the string in little-endian format. |
| @@ -154,9 +77,15 @@ struct fscrypt_name { | |||
| 154 | #define fname_len(p) ((p)->disk_name.len) | 77 | #define fname_len(p) ((p)->disk_name.len) |
| 155 | 78 | ||
| 156 | /* | 79 | /* |
| 80 | * fscrypt superblock flags | ||
| 81 | */ | ||
| 82 | #define FS_CFLG_OWN_PAGES (1U << 1) | ||
| 83 | |||
| 84 | /* | ||
| 157 | * crypto opertions for filesystems | 85 | * crypto opertions for filesystems |
| 158 | */ | 86 | */ |
| 159 | struct fscrypt_operations { | 87 | struct fscrypt_operations { |
| 88 | unsigned int flags; | ||
| 160 | int (*get_context)(struct inode *, void *, size_t); | 89 | int (*get_context)(struct inode *, void *, size_t); |
| 161 | int (*key_prefix)(struct inode *, u8 **); | 90 | int (*key_prefix)(struct inode *, u8 **); |
| 162 | int (*prepare_context)(struct inode *); | 91 | int (*prepare_context)(struct inode *); |
| @@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page) | |||
| 206 | #endif | 135 | #endif |
| 207 | } | 136 | } |
| 208 | 137 | ||
| 209 | static inline int fscrypt_has_encryption_key(struct inode *inode) | 138 | static inline int fscrypt_has_encryption_key(const struct inode *inode) |
| 210 | { | 139 | { |
| 211 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) | 140 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) |
| 212 | return (inode->i_crypt_info != NULL); | 141 | return (inode->i_crypt_info != NULL); |
| @@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry) | |||
| 238 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) | 167 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) |
| 239 | /* crypto.c */ | 168 | /* crypto.c */ |
| 240 | extern struct kmem_cache *fscrypt_info_cachep; | 169 | extern struct kmem_cache *fscrypt_info_cachep; |
| 241 | int fscrypt_initialize(void); | 170 | extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); |
| 242 | |||
| 243 | extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t); | ||
| 244 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); | 171 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); |
| 245 | extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t); | 172 | extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, |
| 246 | extern int fscrypt_decrypt_page(struct page *); | 173 | unsigned int, unsigned int, |
| 174 | u64, gfp_t); | ||
| 175 | extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, | ||
| 176 | unsigned int, u64); | ||
| 247 | extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); | 177 | extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); |
| 248 | extern void fscrypt_pullback_bio_page(struct page **, bool); | 178 | extern void fscrypt_pullback_bio_page(struct page **, bool); |
| 249 | extern void fscrypt_restore_control_page(struct page *); | 179 | extern void fscrypt_restore_control_page(struct page *); |
| 250 | extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, | 180 | extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, |
| 251 | unsigned int); | 181 | unsigned int); |
| 252 | /* policy.c */ | 182 | /* policy.c */ |
| 253 | extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *); | 183 | extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); |
| 254 | extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); | 184 | extern int fscrypt_ioctl_get_policy(struct file *, void __user *); |
| 255 | extern int fscrypt_has_permitted_context(struct inode *, struct inode *); | 185 | extern int fscrypt_has_permitted_context(struct inode *, struct inode *); |
| 256 | extern int fscrypt_inherit_context(struct inode *, struct inode *, | 186 | extern int fscrypt_inherit_context(struct inode *, struct inode *, |
| 257 | void *, bool); | 187 | void *, bool); |
| 258 | /* keyinfo.c */ | 188 | /* keyinfo.c */ |
| 259 | extern int get_crypt_info(struct inode *); | ||
| 260 | extern int fscrypt_get_encryption_info(struct inode *); | 189 | extern int fscrypt_get_encryption_info(struct inode *); |
| 261 | extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); | 190 | extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); |
| 262 | 191 | ||
| @@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); | |||
| 264 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, | 193 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, |
| 265 | int lookup, struct fscrypt_name *); | 194 | int lookup, struct fscrypt_name *); |
| 266 | extern void fscrypt_free_filename(struct fscrypt_name *); | 195 | extern void fscrypt_free_filename(struct fscrypt_name *); |
| 267 | extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); | 196 | extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); |
| 268 | extern int fscrypt_fname_alloc_buffer(struct inode *, u32, | 197 | extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, |
| 269 | struct fscrypt_str *); | 198 | struct fscrypt_str *); |
| 270 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); | 199 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); |
| 271 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, | 200 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, |
| @@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, | |||
| 275 | #endif | 204 | #endif |
| 276 | 205 | ||
| 277 | /* crypto.c */ | 206 | /* crypto.c */ |
| 278 | static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i, | 207 | static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i, |
| 279 | gfp_t f) | 208 | gfp_t f) |
| 280 | { | 209 | { |
| 281 | return ERR_PTR(-EOPNOTSUPP); | 210 | return ERR_PTR(-EOPNOTSUPP); |
| @@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) | |||
| 286 | return; | 215 | return; |
| 287 | } | 216 | } |
| 288 | 217 | ||
| 289 | static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, | 218 | static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i, |
| 290 | struct page *p, gfp_t f) | 219 | struct page *p, |
| 220 | unsigned int len, | ||
| 221 | unsigned int offs, | ||
| 222 | u64 lblk_num, gfp_t f) | ||
| 291 | { | 223 | { |
| 292 | return ERR_PTR(-EOPNOTSUPP); | 224 | return ERR_PTR(-EOPNOTSUPP); |
| 293 | } | 225 | } |
| 294 | 226 | ||
| 295 | static inline int fscrypt_notsupp_decrypt_page(struct page *p) | 227 | static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p, |
| 228 | unsigned int len, unsigned int offs, | ||
| 229 | u64 lblk_num) | ||
| 296 | { | 230 | { |
| 297 | return -EOPNOTSUPP; | 231 | return -EOPNOTSUPP; |
| 298 | } | 232 | } |
| @@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p) | |||
| 313 | return; | 247 | return; |
| 314 | } | 248 | } |
| 315 | 249 | ||
| 316 | static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, | 250 | static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p, |
| 317 | sector_t s, unsigned int f) | 251 | sector_t s, unsigned int f) |
| 318 | { | 252 | { |
| 319 | return -EOPNOTSUPP; | 253 | return -EOPNOTSUPP; |
| 320 | } | 254 | } |
| 321 | 255 | ||
| 322 | /* policy.c */ | 256 | /* policy.c */ |
| 323 | static inline int fscrypt_notsupp_process_policy(struct file *f, | 257 | static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f, |
| 324 | const struct fscrypt_policy *p) | 258 | const void __user *arg) |
| 325 | { | 259 | { |
| 326 | return -EOPNOTSUPP; | 260 | return -EOPNOTSUPP; |
| 327 | } | 261 | } |
| 328 | 262 | ||
| 329 | static inline int fscrypt_notsupp_get_policy(struct inode *i, | 263 | static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f, |
| 330 | struct fscrypt_policy *p) | 264 | void __user *arg) |
| 331 | { | 265 | { |
| 332 | return -EOPNOTSUPP; | 266 | return -EOPNOTSUPP; |
| 333 | } | 267 | } |
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 649e9171a9b3..3efa3b861d44 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h | |||
| @@ -29,83 +29,112 @@ | |||
| 29 | * #ifdefs. | 29 | * #ifdefs. |
| 30 | */ | 30 | */ |
| 31 | struct ccsr_guts { | 31 | struct ccsr_guts { |
| 32 | __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ | 32 | u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ |
| 33 | __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ | 33 | u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ |
| 34 | __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ | 34 | u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and |
| 35 | __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ | 35 | * Control Register |
| 36 | __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ | 36 | */ |
| 37 | __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ | 37 | u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ |
| 38 | u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ | ||
| 39 | u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ | ||
| 38 | u8 res018[0x20 - 0x18]; | 40 | u8 res018[0x20 - 0x18]; |
| 39 | __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ | 41 | u32 porcir; /* 0x.0020 - POR Configuration Information |
| 42 | * Register | ||
| 43 | */ | ||
| 40 | u8 res024[0x30 - 0x24]; | 44 | u8 res024[0x30 - 0x24]; |
| 41 | __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ | 45 | u32 gpiocr; /* 0x.0030 - GPIO Control Register */ |
| 42 | u8 res034[0x40 - 0x34]; | 46 | u8 res034[0x40 - 0x34]; |
| 43 | __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ | 47 | u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data |
| 48 | * Register | ||
| 49 | */ | ||
| 44 | u8 res044[0x50 - 0x44]; | 50 | u8 res044[0x50 - 0x44]; |
| 45 | __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ | 51 | u32 gpindr; /* 0x.0050 - General-Purpose Input Data |
| 52 | * Register | ||
| 53 | */ | ||
| 46 | u8 res054[0x60 - 0x54]; | 54 | u8 res054[0x60 - 0x54]; |
| 47 | __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ | 55 | u32 pmuxcr; /* 0x.0060 - Alternate Function Signal |
| 48 | __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ | 56 | * Multiplex Control |
| 49 | __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ | 57 | */ |
| 58 | u32 pmuxcr2; /* 0x.0064 - Alternate function signal | ||
| 59 | * multiplex control 2 | ||
| 60 | */ | ||
| 61 | u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ | ||
| 50 | u8 res06c[0x70 - 0x6c]; | 62 | u8 res06c[0x70 - 0x6c]; |
| 51 | __be32 devdisr; /* 0x.0070 - Device Disable Control */ | 63 | u32 devdisr; /* 0x.0070 - Device Disable Control */ |
| 52 | #define CCSR_GUTS_DEVDISR_TB1 0x00001000 | 64 | #define CCSR_GUTS_DEVDISR_TB1 0x00001000 |
| 53 | #define CCSR_GUTS_DEVDISR_TB0 0x00004000 | 65 | #define CCSR_GUTS_DEVDISR_TB0 0x00004000 |
| 54 | __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ | 66 | u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ |
| 55 | u8 res078[0x7c - 0x78]; | 67 | u8 res078[0x7c - 0x78]; |
| 56 | __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ | 68 | u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control |
| 57 | __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ | 69 | * Register |
| 58 | __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ | 70 | */ |
| 59 | __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ | 71 | u32 powmgtcsr; /* 0x.0080 - Power Management Status and |
| 60 | __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ | 72 | * Control Register |
| 61 | __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ | 73 | */ |
| 62 | __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ | 74 | u32 pmrccr; /* 0x.0084 - Power Management Reset Counter |
| 63 | __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ | 75 | * Configuration Register |
| 64 | __be32 autorstsr; /* 0x.009c - Automatic reset status register */ | 76 | */ |
| 65 | __be32 pvr; /* 0x.00a0 - Processor Version Register */ | 77 | u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter |
| 66 | __be32 svr; /* 0x.00a4 - System Version Register */ | 78 | * Configuration Register |
| 79 | */ | ||
| 80 | u32 pmcdr; /* 0x.008c - 4Power management clock disable | ||
| 81 | * register | ||
| 82 | */ | ||
| 83 | u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ | ||
| 84 | u32 rstrscr; /* 0x.0094 - Reset Request Status and | ||
| 85 | * Control Register | ||
| 86 | */ | ||
| 87 | u32 ectrstcr; /* 0x.0098 - Exception reset control register */ | ||
| 88 | u32 autorstsr; /* 0x.009c - Automatic reset status register */ | ||
| 89 | u32 pvr; /* 0x.00a0 - Processor Version Register */ | ||
| 90 | u32 svr; /* 0x.00a4 - System Version Register */ | ||
| 67 | u8 res0a8[0xb0 - 0xa8]; | 91 | u8 res0a8[0xb0 - 0xa8]; |
| 68 | __be32 rstcr; /* 0x.00b0 - Reset Control Register */ | 92 | u32 rstcr; /* 0x.00b0 - Reset Control Register */ |
| 69 | u8 res0b4[0xc0 - 0xb4]; | 93 | u8 res0b4[0xc0 - 0xb4]; |
| 70 | __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register | 94 | u32 iovselsr; /* 0x.00c0 - I/O voltage select status register |
| 71 | Called 'elbcvselcr' on 86xx SOCs */ | 95 | Called 'elbcvselcr' on 86xx SOCs */ |
| 72 | u8 res0c4[0x100 - 0xc4]; | 96 | u8 res0c4[0x100 - 0xc4]; |
| 73 | __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers | 97 | u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers |
| 74 | There are 16 registers */ | 98 | There are 16 registers */ |
| 75 | u8 res140[0x224 - 0x140]; | 99 | u8 res140[0x224 - 0x140]; |
| 76 | __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ | 100 | u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ |
| 77 | __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ | 101 | u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ |
| 78 | u8 res22c[0x604 - 0x22c]; | 102 | u8 res22c[0x604 - 0x22c]; |
| 79 | __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ | 103 | u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ |
| 80 | u8 res608[0x800 - 0x608]; | 104 | u8 res608[0x800 - 0x608]; |
| 81 | __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ | 105 | u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ |
| 82 | u8 res804[0x900 - 0x804]; | 106 | u8 res804[0x900 - 0x804]; |
| 83 | __be32 ircr; /* 0x.0900 - Infrared Control Register */ | 107 | u32 ircr; /* 0x.0900 - Infrared Control Register */ |
| 84 | u8 res904[0x908 - 0x904]; | 108 | u8 res904[0x908 - 0x904]; |
| 85 | __be32 dmacr; /* 0x.0908 - DMA Control Register */ | 109 | u32 dmacr; /* 0x.0908 - DMA Control Register */ |
| 86 | u8 res90c[0x914 - 0x90c]; | 110 | u8 res90c[0x914 - 0x90c]; |
| 87 | __be32 elbccr; /* 0x.0914 - eLBC Control Register */ | 111 | u32 elbccr; /* 0x.0914 - eLBC Control Register */ |
| 88 | u8 res918[0xb20 - 0x918]; | 112 | u8 res918[0xb20 - 0x918]; |
| 89 | __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ | 113 | u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ |
| 90 | __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ | 114 | u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ |
| 91 | __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ | 115 | u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ |
| 92 | u8 resb2c[0xe00 - 0xb2c]; | 116 | u8 resb2c[0xe00 - 0xb2c]; |
| 93 | __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ | 117 | u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ |
| 94 | u8 rese04[0xe10 - 0xe04]; | 118 | u8 rese04[0xe10 - 0xe04]; |
| 95 | __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ | 119 | u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ |
| 96 | u8 rese14[0xe20 - 0xe14]; | 120 | u8 rese14[0xe20 - 0xe14]; |
| 97 | __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ | 121 | u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ |
| 98 | __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ | 122 | u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override |
| 123 | * register | ||
| 124 | */ | ||
| 99 | u8 rese28[0xf04 - 0xe28]; | 125 | u8 rese28[0xf04 - 0xe28]; |
| 100 | __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ | 126 | u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ |
| 101 | __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ | 127 | u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ |
| 102 | u8 resf0c[0xf2c - 0xf0c]; | 128 | u8 resf0c[0xf2c - 0xf0c]; |
| 103 | __be32 itcr; /* 0x.0f2c - Internal transaction control register */ | 129 | u32 itcr; /* 0x.0f2c - Internal transaction control |
| 130 | * register | ||
| 131 | */ | ||
| 104 | u8 resf30[0xf40 - 0xf30]; | 132 | u8 resf30[0xf40 - 0xf30]; |
| 105 | __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ | 133 | u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ |
| 106 | __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ | 134 | u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ |
| 107 | } __attribute__ ((packed)); | 135 | } __attribute__ ((packed)); |
| 108 | 136 | ||
| 137 | u32 fsl_guts_get_svr(void); | ||
| 109 | 138 | ||
| 110 | /* Alternate function signal multiplex control */ | 139 | /* Alternate function signal multiplex control */ |
| 111 | #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) | 140 | #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) |
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index f2912914141a..60cef8227534 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
| @@ -100,6 +100,7 @@ struct fsl_usb2_platform_data { | |||
| 100 | unsigned already_suspended:1; | 100 | unsigned already_suspended:1; |
| 101 | unsigned has_fsl_erratum_a007792:1; | 101 | unsigned has_fsl_erratum_a007792:1; |
| 102 | unsigned has_fsl_erratum_a005275:1; | 102 | unsigned has_fsl_erratum_a005275:1; |
| 103 | unsigned has_fsl_erratum_a005697:1; | ||
| 103 | unsigned check_phy_clk_valid:1; | 104 | unsigned check_phy_clk_valid:1; |
| 104 | 105 | ||
| 105 | /* register save area for suspend/resume */ | 106 | /* register save area for suspend/resume */ |
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index b8bcc058e031..b43d3f5bd9ea 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | #include <linux/bug.h> | 17 | #include <linux/bug.h> |
| 18 | 18 | ||
| 19 | /* Notify this dentry's parent about a child's events. */ | 19 | /* Notify this dentry's parent about a child's events. */ |
| 20 | static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) | 20 | static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) |
| 21 | { | 21 | { |
| 22 | if (!dentry) | 22 | if (!dentry) |
| 23 | dentry = path->dentry; | 23 | dentry = path->dentry; |
| @@ -28,7 +28,7 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3 | |||
| 28 | /* simple call site for access decisions */ | 28 | /* simple call site for access decisions */ |
| 29 | static inline int fsnotify_perm(struct file *file, int mask) | 29 | static inline int fsnotify_perm(struct file *file, int mask) |
| 30 | { | 30 | { |
| 31 | struct path *path = &file->f_path; | 31 | const struct path *path = &file->f_path; |
| 32 | /* | 32 | /* |
| 33 | * Do not use file_inode() here or anywhere in this file to get the | 33 | * Do not use file_inode() here or anywhere in this file to get the |
| 34 | * inode. That would break *notity on overlayfs. | 34 | * inode. That would break *notity on overlayfs. |
| @@ -176,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
| 176 | */ | 176 | */ |
| 177 | static inline void fsnotify_access(struct file *file) | 177 | static inline void fsnotify_access(struct file *file) |
| 178 | { | 178 | { |
| 179 | struct path *path = &file->f_path; | 179 | const struct path *path = &file->f_path; |
| 180 | struct inode *inode = path->dentry->d_inode; | 180 | struct inode *inode = path->dentry->d_inode; |
| 181 | __u32 mask = FS_ACCESS; | 181 | __u32 mask = FS_ACCESS; |
| 182 | 182 | ||
| @@ -194,7 +194,7 @@ static inline void fsnotify_access(struct file *file) | |||
| 194 | */ | 194 | */ |
| 195 | static inline void fsnotify_modify(struct file *file) | 195 | static inline void fsnotify_modify(struct file *file) |
| 196 | { | 196 | { |
| 197 | struct path *path = &file->f_path; | 197 | const struct path *path = &file->f_path; |
| 198 | struct inode *inode = path->dentry->d_inode; | 198 | struct inode *inode = path->dentry->d_inode; |
| 199 | __u32 mask = FS_MODIFY; | 199 | __u32 mask = FS_MODIFY; |
| 200 | 200 | ||
| @@ -212,7 +212,7 @@ static inline void fsnotify_modify(struct file *file) | |||
| 212 | */ | 212 | */ |
| 213 | static inline void fsnotify_open(struct file *file) | 213 | static inline void fsnotify_open(struct file *file) |
| 214 | { | 214 | { |
| 215 | struct path *path = &file->f_path; | 215 | const struct path *path = &file->f_path; |
| 216 | struct inode *inode = path->dentry->d_inode; | 216 | struct inode *inode = path->dentry->d_inode; |
| 217 | __u32 mask = FS_OPEN; | 217 | __u32 mask = FS_OPEN; |
| 218 | 218 | ||
| @@ -228,7 +228,7 @@ static inline void fsnotify_open(struct file *file) | |||
| 228 | */ | 228 | */ |
| 229 | static inline void fsnotify_close(struct file *file) | 229 | static inline void fsnotify_close(struct file *file) |
| 230 | { | 230 | { |
| 231 | struct path *path = &file->f_path; | 231 | const struct path *path = &file->f_path; |
| 232 | struct inode *inode = path->dentry->d_inode; | 232 | struct inode *inode = path->dentry->d_inode; |
| 233 | fmode_t mode = file->f_mode; | 233 | fmode_t mode = file->f_mode; |
| 234 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; | 234 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 79467b239fcf..0cf34d6cc253 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -96,7 +96,7 @@ struct fsnotify_ops { | |||
| 96 | struct inode *inode, | 96 | struct inode *inode, |
| 97 | struct fsnotify_mark *inode_mark, | 97 | struct fsnotify_mark *inode_mark, |
| 98 | struct fsnotify_mark *vfsmount_mark, | 98 | struct fsnotify_mark *vfsmount_mark, |
| 99 | u32 mask, void *data, int data_type, | 99 | u32 mask, const void *data, int data_type, |
| 100 | const unsigned char *file_name, u32 cookie); | 100 | const unsigned char *file_name, u32 cookie); |
| 101 | void (*free_group_priv)(struct fsnotify_group *group); | 101 | void (*free_group_priv)(struct fsnotify_group *group); |
| 102 | void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); | 102 | void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); |
| @@ -245,9 +245,9 @@ struct fsnotify_mark { | |||
| 245 | /* called from the vfs helpers */ | 245 | /* called from the vfs helpers */ |
| 246 | 246 | ||
| 247 | /* main fsnotify call to send events */ | 247 | /* main fsnotify call to send events */ |
| 248 | extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | 248 | extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, |
| 249 | const unsigned char *name, u32 cookie); | 249 | const unsigned char *name, u32 cookie); |
| 250 | extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask); | 250 | extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); |
| 251 | extern void __fsnotify_inode_delete(struct inode *inode); | 251 | extern void __fsnotify_inode_delete(struct inode *inode); |
| 252 | extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); | 252 | extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); |
| 253 | extern u32 fsnotify_get_cookie(void); | 253 | extern u32 fsnotify_get_cookie(void); |
| @@ -357,13 +357,13 @@ extern void fsnotify_init_event(struct fsnotify_event *event, | |||
| 357 | 357 | ||
| 358 | #else | 358 | #else |
| 359 | 359 | ||
| 360 | static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | 360 | static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, |
| 361 | const unsigned char *name, u32 cookie) | 361 | const unsigned char *name, u32 cookie) |
| 362 | { | 362 | { |
| 363 | return 0; | 363 | return 0; |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) | 366 | static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) |
| 367 | { | 367 | { |
| 368 | return 0; | 368 | return 0; |
| 369 | } | 369 | } |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b3d34d3e0e7e..3633e8beff39 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, | |||
| 398 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); | 398 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset); |
| 399 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | 399 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); |
| 400 | void ftrace_free_filter(struct ftrace_ops *ops); | 400 | void ftrace_free_filter(struct ftrace_ops *ops); |
| 401 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops); | ||
| 401 | 402 | ||
| 402 | int register_ftrace_command(struct ftrace_func_command *cmd); | 403 | int register_ftrace_command(struct ftrace_func_command *cmd); |
| 403 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | 404 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
| @@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip) | |||
| 645 | #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) | 646 | #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) |
| 646 | #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) | 647 | #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) |
| 647 | #define ftrace_free_filter(ops) do { } while (0) | 648 | #define ftrace_free_filter(ops) do { } while (0) |
| 649 | #define ftrace_ops_set_global_filter(ops) do { } while (0) | ||
| 648 | 650 | ||
| 649 | static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | 651 | static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, |
| 650 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 652 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
| @@ -945,6 +947,10 @@ extern int __disable_trace_on_warning; | |||
| 945 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | 947 | #define INIT_TRACE_RECURSION .trace_recursion = 0, |
| 946 | #endif | 948 | #endif |
| 947 | 949 | ||
| 950 | int tracepoint_printk_sysctl(struct ctl_table *table, int write, | ||
| 951 | void __user *buffer, size_t *lenp, | ||
| 952 | loff_t *ppos); | ||
| 953 | |||
| 948 | #else /* CONFIG_TRACING */ | 954 | #else /* CONFIG_TRACING */ |
| 949 | static inline void disable_trace_on_warning(void) { } | 955 | static inline void disable_trace_on_warning(void) { } |
| 950 | #endif /* CONFIG_TRACING */ | 956 | #endif /* CONFIG_TRACING */ |
diff --git a/include/linux/futex.h b/include/linux/futex.h index 6435f46d6e13..7c5b694864cd 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
| @@ -1,14 +1,14 @@ | |||
| 1 | #ifndef _LINUX_FUTEX_H | 1 | #ifndef _LINUX_FUTEX_H |
| 2 | #define _LINUX_FUTEX_H | 2 | #define _LINUX_FUTEX_H |
| 3 | 3 | ||
| 4 | #include <linux/ktime.h> | ||
| 4 | #include <uapi/linux/futex.h> | 5 | #include <uapi/linux/futex.h> |
| 5 | 6 | ||
| 6 | struct inode; | 7 | struct inode; |
| 7 | struct mm_struct; | 8 | struct mm_struct; |
| 8 | struct task_struct; | 9 | struct task_struct; |
| 9 | union ktime; | ||
| 10 | 10 | ||
| 11 | long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, | 11 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
| 12 | u32 __user *uaddr2, u32 val2, u32 val3); | 12 | u32 __user *uaddr2, u32 val2, u32 val3); |
| 13 | 13 | ||
| 14 | extern int | 14 | extern int |
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 851671742790..8bd28ce6d76e 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h | |||
| @@ -17,8 +17,9 @@ enum fwnode_type { | |||
| 17 | FWNODE_OF, | 17 | FWNODE_OF, |
| 18 | FWNODE_ACPI, | 18 | FWNODE_ACPI, |
| 19 | FWNODE_ACPI_DATA, | 19 | FWNODE_ACPI_DATA, |
| 20 | FWNODE_ACPI_STATIC, | ||
| 20 | FWNODE_PDATA, | 21 | FWNODE_PDATA, |
| 21 | FWNODE_IRQCHIP, | 22 | FWNODE_IRQCHIP |
| 22 | }; | 23 | }; |
| 23 | 24 | ||
| 24 | struct fwnode_handle { | 25 | struct fwnode_handle { |
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 667c31101b8b..377257d8f7e3 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h | |||
| @@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = { | |||
| 259 | * {{{2 | 259 | * {{{2 |
| 260 | */ | 260 | */ |
| 261 | #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) | 261 | #define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family) |
| 262 | static struct genl_family ZZZ_genl_family __read_mostly = { | 262 | static struct genl_family ZZZ_genl_family; |
| 263 | .id = GENL_ID_GENERATE, | ||
| 264 | .name = __stringify(GENL_MAGIC_FAMILY), | ||
| 265 | .version = GENL_MAGIC_VERSION, | ||
| 266 | #ifdef GENL_MAGIC_FAMILY_HDRSZ | ||
| 267 | .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), | ||
| 268 | #endif | ||
| 269 | .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, | ||
| 270 | }; | ||
| 271 | |||
| 272 | /* | 263 | /* |
| 273 | * Magic: define multicast groups | 264 | * Magic: define multicast groups |
| 274 | * Magic: define multicast group registration helper | 265 | * Magic: define multicast group registration helper |
| @@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ | |||
| 302 | #undef GENL_mc_group | 293 | #undef GENL_mc_group |
| 303 | #define GENL_mc_group(group) | 294 | #define GENL_mc_group(group) |
| 304 | 295 | ||
| 296 | static struct genl_family ZZZ_genl_family __ro_after_init = { | ||
| 297 | .name = __stringify(GENL_MAGIC_FAMILY), | ||
| 298 | .version = GENL_MAGIC_VERSION, | ||
| 299 | #ifdef GENL_MAGIC_FAMILY_HDRSZ | ||
| 300 | .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ), | ||
| 301 | #endif | ||
| 302 | .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1, | ||
| 303 | .ops = ZZZ_genl_ops, | ||
| 304 | .n_ops = ARRAY_SIZE(ZZZ_genl_ops), | ||
| 305 | .mcgrps = ZZZ_genl_mcgrps, | ||
| 306 | .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps), | ||
| 307 | .module = THIS_MODULE, | ||
| 308 | }; | ||
| 309 | |||
| 305 | int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) | 310 | int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) |
| 306 | { | 311 | { |
| 307 | return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ | 312 | return genl_register_family(&ZZZ_genl_family); |
| 308 | ZZZ_genl_ops, \ | ||
| 309 | ZZZ_genl_mcgrps); | ||
| 310 | } | 313 | } |
| 311 | 314 | ||
| 312 | void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) | 315 | void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f8041f9de31e..4175dca4ac39 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold); | |||
| 506 | extern void free_hot_cold_page_list(struct list_head *list, bool cold); | 506 | extern void free_hot_cold_page_list(struct list_head *list, bool cold); |
| 507 | 507 | ||
| 508 | struct page_frag_cache; | 508 | struct page_frag_cache; |
| 509 | extern void __page_frag_drain(struct page *page, unsigned int order, | ||
| 510 | unsigned int count); | ||
| 509 | extern void *__alloc_page_frag(struct page_frag_cache *nc, | 511 | extern void *__alloc_page_frag(struct page_frag_cache *nc, |
| 510 | unsigned int fragsz, gfp_t gfp_mask); | 512 | unsigned int fragsz, gfp_t gfp_mask); |
| 511 | extern void __free_page_frag(void *addr); | 513 | extern void __free_page_frag(void *addr); |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 24e2cc56beb1..c2748accea71 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
| @@ -82,8 +82,6 @@ enum single_ended_mode { | |||
| 82 | * implies that if the chip supports IRQs, these IRQs need to be threaded | 82 | * implies that if the chip supports IRQs, these IRQs need to be threaded |
| 83 | * as the chip access may sleep when e.g. reading out the IRQ status | 83 | * as the chip access may sleep when e.g. reading out the IRQ status |
| 84 | * registers. | 84 | * registers. |
| 85 | * @irq_not_threaded: flag must be set if @can_sleep is set but the | ||
| 86 | * IRQs don't need to be threaded | ||
| 87 | * @read_reg: reader function for generic GPIO | 85 | * @read_reg: reader function for generic GPIO |
| 88 | * @write_reg: writer function for generic GPIO | 86 | * @write_reg: writer function for generic GPIO |
| 89 | * @pin2mask: some generic GPIO controllers work with the big-endian bits | 87 | * @pin2mask: some generic GPIO controllers work with the big-endian bits |
| @@ -91,7 +89,7 @@ enum single_ended_mode { | |||
| 91 | * bit. This callback assigns the right bit mask. | 89 | * bit. This callback assigns the right bit mask. |
| 92 | * @reg_dat: data (in) register for generic GPIO | 90 | * @reg_dat: data (in) register for generic GPIO |
| 93 | * @reg_set: output set register (out=high) for generic GPIO | 91 | * @reg_set: output set register (out=high) for generic GPIO |
| 94 | * @reg_clk: output clear register (out=low) for generic GPIO | 92 | * @reg_clr: output clear register (out=low) for generic GPIO |
| 95 | * @reg_dir: direction setting register for generic GPIO | 93 | * @reg_dir: direction setting register for generic GPIO |
| 96 | * @bgpio_bits: number of register bits used for a generic GPIO i.e. | 94 | * @bgpio_bits: number of register bits used for a generic GPIO i.e. |
| 97 | * <register width> * 8 | 95 | * <register width> * 8 |
| @@ -109,8 +107,10 @@ enum single_ended_mode { | |||
| 109 | * for GPIO IRQs, provided by GPIO driver | 107 | * for GPIO IRQs, provided by GPIO driver |
| 110 | * @irq_default_type: default IRQ triggering type applied during GPIO driver | 108 | * @irq_default_type: default IRQ triggering type applied during GPIO driver |
| 111 | * initialization, provided by GPIO driver | 109 | * initialization, provided by GPIO driver |
| 112 | * @irq_parent: GPIO IRQ chip parent/bank linux irq number, | 110 | * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number, |
| 113 | * provided by GPIO driver | 111 | * provided by GPIO driver for chained interrupt (not for nested |
| 112 | * interrupts). | ||
| 113 | * @irq_nested: True if set the interrupt handling is nested. | ||
| 114 | * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all | 114 | * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all |
| 115 | * bits set to one | 115 | * bits set to one |
| 116 | * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to | 116 | * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to |
| @@ -166,7 +166,6 @@ struct gpio_chip { | |||
| 166 | u16 ngpio; | 166 | u16 ngpio; |
| 167 | const char *const *names; | 167 | const char *const *names; |
| 168 | bool can_sleep; | 168 | bool can_sleep; |
| 169 | bool irq_not_threaded; | ||
| 170 | 169 | ||
| 171 | #if IS_ENABLED(CONFIG_GPIO_GENERIC) | 170 | #if IS_ENABLED(CONFIG_GPIO_GENERIC) |
| 172 | unsigned long (*read_reg)(void __iomem *reg); | 171 | unsigned long (*read_reg)(void __iomem *reg); |
| @@ -192,7 +191,8 @@ struct gpio_chip { | |||
| 192 | unsigned int irq_base; | 191 | unsigned int irq_base; |
| 193 | irq_flow_handler_t irq_handler; | 192 | irq_flow_handler_t irq_handler; |
| 194 | unsigned int irq_default_type; | 193 | unsigned int irq_default_type; |
| 195 | int irq_parent; | 194 | int irq_chained_parent; |
| 195 | bool irq_nested; | ||
| 196 | bool irq_need_valid_mask; | 196 | bool irq_need_valid_mask; |
| 197 | unsigned long *irq_valid_mask; | 197 | unsigned long *irq_valid_mask; |
| 198 | struct lock_class_key *lock_key; | 198 | struct lock_class_key *lock_key; |
| @@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, | |||
| 270 | int parent_irq, | 270 | int parent_irq, |
| 271 | irq_flow_handler_t parent_handler); | 271 | irq_flow_handler_t parent_handler); |
| 272 | 272 | ||
| 273 | void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, | ||
| 274 | struct irq_chip *irqchip, | ||
| 275 | int parent_irq); | ||
| 276 | |||
| 273 | int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, | 277 | int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, |
| 274 | struct irq_chip *irqchip, | 278 | struct irq_chip *irqchip, |
| 275 | unsigned int first_irq, | 279 | unsigned int first_irq, |
| 276 | irq_flow_handler_t handler, | 280 | irq_flow_handler_t handler, |
| 277 | unsigned int type, | 281 | unsigned int type, |
| 282 | bool nested, | ||
| 278 | struct lock_class_key *lock_key); | 283 | struct lock_class_key *lock_key); |
| 279 | 284 | ||
| 285 | /* FIXME: I assume threaded IRQchips do not have the lockdep problem */ | ||
| 286 | static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, | ||
| 287 | struct irq_chip *irqchip, | ||
| 288 | unsigned int first_irq, | ||
| 289 | irq_flow_handler_t handler, | ||
| 290 | unsigned int type) | ||
| 291 | { | ||
| 292 | return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, | ||
| 293 | handler, type, true, NULL); | ||
| 294 | } | ||
| 295 | |||
| 280 | #ifdef CONFIG_LOCKDEP | 296 | #ifdef CONFIG_LOCKDEP |
| 281 | #define gpiochip_irqchip_add(...) \ | 297 | #define gpiochip_irqchip_add(...) \ |
| 282 | ( \ | 298 | ( \ |
| 283 | ({ \ | 299 | ({ \ |
| 284 | static struct lock_class_key _key; \ | 300 | static struct lock_class_key _key; \ |
| 285 | _gpiochip_irqchip_add(__VA_ARGS__, &_key); \ | 301 | _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \ |
| 286 | }) \ | 302 | }) \ |
| 287 | ) | 303 | ) |
| 288 | #else | 304 | #else |
| 289 | #define gpiochip_irqchip_add(...) \ | 305 | #define gpiochip_irqchip_add(...) \ |
| 290 | _gpiochip_irqchip_add(__VA_ARGS__, NULL) | 306 | _gpiochip_irqchip_add(__VA_ARGS__, false, NULL) |
| 291 | #endif | 307 | #endif |
| 292 | 308 | ||
| 293 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ | 309 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ |
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index ee2d8c6f9130..0b71024c082c 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | #define _GPIO_KEYS_H | 2 | #define _GPIO_KEYS_H |
| 3 | 3 | ||
| 4 | struct device; | 4 | struct device; |
| 5 | struct gpio_desc; | ||
| 6 | 5 | ||
| 7 | /** | 6 | /** |
| 8 | * struct gpio_keys_button - configuration parameters | 7 | * struct gpio_keys_button - configuration parameters |
| @@ -18,7 +17,6 @@ struct gpio_desc; | |||
| 18 | * disable button via sysfs | 17 | * disable button via sysfs |
| 19 | * @value: axis value for %EV_ABS | 18 | * @value: axis value for %EV_ABS |
| 20 | * @irq: Irq number in case of interrupt keys | 19 | * @irq: Irq number in case of interrupt keys |
| 21 | * @gpiod: GPIO descriptor | ||
| 22 | */ | 20 | */ |
| 23 | struct gpio_keys_button { | 21 | struct gpio_keys_button { |
| 24 | unsigned int code; | 22 | unsigned int code; |
| @@ -31,7 +29,6 @@ struct gpio_keys_button { | |||
| 31 | bool can_disable; | 29 | bool can_disable; |
| 32 | int value; | 30 | int value; |
| 33 | unsigned int irq; | 31 | unsigned int irq; |
| 34 | struct gpio_desc *gpiod; | ||
| 35 | }; | 32 | }; |
| 36 | 33 | ||
| 37 | /** | 34 | /** |
| @@ -46,7 +43,7 @@ struct gpio_keys_button { | |||
| 46 | * @name: input device name | 43 | * @name: input device name |
| 47 | */ | 44 | */ |
| 48 | struct gpio_keys_platform_data { | 45 | struct gpio_keys_platform_data { |
| 49 | struct gpio_keys_button *buttons; | 46 | const struct gpio_keys_button *buttons; |
| 50 | int nbuttons; | 47 | int nbuttons; |
| 51 | unsigned int poll_interval; | 48 | unsigned int poll_interval; |
| 52 | unsigned int rep:1; | 49 | unsigned int rep:1; |
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index e31bcd4c7859..97585d9679f3 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h | |||
| @@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb) | |||
| 93 | int hdlc_open(struct net_device *dev); | 93 | int hdlc_open(struct net_device *dev); |
| 94 | /* Must be called by hardware driver when HDLC device is being closed */ | 94 | /* Must be called by hardware driver when HDLC device is being closed */ |
| 95 | void hdlc_close(struct net_device *dev); | 95 | void hdlc_close(struct net_device *dev); |
| 96 | /* May be used by hardware driver */ | ||
| 97 | int hdlc_change_mtu(struct net_device *dev, int new_mtu); | ||
| 98 | /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ | 96 | /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ |
| 99 | netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); | 97 | netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); |
| 100 | 98 | ||
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index e9744202fa29..edbb4fc674ed 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h | |||
| @@ -78,6 +78,8 @@ enum hdmi_picture_aspect { | |||
| 78 | HDMI_PICTURE_ASPECT_NONE, | 78 | HDMI_PICTURE_ASPECT_NONE, |
| 79 | HDMI_PICTURE_ASPECT_4_3, | 79 | HDMI_PICTURE_ASPECT_4_3, |
| 80 | HDMI_PICTURE_ASPECT_16_9, | 80 | HDMI_PICTURE_ASPECT_16_9, |
| 81 | HDMI_PICTURE_ASPECT_64_27, | ||
| 82 | HDMI_PICTURE_ASPECT_256_135, | ||
| 81 | HDMI_PICTURE_ASPECT_RESERVED, | 83 | HDMI_PICTURE_ASPECT_RESERVED, |
| 82 | }; | 84 | }; |
| 83 | 85 | ||
diff --git a/include/linux/hid.h b/include/linux/hid.h index b2ec82712baa..28f38e2b8f30 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -231,7 +231,11 @@ struct hid_item { | |||
| 231 | #define HID_DG_TAP 0x000d0035 | 231 | #define HID_DG_TAP 0x000d0035 |
| 232 | #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 | 232 | #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 |
| 233 | #define HID_DG_PROGRAMCHANGEKEY 0x000d003a | 233 | #define HID_DG_PROGRAMCHANGEKEY 0x000d003a |
| 234 | #define HID_DG_BATTERYSTRENGTH 0x000d003b | ||
| 234 | #define HID_DG_INVERT 0x000d003c | 235 | #define HID_DG_INVERT 0x000d003c |
| 236 | #define HID_DG_TILT_X 0x000d003d | ||
| 237 | #define HID_DG_TILT_Y 0x000d003e | ||
| 238 | #define HID_DG_TWIST 0x000d0041 | ||
| 235 | #define HID_DG_TIPSWITCH 0x000d0042 | 239 | #define HID_DG_TIPSWITCH 0x000d0042 |
| 236 | #define HID_DG_TIPSWITCH2 0x000d0043 | 240 | #define HID_DG_TIPSWITCH2 0x000d0043 |
| 237 | #define HID_DG_BARRELSWITCH 0x000d0044 | 241 | #define HID_DG_BARRELSWITCH 0x000d0044 |
| @@ -479,6 +483,7 @@ struct hid_input { | |||
| 479 | struct list_head list; | 483 | struct list_head list; |
| 480 | struct hid_report *report; | 484 | struct hid_report *report; |
| 481 | struct input_dev *input; | 485 | struct input_dev *input; |
| 486 | bool registered; | ||
| 482 | }; | 487 | }; |
| 483 | 488 | ||
| 484 | enum hid_type { | 489 | enum hid_type { |
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index 8ec23fb0b412..402f99e328d4 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h | |||
| @@ -32,7 +32,6 @@ struct hippi_cb { | |||
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); | 34 | __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); |
| 35 | int hippi_change_mtu(struct net_device *dev, int new_mtu); | ||
| 36 | int hippi_mac_addr(struct net_device *dev, void *p); | 35 | int hippi_mac_addr(struct net_device *dev, void *p); |
| 37 | int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); | 36 | int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); |
| 38 | struct net_device *alloc_hippi_dev(int sizeof_priv); | 37 | struct net_device *alloc_hippi_dev(int sizeof_priv); |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 5e00f80b1535..cdab81ba29f8 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t | |||
| 228 | 228 | ||
| 229 | static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) | 229 | static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) |
| 230 | { | 230 | { |
| 231 | timer->node.expires.tv64 = tv64; | 231 | timer->node.expires = tv64; |
| 232 | timer->_softexpires.tv64 = tv64; | 232 | timer->_softexpires = tv64; |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) | 235 | static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) |
| @@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) | |||
| 256 | 256 | ||
| 257 | static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) | 257 | static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) |
| 258 | { | 258 | { |
| 259 | return timer->node.expires.tv64; | 259 | return timer->node.expires; |
| 260 | } | 260 | } |
| 261 | static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) | 261 | static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) |
| 262 | { | 262 | { |
| 263 | return timer->_softexpires.tv64; | 263 | return timer->_softexpires; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) | 266 | static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) |
| @@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void); | |||
| 297 | * this resolution values. | 297 | * this resolution values. |
| 298 | */ | 298 | */ |
| 299 | # define HIGH_RES_NSEC 1 | 299 | # define HIGH_RES_NSEC 1 |
| 300 | # define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC } | 300 | # define KTIME_HIGH_RES (HIGH_RES_NSEC) |
| 301 | # define MONOTONIC_RES_NSEC HIGH_RES_NSEC | 301 | # define MONOTONIC_RES_NSEC HIGH_RES_NSEC |
| 302 | # define KTIME_MONOTONIC_RES KTIME_HIGH_RES | 302 | # define KTIME_MONOTONIC_RES KTIME_HIGH_RES |
| 303 | 303 | ||
| @@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) | |||
| 333 | * hrtimer_start_range_ns() to prevent short timeouts. | 333 | * hrtimer_start_range_ns() to prevent short timeouts. |
| 334 | */ | 334 | */ |
| 335 | if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) | 335 | if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel) |
| 336 | rem.tv64 -= hrtimer_resolution; | 336 | rem -= hrtimer_resolution; |
| 337 | return rem; | 337 | return rem; |
| 338 | } | 338 | } |
| 339 | 339 | ||
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e35e6de633b9..97e478d6b690 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
| @@ -1,12 +1,12 @@ | |||
| 1 | #ifndef _LINUX_HUGE_MM_H | 1 | #ifndef _LINUX_HUGE_MM_H |
| 2 | #define _LINUX_HUGE_MM_H | 2 | #define _LINUX_HUGE_MM_H |
| 3 | 3 | ||
| 4 | extern int do_huge_pmd_anonymous_page(struct fault_env *fe); | 4 | extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf); |
| 5 | extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | 5 | extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 6 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, | 6 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
| 7 | struct vm_area_struct *vma); | 7 | struct vm_area_struct *vma); |
| 8 | extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); | 8 | extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); |
| 9 | extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); | 9 | extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); |
| 10 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | 10 | extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
| 11 | unsigned long addr, | 11 | unsigned long addr, |
| 12 | pmd_t *pmd, | 12 | pmd_t *pmd, |
| @@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page) | |||
| 142 | return 1; | 142 | return 1; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); | 145 | extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); |
| 146 | 146 | ||
| 147 | extern struct page *huge_zero_page; | 147 | extern struct page *huge_zero_page; |
| 148 | 148 | ||
| @@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {} | |||
| 189 | #define split_huge_pmd(__vma, __pmd, __address) \ | 189 | #define split_huge_pmd(__vma, __pmd, __address) \ |
| 190 | do { } while (0) | 190 | do { } while (0) |
| 191 | 191 | ||
| 192 | static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | ||
| 193 | unsigned long address, bool freeze, struct page *page) {} | ||
| 192 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, | 194 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, |
| 193 | unsigned long address, bool freeze, struct page *page) {} | 195 | unsigned long address, bool freeze, struct page *page) {} |
| 194 | 196 | ||
| @@ -210,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, | |||
| 210 | return NULL; | 212 | return NULL; |
| 211 | } | 213 | } |
| 212 | 214 | ||
| 213 | static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) | 215 | static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) |
| 214 | { | 216 | { |
| 215 | return 0; | 217 | return 0; |
| 216 | } | 218 | } |
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 34a0dc18f327..bee0827766a3 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h | |||
| @@ -30,8 +30,7 @@ | |||
| 30 | * Must not be NULL. *OBSOLETE* | 30 | * Must not be NULL. *OBSOLETE* |
| 31 | * @read: New API. drivers can fill up to max bytes of data | 31 | * @read: New API. drivers can fill up to max bytes of data |
| 32 | * into the buffer. The buffer is aligned for any type | 32 | * into the buffer. The buffer is aligned for any type |
| 33 | * and max is guaranteed to be >= to that alignment | 33 | * and max is a multiple of 4 and >= 32 bytes. |
| 34 | * (either 4 or 8 depending on architecture). | ||
| 35 | * @priv: Private data, for use by the RNG driver. | 34 | * @priv: Private data, for use by the RNG driver. |
| 36 | * @quality: Estimation of true entropy in RNG's bitstream | 35 | * @quality: Estimation of true entropy in RNG's bitstream |
| 37 | * (per mill). | 36 | * (per mill). |
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 9d2f8bde7d12..78d59dba563e 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h | |||
| @@ -298,8 +298,8 @@ enum hwmon_pwm_attributes { | |||
| 298 | * Channel number | 298 | * Channel number |
| 299 | * The function returns the file permissions. | 299 | * The function returns the file permissions. |
| 300 | * If the return value is 0, no attribute will be created. | 300 | * If the return value is 0, no attribute will be created. |
| 301 | * @read: Read callback. Optional. If not provided, attributes | 301 | * @read: Read callback for data attributes. Mandatory if readable |
| 302 | * will not be readable. | 302 | * data attributes are present. |
| 303 | * Parameters are: | 303 | * Parameters are: |
| 304 | * @dev: Pointer to hardware monitoring device | 304 | * @dev: Pointer to hardware monitoring device |
| 305 | * @type: Sensor type | 305 | * @type: Sensor type |
| @@ -308,8 +308,19 @@ enum hwmon_pwm_attributes { | |||
| 308 | * Channel number | 308 | * Channel number |
| 309 | * @val: Pointer to returned value | 309 | * @val: Pointer to returned value |
| 310 | * The function returns 0 on success or a negative error number. | 310 | * The function returns 0 on success or a negative error number. |
| 311 | * @write: Write callback. Optional. If not provided, attributes | 311 | * @read_string: |
| 312 | * will not be writable. | 312 | * Read callback for string attributes. Mandatory if string |
| 313 | * attributes are present. | ||
| 314 | * Parameters are: | ||
| 315 | * @dev: Pointer to hardware monitoring device | ||
| 316 | * @type: Sensor type | ||
| 317 | * @attr: Sensor attribute | ||
| 318 | * @channel: | ||
| 319 | * Channel number | ||
| 320 | * @str: Pointer to returned string | ||
| 321 | * The function returns 0 on success or a negative error number. | ||
| 322 | * @write: Write callback for data attributes. Mandatory if writeable | ||
| 323 | * data attributes are present. | ||
| 313 | * Parameters are: | 324 | * Parameters are: |
| 314 | * @dev: Pointer to hardware monitoring device | 325 | * @dev: Pointer to hardware monitoring device |
| 315 | * @type: Sensor type | 326 | * @type: Sensor type |
| @@ -324,6 +335,8 @@ struct hwmon_ops { | |||
| 324 | u32 attr, int channel); | 335 | u32 attr, int channel); |
| 325 | int (*read)(struct device *dev, enum hwmon_sensor_types type, | 336 | int (*read)(struct device *dev, enum hwmon_sensor_types type, |
| 326 | u32 attr, int channel, long *val); | 337 | u32 attr, int channel, long *val); |
| 338 | int (*read_string)(struct device *dev, enum hwmon_sensor_types type, | ||
| 339 | u32 attr, int channel, char **str); | ||
| 327 | int (*write)(struct device *dev, enum hwmon_sensor_types type, | 340 | int (*write)(struct device *dev, enum hwmon_sensor_types type, |
| 328 | u32 attr, int channel, long val); | 341 | u32 attr, int channel, long val); |
| 329 | }; | 342 | }; |
| @@ -349,7 +362,9 @@ struct hwmon_chip_info { | |||
| 349 | const struct hwmon_channel_info **info; | 362 | const struct hwmon_channel_info **info; |
| 350 | }; | 363 | }; |
| 351 | 364 | ||
| 365 | /* hwmon_device_register() is deprecated */ | ||
| 352 | struct device *hwmon_device_register(struct device *dev); | 366 | struct device *hwmon_device_register(struct device *dev); |
| 367 | |||
| 353 | struct device * | 368 | struct device * |
| 354 | hwmon_device_register_with_groups(struct device *dev, const char *name, | 369 | hwmon_device_register_with_groups(struct device *dev, const char *name, |
| 355 | void *drvdata, | 370 | void *drvdata, |
| @@ -362,12 +377,12 @@ struct device * | |||
| 362 | hwmon_device_register_with_info(struct device *dev, | 377 | hwmon_device_register_with_info(struct device *dev, |
| 363 | const char *name, void *drvdata, | 378 | const char *name, void *drvdata, |
| 364 | const struct hwmon_chip_info *info, | 379 | const struct hwmon_chip_info *info, |
| 365 | const struct attribute_group **groups); | 380 | const struct attribute_group **extra_groups); |
| 366 | struct device * | 381 | struct device * |
| 367 | devm_hwmon_device_register_with_info(struct device *dev, | 382 | devm_hwmon_device_register_with_info(struct device *dev, |
| 368 | const char *name, void *drvdata, | 383 | const char *name, void *drvdata, |
| 369 | const struct hwmon_chip_info *info, | 384 | const struct hwmon_chip_info *info, |
| 370 | const struct attribute_group **groups); | 385 | const struct attribute_group **extra_groups); |
| 371 | 386 | ||
| 372 | void hwmon_device_unregister(struct device *dev); | 387 | void hwmon_device_unregister(struct device *dev); |
| 373 | void devm_hwmon_device_unregister(struct device *dev); | 388 | void devm_hwmon_device_unregister(struct device *dev); |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index cd184bdca58f..42fe43fb0c80 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -696,7 +696,7 @@ enum vmbus_device_type { | |||
| 696 | HV_FCOPY, | 696 | HV_FCOPY, |
| 697 | HV_BACKUP, | 697 | HV_BACKUP, |
| 698 | HV_DM, | 698 | HV_DM, |
| 699 | HV_UNKOWN, | 699 | HV_UNKNOWN, |
| 700 | }; | 700 | }; |
| 701 | 701 | ||
| 702 | struct vmbus_device { | 702 | struct vmbus_device { |
| @@ -1119,6 +1119,12 @@ struct hv_driver { | |||
| 1119 | 1119 | ||
| 1120 | struct device_driver driver; | 1120 | struct device_driver driver; |
| 1121 | 1121 | ||
| 1122 | /* dynamic device GUID's */ | ||
| 1123 | struct { | ||
| 1124 | spinlock_t lock; | ||
| 1125 | struct list_head list; | ||
| 1126 | } dynids; | ||
| 1127 | |||
| 1122 | int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); | 1128 | int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); |
| 1123 | int (*remove)(struct hv_device *); | 1129 | int (*remove)(struct hv_device *); |
| 1124 | void (*shutdown)(struct hv_device *); | 1130 | void (*shutdown)(struct hv_device *); |
| @@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel); | |||
| 1447 | 1453 | ||
| 1448 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); | 1454 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); |
| 1449 | 1455 | ||
| 1456 | void vmbus_setevent(struct vmbus_channel *channel); | ||
| 1450 | /* | 1457 | /* |
| 1451 | * Negotiated version with the Host. | 1458 | * Negotiated version with the Host. |
| 1452 | */ | 1459 | */ |
| @@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) | |||
| 1479 | * there is room for the producer to send the pending packet. | 1486 | * there is room for the producer to send the pending packet. |
| 1480 | */ | 1487 | */ |
| 1481 | 1488 | ||
| 1482 | static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) | 1489 | static inline void hv_signal_on_read(struct vmbus_channel *channel) |
| 1483 | { | 1490 | { |
| 1484 | u32 cur_write_sz; | 1491 | u32 cur_write_sz; |
| 1485 | u32 pending_sz; | 1492 | u32 pending_sz; |
| 1493 | struct hv_ring_buffer_info *rbi = &channel->inbound; | ||
| 1486 | 1494 | ||
| 1487 | /* | 1495 | /* |
| 1488 | * Issue a full memory barrier before making the signaling decision. | 1496 | * Issue a full memory barrier before making the signaling decision. |
| @@ -1500,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) | |||
| 1500 | pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); | 1508 | pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); |
| 1501 | /* If the other end is not blocked on write don't bother. */ | 1509 | /* If the other end is not blocked on write don't bother. */ |
| 1502 | if (pending_sz == 0) | 1510 | if (pending_sz == 0) |
| 1503 | return false; | 1511 | return; |
| 1504 | 1512 | ||
| 1505 | cur_write_sz = hv_get_bytes_to_write(rbi); | 1513 | cur_write_sz = hv_get_bytes_to_write(rbi); |
| 1506 | 1514 | ||
| 1507 | if (cur_write_sz >= pending_sz) | 1515 | if (cur_write_sz >= pending_sz) |
| 1508 | return true; | 1516 | vmbus_setevent(channel); |
| 1509 | 1517 | ||
| 1510 | return false; | 1518 | return; |
| 1511 | } | 1519 | } |
| 1512 | 1520 | ||
| 1513 | /* | 1521 | /* |
| @@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor * | |||
| 1519 | get_next_pkt_raw(struct vmbus_channel *channel) | 1527 | get_next_pkt_raw(struct vmbus_channel *channel) |
| 1520 | { | 1528 | { |
| 1521 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | 1529 | struct hv_ring_buffer_info *ring_info = &channel->inbound; |
| 1522 | u32 read_loc = ring_info->priv_read_index; | 1530 | u32 priv_read_loc = ring_info->priv_read_index; |
| 1523 | void *ring_buffer = hv_get_ring_buffer(ring_info); | 1531 | void *ring_buffer = hv_get_ring_buffer(ring_info); |
| 1524 | struct vmpacket_descriptor *cur_desc; | ||
| 1525 | u32 packetlen; | ||
| 1526 | u32 dsize = ring_info->ring_datasize; | 1532 | u32 dsize = ring_info->ring_datasize; |
| 1527 | u32 delta = read_loc - ring_info->ring_buffer->read_index; | 1533 | /* |
| 1534 | * delta is the difference between what is available to read and | ||
| 1535 | * what was already consumed in place. We commit read index after | ||
| 1536 | * the whole batch is processed. | ||
| 1537 | */ | ||
| 1538 | u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? | ||
| 1539 | priv_read_loc - ring_info->ring_buffer->read_index : | ||
| 1540 | (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; | ||
| 1528 | u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); | 1541 | u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); |
| 1529 | 1542 | ||
| 1530 | if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) | 1543 | if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) |
| 1531 | return NULL; | 1544 | return NULL; |
| 1532 | 1545 | ||
| 1533 | if ((read_loc + sizeof(*cur_desc)) > dsize) | 1546 | return ring_buffer + priv_read_loc; |
| 1534 | return NULL; | ||
| 1535 | |||
| 1536 | cur_desc = ring_buffer + read_loc; | ||
| 1537 | packetlen = cur_desc->len8 << 3; | ||
| 1538 | |||
| 1539 | /* | ||
| 1540 | * If the packet under consideration is wrapping around, | ||
| 1541 | * return failure. | ||
| 1542 | */ | ||
| 1543 | if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) | ||
| 1544 | return NULL; | ||
| 1545 | |||
| 1546 | return cur_desc; | ||
| 1547 | } | 1547 | } |
| 1548 | 1548 | ||
| 1549 | /* | 1549 | /* |
| @@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, | |||
| 1555 | struct vmpacket_descriptor *desc) | 1555 | struct vmpacket_descriptor *desc) |
| 1556 | { | 1556 | { |
| 1557 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | 1557 | struct hv_ring_buffer_info *ring_info = &channel->inbound; |
| 1558 | u32 read_loc = ring_info->priv_read_index; | ||
| 1559 | u32 packetlen = desc->len8 << 3; | 1558 | u32 packetlen = desc->len8 << 3; |
| 1560 | u32 dsize = ring_info->ring_datasize; | 1559 | u32 dsize = ring_info->ring_datasize; |
| 1561 | 1560 | ||
| 1562 | if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) | ||
| 1563 | BUG(); | ||
| 1564 | /* | 1561 | /* |
| 1565 | * Include the packet trailer. | 1562 | * Include the packet trailer. |
| 1566 | */ | 1563 | */ |
| 1567 | ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; | 1564 | ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; |
| 1565 | ring_info->priv_read_index %= dsize; | ||
| 1568 | } | 1566 | } |
| 1569 | 1567 | ||
| 1570 | /* | 1568 | /* |
| @@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel) | |||
| 1589 | virt_rmb(); | 1587 | virt_rmb(); |
| 1590 | ring_info->ring_buffer->read_index = ring_info->priv_read_index; | 1588 | ring_info->ring_buffer->read_index = ring_info->priv_read_index; |
| 1591 | 1589 | ||
| 1592 | if (hv_need_to_signal_on_read(ring_info)) | 1590 | hv_signal_on_read(channel); |
| 1593 | vmbus_set_event(channel); | ||
| 1594 | } | 1591 | } |
| 1595 | 1592 | ||
| 1596 | 1593 | ||
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index c2e3324f9468..a1385023a29b 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h | |||
| @@ -50,31 +50,4 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, | |||
| 50 | struct i2c_smbus_alert_setup *setup); | 50 | struct i2c_smbus_alert_setup *setup); |
| 51 | int i2c_handle_smbus_alert(struct i2c_client *ara); | 51 | int i2c_handle_smbus_alert(struct i2c_client *ara); |
| 52 | 52 | ||
| 53 | /** | ||
| 54 | * smbus_host_notify - internal structure used by the Host Notify mechanism. | ||
| 55 | * @adapter: the I2C adapter associated with this struct | ||
| 56 | * @work: worker used to schedule the IRQ in the slave device | ||
| 57 | * @lock: spinlock to check if a notification is already pending | ||
| 58 | * @pending: flag set when a notification is pending (any new notification will | ||
| 59 | * be rejected if pending is true) | ||
| 60 | * @payload: the actual payload of the Host Notify event | ||
| 61 | * @addr: the address of the slave device which raised the notification | ||
| 62 | * | ||
| 63 | * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does | ||
| 64 | * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a | ||
| 65 | * managed resource to clean this up when the adapter get released. | ||
| 66 | */ | ||
| 67 | struct smbus_host_notify { | ||
| 68 | struct i2c_adapter *adapter; | ||
| 69 | struct work_struct work; | ||
| 70 | spinlock_t lock; | ||
| 71 | bool pending; | ||
| 72 | u16 payload; | ||
| 73 | u8 addr; | ||
| 74 | }; | ||
| 75 | |||
| 76 | struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap); | ||
| 77 | int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, | ||
| 78 | unsigned short addr, unsigned int data); | ||
| 79 | |||
| 80 | #endif /* _LINUX_I2C_SMBUS_H */ | 53 | #endif /* _LINUX_I2C_SMBUS_H */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 6422eef428c4..b2109c522dec 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/device.h> /* for struct device */ | 30 | #include <linux/device.h> /* for struct device */ |
| 31 | #include <linux/sched.h> /* for completion */ | 31 | #include <linux/sched.h> /* for completion */ |
| 32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
| 33 | #include <linux/irqdomain.h> /* for Host Notify IRQ */ | ||
| 33 | #include <linux/of.h> /* for struct device_node */ | 34 | #include <linux/of.h> /* for struct device_node */ |
| 34 | #include <linux/swab.h> /* for swab16 */ | 35 | #include <linux/swab.h> /* for swab16 */ |
| 35 | #include <uapi/linux/i2c.h> | 36 | #include <uapi/linux/i2c.h> |
| @@ -135,7 +136,8 @@ enum i2c_alert_protocol { | |||
| 135 | * struct i2c_driver - represent an I2C device driver | 136 | * struct i2c_driver - represent an I2C device driver |
| 136 | * @class: What kind of i2c device we instantiate (for detect) | 137 | * @class: What kind of i2c device we instantiate (for detect) |
| 137 | * @attach_adapter: Callback for bus addition (deprecated) | 138 | * @attach_adapter: Callback for bus addition (deprecated) |
| 138 | * @probe: Callback for device binding | 139 | * @probe: Callback for device binding - soon to be deprecated |
| 140 | * @probe_new: New callback for device binding | ||
| 139 | * @remove: Callback for device unbinding | 141 | * @remove: Callback for device unbinding |
| 140 | * @shutdown: Callback for device shutdown | 142 | * @shutdown: Callback for device shutdown |
| 141 | * @alert: Alert callback, for example for the SMBus alert protocol | 143 | * @alert: Alert callback, for example for the SMBus alert protocol |
| @@ -178,6 +180,11 @@ struct i2c_driver { | |||
| 178 | int (*probe)(struct i2c_client *, const struct i2c_device_id *); | 180 | int (*probe)(struct i2c_client *, const struct i2c_device_id *); |
| 179 | int (*remove)(struct i2c_client *); | 181 | int (*remove)(struct i2c_client *); |
| 180 | 182 | ||
| 183 | /* New driver model interface to aid the seamless removal of the | ||
| 184 | * current probe()'s, more commonly unused than used second parameter. | ||
| 185 | */ | ||
| 186 | int (*probe_new)(struct i2c_client *); | ||
| 187 | |||
| 181 | /* driver model interfaces that don't relate to enumeration */ | 188 | /* driver model interfaces that don't relate to enumeration */ |
| 182 | void (*shutdown)(struct i2c_client *); | 189 | void (*shutdown)(struct i2c_client *); |
| 183 | 190 | ||
| @@ -243,6 +250,8 @@ struct i2c_client { | |||
| 243 | 250 | ||
| 244 | extern struct i2c_client *i2c_verify_client(struct device *dev); | 251 | extern struct i2c_client *i2c_verify_client(struct device *dev); |
| 245 | extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); | 252 | extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); |
| 253 | extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, | ||
| 254 | const struct i2c_client *client); | ||
| 246 | 255 | ||
| 247 | static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) | 256 | static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) |
| 248 | { | 257 | { |
| @@ -567,6 +576,8 @@ struct i2c_adapter { | |||
| 567 | 576 | ||
| 568 | struct i2c_bus_recovery_info *bus_recovery_info; | 577 | struct i2c_bus_recovery_info *bus_recovery_info; |
| 569 | const struct i2c_adapter_quirks *quirks; | 578 | const struct i2c_adapter_quirks *quirks; |
| 579 | |||
| 580 | struct irq_domain *host_notify_domain; | ||
| 570 | }; | 581 | }; |
| 571 | #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) | 582 | #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) |
| 572 | 583 | ||
| @@ -739,6 +750,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) | |||
| 739 | return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); | 750 | return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); |
| 740 | } | 751 | } |
| 741 | 752 | ||
| 753 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); | ||
| 742 | /** | 754 | /** |
| 743 | * module_i2c_driver() - Helper macro for registering a modular I2C driver | 755 | * module_i2c_driver() - Helper macro for registering a modular I2C driver |
| 744 | * @__i2c_driver: i2c_driver struct | 756 | * @__i2c_driver: i2c_driver struct |
| @@ -774,6 +786,10 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) | |||
| 774 | /* must call i2c_put_adapter() when done with returned i2c_adapter device */ | 786 | /* must call i2c_put_adapter() when done with returned i2c_adapter device */ |
| 775 | struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); | 787 | struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); |
| 776 | 788 | ||
| 789 | extern const struct of_device_id | ||
| 790 | *i2c_of_match_device(const struct of_device_id *matches, | ||
| 791 | struct i2c_client *client); | ||
| 792 | |||
| 777 | #else | 793 | #else |
| 778 | 794 | ||
| 779 | static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) | 795 | static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) |
| @@ -790,6 +806,14 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node | |||
| 790 | { | 806 | { |
| 791 | return NULL; | 807 | return NULL; |
| 792 | } | 808 | } |
| 809 | |||
| 810 | static inline const struct of_device_id | ||
| 811 | *i2c_of_match_device(const struct of_device_id *matches, | ||
| 812 | struct i2c_client *client) | ||
| 813 | { | ||
| 814 | return NULL; | ||
| 815 | } | ||
| 816 | |||
| 793 | #endif /* CONFIG_OF */ | 817 | #endif /* CONFIG_OF */ |
| 794 | 818 | ||
| 795 | #if IS_ENABLED(CONFIG_ACPI) | 819 | #if IS_ENABLED(CONFIG_ACPI) |
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h new file mode 100644 index 000000000000..b08dcb183fca --- /dev/null +++ b/include/linux/i2c/mlxcpld.h | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | /* | ||
| 2 | * mlxcpld.h - Mellanox I2C multiplexer support in CPLD | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016 Mellanox Technologies. All rights reserved. | ||
| 5 | * Copyright (c) 2016 Michael Shych <michaels@mellanox.com> | ||
| 6 | * | ||
| 7 | * Redistribution and use in source and binary forms, with or without | ||
| 8 | * modification, are permitted provided that the following conditions are met: | ||
| 9 | * | ||
| 10 | * 1. Redistributions of source code must retain the above copyright | ||
| 11 | * notice, this list of conditions and the following disclaimer. | ||
| 12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 13 | * notice, this list of conditions and the following disclaimer in the | ||
| 14 | * documentation and/or other materials provided with the distribution. | ||
| 15 | * 3. Neither the names of the copyright holders nor the names of its | ||
| 16 | * contributors may be used to endorse or promote products derived from | ||
| 17 | * this software without specific prior written permission. | ||
| 18 | * | ||
| 19 | * Alternatively, this software may be distributed under the terms of the | ||
| 20 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 21 | * Software Foundation. | ||
| 22 | * | ||
| 23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 24 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 27 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
| 30 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
| 31 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 32 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | */ | ||
| 35 | |||
| 36 | #ifndef _LINUX_I2C_MLXCPLD_H | ||
| 37 | #define _LINUX_I2C_MLXCPLD_H | ||
| 38 | |||
| 39 | /* Platform data for the CPLD I2C multiplexers */ | ||
| 40 | |||
| 41 | /* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info | ||
| 42 | * @adap_ids - adapter array | ||
| 43 | * @num_adaps - number of adapters | ||
| 44 | * @sel_reg_addr - mux select register offset in CPLD space | ||
| 45 | */ | ||
| 46 | struct mlxcpld_mux_plat_data { | ||
| 47 | int *adap_ids; | ||
| 48 | int num_adaps; | ||
| 49 | int sel_reg_addr; | ||
| 50 | }; | ||
| 51 | |||
| 52 | #endif /* _LINUX_I2C_MLXCPLD_H */ | ||
diff --git a/include/linux/idr.h b/include/linux/idr.h index 083d61e92706..3c01b89aed67 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
| @@ -18,12 +18,11 @@ | |||
| 18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
| 19 | 19 | ||
| 20 | /* | 20 | /* |
| 21 | * We want shallower trees and thus more bits covered at each layer. 8 | 21 | * Using 6 bits at each layer allows us to allocate 7 layers out of each page. |
| 22 | * bits gives us large enough first layer for most use cases and maximum | 22 | * 8 bits only gave us 3 layers out of every pair of pages, which is less |
| 23 | * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and | 23 | * efficient except for trees with a largest element between 192-255 inclusive. |
| 24 | * 1k on 32bit. | ||
| 25 | */ | 24 | */ |
| 26 | #define IDR_BITS 8 | 25 | #define IDR_BITS 6 |
| 27 | #define IDR_SIZE (1 << IDR_BITS) | 26 | #define IDR_SIZE (1 << IDR_BITS) |
| 28 | #define IDR_MASK ((1 << IDR_BITS)-1) | 27 | #define IDR_MASK ((1 << IDR_BITS)-1) |
| 29 | 28 | ||
| @@ -56,6 +55,32 @@ struct idr { | |||
| 56 | #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) | 55 | #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) |
| 57 | 56 | ||
| 58 | /** | 57 | /** |
| 58 | * idr_get_cursor - Return the current position of the cyclic allocator | ||
| 59 | * @idr: idr handle | ||
| 60 | * | ||
| 61 | * The value returned is the value that will be next returned from | ||
| 62 | * idr_alloc_cyclic() if it is free (otherwise the search will start from | ||
| 63 | * this position). | ||
| 64 | */ | ||
| 65 | static inline unsigned int idr_get_cursor(struct idr *idr) | ||
| 66 | { | ||
| 67 | return READ_ONCE(idr->cur); | ||
| 68 | } | ||
| 69 | |||
| 70 | /** | ||
| 71 | * idr_set_cursor - Set the current position of the cyclic allocator | ||
| 72 | * @idr: idr handle | ||
| 73 | * @val: new position | ||
| 74 | * | ||
| 75 | * The next call to idr_alloc_cyclic() will return @val if it is free | ||
| 76 | * (otherwise the search will start from this position). | ||
| 77 | */ | ||
| 78 | static inline void idr_set_cursor(struct idr *idr, unsigned int val) | ||
| 79 | { | ||
| 80 | WRITE_ONCE(idr->cur, val); | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 59 | * DOC: idr sync | 84 | * DOC: idr sync |
| 60 | * idr synchronization (stolen from radix-tree.h) | 85 | * idr synchronization (stolen from radix-tree.h) |
| 61 | * | 86 | * |
| @@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id) | |||
| 195 | return ida_get_new_above(ida, 0, p_id); | 220 | return ida_get_new_above(ida, 0, p_id); |
| 196 | } | 221 | } |
| 197 | 222 | ||
| 223 | static inline bool ida_is_empty(struct ida *ida) | ||
| 224 | { | ||
| 225 | return idr_is_empty(&ida->idr); | ||
| 226 | } | ||
| 227 | |||
| 198 | void __init idr_init_cache(void); | 228 | void __init idr_init_cache(void); |
| 199 | 229 | ||
| 200 | #endif /* __IDR_H__ */ | 230 | #endif /* __IDR_H__ */ |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a80516fd65c8..fe849329511a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
| @@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation { | |||
| 1576 | #define WLAN_AUTH_SHARED_KEY 1 | 1576 | #define WLAN_AUTH_SHARED_KEY 1 |
| 1577 | #define WLAN_AUTH_FT 2 | 1577 | #define WLAN_AUTH_FT 2 |
| 1578 | #define WLAN_AUTH_SAE 3 | 1578 | #define WLAN_AUTH_SAE 3 |
| 1579 | #define WLAN_AUTH_FILS_SK 4 | ||
| 1580 | #define WLAN_AUTH_FILS_SK_PFS 5 | ||
| 1581 | #define WLAN_AUTH_FILS_PK 6 | ||
| 1579 | #define WLAN_AUTH_LEAP 128 | 1582 | #define WLAN_AUTH_LEAP 128 |
| 1580 | 1583 | ||
| 1581 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 1584 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
| @@ -1960,6 +1963,26 @@ enum ieee80211_eid { | |||
| 1960 | 1963 | ||
| 1961 | WLAN_EID_VENDOR_SPECIFIC = 221, | 1964 | WLAN_EID_VENDOR_SPECIFIC = 221, |
| 1962 | WLAN_EID_QOS_PARAMETER = 222, | 1965 | WLAN_EID_QOS_PARAMETER = 222, |
| 1966 | WLAN_EID_CAG_NUMBER = 237, | ||
| 1967 | WLAN_EID_AP_CSN = 239, | ||
| 1968 | WLAN_EID_FILS_INDICATION = 240, | ||
| 1969 | WLAN_EID_DILS = 241, | ||
| 1970 | WLAN_EID_FRAGMENT = 242, | ||
| 1971 | WLAN_EID_EXTENSION = 255 | ||
| 1972 | }; | ||
| 1973 | |||
| 1974 | /* Element ID Extensions for Element ID 255 */ | ||
| 1975 | enum ieee80211_eid_ext { | ||
| 1976 | WLAN_EID_EXT_ASSOC_DELAY_INFO = 1, | ||
| 1977 | WLAN_EID_EXT_FILS_REQ_PARAMS = 2, | ||
| 1978 | WLAN_EID_EXT_FILS_KEY_CONFIRM = 3, | ||
| 1979 | WLAN_EID_EXT_FILS_SESSION = 4, | ||
| 1980 | WLAN_EID_EXT_FILS_HLP_CONTAINER = 5, | ||
| 1981 | WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6, | ||
| 1982 | WLAN_EID_EXT_KEY_DELIVERY = 7, | ||
| 1983 | WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, | ||
| 1984 | WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, | ||
| 1985 | WLAN_EID_EXT_FILS_NONCE = 13, | ||
| 1963 | }; | 1986 | }; |
| 1964 | 1987 | ||
| 1965 | /* Action category code */ | 1988 | /* Action category code */ |
| @@ -2073,6 +2096,9 @@ enum ieee80211_key_len { | |||
| 2073 | #define IEEE80211_GCMP_MIC_LEN 16 | 2096 | #define IEEE80211_GCMP_MIC_LEN 16 |
| 2074 | #define IEEE80211_GCMP_PN_LEN 6 | 2097 | #define IEEE80211_GCMP_PN_LEN 6 |
| 2075 | 2098 | ||
| 2099 | #define FILS_NONCE_LEN 16 | ||
| 2100 | #define FILS_MAX_KEK_LEN 64 | ||
| 2101 | |||
| 2076 | /* Public action codes */ | 2102 | /* Public action codes */ |
| 2077 | enum ieee80211_pub_actioncode { | 2103 | enum ieee80211_pub_actioncode { |
| 2078 | WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, | 2104 | WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, |
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index f563907ed776..3355efc89781 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h | |||
| @@ -44,4 +44,20 @@ static inline int arp_hdr_len(struct net_device *dev) | |||
| 44 | return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; | 44 | return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; |
| 45 | } | 45 | } |
| 46 | } | 46 | } |
| 47 | |||
| 48 | static inline bool dev_is_mac_header_xmit(const struct net_device *dev) | ||
| 49 | { | ||
| 50 | switch (dev->type) { | ||
| 51 | case ARPHRD_TUNNEL: | ||
| 52 | case ARPHRD_TUNNEL6: | ||
| 53 | case ARPHRD_SIT: | ||
| 54 | case ARPHRD_IPGRE: | ||
| 55 | case ARPHRD_VOID: | ||
| 56 | case ARPHRD_NONE: | ||
| 57 | return false; | ||
| 58 | default: | ||
| 59 | return true; | ||
| 60 | } | ||
| 61 | } | ||
| 62 | |||
| 47 | #endif /* _LINUX_IF_ARP_H */ | 63 | #endif /* _LINUX_IF_ARP_H */ |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3319d97d789d..8d5fcd6284ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
| @@ -399,22 +399,6 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) | |||
| 399 | skb->vlan_tci = 0; | 399 | skb->vlan_tci = 0; |
| 400 | return skb; | 400 | return skb; |
| 401 | } | 401 | } |
| 402 | /* | ||
| 403 | * vlan_hwaccel_push_inside - pushes vlan tag to the payload | ||
| 404 | * @skb: skbuff to tag | ||
| 405 | * | ||
| 406 | * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the | ||
| 407 | * VLAN tag from @skb->vlan_tci inside to the payload. | ||
| 408 | * | ||
| 409 | * Following the skb_unshare() example, in case of error, the calling function | ||
| 410 | * doesn't have to worry about freeing the original skb. | ||
| 411 | */ | ||
| 412 | static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) | ||
| 413 | { | ||
| 414 | if (skb_vlan_tag_present(skb)) | ||
| 415 | skb = __vlan_hwaccel_push_inside(skb); | ||
| 416 | return skb; | ||
| 417 | } | ||
| 418 | 402 | ||
| 419 | /** | 403 | /** |
| 420 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting | 404 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting |
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index e7fdec4db9da..5ba430cc9a87 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h | |||
| @@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); | |||
| 136 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ | 136 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ |
| 137 | BIT(IIO_CHAN_INFO_OFFSET), \ | 137 | BIT(IIO_CHAN_INFO_OFFSET), \ |
| 138 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ | 138 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
| 139 | .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ | ||
| 139 | .scan_index = (_si), \ | 140 | .scan_index = (_si), \ |
| 140 | .scan_type = { \ | 141 | .scan_type = { \ |
| 141 | .sign = 'u', \ | 142 | .sign = 'u', \ |
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 9edccfba1ffb..47eeec3218b5 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h | |||
| @@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val); | |||
| 226 | int iio_write_channel_raw(struct iio_channel *chan, int val); | 226 | int iio_write_channel_raw(struct iio_channel *chan, int val); |
| 227 | 227 | ||
| 228 | /** | 228 | /** |
| 229 | * iio_read_max_channel_raw() - read maximum available raw value from a given | ||
| 230 | * channel, i.e. the maximum possible value. | ||
| 231 | * @chan: The channel being queried. | ||
| 232 | * @val: Value read back. | ||
| 233 | * | ||
| 234 | * Note raw reads from iio channels are in adc counts and hence | ||
| 235 | * scale will need to be applied if standard units are required. | ||
| 236 | */ | ||
| 237 | int iio_read_max_channel_raw(struct iio_channel *chan, int *val); | ||
| 238 | |||
| 239 | /** | ||
| 240 | * iio_read_avail_channel_raw() - read available raw values from a given channel | ||
| 241 | * @chan: The channel being queried. | ||
| 242 | * @vals: Available values read back. | ||
| 243 | * @length: Number of entries in vals. | ||
| 244 | * | ||
| 245 | * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. | ||
| 246 | * | ||
| 247 | * For ranges, three vals are always returned; min, step and max. | ||
| 248 | * For lists, all the possible values are enumerated. | ||
| 249 | * | ||
| 250 | * Note raw available values from iio channels are in adc counts and | ||
| 251 | * hence scale will need to be applied if standard units are required. | ||
| 252 | */ | ||
| 253 | int iio_read_avail_channel_raw(struct iio_channel *chan, | ||
| 254 | const int **vals, int *length); | ||
| 255 | |||
| 256 | /** | ||
| 229 | * iio_get_channel_type() - get the type of a channel | 257 | * iio_get_channel_type() - get the type of a channel |
| 230 | * @channel: The channel being queried. | 258 | * @channel: The channel being queried. |
| 231 | * @type: The type of the channel. | 259 | * @type: The type of the channel. |
| @@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel, | |||
| 236 | enum iio_chan_type *type); | 264 | enum iio_chan_type *type); |
| 237 | 265 | ||
| 238 | /** | 266 | /** |
| 267 | * iio_read_channel_offset() - read the offset value for a channel | ||
| 268 | * @chan: The channel being queried. | ||
| 269 | * @val: First part of value read back. | ||
| 270 | * @val2: Second part of value read back. | ||
| 271 | * | ||
| 272 | * Note returns a description of what is in val and val2, such | ||
| 273 | * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val | ||
| 274 | * + val2/1e6 | ||
| 275 | */ | ||
| 276 | int iio_read_channel_offset(struct iio_channel *chan, int *val, | ||
| 277 | int *val2); | ||
| 278 | |||
| 279 | /** | ||
| 239 | * iio_read_channel_scale() - read the scale value for a channel | 280 | * iio_read_channel_scale() - read the scale value for a channel |
| 240 | * @chan: The channel being queried. | 281 | * @chan: The channel being queried. |
| 241 | * @val: First part of value read back. | 282 | * @val: First part of value read back. |
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index 91530e6611e9..628b2cf54c50 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h | |||
| @@ -9,8 +9,18 @@ | |||
| 9 | #ifndef IIO_DAC_MCP4725_H_ | 9 | #ifndef IIO_DAC_MCP4725_H_ |
| 10 | #define IIO_DAC_MCP4725_H_ | 10 | #define IIO_DAC_MCP4725_H_ |
| 11 | 11 | ||
| 12 | /** | ||
| 13 | * struct mcp4725_platform_data - MCP4725/6 DAC specific data. | ||
| 14 | * @use_vref: Whether an external reference voltage on Vref pin should be used. | ||
| 15 | * Additional vref-supply must be specified when used. | ||
| 16 | * @vref_buffered: Controls buffering of the external reference voltage. | ||
| 17 | * | ||
| 18 | * Vref related settings are available only on MCP4756. See | ||
| 19 | * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. | ||
| 20 | */ | ||
| 12 | struct mcp4725_platform_data { | 21 | struct mcp4725_platform_data { |
| 13 | u16 vref_mv; | 22 | bool use_vref; |
| 23 | bool vref_buffered; | ||
| 14 | }; | 24 | }; |
| 15 | 25 | ||
| 16 | #endif /* IIO_DAC_MCP4725_H_ */ | 26 | #endif /* IIO_DAC_MCP4725_H_ */ |
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b4a0679e4a49..3f5ea2e9a39e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
| @@ -225,12 +225,22 @@ struct iio_event_spec { | |||
| 225 | * endianness: little or big endian | 225 | * endianness: little or big endian |
| 226 | * @info_mask_separate: What information is to be exported that is specific to | 226 | * @info_mask_separate: What information is to be exported that is specific to |
| 227 | * this channel. | 227 | * this channel. |
| 228 | * @info_mask_separate_available: What availability information is to be | ||
| 229 | * exported that is specific to this channel. | ||
| 228 | * @info_mask_shared_by_type: What information is to be exported that is shared | 230 | * @info_mask_shared_by_type: What information is to be exported that is shared |
| 229 | * by all channels of the same type. | 231 | * by all channels of the same type. |
| 232 | * @info_mask_shared_by_type_available: What availability information is to be | ||
| 233 | * exported that is shared by all channels of the same | ||
| 234 | * type. | ||
| 230 | * @info_mask_shared_by_dir: What information is to be exported that is shared | 235 | * @info_mask_shared_by_dir: What information is to be exported that is shared |
| 231 | * by all channels of the same direction. | 236 | * by all channels of the same direction. |
| 237 | * @info_mask_shared_by_dir_available: What availability information is to be | ||
| 238 | * exported that is shared by all channels of the same | ||
| 239 | * direction. | ||
| 232 | * @info_mask_shared_by_all: What information is to be exported that is shared | 240 | * @info_mask_shared_by_all: What information is to be exported that is shared |
| 233 | * by all channels. | 241 | * by all channels. |
| 242 | * @info_mask_shared_by_all_available: What availability information is to be | ||
| 243 | * exported that is shared by all channels. | ||
| 234 | * @event_spec: Array of events which should be registered for this | 244 | * @event_spec: Array of events which should be registered for this |
| 235 | * channel. | 245 | * channel. |
| 236 | * @num_event_specs: Size of the event_spec array. | 246 | * @num_event_specs: Size of the event_spec array. |
| @@ -269,9 +279,13 @@ struct iio_chan_spec { | |||
| 269 | enum iio_endian endianness; | 279 | enum iio_endian endianness; |
| 270 | } scan_type; | 280 | } scan_type; |
| 271 | long info_mask_separate; | 281 | long info_mask_separate; |
| 282 | long info_mask_separate_available; | ||
| 272 | long info_mask_shared_by_type; | 283 | long info_mask_shared_by_type; |
| 284 | long info_mask_shared_by_type_available; | ||
| 273 | long info_mask_shared_by_dir; | 285 | long info_mask_shared_by_dir; |
| 286 | long info_mask_shared_by_dir_available; | ||
| 274 | long info_mask_shared_by_all; | 287 | long info_mask_shared_by_all; |
| 288 | long info_mask_shared_by_all_available; | ||
| 275 | const struct iio_event_spec *event_spec; | 289 | const struct iio_event_spec *event_spec; |
| 276 | unsigned int num_event_specs; | 290 | unsigned int num_event_specs; |
| 277 | const struct iio_chan_spec_ext_info *ext_info; | 291 | const struct iio_chan_spec_ext_info *ext_info; |
| @@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, | |||
| 301 | (chan->info_mask_shared_by_all & BIT(type)); | 315 | (chan->info_mask_shared_by_all & BIT(type)); |
| 302 | } | 316 | } |
| 303 | 317 | ||
| 318 | /** | ||
| 319 | * iio_channel_has_available() - Checks if a channel has an available attribute | ||
| 320 | * @chan: The channel to be queried | ||
| 321 | * @type: Type of the available attribute to be checked | ||
| 322 | * | ||
| 323 | * Returns true if the channel supports reporting available values for the | ||
| 324 | * given attribute type, false otherwise. | ||
| 325 | */ | ||
| 326 | static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, | ||
| 327 | enum iio_chan_info_enum type) | ||
| 328 | { | ||
| 329 | return (chan->info_mask_separate_available & BIT(type)) | | ||
| 330 | (chan->info_mask_shared_by_type_available & BIT(type)) | | ||
| 331 | (chan->info_mask_shared_by_dir_available & BIT(type)) | | ||
| 332 | (chan->info_mask_shared_by_all_available & BIT(type)); | ||
| 333 | } | ||
| 334 | |||
| 304 | #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ | 335 | #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ |
| 305 | .type = IIO_TIMESTAMP, \ | 336 | .type = IIO_TIMESTAMP, \ |
| 306 | .channel = -1, \ | 337 | .channel = -1, \ |
| @@ -349,6 +380,14 @@ struct iio_dev; | |||
| 349 | * max_len specifies maximum number of elements | 380 | * max_len specifies maximum number of elements |
| 350 | * vals pointer can contain. val_len is used to return | 381 | * vals pointer can contain. val_len is used to return |
| 351 | * length of valid elements in vals. | 382 | * length of valid elements in vals. |
| 383 | * @read_avail: function to return the available values from the device. | ||
| 384 | * mask specifies which value. Note 0 means the available | ||
| 385 | * values for the channel in question. Return value | ||
| 386 | * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is | ||
| 387 | * returned in vals. The type of the vals are returned in | ||
| 388 | * type and the number of vals is returned in length. For | ||
| 389 | * ranges, there are always three vals returned; min, step | ||
| 390 | * and max. For lists, all possible values are enumerated. | ||
| 352 | * @write_raw: function to write a value to the device. | 391 | * @write_raw: function to write a value to the device. |
| 353 | * Parameters are the same as for read_raw. | 392 | * Parameters are the same as for read_raw. |
| 354 | * @write_raw_get_fmt: callback function to query the expected | 393 | * @write_raw_get_fmt: callback function to query the expected |
| @@ -381,7 +420,7 @@ struct iio_dev; | |||
| 381 | **/ | 420 | **/ |
| 382 | struct iio_info { | 421 | struct iio_info { |
| 383 | struct module *driver_module; | 422 | struct module *driver_module; |
| 384 | struct attribute_group *event_attrs; | 423 | const struct attribute_group *event_attrs; |
| 385 | const struct attribute_group *attrs; | 424 | const struct attribute_group *attrs; |
| 386 | 425 | ||
| 387 | int (*read_raw)(struct iio_dev *indio_dev, | 426 | int (*read_raw)(struct iio_dev *indio_dev, |
| @@ -397,6 +436,13 @@ struct iio_info { | |||
| 397 | int *val_len, | 436 | int *val_len, |
| 398 | long mask); | 437 | long mask); |
| 399 | 438 | ||
| 439 | int (*read_avail)(struct iio_dev *indio_dev, | ||
| 440 | struct iio_chan_spec const *chan, | ||
| 441 | const int **vals, | ||
| 442 | int *type, | ||
| 443 | int *length, | ||
| 444 | long mask); | ||
| 445 | |||
| 400 | int (*write_raw)(struct iio_dev *indio_dev, | 446 | int (*write_raw)(struct iio_dev *indio_dev, |
| 401 | struct iio_chan_spec const *chan, | 447 | struct iio_chan_spec const *chan, |
| 402 | int val, | 448 | int val, |
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index 9cd8f747212f..ce9426c507fd 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h | |||
| @@ -55,10 +55,34 @@ struct iio_const_attr { | |||
| 55 | { .dev_attr = __ATTR(_name, _mode, _show, _store), \ | 55 | { .dev_attr = __ATTR(_name, _mode, _show, _store), \ |
| 56 | .address = _addr } | 56 | .address = _addr } |
| 57 | 57 | ||
| 58 | #define IIO_ATTR_RO(_name, _addr) \ | ||
| 59 | { .dev_attr = __ATTR_RO(_name), \ | ||
| 60 | .address = _addr } | ||
| 61 | |||
| 62 | #define IIO_ATTR_WO(_name, _addr) \ | ||
| 63 | { .dev_attr = __ATTR_WO(_name), \ | ||
| 64 | .address = _addr } | ||
| 65 | |||
| 66 | #define IIO_ATTR_RW(_name, _addr) \ | ||
| 67 | { .dev_attr = __ATTR_RW(_name), \ | ||
| 68 | .address = _addr } | ||
| 69 | |||
| 58 | #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ | 70 | #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ |
| 59 | struct iio_dev_attr iio_dev_attr_##_name \ | 71 | struct iio_dev_attr iio_dev_attr_##_name \ |
| 60 | = IIO_ATTR(_name, _mode, _show, _store, _addr) | 72 | = IIO_ATTR(_name, _mode, _show, _store, _addr) |
| 61 | 73 | ||
| 74 | #define IIO_DEVICE_ATTR_RO(_name, _addr) \ | ||
| 75 | struct iio_dev_attr iio_dev_attr_##_name \ | ||
| 76 | = IIO_ATTR_RO(_name, _addr) | ||
| 77 | |||
| 78 | #define IIO_DEVICE_ATTR_WO(_name, _addr) \ | ||
| 79 | struct iio_dev_attr iio_dev_attr_##_name \ | ||
| 80 | = IIO_ATTR_WO(_name, _addr) | ||
| 81 | |||
| 82 | #define IIO_DEVICE_ATTR_RW(_name, _addr) \ | ||
| 83 | struct iio_dev_attr iio_dev_attr_##_name \ | ||
| 84 | = IIO_ATTR_RW(_name, _addr) | ||
| 85 | |||
| 62 | #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ | 86 | #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ |
| 63 | struct iio_dev_attr iio_dev_attr_##_vname \ | 87 | struct iio_dev_attr iio_dev_attr_##_vname \ |
| 64 | = IIO_ATTR(_name, _mode, _show, _store, _addr) | 88 | = IIO_ATTR(_name, _mode, _show, _store, _addr) |
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 4f1154f7a33c..ea08302f2d7b 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h | |||
| @@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig); | |||
| 170 | */ | 170 | */ |
| 171 | bool iio_trigger_using_own(struct iio_dev *indio_dev); | 171 | bool iio_trigger_using_own(struct iio_dev *indio_dev); |
| 172 | 172 | ||
| 173 | int iio_trigger_validate_own_device(struct iio_trigger *trig, | ||
| 174 | struct iio_dev *indio_dev); | ||
| 173 | 175 | ||
| 174 | #else | 176 | #else |
| 175 | struct iio_trigger; | 177 | struct iio_trigger; |
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 32b579525004..2aa7b6384d64 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h | |||
| @@ -29,4 +29,9 @@ enum iio_event_info { | |||
| 29 | #define IIO_VAL_FRACTIONAL 10 | 29 | #define IIO_VAL_FRACTIONAL 10 |
| 30 | #define IIO_VAL_FRACTIONAL_LOG2 11 | 30 | #define IIO_VAL_FRACTIONAL_LOG2 11 |
| 31 | 31 | ||
| 32 | enum iio_available_type { | ||
| 33 | IIO_AVAIL_LIST, | ||
| 34 | IIO_AVAIL_RANGE, | ||
| 35 | }; | ||
| 36 | |||
| 32 | #endif /* _IIO_TYPES_H_ */ | 37 | #endif /* _IIO_TYPES_H_ */ |
diff --git a/include/linux/ima.h b/include/linux/ima.h index 0eb7c2e7f0d6..7f6952f8d6aa 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #define _LINUX_IMA_H | 11 | #define _LINUX_IMA_H |
| 12 | 12 | ||
| 13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
| 14 | #include <linux/kexec.h> | ||
| 14 | struct linux_binprm; | 15 | struct linux_binprm; |
| 15 | 16 | ||
| 16 | #ifdef CONFIG_IMA | 17 | #ifdef CONFIG_IMA |
| @@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size, | |||
| 23 | enum kernel_read_file_id id); | 24 | enum kernel_read_file_id id); |
| 24 | extern void ima_post_path_mknod(struct dentry *dentry); | 25 | extern void ima_post_path_mknod(struct dentry *dentry); |
| 25 | 26 | ||
| 27 | #ifdef CONFIG_IMA_KEXEC | ||
| 28 | extern void ima_add_kexec_buffer(struct kimage *image); | ||
| 29 | #endif | ||
| 30 | |||
| 26 | #else | 31 | #else |
| 27 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 32 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
| 28 | { | 33 | { |
| @@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry) | |||
| 62 | 67 | ||
| 63 | #endif /* CONFIG_IMA */ | 68 | #endif /* CONFIG_IMA */ |
| 64 | 69 | ||
| 70 | #ifndef CONFIG_IMA_KEXEC | ||
| 71 | struct kimage; | ||
| 72 | |||
| 73 | static inline void ima_add_kexec_buffer(struct kimage *image) | ||
| 74 | {} | ||
| 75 | #endif | ||
| 76 | |||
| 65 | #ifdef CONFIG_IMA_APPRAISE | 77 | #ifdef CONFIG_IMA_APPRAISE |
| 66 | extern void ima_inode_post_setattr(struct dentry *dentry); | 78 | extern void ima_inode_post_setattr(struct dentry *dentry); |
| 67 | extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, | 79 | extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, |
diff --git a/include/linux/init.h b/include/linux/init.h index e30104ceb86d..885c3e6d0f9d 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
| @@ -126,6 +126,9 @@ void prepare_namespace(void); | |||
| 126 | void __init load_default_modules(void); | 126 | void __init load_default_modules(void); |
| 127 | int __init init_rootfs(void); | 127 | int __init init_rootfs(void); |
| 128 | 128 | ||
| 129 | #if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX) | ||
| 130 | extern bool rodata_enabled; | ||
| 131 | #endif | ||
| 129 | #ifdef CONFIG_DEBUG_RODATA | 132 | #ifdef CONFIG_DEBUG_RODATA |
| 130 | void mark_rodata_ro(void); | 133 | void mark_rodata_ro(void); |
| 131 | #endif | 134 | #endif |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 72f0721f75e7..53144e78a369 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -232,6 +232,18 @@ struct irq_affinity_notify { | |||
| 232 | void (*release)(struct kref *ref); | 232 | void (*release)(struct kref *ref); |
| 233 | }; | 233 | }; |
| 234 | 234 | ||
| 235 | /** | ||
| 236 | * struct irq_affinity - Description for automatic irq affinity assignements | ||
| 237 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of | ||
| 238 | * the MSI(-X) vector space | ||
| 239 | * @post_vectors: Don't apply affinity to @post_vectors at end of | ||
| 240 | * the MSI(-X) vector space | ||
| 241 | */ | ||
| 242 | struct irq_affinity { | ||
| 243 | int pre_vectors; | ||
| 244 | int post_vectors; | ||
| 245 | }; | ||
| 246 | |||
| 235 | #if defined(CONFIG_SMP) | 247 | #if defined(CONFIG_SMP) |
| 236 | 248 | ||
| 237 | extern cpumask_var_t irq_default_affinity; | 249 | extern cpumask_var_t irq_default_affinity; |
| @@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | |||
| 278 | extern int | 290 | extern int |
| 279 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | 291 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
| 280 | 292 | ||
| 281 | struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec); | 293 | struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); |
| 282 | int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec); | 294 | int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd); |
| 283 | 295 | ||
| 284 | #else /* CONFIG_SMP */ | 296 | #else /* CONFIG_SMP */ |
| 285 | 297 | ||
| @@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |||
| 313 | } | 325 | } |
| 314 | 326 | ||
| 315 | static inline struct cpumask * | 327 | static inline struct cpumask * |
| 316 | irq_create_affinity_masks(const struct cpumask *affinity, int nvec) | 328 | irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) |
| 317 | { | 329 | { |
| 318 | return NULL; | 330 | return NULL; |
| 319 | } | 331 | } |
| 320 | 332 | ||
| 321 | static inline int | 333 | static inline int |
| 322 | irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) | 334 | irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) |
| 323 | { | 335 | { |
| 324 | return maxvec; | 336 | return maxvec; |
| 325 | } | 337 | } |
diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 7892f55a1866..a4c94b86401e 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h | |||
| @@ -49,6 +49,8 @@ struct iomap { | |||
| 49 | #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ | 49 | #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ |
| 50 | #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ | 50 | #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ |
| 51 | #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ | 51 | #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ |
| 52 | #define IOMAP_FAULT (1 << 3) /* mapping for page fault */ | ||
| 53 | #define IOMAP_DIRECT (1 << 4) /* direct I/O */ | ||
| 52 | 54 | ||
| 53 | struct iomap_ops { | 55 | struct iomap_ops { |
| 54 | /* | 56 | /* |
| @@ -82,4 +84,14 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
| 82 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 84 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
| 83 | loff_t start, loff_t len, struct iomap_ops *ops); | 85 | loff_t start, loff_t len, struct iomap_ops *ops); |
| 84 | 86 | ||
| 87 | /* | ||
| 88 | * Flags for direct I/O ->end_io: | ||
| 89 | */ | ||
| 90 | #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ | ||
| 91 | #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ | ||
| 92 | typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, | ||
| 93 | unsigned flags); | ||
| 94 | ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | ||
| 95 | struct iomap_ops *ops, iomap_dio_end_io_t end_io); | ||
| 96 | |||
| 85 | #endif /* LINUX_IOMAP_H */ | 97 | #endif /* LINUX_IOMAP_H */ |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 436dc21318af..0ff5111f6959 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -253,6 +253,7 @@ extern void iommu_group_remove_device(struct device *dev); | |||
| 253 | extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, | 253 | extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, |
| 254 | int (*fn)(struct device *, void *)); | 254 | int (*fn)(struct device *, void *)); |
| 255 | extern struct iommu_group *iommu_group_get(struct device *dev); | 255 | extern struct iommu_group *iommu_group_get(struct device *dev); |
| 256 | extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); | ||
| 256 | extern void iommu_group_put(struct iommu_group *group); | 257 | extern void iommu_group_put(struct iommu_group *group); |
| 257 | extern int iommu_group_register_notifier(struct iommu_group *group, | 258 | extern int iommu_group_register_notifier(struct iommu_group *group, |
| 258 | struct notifier_block *nb); | 259 | struct notifier_block *nb); |
| @@ -351,6 +352,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, | |||
| 351 | const struct iommu_ops *ops); | 352 | const struct iommu_ops *ops); |
| 352 | void iommu_fwspec_free(struct device *dev); | 353 | void iommu_fwspec_free(struct device *dev); |
| 353 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); | 354 | int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); |
| 355 | void iommu_register_instance(struct fwnode_handle *fwnode, | ||
| 356 | const struct iommu_ops *ops); | ||
| 357 | const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode); | ||
| 354 | 358 | ||
| 355 | #else /* CONFIG_IOMMU_API */ | 359 | #else /* CONFIG_IOMMU_API */ |
| 356 | 360 | ||
| @@ -580,6 +584,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, | |||
| 580 | return -ENODEV; | 584 | return -ENODEV; |
| 581 | } | 585 | } |
| 582 | 586 | ||
| 587 | static inline void iommu_register_instance(struct fwnode_handle *fwnode, | ||
| 588 | const struct iommu_ops *ops) | ||
| 589 | { | ||
| 590 | } | ||
| 591 | |||
| 592 | static inline | ||
| 593 | const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) | ||
| 594 | { | ||
| 595 | return NULL; | ||
| 596 | } | ||
| 597 | |||
| 583 | #endif /* CONFIG_IOMMU_API */ | 598 | #endif /* CONFIG_IOMMU_API */ |
| 584 | 599 | ||
| 585 | #endif /* __LINUX_IOMMU_H */ | 600 | #endif /* __LINUX_IOMMU_H */ |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a0649973ee5b..671d014e6429 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -64,6 +64,11 @@ struct ipv6_devconf { | |||
| 64 | } stable_secret; | 64 | } stable_secret; |
| 65 | __s32 use_oif_addrs_only; | 65 | __s32 use_oif_addrs_only; |
| 66 | __s32 keep_addr_on_down; | 66 | __s32 keep_addr_on_down; |
| 67 | __s32 seg6_enabled; | ||
| 68 | #ifdef CONFIG_IPV6_SEG6_HMAC | ||
| 69 | __s32 seg6_require_hmac; | ||
| 70 | #endif | ||
| 71 | __u32 enhanced_dad; | ||
| 67 | 72 | ||
| 68 | struct ctl_table_header *sysctl_header; | 73 | struct ctl_table_header *sysctl_header; |
| 69 | }; | 74 | }; |
| @@ -229,8 +234,9 @@ struct ipv6_pinfo { | |||
| 229 | rxflow:1, | 234 | rxflow:1, |
| 230 | rxtclass:1, | 235 | rxtclass:1, |
| 231 | rxpmtu:1, | 236 | rxpmtu:1, |
| 232 | rxorigdstaddr:1; | 237 | rxorigdstaddr:1, |
| 233 | /* 2 bits hole */ | 238 | recvfragsize:1; |
| 239 | /* 1 bit hole */ | ||
| 234 | } bits; | 240 | } bits; |
| 235 | __u16 all; | 241 | __u16 all; |
| 236 | } rxopt; | 242 | } rxopt; |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index b7e34313cdfe..e808f8ae6f14 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -239,7 +239,7 @@ | |||
| 239 | #define GITS_TYPER_PTA (1UL << 19) | 239 | #define GITS_TYPER_PTA (1UL << 19) |
| 240 | #define GITS_TYPER_HWCOLLCNT_SHIFT 24 | 240 | #define GITS_TYPER_HWCOLLCNT_SHIFT 24 |
| 241 | 241 | ||
| 242 | #define GITS_CBASER_VALID (1UL << 63) | 242 | #define GITS_CBASER_VALID (1ULL << 63) |
| 243 | #define GITS_CBASER_SHAREABILITY_SHIFT (10) | 243 | #define GITS_CBASER_SHAREABILITY_SHIFT (10) |
| 244 | #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) | 244 | #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) |
| 245 | #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) | 245 | #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) |
| @@ -265,7 +265,7 @@ | |||
| 265 | 265 | ||
| 266 | #define GITS_BASER_NR_REGS 8 | 266 | #define GITS_BASER_NR_REGS 8 |
| 267 | 267 | ||
| 268 | #define GITS_BASER_VALID (1UL << 63) | 268 | #define GITS_BASER_VALID (1ULL << 63) |
| 269 | #define GITS_BASER_INDIRECT (1ULL << 62) | 269 | #define GITS_BASER_INDIRECT (1ULL << 62) |
| 270 | 270 | ||
| 271 | #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) | 271 | #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) |
| @@ -295,10 +295,10 @@ | |||
| 295 | #define GITS_BASER_InnerShareable \ | 295 | #define GITS_BASER_InnerShareable \ |
| 296 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | 296 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) |
| 297 | #define GITS_BASER_PAGE_SIZE_SHIFT (8) | 297 | #define GITS_BASER_PAGE_SIZE_SHIFT (8) |
| 298 | #define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) | 298 | #define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) |
| 299 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) | 299 | #define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) |
| 300 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) | 300 | #define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) |
| 301 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) | 301 | #define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) |
| 302 | #define GITS_BASER_PAGES_MAX 256 | 302 | #define GITS_BASER_PAGES_MAX 256 |
| 303 | #define GITS_BASER_PAGES_SHIFT (0) | 303 | #define GITS_BASER_PAGES_SHIFT (0) |
| 304 | #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) | 304 | #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) |
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 81f930b0bca9..7b49c71c968b 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h | |||
| @@ -259,11 +259,11 @@ extern void gic_init(unsigned long gic_base_addr, | |||
| 259 | unsigned long gic_addrspace_size, unsigned int cpu_vec, | 259 | unsigned long gic_addrspace_size, unsigned int cpu_vec, |
| 260 | unsigned int irqbase); | 260 | unsigned int irqbase); |
| 261 | extern void gic_clocksource_init(unsigned int); | 261 | extern void gic_clocksource_init(unsigned int); |
| 262 | extern cycle_t gic_read_count(void); | 262 | extern u64 gic_read_count(void); |
| 263 | extern unsigned int gic_get_count_width(void); | 263 | extern unsigned int gic_get_count_width(void); |
| 264 | extern cycle_t gic_read_compare(void); | 264 | extern u64 gic_read_compare(void); |
| 265 | extern void gic_write_compare(cycle_t cnt); | 265 | extern void gic_write_compare(u64 cnt); |
| 266 | extern void gic_write_cpu_compare(cycle_t cnt, int cpu); | 266 | extern void gic_write_cpu_compare(u64 cnt, int cpu); |
| 267 | extern void gic_start_count(void); | 267 | extern void gic_start_count(void); |
| 268 | extern void gic_stop_count(void); | 268 | extern void gic_stop_count(void); |
| 269 | extern int gic_get_c0_compare_int(void); | 269 | extern int gic_get_c0_compare_int(void); |
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h index 0fc6ff276221..8d80fdc68647 100644 --- a/include/linux/isdnif.h +++ b/include/linux/isdnif.h | |||
| @@ -500,6 +500,6 @@ typedef struct { | |||
| 500 | * | 500 | * |
| 501 | */ | 501 | */ |
| 502 | extern int register_isdn(isdn_if*); | 502 | extern int register_isdn(isdn_if*); |
| 503 | #include <asm/uaccess.h> | 503 | #include <linux/uaccess.h> |
| 504 | 504 | ||
| 505 | #endif /* __ISDNIF_H__ */ | 505 | #endif /* __ISDNIF_H__ */ |
diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 410decacff8f..68bd88223417 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h | |||
| @@ -77,7 +77,6 @@ extern int kdb_poll_idx; | |||
| 77 | * number whenever the kernel debugger is entered. | 77 | * number whenever the kernel debugger is entered. |
| 78 | */ | 78 | */ |
| 79 | extern int kdb_initial_cpu; | 79 | extern int kdb_initial_cpu; |
| 80 | extern atomic_t kdb_event; | ||
| 81 | 80 | ||
| 82 | /* Types and messages used for dynamically added kdb shell commands */ | 81 | /* Types and messages used for dynamically added kdb shell commands */ |
| 83 | 82 | ||
| @@ -162,6 +161,7 @@ enum kdb_msgsrc { | |||
| 162 | }; | 161 | }; |
| 163 | 162 | ||
| 164 | extern int kdb_trap_printk; | 163 | extern int kdb_trap_printk; |
| 164 | extern int kdb_printf_cpu; | ||
| 165 | extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, | 165 | extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, |
| 166 | va_list args); | 166 | va_list args); |
| 167 | extern __printf(1, 2) int kdb_printf(const char *, ...); | 167 | extern __printf(1, 2) int kdb_printf(const char *, ...); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index bc6ed52a39b9..56aec84237ad 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -45,11 +45,16 @@ | |||
| 45 | 45 | ||
| 46 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | 46 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
| 47 | 47 | ||
| 48 | /* @a is a power of 2 value */ | ||
| 48 | #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) | 49 | #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
| 49 | #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) | 50 | #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
| 50 | #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) | 51 | #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
| 51 | #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) | 52 | #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
| 52 | 53 | ||
| 54 | /* generic data direction definitions */ | ||
| 55 | #define READ 0 | ||
| 56 | #define WRITE 1 | ||
| 57 | |||
| 53 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) | 58 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
| 54 | 59 | ||
| 55 | #define u64_to_user_ptr(x) ( \ | 60 | #define u64_to_user_ptr(x) ( \ |
| @@ -506,6 +511,15 @@ extern enum system_states { | |||
| 506 | #define TAINT_UNSIGNED_MODULE 13 | 511 | #define TAINT_UNSIGNED_MODULE 13 |
| 507 | #define TAINT_SOFTLOCKUP 14 | 512 | #define TAINT_SOFTLOCKUP 14 |
| 508 | #define TAINT_LIVEPATCH 15 | 513 | #define TAINT_LIVEPATCH 15 |
| 514 | #define TAINT_FLAGS_COUNT 16 | ||
| 515 | |||
| 516 | struct taint_flag { | ||
| 517 | char true; /* character printed when tainted */ | ||
| 518 | char false; /* character printed when not tainted */ | ||
| 519 | bool module; /* also show as a per-module taint flag */ | ||
| 520 | }; | ||
| 521 | |||
| 522 | extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; | ||
| 509 | 523 | ||
| 510 | extern const char hex_asc[]; | 524 | extern const char hex_asc[]; |
| 511 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] | 525 | #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 44fda64ad434..00f776816aa3 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
| @@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) | |||
| 78 | return kstat_cpu(cpu).irqs_sum; | 78 | return kstat_cpu(cpu).irqs_sum; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | 81 | extern void account_user_time(struct task_struct *, cputime_t); |
| 82 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | 82 | extern void account_system_time(struct task_struct *, int, cputime_t); |
| 83 | extern void account_steal_time(cputime_t); | 83 | extern void account_steal_time(cputime_t); |
| 84 | extern void account_idle_time(cputime_t); | 84 | extern void account_idle_time(cputime_t); |
| 85 | 85 | ||
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 406c33dcae13..d419d0e51fe5 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
| @@ -148,7 +148,36 @@ struct kexec_file_ops { | |||
| 148 | kexec_verify_sig_t *verify_sig; | 148 | kexec_verify_sig_t *verify_sig; |
| 149 | #endif | 149 | #endif |
| 150 | }; | 150 | }; |
| 151 | #endif | 151 | |
| 152 | /** | ||
| 153 | * struct kexec_buf - parameters for finding a place for a buffer in memory | ||
| 154 | * @image: kexec image in which memory to search. | ||
| 155 | * @buffer: Contents which will be copied to the allocated memory. | ||
| 156 | * @bufsz: Size of @buffer. | ||
| 157 | * @mem: On return will have address of the buffer in memory. | ||
| 158 | * @memsz: Size for the buffer in memory. | ||
| 159 | * @buf_align: Minimum alignment needed. | ||
| 160 | * @buf_min: The buffer can't be placed below this address. | ||
| 161 | * @buf_max: The buffer can't be placed above this address. | ||
| 162 | * @top_down: Allocate from top of memory. | ||
| 163 | */ | ||
| 164 | struct kexec_buf { | ||
| 165 | struct kimage *image; | ||
| 166 | void *buffer; | ||
| 167 | unsigned long bufsz; | ||
| 168 | unsigned long mem; | ||
| 169 | unsigned long memsz; | ||
| 170 | unsigned long buf_align; | ||
| 171 | unsigned long buf_min; | ||
| 172 | unsigned long buf_max; | ||
| 173 | bool top_down; | ||
| 174 | }; | ||
| 175 | |||
| 176 | int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, | ||
| 177 | int (*func)(u64, u64, void *)); | ||
| 178 | extern int kexec_add_buffer(struct kexec_buf *kbuf); | ||
| 179 | int kexec_locate_mem_hole(struct kexec_buf *kbuf); | ||
| 180 | #endif /* CONFIG_KEXEC_FILE */ | ||
| 152 | 181 | ||
| 153 | struct kimage { | 182 | struct kimage { |
| 154 | kimage_entry_t head; | 183 | kimage_entry_t head; |
| @@ -212,11 +241,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry, | |||
| 212 | struct kexec_segment __user *segments, | 241 | struct kexec_segment __user *segments, |
| 213 | unsigned long flags); | 242 | unsigned long flags); |
| 214 | extern int kernel_kexec(void); | 243 | extern int kernel_kexec(void); |
| 215 | extern int kexec_add_buffer(struct kimage *image, char *buffer, | ||
| 216 | unsigned long bufsz, unsigned long memsz, | ||
| 217 | unsigned long buf_align, unsigned long buf_min, | ||
| 218 | unsigned long buf_max, bool top_down, | ||
| 219 | unsigned long *load_addr); | ||
| 220 | extern struct page *kimage_alloc_control_pages(struct kimage *image, | 244 | extern struct page *kimage_alloc_control_pages(struct kimage *image, |
| 221 | unsigned int order); | 245 | unsigned int order); |
| 222 | extern int kexec_load_purgatory(struct kimage *image, unsigned long min, | 246 | extern int kexec_load_purgatory(struct kimage *image, unsigned long min, |
| @@ -259,12 +283,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); | |||
| 259 | vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) | 283 | vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) |
| 260 | #define VMCOREINFO_CONFIG(name) \ | 284 | #define VMCOREINFO_CONFIG(name) \ |
| 261 | vmcoreinfo_append_str("CONFIG_%s=y\n", #name) | 285 | vmcoreinfo_append_str("CONFIG_%s=y\n", #name) |
| 262 | #define VMCOREINFO_PAGE_OFFSET(value) \ | ||
| 263 | vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) | ||
| 264 | #define VMCOREINFO_VMALLOC_START(value) \ | ||
| 265 | vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) | ||
| 266 | #define VMCOREINFO_VMEMMAP_START(value) \ | ||
| 267 | vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) | ||
| 268 | 286 | ||
| 269 | extern struct kimage *kexec_image; | 287 | extern struct kimage *kexec_image; |
| 270 | extern struct kimage *kexec_crash_image; | 288 | extern struct kimage *kexec_crash_image; |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index a6e82a69c363..4fec8b775895 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | |||
| 48 | __k; \ | 48 | __k; \ |
| 49 | }) | 49 | }) |
| 50 | 50 | ||
| 51 | void free_kthread_struct(struct task_struct *k); | ||
| 51 | void kthread_bind(struct task_struct *k, unsigned int cpu); | 52 | void kthread_bind(struct task_struct *k, unsigned int cpu); |
| 52 | void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); | 53 | void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); |
| 53 | int kthread_stop(struct task_struct *k); | 54 | int kthread_stop(struct task_struct *k); |
| @@ -174,7 +175,7 @@ __printf(2, 3) | |||
| 174 | struct kthread_worker * | 175 | struct kthread_worker * |
| 175 | kthread_create_worker(unsigned int flags, const char namefmt[], ...); | 176 | kthread_create_worker(unsigned int flags, const char namefmt[], ...); |
| 176 | 177 | ||
| 177 | struct kthread_worker * | 178 | __printf(3, 4) struct kthread_worker * |
| 178 | kthread_create_worker_on_cpu(int cpu, unsigned int flags, | 179 | kthread_create_worker_on_cpu(int cpu, unsigned int flags, |
| 179 | const char namefmt[], ...); | 180 | const char namefmt[], ...); |
| 180 | 181 | ||
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 0fb7ffb1775f..0c8bd45c8206 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
| @@ -24,21 +24,8 @@ | |||
| 24 | #include <linux/time.h> | 24 | #include <linux/time.h> |
| 25 | #include <linux/jiffies.h> | 25 | #include <linux/jiffies.h> |
| 26 | 26 | ||
| 27 | /* | 27 | /* Nanosecond scalar representation for kernel time values */ |
| 28 | * ktime_t: | 28 | typedef s64 ktime_t; |
| 29 | * | ||
| 30 | * A single 64-bit variable is used to store the hrtimers | ||
| 31 | * internal representation of time values in scalar nanoseconds. The | ||
| 32 | * design plays out best on 64-bit CPUs, where most conversions are | ||
| 33 | * NOPs and most arithmetic ktime_t operations are plain arithmetic | ||
| 34 | * operations. | ||
| 35 | * | ||
| 36 | */ | ||
| 37 | union ktime { | ||
| 38 | s64 tv64; | ||
| 39 | }; | ||
| 40 | |||
| 41 | typedef union ktime ktime_t; /* Kill this */ | ||
| 42 | 29 | ||
| 43 | /** | 30 | /** |
| 44 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value | 31 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value |
| @@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */ | |||
| 50 | static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) | 37 | static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) |
| 51 | { | 38 | { |
| 52 | if (unlikely(secs >= KTIME_SEC_MAX)) | 39 | if (unlikely(secs >= KTIME_SEC_MAX)) |
| 53 | return (ktime_t){ .tv64 = KTIME_MAX }; | 40 | return KTIME_MAX; |
| 54 | 41 | ||
| 55 | return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; | 42 | return secs * NSEC_PER_SEC + (s64)nsecs; |
| 56 | } | 43 | } |
| 57 | 44 | ||
| 58 | /* Subtract two ktime_t variables. rem = lhs -rhs: */ | 45 | /* Subtract two ktime_t variables. rem = lhs -rhs: */ |
| 59 | #define ktime_sub(lhs, rhs) \ | 46 | #define ktime_sub(lhs, rhs) ((lhs) - (rhs)) |
| 60 | ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) | ||
| 61 | 47 | ||
| 62 | /* Add two ktime_t variables. res = lhs + rhs: */ | 48 | /* Add two ktime_t variables. res = lhs + rhs: */ |
| 63 | #define ktime_add(lhs, rhs) \ | 49 | #define ktime_add(lhs, rhs) ((lhs) + (rhs)) |
| 64 | ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) | ||
| 65 | 50 | ||
| 66 | /* | 51 | /* |
| 67 | * Same as ktime_add(), but avoids undefined behaviour on overflow; however, | 52 | * Same as ktime_add(), but avoids undefined behaviour on overflow; however, |
| 68 | * this means that you must check the result for overflow yourself. | 53 | * this means that you must check the result for overflow yourself. |
| 69 | */ | 54 | */ |
| 70 | #define ktime_add_unsafe(lhs, rhs) \ | 55 | #define ktime_add_unsafe(lhs, rhs) ((u64) (lhs) + (rhs)) |
| 71 | ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) | ||
| 72 | 56 | ||
| 73 | /* | 57 | /* |
| 74 | * Add a ktime_t variable and a scalar nanosecond value. | 58 | * Add a ktime_t variable and a scalar nanosecond value. |
| 75 | * res = kt + nsval: | 59 | * res = kt + nsval: |
| 76 | */ | 60 | */ |
| 77 | #define ktime_add_ns(kt, nsval) \ | 61 | #define ktime_add_ns(kt, nsval) ((kt) + (nsval)) |
| 78 | ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) | ||
| 79 | 62 | ||
| 80 | /* | 63 | /* |
| 81 | * Subtract a scalar nanosecond from a ktime_t variable | 64 | * Subtract a scalar nanosecond from a ktime_t variable |
| 82 | * res = kt - nsval: | 65 | * res = kt - nsval: |
| 83 | */ | 66 | */ |
| 84 | #define ktime_sub_ns(kt, nsval) \ | 67 | #define ktime_sub_ns(kt, nsval) ((kt) - (nsval)) |
| 85 | ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) | ||
| 86 | 68 | ||
| 87 | /* convert a timespec to ktime_t format: */ | 69 | /* convert a timespec to ktime_t format: */ |
| 88 | static inline ktime_t timespec_to_ktime(struct timespec ts) | 70 | static inline ktime_t timespec_to_ktime(struct timespec ts) |
| @@ -103,31 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
| 103 | } | 85 | } |
| 104 | 86 | ||
| 105 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ | 87 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ |
| 106 | #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) | 88 | #define ktime_to_timespec(kt) ns_to_timespec((kt)) |
| 107 | 89 | ||
| 108 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ | 90 | /* Map the ktime_t to timespec conversion to ns_to_timespec function */ |
| 109 | #define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) | 91 | #define ktime_to_timespec64(kt) ns_to_timespec64((kt)) |
| 110 | 92 | ||
| 111 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ | 93 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ |
| 112 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) | 94 | #define ktime_to_timeval(kt) ns_to_timeval((kt)) |
| 113 | 95 | ||
| 114 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ | 96 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ |
| 115 | #define ktime_to_ns(kt) ((kt).tv64) | 97 | #define ktime_to_ns(kt) (kt) |
| 116 | |||
| 117 | |||
| 118 | /** | ||
| 119 | * ktime_equal - Compares two ktime_t variables to see if they are equal | ||
| 120 | * @cmp1: comparable1 | ||
| 121 | * @cmp2: comparable2 | ||
| 122 | * | ||
| 123 | * Compare two ktime_t variables. | ||
| 124 | * | ||
| 125 | * Return: 1 if equal. | ||
| 126 | */ | ||
| 127 | static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) | ||
| 128 | { | ||
| 129 | return cmp1.tv64 == cmp2.tv64; | ||
| 130 | } | ||
| 131 | 98 | ||
| 132 | /** | 99 | /** |
| 133 | * ktime_compare - Compares two ktime_t variables for less, greater or equal | 100 | * ktime_compare - Compares two ktime_t variables for less, greater or equal |
| @@ -141,9 +108,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) | |||
| 141 | */ | 108 | */ |
| 142 | static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) | 109 | static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) |
| 143 | { | 110 | { |
| 144 | if (cmp1.tv64 < cmp2.tv64) | 111 | if (cmp1 < cmp2) |
| 145 | return -1; | 112 | return -1; |
| 146 | if (cmp1.tv64 > cmp2.tv64) | 113 | if (cmp1 > cmp2) |
| 147 | return 1; | 114 | return 1; |
| 148 | return 0; | 115 | return 0; |
| 149 | } | 116 | } |
| @@ -182,7 +149,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) | |||
| 182 | */ | 149 | */ |
| 183 | BUG_ON(div < 0); | 150 | BUG_ON(div < 0); |
| 184 | if (__builtin_constant_p(div) && !(div >> 32)) { | 151 | if (__builtin_constant_p(div) && !(div >> 32)) { |
| 185 | s64 ns = kt.tv64; | 152 | s64 ns = kt; |
| 186 | u64 tmp = ns < 0 ? -ns : ns; | 153 | u64 tmp = ns < 0 ? -ns : ns; |
| 187 | 154 | ||
| 188 | do_div(tmp, div); | 155 | do_div(tmp, div); |
| @@ -199,7 +166,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div) | |||
| 199 | * so catch them on 64bit as well. | 166 | * so catch them on 64bit as well. |
| 200 | */ | 167 | */ |
| 201 | WARN_ON(div < 0); | 168 | WARN_ON(div < 0); |
| 202 | return kt.tv64 / div; | 169 | return kt / div; |
| 203 | } | 170 | } |
| 204 | #endif | 171 | #endif |
| 205 | 172 | ||
| @@ -256,7 +223,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); | |||
| 256 | static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, | 223 | static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, |
| 257 | struct timespec *ts) | 224 | struct timespec *ts) |
| 258 | { | 225 | { |
| 259 | if (kt.tv64) { | 226 | if (kt) { |
| 260 | *ts = ktime_to_timespec(kt); | 227 | *ts = ktime_to_timespec(kt); |
| 261 | return true; | 228 | return true; |
| 262 | } else { | 229 | } else { |
| @@ -275,7 +242,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, | |||
| 275 | static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, | 242 | static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, |
| 276 | struct timespec64 *ts) | 243 | struct timespec64 *ts) |
| 277 | { | 244 | { |
| 278 | if (kt.tv64) { | 245 | if (kt) { |
| 279 | *ts = ktime_to_timespec64(kt); | 246 | *ts = ktime_to_timespec64(kt); |
| 280 | return true; | 247 | return true; |
| 281 | } else { | 248 | } else { |
| @@ -290,20 +257,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, | |||
| 290 | * this resolution values. | 257 | * this resolution values. |
| 291 | */ | 258 | */ |
| 292 | #define LOW_RES_NSEC TICK_NSEC | 259 | #define LOW_RES_NSEC TICK_NSEC |
| 293 | #define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } | 260 | #define KTIME_LOW_RES (LOW_RES_NSEC) |
| 294 | 261 | ||
| 295 | static inline ktime_t ns_to_ktime(u64 ns) | 262 | static inline ktime_t ns_to_ktime(u64 ns) |
| 296 | { | 263 | { |
| 297 | static const ktime_t ktime_zero = { .tv64 = 0 }; | 264 | return ns; |
| 298 | |||
| 299 | return ktime_add_ns(ktime_zero, ns); | ||
| 300 | } | 265 | } |
| 301 | 266 | ||
| 302 | static inline ktime_t ms_to_ktime(u64 ms) | 267 | static inline ktime_t ms_to_ktime(u64 ms) |
| 303 | { | 268 | { |
| 304 | static const ktime_t ktime_zero = { .tv64 = 0 }; | 269 | return ms * NSEC_PER_MSEC; |
| 305 | |||
| 306 | return ktime_add_ms(ktime_zero, ms); | ||
| 307 | } | 270 | } |
| 308 | 271 | ||
| 309 | # include <linux/timekeeping.h> | 272 | # include <linux/timekeeping.h> |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 01c0b9cc3915..1c5190dab2c1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -224,7 +224,6 @@ struct kvm_vcpu { | |||
| 224 | 224 | ||
| 225 | int fpu_active; | 225 | int fpu_active; |
| 226 | int guest_fpu_loaded, guest_xcr0_loaded; | 226 | int guest_fpu_loaded, guest_xcr0_loaded; |
| 227 | unsigned char fpu_counter; | ||
| 228 | struct swait_queue_head wq; | 227 | struct swait_queue_head wq; |
| 229 | struct pid *pid; | 228 | struct pid *pid; |
| 230 | int sigset_active; | 229 | int sigset_active; |
| @@ -439,6 +438,9 @@ struct kvm { | |||
| 439 | pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) | 438 | pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
| 440 | #define kvm_debug(fmt, ...) \ | 439 | #define kvm_debug(fmt, ...) \ |
| 441 | pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) | 440 | pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) |
| 441 | #define kvm_debug_ratelimited(fmt, ...) \ | ||
| 442 | pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ | ||
| 443 | ## __VA_ARGS__) | ||
| 442 | #define kvm_pr_unimpl(fmt, ...) \ | 444 | #define kvm_pr_unimpl(fmt, ...) \ |
| 443 | pr_err_ratelimited("kvm [%i]: " fmt, \ | 445 | pr_err_ratelimited("kvm [%i]: " fmt, \ |
| 444 | task_tgid_nr(current), ## __VA_ARGS__) | 446 | task_tgid_nr(current), ## __VA_ARGS__) |
| @@ -450,6 +452,9 @@ struct kvm { | |||
| 450 | 452 | ||
| 451 | #define vcpu_debug(vcpu, fmt, ...) \ | 453 | #define vcpu_debug(vcpu, fmt, ...) \ |
| 452 | kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) | 454 | kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
| 455 | #define vcpu_debug_ratelimited(vcpu, fmt, ...) \ | ||
| 456 | kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ | ||
| 457 | ## __VA_ARGS__) | ||
| 453 | #define vcpu_err(vcpu, fmt, ...) \ | 458 | #define vcpu_err(vcpu, fmt, ...) \ |
| 454 | kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) | 459 | kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) |
| 455 | 460 | ||
| @@ -645,6 +650,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |||
| 645 | unsigned long len); | 650 | unsigned long len); |
| 646 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 651 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 647 | void *data, unsigned long len); | 652 | void *data, unsigned long len); |
| 653 | int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | ||
| 654 | void *data, int offset, unsigned long len); | ||
| 648 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 655 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
| 649 | gpa_t gpa, unsigned long len); | 656 | gpa_t gpa, unsigned long len); |
| 650 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | 657 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); |
| @@ -1107,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) | |||
| 1107 | 1114 | ||
| 1108 | extern bool kvm_rebooting; | 1115 | extern bool kvm_rebooting; |
| 1109 | 1116 | ||
| 1117 | extern unsigned int halt_poll_ns; | ||
| 1118 | extern unsigned int halt_poll_ns_grow; | ||
| 1119 | extern unsigned int halt_poll_ns_shrink; | ||
| 1120 | |||
| 1110 | struct kvm_device { | 1121 | struct kvm_device { |
| 1111 | struct kvm_device_ops *ops; | 1122 | struct kvm_device_ops *ops; |
| 1112 | struct kvm *kvm; | 1123 | struct kvm *kvm; |
diff --git a/include/linux/leds.h b/include/linux/leds.h index ddfcb2df3656..569cb531094c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
| @@ -42,16 +42,20 @@ struct led_classdev { | |||
| 42 | #define LED_UNREGISTERING (1 << 1) | 42 | #define LED_UNREGISTERING (1 << 1) |
| 43 | /* Upper 16 bits reflect control information */ | 43 | /* Upper 16 bits reflect control information */ |
| 44 | #define LED_CORE_SUSPENDRESUME (1 << 16) | 44 | #define LED_CORE_SUSPENDRESUME (1 << 16) |
| 45 | #define LED_BLINK_SW (1 << 17) | 45 | #define LED_SYSFS_DISABLE (1 << 17) |
| 46 | #define LED_BLINK_ONESHOT (1 << 18) | 46 | #define LED_DEV_CAP_FLASH (1 << 18) |
| 47 | #define LED_BLINK_ONESHOT_STOP (1 << 19) | 47 | #define LED_HW_PLUGGABLE (1 << 19) |
| 48 | #define LED_BLINK_INVERT (1 << 20) | 48 | #define LED_PANIC_INDICATOR (1 << 20) |
| 49 | #define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) | 49 | |
| 50 | #define LED_BLINK_DISABLE (1 << 22) | 50 | /* set_brightness_work / blink_timer flags, atomic, private. */ |
| 51 | #define LED_SYSFS_DISABLE (1 << 23) | 51 | unsigned long work_flags; |
| 52 | #define LED_DEV_CAP_FLASH (1 << 24) | 52 | |
| 53 | #define LED_HW_PLUGGABLE (1 << 25) | 53 | #define LED_BLINK_SW 0 |
| 54 | #define LED_PANIC_INDICATOR (1 << 26) | 54 | #define LED_BLINK_ONESHOT 1 |
| 55 | #define LED_BLINK_ONESHOT_STOP 2 | ||
| 56 | #define LED_BLINK_INVERT 3 | ||
| 57 | #define LED_BLINK_BRIGHTNESS_CHANGE 4 | ||
| 58 | #define LED_BLINK_DISABLE 5 | ||
| 55 | 59 | ||
| 56 | /* Set LED brightness level | 60 | /* Set LED brightness level |
| 57 | * Must not sleep. Use brightness_set_blocking for drivers | 61 | * Must not sleep. Use brightness_set_blocking for drivers |
| @@ -89,6 +93,7 @@ struct led_classdev { | |||
| 89 | unsigned long blink_delay_on, blink_delay_off; | 93 | unsigned long blink_delay_on, blink_delay_off; |
| 90 | struct timer_list blink_timer; | 94 | struct timer_list blink_timer; |
| 91 | int blink_brightness; | 95 | int blink_brightness; |
| 96 | int new_blink_brightness; | ||
| 92 | void (*flash_resume)(struct led_classdev *led_cdev); | 97 | void (*flash_resume)(struct led_classdev *led_cdev); |
| 93 | 98 | ||
| 94 | struct work_struct set_brightness_work; | 99 | struct work_struct set_brightness_work; |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 616eef4d81ea..c170be548b7f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -166,6 +166,8 @@ enum { | |||
| 166 | ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ | 166 | ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ |
| 167 | ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ | 167 | ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ |
| 168 | ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ | 168 | ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ |
| 169 | ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ | ||
| 170 | ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ | ||
| 169 | ATA_DFLAG_INIT_MASK = (1 << 24) - 1, | 171 | ATA_DFLAG_INIT_MASK = (1 << 24) - 1, |
| 170 | 172 | ||
| 171 | ATA_DFLAG_DETACH = (1 << 24), | 173 | ATA_DFLAG_DETACH = (1 << 24), |
| @@ -342,7 +344,9 @@ enum { | |||
| 342 | ATA_SHIFT_PIO = 0, | 344 | ATA_SHIFT_PIO = 0, |
| 343 | ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, | 345 | ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, |
| 344 | ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, | 346 | ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, |
| 347 | ATA_SHIFT_PRIO = 6, | ||
| 345 | 348 | ||
| 349 | ATA_PRIO_HIGH = 2, | ||
| 346 | /* size of buffer to pad xfers ending on unaligned boundaries */ | 350 | /* size of buffer to pad xfers ending on unaligned boundaries */ |
| 347 | ATA_DMA_PAD_SZ = 4, | 351 | ATA_DMA_PAD_SZ = 4, |
| 348 | 352 | ||
| @@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) | |||
| 542 | 546 | ||
| 543 | extern struct device_attribute dev_attr_link_power_management_policy; | 547 | extern struct device_attribute dev_attr_link_power_management_policy; |
| 544 | extern struct device_attribute dev_attr_unload_heads; | 548 | extern struct device_attribute dev_attr_unload_heads; |
| 549 | extern struct device_attribute dev_attr_ncq_prio_enable; | ||
| 545 | extern struct device_attribute dev_attr_em_message_type; | 550 | extern struct device_attribute dev_attr_em_message_type; |
| 546 | extern struct device_attribute dev_attr_em_message; | 551 | extern struct device_attribute dev_attr_em_message; |
| 547 | extern struct device_attribute dev_attr_sw_activity; | 552 | extern struct device_attribute dev_attr_sw_activity; |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index d190786e4ad8..7c273bbc5351 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
| @@ -47,6 +47,7 @@ struct ppa_addr { | |||
| 47 | struct nvm_rq; | 47 | struct nvm_rq; |
| 48 | struct nvm_id; | 48 | struct nvm_id; |
| 49 | struct nvm_dev; | 49 | struct nvm_dev; |
| 50 | struct nvm_tgt_dev; | ||
| 50 | 51 | ||
| 51 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | 52 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); |
| 52 | typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); | 53 | typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); |
| @@ -107,6 +108,8 @@ enum { | |||
| 107 | NVM_RSP_NOT_CHANGEABLE = 0x1, | 108 | NVM_RSP_NOT_CHANGEABLE = 0x1, |
| 108 | NVM_RSP_ERR_FAILWRITE = 0x40ff, | 109 | NVM_RSP_ERR_FAILWRITE = 0x40ff, |
| 109 | NVM_RSP_ERR_EMPTYPAGE = 0x42ff, | 110 | NVM_RSP_ERR_EMPTYPAGE = 0x42ff, |
| 111 | NVM_RSP_ERR_FAILECC = 0x4281, | ||
| 112 | NVM_RSP_WARN_HIGHECC = 0x4700, | ||
| 110 | 113 | ||
| 111 | /* Device opcodes */ | 114 | /* Device opcodes */ |
| 112 | NVM_OP_HBREAD = 0x02, | 115 | NVM_OP_HBREAD = 0x02, |
| @@ -208,7 +211,7 @@ struct nvm_id { | |||
| 208 | 211 | ||
| 209 | struct nvm_target { | 212 | struct nvm_target { |
| 210 | struct list_head list; | 213 | struct list_head list; |
| 211 | struct nvm_dev *dev; | 214 | struct nvm_tgt_dev *dev; |
| 212 | struct nvm_tgt_type *type; | 215 | struct nvm_tgt_type *type; |
| 213 | struct gendisk *disk; | 216 | struct gendisk *disk; |
| 214 | }; | 217 | }; |
| @@ -228,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *); | |||
| 228 | 231 | ||
| 229 | struct nvm_rq { | 232 | struct nvm_rq { |
| 230 | struct nvm_tgt_instance *ins; | 233 | struct nvm_tgt_instance *ins; |
| 231 | struct nvm_dev *dev; | 234 | struct nvm_tgt_dev *dev; |
| 232 | 235 | ||
| 233 | struct bio *bio; | 236 | struct bio *bio; |
| 234 | 237 | ||
| @@ -263,35 +266,12 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |||
| 263 | return rqdata + 1; | 266 | return rqdata + 1; |
| 264 | } | 267 | } |
| 265 | 268 | ||
| 266 | struct nvm_block; | ||
| 267 | |||
| 268 | struct nvm_lun { | ||
| 269 | int id; | ||
| 270 | |||
| 271 | int lun_id; | ||
| 272 | int chnl_id; | ||
| 273 | |||
| 274 | spinlock_t lock; | ||
| 275 | |||
| 276 | unsigned int nr_free_blocks; /* Number of unused blocks */ | ||
| 277 | struct nvm_block *blocks; | ||
| 278 | }; | ||
| 279 | |||
| 280 | enum { | 269 | enum { |
| 281 | NVM_BLK_ST_FREE = 0x1, /* Free block */ | 270 | NVM_BLK_ST_FREE = 0x1, /* Free block */ |
| 282 | NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ | 271 | NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ |
| 283 | NVM_BLK_ST_BAD = 0x8, /* Bad block */ | 272 | NVM_BLK_ST_BAD = 0x8, /* Bad block */ |
| 284 | }; | 273 | }; |
| 285 | 274 | ||
| 286 | struct nvm_block { | ||
| 287 | struct list_head list; | ||
| 288 | struct nvm_lun *lun; | ||
| 289 | unsigned long id; | ||
| 290 | |||
| 291 | void *priv; | ||
| 292 | int state; | ||
| 293 | }; | ||
| 294 | |||
| 295 | /* system block cpu representation */ | 275 | /* system block cpu representation */ |
| 296 | struct nvm_sb_info { | 276 | struct nvm_sb_info { |
| 297 | unsigned long seqnr; | 277 | unsigned long seqnr; |
| @@ -301,22 +281,12 @@ struct nvm_sb_info { | |||
| 301 | struct ppa_addr fs_ppa; | 281 | struct ppa_addr fs_ppa; |
| 302 | }; | 282 | }; |
| 303 | 283 | ||
| 304 | struct nvm_dev { | 284 | /* Device generic information */ |
| 305 | struct nvm_dev_ops *ops; | 285 | struct nvm_geo { |
| 306 | |||
| 307 | struct list_head devices; | ||
| 308 | |||
| 309 | /* Media manager */ | ||
| 310 | struct nvmm_type *mt; | ||
| 311 | void *mp; | ||
| 312 | |||
| 313 | /* System blocks */ | ||
| 314 | struct nvm_sb_info sb; | ||
| 315 | |||
| 316 | /* Device information */ | ||
| 317 | int nr_chnls; | 286 | int nr_chnls; |
| 287 | int nr_luns; | ||
| 288 | int luns_per_chnl; /* -1 if channels are not symmetric */ | ||
| 318 | int nr_planes; | 289 | int nr_planes; |
| 319 | int luns_per_chnl; | ||
| 320 | int sec_per_pg; /* only sectors for a single page */ | 290 | int sec_per_pg; /* only sectors for a single page */ |
| 321 | int pgs_per_blk; | 291 | int pgs_per_blk; |
| 322 | int blks_per_lun; | 292 | int blks_per_lun; |
| @@ -336,14 +306,44 @@ struct nvm_dev { | |||
| 336 | int sec_per_pl; /* all sectors across planes */ | 306 | int sec_per_pl; /* all sectors across planes */ |
| 337 | int sec_per_blk; | 307 | int sec_per_blk; |
| 338 | int sec_per_lun; | 308 | int sec_per_lun; |
| 309 | }; | ||
| 310 | |||
| 311 | struct nvm_tgt_dev { | ||
| 312 | /* Device information */ | ||
| 313 | struct nvm_geo geo; | ||
| 314 | |||
| 315 | /* Base ppas for target LUNs */ | ||
| 316 | struct ppa_addr *luns; | ||
| 317 | |||
| 318 | sector_t total_secs; | ||
| 319 | |||
| 320 | struct nvm_id identity; | ||
| 321 | struct request_queue *q; | ||
| 322 | |||
| 323 | struct nvm_dev *parent; | ||
| 324 | void *map; | ||
| 325 | }; | ||
| 326 | |||
| 327 | struct nvm_dev { | ||
| 328 | struct nvm_dev_ops *ops; | ||
| 329 | |||
| 330 | struct list_head devices; | ||
| 331 | |||
| 332 | /* Media manager */ | ||
| 333 | struct nvmm_type *mt; | ||
| 334 | void *mp; | ||
| 335 | |||
| 336 | /* System blocks */ | ||
| 337 | struct nvm_sb_info sb; | ||
| 338 | |||
| 339 | /* Device information */ | ||
| 340 | struct nvm_geo geo; | ||
| 339 | 341 | ||
| 340 | /* lower page table */ | 342 | /* lower page table */ |
| 341 | int lps_per_blk; | 343 | int lps_per_blk; |
| 342 | int *lptbl; | 344 | int *lptbl; |
| 343 | 345 | ||
| 344 | unsigned long total_blocks; | ||
| 345 | unsigned long total_secs; | 346 | unsigned long total_secs; |
| 346 | int nr_luns; | ||
| 347 | 347 | ||
| 348 | unsigned long *lun_map; | 348 | unsigned long *lun_map; |
| 349 | void *dma_pool; | 349 | void *dma_pool; |
| @@ -352,26 +352,57 @@ struct nvm_dev { | |||
| 352 | 352 | ||
| 353 | /* Backend device */ | 353 | /* Backend device */ |
| 354 | struct request_queue *q; | 354 | struct request_queue *q; |
| 355 | struct device dev; | ||
| 356 | struct device *parent_dev; | ||
| 357 | char name[DISK_NAME_LEN]; | 355 | char name[DISK_NAME_LEN]; |
| 358 | void *private_data; | 356 | void *private_data; |
| 359 | 357 | ||
| 358 | void *rmap; | ||
| 359 | |||
| 360 | struct mutex mlock; | 360 | struct mutex mlock; |
| 361 | spinlock_t lock; | 361 | spinlock_t lock; |
| 362 | }; | 362 | }; |
| 363 | 363 | ||
| 364 | static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, | ||
| 365 | u64 pba) | ||
| 366 | { | ||
| 367 | struct ppa_addr l; | ||
| 368 | int secs, pgs, blks, luns; | ||
| 369 | sector_t ppa = pba; | ||
| 370 | |||
| 371 | l.ppa = 0; | ||
| 372 | |||
| 373 | div_u64_rem(ppa, geo->sec_per_pg, &secs); | ||
| 374 | l.g.sec = secs; | ||
| 375 | |||
| 376 | sector_div(ppa, geo->sec_per_pg); | ||
| 377 | div_u64_rem(ppa, geo->pgs_per_blk, &pgs); | ||
| 378 | l.g.pg = pgs; | ||
| 379 | |||
| 380 | sector_div(ppa, geo->pgs_per_blk); | ||
| 381 | div_u64_rem(ppa, geo->blks_per_lun, &blks); | ||
| 382 | l.g.blk = blks; | ||
| 383 | |||
| 384 | sector_div(ppa, geo->blks_per_lun); | ||
| 385 | div_u64_rem(ppa, geo->luns_per_chnl, &luns); | ||
| 386 | l.g.lun = luns; | ||
| 387 | |||
| 388 | sector_div(ppa, geo->luns_per_chnl); | ||
| 389 | l.g.ch = ppa; | ||
| 390 | |||
| 391 | return l; | ||
| 392 | } | ||
| 393 | |||
| 364 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, | 394 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, |
| 365 | struct ppa_addr r) | 395 | struct ppa_addr r) |
| 366 | { | 396 | { |
| 397 | struct nvm_geo *geo = &dev->geo; | ||
| 367 | struct ppa_addr l; | 398 | struct ppa_addr l; |
| 368 | 399 | ||
| 369 | l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; | 400 | l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; |
| 370 | l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; | 401 | l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; |
| 371 | l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; | 402 | l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; |
| 372 | l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; | 403 | l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; |
| 373 | l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; | 404 | l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; |
| 374 | l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; | 405 | l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; |
| 375 | 406 | ||
| 376 | return l; | 407 | return l; |
| 377 | } | 408 | } |
| @@ -379,24 +410,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, | |||
| 379 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, | 410 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, |
| 380 | struct ppa_addr r) | 411 | struct ppa_addr r) |
| 381 | { | 412 | { |
| 413 | struct nvm_geo *geo = &dev->geo; | ||
| 382 | struct ppa_addr l; | 414 | struct ppa_addr l; |
| 383 | 415 | ||
| 384 | l.ppa = 0; | 416 | l.ppa = 0; |
| 385 | /* | 417 | /* |
| 386 | * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. | 418 | * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc. |
| 387 | */ | 419 | */ |
| 388 | l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & | 420 | l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & |
| 389 | (((1 << dev->ppaf.blk_len) - 1)); | 421 | (((1 << geo->ppaf.blk_len) - 1)); |
| 390 | l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) & | 422 | l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & |
| 391 | (((1 << dev->ppaf.pg_len) - 1)); | 423 | (((1 << geo->ppaf.pg_len) - 1)); |
| 392 | l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) & | 424 | l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & |
| 393 | (((1 << dev->ppaf.sect_len) - 1)); | 425 | (((1 << geo->ppaf.sect_len) - 1)); |
| 394 | l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) & | 426 | l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & |
| 395 | (((1 << dev->ppaf.pln_len) - 1)); | 427 | (((1 << geo->ppaf.pln_len) - 1)); |
| 396 | l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) & | 428 | l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & |
| 397 | (((1 << dev->ppaf.lun_len) - 1)); | 429 | (((1 << geo->ppaf.lun_len) - 1)); |
| 398 | l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) & | 430 | l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & |
| 399 | (((1 << dev->ppaf.ch_len) - 1)); | 431 | (((1 << geo->ppaf.ch_len) - 1)); |
| 400 | 432 | ||
| 401 | return l; | 433 | return l; |
| 402 | } | 434 | } |
| @@ -411,18 +443,13 @@ static inline void ppa_set_empty(struct ppa_addr *ppa_addr) | |||
| 411 | ppa_addr->ppa = ADDR_EMPTY; | 443 | ppa_addr->ppa = ADDR_EMPTY; |
| 412 | } | 444 | } |
| 413 | 445 | ||
| 414 | static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, | 446 | static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) |
| 415 | struct nvm_block *blk) | ||
| 416 | { | 447 | { |
| 417 | struct ppa_addr ppa; | 448 | if (ppa_empty(ppa1) || ppa_empty(ppa2)) |
| 418 | struct nvm_lun *lun = blk->lun; | 449 | return 0; |
| 419 | |||
| 420 | ppa.ppa = 0; | ||
| 421 | ppa.g.blk = blk->id % dev->blks_per_lun; | ||
| 422 | ppa.g.lun = lun->lun_id; | ||
| 423 | ppa.g.ch = lun->chnl_id; | ||
| 424 | 450 | ||
| 425 | return ppa; | 451 | return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) && |
| 452 | (ppa1.g.blk == ppa2.g.blk)); | ||
| 426 | } | 453 | } |
| 427 | 454 | ||
| 428 | static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) | 455 | static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) |
| @@ -432,7 +459,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) | |||
| 432 | 459 | ||
| 433 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); | 460 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); |
| 434 | typedef sector_t (nvm_tgt_capacity_fn)(void *); | 461 | typedef sector_t (nvm_tgt_capacity_fn)(void *); |
| 435 | typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); | 462 | typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); |
| 436 | typedef void (nvm_tgt_exit_fn)(void *); | 463 | typedef void (nvm_tgt_exit_fn)(void *); |
| 437 | 464 | ||
| 438 | struct nvm_tgt_type { | 465 | struct nvm_tgt_type { |
| @@ -465,23 +492,18 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *); | |||
| 465 | 492 | ||
| 466 | typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); | 493 | typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); |
| 467 | typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); | 494 | typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); |
| 468 | typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, | 495 | typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *); |
| 469 | struct nvm_lun *, unsigned long); | 496 | typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int); |
| 470 | typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
| 471 | typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
| 472 | typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
| 473 | typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
| 474 | typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); | ||
| 475 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, | ||
| 476 | unsigned long); | ||
| 477 | typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int); | ||
| 478 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | ||
| 479 | typedef int (nvmm_reserve_lun)(struct nvm_dev *, int); | ||
| 480 | typedef void (nvmm_release_lun)(struct nvm_dev *, int); | ||
| 481 | typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); | ||
| 482 | |||
| 483 | typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); | 497 | typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); |
| 484 | typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); | 498 | typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); |
| 499 | typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *, | ||
| 500 | struct ppa_addr, int); | ||
| 501 | typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int); | ||
| 502 | |||
| 503 | enum { | ||
| 504 | TRANS_TGT_TO_DEV = 0x0, | ||
| 505 | TRANS_DEV_TO_TGT = 0x1, | ||
| 506 | }; | ||
| 485 | 507 | ||
| 486 | struct nvmm_type { | 508 | struct nvmm_type { |
| 487 | const char *name; | 509 | const char *name; |
| @@ -493,54 +515,41 @@ struct nvmm_type { | |||
| 493 | nvmm_create_tgt_fn *create_tgt; | 515 | nvmm_create_tgt_fn *create_tgt; |
| 494 | nvmm_remove_tgt_fn *remove_tgt; | 516 | nvmm_remove_tgt_fn *remove_tgt; |
| 495 | 517 | ||
| 496 | /* Block administration callbacks */ | ||
| 497 | nvmm_get_blk_fn *get_blk; | ||
| 498 | nvmm_put_blk_fn *put_blk; | ||
| 499 | nvmm_open_blk_fn *open_blk; | ||
| 500 | nvmm_close_blk_fn *close_blk; | ||
| 501 | nvmm_flush_blk_fn *flush_blk; | ||
| 502 | |||
| 503 | nvmm_submit_io_fn *submit_io; | 518 | nvmm_submit_io_fn *submit_io; |
| 504 | nvmm_erase_blk_fn *erase_blk; | 519 | nvmm_erase_blk_fn *erase_blk; |
| 505 | 520 | ||
| 506 | /* Bad block mgmt */ | ||
| 507 | nvmm_mark_blk_fn *mark_blk; | ||
| 508 | |||
| 509 | /* Configuration management */ | ||
| 510 | nvmm_get_lun_fn *get_lun; | ||
| 511 | nvmm_reserve_lun *reserve_lun; | ||
| 512 | nvmm_release_lun *release_lun; | ||
| 513 | |||
| 514 | /* Statistics */ | ||
| 515 | nvmm_lun_info_print_fn *lun_info_print; | ||
| 516 | |||
| 517 | nvmm_get_area_fn *get_area; | 521 | nvmm_get_area_fn *get_area; |
| 518 | nvmm_put_area_fn *put_area; | 522 | nvmm_put_area_fn *put_area; |
| 519 | 523 | ||
| 524 | nvmm_trans_ppa_fn *trans_ppa; | ||
| 525 | nvmm_part_to_tgt_fn *part_to_tgt; | ||
| 526 | |||
| 520 | struct list_head list; | 527 | struct list_head list; |
| 521 | }; | 528 | }; |
| 522 | 529 | ||
| 523 | extern int nvm_register_mgr(struct nvmm_type *); | 530 | extern int nvm_register_mgr(struct nvmm_type *); |
| 524 | extern void nvm_unregister_mgr(struct nvmm_type *); | 531 | extern void nvm_unregister_mgr(struct nvmm_type *); |
| 525 | 532 | ||
| 526 | extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, | ||
| 527 | unsigned long); | ||
| 528 | extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); | ||
| 529 | |||
| 530 | extern struct nvm_dev *nvm_alloc_dev(int); | 533 | extern struct nvm_dev *nvm_alloc_dev(int); |
| 531 | extern int nvm_register(struct nvm_dev *); | 534 | extern int nvm_register(struct nvm_dev *); |
| 532 | extern void nvm_unregister(struct nvm_dev *); | 535 | extern void nvm_unregister(struct nvm_dev *); |
| 533 | 536 | ||
| 534 | void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type); | 537 | extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int); |
| 535 | 538 | extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, | |
| 536 | extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); | 539 | int, int); |
| 540 | extern int nvm_max_phys_sects(struct nvm_tgt_dev *); | ||
| 541 | extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); | ||
| 537 | extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); | 542 | extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); |
| 538 | extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); | 543 | extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); |
| 539 | extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, | 544 | extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, |
| 540 | const struct ppa_addr *, int, int); | 545 | const struct ppa_addr *, int, int); |
| 541 | extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); | 546 | extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); |
| 542 | extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); | 547 | extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int); |
| 543 | extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); | 548 | extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); |
| 549 | extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, | ||
| 550 | void *); | ||
| 551 | extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); | ||
| 552 | extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); | ||
| 544 | extern void nvm_end_io(struct nvm_rq *, int); | 553 | extern void nvm_end_io(struct nvm_rq *, int); |
| 545 | extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, | 554 | extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, |
| 546 | void *, int); | 555 | void *, int); |
| @@ -548,6 +557,7 @@ extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, | |||
| 548 | int, void *, int); | 557 | int, void *, int); |
| 549 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); | 558 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); |
| 550 | extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); | 559 | extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); |
| 560 | extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); | ||
| 551 | 561 | ||
| 552 | /* sysblk.c */ | 562 | /* sysblk.c */ |
| 553 | #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ | 563 | #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ |
| @@ -569,10 +579,10 @@ extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); | |||
| 569 | 579 | ||
| 570 | extern int nvm_dev_factory(struct nvm_dev *, int flags); | 580 | extern int nvm_dev_factory(struct nvm_dev *, int flags); |
| 571 | 581 | ||
| 572 | #define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \ | 582 | #define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \ |
| 573 | for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \ | 583 | for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls; \ |
| 574 | (chid)++, (ppa).g.ch = (chid)) \ | 584 | (chid)++, (ppa).g.ch = (chid)) \ |
| 575 | for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \ | 585 | for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \ |
| 576 | (lunid)++, (ppa).g.lun = (lunid)) | 586 | (lunid)++, (ppa).g.lun = (lunid)) |
| 577 | 587 | ||
| 578 | #else /* CONFIG_NVM */ | 588 | #else /* CONFIG_NVM */ |
diff --git a/include/linux/list.h b/include/linux/list.h index 5809e9a2de5b..d1039ecaf94f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list) | |||
| 28 | list->prev = list; | 28 | list->prev = list; |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | #ifdef CONFIG_DEBUG_LIST | ||
| 32 | extern bool __list_add_valid(struct list_head *new, | ||
| 33 | struct list_head *prev, | ||
| 34 | struct list_head *next); | ||
| 35 | extern bool __list_del_entry_valid(struct list_head *entry); | ||
| 36 | #else | ||
| 37 | static inline bool __list_add_valid(struct list_head *new, | ||
| 38 | struct list_head *prev, | ||
| 39 | struct list_head *next) | ||
| 40 | { | ||
| 41 | return true; | ||
| 42 | } | ||
| 43 | static inline bool __list_del_entry_valid(struct list_head *entry) | ||
| 44 | { | ||
| 45 | return true; | ||
| 46 | } | ||
| 47 | #endif | ||
| 48 | |||
| 31 | /* | 49 | /* |
| 32 | * Insert a new entry between two known consecutive entries. | 50 | * Insert a new entry between two known consecutive entries. |
| 33 | * | 51 | * |
| 34 | * This is only for internal list manipulation where we know | 52 | * This is only for internal list manipulation where we know |
| 35 | * the prev/next entries already! | 53 | * the prev/next entries already! |
| 36 | */ | 54 | */ |
| 37 | #ifndef CONFIG_DEBUG_LIST | ||
| 38 | static inline void __list_add(struct list_head *new, | 55 | static inline void __list_add(struct list_head *new, |
| 39 | struct list_head *prev, | 56 | struct list_head *prev, |
| 40 | struct list_head *next) | 57 | struct list_head *next) |
| 41 | { | 58 | { |
| 59 | if (!__list_add_valid(new, prev, next)) | ||
| 60 | return; | ||
| 61 | |||
| 42 | next->prev = new; | 62 | next->prev = new; |
| 43 | new->next = next; | 63 | new->next = next; |
| 44 | new->prev = prev; | 64 | new->prev = prev; |
| 45 | WRITE_ONCE(prev->next, new); | 65 | WRITE_ONCE(prev->next, new); |
| 46 | } | 66 | } |
| 47 | #else | ||
| 48 | extern void __list_add(struct list_head *new, | ||
| 49 | struct list_head *prev, | ||
| 50 | struct list_head *next); | ||
| 51 | #endif | ||
| 52 | 67 | ||
| 53 | /** | 68 | /** |
| 54 | * list_add - add a new entry | 69 | * list_add - add a new entry |
| @@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) | |||
| 96 | * Note: list_empty() on entry does not return true after this, the entry is | 111 | * Note: list_empty() on entry does not return true after this, the entry is |
| 97 | * in an undefined state. | 112 | * in an undefined state. |
| 98 | */ | 113 | */ |
| 99 | #ifndef CONFIG_DEBUG_LIST | ||
| 100 | static inline void __list_del_entry(struct list_head *entry) | 114 | static inline void __list_del_entry(struct list_head *entry) |
| 101 | { | 115 | { |
| 116 | if (!__list_del_entry_valid(entry)) | ||
| 117 | return; | ||
| 118 | |||
| 102 | __list_del(entry->prev, entry->next); | 119 | __list_del(entry->prev, entry->next); |
| 103 | } | 120 | } |
| 104 | 121 | ||
| 105 | static inline void list_del(struct list_head *entry) | 122 | static inline void list_del(struct list_head *entry) |
| 106 | { | 123 | { |
| 107 | __list_del(entry->prev, entry->next); | 124 | __list_del_entry(entry); |
| 108 | entry->next = LIST_POISON1; | 125 | entry->next = LIST_POISON1; |
| 109 | entry->prev = LIST_POISON2; | 126 | entry->prev = LIST_POISON2; |
| 110 | } | 127 | } |
| 111 | #else | ||
| 112 | extern void __list_del_entry(struct list_head *entry); | ||
| 113 | extern void list_del(struct list_head *entry); | ||
| 114 | #endif | ||
| 115 | 128 | ||
| 116 | /** | 129 | /** |
| 117 | * list_replace - replace old entry by new one | 130 | * list_replace - replace old entry by new one |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c1458fede1f9..1e327bb80838 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -338,9 +338,18 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 338 | extern void lock_release(struct lockdep_map *lock, int nested, | 338 | extern void lock_release(struct lockdep_map *lock, int nested, |
| 339 | unsigned long ip); | 339 | unsigned long ip); |
| 340 | 340 | ||
| 341 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) | 341 | /* |
| 342 | * Same "read" as for lock_acquire(), except -1 means any. | ||
| 343 | */ | ||
| 344 | extern int lock_is_held_type(struct lockdep_map *lock, int read); | ||
| 345 | |||
| 346 | static inline int lock_is_held(struct lockdep_map *lock) | ||
| 347 | { | ||
| 348 | return lock_is_held_type(lock, -1); | ||
| 349 | } | ||
| 342 | 350 | ||
| 343 | extern int lock_is_held(struct lockdep_map *lock); | 351 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) |
| 352 | #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) | ||
| 344 | 353 | ||
| 345 | extern void lock_set_class(struct lockdep_map *lock, const char *name, | 354 | extern void lock_set_class(struct lockdep_map *lock, const char *name, |
| 346 | struct lock_class_key *key, unsigned int subclass, | 355 | struct lock_class_key *key, unsigned int subclass, |
| @@ -372,6 +381,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); | |||
| 372 | WARN_ON(debug_locks && !lockdep_is_held(l)); \ | 381 | WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
| 373 | } while (0) | 382 | } while (0) |
| 374 | 383 | ||
| 384 | #define lockdep_assert_held_exclusive(l) do { \ | ||
| 385 | WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ | ||
| 386 | } while (0) | ||
| 387 | |||
| 388 | #define lockdep_assert_held_read(l) do { \ | ||
| 389 | WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ | ||
| 390 | } while (0) | ||
| 391 | |||
| 375 | #define lockdep_assert_held_once(l) do { \ | 392 | #define lockdep_assert_held_once(l) do { \ |
| 376 | WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ | 393 | WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ |
| 377 | } while (0) | 394 | } while (0) |
| @@ -428,7 +445,11 @@ struct lock_class_key { }; | |||
| 428 | 445 | ||
| 429 | #define lockdep_depth(tsk) (0) | 446 | #define lockdep_depth(tsk) (0) |
| 430 | 447 | ||
| 448 | #define lockdep_is_held_type(l, r) (1) | ||
| 449 | |||
| 431 | #define lockdep_assert_held(l) do { (void)(l); } while (0) | 450 | #define lockdep_assert_held(l) do { (void)(l); } while (0) |
| 451 | #define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) | ||
| 452 | #define lockdep_assert_held_read(l) do { (void)(l); } while (0) | ||
| 432 | #define lockdep_assert_held_once(l) do { (void)(l); } while (0) | 453 | #define lockdep_assert_held_once(l) do { (void)(l); } while (0) |
| 433 | 454 | ||
| 434 | #define lockdep_recursing(tsk) (0) | 455 | #define lockdep_recursing(tsk) (0) |
diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 2931aa43dab1..0d3f14fd2621 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h | |||
| @@ -82,6 +82,7 @@ static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, | |||
| 82 | } | 82 | } |
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | #ifdef CONFIG_MVEBU_MBUS | ||
| 85 | int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); | 86 | int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); |
| 86 | void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); | 87 | void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); |
| 87 | void mvebu_mbus_get_pcie_io_aperture(struct resource *res); | 88 | void mvebu_mbus_get_pcie_io_aperture(struct resource *res); |
| @@ -97,5 +98,12 @@ int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base, | |||
| 97 | size_t mbus_size, phys_addr_t sdram_phys_base, | 98 | size_t mbus_size, phys_addr_t sdram_phys_base, |
| 98 | size_t sdram_size); | 99 | size_t sdram_size); |
| 99 | int mvebu_mbus_dt_init(bool is_coherent); | 100 | int mvebu_mbus_dt_init(bool is_coherent); |
| 101 | #else | ||
| 102 | static inline int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, | ||
| 103 | u8 *attr) | ||
| 104 | { | ||
| 105 | return -EINVAL; | ||
| 106 | } | ||
| 107 | #endif /* CONFIG_MVEBU_MBUS */ | ||
| 100 | 108 | ||
| 101 | #endif /* __LINUX_MBUS_H */ | 109 | #endif /* __LINUX_MBUS_H */ |
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index a585b4b5fa0e..0661af17a758 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <asm/mc146818rtc.h> /* register access macros */ | 16 | #include <asm/mc146818rtc.h> /* register access macros */ |
| 17 | #include <linux/bcd.h> | 17 | #include <linux/bcd.h> |
| 18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
| 19 | #include <linux/pm-trace.h> | ||
| 19 | 20 | ||
| 20 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
| 21 | #include <linux/spinlock.h> /* spinlock_t */ | 22 | #include <linux/spinlock.h> /* spinlock_t */ |
diff --git a/include/linux/mdev.h b/include/linux/mdev.h new file mode 100644 index 000000000000..ec819e9a115a --- /dev/null +++ b/include/linux/mdev.h | |||
| @@ -0,0 +1,168 @@ | |||
| 1 | /* | ||
| 2 | * Mediated device definition | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * Author: Neo Jia <cjia@nvidia.com> | ||
| 6 | * Kirti Wankhede <kwankhede@nvidia.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef MDEV_H | ||
| 14 | #define MDEV_H | ||
| 15 | |||
| 16 | /* Parent device */ | ||
| 17 | struct parent_device { | ||
| 18 | struct device *dev; | ||
| 19 | const struct parent_ops *ops; | ||
| 20 | |||
| 21 | /* internal */ | ||
| 22 | struct kref ref; | ||
| 23 | struct mutex lock; | ||
| 24 | struct list_head next; | ||
| 25 | struct kset *mdev_types_kset; | ||
| 26 | struct list_head type_list; | ||
| 27 | }; | ||
| 28 | |||
| 29 | /* Mediated device */ | ||
| 30 | struct mdev_device { | ||
| 31 | struct device dev; | ||
| 32 | struct parent_device *parent; | ||
| 33 | uuid_le uuid; | ||
| 34 | void *driver_data; | ||
| 35 | |||
| 36 | /* internal */ | ||
| 37 | struct kref ref; | ||
| 38 | struct list_head next; | ||
| 39 | struct kobject *type_kobj; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /** | ||
| 43 | * struct parent_ops - Structure to be registered for each parent device to | ||
| 44 | * register the device to mdev module. | ||
| 45 | * | ||
| 46 | * @owner: The module owner. | ||
| 47 | * @dev_attr_groups: Attributes of the parent device. | ||
| 48 | * @mdev_attr_groups: Attributes of the mediated device. | ||
| 49 | * @supported_type_groups: Attributes to define supported types. It is mandatory | ||
| 50 | * to provide supported types. | ||
| 51 | * @create: Called to allocate basic resources in parent device's | ||
| 52 | * driver for a particular mediated device. It is | ||
| 53 | * mandatory to provide create ops. | ||
| 54 | * @kobj: kobject of type for which 'create' is called. | ||
| 55 | * @mdev: mdev_device structure on of mediated device | ||
| 56 | * that is being created | ||
| 57 | * Returns integer: success (0) or error (< 0) | ||
| 58 | * @remove: Called to free resources in parent device's driver for a | ||
| 59 | * a mediated device. It is mandatory to provide 'remove' | ||
| 60 | * ops. | ||
| 61 | * @mdev: mdev_device device structure which is being | ||
| 62 | * destroyed | ||
| 63 | * Returns integer: success (0) or error (< 0) | ||
| 64 | * @open: Open mediated device. | ||
| 65 | * @mdev: mediated device. | ||
| 66 | * Returns integer: success (0) or error (< 0) | ||
| 67 | * @release: release mediated device | ||
| 68 | * @mdev: mediated device. | ||
| 69 | * @read: Read emulation callback | ||
| 70 | * @mdev: mediated device structure | ||
| 71 | * @buf: read buffer | ||
| 72 | * @count: number of bytes to read | ||
| 73 | * @ppos: address. | ||
| 74 | * Retuns number on bytes read on success or error. | ||
| 75 | * @write: Write emulation callback | ||
| 76 | * @mdev: mediated device structure | ||
| 77 | * @buf: write buffer | ||
| 78 | * @count: number of bytes to be written | ||
| 79 | * @ppos: address. | ||
| 80 | * Retuns number on bytes written on success or error. | ||
| 81 | * @ioctl: IOCTL callback | ||
| 82 | * @mdev: mediated device structure | ||
| 83 | * @cmd: ioctl command | ||
| 84 | * @arg: arguments to ioctl | ||
| 85 | * @mmap: mmap callback | ||
| 86 | * @mdev: mediated device structure | ||
| 87 | * @vma: vma structure | ||
| 88 | * Parent device that support mediated device should be registered with mdev | ||
| 89 | * module with parent_ops structure. | ||
| 90 | **/ | ||
| 91 | |||
| 92 | struct parent_ops { | ||
| 93 | struct module *owner; | ||
| 94 | const struct attribute_group **dev_attr_groups; | ||
| 95 | const struct attribute_group **mdev_attr_groups; | ||
| 96 | struct attribute_group **supported_type_groups; | ||
| 97 | |||
| 98 | int (*create)(struct kobject *kobj, struct mdev_device *mdev); | ||
| 99 | int (*remove)(struct mdev_device *mdev); | ||
| 100 | int (*open)(struct mdev_device *mdev); | ||
| 101 | void (*release)(struct mdev_device *mdev); | ||
| 102 | ssize_t (*read)(struct mdev_device *mdev, char __user *buf, | ||
| 103 | size_t count, loff_t *ppos); | ||
| 104 | ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, | ||
| 105 | size_t count, loff_t *ppos); | ||
| 106 | ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd, | ||
| 107 | unsigned long arg); | ||
| 108 | int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* interface for exporting mdev supported type attributes */ | ||
| 112 | struct mdev_type_attribute { | ||
| 113 | struct attribute attr; | ||
| 114 | ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); | ||
| 115 | ssize_t (*store)(struct kobject *kobj, struct device *dev, | ||
| 116 | const char *buf, size_t count); | ||
| 117 | }; | ||
| 118 | |||
| 119 | #define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ | ||
| 120 | struct mdev_type_attribute mdev_type_attr_##_name = \ | ||
| 121 | __ATTR(_name, _mode, _show, _store) | ||
| 122 | #define MDEV_TYPE_ATTR_RW(_name) \ | ||
| 123 | struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) | ||
| 124 | #define MDEV_TYPE_ATTR_RO(_name) \ | ||
| 125 | struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) | ||
| 126 | #define MDEV_TYPE_ATTR_WO(_name) \ | ||
| 127 | struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) | ||
| 128 | |||
| 129 | /** | ||
| 130 | * struct mdev_driver - Mediated device driver | ||
| 131 | * @name: driver name | ||
| 132 | * @probe: called when new device created | ||
| 133 | * @remove: called when device removed | ||
| 134 | * @driver: device driver structure | ||
| 135 | * | ||
| 136 | **/ | ||
| 137 | struct mdev_driver { | ||
| 138 | const char *name; | ||
| 139 | int (*probe)(struct device *dev); | ||
| 140 | void (*remove)(struct device *dev); | ||
| 141 | struct device_driver driver; | ||
| 142 | }; | ||
| 143 | |||
| 144 | #define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) | ||
| 145 | #define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) | ||
| 146 | |||
| 147 | static inline void *mdev_get_drvdata(struct mdev_device *mdev) | ||
| 148 | { | ||
| 149 | return mdev->driver_data; | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) | ||
| 153 | { | ||
| 154 | mdev->driver_data = data; | ||
| 155 | } | ||
| 156 | |||
| 157 | extern struct bus_type mdev_bus_type; | ||
| 158 | |||
| 159 | #define dev_is_mdev(d) ((d)->bus == &mdev_bus_type) | ||
| 160 | |||
| 161 | extern int mdev_register_device(struct device *dev, | ||
| 162 | const struct parent_ops *ops); | ||
| 163 | extern void mdev_unregister_device(struct device *dev); | ||
| 164 | |||
| 165 | extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner); | ||
| 166 | extern void mdev_unregister_driver(struct mdev_driver *drv); | ||
| 167 | |||
| 168 | #endif /* MDEV_H */ | ||
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index e746919530f5..a0d274fe08f1 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h | |||
| @@ -8,8 +8,7 @@ | |||
| 8 | struct mei_cl_device; | 8 | struct mei_cl_device; |
| 9 | struct mei_device; | 9 | struct mei_device; |
| 10 | 10 | ||
| 11 | typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, | 11 | typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); |
| 12 | u32 events, void *context); | ||
| 13 | 12 | ||
| 14 | /** | 13 | /** |
| 15 | * struct mei_cl_device - MEI device handle | 14 | * struct mei_cl_device - MEI device handle |
| @@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, | |||
| 24 | * @me_cl: me client | 23 | * @me_cl: me client |
| 25 | * @cl: mei client | 24 | * @cl: mei client |
| 26 | * @name: device name | 25 | * @name: device name |
| 27 | * @event_work: async work to execute event callback | 26 | * @rx_work: async work to execute Rx event callback |
| 28 | * @event_cb: Drivers register this callback to get asynchronous ME | 27 | * @rx_cb: Drivers register this callback to get asynchronous ME |
| 29 | * events (e.g. Rx buffer pending) notifications. | 28 | * Rx buffer pending notifications. |
| 30 | * @event_context: event callback run context | 29 | * @notif_work: async work to execute FW notif event callback |
| 31 | * @events_mask: Events bit mask requested by driver. | 30 | * @notif_cb: Drivers register this callback to get asynchronous ME |
| 32 | * @events: Events bitmask sent to the driver. | 31 | * FW notification pending notifications. |
| 33 | * | 32 | * |
| 34 | * @do_match: wheather device can be matched with a driver | 33 | * @do_match: wheather device can be matched with a driver |
| 35 | * @is_added: device is already scanned | 34 | * @is_added: device is already scanned |
| @@ -44,11 +43,10 @@ struct mei_cl_device { | |||
| 44 | struct mei_cl *cl; | 43 | struct mei_cl *cl; |
| 45 | char name[MEI_CL_NAME_SIZE]; | 44 | char name[MEI_CL_NAME_SIZE]; |
| 46 | 45 | ||
| 47 | struct work_struct event_work; | 46 | struct work_struct rx_work; |
| 48 | mei_cldev_event_cb_t event_cb; | 47 | mei_cldev_cb_t rx_cb; |
| 49 | void *event_context; | 48 | struct work_struct notif_work; |
| 50 | unsigned long events_mask; | 49 | mei_cldev_cb_t notif_cb; |
| 51 | unsigned long events; | ||
| 52 | 50 | ||
| 53 | unsigned int do_match:1; | 51 | unsigned int do_match:1; |
| 54 | unsigned int is_added:1; | 52 | unsigned int is_added:1; |
| @@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, | |||
| 74 | 72 | ||
| 75 | void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); | 73 | void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); |
| 76 | 74 | ||
| 77 | ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); | 75 | /** |
| 78 | ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); | 76 | * module_mei_cl_driver - Helper macro for registering mei cl driver |
| 77 | * | ||
| 78 | * @__mei_cldrv: mei_cl_driver structure | ||
| 79 | * | ||
| 80 | * Helper macro for mei cl drivers which do not do anything special in module | ||
| 81 | * init/exit, for eliminating a boilerplate code. | ||
| 82 | */ | ||
| 83 | #define module_mei_cl_driver(__mei_cldrv) \ | ||
| 84 | module_driver(__mei_cldrv, \ | ||
| 85 | mei_cldev_driver_register,\ | ||
| 86 | mei_cldev_driver_unregister) | ||
| 79 | 87 | ||
| 80 | int mei_cldev_register_event_cb(struct mei_cl_device *cldev, | 88 | ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); |
| 81 | unsigned long event_mask, | 89 | ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); |
| 82 | mei_cldev_event_cb_t read_cb, void *context); | 90 | ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, |
| 91 | size_t length); | ||
| 83 | 92 | ||
| 84 | #define MEI_CL_EVENT_RX 0 | 93 | int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); |
| 85 | #define MEI_CL_EVENT_TX 1 | 94 | int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, |
| 86 | #define MEI_CL_EVENT_NOTIF 2 | 95 | mei_cldev_cb_t notif_cb); |
| 87 | 96 | ||
| 88 | const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); | 97 | const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); |
| 89 | u8 mei_cldev_ver(const struct mei_cl_device *cldev); | 98 | u8 mei_cldev_ver(const struct mei_cl_device *cldev); |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5e5b2969d931..5f4d8281832b 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | 7 | ||
| 8 | 8 | ||
| 9 | #include <linux/mmzone.h> | 9 | #include <linux/mmzone.h> |
| 10 | #include <linux/dax.h> | ||
| 10 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| 11 | #include <linux/rbtree.h> | 12 | #include <linux/rbtree.h> |
| 12 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
| @@ -177,6 +178,13 @@ static inline bool vma_migratable(struct vm_area_struct *vma) | |||
| 177 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) | 178 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
| 178 | return false; | 179 | return false; |
| 179 | 180 | ||
| 181 | /* | ||
| 182 | * DAX device mappings require predictable access latency, so avoid | ||
| 183 | * incurring periodic faults. | ||
| 184 | */ | ||
| 185 | if (vma_is_dax(vma)) | ||
| 186 | return false; | ||
| 187 | |||
| 180 | #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION | 188 | #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
| 181 | if (vma->vm_flags & VM_HUGETLB) | 189 | if (vma->vm_flags & VM_HUGETLB) |
| 182 | return false; | 190 | return false; |
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index fec597fb34cb..a4860bc9b73d 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h | |||
| @@ -115,6 +115,8 @@ enum { | |||
| 115 | #define AXP806_CLDO2_V_CTRL 0x25 | 115 | #define AXP806_CLDO2_V_CTRL 0x25 |
| 116 | #define AXP806_CLDO3_V_CTRL 0x26 | 116 | #define AXP806_CLDO3_V_CTRL 0x26 |
| 117 | #define AXP806_VREF_TEMP_WARN_L 0xf3 | 117 | #define AXP806_VREF_TEMP_WARN_L 0xf3 |
| 118 | #define AXP806_BUS_ADDR_EXT 0xfe | ||
| 119 | #define AXP806_REG_ADDR_EXT 0xff | ||
| 118 | 120 | ||
| 119 | /* Interrupt */ | 121 | /* Interrupt */ |
| 120 | #define AXP152_IRQ1_EN 0x40 | 122 | #define AXP152_IRQ1_EN 0x40 |
| @@ -226,6 +228,10 @@ enum { | |||
| 226 | #define AXP20X_OCV_MAX 0xf | 228 | #define AXP20X_OCV_MAX 0xf |
| 227 | 229 | ||
| 228 | /* AXP22X specific registers */ | 230 | /* AXP22X specific registers */ |
| 231 | #define AXP22X_PMIC_ADC_H 0x56 | ||
| 232 | #define AXP22X_PMIC_ADC_L 0x57 | ||
| 233 | #define AXP22X_TS_ADC_H 0x58 | ||
| 234 | #define AXP22X_TS_ADC_L 0x59 | ||
| 229 | #define AXP22X_BATLOW_THRES1 0xe6 | 235 | #define AXP22X_BATLOW_THRES1 0xe6 |
| 230 | 236 | ||
| 231 | /* AXP288 specific registers */ | 237 | /* AXP288 specific registers */ |
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 76f7ef4d3a0d..f62043a75f43 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h | |||
| @@ -148,6 +148,15 @@ struct cros_ec_device { | |||
| 148 | int event_size; | 148 | int event_size; |
| 149 | }; | 149 | }; |
| 150 | 150 | ||
| 151 | /** | ||
| 152 | * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information | ||
| 153 | * | ||
| 154 | * @sensor_num: Id of the sensor, as reported by the EC. | ||
| 155 | */ | ||
| 156 | struct cros_ec_sensor_platform { | ||
| 157 | u8 sensor_num; | ||
| 158 | }; | ||
| 159 | |||
| 151 | /* struct cros_ec_platform - ChromeOS EC platform information | 160 | /* struct cros_ec_platform - ChromeOS EC platform information |
| 152 | * | 161 | * |
| 153 | * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) | 162 | * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) |
| @@ -175,6 +184,7 @@ struct cros_ec_dev { | |||
| 175 | struct cros_ec_device *ec_dev; | 184 | struct cros_ec_device *ec_dev; |
| 176 | struct device *dev; | 185 | struct device *dev; |
| 177 | u16 cmd_offset; | 186 | u16 cmd_offset; |
| 187 | u32 features[2]; | ||
| 178 | }; | 188 | }; |
| 179 | 189 | ||
| 180 | /** | 190 | /** |
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 76728ff37d01..1683003603f3 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h | |||
| @@ -713,6 +713,90 @@ struct ec_response_get_set_value { | |||
| 713 | /* More than one command can use these structs to get/set paramters. */ | 713 | /* More than one command can use these structs to get/set paramters. */ |
| 714 | #define EC_CMD_GSV_PAUSE_IN_S5 0x0c | 714 | #define EC_CMD_GSV_PAUSE_IN_S5 0x0c |
| 715 | 715 | ||
| 716 | /*****************************************************************************/ | ||
| 717 | /* List the features supported by the firmware */ | ||
| 718 | #define EC_CMD_GET_FEATURES 0x0d | ||
| 719 | |||
| 720 | /* Supported features */ | ||
| 721 | enum ec_feature_code { | ||
| 722 | /* | ||
| 723 | * This image contains a limited set of features. Another image | ||
| 724 | * in RW partition may support more features. | ||
| 725 | */ | ||
| 726 | EC_FEATURE_LIMITED = 0, | ||
| 727 | /* | ||
| 728 | * Commands for probing/reading/writing/erasing the flash in the | ||
| 729 | * EC are present. | ||
| 730 | */ | ||
| 731 | EC_FEATURE_FLASH = 1, | ||
| 732 | /* | ||
| 733 | * Can control the fan speed directly. | ||
| 734 | */ | ||
| 735 | EC_FEATURE_PWM_FAN = 2, | ||
| 736 | /* | ||
| 737 | * Can control the intensity of the keyboard backlight. | ||
| 738 | */ | ||
| 739 | EC_FEATURE_PWM_KEYB = 3, | ||
| 740 | /* | ||
| 741 | * Support Google lightbar, introduced on Pixel. | ||
| 742 | */ | ||
| 743 | EC_FEATURE_LIGHTBAR = 4, | ||
| 744 | /* Control of LEDs */ | ||
| 745 | EC_FEATURE_LED = 5, | ||
| 746 | /* Exposes an interface to control gyro and sensors. | ||
| 747 | * The host goes through the EC to access these sensors. | ||
| 748 | * In addition, the EC may provide composite sensors, like lid angle. | ||
| 749 | */ | ||
| 750 | EC_FEATURE_MOTION_SENSE = 6, | ||
| 751 | /* The keyboard is controlled by the EC */ | ||
| 752 | EC_FEATURE_KEYB = 7, | ||
| 753 | /* The AP can use part of the EC flash as persistent storage. */ | ||
| 754 | EC_FEATURE_PSTORE = 8, | ||
| 755 | /* The EC monitors BIOS port 80h, and can return POST codes. */ | ||
| 756 | EC_FEATURE_PORT80 = 9, | ||
| 757 | /* | ||
| 758 | * Thermal management: include TMP specific commands. | ||
| 759 | * Higher level than direct fan control. | ||
| 760 | */ | ||
| 761 | EC_FEATURE_THERMAL = 10, | ||
| 762 | /* Can switch the screen backlight on/off */ | ||
| 763 | EC_FEATURE_BKLIGHT_SWITCH = 11, | ||
| 764 | /* Can switch the wifi module on/off */ | ||
| 765 | EC_FEATURE_WIFI_SWITCH = 12, | ||
| 766 | /* Monitor host events, through for example SMI or SCI */ | ||
| 767 | EC_FEATURE_HOST_EVENTS = 13, | ||
| 768 | /* The EC exposes GPIO commands to control/monitor connected devices. */ | ||
| 769 | EC_FEATURE_GPIO = 14, | ||
| 770 | /* The EC can send i2c messages to downstream devices. */ | ||
| 771 | EC_FEATURE_I2C = 15, | ||
| 772 | /* Command to control charger are included */ | ||
| 773 | EC_FEATURE_CHARGER = 16, | ||
| 774 | /* Simple battery support. */ | ||
| 775 | EC_FEATURE_BATTERY = 17, | ||
| 776 | /* | ||
| 777 | * Support Smart battery protocol | ||
| 778 | * (Common Smart Battery System Interface Specification) | ||
| 779 | */ | ||
| 780 | EC_FEATURE_SMART_BATTERY = 18, | ||
| 781 | /* EC can dectect when the host hangs. */ | ||
| 782 | EC_FEATURE_HANG_DETECT = 19, | ||
| 783 | /* Report power information, for pit only */ | ||
| 784 | EC_FEATURE_PMU = 20, | ||
| 785 | /* Another Cros EC device is present downstream of this one */ | ||
| 786 | EC_FEATURE_SUB_MCU = 21, | ||
| 787 | /* Support USB Power delivery (PD) commands */ | ||
| 788 | EC_FEATURE_USB_PD = 22, | ||
| 789 | /* Control USB multiplexer, for audio through USB port for instance. */ | ||
| 790 | EC_FEATURE_USB_MUX = 23, | ||
| 791 | /* Motion Sensor code has an internal software FIFO */ | ||
| 792 | EC_FEATURE_MOTION_SENSE_FIFO = 24, | ||
| 793 | }; | ||
| 794 | |||
| 795 | #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) | ||
| 796 | #define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) | ||
| 797 | struct ec_response_get_features { | ||
| 798 | uint32_t flags[2]; | ||
| 799 | } __packed; | ||
| 716 | 800 | ||
| 717 | /*****************************************************************************/ | 801 | /*****************************************************************************/ |
| 718 | /* Flash commands */ | 802 | /* Flash commands */ |
| @@ -1315,6 +1399,24 @@ enum motionsense_command { | |||
| 1315 | */ | 1399 | */ |
| 1316 | MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, | 1400 | MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, |
| 1317 | 1401 | ||
| 1402 | /* | ||
| 1403 | * Returns a single sensor data. | ||
| 1404 | */ | ||
| 1405 | MOTIONSENSE_CMD_DATA = 6, | ||
| 1406 | |||
| 1407 | /* | ||
| 1408 | * Perform low level calibration.. On sensors that support it, ask to | ||
| 1409 | * do offset calibration. | ||
| 1410 | */ | ||
| 1411 | MOTIONSENSE_CMD_PERFORM_CALIB = 10, | ||
| 1412 | |||
| 1413 | /* | ||
| 1414 | * Sensor Offset command is a setter/getter command for the offset used | ||
| 1415 | * for calibration. The offsets can be calculated by the host, or via | ||
| 1416 | * PERFORM_CALIB command. | ||
| 1417 | */ | ||
| 1418 | MOTIONSENSE_CMD_SENSOR_OFFSET = 11, | ||
| 1419 | |||
| 1318 | /* Number of motionsense sub-commands. */ | 1420 | /* Number of motionsense sub-commands. */ |
| 1319 | MOTIONSENSE_NUM_CMDS | 1421 | MOTIONSENSE_NUM_CMDS |
| 1320 | }; | 1422 | }; |
| @@ -1335,12 +1437,18 @@ enum motionsensor_id { | |||
| 1335 | enum motionsensor_type { | 1437 | enum motionsensor_type { |
| 1336 | MOTIONSENSE_TYPE_ACCEL = 0, | 1438 | MOTIONSENSE_TYPE_ACCEL = 0, |
| 1337 | MOTIONSENSE_TYPE_GYRO = 1, | 1439 | MOTIONSENSE_TYPE_GYRO = 1, |
| 1440 | MOTIONSENSE_TYPE_MAG = 2, | ||
| 1441 | MOTIONSENSE_TYPE_PROX = 3, | ||
| 1442 | MOTIONSENSE_TYPE_LIGHT = 4, | ||
| 1443 | MOTIONSENSE_TYPE_ACTIVITY = 5, | ||
| 1444 | MOTIONSENSE_TYPE_MAX | ||
| 1338 | }; | 1445 | }; |
| 1339 | 1446 | ||
| 1340 | /* List of motion sensor locations. */ | 1447 | /* List of motion sensor locations. */ |
| 1341 | enum motionsensor_location { | 1448 | enum motionsensor_location { |
| 1342 | MOTIONSENSE_LOC_BASE = 0, | 1449 | MOTIONSENSE_LOC_BASE = 0, |
| 1343 | MOTIONSENSE_LOC_LID = 1, | 1450 | MOTIONSENSE_LOC_LID = 1, |
| 1451 | MOTIONSENSE_LOC_MAX, | ||
| 1344 | }; | 1452 | }; |
| 1345 | 1453 | ||
| 1346 | /* List of motion sensor chips. */ | 1454 | /* List of motion sensor chips. */ |
| @@ -1361,6 +1469,31 @@ enum motionsensor_chip { | |||
| 1361 | */ | 1469 | */ |
| 1362 | #define EC_MOTION_SENSE_NO_VALUE -1 | 1470 | #define EC_MOTION_SENSE_NO_VALUE -1 |
| 1363 | 1471 | ||
| 1472 | #define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 | ||
| 1473 | |||
| 1474 | /* Set Calibration information */ | ||
| 1475 | #define MOTION_SENSE_SET_OFFSET 1 | ||
| 1476 | |||
| 1477 | struct ec_response_motion_sensor_data { | ||
| 1478 | /* Flags for each sensor. */ | ||
| 1479 | uint8_t flags; | ||
| 1480 | /* Sensor number the data comes from */ | ||
| 1481 | uint8_t sensor_num; | ||
| 1482 | /* Each sensor is up to 3-axis. */ | ||
| 1483 | union { | ||
| 1484 | int16_t data[3]; | ||
| 1485 | struct { | ||
| 1486 | uint16_t rsvd; | ||
| 1487 | uint32_t timestamp; | ||
| 1488 | } __packed; | ||
| 1489 | struct { | ||
| 1490 | uint8_t activity; /* motionsensor_activity */ | ||
| 1491 | uint8_t state; | ||
| 1492 | int16_t add_info[2]; | ||
| 1493 | }; | ||
| 1494 | }; | ||
| 1495 | } __packed; | ||
| 1496 | |||
| 1364 | struct ec_params_motion_sense { | 1497 | struct ec_params_motion_sense { |
| 1365 | uint8_t cmd; | 1498 | uint8_t cmd; |
| 1366 | union { | 1499 | union { |
| @@ -1378,9 +1511,37 @@ struct ec_params_motion_sense { | |||
| 1378 | int16_t data; | 1511 | int16_t data; |
| 1379 | } ec_rate, kb_wake_angle; | 1512 | } ec_rate, kb_wake_angle; |
| 1380 | 1513 | ||
| 1514 | /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ | ||
| 1515 | struct { | ||
| 1516 | uint8_t sensor_num; | ||
| 1517 | |||
| 1518 | /* | ||
| 1519 | * bit 0: If set (MOTION_SENSE_SET_OFFSET), set | ||
| 1520 | * the calibration information in the EC. | ||
| 1521 | * If unset, just retrieve calibration information. | ||
| 1522 | */ | ||
| 1523 | uint16_t flags; | ||
| 1524 | |||
| 1525 | /* | ||
| 1526 | * Temperature at calibration, in units of 0.01 C | ||
| 1527 | * 0x8000: invalid / unknown. | ||
| 1528 | * 0x0: 0C | ||
| 1529 | * 0x7fff: +327.67C | ||
| 1530 | */ | ||
| 1531 | int16_t temp; | ||
| 1532 | |||
| 1533 | /* | ||
| 1534 | * Offset for calibration. | ||
| 1535 | * Unit: | ||
| 1536 | * Accelerometer: 1/1024 g | ||
| 1537 | * Gyro: 1/1024 deg/s | ||
| 1538 | * Compass: 1/16 uT | ||
| 1539 | */ | ||
| 1540 | int16_t offset[3]; | ||
| 1541 | } __packed sensor_offset; | ||
| 1542 | |||
| 1381 | /* Used for MOTIONSENSE_CMD_INFO. */ | 1543 | /* Used for MOTIONSENSE_CMD_INFO. */ |
| 1382 | struct { | 1544 | struct { |
| 1383 | /* Should be element of enum motionsensor_id. */ | ||
| 1384 | uint8_t sensor_num; | 1545 | uint8_t sensor_num; |
| 1385 | } info; | 1546 | } info; |
| 1386 | 1547 | ||
| @@ -1410,11 +1571,14 @@ struct ec_response_motion_sense { | |||
| 1410 | /* Flags representing the motion sensor module. */ | 1571 | /* Flags representing the motion sensor module. */ |
| 1411 | uint8_t module_flags; | 1572 | uint8_t module_flags; |
| 1412 | 1573 | ||
| 1413 | /* Flags for each sensor in enum motionsensor_id. */ | 1574 | /* Number of sensors managed directly by the EC. */ |
| 1414 | uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT]; | 1575 | uint8_t sensor_count; |
| 1415 | 1576 | ||
| 1416 | /* Array of all sensor data. Each sensor is 3-axis. */ | 1577 | /* |
| 1417 | int16_t data[3*EC_MOTION_SENSOR_COUNT]; | 1578 | * Sensor data is truncated if response_max is too small |
| 1579 | * for holding all the data. | ||
| 1580 | */ | ||
| 1581 | struct ec_response_motion_sensor_data sensor[0]; | ||
| 1418 | } dump; | 1582 | } dump; |
| 1419 | 1583 | ||
| 1420 | /* Used for MOTIONSENSE_CMD_INFO. */ | 1584 | /* Used for MOTIONSENSE_CMD_INFO. */ |
| @@ -1429,6 +1593,9 @@ struct ec_response_motion_sense { | |||
| 1429 | uint8_t chip; | 1593 | uint8_t chip; |
| 1430 | } info; | 1594 | } info; |
| 1431 | 1595 | ||
| 1596 | /* Used for MOTIONSENSE_CMD_DATA */ | ||
| 1597 | struct ec_response_motion_sensor_data data; | ||
| 1598 | |||
| 1432 | /* | 1599 | /* |
| 1433 | * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, | 1600 | * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, |
| 1434 | * MOTIONSENSE_CMD_SENSOR_RANGE, and | 1601 | * MOTIONSENSE_CMD_SENSOR_RANGE, and |
| @@ -1438,6 +1605,12 @@ struct ec_response_motion_sense { | |||
| 1438 | /* Current value of the parameter queried. */ | 1605 | /* Current value of the parameter queried. */ |
| 1439 | int32_t ret; | 1606 | int32_t ret; |
| 1440 | } ec_rate, sensor_odr, sensor_range, kb_wake_angle; | 1607 | } ec_rate, sensor_odr, sensor_range, kb_wake_angle; |
| 1608 | |||
| 1609 | /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ | ||
| 1610 | struct { | ||
| 1611 | int16_t temp; | ||
| 1612 | int16_t offset[3]; | ||
| 1613 | } sensor_offset, perform_calib; | ||
| 1441 | }; | 1614 | }; |
| 1442 | } __packed; | 1615 | } __packed; |
| 1443 | 1616 | ||
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 8e1cdbef3dad..2c0127cb06c5 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | #include <linux/mfd/core.h> | 28 | #include <linux/mfd/core.h> |
| 29 | #include <linux/platform_data/edma.h> | 29 | #include <linux/platform_data/edma.h> |
| 30 | 30 | ||
| 31 | #include <mach/hardware.h> | ||
| 32 | |||
| 33 | struct regmap; | 31 | struct regmap; |
| 34 | 32 | ||
| 35 | /* | 33 | /* |
| @@ -99,8 +97,6 @@ struct davinci_vcif { | |||
| 99 | dma_addr_t dma_rx_addr; | 97 | dma_addr_t dma_rx_addr; |
| 100 | }; | 98 | }; |
| 101 | 99 | ||
| 102 | struct davinci_vc; | ||
| 103 | |||
| 104 | struct davinci_vc { | 100 | struct davinci_vc { |
| 105 | /* Device data */ | 101 | /* Device data */ |
| 106 | struct device *dev; | 102 | struct device *dev; |
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index cf619dbeace2..956caa0628f5 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h | |||
| @@ -26,6 +26,7 @@ struct intel_soc_pmic { | |||
| 26 | struct regmap *regmap; | 26 | struct regmap *regmap; |
| 27 | struct regmap_irq_chip_data *irq_chip_data; | 27 | struct regmap_irq_chip_data *irq_chip_data; |
| 28 | struct regmap_irq_chip_data *irq_chip_data_level2; | 28 | struct regmap_irq_chip_data *irq_chip_data_level2; |
| 29 | struct regmap_irq_chip_data *irq_chip_data_tmu; | ||
| 29 | struct device *dev; | 30 | struct device *dev; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h index 3ca0af07fc78..ad2a9a852aea 100644 --- a/include/linux/mfd/max77620.h +++ b/include/linux/mfd/max77620.h | |||
| @@ -180,6 +180,7 @@ | |||
| 180 | #define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) | 180 | #define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2) |
| 181 | #define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 | 181 | #define MAX77620_SD_CFG1_FPWM_SD_SKIP 0 |
| 182 | #define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) | 182 | #define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2) |
| 183 | #define MAX20024_SD_CFG1_MPOK_MASK BIT(1) | ||
| 183 | #define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) | 184 | #define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0) |
| 184 | #define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 | 185 | #define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0 |
| 185 | #define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) | 186 | #define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0) |
| @@ -187,6 +188,7 @@ | |||
| 187 | /* LDO_CNFG2 */ | 188 | /* LDO_CNFG2 */ |
| 188 | #define MAX77620_LDO_POWER_MODE_MASK 0xC0 | 189 | #define MAX77620_LDO_POWER_MODE_MASK 0xC0 |
| 189 | #define MAX77620_LDO_POWER_MODE_SHIFT 6 | 190 | #define MAX77620_LDO_POWER_MODE_SHIFT 6 |
| 191 | #define MAX20024_LDO_CFG2_MPOK_MASK BIT(2) | ||
| 190 | #define MAX77620_LDO_CFG2_ADE_MASK BIT(1) | 192 | #define MAX77620_LDO_CFG2_ADE_MASK BIT(1) |
| 191 | #define MAX77620_LDO_CFG2_ADE_DISABLE 0 | 193 | #define MAX77620_LDO_CFG2_ADE_DISABLE 0 |
| 192 | #define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) | 194 | #define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1) |
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 6d435a3c06bc..83701ef7d3c7 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h | |||
| @@ -290,6 +290,7 @@ enum rk818_reg { | |||
| 290 | #define SWITCH2_EN BIT(6) | 290 | #define SWITCH2_EN BIT(6) |
| 291 | #define SWITCH1_EN BIT(5) | 291 | #define SWITCH1_EN BIT(5) |
| 292 | #define DEV_OFF_RST BIT(3) | 292 | #define DEV_OFF_RST BIT(3) |
| 293 | #define DEV_OFF BIT(0) | ||
| 293 | 294 | ||
| 294 | #define VB_LO_ACT BIT(4) | 295 | #define VB_LO_ACT BIT(4) |
| 295 | #define VB_LO_SEL_3500MV (7 << 0) | 296 | #define VB_LO_SEL_3500MV (7 << 0) |
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index cadc6543909d..e5a6cdeb77db 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h | |||
| @@ -58,10 +58,13 @@ | |||
| 58 | #define RN5T618_DC3CTL2 0x31 | 58 | #define RN5T618_DC3CTL2 0x31 |
| 59 | #define RN5T618_DC4CTL 0x32 | 59 | #define RN5T618_DC4CTL 0x32 |
| 60 | #define RN5T618_DC4CTL2 0x33 | 60 | #define RN5T618_DC4CTL2 0x33 |
| 61 | #define RN5T618_DC5CTL 0x34 | ||
| 62 | #define RN5T618_DC5CTL2 0x35 | ||
| 61 | #define RN5T618_DC1DAC 0x36 | 63 | #define RN5T618_DC1DAC 0x36 |
| 62 | #define RN5T618_DC2DAC 0x37 | 64 | #define RN5T618_DC2DAC 0x37 |
| 63 | #define RN5T618_DC3DAC 0x38 | 65 | #define RN5T618_DC3DAC 0x38 |
| 64 | #define RN5T618_DC4DAC 0x39 | 66 | #define RN5T618_DC4DAC 0x39 |
| 67 | #define RN5T618_DC5DAC 0x3a | ||
| 65 | #define RN5T618_DC1DAC_SLP 0x3b | 68 | #define RN5T618_DC1DAC_SLP 0x3b |
| 66 | #define RN5T618_DC2DAC_SLP 0x3c | 69 | #define RN5T618_DC2DAC_SLP 0x3c |
| 67 | #define RN5T618_DC3DAC_SLP 0x3d | 70 | #define RN5T618_DC3DAC_SLP 0x3d |
| @@ -77,6 +80,11 @@ | |||
| 77 | #define RN5T618_LDO3DAC 0x4e | 80 | #define RN5T618_LDO3DAC 0x4e |
| 78 | #define RN5T618_LDO4DAC 0x4f | 81 | #define RN5T618_LDO4DAC 0x4f |
| 79 | #define RN5T618_LDO5DAC 0x50 | 82 | #define RN5T618_LDO5DAC 0x50 |
| 83 | #define RN5T618_LDO6DAC 0x51 | ||
| 84 | #define RN5T618_LDO7DAC 0x52 | ||
| 85 | #define RN5T618_LDO8DAC 0x53 | ||
| 86 | #define RN5T618_LDO9DAC 0x54 | ||
| 87 | #define RN5T618_LDO10DAC 0x55 | ||
| 80 | #define RN5T618_LDORTCDAC 0x56 | 88 | #define RN5T618_LDORTCDAC 0x56 |
| 81 | #define RN5T618_LDORTC2DAC 0x57 | 89 | #define RN5T618_LDORTC2DAC 0x57 |
| 82 | #define RN5T618_LDO1DAC_SLP 0x58 | 90 | #define RN5T618_LDO1DAC_SLP 0x58 |
| @@ -231,6 +239,7 @@ enum { | |||
| 231 | enum { | 239 | enum { |
| 232 | RN5T567 = 0, | 240 | RN5T567 = 0, |
| 233 | RN5T618, | 241 | RN5T618, |
| 242 | RC5T619, | ||
| 234 | }; | 243 | }; |
| 235 | 244 | ||
| 236 | struct rn5t618 { | 245 | struct rn5t618 { |
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h new file mode 100644 index 000000000000..d7a29f246d64 --- /dev/null +++ b/include/linux/mfd/sun4i-gpadc.h | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | /* Header of ADC MFD core driver for sunxi platforms | ||
| 2 | * | ||
| 3 | * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it under | ||
| 6 | * the terms of the GNU General Public License version 2 as published by the | ||
| 7 | * Free Software Foundation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __SUN4I_GPADC__H__ | ||
| 11 | #define __SUN4I_GPADC__H__ | ||
| 12 | |||
| 13 | #define SUN4I_GPADC_CTRL0 0x00 | ||
| 14 | |||
| 15 | #define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY(x) ((GENMASK(7, 0) & (x)) << 24) | ||
| 16 | #define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY_MODE BIT(23) | ||
| 17 | #define SUN4I_GPADC_CTRL0_ADC_CLK_SELECT BIT(22) | ||
| 18 | #define SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(x) ((GENMASK(1, 0) & (x)) << 20) | ||
| 19 | #define SUN4I_GPADC_CTRL0_FS_DIV(x) ((GENMASK(3, 0) & (x)) << 16) | ||
| 20 | #define SUN4I_GPADC_CTRL0_T_ACQ(x) (GENMASK(15, 0) & (x)) | ||
| 21 | |||
| 22 | #define SUN4I_GPADC_CTRL1 0x04 | ||
| 23 | |||
| 24 | #define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE(x) ((GENMASK(7, 0) & (x)) << 12) | ||
| 25 | #define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE_EN BIT(9) | ||
| 26 | #define SUN4I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(6) | ||
| 27 | #define SUN4I_GPADC_CTRL1_TP_DUAL_EN BIT(5) | ||
| 28 | #define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4) | ||
| 29 | #define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3) | ||
| 30 | #define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x)) | ||
| 31 | |||
| 32 | /* TP_CTRL1 bits for sun6i SOCs */ | ||
| 33 | #define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7) | ||
| 34 | #define SUN6I_GPADC_CTRL1_TP_DUAL_EN BIT(6) | ||
| 35 | #define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5) | ||
| 36 | #define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4) | ||
| 37 | #define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x)) | ||
| 38 | |||
| 39 | #define SUN4I_GPADC_CTRL2 0x08 | ||
| 40 | |||
| 41 | #define SUN4I_GPADC_CTRL2_TP_SENSITIVE_ADJUST(x) ((GENMASK(3, 0) & (x)) << 28) | ||
| 42 | #define SUN4I_GPADC_CTRL2_TP_MODE_SELECT(x) ((GENMASK(1, 0) & (x)) << 26) | ||
| 43 | #define SUN4I_GPADC_CTRL2_PRE_MEA_EN BIT(24) | ||
| 44 | #define SUN4I_GPADC_CTRL2_PRE_MEA_THRE_CNT(x) (GENMASK(23, 0) & (x)) | ||
| 45 | |||
| 46 | #define SUN4I_GPADC_CTRL3 0x0c | ||
| 47 | |||
| 48 | #define SUN4I_GPADC_CTRL3_FILTER_EN BIT(2) | ||
| 49 | #define SUN4I_GPADC_CTRL3_FILTER_TYPE(x) (GENMASK(1, 0) & (x)) | ||
| 50 | |||
| 51 | #define SUN4I_GPADC_TPR 0x18 | ||
| 52 | |||
| 53 | #define SUN4I_GPADC_TPR_TEMP_ENABLE BIT(16) | ||
| 54 | #define SUN4I_GPADC_TPR_TEMP_PERIOD(x) (GENMASK(15, 0) & (x)) | ||
| 55 | |||
| 56 | #define SUN4I_GPADC_INT_FIFOC 0x10 | ||
| 57 | |||
| 58 | #define SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN BIT(18) | ||
| 59 | #define SUN4I_GPADC_INT_FIFOC_TP_OVERRUN_IRQ_EN BIT(17) | ||
| 60 | #define SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN BIT(16) | ||
| 61 | #define SUN4I_GPADC_INT_FIFOC_TP_DATA_XY_CHANGE BIT(13) | ||
| 62 | #define SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(x) ((GENMASK(4, 0) & (x)) << 8) | ||
| 63 | #define SUN4I_GPADC_INT_FIFOC_TP_DATA_DRQ_EN BIT(7) | ||
| 64 | #define SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH BIT(4) | ||
| 65 | #define SUN4I_GPADC_INT_FIFOC_TP_UP_IRQ_EN BIT(1) | ||
| 66 | #define SUN4I_GPADC_INT_FIFOC_TP_DOWN_IRQ_EN BIT(0) | ||
| 67 | |||
| 68 | #define SUN4I_GPADC_INT_FIFOS 0x14 | ||
| 69 | |||
| 70 | #define SUN4I_GPADC_INT_FIFOS_TEMP_DATA_PENDING BIT(18) | ||
| 71 | #define SUN4I_GPADC_INT_FIFOS_FIFO_OVERRUN_PENDING BIT(17) | ||
| 72 | #define SUN4I_GPADC_INT_FIFOS_FIFO_DATA_PENDING BIT(16) | ||
| 73 | #define SUN4I_GPADC_INT_FIFOS_TP_IDLE_FLG BIT(2) | ||
| 74 | #define SUN4I_GPADC_INT_FIFOS_TP_UP_PENDING BIT(1) | ||
| 75 | #define SUN4I_GPADC_INT_FIFOS_TP_DOWN_PENDING BIT(0) | ||
| 76 | |||
| 77 | #define SUN4I_GPADC_CDAT 0x1c | ||
| 78 | #define SUN4I_GPADC_TEMP_DATA 0x20 | ||
| 79 | #define SUN4I_GPADC_DATA 0x24 | ||
| 80 | |||
| 81 | #define SUN4I_GPADC_IRQ_FIFO_DATA 0 | ||
| 82 | #define SUN4I_GPADC_IRQ_TEMP_DATA 1 | ||
| 83 | |||
| 84 | /* 10s delay before suspending the IP */ | ||
| 85 | #define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000 | ||
| 86 | |||
| 87 | struct sun4i_gpadc_dev { | ||
| 88 | struct device *dev; | ||
| 89 | struct regmap *regmap; | ||
| 90 | struct regmap_irq_chip_data *regmap_irqc; | ||
| 91 | void __iomem *base; | ||
| 92 | }; | ||
| 93 | |||
| 94 | #endif | ||
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 7f55b8b41032..b9a53e013bff 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #define REG_IRQENABLE 0x02C | 23 | #define REG_IRQENABLE 0x02C |
| 24 | #define REG_IRQCLR 0x030 | 24 | #define REG_IRQCLR 0x030 |
| 25 | #define REG_IRQWAKEUP 0x034 | 25 | #define REG_IRQWAKEUP 0x034 |
| 26 | #define REG_DMAENABLE_SET 0x038 | ||
| 27 | #define REG_DMAENABLE_CLEAR 0x03c | ||
| 26 | #define REG_CTRL 0x040 | 28 | #define REG_CTRL 0x040 |
| 27 | #define REG_ADCFSM 0x044 | 29 | #define REG_ADCFSM 0x044 |
| 28 | #define REG_CLKDIV 0x04C | 30 | #define REG_CLKDIV 0x04C |
| @@ -36,6 +38,7 @@ | |||
| 36 | #define REG_FIFO0THR 0xE8 | 38 | #define REG_FIFO0THR 0xE8 |
| 37 | #define REG_FIFO1CNT 0xF0 | 39 | #define REG_FIFO1CNT 0xF0 |
| 38 | #define REG_FIFO1THR 0xF4 | 40 | #define REG_FIFO1THR 0xF4 |
| 41 | #define REG_DMA1REQ 0xF8 | ||
| 39 | #define REG_FIFO0 0x100 | 42 | #define REG_FIFO0 0x100 |
| 40 | #define REG_FIFO1 0x200 | 43 | #define REG_FIFO1 0x200 |
| 41 | 44 | ||
| @@ -126,6 +129,10 @@ | |||
| 126 | #define FIFOREAD_DATA_MASK (0xfff << 0) | 129 | #define FIFOREAD_DATA_MASK (0xfff << 0) |
| 127 | #define FIFOREAD_CHNLID_MASK (0xf << 16) | 130 | #define FIFOREAD_CHNLID_MASK (0xf << 16) |
| 128 | 131 | ||
| 132 | /* DMA ENABLE/CLEAR Register */ | ||
| 133 | #define DMA_FIFO0 BIT(0) | ||
| 134 | #define DMA_FIFO1 BIT(1) | ||
| 135 | |||
| 129 | /* Sequencer Status */ | 136 | /* Sequencer Status */ |
| 130 | #define SEQ_STATUS BIT(5) | 137 | #define SEQ_STATUS BIT(5) |
| 131 | #define CHARGE_STEP 0x11 | 138 | #define CHARGE_STEP 0x11 |
| @@ -155,6 +162,7 @@ struct ti_tscadc_dev { | |||
| 155 | struct device *dev; | 162 | struct device *dev; |
| 156 | struct regmap *regmap; | 163 | struct regmap *regmap; |
| 157 | void __iomem *tscadc_base; | 164 | void __iomem *tscadc_base; |
| 165 | phys_addr_t tscadc_phys_base; | ||
| 158 | int irq; | 166 | int irq; |
| 159 | int used_cells; /* 1-2 */ | 167 | int used_cells; /* 1-2 */ |
| 160 | int tsc_wires; | 168 | int tsc_wires; |
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 7a26286db895..fba44abd05ba 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
| @@ -100,6 +100,11 @@ | |||
| 100 | #define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) | 100 | #define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) |
| 101 | 101 | ||
| 102 | /* | 102 | /* |
| 103 | * Some controllers have a 32-bit wide data port register | ||
| 104 | */ | ||
| 105 | #define TMIO_MMC_32BIT_DATA_PORT (1 << 9) | ||
| 106 | |||
| 107 | /* | ||
| 103 | * Some controllers allows to set SDx actual clock | 108 | * Some controllers allows to set SDx actual clock |
| 104 | */ | 109 | */ |
| 105 | #define TMIO_MMC_CLK_ACTUAL (1 << 10) | 110 | #define TMIO_MMC_CLK_ACTUAL (1 << 10) |
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index 4ccda8969639..eac285756b37 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h | |||
| @@ -73,13 +73,15 @@ | |||
| 73 | #define TPS65217_PPATH_AC_CURRENT_MASK 0x0C | 73 | #define TPS65217_PPATH_AC_CURRENT_MASK 0x0C |
| 74 | #define TPS65217_PPATH_USB_CURRENT_MASK 0x03 | 74 | #define TPS65217_PPATH_USB_CURRENT_MASK 0x03 |
| 75 | 75 | ||
| 76 | #define TPS65217_INT_RESERVEDM BIT(7) | ||
| 77 | #define TPS65217_INT_PBM BIT(6) | 76 | #define TPS65217_INT_PBM BIT(6) |
| 78 | #define TPS65217_INT_ACM BIT(5) | 77 | #define TPS65217_INT_ACM BIT(5) |
| 79 | #define TPS65217_INT_USBM BIT(4) | 78 | #define TPS65217_INT_USBM BIT(4) |
| 80 | #define TPS65217_INT_PBI BIT(2) | 79 | #define TPS65217_INT_PBI BIT(2) |
| 81 | #define TPS65217_INT_ACI BIT(1) | 80 | #define TPS65217_INT_ACI BIT(1) |
| 82 | #define TPS65217_INT_USBI BIT(0) | 81 | #define TPS65217_INT_USBI BIT(0) |
| 82 | #define TPS65217_INT_SHIFT 4 | ||
| 83 | #define TPS65217_INT_MASK (TPS65217_INT_PBM | TPS65217_INT_ACM | \ | ||
| 84 | TPS65217_INT_USBM) | ||
| 83 | 85 | ||
| 84 | #define TPS65217_CHGCONFIG0_TREG BIT(7) | 86 | #define TPS65217_CHGCONFIG0_TREG BIT(7) |
| 85 | #define TPS65217_CHGCONFIG0_DPPM BIT(6) | 87 | #define TPS65217_CHGCONFIG0_DPPM BIT(6) |
| @@ -234,12 +236,11 @@ struct tps65217_bl_pdata { | |||
| 234 | int dft_brightness; | 236 | int dft_brightness; |
| 235 | }; | 237 | }; |
| 236 | 238 | ||
| 237 | enum tps65217_irq_type { | 239 | /* Interrupt numbers */ |
| 238 | TPS65217_IRQ_PB, | 240 | #define TPS65217_IRQ_USB 0 |
| 239 | TPS65217_IRQ_AC, | 241 | #define TPS65217_IRQ_AC 1 |
| 240 | TPS65217_IRQ_USB, | 242 | #define TPS65217_IRQ_PB 2 |
| 241 | TPS65217_NUM_IRQ | 243 | #define TPS65217_NUM_IRQ 3 |
| 242 | }; | ||
| 243 | 244 | ||
| 244 | /** | 245 | /** |
| 245 | * struct tps65217_board - packages regulator init data | 246 | * struct tps65217_board - packages regulator init data |
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index d1db9527fab5..bccd2d68b1e3 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h | |||
| @@ -282,10 +282,9 @@ struct tps65218 { | |||
| 282 | struct regulator_desc desc[TPS65218_NUM_REGULATOR]; | 282 | struct regulator_desc desc[TPS65218_NUM_REGULATOR]; |
| 283 | struct tps_info *info[TPS65218_NUM_REGULATOR]; | 283 | struct tps_info *info[TPS65218_NUM_REGULATOR]; |
| 284 | struct regmap *regmap; | 284 | struct regmap *regmap; |
| 285 | u8 *strobes; | ||
| 285 | }; | 286 | }; |
| 286 | 287 | ||
| 287 | int tps65218_reg_read(struct tps65218 *tps, unsigned int reg, | ||
| 288 | unsigned int *val); | ||
| 289 | int tps65218_reg_write(struct tps65218 *tps, unsigned int reg, | 288 | int tps65218_reg_write(struct tps65218 *tps, unsigned int reg, |
| 290 | unsigned int val, unsigned int level); | 289 | unsigned int val, unsigned int level); |
| 291 | int tps65218_set_bits(struct tps65218 *tps, unsigned int reg, | 290 | int tps65218_set_bits(struct tps65218 *tps, unsigned int reg, |
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 1a603701550e..b25d0297ba88 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h | |||
| @@ -319,21 +319,7 @@ struct tps65912 { | |||
| 319 | struct regmap_irq_chip_data *irq_data; | 319 | struct regmap_irq_chip_data *irq_data; |
| 320 | }; | 320 | }; |
| 321 | 321 | ||
| 322 | static const struct regmap_range tps65912_yes_ranges[] = { | 322 | extern const struct regmap_config tps65912_regmap_config; |
| 323 | regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5), | ||
| 324 | }; | ||
| 325 | |||
| 326 | static const struct regmap_access_table tps65912_volatile_table = { | ||
| 327 | .yes_ranges = tps65912_yes_ranges, | ||
| 328 | .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges), | ||
| 329 | }; | ||
| 330 | |||
| 331 | static const struct regmap_config tps65912_regmap_config = { | ||
| 332 | .reg_bits = 8, | ||
| 333 | .val_bits = 8, | ||
| 334 | .cache_type = REGCACHE_RBTREE, | ||
| 335 | .volatile_table = &tps65912_volatile_table, | ||
| 336 | }; | ||
| 337 | 323 | ||
| 338 | int tps65912_device_init(struct tps65912 *tps); | 324 | int tps65912_device_init(struct tps65912 *tps); |
| 339 | int tps65912_device_exit(struct tps65912 *tps); | 325 | int tps65912_device_exit(struct tps65912 *tps); |
diff --git a/include/linux/mii.h b/include/linux/mii.h index 47492c9631b3..1629a0c32679 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
| @@ -31,7 +31,11 @@ struct mii_if_info { | |||
| 31 | extern int mii_link_ok (struct mii_if_info *mii); | 31 | extern int mii_link_ok (struct mii_if_info *mii); |
| 32 | extern int mii_nway_restart (struct mii_if_info *mii); | 32 | extern int mii_nway_restart (struct mii_if_info *mii); |
| 33 | extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); | 33 | extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); |
| 34 | extern int mii_ethtool_get_link_ksettings( | ||
| 35 | struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); | ||
| 34 | extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); | 36 | extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); |
| 37 | extern int mii_ethtool_set_link_ksettings( | ||
| 38 | struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); | ||
| 35 | extern int mii_check_gmii_support(struct mii_if_info *mii); | 39 | extern int mii_check_gmii_support(struct mii_if_info *mii); |
| 36 | extern void mii_check_link (struct mii_if_info *mii); | 40 | extern void mii_check_link (struct mii_if_info *mii); |
| 37 | extern unsigned int mii_check_media (struct mii_if_info *mii, | 41 | extern unsigned int mii_check_media (struct mii_if_info *mii, |
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 722698a43d79..ed30d5d713e3 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #define STORE_QUEUE_MINOR 155 /* unused */ | 32 | #define STORE_QUEUE_MINOR 155 /* unused */ |
| 33 | #define I2O_MINOR 166 | 33 | #define I2O_MINOR 166 |
| 34 | #define MICROCODE_MINOR 184 | 34 | #define MICROCODE_MINOR 184 |
| 35 | #define IRNET_MINOR 187 | ||
| 35 | #define VFIO_MINOR 196 | 36 | #define VFIO_MINOR 196 |
| 36 | #define TUN_MINOR 200 | 37 | #define TUN_MINOR 200 |
| 37 | #define CUSE_MINOR 203 | 38 | #define CUSE_MINOR 203 |
| @@ -72,6 +73,13 @@ extern int misc_register(struct miscdevice *misc); | |||
| 72 | extern void misc_deregister(struct miscdevice *misc); | 73 | extern void misc_deregister(struct miscdevice *misc); |
| 73 | 74 | ||
| 74 | /* | 75 | /* |
| 76 | * Helper macro for drivers that don't do anything special in the initcall. | ||
| 77 | * This helps in eleminating of boilerplate code. | ||
| 78 | */ | ||
| 79 | #define builtin_misc_device(__misc_device) \ | ||
| 80 | builtin_driver(__misc_device, misc_register) | ||
| 81 | |||
| 82 | /* | ||
| 75 | * Helper macro for drivers that don't do anything special in module init / exit | 83 | * Helper macro for drivers that don't do anything special in module init / exit |
| 76 | * call. This helps in eleminating of boilerplate code. | 84 | * call. This helps in eleminating of boilerplate code. |
| 77 | */ | 85 | */ |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index c9f379689dd0..93bdb3485192 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -1460,7 +1460,7 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, | |||
| 1460 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, | 1460 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, |
| 1461 | u32 max_range_qpn); | 1461 | u32 max_range_qpn); |
| 1462 | 1462 | ||
| 1463 | cycle_t mlx4_read_clock(struct mlx4_dev *dev); | 1463 | u64 mlx4_read_clock(struct mlx4_dev *dev); |
| 1464 | 1464 | ||
| 1465 | struct mlx4_active_ports { | 1465 | struct mlx4_active_ports { |
| 1466 | DECLARE_BITMAP(ports, MLX4_MAX_PORTS); | 1466 | DECLARE_BITMAP(ports, MLX4_MAX_PORTS); |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 58276144ba81..9f489365b3d3 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -277,6 +277,7 @@ enum mlx5_event { | |||
| 277 | MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, | 277 | MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, |
| 278 | MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, | 278 | MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, |
| 279 | MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, | 279 | MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, |
| 280 | MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, | ||
| 280 | MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, | 281 | MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, |
| 281 | 282 | ||
| 282 | MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, | 283 | MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, |
| @@ -552,6 +553,15 @@ struct mlx5_eqe_vport_change { | |||
| 552 | __be32 rsvd1[6]; | 553 | __be32 rsvd1[6]; |
| 553 | } __packed; | 554 | } __packed; |
| 554 | 555 | ||
| 556 | struct mlx5_eqe_port_module { | ||
| 557 | u8 reserved_at_0[1]; | ||
| 558 | u8 module; | ||
| 559 | u8 reserved_at_2[1]; | ||
| 560 | u8 module_status; | ||
| 561 | u8 reserved_at_4[2]; | ||
| 562 | u8 error_type; | ||
| 563 | } __packed; | ||
| 564 | |||
| 555 | union ev_data { | 565 | union ev_data { |
| 556 | __be32 raw[7]; | 566 | __be32 raw[7]; |
| 557 | struct mlx5_eqe_cmd cmd; | 567 | struct mlx5_eqe_cmd cmd; |
| @@ -565,6 +575,7 @@ union ev_data { | |||
| 565 | struct mlx5_eqe_page_req req_pages; | 575 | struct mlx5_eqe_page_req req_pages; |
| 566 | struct mlx5_eqe_page_fault page_fault; | 576 | struct mlx5_eqe_page_fault page_fault; |
| 567 | struct mlx5_eqe_vport_change vport_change; | 577 | struct mlx5_eqe_vport_change vport_change; |
| 578 | struct mlx5_eqe_port_module port_module; | ||
| 568 | } __packed; | 579 | } __packed; |
| 569 | 580 | ||
| 570 | struct mlx5_eqe { | 581 | struct mlx5_eqe { |
| @@ -1060,6 +1071,11 @@ enum { | |||
| 1060 | MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, | 1071 | MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, |
| 1061 | }; | 1072 | }; |
| 1062 | 1073 | ||
| 1074 | enum { | ||
| 1075 | MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, | ||
| 1076 | MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2, | ||
| 1077 | }; | ||
| 1078 | |||
| 1063 | static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) | 1079 | static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) |
| 1064 | { | 1080 | { |
| 1065 | if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) | 1081 | if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ecc451d89ccd..0ae55361e674 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -104,6 +104,8 @@ enum { | |||
| 104 | enum { | 104 | enum { |
| 105 | MLX5_REG_QETCR = 0x4005, | 105 | MLX5_REG_QETCR = 0x4005, |
| 106 | MLX5_REG_QTCT = 0x400a, | 106 | MLX5_REG_QTCT = 0x400a, |
| 107 | MLX5_REG_DCBX_PARAM = 0x4020, | ||
| 108 | MLX5_REG_DCBX_APP = 0x4021, | ||
| 107 | MLX5_REG_PCAP = 0x5001, | 109 | MLX5_REG_PCAP = 0x5001, |
| 108 | MLX5_REG_PMTU = 0x5003, | 110 | MLX5_REG_PMTU = 0x5003, |
| 109 | MLX5_REG_PTYS = 0x5004, | 111 | MLX5_REG_PTYS = 0x5004, |
| @@ -121,6 +123,12 @@ enum { | |||
| 121 | MLX5_REG_HOST_ENDIANNESS = 0x7004, | 123 | MLX5_REG_HOST_ENDIANNESS = 0x7004, |
| 122 | MLX5_REG_MCIA = 0x9014, | 124 | MLX5_REG_MCIA = 0x9014, |
| 123 | MLX5_REG_MLCR = 0x902b, | 125 | MLX5_REG_MLCR = 0x902b, |
| 126 | MLX5_REG_MPCNT = 0x9051, | ||
| 127 | }; | ||
| 128 | |||
| 129 | enum mlx5_dcbx_oper_mode { | ||
| 130 | MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, | ||
| 131 | MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, | ||
| 124 | }; | 132 | }; |
| 125 | 133 | ||
| 126 | enum { | 134 | enum { |
| @@ -208,7 +216,7 @@ struct mlx5_cmd_first { | |||
| 208 | 216 | ||
| 209 | struct mlx5_cmd_msg { | 217 | struct mlx5_cmd_msg { |
| 210 | struct list_head list; | 218 | struct list_head list; |
| 211 | struct cache_ent *cache; | 219 | struct cmd_msg_cache *parent; |
| 212 | u32 len; | 220 | u32 len; |
| 213 | struct mlx5_cmd_first first; | 221 | struct mlx5_cmd_first first; |
| 214 | struct mlx5_cmd_mailbox *next; | 222 | struct mlx5_cmd_mailbox *next; |
| @@ -228,17 +236,17 @@ struct mlx5_cmd_debug { | |||
| 228 | u16 outlen; | 236 | u16 outlen; |
| 229 | }; | 237 | }; |
| 230 | 238 | ||
| 231 | struct cache_ent { | 239 | struct cmd_msg_cache { |
| 232 | /* protect block chain allocations | 240 | /* protect block chain allocations |
| 233 | */ | 241 | */ |
| 234 | spinlock_t lock; | 242 | spinlock_t lock; |
| 235 | struct list_head head; | 243 | struct list_head head; |
| 244 | unsigned int max_inbox_size; | ||
| 245 | unsigned int num_ent; | ||
| 236 | }; | 246 | }; |
| 237 | 247 | ||
| 238 | struct cmd_msg_cache { | 248 | enum { |
| 239 | struct cache_ent large; | 249 | MLX5_NUM_COMMAND_CACHES = 5, |
| 240 | struct cache_ent med; | ||
| 241 | |||
| 242 | }; | 250 | }; |
| 243 | 251 | ||
| 244 | struct mlx5_cmd_stats { | 252 | struct mlx5_cmd_stats { |
| @@ -281,7 +289,7 @@ struct mlx5_cmd { | |||
| 281 | struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; | 289 | struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; |
| 282 | struct pci_pool *pool; | 290 | struct pci_pool *pool; |
| 283 | struct mlx5_cmd_debug dbg; | 291 | struct mlx5_cmd_debug dbg; |
| 284 | struct cmd_msg_cache cache; | 292 | struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; |
| 285 | int checksum_disabled; | 293 | int checksum_disabled; |
| 286 | struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; | 294 | struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; |
| 287 | }; | 295 | }; |
| @@ -310,6 +318,13 @@ struct mlx5_buf { | |||
| 310 | u8 page_shift; | 318 | u8 page_shift; |
| 311 | }; | 319 | }; |
| 312 | 320 | ||
| 321 | struct mlx5_frag_buf { | ||
| 322 | struct mlx5_buf_list *frags; | ||
| 323 | int npages; | ||
| 324 | int size; | ||
| 325 | u8 page_shift; | ||
| 326 | }; | ||
| 327 | |||
| 313 | struct mlx5_eq_tasklet { | 328 | struct mlx5_eq_tasklet { |
| 314 | struct list_head list; | 329 | struct list_head list; |
| 315 | struct list_head process_list; | 330 | struct list_head process_list; |
| @@ -498,6 +513,31 @@ struct mlx5_rl_table { | |||
| 498 | struct mlx5_rl_entry *rl_entry; | 513 | struct mlx5_rl_entry *rl_entry; |
| 499 | }; | 514 | }; |
| 500 | 515 | ||
| 516 | enum port_module_event_status_type { | ||
| 517 | MLX5_MODULE_STATUS_PLUGGED = 0x1, | ||
| 518 | MLX5_MODULE_STATUS_UNPLUGGED = 0x2, | ||
| 519 | MLX5_MODULE_STATUS_ERROR = 0x3, | ||
| 520 | MLX5_MODULE_STATUS_NUM = 0x3, | ||
| 521 | }; | ||
| 522 | |||
| 523 | enum port_module_event_error_type { | ||
| 524 | MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, | ||
| 525 | MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, | ||
| 526 | MLX5_MODULE_EVENT_ERROR_BUS_STUCK, | ||
| 527 | MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, | ||
| 528 | MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, | ||
| 529 | MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, | ||
| 530 | MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, | ||
| 531 | MLX5_MODULE_EVENT_ERROR_BAD_CABLE, | ||
| 532 | MLX5_MODULE_EVENT_ERROR_UNKNOWN, | ||
| 533 | MLX5_MODULE_EVENT_ERROR_NUM, | ||
| 534 | }; | ||
| 535 | |||
| 536 | struct mlx5_port_module_event_stats { | ||
| 537 | u64 status_counters[MLX5_MODULE_STATUS_NUM]; | ||
| 538 | u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; | ||
| 539 | }; | ||
| 540 | |||
| 501 | struct mlx5_priv { | 541 | struct mlx5_priv { |
| 502 | char name[MLX5_MAX_NAME_LEN]; | 542 | char name[MLX5_MAX_NAME_LEN]; |
| 503 | struct mlx5_eq_table eq_table; | 543 | struct mlx5_eq_table eq_table; |
| @@ -559,6 +599,8 @@ struct mlx5_priv { | |||
| 559 | unsigned long pci_dev_data; | 599 | unsigned long pci_dev_data; |
| 560 | struct mlx5_fc_stats fc_stats; | 600 | struct mlx5_fc_stats fc_stats; |
| 561 | struct mlx5_rl_table rl_table; | 601 | struct mlx5_rl_table rl_table; |
| 602 | |||
| 603 | struct mlx5_port_module_event_stats pme_stats; | ||
| 562 | }; | 604 | }; |
| 563 | 605 | ||
| 564 | enum mlx5_device_state { | 606 | enum mlx5_device_state { |
| @@ -787,6 +829,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, | |||
| 787 | struct mlx5_buf *buf, int node); | 829 | struct mlx5_buf *buf, int node); |
| 788 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); | 830 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); |
| 789 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); | 831 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); |
| 832 | int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, | ||
| 833 | struct mlx5_frag_buf *buf, int node); | ||
| 834 | void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); | ||
| 790 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, | 835 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, |
| 791 | gfp_t flags, int npages); | 836 | gfp_t flags, int npages); |
| 792 | void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, | 837 | void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, |
| @@ -831,6 +876,7 @@ void mlx5_unregister_debugfs(void); | |||
| 831 | int mlx5_eq_init(struct mlx5_core_dev *dev); | 876 | int mlx5_eq_init(struct mlx5_core_dev *dev); |
| 832 | void mlx5_eq_cleanup(struct mlx5_core_dev *dev); | 877 | void mlx5_eq_cleanup(struct mlx5_core_dev *dev); |
| 833 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); | 878 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); |
| 879 | void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); | ||
| 834 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); | 880 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); |
| 835 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); | 881 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); |
| 836 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 882 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 93ebc5e21334..949b24b6c479 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h | |||
| @@ -42,6 +42,10 @@ enum { | |||
| 42 | MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, | 42 | MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | enum { | ||
| 46 | MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), | ||
| 47 | }; | ||
| 48 | |||
| 45 | #define LEFTOVERS_RULE_NUM 2 | 49 | #define LEFTOVERS_RULE_NUM 2 |
| 46 | static inline void build_leftovers_ft_param(int *priority, | 50 | static inline void build_leftovers_ft_param(int *priority, |
| 47 | int *n_ent, | 51 | int *n_ent, |
| @@ -69,8 +73,8 @@ enum mlx5_flow_namespace_type { | |||
| 69 | 73 | ||
| 70 | struct mlx5_flow_table; | 74 | struct mlx5_flow_table; |
| 71 | struct mlx5_flow_group; | 75 | struct mlx5_flow_group; |
| 72 | struct mlx5_flow_rule; | ||
| 73 | struct mlx5_flow_namespace; | 76 | struct mlx5_flow_namespace; |
| 77 | struct mlx5_flow_handle; | ||
| 74 | 78 | ||
| 75 | struct mlx5_flow_spec { | 79 | struct mlx5_flow_spec { |
| 76 | u8 match_criteria_enable; | 80 | u8 match_criteria_enable; |
| @@ -97,13 +101,15 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, | |||
| 97 | int prio, | 101 | int prio, |
| 98 | int num_flow_table_entries, | 102 | int num_flow_table_entries, |
| 99 | int max_num_groups, | 103 | int max_num_groups, |
| 100 | u32 level); | 104 | u32 level, |
| 105 | u32 flags); | ||
| 101 | 106 | ||
| 102 | struct mlx5_flow_table * | 107 | struct mlx5_flow_table * |
| 103 | mlx5_create_flow_table(struct mlx5_flow_namespace *ns, | 108 | mlx5_create_flow_table(struct mlx5_flow_namespace *ns, |
| 104 | int prio, | 109 | int prio, |
| 105 | int num_flow_table_entries, | 110 | int num_flow_table_entries, |
| 106 | u32 level); | 111 | u32 level, |
| 112 | u32 flags); | ||
| 107 | struct mlx5_flow_table * | 113 | struct mlx5_flow_table * |
| 108 | mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, | 114 | mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, |
| 109 | int prio, | 115 | int prio, |
| @@ -124,21 +130,28 @@ struct mlx5_flow_group * | |||
| 124 | mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); | 130 | mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); |
| 125 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); | 131 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); |
| 126 | 132 | ||
| 133 | struct mlx5_flow_act { | ||
| 134 | u32 action; | ||
| 135 | u32 flow_tag; | ||
| 136 | u32 encap_id; | ||
| 137 | }; | ||
| 138 | |||
| 127 | /* Single destination per rule. | 139 | /* Single destination per rule. |
| 128 | * Group ID is implied by the match criteria. | 140 | * Group ID is implied by the match criteria. |
| 129 | */ | 141 | */ |
| 130 | struct mlx5_flow_rule * | 142 | struct mlx5_flow_handle * |
| 131 | mlx5_add_flow_rule(struct mlx5_flow_table *ft, | 143 | mlx5_add_flow_rules(struct mlx5_flow_table *ft, |
| 132 | struct mlx5_flow_spec *spec, | 144 | struct mlx5_flow_spec *spec, |
| 133 | u32 action, | 145 | struct mlx5_flow_act *flow_act, |
| 134 | u32 flow_tag, | 146 | struct mlx5_flow_destination *dest, |
| 135 | struct mlx5_flow_destination *dest); | 147 | int dest_num); |
| 136 | void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); | 148 | void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); |
| 137 | 149 | ||
| 138 | int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, | 150 | int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, |
| 139 | struct mlx5_flow_destination *dest); | 151 | struct mlx5_flow_destination *new_dest, |
| 140 | 152 | struct mlx5_flow_destination *old_dest); | |
| 141 | struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule); | 153 | |
| 154 | struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler); | ||
| 142 | struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); | 155 | struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); |
| 143 | void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); | 156 | void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); |
| 144 | void mlx5_fc_query_cached(struct mlx5_fc *counter, | 157 | void mlx5_fc_query_cached(struct mlx5_fc *counter, |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6045d4d58065..57bec544e20a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -83,6 +83,7 @@ enum { | |||
| 83 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, | 83 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, |
| 84 | MLX5_CMD_OP_QUERY_ISSI = 0x10a, | 84 | MLX5_CMD_OP_QUERY_ISSI = 0x10a, |
| 85 | MLX5_CMD_OP_SET_ISSI = 0x10b, | 85 | MLX5_CMD_OP_SET_ISSI = 0x10b, |
| 86 | MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, | ||
| 86 | MLX5_CMD_OP_CREATE_MKEY = 0x200, | 87 | MLX5_CMD_OP_CREATE_MKEY = 0x200, |
| 87 | MLX5_CMD_OP_QUERY_MKEY = 0x201, | 88 | MLX5_CMD_OP_QUERY_MKEY = 0x201, |
| 88 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, | 89 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, |
| @@ -145,6 +146,12 @@ enum { | |||
| 145 | MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, | 146 | MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, |
| 146 | MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, | 147 | MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, |
| 147 | MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, | 148 | MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, |
| 149 | MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, | ||
| 150 | MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, | ||
| 151 | MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, | ||
| 152 | MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, | ||
| 153 | MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, | ||
| 154 | MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, | ||
| 148 | MLX5_CMD_OP_ALLOC_PD = 0x800, | 155 | MLX5_CMD_OP_ALLOC_PD = 0x800, |
| 149 | MLX5_CMD_OP_DEALLOC_PD = 0x801, | 156 | MLX5_CMD_OP_DEALLOC_PD = 0x801, |
| 150 | MLX5_CMD_OP_ALLOC_UAR = 0x802, | 157 | MLX5_CMD_OP_ALLOC_UAR = 0x802, |
| @@ -537,13 +544,27 @@ struct mlx5_ifc_e_switch_cap_bits { | |||
| 537 | 544 | ||
| 538 | struct mlx5_ifc_qos_cap_bits { | 545 | struct mlx5_ifc_qos_cap_bits { |
| 539 | u8 packet_pacing[0x1]; | 546 | u8 packet_pacing[0x1]; |
| 540 | u8 reserved_0[0x1f]; | 547 | u8 esw_scheduling[0x1]; |
| 541 | u8 reserved_1[0x20]; | 548 | u8 reserved_at_2[0x1e]; |
| 549 | |||
| 550 | u8 reserved_at_20[0x20]; | ||
| 551 | |||
| 542 | u8 packet_pacing_max_rate[0x20]; | 552 | u8 packet_pacing_max_rate[0x20]; |
| 553 | |||
| 543 | u8 packet_pacing_min_rate[0x20]; | 554 | u8 packet_pacing_min_rate[0x20]; |
| 544 | u8 reserved_2[0x10]; | 555 | |
| 556 | u8 reserved_at_80[0x10]; | ||
| 545 | u8 packet_pacing_rate_table_size[0x10]; | 557 | u8 packet_pacing_rate_table_size[0x10]; |
| 546 | u8 reserved_3[0x760]; | 558 | |
| 559 | u8 esw_element_type[0x10]; | ||
| 560 | u8 esw_tsar_type[0x10]; | ||
| 561 | |||
| 562 | u8 reserved_at_c0[0x10]; | ||
| 563 | u8 max_qos_para_vport[0x10]; | ||
| 564 | |||
| 565 | u8 max_tsar_bw_share[0x20]; | ||
| 566 | |||
| 567 | u8 reserved_at_100[0x700]; | ||
| 547 | }; | 568 | }; |
| 548 | 569 | ||
| 549 | struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | 570 | struct mlx5_ifc_per_protocol_networking_offload_caps_bits { |
| @@ -556,7 +577,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | |||
| 556 | u8 self_lb_en_modifiable[0x1]; | 577 | u8 self_lb_en_modifiable[0x1]; |
| 557 | u8 reserved_at_9[0x2]; | 578 | u8 reserved_at_9[0x2]; |
| 558 | u8 max_lso_cap[0x5]; | 579 | u8 max_lso_cap[0x5]; |
| 559 | u8 reserved_at_10[0x2]; | 580 | u8 multi_pkt_send_wqe[0x2]; |
| 560 | u8 wqe_inline_mode[0x2]; | 581 | u8 wqe_inline_mode[0x2]; |
| 561 | u8 rss_ind_tbl_cap[0x4]; | 582 | u8 rss_ind_tbl_cap[0x4]; |
| 562 | u8 reg_umr_sq[0x1]; | 583 | u8 reg_umr_sq[0x1]; |
| @@ -804,7 +825,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
| 804 | u8 early_vf_enable[0x1]; | 825 | u8 early_vf_enable[0x1]; |
| 805 | u8 reserved_at_1a9[0x2]; | 826 | u8 reserved_at_1a9[0x2]; |
| 806 | u8 local_ca_ack_delay[0x5]; | 827 | u8 local_ca_ack_delay[0x5]; |
| 807 | u8 reserved_at_1af[0x2]; | 828 | u8 port_module_event[0x1]; |
| 829 | u8 reserved_at_1b0[0x1]; | ||
| 808 | u8 ports_check[0x1]; | 830 | u8 ports_check[0x1]; |
| 809 | u8 reserved_at_1b2[0x1]; | 831 | u8 reserved_at_1b2[0x1]; |
| 810 | u8 disable_link_up[0x1]; | 832 | u8 disable_link_up[0x1]; |
| @@ -888,7 +910,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
| 888 | u8 log_pg_sz[0x8]; | 910 | u8 log_pg_sz[0x8]; |
| 889 | 911 | ||
| 890 | u8 bf[0x1]; | 912 | u8 bf[0x1]; |
| 891 | u8 reserved_at_261[0x1]; | 913 | u8 driver_version[0x1]; |
| 892 | u8 pad_tx_eth_packet[0x1]; | 914 | u8 pad_tx_eth_packet[0x1]; |
| 893 | u8 reserved_at_263[0x8]; | 915 | u8 reserved_at_263[0x8]; |
| 894 | u8 log_bf_reg_size[0x5]; | 916 | u8 log_bf_reg_size[0x5]; |
| @@ -1735,6 +1757,80 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { | |||
| 1735 | u8 reserved_at_4c0[0x300]; | 1757 | u8 reserved_at_4c0[0x300]; |
| 1736 | }; | 1758 | }; |
| 1737 | 1759 | ||
| 1760 | struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { | ||
| 1761 | u8 life_time_counter_high[0x20]; | ||
| 1762 | |||
| 1763 | u8 life_time_counter_low[0x20]; | ||
| 1764 | |||
| 1765 | u8 rx_errors[0x20]; | ||
| 1766 | |||
| 1767 | u8 tx_errors[0x20]; | ||
| 1768 | |||
| 1769 | u8 l0_to_recovery_eieos[0x20]; | ||
| 1770 | |||
| 1771 | u8 l0_to_recovery_ts[0x20]; | ||
| 1772 | |||
| 1773 | u8 l0_to_recovery_framing[0x20]; | ||
| 1774 | |||
| 1775 | u8 l0_to_recovery_retrain[0x20]; | ||
| 1776 | |||
| 1777 | u8 crc_error_dllp[0x20]; | ||
| 1778 | |||
| 1779 | u8 crc_error_tlp[0x20]; | ||
| 1780 | |||
| 1781 | u8 reserved_at_140[0x680]; | ||
| 1782 | }; | ||
| 1783 | |||
| 1784 | struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits { | ||
| 1785 | u8 life_time_counter_high[0x20]; | ||
| 1786 | |||
| 1787 | u8 life_time_counter_low[0x20]; | ||
| 1788 | |||
| 1789 | u8 time_to_boot_image_start[0x20]; | ||
| 1790 | |||
| 1791 | u8 time_to_link_image[0x20]; | ||
| 1792 | |||
| 1793 | u8 calibration_time[0x20]; | ||
| 1794 | |||
| 1795 | u8 time_to_first_perst[0x20]; | ||
| 1796 | |||
| 1797 | u8 time_to_detect_state[0x20]; | ||
| 1798 | |||
| 1799 | u8 time_to_l0[0x20]; | ||
| 1800 | |||
| 1801 | u8 time_to_crs_en[0x20]; | ||
| 1802 | |||
| 1803 | u8 time_to_plastic_image_start[0x20]; | ||
| 1804 | |||
| 1805 | u8 time_to_iron_image_start[0x20]; | ||
| 1806 | |||
| 1807 | u8 perst_handler[0x20]; | ||
| 1808 | |||
| 1809 | u8 times_in_l1[0x20]; | ||
| 1810 | |||
| 1811 | u8 times_in_l23[0x20]; | ||
| 1812 | |||
| 1813 | u8 dl_down[0x20]; | ||
| 1814 | |||
| 1815 | u8 config_cycle1usec[0x20]; | ||
| 1816 | |||
| 1817 | u8 config_cycle2to7usec[0x20]; | ||
| 1818 | |||
| 1819 | u8 config_cycle_8to15usec[0x20]; | ||
| 1820 | |||
| 1821 | u8 config_cycle_16_to_63usec[0x20]; | ||
| 1822 | |||
| 1823 | u8 config_cycle_64usec[0x20]; | ||
| 1824 | |||
| 1825 | u8 correctable_err_msg_sent[0x20]; | ||
| 1826 | |||
| 1827 | u8 non_fatal_err_msg_sent[0x20]; | ||
| 1828 | |||
| 1829 | u8 fatal_err_msg_sent[0x20]; | ||
| 1830 | |||
| 1831 | u8 reserved_at_2e0[0x4e0]; | ||
| 1832 | }; | ||
| 1833 | |||
| 1738 | struct mlx5_ifc_cmd_inter_comp_event_bits { | 1834 | struct mlx5_ifc_cmd_inter_comp_event_bits { |
| 1739 | u8 command_completion_vector[0x20]; | 1835 | u8 command_completion_vector[0x20]; |
| 1740 | 1836 | ||
| @@ -2333,6 +2429,30 @@ struct mlx5_ifc_sqc_bits { | |||
| 2333 | struct mlx5_ifc_wq_bits wq; | 2429 | struct mlx5_ifc_wq_bits wq; |
| 2334 | }; | 2430 | }; |
| 2335 | 2431 | ||
| 2432 | enum { | ||
| 2433 | SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, | ||
| 2434 | SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, | ||
| 2435 | SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, | ||
| 2436 | SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, | ||
| 2437 | }; | ||
| 2438 | |||
| 2439 | struct mlx5_ifc_scheduling_context_bits { | ||
| 2440 | u8 element_type[0x8]; | ||
| 2441 | u8 reserved_at_8[0x18]; | ||
| 2442 | |||
| 2443 | u8 element_attributes[0x20]; | ||
| 2444 | |||
| 2445 | u8 parent_element_id[0x20]; | ||
| 2446 | |||
| 2447 | u8 reserved_at_60[0x40]; | ||
| 2448 | |||
| 2449 | u8 bw_share[0x20]; | ||
| 2450 | |||
| 2451 | u8 max_average_bw[0x20]; | ||
| 2452 | |||
| 2453 | u8 reserved_at_e0[0x120]; | ||
| 2454 | }; | ||
| 2455 | |||
| 2336 | struct mlx5_ifc_rqtc_bits { | 2456 | struct mlx5_ifc_rqtc_bits { |
| 2337 | u8 reserved_at_0[0xa0]; | 2457 | u8 reserved_at_0[0xa0]; |
| 2338 | 2458 | ||
| @@ -2844,7 +2964,7 @@ struct mlx5_ifc_xrqc_bits { | |||
| 2844 | 2964 | ||
| 2845 | struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; | 2965 | struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; |
| 2846 | 2966 | ||
| 2847 | u8 reserved_at_180[0x200]; | 2967 | u8 reserved_at_180[0x880]; |
| 2848 | 2968 | ||
| 2849 | struct mlx5_ifc_wq_bits wq; | 2969 | struct mlx5_ifc_wq_bits wq; |
| 2850 | }; | 2970 | }; |
| @@ -2875,6 +2995,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { | |||
| 2875 | u8 reserved_at_0[0x7c0]; | 2995 | u8 reserved_at_0[0x7c0]; |
| 2876 | }; | 2996 | }; |
| 2877 | 2997 | ||
| 2998 | union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { | ||
| 2999 | struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; | ||
| 3000 | struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout; | ||
| 3001 | u8 reserved_at_0[0x7c0]; | ||
| 3002 | }; | ||
| 3003 | |||
| 2878 | union mlx5_ifc_event_auto_bits { | 3004 | union mlx5_ifc_event_auto_bits { |
| 2879 | struct mlx5_ifc_comp_event_bits comp_event; | 3005 | struct mlx5_ifc_comp_event_bits comp_event; |
| 2880 | struct mlx5_ifc_dct_events_bits dct_events; | 3006 | struct mlx5_ifc_dct_events_bits dct_events; |
| @@ -2920,6 +3046,29 @@ struct mlx5_ifc_register_loopback_control_bits { | |||
| 2920 | u8 reserved_at_20[0x60]; | 3046 | u8 reserved_at_20[0x60]; |
| 2921 | }; | 3047 | }; |
| 2922 | 3048 | ||
| 3049 | struct mlx5_ifc_vport_tc_element_bits { | ||
| 3050 | u8 traffic_class[0x4]; | ||
| 3051 | u8 reserved_at_4[0xc]; | ||
| 3052 | u8 vport_number[0x10]; | ||
| 3053 | }; | ||
| 3054 | |||
| 3055 | struct mlx5_ifc_vport_element_bits { | ||
| 3056 | u8 reserved_at_0[0x10]; | ||
| 3057 | u8 vport_number[0x10]; | ||
| 3058 | }; | ||
| 3059 | |||
| 3060 | enum { | ||
| 3061 | TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, | ||
| 3062 | TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, | ||
| 3063 | TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, | ||
| 3064 | }; | ||
| 3065 | |||
| 3066 | struct mlx5_ifc_tsar_element_bits { | ||
| 3067 | u8 reserved_at_0[0x8]; | ||
| 3068 | u8 tsar_type[0x8]; | ||
| 3069 | u8 reserved_at_10[0x10]; | ||
| 3070 | }; | ||
| 3071 | |||
| 2923 | struct mlx5_ifc_teardown_hca_out_bits { | 3072 | struct mlx5_ifc_teardown_hca_out_bits { |
| 2924 | u8 status[0x8]; | 3073 | u8 status[0x8]; |
| 2925 | u8 reserved_at_8[0x18]; | 3074 | u8 reserved_at_8[0x18]; |
| @@ -3540,6 +3689,39 @@ struct mlx5_ifc_query_special_contexts_in_bits { | |||
| 3540 | u8 reserved_at_40[0x40]; | 3689 | u8 reserved_at_40[0x40]; |
| 3541 | }; | 3690 | }; |
| 3542 | 3691 | ||
| 3692 | struct mlx5_ifc_query_scheduling_element_out_bits { | ||
| 3693 | u8 opcode[0x10]; | ||
| 3694 | u8 reserved_at_10[0x10]; | ||
| 3695 | |||
| 3696 | u8 reserved_at_20[0x10]; | ||
| 3697 | u8 op_mod[0x10]; | ||
| 3698 | |||
| 3699 | u8 reserved_at_40[0xc0]; | ||
| 3700 | |||
| 3701 | struct mlx5_ifc_scheduling_context_bits scheduling_context; | ||
| 3702 | |||
| 3703 | u8 reserved_at_300[0x100]; | ||
| 3704 | }; | ||
| 3705 | |||
| 3706 | enum { | ||
| 3707 | SCHEDULING_HIERARCHY_E_SWITCH = 0x2, | ||
| 3708 | }; | ||
| 3709 | |||
| 3710 | struct mlx5_ifc_query_scheduling_element_in_bits { | ||
| 3711 | u8 opcode[0x10]; | ||
| 3712 | u8 reserved_at_10[0x10]; | ||
| 3713 | |||
| 3714 | u8 reserved_at_20[0x10]; | ||
| 3715 | u8 op_mod[0x10]; | ||
| 3716 | |||
| 3717 | u8 scheduling_hierarchy[0x8]; | ||
| 3718 | u8 reserved_at_48[0x18]; | ||
| 3719 | |||
| 3720 | u8 scheduling_element_id[0x20]; | ||
| 3721 | |||
| 3722 | u8 reserved_at_80[0x180]; | ||
| 3723 | }; | ||
| 3724 | |||
| 3543 | struct mlx5_ifc_query_rqt_out_bits { | 3725 | struct mlx5_ifc_query_rqt_out_bits { |
| 3544 | u8 status[0x8]; | 3726 | u8 status[0x8]; |
| 3545 | u8 reserved_at_8[0x18]; | 3727 | u8 reserved_at_8[0x18]; |
| @@ -3904,6 +4086,25 @@ struct mlx5_ifc_query_issi_in_bits { | |||
| 3904 | u8 reserved_at_40[0x40]; | 4086 | u8 reserved_at_40[0x40]; |
| 3905 | }; | 4087 | }; |
| 3906 | 4088 | ||
| 4089 | struct mlx5_ifc_set_driver_version_out_bits { | ||
| 4090 | u8 status[0x8]; | ||
| 4091 | u8 reserved_0[0x18]; | ||
| 4092 | |||
| 4093 | u8 syndrome[0x20]; | ||
| 4094 | u8 reserved_1[0x40]; | ||
| 4095 | }; | ||
| 4096 | |||
| 4097 | struct mlx5_ifc_set_driver_version_in_bits { | ||
| 4098 | u8 opcode[0x10]; | ||
| 4099 | u8 reserved_0[0x10]; | ||
| 4100 | |||
| 4101 | u8 reserved_1[0x10]; | ||
| 4102 | u8 op_mod[0x10]; | ||
| 4103 | |||
| 4104 | u8 reserved_2[0x40]; | ||
| 4105 | u8 driver_version[64][0x8]; | ||
| 4106 | }; | ||
| 4107 | |||
| 3907 | struct mlx5_ifc_query_hca_vport_pkey_out_bits { | 4108 | struct mlx5_ifc_query_hca_vport_pkey_out_bits { |
| 3908 | u8 status[0x8]; | 4109 | u8 status[0x8]; |
| 3909 | u8 reserved_at_8[0x18]; | 4110 | u8 reserved_at_8[0x18]; |
| @@ -4725,6 +4926,43 @@ struct mlx5_ifc_modify_sq_in_bits { | |||
| 4725 | struct mlx5_ifc_sqc_bits ctx; | 4926 | struct mlx5_ifc_sqc_bits ctx; |
| 4726 | }; | 4927 | }; |
| 4727 | 4928 | ||
| 4929 | struct mlx5_ifc_modify_scheduling_element_out_bits { | ||
| 4930 | u8 status[0x8]; | ||
| 4931 | u8 reserved_at_8[0x18]; | ||
| 4932 | |||
| 4933 | u8 syndrome[0x20]; | ||
| 4934 | |||
| 4935 | u8 reserved_at_40[0x1c0]; | ||
| 4936 | }; | ||
| 4937 | |||
| 4938 | enum { | ||
| 4939 | MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1, | ||
| 4940 | MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2, | ||
| 4941 | }; | ||
| 4942 | |||
| 4943 | struct mlx5_ifc_modify_scheduling_element_in_bits { | ||
| 4944 | u8 opcode[0x10]; | ||
| 4945 | u8 reserved_at_10[0x10]; | ||
| 4946 | |||
| 4947 | u8 reserved_at_20[0x10]; | ||
| 4948 | u8 op_mod[0x10]; | ||
| 4949 | |||
| 4950 | u8 scheduling_hierarchy[0x8]; | ||
| 4951 | u8 reserved_at_48[0x18]; | ||
| 4952 | |||
| 4953 | u8 scheduling_element_id[0x20]; | ||
| 4954 | |||
| 4955 | u8 reserved_at_80[0x20]; | ||
| 4956 | |||
| 4957 | u8 modify_bitmask[0x20]; | ||
| 4958 | |||
| 4959 | u8 reserved_at_c0[0x40]; | ||
| 4960 | |||
| 4961 | struct mlx5_ifc_scheduling_context_bits scheduling_context; | ||
| 4962 | |||
| 4963 | u8 reserved_at_300[0x100]; | ||
| 4964 | }; | ||
| 4965 | |||
| 4728 | struct mlx5_ifc_modify_rqt_out_bits { | 4966 | struct mlx5_ifc_modify_rqt_out_bits { |
| 4729 | u8 status[0x8]; | 4967 | u8 status[0x8]; |
| 4730 | u8 reserved_at_8[0x18]; | 4968 | u8 reserved_at_8[0x18]; |
| @@ -5390,6 +5628,30 @@ struct mlx5_ifc_destroy_sq_in_bits { | |||
| 5390 | u8 reserved_at_60[0x20]; | 5628 | u8 reserved_at_60[0x20]; |
| 5391 | }; | 5629 | }; |
| 5392 | 5630 | ||
| 5631 | struct mlx5_ifc_destroy_scheduling_element_out_bits { | ||
| 5632 | u8 status[0x8]; | ||
| 5633 | u8 reserved_at_8[0x18]; | ||
| 5634 | |||
| 5635 | u8 syndrome[0x20]; | ||
| 5636 | |||
| 5637 | u8 reserved_at_40[0x1c0]; | ||
| 5638 | }; | ||
| 5639 | |||
| 5640 | struct mlx5_ifc_destroy_scheduling_element_in_bits { | ||
| 5641 | u8 opcode[0x10]; | ||
| 5642 | u8 reserved_at_10[0x10]; | ||
| 5643 | |||
| 5644 | u8 reserved_at_20[0x10]; | ||
| 5645 | u8 op_mod[0x10]; | ||
| 5646 | |||
| 5647 | u8 scheduling_hierarchy[0x8]; | ||
| 5648 | u8 reserved_at_48[0x18]; | ||
| 5649 | |||
| 5650 | u8 scheduling_element_id[0x20]; | ||
| 5651 | |||
| 5652 | u8 reserved_at_80[0x180]; | ||
| 5653 | }; | ||
| 5654 | |||
| 5393 | struct mlx5_ifc_destroy_rqt_out_bits { | 5655 | struct mlx5_ifc_destroy_rqt_out_bits { |
| 5394 | u8 status[0x8]; | 5656 | u8 status[0x8]; |
| 5395 | u8 reserved_at_8[0x18]; | 5657 | u8 reserved_at_8[0x18]; |
| @@ -6017,6 +6279,36 @@ struct mlx5_ifc_create_sq_in_bits { | |||
| 6017 | struct mlx5_ifc_sqc_bits ctx; | 6279 | struct mlx5_ifc_sqc_bits ctx; |
| 6018 | }; | 6280 | }; |
| 6019 | 6281 | ||
| 6282 | struct mlx5_ifc_create_scheduling_element_out_bits { | ||
| 6283 | u8 status[0x8]; | ||
| 6284 | u8 reserved_at_8[0x18]; | ||
| 6285 | |||
| 6286 | u8 syndrome[0x20]; | ||
| 6287 | |||
| 6288 | u8 reserved_at_40[0x40]; | ||
| 6289 | |||
| 6290 | u8 scheduling_element_id[0x20]; | ||
| 6291 | |||
| 6292 | u8 reserved_at_a0[0x160]; | ||
| 6293 | }; | ||
| 6294 | |||
| 6295 | struct mlx5_ifc_create_scheduling_element_in_bits { | ||
| 6296 | u8 opcode[0x10]; | ||
| 6297 | u8 reserved_at_10[0x10]; | ||
| 6298 | |||
| 6299 | u8 reserved_at_20[0x10]; | ||
| 6300 | u8 op_mod[0x10]; | ||
| 6301 | |||
| 6302 | u8 scheduling_hierarchy[0x8]; | ||
| 6303 | u8 reserved_at_48[0x18]; | ||
| 6304 | |||
| 6305 | u8 reserved_at_60[0xa0]; | ||
| 6306 | |||
| 6307 | struct mlx5_ifc_scheduling_context_bits scheduling_context; | ||
| 6308 | |||
| 6309 | u8 reserved_at_300[0x100]; | ||
| 6310 | }; | ||
| 6311 | |||
| 6020 | struct mlx5_ifc_create_rqt_out_bits { | 6312 | struct mlx5_ifc_create_rqt_out_bits { |
| 6021 | u8 status[0x8]; | 6313 | u8 status[0x8]; |
| 6022 | u8 reserved_at_8[0x18]; | 6314 | u8 reserved_at_8[0x18]; |
| @@ -7028,6 +7320,18 @@ struct mlx5_ifc_ppcnt_reg_bits { | |||
| 7028 | union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; | 7320 | union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; |
| 7029 | }; | 7321 | }; |
| 7030 | 7322 | ||
| 7323 | struct mlx5_ifc_mpcnt_reg_bits { | ||
| 7324 | u8 reserved_at_0[0x8]; | ||
| 7325 | u8 pcie_index[0x8]; | ||
| 7326 | u8 reserved_at_10[0xa]; | ||
| 7327 | u8 grp[0x6]; | ||
| 7328 | |||
| 7329 | u8 clr[0x1]; | ||
| 7330 | u8 reserved_at_21[0x1f]; | ||
| 7331 | |||
| 7332 | union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; | ||
| 7333 | }; | ||
| 7334 | |||
| 7031 | struct mlx5_ifc_ppad_reg_bits { | 7335 | struct mlx5_ifc_ppad_reg_bits { |
| 7032 | u8 reserved_at_0[0x3]; | 7336 | u8 reserved_at_0[0x3]; |
| 7033 | u8 single_mac[0x1]; | 7337 | u8 single_mac[0x1]; |
| @@ -7633,6 +7937,7 @@ union mlx5_ifc_ports_control_registers_document_bits { | |||
| 7633 | struct mlx5_ifc_pmtu_reg_bits pmtu_reg; | 7937 | struct mlx5_ifc_pmtu_reg_bits pmtu_reg; |
| 7634 | struct mlx5_ifc_ppad_reg_bits ppad_reg; | 7938 | struct mlx5_ifc_ppad_reg_bits ppad_reg; |
| 7635 | struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; | 7939 | struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; |
| 7940 | struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; | ||
| 7636 | struct mlx5_ifc_pplm_reg_bits pplm_reg; | 7941 | struct mlx5_ifc_pplm_reg_bits pplm_reg; |
| 7637 | struct mlx5_ifc_pplr_reg_bits pplr_reg; | 7942 | struct mlx5_ifc_pplr_reg_bits pplr_reg; |
| 7638 | struct mlx5_ifc_ppsc_reg_bits ppsc_reg; | 7943 | struct mlx5_ifc_ppsc_reg_bits ppsc_reg; |
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index b3065acd20b4..e527732fb31b 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h | |||
| @@ -94,6 +94,9 @@ enum mlx5e_link_mode { | |||
| 94 | 94 | ||
| 95 | #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) | 95 | #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) |
| 96 | 96 | ||
| 97 | #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF | ||
| 98 | #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF | ||
| 99 | |||
| 97 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); | 100 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); |
| 98 | int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, | 101 | int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, |
| 99 | int ptys_size, int proto_mask, u8 local_port); | 102 | int ptys_size, int proto_mask, u8 local_port); |
| @@ -138,8 +141,12 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, | |||
| 138 | int mlx5_max_tc(struct mlx5_core_dev *mdev); | 141 | int mlx5_max_tc(struct mlx5_core_dev *mdev); |
| 139 | 142 | ||
| 140 | int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); | 143 | int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); |
| 144 | int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, | ||
| 145 | u8 prio, u8 *tc); | ||
| 141 | int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); | 146 | int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); |
| 142 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); | 147 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); |
| 148 | int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, | ||
| 149 | u8 tc, u8 *bw_pct); | ||
| 143 | int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, | 150 | int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, |
| 144 | u8 *max_bw_value, | 151 | u8 *max_bw_value, |
| 145 | u8 *max_bw_unit); | 152 | u8 *max_bw_unit); |
| @@ -155,4 +162,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, | |||
| 155 | int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, | 162 | int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, |
| 156 | u16 offset, u16 size, u8 *data); | 163 | u16 offset, u16 size, u8 *data); |
| 157 | 164 | ||
| 165 | int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); | ||
| 166 | int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); | ||
| 158 | #endif /* __MLX5_PORT_H__ */ | 167 | #endif /* __MLX5_PORT_H__ */ |
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index 33c97dc900f8..1cde0fd53f90 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h | |||
| @@ -55,7 +55,7 @@ struct mlx5_srq_attr { | |||
| 55 | u32 lwm; | 55 | u32 lwm; |
| 56 | u32 user_index; | 56 | u32 user_index; |
| 57 | u64 db_record; | 57 | u64 db_record; |
| 58 | u64 *pas; | 58 | __be64 *pas; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | struct mlx5_core_dev; | 61 | struct mlx5_core_dev; |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 451b0bde9083..ec35157ea725 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
| @@ -36,6 +36,12 @@ | |||
| 36 | #include <linux/mlx5/driver.h> | 36 | #include <linux/mlx5/driver.h> |
| 37 | #include <linux/mlx5/device.h> | 37 | #include <linux/mlx5/device.h> |
| 38 | 38 | ||
| 39 | enum { | ||
| 40 | MLX5_CAP_INLINE_MODE_L2, | ||
| 41 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, | ||
| 42 | MLX5_CAP_INLINE_MODE_NOT_REQUIRED, | ||
| 43 | }; | ||
| 44 | |||
| 39 | u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); | 45 | u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); |
| 40 | u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, | 46 | u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, |
| 41 | u16 vport); | 47 | u16 vport); |
| @@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, | |||
| 43 | u16 vport, u8 state); | 49 | u16 vport, u8 state); |
| 44 | int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, | 50 | int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, |
| 45 | u16 vport, u8 *addr); | 51 | u16 vport, u8 *addr); |
| 46 | void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, | 52 | int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, |
| 47 | u8 *min_inline); | 53 | u16 vport, u8 *min_inline); |
| 48 | int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, | 54 | int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, |
| 49 | u16 vport, u8 min_inline); | 55 | u16 vport, u8 min_inline); |
| 50 | int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, | 56 | int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index a92c8d73aeaf..fe6b4036664a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -292,36 +292,23 @@ extern pgprot_t protection_map[16]; | |||
| 292 | * pgoff should be used in favour of virtual_address, if possible. | 292 | * pgoff should be used in favour of virtual_address, if possible. |
| 293 | */ | 293 | */ |
| 294 | struct vm_fault { | 294 | struct vm_fault { |
| 295 | struct vm_area_struct *vma; /* Target VMA */ | ||
| 295 | unsigned int flags; /* FAULT_FLAG_xxx flags */ | 296 | unsigned int flags; /* FAULT_FLAG_xxx flags */ |
| 296 | gfp_t gfp_mask; /* gfp mask to be used for allocations */ | 297 | gfp_t gfp_mask; /* gfp mask to be used for allocations */ |
| 297 | pgoff_t pgoff; /* Logical page offset based on vma */ | 298 | pgoff_t pgoff; /* Logical page offset based on vma */ |
| 298 | void __user *virtual_address; /* Faulting virtual address */ | 299 | unsigned long address; /* Faulting virtual address */ |
| 300 | pmd_t *pmd; /* Pointer to pmd entry matching | ||
| 301 | * the 'address' */ | ||
| 302 | pte_t orig_pte; /* Value of PTE at the time of fault */ | ||
| 299 | 303 | ||
| 300 | struct page *cow_page; /* Handler may choose to COW */ | 304 | struct page *cow_page; /* Page handler may use for COW fault */ |
| 305 | struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ | ||
| 301 | struct page *page; /* ->fault handlers should return a | 306 | struct page *page; /* ->fault handlers should return a |
| 302 | * page here, unless VM_FAULT_NOPAGE | 307 | * page here, unless VM_FAULT_NOPAGE |
| 303 | * is set (which is also implied by | 308 | * is set (which is also implied by |
| 304 | * VM_FAULT_ERROR). | 309 | * VM_FAULT_ERROR). |
| 305 | */ | 310 | */ |
| 306 | void *entry; /* ->fault handler can alternatively | 311 | /* These three entries are valid only while holding ptl lock */ |
| 307 | * return locked DAX entry. In that | ||
| 308 | * case handler should return | ||
| 309 | * VM_FAULT_DAX_LOCKED and fill in | ||
| 310 | * entry here. | ||
| 311 | */ | ||
| 312 | }; | ||
| 313 | |||
| 314 | /* | ||
| 315 | * Page fault context: passes though page fault handler instead of endless list | ||
| 316 | * of function arguments. | ||
| 317 | */ | ||
| 318 | struct fault_env { | ||
| 319 | struct vm_area_struct *vma; /* Target VMA */ | ||
| 320 | unsigned long address; /* Faulting virtual address */ | ||
| 321 | unsigned int flags; /* FAULT_FLAG_xxx flags */ | ||
| 322 | pmd_t *pmd; /* Pointer to pmd entry matching | ||
| 323 | * the 'address' | ||
| 324 | */ | ||
| 325 | pte_t *pte; /* Pointer to pte entry matching | 312 | pte_t *pte; /* Pointer to pte entry matching |
| 326 | * the 'address'. NULL if the page | 313 | * the 'address'. NULL if the page |
| 327 | * table hasn't been allocated. | 314 | * table hasn't been allocated. |
| @@ -351,7 +338,7 @@ struct vm_operations_struct { | |||
| 351 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | 338 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 352 | int (*pmd_fault)(struct vm_area_struct *, unsigned long address, | 339 | int (*pmd_fault)(struct vm_area_struct *, unsigned long address, |
| 353 | pmd_t *, unsigned int flags); | 340 | pmd_t *, unsigned int flags); |
| 354 | void (*map_pages)(struct fault_env *fe, | 341 | void (*map_pages)(struct vm_fault *vmf, |
| 355 | pgoff_t start_pgoff, pgoff_t end_pgoff); | 342 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
| 356 | 343 | ||
| 357 | /* notification that a previously read-only page is about to become | 344 | /* notification that a previously read-only page is about to become |
| @@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |||
| 625 | return pte; | 612 | return pte; |
| 626 | } | 613 | } |
| 627 | 614 | ||
| 628 | int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, | 615 | int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, |
| 629 | struct page *page); | 616 | struct page *page); |
| 617 | int finish_fault(struct vm_fault *vmf); | ||
| 618 | int finish_mkwrite_fault(struct vm_fault *vmf); | ||
| 630 | #endif | 619 | #endif |
| 631 | 620 | ||
| 632 | /* | 621 | /* |
| @@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page) | |||
| 1110 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ | 1099 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ |
| 1111 | #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ | 1100 | #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ |
| 1112 | #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ | 1101 | #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ |
| 1113 | #define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ | 1102 | #define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ |
| 1114 | 1103 | ||
| 1115 | #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ | 1104 | #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ |
| 1116 | 1105 | ||
| @@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, | |||
| 1221 | struct vm_area_struct *vma); | 1210 | struct vm_area_struct *vma); |
| 1222 | void unmap_mapping_range(struct address_space *mapping, | 1211 | void unmap_mapping_range(struct address_space *mapping, |
| 1223 | loff_t const holebegin, loff_t const holelen, int even_cows); | 1212 | loff_t const holebegin, loff_t const holelen, int even_cows); |
| 1213 | int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, | ||
| 1214 | spinlock_t **ptlp); | ||
| 1224 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, | 1215 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
| 1225 | unsigned long *pfn); | 1216 | unsigned long *pfn); |
| 1226 | int follow_phys(struct vm_area_struct *vma, unsigned long address, | 1217 | int follow_phys(struct vm_area_struct *vma, unsigned long address, |
| @@ -1270,19 +1261,18 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * | |||
| 1270 | unsigned int gup_flags); | 1261 | unsigned int gup_flags); |
| 1271 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 1262 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 1272 | void *buf, int len, unsigned int gup_flags); | 1263 | void *buf, int len, unsigned int gup_flags); |
| 1264 | extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | ||
| 1265 | unsigned long addr, void *buf, int len, unsigned int gup_flags); | ||
| 1273 | 1266 | ||
| 1274 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, | 1267 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
| 1275 | unsigned long start, unsigned long nr_pages, | 1268 | unsigned long start, unsigned long nr_pages, |
| 1276 | unsigned int gup_flags, struct page **pages, | 1269 | unsigned int gup_flags, struct page **pages, |
| 1277 | struct vm_area_struct **vmas); | 1270 | struct vm_area_struct **vmas, int *locked); |
| 1278 | long get_user_pages(unsigned long start, unsigned long nr_pages, | 1271 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 1279 | unsigned int gup_flags, struct page **pages, | 1272 | unsigned int gup_flags, struct page **pages, |
| 1280 | struct vm_area_struct **vmas); | 1273 | struct vm_area_struct **vmas); |
| 1281 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | 1274 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
| 1282 | unsigned int gup_flags, struct page **pages, int *locked); | 1275 | unsigned int gup_flags, struct page **pages, int *locked); |
| 1283 | long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, | ||
| 1284 | unsigned long start, unsigned long nr_pages, | ||
| 1285 | struct page **pages, unsigned int gup_flags); | ||
| 1286 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 1276 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 1287 | struct page **pages, unsigned int gup_flags); | 1277 | struct page **pages, unsigned int gup_flags); |
| 1288 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 1278 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
| @@ -1768,6 +1758,8 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) | |||
| 1768 | return ptl; | 1758 | return ptl; |
| 1769 | } | 1759 | } |
| 1770 | 1760 | ||
| 1761 | extern void __init pagecache_init(void); | ||
| 1762 | |||
| 1771 | extern void free_area_init(unsigned long * zones_size); | 1763 | extern void free_area_init(unsigned long * zones_size); |
| 1772 | extern void free_area_init_node(int nid, unsigned long * zones_size, | 1764 | extern void free_area_init_node(int nid, unsigned long * zones_size, |
| 1773 | unsigned long zone_start_pfn, unsigned long *zholes_size); | 1765 | unsigned long zone_start_pfn, unsigned long *zholes_size); |
| @@ -2097,7 +2089,7 @@ extern void truncate_inode_pages_final(struct address_space *); | |||
| 2097 | 2089 | ||
| 2098 | /* generic vm_area_ops exported for stackable file systems */ | 2090 | /* generic vm_area_ops exported for stackable file systems */ |
| 2099 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | 2091 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); |
| 2100 | extern void filemap_map_pages(struct fault_env *fe, | 2092 | extern void filemap_map_pages(struct vm_fault *vmf, |
| 2101 | pgoff_t start_pgoff, pgoff_t end_pgoff); | 2093 | pgoff_t start_pgoff, pgoff_t end_pgoff); |
| 2102 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 2094 | extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 2103 | 2095 | ||
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4a8acedf4b7d..808751d7b737 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -473,6 +473,7 @@ struct mm_struct { | |||
| 473 | */ | 473 | */ |
| 474 | struct task_struct __rcu *owner; | 474 | struct task_struct __rcu *owner; |
| 475 | #endif | 475 | #endif |
| 476 | struct user_namespace *user_ns; | ||
| 476 | 477 | ||
| 477 | /* store ref to file /proc/<pid>/exe symlink points to */ | 478 | /* store ref to file /proc/<pid>/exe symlink points to */ |
| 478 | struct file __rcu *exe_file; | 479 | struct file __rcu *exe_file; |
| @@ -508,10 +509,6 @@ struct mm_struct { | |||
| 508 | bool tlb_flush_pending; | 509 | bool tlb_flush_pending; |
| 509 | #endif | 510 | #endif |
| 510 | struct uprobes_state uprobes_state; | 511 | struct uprobes_state uprobes_state; |
| 511 | #ifdef CONFIG_X86_INTEL_MPX | ||
| 512 | /* address of the bounds directory */ | ||
| 513 | void __user *bd_addr; | ||
| 514 | #endif | ||
| 515 | #ifdef CONFIG_HUGETLB_PAGE | 512 | #ifdef CONFIG_HUGETLB_PAGE |
| 516 | atomic_long_t hugetlb_usage; | 513 | atomic_long_t hugetlb_usage; |
| 517 | #endif | 514 | #endif |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 73fad83acbcb..95d69d498296 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -89,6 +89,8 @@ struct mmc_ext_csd { | |||
| 89 | unsigned int boot_ro_lock; /* ro lock support */ | 89 | unsigned int boot_ro_lock; /* ro lock support */ |
| 90 | bool boot_ro_lockable; | 90 | bool boot_ro_lockable; |
| 91 | bool ffu_capable; /* Firmware upgrade support */ | 91 | bool ffu_capable; /* Firmware upgrade support */ |
| 92 | bool cmdq_support; /* Command Queue supported */ | ||
| 93 | unsigned int cmdq_depth; /* Command Queue depth */ | ||
| 92 | #define MMC_FIRMWARE_LEN 8 | 94 | #define MMC_FIRMWARE_LEN 8 |
| 93 | u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ | 95 | u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ |
| 94 | u8 raw_exception_status; /* 54 */ | 96 | u8 raw_exception_status; /* 54 */ |
| @@ -207,18 +209,6 @@ struct sdio_func_tuple; | |||
| 207 | 209 | ||
| 208 | #define SDIO_MAX_FUNCS 7 | 210 | #define SDIO_MAX_FUNCS 7 |
| 209 | 211 | ||
| 210 | enum mmc_blk_status { | ||
| 211 | MMC_BLK_SUCCESS = 0, | ||
| 212 | MMC_BLK_PARTIAL, | ||
| 213 | MMC_BLK_CMD_ERR, | ||
| 214 | MMC_BLK_RETRY, | ||
| 215 | MMC_BLK_ABORT, | ||
| 216 | MMC_BLK_DATA_ERR, | ||
| 217 | MMC_BLK_ECC_ERR, | ||
| 218 | MMC_BLK_NOMEDIUM, | ||
| 219 | MMC_BLK_NEW_REQUEST, | ||
| 220 | }; | ||
| 221 | |||
| 222 | /* The number of MMC physical partitions. These consist of: | 212 | /* The number of MMC physical partitions. These consist of: |
| 223 | * boot partitions (2), general purpose partitions (4) and | 213 | * boot partitions (2), general purpose partitions (4) and |
| 224 | * RPMB partition (1) in MMC v4.4. | 214 | * RPMB partition (1) in MMC v4.4. |
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 2b953eb8ceae..e33cc748dcfe 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h | |||
| @@ -15,6 +15,18 @@ struct request; | |||
| 15 | struct mmc_data; | 15 | struct mmc_data; |
| 16 | struct mmc_request; | 16 | struct mmc_request; |
| 17 | 17 | ||
| 18 | enum mmc_blk_status { | ||
| 19 | MMC_BLK_SUCCESS = 0, | ||
| 20 | MMC_BLK_PARTIAL, | ||
| 21 | MMC_BLK_CMD_ERR, | ||
| 22 | MMC_BLK_RETRY, | ||
| 23 | MMC_BLK_ABORT, | ||
| 24 | MMC_BLK_DATA_ERR, | ||
| 25 | MMC_BLK_ECC_ERR, | ||
| 26 | MMC_BLK_NOMEDIUM, | ||
| 27 | MMC_BLK_NEW_REQUEST, | ||
| 28 | }; | ||
| 29 | |||
| 18 | struct mmc_command { | 30 | struct mmc_command { |
| 19 | u32 opcode; | 31 | u32 opcode; |
| 20 | u32 arg; | 32 | u32 arg; |
| @@ -150,7 +162,8 @@ struct mmc_async_req; | |||
| 150 | extern int mmc_stop_bkops(struct mmc_card *); | 162 | extern int mmc_stop_bkops(struct mmc_card *); |
| 151 | extern int mmc_read_bkops_status(struct mmc_card *); | 163 | extern int mmc_read_bkops_status(struct mmc_card *); |
| 152 | extern struct mmc_async_req *mmc_start_req(struct mmc_host *, | 164 | extern struct mmc_async_req *mmc_start_req(struct mmc_host *, |
| 153 | struct mmc_async_req *, int *); | 165 | struct mmc_async_req *, |
| 166 | enum mmc_blk_status *); | ||
| 154 | extern int mmc_interrupt_hpi(struct mmc_card *); | 167 | extern int mmc_interrupt_hpi(struct mmc_card *); |
| 155 | extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); | 168 | extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); |
| 156 | extern void mmc_wait_for_req_done(struct mmc_host *host, | 169 | extern void mmc_wait_for_req_done(struct mmc_host *host, |
| @@ -163,6 +176,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, | |||
| 163 | extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); | 176 | extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); |
| 164 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); | 177 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); |
| 165 | extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); | 178 | extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); |
| 179 | extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode); | ||
| 166 | extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); | 180 | extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); |
| 167 | 181 | ||
| 168 | #define MMC_ERASE_ARG 0x00000000 | 182 | #define MMC_ERASE_ARG 0x00000000 |
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index f5af2bd35e7f..15db6f83f53f 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
| @@ -39,6 +39,12 @@ enum { | |||
| 39 | EVENT_DATA_ERROR, | 39 | EVENT_DATA_ERROR, |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | enum dw_mci_cookie { | ||
| 43 | COOKIE_UNMAPPED, | ||
| 44 | COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */ | ||
| 45 | COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */ | ||
| 46 | }; | ||
| 47 | |||
| 42 | struct mmc_data; | 48 | struct mmc_data; |
| 43 | 49 | ||
| 44 | enum { | 50 | enum { |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0b2439441cc8..8bc884121465 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -93,8 +93,7 @@ struct mmc_host_ops { | |||
| 93 | */ | 93 | */ |
| 94 | void (*post_req)(struct mmc_host *host, struct mmc_request *req, | 94 | void (*post_req)(struct mmc_host *host, struct mmc_request *req, |
| 95 | int err); | 95 | int err); |
| 96 | void (*pre_req)(struct mmc_host *host, struct mmc_request *req, | 96 | void (*pre_req)(struct mmc_host *host, struct mmc_request *req); |
| 97 | bool is_first_req); | ||
| 98 | void (*request)(struct mmc_host *host, struct mmc_request *req); | 97 | void (*request)(struct mmc_host *host, struct mmc_request *req); |
| 99 | 98 | ||
| 100 | /* | 99 | /* |
| @@ -173,7 +172,7 @@ struct mmc_async_req { | |||
| 173 | * Check error status of completed mmc request. | 172 | * Check error status of completed mmc request. |
| 174 | * Returns 0 if success otherwise non zero. | 173 | * Returns 0 if success otherwise non zero. |
| 175 | */ | 174 | */ |
| 176 | int (*err_check) (struct mmc_card *, struct mmc_async_req *); | 175 | enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); |
| 177 | }; | 176 | }; |
| 178 | 177 | ||
| 179 | /** | 178 | /** |
| @@ -198,14 +197,12 @@ struct mmc_slot { | |||
| 198 | * @is_new_req wake up reason was new request | 197 | * @is_new_req wake up reason was new request |
| 199 | * @is_waiting_last_req mmc context waiting for single running request | 198 | * @is_waiting_last_req mmc context waiting for single running request |
| 200 | * @wait wait queue | 199 | * @wait wait queue |
| 201 | * @lock lock to protect data fields | ||
| 202 | */ | 200 | */ |
| 203 | struct mmc_context_info { | 201 | struct mmc_context_info { |
| 204 | bool is_done_rcv; | 202 | bool is_done_rcv; |
| 205 | bool is_new_req; | 203 | bool is_new_req; |
| 206 | bool is_waiting_last_req; | 204 | bool is_waiting_last_req; |
| 207 | wait_queue_head_t wait; | 205 | wait_queue_head_t wait; |
| 208 | spinlock_t lock; | ||
| 209 | }; | 206 | }; |
| 210 | 207 | ||
| 211 | struct regulator; | 208 | struct regulator; |
| @@ -495,11 +492,6 @@ static inline int mmc_host_uhs(struct mmc_host *host) | |||
| 495 | MMC_CAP_UHS_DDR50); | 492 | MMC_CAP_UHS_DDR50); |
| 496 | } | 493 | } |
| 497 | 494 | ||
| 498 | static inline int mmc_host_packed_wr(struct mmc_host *host) | ||
| 499 | { | ||
| 500 | return host->caps2 & MMC_CAP2_PACKED_WR; | ||
| 501 | } | ||
| 502 | |||
| 503 | static inline int mmc_card_hs(struct mmc_card *card) | 495 | static inline int mmc_card_hs(struct mmc_card *card) |
| 504 | { | 496 | { |
| 505 | return card->host->ios.timing == MMC_TIMING_SD_HS || | 497 | return card->host->ios.timing == MMC_TIMING_SD_HS || |
| @@ -546,6 +538,11 @@ static inline void mmc_retune_recheck(struct mmc_host *host) | |||
| 546 | host->retune_now = 1; | 538 | host->retune_now = 1; |
| 547 | } | 539 | } |
| 548 | 540 | ||
| 541 | static inline bool mmc_can_retune(struct mmc_host *host) | ||
| 542 | { | ||
| 543 | return host->can_retune == 1; | ||
| 544 | } | ||
| 545 | |||
| 549 | void mmc_retune_pause(struct mmc_host *host); | 546 | void mmc_retune_pause(struct mmc_host *host); |
| 550 | void mmc_retune_unpause(struct mmc_host *host); | 547 | void mmc_retune_unpause(struct mmc_host *host); |
| 551 | 548 | ||
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index c376209c70ef..672730acc705 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
| @@ -84,6 +84,13 @@ | |||
| 84 | #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ | 84 | #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ |
| 85 | #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ | 85 | #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ |
| 86 | 86 | ||
| 87 | /* class 11 */ | ||
| 88 | #define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ | ||
| 89 | #define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ | ||
| 90 | #define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ | ||
| 91 | #define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ | ||
| 92 | #define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ | ||
| 93 | |||
| 87 | static inline bool mmc_op_multi(u32 opcode) | 94 | static inline bool mmc_op_multi(u32 opcode) |
| 88 | { | 95 | { |
| 89 | return opcode == MMC_WRITE_MULTIPLE_BLOCK || | 96 | return opcode == MMC_WRITE_MULTIPLE_BLOCK || |
| @@ -272,6 +279,7 @@ struct _mmc_csd { | |||
| 272 | * EXT_CSD fields | 279 | * EXT_CSD fields |
| 273 | */ | 280 | */ |
| 274 | 281 | ||
| 282 | #define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ | ||
| 275 | #define EXT_CSD_FLUSH_CACHE 32 /* W */ | 283 | #define EXT_CSD_FLUSH_CACHE 32 /* W */ |
| 276 | #define EXT_CSD_CACHE_CTRL 33 /* R/W */ | 284 | #define EXT_CSD_CACHE_CTRL 33 /* R/W */ |
| 277 | #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ | 285 | #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ |
| @@ -331,6 +339,8 @@ struct _mmc_csd { | |||
| 331 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ | 339 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ |
| 332 | #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ | 340 | #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ |
| 333 | #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ | 341 | #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ |
| 342 | #define EXT_CSD_CMDQ_DEPTH 307 /* RO */ | ||
| 343 | #define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ | ||
| 334 | #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ | 344 | #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ |
| 335 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ | 345 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ |
| 336 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ | 346 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ |
| @@ -438,6 +448,13 @@ struct _mmc_csd { | |||
| 438 | #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 | 448 | #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 |
| 439 | 449 | ||
| 440 | /* | 450 | /* |
| 451 | * Command Queue | ||
| 452 | */ | ||
| 453 | #define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) | ||
| 454 | #define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) | ||
| 455 | #define EXT_CSD_CMDQ_SUPPORTED BIT(0) | ||
| 456 | |||
| 457 | /* | ||
| 441 | * MMC_SWITCH access modes | 458 | * MMC_SWITCH access modes |
| 442 | */ | 459 | */ |
| 443 | 460 | ||
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 3945a8c9d3cb..a7972cd3bc14 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h | |||
| @@ -29,5 +29,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, | |||
| 29 | void mmc_gpio_set_cd_isr(struct mmc_host *host, | 29 | void mmc_gpio_set_cd_isr(struct mmc_host *host, |
| 30 | irqreturn_t (*isr)(int irq, void *dev_id)); | 30 | irqreturn_t (*isr)(int irq, void *dev_id)); |
| 31 | void mmc_gpiod_request_cd_irq(struct mmc_host *host); | 31 | void mmc_gpiod_request_cd_irq(struct mmc_host *host); |
| 32 | bool mmc_can_gpio_cd(struct mmc_host *host); | ||
| 32 | 33 | ||
| 33 | #endif | 34 | #endif |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 0f088f3a2fed..36d9896fbc1e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -246,7 +246,7 @@ struct lruvec { | |||
| 246 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) | 246 | #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) |
| 247 | 247 | ||
| 248 | /* LRU Isolation modes. */ | 248 | /* LRU Isolation modes. */ |
| 249 | typedef unsigned __bitwise__ isolate_mode_t; | 249 | typedef unsigned __bitwise isolate_mode_t; |
| 250 | 250 | ||
| 251 | enum zone_watermarks { | 251 | enum zone_watermarks { |
| 252 | WMARK_MIN, | 252 | WMARK_MIN, |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ed84c07f6a51..8a57f0b1242d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -175,7 +175,8 @@ struct ap_device_id { | |||
| 175 | kernel_ulong_t driver_info; | 175 | kernel_ulong_t driver_info; |
| 176 | }; | 176 | }; |
| 177 | 177 | ||
| 178 | #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 | 178 | #define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 |
| 179 | #define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 | ||
| 179 | 180 | ||
| 180 | /* s390 css bus devices (subchannels) */ | 181 | /* s390 css bus devices (subchannels) */ |
| 181 | struct css_device_id { | 182 | struct css_device_id { |
diff --git a/include/linux/module.h b/include/linux/module.h index 0c3207d26ac0..7c84273d60b9 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -399,7 +399,7 @@ struct module { | |||
| 399 | /* Arch-specific module values */ | 399 | /* Arch-specific module values */ |
| 400 | struct mod_arch_specific arch; | 400 | struct mod_arch_specific arch; |
| 401 | 401 | ||
| 402 | unsigned int taints; /* same bits as kernel:tainted */ | 402 | unsigned long taints; /* same bits as kernel:taint_flags */ |
| 403 | 403 | ||
| 404 | #ifdef CONFIG_GENERIC_BUG | 404 | #ifdef CONFIG_GENERIC_BUG |
| 405 | /* Support for BUG */ | 405 | /* Support for BUG */ |
| @@ -412,7 +412,7 @@ struct module { | |||
| 412 | /* Protected by RCU and/or module_mutex: use rcu_dereference() */ | 412 | /* Protected by RCU and/or module_mutex: use rcu_dereference() */ |
| 413 | struct mod_kallsyms *kallsyms; | 413 | struct mod_kallsyms *kallsyms; |
| 414 | struct mod_kallsyms core_kallsyms; | 414 | struct mod_kallsyms core_kallsyms; |
| 415 | 415 | ||
| 416 | /* Section attributes */ | 416 | /* Section attributes */ |
| 417 | struct module_sect_attrs *sect_attrs; | 417 | struct module_sect_attrs *sect_attrs; |
| 418 | 418 | ||
diff --git a/include/linux/mount.h b/include/linux/mount.h index 1172cce949a4..c6f55158d5e5 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
| @@ -79,12 +79,12 @@ extern void mnt_drop_write(struct vfsmount *mnt); | |||
| 79 | extern void mnt_drop_write_file(struct file *file); | 79 | extern void mnt_drop_write_file(struct file *file); |
| 80 | extern void mntput(struct vfsmount *mnt); | 80 | extern void mntput(struct vfsmount *mnt); |
| 81 | extern struct vfsmount *mntget(struct vfsmount *mnt); | 81 | extern struct vfsmount *mntget(struct vfsmount *mnt); |
| 82 | extern struct vfsmount *mnt_clone_internal(struct path *path); | 82 | extern struct vfsmount *mnt_clone_internal(const struct path *path); |
| 83 | extern int __mnt_is_readonly(struct vfsmount *mnt); | 83 | extern int __mnt_is_readonly(struct vfsmount *mnt); |
| 84 | extern bool mnt_may_suid(struct vfsmount *mnt); | 84 | extern bool mnt_may_suid(struct vfsmount *mnt); |
| 85 | 85 | ||
| 86 | struct path; | 86 | struct path; |
| 87 | extern struct vfsmount *clone_private_mount(struct path *path); | 87 | extern struct vfsmount *clone_private_mount(const struct path *path); |
| 88 | 88 | ||
| 89 | struct file_system_type; | 89 | struct file_system_type; |
| 90 | extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, | 90 | extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, |
| @@ -98,4 +98,6 @@ extern dev_t name_to_dev_t(const char *name); | |||
| 98 | 98 | ||
| 99 | extern unsigned int sysctl_mount_max; | 99 | extern unsigned int sysctl_mount_max; |
| 100 | 100 | ||
| 101 | extern bool path_is_mountpoint(const struct path *path); | ||
| 102 | |||
| 101 | #endif /* _LINUX_MOUNT_H */ | 103 | #endif /* _LINUX_MOUNT_H */ |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index d8905a229f34..c5f3a012ae62 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -142,6 +142,12 @@ enum nand_ecc_algo { | |||
| 142 | */ | 142 | */ |
| 143 | #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) | 143 | #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) |
| 144 | #define NAND_ECC_MAXIMIZE BIT(1) | 144 | #define NAND_ECC_MAXIMIZE BIT(1) |
| 145 | /* | ||
| 146 | * If your controller already sends the required NAND commands when | ||
| 147 | * reading or writing a page, then the framework is not supposed to | ||
| 148 | * send READ0 and SEQIN/PAGEPROG respectively. | ||
| 149 | */ | ||
| 150 | #define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2) | ||
| 145 | 151 | ||
| 146 | /* Bit mask for flags passed to do_nand_read_ecc */ | 152 | /* Bit mask for flags passed to do_nand_read_ecc */ |
| 147 | #define NAND_GET_DEVICE 0x80 | 153 | #define NAND_GET_DEVICE 0x80 |
| @@ -186,6 +192,7 @@ enum nand_ecc_algo { | |||
| 186 | /* Macros to identify the above */ | 192 | /* Macros to identify the above */ |
| 187 | #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) | 193 | #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) |
| 188 | #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) | 194 | #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) |
| 195 | #define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE) | ||
| 189 | 196 | ||
| 190 | /* Non chip related options */ | 197 | /* Non chip related options */ |
| 191 | /* This option skips the bbt scan during initialization. */ | 198 | /* This option skips the bbt scan during initialization. */ |
| @@ -210,6 +217,16 @@ enum nand_ecc_algo { | |||
| 210 | */ | 217 | */ |
| 211 | #define NAND_USE_BOUNCE_BUFFER 0x00100000 | 218 | #define NAND_USE_BOUNCE_BUFFER 0x00100000 |
| 212 | 219 | ||
| 220 | /* | ||
| 221 | * In case your controller is implementing ->cmd_ctrl() and is relying on the | ||
| 222 | * default ->cmdfunc() implementation, you may want to let the core handle the | ||
| 223 | * tCCS delay which is required when a column change (RNDIN or RNDOUT) is | ||
| 224 | * requested. | ||
| 225 | * If your controller already takes care of this delay, you don't need to set | ||
| 226 | * this flag. | ||
| 227 | */ | ||
| 228 | #define NAND_WAIT_TCCS 0x00200000 | ||
| 229 | |||
| 213 | /* Options set by nand scan */ | 230 | /* Options set by nand scan */ |
| 214 | /* Nand scan has allocated controller struct */ | 231 | /* Nand scan has allocated controller struct */ |
| 215 | #define NAND_CONTROLLER_ALLOC 0x80000000 | 232 | #define NAND_CONTROLLER_ALLOC 0x80000000 |
| @@ -558,6 +575,11 @@ struct nand_ecc_ctrl { | |||
| 558 | int page); | 575 | int page); |
| 559 | }; | 576 | }; |
| 560 | 577 | ||
| 578 | static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc) | ||
| 579 | { | ||
| 580 | return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS); | ||
| 581 | } | ||
| 582 | |||
| 561 | /** | 583 | /** |
| 562 | * struct nand_buffers - buffer structure for read/write | 584 | * struct nand_buffers - buffer structure for read/write |
| 563 | * @ecccalc: buffer pointer for calculated ECC, size is oobsize. | 585 | * @ecccalc: buffer pointer for calculated ECC, size is oobsize. |
| @@ -584,6 +606,10 @@ struct nand_buffers { | |||
| 584 | * | 606 | * |
| 585 | * All these timings are expressed in picoseconds. | 607 | * All these timings are expressed in picoseconds. |
| 586 | * | 608 | * |
| 609 | * @tBERS_max: Block erase time | ||
| 610 | * @tCCS_min: Change column setup time | ||
| 611 | * @tPROG_max: Page program time | ||
| 612 | * @tR_max: Page read time | ||
| 587 | * @tALH_min: ALE hold time | 613 | * @tALH_min: ALE hold time |
| 588 | * @tADL_min: ALE to data loading time | 614 | * @tADL_min: ALE to data loading time |
| 589 | * @tALS_min: ALE setup time | 615 | * @tALS_min: ALE setup time |
| @@ -621,6 +647,10 @@ struct nand_buffers { | |||
| 621 | * @tWW_min: WP# transition to WE# low | 647 | * @tWW_min: WP# transition to WE# low |
| 622 | */ | 648 | */ |
| 623 | struct nand_sdr_timings { | 649 | struct nand_sdr_timings { |
| 650 | u32 tBERS_max; | ||
| 651 | u32 tCCS_min; | ||
| 652 | u32 tPROG_max; | ||
| 653 | u32 tR_max; | ||
| 624 | u32 tALH_min; | 654 | u32 tALH_min; |
| 625 | u32 tADL_min; | 655 | u32 tADL_min; |
| 626 | u32 tALS_min; | 656 | u32 tALS_min; |
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h deleted file mode 100644 index 4ac8b1977b73..000000000000 --- a/include/linux/mutex-debug.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | #ifndef __LINUX_MUTEX_DEBUG_H | ||
| 2 | #define __LINUX_MUTEX_DEBUG_H | ||
| 3 | |||
| 4 | #include <linux/linkage.h> | ||
| 5 | #include <linux/lockdep.h> | ||
| 6 | #include <linux/debug_locks.h> | ||
| 7 | |||
| 8 | /* | ||
| 9 | * Mutexes - debugging helpers: | ||
| 10 | */ | ||
| 11 | |||
| 12 | #define __DEBUG_MUTEX_INITIALIZER(lockname) \ | ||
| 13 | , .magic = &lockname | ||
| 14 | |||
| 15 | #define mutex_init(mutex) \ | ||
| 16 | do { \ | ||
| 17 | static struct lock_class_key __key; \ | ||
| 18 | \ | ||
| 19 | __mutex_init((mutex), #mutex, &__key); \ | ||
| 20 | } while (0) | ||
| 21 | |||
| 22 | extern void mutex_destroy(struct mutex *lock); | ||
| 23 | |||
| 24 | #endif | ||
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 2cb7531e7d7a..b97870f2debd 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/atomic.h> | 18 | #include <linux/atomic.h> |
| 19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
| 20 | #include <linux/osq_lock.h> | 20 | #include <linux/osq_lock.h> |
| 21 | #include <linux/debug_locks.h> | ||
| 21 | 22 | ||
| 22 | /* | 23 | /* |
| 23 | * Simple, straightforward mutexes with strict semantics: | 24 | * Simple, straightforward mutexes with strict semantics: |
| @@ -48,16 +49,12 @@ | |||
| 48 | * locks and tasks (and only those tasks) | 49 | * locks and tasks (and only those tasks) |
| 49 | */ | 50 | */ |
| 50 | struct mutex { | 51 | struct mutex { |
| 51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ | 52 | atomic_long_t owner; |
| 52 | atomic_t count; | ||
| 53 | spinlock_t wait_lock; | 53 | spinlock_t wait_lock; |
| 54 | struct list_head wait_list; | ||
| 55 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) | ||
| 56 | struct task_struct *owner; | ||
| 57 | #endif | ||
| 58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 54 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 59 | struct optimistic_spin_queue osq; /* Spinner MCS lock */ | 55 | struct optimistic_spin_queue osq; /* Spinner MCS lock */ |
| 60 | #endif | 56 | #endif |
| 57 | struct list_head wait_list; | ||
| 61 | #ifdef CONFIG_DEBUG_MUTEXES | 58 | #ifdef CONFIG_DEBUG_MUTEXES |
| 62 | void *magic; | 59 | void *magic; |
| 63 | #endif | 60 | #endif |
| @@ -66,6 +63,11 @@ struct mutex { | |||
| 66 | #endif | 63 | #endif |
| 67 | }; | 64 | }; |
| 68 | 65 | ||
| 66 | static inline struct task_struct *__mutex_owner(struct mutex *lock) | ||
| 67 | { | ||
| 68 | return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03); | ||
| 69 | } | ||
| 70 | |||
| 69 | /* | 71 | /* |
| 70 | * This is the control structure for tasks blocked on mutex, | 72 | * This is the control structure for tasks blocked on mutex, |
| 71 | * which resides on the blocked task's kernel stack: | 73 | * which resides on the blocked task's kernel stack: |
| @@ -79,9 +81,20 @@ struct mutex_waiter { | |||
| 79 | }; | 81 | }; |
| 80 | 82 | ||
| 81 | #ifdef CONFIG_DEBUG_MUTEXES | 83 | #ifdef CONFIG_DEBUG_MUTEXES |
| 82 | # include <linux/mutex-debug.h> | 84 | |
| 85 | #define __DEBUG_MUTEX_INITIALIZER(lockname) \ | ||
| 86 | , .magic = &lockname | ||
| 87 | |||
| 88 | extern void mutex_destroy(struct mutex *lock); | ||
| 89 | |||
| 83 | #else | 90 | #else |
| 91 | |||
| 84 | # define __DEBUG_MUTEX_INITIALIZER(lockname) | 92 | # define __DEBUG_MUTEX_INITIALIZER(lockname) |
| 93 | |||
| 94 | static inline void mutex_destroy(struct mutex *lock) {} | ||
| 95 | |||
| 96 | #endif | ||
| 97 | |||
| 85 | /** | 98 | /** |
| 86 | * mutex_init - initialize the mutex | 99 | * mutex_init - initialize the mutex |
| 87 | * @mutex: the mutex to be initialized | 100 | * @mutex: the mutex to be initialized |
| @@ -90,14 +103,12 @@ struct mutex_waiter { | |||
| 90 | * | 103 | * |
| 91 | * It is not allowed to initialize an already locked mutex. | 104 | * It is not allowed to initialize an already locked mutex. |
| 92 | */ | 105 | */ |
| 93 | # define mutex_init(mutex) \ | 106 | #define mutex_init(mutex) \ |
| 94 | do { \ | 107 | do { \ |
| 95 | static struct lock_class_key __key; \ | 108 | static struct lock_class_key __key; \ |
| 96 | \ | 109 | \ |
| 97 | __mutex_init((mutex), #mutex, &__key); \ | 110 | __mutex_init((mutex), #mutex, &__key); \ |
| 98 | } while (0) | 111 | } while (0) |
| 99 | static inline void mutex_destroy(struct mutex *lock) {} | ||
| 100 | #endif | ||
| 101 | 112 | ||
| 102 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 113 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 103 | # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ | 114 | # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ |
| @@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {} | |||
| 107 | #endif | 118 | #endif |
| 108 | 119 | ||
| 109 | #define __MUTEX_INITIALIZER(lockname) \ | 120 | #define __MUTEX_INITIALIZER(lockname) \ |
| 110 | { .count = ATOMIC_INIT(1) \ | 121 | { .owner = ATOMIC_LONG_INIT(0) \ |
| 111 | , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ | 122 | , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ |
| 112 | , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ | 123 | , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ |
| 113 | __DEBUG_MUTEX_INITIALIZER(lockname) \ | 124 | __DEBUG_MUTEX_INITIALIZER(lockname) \ |
| @@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name, | |||
| 127 | */ | 138 | */ |
| 128 | static inline int mutex_is_locked(struct mutex *lock) | 139 | static inline int mutex_is_locked(struct mutex *lock) |
| 129 | { | 140 | { |
| 130 | return atomic_read(&lock->count) != 1; | 141 | /* |
| 142 | * XXX think about spin_is_locked | ||
| 143 | */ | ||
| 144 | return __mutex_owner(lock) != NULL; | ||
| 131 | } | 145 | } |
| 132 | 146 | ||
| 133 | /* | 147 | /* |
| @@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock); | |||
| 175 | 189 | ||
| 176 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | 190 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); |
| 177 | 191 | ||
| 192 | /* | ||
| 193 | * These values are chosen such that FAIL and SUCCESS match the | ||
| 194 | * values of the regular mutex_trylock(). | ||
| 195 | */ | ||
| 196 | enum mutex_trylock_recursive_enum { | ||
| 197 | MUTEX_TRYLOCK_FAILED = 0, | ||
| 198 | MUTEX_TRYLOCK_SUCCESS = 1, | ||
| 199 | MUTEX_TRYLOCK_RECURSIVE, | ||
| 200 | }; | ||
| 201 | |||
| 202 | /** | ||
| 203 | * mutex_trylock_recursive - trylock variant that allows recursive locking | ||
| 204 | * @lock: mutex to be locked | ||
| 205 | * | ||
| 206 | * This function should not be used, _ever_. It is purely for hysterical GEM | ||
| 207 | * raisins, and once those are gone this will be removed. | ||
| 208 | * | ||
| 209 | * Returns: | ||
| 210 | * MUTEX_TRYLOCK_FAILED - trylock failed, | ||
| 211 | * MUTEX_TRYLOCK_SUCCESS - lock acquired, | ||
| 212 | * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. | ||
| 213 | */ | ||
| 214 | static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum | ||
| 215 | mutex_trylock_recursive(struct mutex *lock) | ||
| 216 | { | ||
| 217 | if (unlikely(__mutex_owner(lock) == current)) | ||
| 218 | return MUTEX_TRYLOCK_RECURSIVE; | ||
| 219 | |||
| 220 | return mutex_trylock(lock); | ||
| 221 | } | ||
| 222 | |||
| 178 | #endif /* __LINUX_MUTEX_H */ | 223 | #endif /* __LINUX_MUTEX_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e16a2a980ea8..994f7423a74b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -192,6 +192,7 @@ struct net_device_stats { | |||
| 192 | #ifdef CONFIG_RPS | 192 | #ifdef CONFIG_RPS |
| 193 | #include <linux/static_key.h> | 193 | #include <linux/static_key.h> |
| 194 | extern struct static_key rps_needed; | 194 | extern struct static_key rps_needed; |
| 195 | extern struct static_key rfs_needed; | ||
| 195 | #endif | 196 | #endif |
| 196 | 197 | ||
| 197 | struct neighbour; | 198 | struct neighbour; |
| @@ -316,7 +317,6 @@ struct napi_struct { | |||
| 316 | unsigned int gro_count; | 317 | unsigned int gro_count; |
| 317 | int (*poll)(struct napi_struct *, int); | 318 | int (*poll)(struct napi_struct *, int); |
| 318 | #ifdef CONFIG_NETPOLL | 319 | #ifdef CONFIG_NETPOLL |
| 319 | spinlock_t poll_lock; | ||
| 320 | int poll_owner; | 320 | int poll_owner; |
| 321 | #endif | 321 | #endif |
| 322 | struct net_device *dev; | 322 | struct net_device *dev; |
| @@ -334,6 +334,16 @@ enum { | |||
| 334 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ | 334 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
| 335 | NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ | 335 | NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ |
| 336 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ | 336 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ |
| 337 | NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ | ||
| 338 | }; | ||
| 339 | |||
| 340 | enum { | ||
| 341 | NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED), | ||
| 342 | NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE), | ||
| 343 | NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC), | ||
| 344 | NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED), | ||
| 345 | NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL), | ||
| 346 | NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL), | ||
| 337 | }; | 347 | }; |
| 338 | 348 | ||
| 339 | enum gro_result { | 349 | enum gro_result { |
| @@ -453,32 +463,22 @@ static inline bool napi_reschedule(struct napi_struct *napi) | |||
| 453 | return false; | 463 | return false; |
| 454 | } | 464 | } |
| 455 | 465 | ||
| 456 | void __napi_complete(struct napi_struct *n); | 466 | bool __napi_complete(struct napi_struct *n); |
| 457 | void napi_complete_done(struct napi_struct *n, int work_done); | 467 | bool napi_complete_done(struct napi_struct *n, int work_done); |
| 458 | /** | 468 | /** |
| 459 | * napi_complete - NAPI processing complete | 469 | * napi_complete - NAPI processing complete |
| 460 | * @n: NAPI context | 470 | * @n: NAPI context |
| 461 | * | 471 | * |
| 462 | * Mark NAPI processing as complete. | 472 | * Mark NAPI processing as complete. |
| 463 | * Consider using napi_complete_done() instead. | 473 | * Consider using napi_complete_done() instead. |
| 474 | * Return false if device should avoid rearming interrupts. | ||
| 464 | */ | 475 | */ |
| 465 | static inline void napi_complete(struct napi_struct *n) | 476 | static inline bool napi_complete(struct napi_struct *n) |
| 466 | { | 477 | { |
| 467 | return napi_complete_done(n, 0); | 478 | return napi_complete_done(n, 0); |
| 468 | } | 479 | } |
| 469 | 480 | ||
| 470 | /** | 481 | /** |
| 471 | * napi_hash_add - add a NAPI to global hashtable | ||
| 472 | * @napi: NAPI context | ||
| 473 | * | ||
| 474 | * Generate a new napi_id and store a @napi under it in napi_hash. | ||
| 475 | * Used for busy polling (CONFIG_NET_RX_BUSY_POLL). | ||
| 476 | * Note: This is normally automatically done from netif_napi_add(), | ||
| 477 | * so might disappear in a future Linux version. | ||
| 478 | */ | ||
| 479 | void napi_hash_add(struct napi_struct *napi); | ||
| 480 | |||
| 481 | /** | ||
| 482 | * napi_hash_del - remove a NAPI from global table | 482 | * napi_hash_del - remove a NAPI from global table |
| 483 | * @napi: NAPI context | 483 | * @napi: NAPI context |
| 484 | * | 484 | * |
| @@ -732,8 +732,8 @@ struct xps_dev_maps { | |||
| 732 | struct rcu_head rcu; | 732 | struct rcu_head rcu; |
| 733 | struct xps_map __rcu *cpu_map[0]; | 733 | struct xps_map __rcu *cpu_map[0]; |
| 734 | }; | 734 | }; |
| 735 | #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ | 735 | #define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ |
| 736 | (nr_cpu_ids * sizeof(struct xps_map *))) | 736 | (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) |
| 737 | #endif /* CONFIG_XPS */ | 737 | #endif /* CONFIG_XPS */ |
| 738 | 738 | ||
| 739 | #define TC_MAX_QUEUE 16 | 739 | #define TC_MAX_QUEUE 16 |
| @@ -803,6 +803,7 @@ struct tc_to_netdev { | |||
| 803 | struct tc_cls_matchall_offload *cls_mall; | 803 | struct tc_cls_matchall_offload *cls_mall; |
| 804 | struct tc_cls_bpf_offload *cls_bpf; | 804 | struct tc_cls_bpf_offload *cls_bpf; |
| 805 | }; | 805 | }; |
| 806 | bool egress_dev; | ||
| 806 | }; | 807 | }; |
| 807 | 808 | ||
| 808 | /* These structures hold the attributes of xdp state that are being passed | 809 | /* These structures hold the attributes of xdp state that are being passed |
| @@ -926,7 +927,7 @@ struct netdev_xdp { | |||
| 926 | * 3. Update dev->stats asynchronously and atomically, and define | 927 | * 3. Update dev->stats asynchronously and atomically, and define |
| 927 | * neither operation. | 928 | * neither operation. |
| 928 | * | 929 | * |
| 929 | * bool (*ndo_has_offload_stats)(int attr_id) | 930 | * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) |
| 930 | * Return true if this device supports offload stats of this attr_id. | 931 | * Return true if this device supports offload stats of this attr_id. |
| 931 | * | 932 | * |
| 932 | * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, | 933 | * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, |
| @@ -1166,7 +1167,7 @@ struct net_device_ops { | |||
| 1166 | 1167 | ||
| 1167 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 1168 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, |
| 1168 | struct rtnl_link_stats64 *storage); | 1169 | struct rtnl_link_stats64 *storage); |
| 1169 | bool (*ndo_has_offload_stats)(int attr_id); | 1170 | bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); |
| 1170 | int (*ndo_get_offload_stats)(int attr_id, | 1171 | int (*ndo_get_offload_stats)(int attr_id, |
| 1171 | const struct net_device *dev, | 1172 | const struct net_device *dev, |
| 1172 | void *attr_data); | 1173 | void *attr_data); |
| @@ -1456,7 +1457,6 @@ enum netdev_priv_flags { | |||
| 1456 | * @ptype_specific: Device-specific, protocol-specific packet handlers | 1457 | * @ptype_specific: Device-specific, protocol-specific packet handlers |
| 1457 | * | 1458 | * |
| 1458 | * @adj_list: Directly linked devices, like slaves for bonding | 1459 | * @adj_list: Directly linked devices, like slaves for bonding |
| 1459 | * @all_adj_list: All linked devices, *including* neighbours | ||
| 1460 | * @features: Currently active device features | 1460 | * @features: Currently active device features |
| 1461 | * @hw_features: User-changeable features | 1461 | * @hw_features: User-changeable features |
| 1462 | * | 1462 | * |
| @@ -1506,6 +1506,8 @@ enum netdev_priv_flags { | |||
| 1506 | * @if_port: Selectable AUI, TP, ... | 1506 | * @if_port: Selectable AUI, TP, ... |
| 1507 | * @dma: DMA channel | 1507 | * @dma: DMA channel |
| 1508 | * @mtu: Interface MTU value | 1508 | * @mtu: Interface MTU value |
| 1509 | * @min_mtu: Interface Minimum MTU value | ||
| 1510 | * @max_mtu: Interface Maximum MTU value | ||
| 1509 | * @type: Interface hardware type | 1511 | * @type: Interface hardware type |
| 1510 | * @hard_header_len: Maximum hardware header length. | 1512 | * @hard_header_len: Maximum hardware header length. |
| 1511 | * | 1513 | * |
| @@ -1673,11 +1675,6 @@ struct net_device { | |||
| 1673 | struct list_head lower; | 1675 | struct list_head lower; |
| 1674 | } adj_list; | 1676 | } adj_list; |
| 1675 | 1677 | ||
| 1676 | struct { | ||
| 1677 | struct list_head upper; | ||
| 1678 | struct list_head lower; | ||
| 1679 | } all_adj_list; | ||
| 1680 | |||
| 1681 | netdev_features_t features; | 1678 | netdev_features_t features; |
| 1682 | netdev_features_t hw_features; | 1679 | netdev_features_t hw_features; |
| 1683 | netdev_features_t wanted_features; | 1680 | netdev_features_t wanted_features; |
| @@ -1726,6 +1723,8 @@ struct net_device { | |||
| 1726 | unsigned char dma; | 1723 | unsigned char dma; |
| 1727 | 1724 | ||
| 1728 | unsigned int mtu; | 1725 | unsigned int mtu; |
| 1726 | unsigned int min_mtu; | ||
| 1727 | unsigned int max_mtu; | ||
| 1729 | unsigned short type; | 1728 | unsigned short type; |
| 1730 | unsigned short hard_header_len; | 1729 | unsigned short hard_header_len; |
| 1731 | 1730 | ||
| @@ -1922,34 +1921,10 @@ int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |||
| 1922 | return 0; | 1921 | return 0; |
| 1923 | } | 1922 | } |
| 1924 | 1923 | ||
| 1925 | static inline | 1924 | int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); |
| 1926 | void netdev_reset_tc(struct net_device *dev) | 1925 | void netdev_reset_tc(struct net_device *dev); |
| 1927 | { | 1926 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); |
| 1928 | dev->num_tc = 0; | 1927 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc); |
| 1929 | memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | ||
| 1930 | memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | ||
| 1931 | } | ||
| 1932 | |||
| 1933 | static inline | ||
| 1934 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | ||
| 1935 | { | ||
| 1936 | if (tc >= dev->num_tc) | ||
| 1937 | return -EINVAL; | ||
| 1938 | |||
| 1939 | dev->tc_to_txq[tc].count = count; | ||
| 1940 | dev->tc_to_txq[tc].offset = offset; | ||
| 1941 | return 0; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | static inline | ||
| 1945 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | ||
| 1946 | { | ||
| 1947 | if (num_tc > TC_MAX_QUEUE) | ||
| 1948 | return -EINVAL; | ||
| 1949 | |||
| 1950 | dev->num_tc = num_tc; | ||
| 1951 | return 0; | ||
| 1952 | } | ||
| 1953 | 1928 | ||
| 1954 | static inline | 1929 | static inline |
| 1955 | int netdev_get_num_tc(struct net_device *dev) | 1930 | int netdev_get_num_tc(struct net_device *dev) |
| @@ -2686,71 +2661,6 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, | |||
| 2686 | remcsum_unadjust((__sum16 *)ptr, grc->delta); | 2661 | remcsum_unadjust((__sum16 *)ptr, grc->delta); |
| 2687 | } | 2662 | } |
| 2688 | 2663 | ||
| 2689 | struct skb_csum_offl_spec { | ||
| 2690 | __u16 ipv4_okay:1, | ||
| 2691 | ipv6_okay:1, | ||
| 2692 | encap_okay:1, | ||
| 2693 | ip_options_okay:1, | ||
| 2694 | ext_hdrs_okay:1, | ||
| 2695 | tcp_okay:1, | ||
| 2696 | udp_okay:1, | ||
| 2697 | sctp_okay:1, | ||
| 2698 | vlan_okay:1, | ||
| 2699 | no_encapped_ipv6:1, | ||
| 2700 | no_not_encapped:1; | ||
| 2701 | }; | ||
| 2702 | |||
| 2703 | bool __skb_csum_offload_chk(struct sk_buff *skb, | ||
| 2704 | const struct skb_csum_offl_spec *spec, | ||
| 2705 | bool *csum_encapped, | ||
| 2706 | bool csum_help); | ||
| 2707 | |||
| 2708 | static inline bool skb_csum_offload_chk(struct sk_buff *skb, | ||
| 2709 | const struct skb_csum_offl_spec *spec, | ||
| 2710 | bool *csum_encapped, | ||
| 2711 | bool csum_help) | ||
| 2712 | { | ||
| 2713 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
| 2714 | return false; | ||
| 2715 | |||
| 2716 | return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); | ||
| 2717 | } | ||
| 2718 | |||
| 2719 | static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, | ||
| 2720 | const struct skb_csum_offl_spec *spec) | ||
| 2721 | { | ||
| 2722 | bool csum_encapped; | ||
| 2723 | |||
| 2724 | return skb_csum_offload_chk(skb, spec, &csum_encapped, true); | ||
| 2725 | } | ||
| 2726 | |||
| 2727 | static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) | ||
| 2728 | { | ||
| 2729 | static const struct skb_csum_offl_spec csum_offl_spec = { | ||
| 2730 | .ipv4_okay = 1, | ||
| 2731 | .ip_options_okay = 1, | ||
| 2732 | .ipv6_okay = 1, | ||
| 2733 | .vlan_okay = 1, | ||
| 2734 | .tcp_okay = 1, | ||
| 2735 | .udp_okay = 1, | ||
| 2736 | }; | ||
| 2737 | |||
| 2738 | return skb_csum_offload_chk_help(skb, &csum_offl_spec); | ||
| 2739 | } | ||
| 2740 | |||
| 2741 | static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) | ||
| 2742 | { | ||
| 2743 | static const struct skb_csum_offl_spec csum_offl_spec = { | ||
| 2744 | .ipv4_okay = 1, | ||
| 2745 | .ip_options_okay = 1, | ||
| 2746 | .tcp_okay = 1, | ||
| 2747 | .udp_okay = 1, | ||
| 2748 | .vlan_okay = 1, | ||
| 2749 | }; | ||
| 2750 | |||
| 2751 | return skb_csum_offload_chk_help(skb, &csum_offl_spec); | ||
| 2752 | } | ||
| 2753 | |||
| 2754 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 2664 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
| 2755 | unsigned short type, | 2665 | unsigned short type, |
| 2756 | const void *daddr, const void *saddr, | 2666 | const void *daddr, const void *saddr, |
| @@ -3345,7 +3255,7 @@ int dev_get_phys_port_id(struct net_device *dev, | |||
| 3345 | int dev_get_phys_port_name(struct net_device *dev, | 3255 | int dev_get_phys_port_name(struct net_device *dev, |
| 3346 | char *name, size_t len); | 3256 | char *name, size_t len); |
| 3347 | int dev_change_proto_down(struct net_device *dev, bool proto_down); | 3257 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
| 3348 | int dev_change_xdp_fd(struct net_device *dev, int fd); | 3258 | int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags); |
| 3349 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); | 3259 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); |
| 3350 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 3260 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
| 3351 | struct netdev_queue *txq, int *ret); | 3261 | struct netdev_queue *txq, int *ret); |
| @@ -3554,6 +3464,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | |||
| 3554 | txq->xmit_lock_owner = cpu; | 3464 | txq->xmit_lock_owner = cpu; |
| 3555 | } | 3465 | } |
| 3556 | 3466 | ||
| 3467 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) | ||
| 3468 | { | ||
| 3469 | __acquire(&txq->_xmit_lock); | ||
| 3470 | return true; | ||
| 3471 | } | ||
| 3472 | |||
| 3473 | static inline void __netif_tx_release(struct netdev_queue *txq) | ||
| 3474 | { | ||
| 3475 | __release(&txq->_xmit_lock); | ||
| 3476 | } | ||
| 3477 | |||
| 3557 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | 3478 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
| 3558 | { | 3479 | { |
| 3559 | spin_lock_bh(&txq->_xmit_lock); | 3480 | spin_lock_bh(&txq->_xmit_lock); |
| @@ -3655,17 +3576,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) | |||
| 3655 | #define HARD_TX_LOCK(dev, txq, cpu) { \ | 3576 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
| 3656 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | 3577 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
| 3657 | __netif_tx_lock(txq, cpu); \ | 3578 | __netif_tx_lock(txq, cpu); \ |
| 3579 | } else { \ | ||
| 3580 | __netif_tx_acquire(txq); \ | ||
| 3658 | } \ | 3581 | } \ |
| 3659 | } | 3582 | } |
| 3660 | 3583 | ||
| 3661 | #define HARD_TX_TRYLOCK(dev, txq) \ | 3584 | #define HARD_TX_TRYLOCK(dev, txq) \ |
| 3662 | (((dev->features & NETIF_F_LLTX) == 0) ? \ | 3585 | (((dev->features & NETIF_F_LLTX) == 0) ? \ |
| 3663 | __netif_tx_trylock(txq) : \ | 3586 | __netif_tx_trylock(txq) : \ |
| 3664 | true ) | 3587 | __netif_tx_acquire(txq)) |
| 3665 | 3588 | ||
| 3666 | #define HARD_TX_UNLOCK(dev, txq) { \ | 3589 | #define HARD_TX_UNLOCK(dev, txq) { \ |
| 3667 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | 3590 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
| 3668 | __netif_tx_unlock(txq); \ | 3591 | __netif_tx_unlock(txq); \ |
| 3592 | } else { \ | ||
| 3593 | __netif_tx_release(txq); \ | ||
| 3669 | } \ | 3594 | } \ |
| 3670 | } | 3595 | } |
| 3671 | 3596 | ||
| @@ -3884,12 +3809,13 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, | |||
| 3884 | updev; \ | 3809 | updev; \ |
| 3885 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) | 3810 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) |
| 3886 | 3811 | ||
| 3887 | /* iterate through upper list, must be called under RCU read lock */ | 3812 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
| 3888 | #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ | 3813 | int (*fn)(struct net_device *upper_dev, |
| 3889 | for (iter = &(dev)->all_adj_list.upper, \ | 3814 | void *data), |
| 3890 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ | 3815 | void *data); |
| 3891 | updev; \ | 3816 | |
| 3892 | updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) | 3817 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, |
| 3818 | struct net_device *upper_dev); | ||
| 3893 | 3819 | ||
| 3894 | void *netdev_lower_get_next_private(struct net_device *dev, | 3820 | void *netdev_lower_get_next_private(struct net_device *dev, |
| 3895 | struct list_head **iter); | 3821 | struct list_head **iter); |
| @@ -3922,17 +3848,14 @@ struct net_device *netdev_all_lower_get_next(struct net_device *dev, | |||
| 3922 | struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, | 3848 | struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, |
| 3923 | struct list_head **iter); | 3849 | struct list_head **iter); |
| 3924 | 3850 | ||
| 3925 | #define netdev_for_each_all_lower_dev(dev, ldev, iter) \ | 3851 | int netdev_walk_all_lower_dev(struct net_device *dev, |
| 3926 | for (iter = (dev)->all_adj_list.lower.next, \ | 3852 | int (*fn)(struct net_device *lower_dev, |
| 3927 | ldev = netdev_all_lower_get_next(dev, &(iter)); \ | 3853 | void *data), |
| 3928 | ldev; \ | 3854 | void *data); |
| 3929 | ldev = netdev_all_lower_get_next(dev, &(iter))) | 3855 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
| 3930 | 3856 | int (*fn)(struct net_device *lower_dev, | |
| 3931 | #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ | 3857 | void *data), |
| 3932 | for (iter = &(dev)->all_adj_list.lower, \ | 3858 | void *data); |
| 3933 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ | ||
| 3934 | ldev; \ | ||
| 3935 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) | ||
| 3936 | 3859 | ||
| 3937 | void *netdev_adjacent_get_private(struct list_head *adj_list); | 3860 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
| 3938 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); | 3861 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
| @@ -4009,19 +3932,6 @@ static inline bool can_checksum_protocol(netdev_features_t features, | |||
| 4009 | } | 3932 | } |
| 4010 | } | 3933 | } |
| 4011 | 3934 | ||
| 4012 | /* Map an ethertype into IP protocol if possible */ | ||
| 4013 | static inline int eproto_to_ipproto(int eproto) | ||
| 4014 | { | ||
| 4015 | switch (eproto) { | ||
| 4016 | case htons(ETH_P_IP): | ||
| 4017 | return IPPROTO_IP; | ||
| 4018 | case htons(ETH_P_IPV6): | ||
| 4019 | return IPPROTO_IPV6; | ||
| 4020 | default: | ||
| 4021 | return -1; | ||
| 4022 | } | ||
| 4023 | } | ||
| 4024 | |||
| 4025 | #ifdef CONFIG_BUG | 3935 | #ifdef CONFIG_BUG |
| 4026 | void netdev_rx_csum_fault(struct net_device *dev); | 3936 | void netdev_rx_csum_fault(struct net_device *dev); |
| 4027 | #else | 3937 | #else |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index abc7fdcb9eb1..a4b97be30b28 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
| @@ -49,13 +49,11 @@ struct sock; | |||
| 49 | 49 | ||
| 50 | struct nf_hook_state { | 50 | struct nf_hook_state { |
| 51 | unsigned int hook; | 51 | unsigned int hook; |
| 52 | int thresh; | ||
| 53 | u_int8_t pf; | 52 | u_int8_t pf; |
| 54 | struct net_device *in; | 53 | struct net_device *in; |
| 55 | struct net_device *out; | 54 | struct net_device *out; |
| 56 | struct sock *sk; | 55 | struct sock *sk; |
| 57 | struct net *net; | 56 | struct net *net; |
| 58 | struct nf_hook_entry __rcu *hook_entries; | ||
| 59 | int (*okfn)(struct net *, struct sock *, struct sk_buff *); | 57 | int (*okfn)(struct net *, struct sock *, struct sk_buff *); |
| 60 | }; | 58 | }; |
| 61 | 59 | ||
| @@ -77,14 +75,42 @@ struct nf_hook_ops { | |||
| 77 | 75 | ||
| 78 | struct nf_hook_entry { | 76 | struct nf_hook_entry { |
| 79 | struct nf_hook_entry __rcu *next; | 77 | struct nf_hook_entry __rcu *next; |
| 80 | struct nf_hook_ops ops; | 78 | nf_hookfn *hook; |
| 79 | void *priv; | ||
| 81 | const struct nf_hook_ops *orig_ops; | 80 | const struct nf_hook_ops *orig_ops; |
| 82 | }; | 81 | }; |
| 83 | 82 | ||
| 83 | static inline void | ||
| 84 | nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops) | ||
| 85 | { | ||
| 86 | entry->next = NULL; | ||
| 87 | entry->hook = ops->hook; | ||
| 88 | entry->priv = ops->priv; | ||
| 89 | entry->orig_ops = ops; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline int | ||
| 93 | nf_hook_entry_priority(const struct nf_hook_entry *entry) | ||
| 94 | { | ||
| 95 | return entry->orig_ops->priority; | ||
| 96 | } | ||
| 97 | |||
| 98 | static inline int | ||
| 99 | nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, | ||
| 100 | struct nf_hook_state *state) | ||
| 101 | { | ||
| 102 | return entry->hook(entry->priv, skb, state); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline const struct nf_hook_ops * | ||
| 106 | nf_hook_entry_ops(const struct nf_hook_entry *entry) | ||
| 107 | { | ||
| 108 | return entry->orig_ops; | ||
| 109 | } | ||
| 110 | |||
| 84 | static inline void nf_hook_state_init(struct nf_hook_state *p, | 111 | static inline void nf_hook_state_init(struct nf_hook_state *p, |
| 85 | struct nf_hook_entry *hook_entry, | ||
| 86 | unsigned int hook, | 112 | unsigned int hook, |
| 87 | int thresh, u_int8_t pf, | 113 | u_int8_t pf, |
| 88 | struct net_device *indev, | 114 | struct net_device *indev, |
| 89 | struct net_device *outdev, | 115 | struct net_device *outdev, |
| 90 | struct sock *sk, | 116 | struct sock *sk, |
| @@ -92,13 +118,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, | |||
| 92 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | 118 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
| 93 | { | 119 | { |
| 94 | p->hook = hook; | 120 | p->hook = hook; |
| 95 | p->thresh = thresh; | ||
| 96 | p->pf = pf; | 121 | p->pf = pf; |
| 97 | p->in = indev; | 122 | p->in = indev; |
| 98 | p->out = outdev; | 123 | p->out = outdev; |
| 99 | p->sk = sk; | 124 | p->sk = sk; |
| 100 | p->net = net; | 125 | p->net = net; |
| 101 | RCU_INIT_POINTER(p->hook_entries, hook_entry); | ||
| 102 | p->okfn = okfn; | 126 | p->okfn = okfn; |
| 103 | } | 127 | } |
| 104 | 128 | ||
| @@ -152,23 +176,20 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); | |||
| 152 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 176 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
| 153 | #endif | 177 | #endif |
| 154 | 178 | ||
| 155 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); | 179 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, |
| 180 | struct nf_hook_entry *entry); | ||
| 156 | 181 | ||
| 157 | /** | 182 | /** |
| 158 | * nf_hook_thresh - call a netfilter hook | 183 | * nf_hook - call a netfilter hook |
| 159 | * | 184 | * |
| 160 | * Returns 1 if the hook has allowed the packet to pass. The function | 185 | * Returns 1 if the hook has allowed the packet to pass. The function |
| 161 | * okfn must be invoked by the caller in this case. Any other return | 186 | * okfn must be invoked by the caller in this case. Any other return |
| 162 | * value indicates the packet has been consumed by the hook. | 187 | * value indicates the packet has been consumed by the hook. |
| 163 | */ | 188 | */ |
| 164 | static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | 189 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, |
| 165 | struct net *net, | 190 | struct sock *sk, struct sk_buff *skb, |
| 166 | struct sock *sk, | 191 | struct net_device *indev, struct net_device *outdev, |
| 167 | struct sk_buff *skb, | 192 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
| 168 | struct net_device *indev, | ||
| 169 | struct net_device *outdev, | ||
| 170 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), | ||
| 171 | int thresh) | ||
| 172 | { | 193 | { |
| 173 | struct nf_hook_entry *hook_head; | 194 | struct nf_hook_entry *hook_head; |
| 174 | int ret = 1; | 195 | int ret = 1; |
| @@ -185,24 +206,16 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | |||
| 185 | if (hook_head) { | 206 | if (hook_head) { |
| 186 | struct nf_hook_state state; | 207 | struct nf_hook_state state; |
| 187 | 208 | ||
| 188 | nf_hook_state_init(&state, hook_head, hook, thresh, | 209 | nf_hook_state_init(&state, hook, pf, indev, outdev, |
| 189 | pf, indev, outdev, sk, net, okfn); | 210 | sk, net, okfn); |
| 190 | 211 | ||
| 191 | ret = nf_hook_slow(skb, &state); | 212 | ret = nf_hook_slow(skb, &state, hook_head); |
| 192 | } | 213 | } |
| 193 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
| 194 | 215 | ||
| 195 | return ret; | 216 | return ret; |
| 196 | } | 217 | } |
| 197 | 218 | ||
| 198 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, | ||
| 199 | struct sock *sk, struct sk_buff *skb, | ||
| 200 | struct net_device *indev, struct net_device *outdev, | ||
| 201 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | ||
| 202 | { | ||
| 203 | return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); | ||
| 204 | } | ||
| 205 | |||
| 206 | /* Activate hook; either okfn or kfree_skb called, unless a hook | 219 | /* Activate hook; either okfn or kfree_skb called, unless a hook |
| 207 | returns NF_STOLEN (in which case, it's up to the hook to deal with | 220 | returns NF_STOLEN (in which case, it's up to the hook to deal with |
| 208 | the consequences). | 221 | the consequences). |
| @@ -221,19 +234,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, | |||
| 221 | */ | 234 | */ |
| 222 | 235 | ||
| 223 | static inline int | 236 | static inline int |
| 224 | NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, | ||
| 225 | struct sk_buff *skb, struct net_device *in, | ||
| 226 | struct net_device *out, | ||
| 227 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), | ||
| 228 | int thresh) | ||
| 229 | { | ||
| 230 | int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); | ||
| 231 | if (ret == 1) | ||
| 232 | ret = okfn(net, sk, skb); | ||
| 233 | return ret; | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline int | ||
| 237 | NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, | 237 | NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, |
| 238 | struct sk_buff *skb, struct net_device *in, struct net_device *out, | 238 | struct sk_buff *skb, struct net_device *in, struct net_device *out, |
| 239 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), | 239 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), |
| @@ -242,7 +242,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, | |||
| 242 | int ret; | 242 | int ret; |
| 243 | 243 | ||
| 244 | if (!cond || | 244 | if (!cond || |
| 245 | ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) | 245 | ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) |
| 246 | ret = okfn(net, sk, skb); | 246 | ret = okfn(net, sk, skb); |
| 247 | return ret; | 247 | return ret; |
| 248 | } | 248 | } |
| @@ -252,7 +252,10 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct | |||
| 252 | struct net_device *in, struct net_device *out, | 252 | struct net_device *in, struct net_device *out, |
| 253 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | 253 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
| 254 | { | 254 | { |
| 255 | return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); | 255 | int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); |
| 256 | if (ret == 1) | ||
| 257 | ret = okfn(net, sk, skb); | ||
| 258 | return ret; | ||
| 256 | } | 259 | } |
| 257 | 260 | ||
| 258 | /* Call setsockopt() */ | 261 | /* Call setsockopt() */ |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 83b9a2e0d8d4..8e42253e5d4d 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
| @@ -79,10 +79,12 @@ enum ip_set_ext_id { | |||
| 79 | IPSET_EXT_ID_MAX, | 79 | IPSET_EXT_ID_MAX, |
| 80 | }; | 80 | }; |
| 81 | 81 | ||
| 82 | struct ip_set; | ||
| 83 | |||
| 82 | /* Extension type */ | 84 | /* Extension type */ |
| 83 | struct ip_set_ext_type { | 85 | struct ip_set_ext_type { |
| 84 | /* Destroy extension private data (can be NULL) */ | 86 | /* Destroy extension private data (can be NULL) */ |
| 85 | void (*destroy)(void *ext); | 87 | void (*destroy)(struct ip_set *set, void *ext); |
| 86 | enum ip_set_extension type; | 88 | enum ip_set_extension type; |
| 87 | enum ipset_cadt_flags flag; | 89 | enum ipset_cadt_flags flag; |
| 88 | /* Size and minimal alignment */ | 90 | /* Size and minimal alignment */ |
| @@ -92,17 +94,6 @@ struct ip_set_ext_type { | |||
| 92 | 94 | ||
| 93 | extern const struct ip_set_ext_type ip_set_extensions[]; | 95 | extern const struct ip_set_ext_type ip_set_extensions[]; |
| 94 | 96 | ||
| 95 | struct ip_set_ext { | ||
| 96 | u64 packets; | ||
| 97 | u64 bytes; | ||
| 98 | u32 timeout; | ||
| 99 | u32 skbmark; | ||
| 100 | u32 skbmarkmask; | ||
| 101 | u32 skbprio; | ||
| 102 | u16 skbqueue; | ||
| 103 | char *comment; | ||
| 104 | }; | ||
| 105 | |||
| 106 | struct ip_set_counter { | 97 | struct ip_set_counter { |
| 107 | atomic64_t bytes; | 98 | atomic64_t bytes; |
| 108 | atomic64_t packets; | 99 | atomic64_t packets; |
| @@ -122,6 +113,15 @@ struct ip_set_skbinfo { | |||
| 122 | u32 skbmarkmask; | 113 | u32 skbmarkmask; |
| 123 | u32 skbprio; | 114 | u32 skbprio; |
| 124 | u16 skbqueue; | 115 | u16 skbqueue; |
| 116 | u16 __pad; | ||
| 117 | }; | ||
| 118 | |||
| 119 | struct ip_set_ext { | ||
| 120 | struct ip_set_skbinfo skbinfo; | ||
| 121 | u64 packets; | ||
| 122 | u64 bytes; | ||
| 123 | char *comment; | ||
| 124 | u32 timeout; | ||
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | struct ip_set; | 127 | struct ip_set; |
| @@ -252,6 +252,10 @@ struct ip_set { | |||
| 252 | u8 flags; | 252 | u8 flags; |
| 253 | /* Default timeout value, if enabled */ | 253 | /* Default timeout value, if enabled */ |
| 254 | u32 timeout; | 254 | u32 timeout; |
| 255 | /* Number of elements (vs timeout) */ | ||
| 256 | u32 elements; | ||
| 257 | /* Size of the dynamic extensions (vs timeout) */ | ||
| 258 | size_t ext_size; | ||
| 255 | /* Element data size */ | 259 | /* Element data size */ |
| 256 | size_t dsize; | 260 | size_t dsize; |
| 257 | /* Offsets to extensions in elements */ | 261 | /* Offsets to extensions in elements */ |
| @@ -268,7 +272,7 @@ ip_set_ext_destroy(struct ip_set *set, void *data) | |||
| 268 | */ | 272 | */ |
| 269 | if (SET_WITH_COMMENT(set)) | 273 | if (SET_WITH_COMMENT(set)) |
| 270 | ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( | 274 | ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy( |
| 271 | ext_comment(data, set)); | 275 | set, ext_comment(data, set)); |
| 272 | } | 276 | } |
| 273 | 277 | ||
| 274 | static inline int | 278 | static inline int |
| @@ -294,104 +298,6 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) | |||
| 294 | return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); | 298 | return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags)); |
| 295 | } | 299 | } |
| 296 | 300 | ||
| 297 | static inline void | ||
| 298 | ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) | ||
| 299 | { | ||
| 300 | atomic64_add((long long)bytes, &(counter)->bytes); | ||
| 301 | } | ||
| 302 | |||
| 303 | static inline void | ||
| 304 | ip_set_add_packets(u64 packets, struct ip_set_counter *counter) | ||
| 305 | { | ||
| 306 | atomic64_add((long long)packets, &(counter)->packets); | ||
| 307 | } | ||
| 308 | |||
| 309 | static inline u64 | ||
| 310 | ip_set_get_bytes(const struct ip_set_counter *counter) | ||
| 311 | { | ||
| 312 | return (u64)atomic64_read(&(counter)->bytes); | ||
| 313 | } | ||
| 314 | |||
| 315 | static inline u64 | ||
| 316 | ip_set_get_packets(const struct ip_set_counter *counter) | ||
| 317 | { | ||
| 318 | return (u64)atomic64_read(&(counter)->packets); | ||
| 319 | } | ||
| 320 | |||
| 321 | static inline void | ||
| 322 | ip_set_update_counter(struct ip_set_counter *counter, | ||
| 323 | const struct ip_set_ext *ext, | ||
| 324 | struct ip_set_ext *mext, u32 flags) | ||
| 325 | { | ||
| 326 | if (ext->packets != ULLONG_MAX && | ||
| 327 | !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { | ||
| 328 | ip_set_add_bytes(ext->bytes, counter); | ||
| 329 | ip_set_add_packets(ext->packets, counter); | ||
| 330 | } | ||
| 331 | if (flags & IPSET_FLAG_MATCH_COUNTERS) { | ||
| 332 | mext->packets = ip_set_get_packets(counter); | ||
| 333 | mext->bytes = ip_set_get_bytes(counter); | ||
| 334 | } | ||
| 335 | } | ||
| 336 | |||
| 337 | static inline void | ||
| 338 | ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, | ||
| 339 | const struct ip_set_ext *ext, | ||
| 340 | struct ip_set_ext *mext, u32 flags) | ||
| 341 | { | ||
| 342 | mext->skbmark = skbinfo->skbmark; | ||
| 343 | mext->skbmarkmask = skbinfo->skbmarkmask; | ||
| 344 | mext->skbprio = skbinfo->skbprio; | ||
| 345 | mext->skbqueue = skbinfo->skbqueue; | ||
| 346 | } | ||
| 347 | static inline bool | ||
| 348 | ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) | ||
| 349 | { | ||
| 350 | /* Send nonzero parameters only */ | ||
| 351 | return ((skbinfo->skbmark || skbinfo->skbmarkmask) && | ||
| 352 | nla_put_net64(skb, IPSET_ATTR_SKBMARK, | ||
| 353 | cpu_to_be64((u64)skbinfo->skbmark << 32 | | ||
| 354 | skbinfo->skbmarkmask), | ||
| 355 | IPSET_ATTR_PAD)) || | ||
| 356 | (skbinfo->skbprio && | ||
| 357 | nla_put_net32(skb, IPSET_ATTR_SKBPRIO, | ||
| 358 | cpu_to_be32(skbinfo->skbprio))) || | ||
| 359 | (skbinfo->skbqueue && | ||
| 360 | nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, | ||
| 361 | cpu_to_be16(skbinfo->skbqueue))); | ||
| 362 | } | ||
| 363 | |||
| 364 | static inline void | ||
| 365 | ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, | ||
| 366 | const struct ip_set_ext *ext) | ||
| 367 | { | ||
| 368 | skbinfo->skbmark = ext->skbmark; | ||
| 369 | skbinfo->skbmarkmask = ext->skbmarkmask; | ||
| 370 | skbinfo->skbprio = ext->skbprio; | ||
| 371 | skbinfo->skbqueue = ext->skbqueue; | ||
| 372 | } | ||
| 373 | |||
| 374 | static inline bool | ||
| 375 | ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) | ||
| 376 | { | ||
| 377 | return nla_put_net64(skb, IPSET_ATTR_BYTES, | ||
| 378 | cpu_to_be64(ip_set_get_bytes(counter)), | ||
| 379 | IPSET_ATTR_PAD) || | ||
| 380 | nla_put_net64(skb, IPSET_ATTR_PACKETS, | ||
| 381 | cpu_to_be64(ip_set_get_packets(counter)), | ||
| 382 | IPSET_ATTR_PAD); | ||
| 383 | } | ||
| 384 | |||
| 385 | static inline void | ||
| 386 | ip_set_init_counter(struct ip_set_counter *counter, | ||
| 387 | const struct ip_set_ext *ext) | ||
| 388 | { | ||
| 389 | if (ext->bytes != ULLONG_MAX) | ||
| 390 | atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); | ||
| 391 | if (ext->packets != ULLONG_MAX) | ||
| 392 | atomic64_set(&(counter)->packets, (long long)(ext->packets)); | ||
| 393 | } | ||
| 394 | |||
| 395 | /* Netlink CB args */ | 301 | /* Netlink CB args */ |
| 396 | enum { | 302 | enum { |
| 397 | IPSET_CB_NET = 0, /* net namespace */ | 303 | IPSET_CB_NET = 0, /* net namespace */ |
| @@ -431,6 +337,8 @@ extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], | |||
| 431 | size_t len, size_t align); | 337 | size_t len, size_t align); |
| 432 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], | 338 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], |
| 433 | struct ip_set_ext *ext); | 339 | struct ip_set_ext *ext); |
| 340 | extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, | ||
| 341 | const void *e, bool active); | ||
| 434 | 342 | ||
| 435 | static inline int | 343 | static inline int |
| 436 | ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) | 344 | ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) |
| @@ -546,10 +454,8 @@ bitmap_bytes(u32 a, u32 b) | |||
| 546 | 454 | ||
| 547 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 455 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
| 548 | #include <linux/netfilter/ipset/ip_set_comment.h> | 456 | #include <linux/netfilter/ipset/ip_set_comment.h> |
| 549 | 457 | #include <linux/netfilter/ipset/ip_set_counter.h> | |
| 550 | int | 458 | #include <linux/netfilter/ipset/ip_set_skbinfo.h> |
| 551 | ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, | ||
| 552 | const void *e, bool active); | ||
| 553 | 459 | ||
| 554 | #define IP_SET_INIT_KEXT(skb, opt, set) \ | 460 | #define IP_SET_INIT_KEXT(skb, opt, set) \ |
| 555 | { .bytes = (skb)->len, .packets = 1, \ | 461 | { .bytes = (skb)->len, .packets = 1, \ |
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h index 5e4662a71e01..366d6c0ea04f 100644 --- a/include/linux/netfilter/ipset/ip_set_bitmap.h +++ b/include/linux/netfilter/ipset/ip_set_bitmap.h | |||
| @@ -6,8 +6,8 @@ | |||
| 6 | #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF | 6 | #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF |
| 7 | 7 | ||
| 8 | enum { | 8 | enum { |
| 9 | IPSET_ADD_STORE_PLAIN_TIMEOUT = -1, | ||
| 9 | IPSET_ADD_FAILED = 1, | 10 | IPSET_ADD_FAILED = 1, |
| 10 | IPSET_ADD_STORE_PLAIN_TIMEOUT, | ||
| 11 | IPSET_ADD_START_STORED_TIMEOUT, | 11 | IPSET_ADD_START_STORED_TIMEOUT, |
| 12 | }; | 12 | }; |
| 13 | 13 | ||
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 8d0248525957..8e2bab1e8e90 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h | |||
| @@ -20,13 +20,14 @@ ip_set_comment_uget(struct nlattr *tb) | |||
| 20 | * The kadt functions don't use the comment extensions in any way. | 20 | * The kadt functions don't use the comment extensions in any way. |
| 21 | */ | 21 | */ |
| 22 | static inline void | 22 | static inline void |
| 23 | ip_set_init_comment(struct ip_set_comment *comment, | 23 | ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, |
| 24 | const struct ip_set_ext *ext) | 24 | const struct ip_set_ext *ext) |
| 25 | { | 25 | { |
| 26 | struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); | 26 | struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); |
| 27 | size_t len = ext->comment ? strlen(ext->comment) : 0; | 27 | size_t len = ext->comment ? strlen(ext->comment) : 0; |
| 28 | 28 | ||
| 29 | if (unlikely(c)) { | 29 | if (unlikely(c)) { |
| 30 | set->ext_size -= sizeof(*c) + strlen(c->str) + 1; | ||
| 30 | kfree_rcu(c, rcu); | 31 | kfree_rcu(c, rcu); |
| 31 | rcu_assign_pointer(comment->c, NULL); | 32 | rcu_assign_pointer(comment->c, NULL); |
| 32 | } | 33 | } |
| @@ -34,16 +35,17 @@ ip_set_init_comment(struct ip_set_comment *comment, | |||
| 34 | return; | 35 | return; |
| 35 | if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) | 36 | if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) |
| 36 | len = IPSET_MAX_COMMENT_SIZE; | 37 | len = IPSET_MAX_COMMENT_SIZE; |
| 37 | c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC); | 38 | c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); |
| 38 | if (unlikely(!c)) | 39 | if (unlikely(!c)) |
| 39 | return; | 40 | return; |
| 40 | strlcpy(c->str, ext->comment, len + 1); | 41 | strlcpy(c->str, ext->comment, len + 1); |
| 42 | set->ext_size += sizeof(*c) + strlen(c->str) + 1; | ||
| 41 | rcu_assign_pointer(comment->c, c); | 43 | rcu_assign_pointer(comment->c, c); |
| 42 | } | 44 | } |
| 43 | 45 | ||
| 44 | /* Used only when dumping a set, protected by rcu_read_lock_bh() */ | 46 | /* Used only when dumping a set, protected by rcu_read_lock_bh() */ |
| 45 | static inline int | 47 | static inline int |
| 46 | ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) | 48 | ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) |
| 47 | { | 49 | { |
| 48 | struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); | 50 | struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); |
| 49 | 51 | ||
| @@ -58,13 +60,14 @@ ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) | |||
| 58 | * of the set data anymore. | 60 | * of the set data anymore. |
| 59 | */ | 61 | */ |
| 60 | static inline void | 62 | static inline void |
| 61 | ip_set_comment_free(struct ip_set_comment *comment) | 63 | ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) |
| 62 | { | 64 | { |
| 63 | struct ip_set_comment_rcu *c; | 65 | struct ip_set_comment_rcu *c; |
| 64 | 66 | ||
| 65 | c = rcu_dereference_protected(comment->c, 1); | 67 | c = rcu_dereference_protected(comment->c, 1); |
| 66 | if (unlikely(!c)) | 68 | if (unlikely(!c)) |
| 67 | return; | 69 | return; |
| 70 | set->ext_size -= sizeof(*c) + strlen(c->str) + 1; | ||
| 68 | kfree_rcu(c, rcu); | 71 | kfree_rcu(c, rcu); |
| 69 | rcu_assign_pointer(comment->c, NULL); | 72 | rcu_assign_pointer(comment->c, NULL); |
| 70 | } | 73 | } |
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h new file mode 100644 index 000000000000..bb6fba480118 --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_counter.h | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | #ifndef _IP_SET_COUNTER_H | ||
| 2 | #define _IP_SET_COUNTER_H | ||
| 3 | |||
| 4 | /* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifdef __KERNEL__ | ||
| 12 | |||
| 13 | static inline void | ||
| 14 | ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) | ||
| 15 | { | ||
| 16 | atomic64_add((long long)bytes, &(counter)->bytes); | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline void | ||
| 20 | ip_set_add_packets(u64 packets, struct ip_set_counter *counter) | ||
| 21 | { | ||
| 22 | atomic64_add((long long)packets, &(counter)->packets); | ||
| 23 | } | ||
| 24 | |||
| 25 | static inline u64 | ||
| 26 | ip_set_get_bytes(const struct ip_set_counter *counter) | ||
| 27 | { | ||
| 28 | return (u64)atomic64_read(&(counter)->bytes); | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline u64 | ||
| 32 | ip_set_get_packets(const struct ip_set_counter *counter) | ||
| 33 | { | ||
| 34 | return (u64)atomic64_read(&(counter)->packets); | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline void | ||
| 38 | ip_set_update_counter(struct ip_set_counter *counter, | ||
| 39 | const struct ip_set_ext *ext, | ||
| 40 | struct ip_set_ext *mext, u32 flags) | ||
| 41 | { | ||
| 42 | if (ext->packets != ULLONG_MAX && | ||
| 43 | !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { | ||
| 44 | ip_set_add_bytes(ext->bytes, counter); | ||
| 45 | ip_set_add_packets(ext->packets, counter); | ||
| 46 | } | ||
| 47 | if (flags & IPSET_FLAG_MATCH_COUNTERS) { | ||
| 48 | mext->packets = ip_set_get_packets(counter); | ||
| 49 | mext->bytes = ip_set_get_bytes(counter); | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | static inline bool | ||
| 54 | ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) | ||
| 55 | { | ||
| 56 | return nla_put_net64(skb, IPSET_ATTR_BYTES, | ||
| 57 | cpu_to_be64(ip_set_get_bytes(counter)), | ||
| 58 | IPSET_ATTR_PAD) || | ||
| 59 | nla_put_net64(skb, IPSET_ATTR_PACKETS, | ||
| 60 | cpu_to_be64(ip_set_get_packets(counter)), | ||
| 61 | IPSET_ATTR_PAD); | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline void | ||
| 65 | ip_set_init_counter(struct ip_set_counter *counter, | ||
| 66 | const struct ip_set_ext *ext) | ||
| 67 | { | ||
| 68 | if (ext->bytes != ULLONG_MAX) | ||
| 69 | atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); | ||
| 70 | if (ext->packets != ULLONG_MAX) | ||
| 71 | atomic64_set(&(counter)->packets, (long long)(ext->packets)); | ||
| 72 | } | ||
| 73 | |||
| 74 | #endif /* __KERNEL__ */ | ||
| 75 | #endif /* _IP_SET_COUNTER_H */ | ||
diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h new file mode 100644 index 000000000000..29d7ef2bc3fa --- /dev/null +++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | #ifndef _IP_SET_SKBINFO_H | ||
| 2 | #define _IP_SET_SKBINFO_H | ||
| 3 | |||
| 4 | /* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifdef __KERNEL__ | ||
| 12 | |||
| 13 | static inline void | ||
| 14 | ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, | ||
| 15 | const struct ip_set_ext *ext, | ||
| 16 | struct ip_set_ext *mext, u32 flags) | ||
| 17 | { | ||
| 18 | mext->skbinfo = *skbinfo; | ||
| 19 | } | ||
| 20 | |||
| 21 | static inline bool | ||
| 22 | ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) | ||
| 23 | { | ||
| 24 | /* Send nonzero parameters only */ | ||
| 25 | return ((skbinfo->skbmark || skbinfo->skbmarkmask) && | ||
| 26 | nla_put_net64(skb, IPSET_ATTR_SKBMARK, | ||
| 27 | cpu_to_be64((u64)skbinfo->skbmark << 32 | | ||
| 28 | skbinfo->skbmarkmask), | ||
| 29 | IPSET_ATTR_PAD)) || | ||
| 30 | (skbinfo->skbprio && | ||
| 31 | nla_put_net32(skb, IPSET_ATTR_SKBPRIO, | ||
| 32 | cpu_to_be32(skbinfo->skbprio))) || | ||
| 33 | (skbinfo->skbqueue && | ||
| 34 | nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, | ||
| 35 | cpu_to_be16(skbinfo->skbqueue))); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void | ||
| 39 | ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, | ||
| 40 | const struct ip_set_ext *ext) | ||
| 41 | { | ||
| 42 | *skbinfo = ext->skbinfo; | ||
| 43 | } | ||
| 44 | |||
| 45 | #endif /* __KERNEL__ */ | ||
| 46 | #endif /* _IP_SET_SKBINFO_H */ | ||
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 1d6a935c1ac5..bfb3531fd88a 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h | |||
| @@ -40,7 +40,7 @@ ip_set_timeout_uget(struct nlattr *tb) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline bool | 42 | static inline bool |
| 43 | ip_set_timeout_expired(unsigned long *t) | 43 | ip_set_timeout_expired(const unsigned long *t) |
| 44 | { | 44 | { |
| 45 | return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); | 45 | return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); |
| 46 | } | 46 | } |
| @@ -63,7 +63,7 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | static inline u32 | 65 | static inline u32 |
| 66 | ip_set_timeout_get(unsigned long *timeout) | 66 | ip_set_timeout_get(const unsigned long *timeout) |
| 67 | { | 67 | { |
| 68 | return *timeout == IPSET_ELEM_PERMANENT ? 0 : | 68 | return *timeout == IPSET_ELEM_PERMANENT ? 0 : |
| 69 | jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; | 69 | jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; |
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index 40dcc82058d1..ff721d7325cf 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h | |||
| @@ -25,7 +25,7 @@ enum ct_dccp_roles { | |||
| 25 | #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) | 25 | #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) |
| 26 | 26 | ||
| 27 | #ifdef __KERNEL__ | 27 | #ifdef __KERNEL__ |
| 28 | #include <net/netfilter/nf_conntrack_tuple.h> | 28 | #include <linux/netfilter/nf_conntrack_tuple_common.h> |
| 29 | 29 | ||
| 30 | struct nf_ct_dccp { | 30 | struct nf_ct_dccp { |
| 31 | u_int8_t role[IP_CT_DIR_MAX]; | 31 | u_int8_t role[IP_CT_DIR_MAX]; |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 2ad1a2b289b5..5117e4d2ddfa 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/netdevice.h> | 5 | #include <linux/netdevice.h> |
| 6 | #include <linux/static_key.h> | 6 | #include <linux/static_key.h> |
| 7 | #include <linux/netfilter.h> | ||
| 7 | #include <uapi/linux/netfilter/x_tables.h> | 8 | #include <uapi/linux/netfilter/x_tables.h> |
| 8 | 9 | ||
| 9 | /* Test a struct->invflags and a boolean for inequality */ | 10 | /* Test a struct->invflags and a boolean for inequality */ |
| @@ -17,14 +18,9 @@ | |||
| 17 | * @target: the target extension | 18 | * @target: the target extension |
| 18 | * @matchinfo: per-match data | 19 | * @matchinfo: per-match data |
| 19 | * @targetinfo: per-target data | 20 | * @targetinfo: per-target data |
| 20 | * @net network namespace through which the action was invoked | 21 | * @state: pointer to hook state this packet came from |
| 21 | * @in: input netdevice | ||
| 22 | * @out: output netdevice | ||
| 23 | * @fragoff: packet is a fragment, this is the data offset | 22 | * @fragoff: packet is a fragment, this is the data offset |
| 24 | * @thoff: position of transport header relative to skb->data | 23 | * @thoff: position of transport header relative to skb->data |
| 25 | * @hook: hook number given packet came from | ||
| 26 | * @family: Actual NFPROTO_* through which the function is invoked | ||
| 27 | * (helpful when match->family == NFPROTO_UNSPEC) | ||
| 28 | * | 24 | * |
| 29 | * Fields written to by extensions: | 25 | * Fields written to by extensions: |
| 30 | * | 26 | * |
| @@ -38,15 +34,47 @@ struct xt_action_param { | |||
| 38 | union { | 34 | union { |
| 39 | const void *matchinfo, *targinfo; | 35 | const void *matchinfo, *targinfo; |
| 40 | }; | 36 | }; |
| 41 | struct net *net; | 37 | const struct nf_hook_state *state; |
| 42 | const struct net_device *in, *out; | ||
| 43 | int fragoff; | 38 | int fragoff; |
| 44 | unsigned int thoff; | 39 | unsigned int thoff; |
| 45 | unsigned int hooknum; | ||
| 46 | u_int8_t family; | ||
| 47 | bool hotdrop; | 40 | bool hotdrop; |
| 48 | }; | 41 | }; |
| 49 | 42 | ||
| 43 | static inline struct net *xt_net(const struct xt_action_param *par) | ||
| 44 | { | ||
| 45 | return par->state->net; | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline struct net_device *xt_in(const struct xt_action_param *par) | ||
| 49 | { | ||
| 50 | return par->state->in; | ||
| 51 | } | ||
| 52 | |||
| 53 | static inline const char *xt_inname(const struct xt_action_param *par) | ||
| 54 | { | ||
| 55 | return par->state->in->name; | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline struct net_device *xt_out(const struct xt_action_param *par) | ||
| 59 | { | ||
| 60 | return par->state->out; | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline const char *xt_outname(const struct xt_action_param *par) | ||
| 64 | { | ||
| 65 | return par->state->out->name; | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline unsigned int xt_hooknum(const struct xt_action_param *par) | ||
| 69 | { | ||
| 70 | return par->state->hook; | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline u_int8_t xt_family(const struct xt_action_param *par) | ||
| 74 | { | ||
| 75 | return par->state->pf; | ||
| 76 | } | ||
| 77 | |||
| 50 | /** | 78 | /** |
| 51 | * struct xt_mtchk_param - parameters for match extensions' | 79 | * struct xt_mtchk_param - parameters for match extensions' |
| 52 | * checkentry functions | 80 | * checkentry functions |
| @@ -375,38 +403,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a, | |||
| 375 | return ret; | 403 | return ret; |
| 376 | } | 404 | } |
| 377 | 405 | ||
| 406 | struct xt_percpu_counter_alloc_state { | ||
| 407 | unsigned int off; | ||
| 408 | const char __percpu *mem; | ||
| 409 | }; | ||
| 378 | 410 | ||
| 379 | /* On SMP, ip(6)t_entry->counters.pcnt holds address of the | 411 | bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, |
| 380 | * real (percpu) counter. On !SMP, its just the packet count, | 412 | struct xt_counters *counter); |
| 381 | * so nothing needs to be done there. | 413 | void xt_percpu_counter_free(struct xt_counters *cnt); |
| 382 | * | ||
| 383 | * xt_percpu_counter_alloc returns the address of the percpu | ||
| 384 | * counter, or 0 on !SMP. We force an alignment of 16 bytes | ||
| 385 | * so that bytes/packets share a common cache line. | ||
| 386 | * | ||
| 387 | * Hence caller must use IS_ERR_VALUE to check for error, this | ||
| 388 | * allows us to return 0 for single core systems without forcing | ||
| 389 | * callers to deal with SMP vs. NONSMP issues. | ||
| 390 | */ | ||
| 391 | static inline unsigned long xt_percpu_counter_alloc(void) | ||
| 392 | { | ||
| 393 | if (nr_cpu_ids > 1) { | ||
| 394 | void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), | ||
| 395 | sizeof(struct xt_counters)); | ||
| 396 | |||
| 397 | if (res == NULL) | ||
| 398 | return -ENOMEM; | ||
| 399 | |||
| 400 | return (__force unsigned long) res; | ||
| 401 | } | ||
| 402 | |||
| 403 | return 0; | ||
| 404 | } | ||
| 405 | static inline void xt_percpu_counter_free(u64 pcnt) | ||
| 406 | { | ||
| 407 | if (nr_cpu_ids > 1) | ||
| 408 | free_percpu((void __percpu *) (unsigned long) pcnt); | ||
| 409 | } | ||
| 410 | 414 | ||
| 411 | static inline struct xt_counters * | 415 | static inline struct xt_counters * |
| 412 | xt_get_this_cpu_counter(struct xt_counters *cnt) | 416 | xt_get_this_cpu_counter(struct xt_counters *cnt) |
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index 33e37fb41d5d..59476061de86 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h | |||
| @@ -19,6 +19,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb) | |||
| 19 | { | 19 | { |
| 20 | struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress); | 20 | struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress); |
| 21 | struct nf_hook_state state; | 21 | struct nf_hook_state state; |
| 22 | int ret; | ||
| 22 | 23 | ||
| 23 | /* Must recheck the ingress hook head, in the event it became NULL | 24 | /* Must recheck the ingress hook head, in the event it became NULL |
| 24 | * after the check in nf_hook_ingress_active evaluated to true. | 25 | * after the check in nf_hook_ingress_active evaluated to true. |
| @@ -26,10 +27,14 @@ static inline int nf_hook_ingress(struct sk_buff *skb) | |||
| 26 | if (unlikely(!e)) | 27 | if (unlikely(!e)) |
| 27 | return 0; | 28 | return 0; |
| 28 | 29 | ||
| 29 | nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN, | 30 | nf_hook_state_init(&state, NF_NETDEV_INGRESS, |
| 30 | NFPROTO_NETDEV, skb->dev, NULL, NULL, | 31 | NFPROTO_NETDEV, skb->dev, NULL, NULL, |
| 31 | dev_net(skb->dev), NULL); | 32 | dev_net(skb->dev), NULL); |
| 32 | return nf_hook_slow(skb, &state); | 33 | ret = nf_hook_slow(skb, &state, e); |
| 34 | if (ret == 0) | ||
| 35 | return -1; | ||
| 36 | |||
| 37 | return ret; | ||
| 33 | } | 38 | } |
| 34 | 39 | ||
| 35 | static inline void nf_hook_ingress_init(struct net_device *dev) | 40 | static inline void nf_hook_ingress_init(struct net_device *dev) |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index b25ee9ffdbe6..1828900c9411 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
| @@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) | |||
| 78 | struct net_device *dev = napi->dev; | 78 | struct net_device *dev = napi->dev; |
| 79 | 79 | ||
| 80 | if (dev && dev->npinfo) { | 80 | if (dev && dev->npinfo) { |
| 81 | spin_lock(&napi->poll_lock); | 81 | int owner = smp_processor_id(); |
| 82 | napi->poll_owner = smp_processor_id(); | 82 | |
| 83 | while (cmpxchg(&napi->poll_owner, -1, owner) != -1) | ||
| 84 | cpu_relax(); | ||
| 85 | |||
| 83 | return napi; | 86 | return napi; |
| 84 | } | 87 | } |
| 85 | return NULL; | 88 | return NULL; |
| @@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have) | |||
| 89 | { | 92 | { |
| 90 | struct napi_struct *napi = have; | 93 | struct napi_struct *napi = have; |
| 91 | 94 | ||
| 92 | if (napi) { | 95 | if (napi) |
| 93 | napi->poll_owner = -1; | 96 | smp_store_release(&napi->poll_owner, -1); |
| 94 | spin_unlock(&napi->poll_lock); | ||
| 95 | } | ||
| 96 | } | 97 | } |
| 97 | 98 | ||
| 98 | static inline bool netpoll_tx_running(struct net_device *dev) | 99 | static inline bool netpoll_tx_running(struct net_device *dev) |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9094faf0699d..bca536341d1a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -440,6 +440,7 @@ enum lock_type4 { | |||
| 440 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) | 440 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) |
| 441 | #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) | 441 | #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) |
| 442 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) | 442 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) |
| 443 | #define FATTR4_WORD2_MODE_UMASK (1UL << 17) | ||
| 443 | 444 | ||
| 444 | /* MDS threshold bitmap bits */ | 445 | /* MDS threshold bitmap bits */ |
| 445 | #define THRESHOLD_RD (1UL << 0) | 446 | #define THRESHOLD_RD (1UL << 0) |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 810124b33327..f1da8c8dd473 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -55,22 +55,18 @@ struct nfs_access_entry { | |||
| 55 | struct rcu_head rcu_head; | 55 | struct rcu_head rcu_head; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | struct nfs_lockowner { | ||
| 59 | fl_owner_t l_owner; | ||
| 60 | pid_t l_pid; | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct nfs_lock_context { | 58 | struct nfs_lock_context { |
| 64 | atomic_t count; | 59 | atomic_t count; |
| 65 | struct list_head list; | 60 | struct list_head list; |
| 66 | struct nfs_open_context *open_context; | 61 | struct nfs_open_context *open_context; |
| 67 | struct nfs_lockowner lockowner; | 62 | fl_owner_t lockowner; |
| 68 | atomic_t io_count; | 63 | atomic_t io_count; |
| 69 | }; | 64 | }; |
| 70 | 65 | ||
| 71 | struct nfs4_state; | 66 | struct nfs4_state; |
| 72 | struct nfs_open_context { | 67 | struct nfs_open_context { |
| 73 | struct nfs_lock_context lock_context; | 68 | struct nfs_lock_context lock_context; |
| 69 | fl_owner_t flock_owner; | ||
| 74 | struct dentry *dentry; | 70 | struct dentry *dentry; |
| 75 | struct rpc_cred *cred; | 71 | struct rpc_cred *cred; |
| 76 | struct nfs4_state *state; | 72 | struct nfs4_state *state; |
| @@ -344,11 +340,10 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); | |||
| 344 | extern void nfs_access_set_mask(struct nfs_access_entry *, u32); | 340 | extern void nfs_access_set_mask(struct nfs_access_entry *, u32); |
| 345 | extern int nfs_permission(struct inode *, int); | 341 | extern int nfs_permission(struct inode *, int); |
| 346 | extern int nfs_open(struct inode *, struct file *); | 342 | extern int nfs_open(struct inode *, struct file *); |
| 347 | extern int nfs_attribute_timeout(struct inode *inode); | ||
| 348 | extern int nfs_attribute_cache_expired(struct inode *inode); | 343 | extern int nfs_attribute_cache_expired(struct inode *inode); |
| 349 | extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); | 344 | extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); |
| 350 | extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); | ||
| 351 | extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); | 345 | extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); |
| 346 | extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); | ||
| 352 | extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); | 347 | extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); |
| 353 | extern int nfs_revalidate_mapping_rcu(struct inode *inode); | 348 | extern int nfs_revalidate_mapping_rcu(struct inode *inode); |
| 354 | extern int nfs_setattr(struct dentry *, struct iattr *); | 349 | extern int nfs_setattr(struct dentry *, struct iattr *); |
| @@ -358,7 +353,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, | |||
| 358 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); | 353 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); |
| 359 | extern void put_nfs_open_context(struct nfs_open_context *ctx); | 354 | extern void put_nfs_open_context(struct nfs_open_context *ctx); |
| 360 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); | 355 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); |
| 361 | extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); | 356 | extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); |
| 362 | extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); | 357 | extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); |
| 363 | extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); | 358 | extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); |
| 364 | extern void nfs_file_clear_open_context(struct file *flip); | 359 | extern void nfs_file_clear_open_context(struct file *flip); |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index beb1e10f446e..348f7c158084 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -216,6 +216,20 @@ struct nfs4_get_lease_time_res { | |||
| 216 | struct nfs_fsinfo *lr_fsinfo; | 216 | struct nfs_fsinfo *lr_fsinfo; |
| 217 | }; | 217 | }; |
| 218 | 218 | ||
| 219 | struct xdr_stream; | ||
| 220 | struct nfs4_xdr_opaque_data; | ||
| 221 | |||
| 222 | struct nfs4_xdr_opaque_ops { | ||
| 223 | void (*encode)(struct xdr_stream *, const void *args, | ||
| 224 | const struct nfs4_xdr_opaque_data *); | ||
| 225 | void (*free)(struct nfs4_xdr_opaque_data *); | ||
| 226 | }; | ||
| 227 | |||
| 228 | struct nfs4_xdr_opaque_data { | ||
| 229 | const struct nfs4_xdr_opaque_ops *ops; | ||
| 230 | void *data; | ||
| 231 | }; | ||
| 232 | |||
| 219 | #define PNFS_LAYOUT_MAXSIZE 4096 | 233 | #define PNFS_LAYOUT_MAXSIZE 4096 |
| 220 | 234 | ||
| 221 | struct nfs4_layoutdriver_data { | 235 | struct nfs4_layoutdriver_data { |
| @@ -306,6 +320,7 @@ struct nfs4_layoutreturn_args { | |||
| 306 | struct pnfs_layout_range range; | 320 | struct pnfs_layout_range range; |
| 307 | nfs4_stateid stateid; | 321 | nfs4_stateid stateid; |
| 308 | __u32 layout_type; | 322 | __u32 layout_type; |
| 323 | struct nfs4_xdr_opaque_data *ld_private; | ||
| 309 | }; | 324 | }; |
| 310 | 325 | ||
| 311 | struct nfs4_layoutreturn_res { | 326 | struct nfs4_layoutreturn_res { |
| @@ -321,6 +336,7 @@ struct nfs4_layoutreturn { | |||
| 321 | struct nfs_client *clp; | 336 | struct nfs_client *clp; |
| 322 | struct inode *inode; | 337 | struct inode *inode; |
| 323 | int rpc_status; | 338 | int rpc_status; |
| 339 | struct nfs4_xdr_opaque_data ld_private; | ||
| 324 | }; | 340 | }; |
| 325 | 341 | ||
| 326 | #define PNFS_LAYOUTSTATS_MAXSIZE 256 | 342 | #define PNFS_LAYOUTSTATS_MAXSIZE 256 |
| @@ -341,8 +357,7 @@ struct nfs42_layoutstat_devinfo { | |||
| 341 | __u64 write_count; | 357 | __u64 write_count; |
| 342 | __u64 write_bytes; | 358 | __u64 write_bytes; |
| 343 | __u32 layout_type; | 359 | __u32 layout_type; |
| 344 | layoutstats_encode_t layoutstats_encode; | 360 | struct nfs4_xdr_opaque_data ld_private; |
| 345 | void *layout_private; | ||
| 346 | }; | 361 | }; |
| 347 | 362 | ||
| 348 | struct nfs42_layoutstat_args { | 363 | struct nfs42_layoutstat_args { |
| @@ -418,6 +433,7 @@ struct nfs_openargs { | |||
| 418 | enum open_claim_type4 claim; | 433 | enum open_claim_type4 claim; |
| 419 | enum createmode4 createmode; | 434 | enum createmode4 createmode; |
| 420 | const struct nfs4_label *label; | 435 | const struct nfs4_label *label; |
| 436 | umode_t umask; | ||
| 421 | }; | 437 | }; |
| 422 | 438 | ||
| 423 | struct nfs_openres { | 439 | struct nfs_openres { |
| @@ -469,6 +485,7 @@ struct nfs_closeargs { | |||
| 469 | fmode_t fmode; | 485 | fmode_t fmode; |
| 470 | u32 share_access; | 486 | u32 share_access; |
| 471 | const u32 * bitmask; | 487 | const u32 * bitmask; |
| 488 | struct nfs4_layoutreturn_args *lr_args; | ||
| 472 | }; | 489 | }; |
| 473 | 490 | ||
| 474 | struct nfs_closeres { | 491 | struct nfs_closeres { |
| @@ -477,6 +494,8 @@ struct nfs_closeres { | |||
| 477 | struct nfs_fattr * fattr; | 494 | struct nfs_fattr * fattr; |
| 478 | struct nfs_seqid * seqid; | 495 | struct nfs_seqid * seqid; |
| 479 | const struct nfs_server *server; | 496 | const struct nfs_server *server; |
| 497 | struct nfs4_layoutreturn_res *lr_res; | ||
| 498 | int lr_ret; | ||
| 480 | }; | 499 | }; |
| 481 | /* | 500 | /* |
| 482 | * * Arguments to the lock,lockt, and locku call. | 501 | * * Arguments to the lock,lockt, and locku call. |
| @@ -549,12 +568,15 @@ struct nfs4_delegreturnargs { | |||
| 549 | const struct nfs_fh *fhandle; | 568 | const struct nfs_fh *fhandle; |
| 550 | const nfs4_stateid *stateid; | 569 | const nfs4_stateid *stateid; |
| 551 | const u32 * bitmask; | 570 | const u32 * bitmask; |
| 571 | struct nfs4_layoutreturn_args *lr_args; | ||
| 552 | }; | 572 | }; |
| 553 | 573 | ||
| 554 | struct nfs4_delegreturnres { | 574 | struct nfs4_delegreturnres { |
| 555 | struct nfs4_sequence_res seq_res; | 575 | struct nfs4_sequence_res seq_res; |
| 556 | struct nfs_fattr * fattr; | 576 | struct nfs_fattr * fattr; |
| 557 | struct nfs_server *server; | 577 | struct nfs_server *server; |
| 578 | struct nfs4_layoutreturn_res *lr_res; | ||
| 579 | int lr_ret; | ||
| 558 | }; | 580 | }; |
| 559 | 581 | ||
| 560 | /* | 582 | /* |
| @@ -937,6 +959,7 @@ struct nfs4_create_arg { | |||
| 937 | const struct nfs_fh * dir_fh; | 959 | const struct nfs_fh * dir_fh; |
| 938 | const u32 * bitmask; | 960 | const u32 * bitmask; |
| 939 | const struct nfs4_label *label; | 961 | const struct nfs4_label *label; |
| 962 | umode_t umask; | ||
| 940 | }; | 963 | }; |
| 941 | 964 | ||
| 942 | struct nfs4_create_res { | 965 | struct nfs4_create_res { |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a78c35cff1ae..aacca824a6ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -7,6 +7,23 @@ | |||
| 7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
| 8 | #include <asm/irq.h> | 8 | #include <asm/irq.h> |
| 9 | 9 | ||
| 10 | /* | ||
| 11 | * The run state of the lockup detectors is controlled by the content of the | ||
| 12 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | ||
| 13 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | ||
| 14 | * | ||
| 15 | * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' | ||
| 16 | * are variables that are only used as an 'interface' between the parameters | ||
| 17 | * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The | ||
| 18 | * 'watchdog_thresh' variable is handled differently because its value is not | ||
| 19 | * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' | ||
| 20 | * is equal zero. | ||
| 21 | */ | ||
| 22 | #define NMI_WATCHDOG_ENABLED_BIT 0 | ||
| 23 | #define SOFT_WATCHDOG_ENABLED_BIT 1 | ||
| 24 | #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) | ||
| 25 | #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) | ||
| 26 | |||
| 10 | /** | 27 | /** |
| 11 | * touch_nmi_watchdog - restart NMI watchdog timeout. | 28 | * touch_nmi_watchdog - restart NMI watchdog timeout. |
| 12 | * | 29 | * |
| @@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled; | |||
| 91 | extern int soft_watchdog_enabled; | 108 | extern int soft_watchdog_enabled; |
| 92 | extern int watchdog_user_enabled; | 109 | extern int watchdog_user_enabled; |
| 93 | extern int watchdog_thresh; | 110 | extern int watchdog_thresh; |
| 111 | extern unsigned long watchdog_enabled; | ||
| 94 | extern unsigned long *watchdog_cpumask_bits; | 112 | extern unsigned long *watchdog_cpumask_bits; |
| 113 | #ifdef CONFIG_SMP | ||
| 95 | extern int sysctl_softlockup_all_cpu_backtrace; | 114 | extern int sysctl_softlockup_all_cpu_backtrace; |
| 96 | extern int sysctl_hardlockup_all_cpu_backtrace; | 115 | extern int sysctl_hardlockup_all_cpu_backtrace; |
| 116 | #else | ||
| 117 | #define sysctl_softlockup_all_cpu_backtrace 0 | ||
| 118 | #define sysctl_hardlockup_all_cpu_backtrace 0 | ||
| 119 | #endif | ||
| 120 | extern bool is_hardlockup(void); | ||
| 97 | struct ctl_table; | 121 | struct ctl_table; |
| 98 | extern int proc_watchdog(struct ctl_table *, int , | 122 | extern int proc_watchdog(struct ctl_table *, int , |
| 99 | void __user *, size_t *, loff_t *); | 123 | void __user *, size_t *, loff_t *); |
diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 6f47562d477b..de87ceac110e 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h | |||
| @@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) | |||
| 896 | } | 896 | } |
| 897 | 897 | ||
| 898 | /** | 898 | /** |
| 899 | * ntb_mw_count() - get the number of scratchpads | 899 | * ntb_spad_count() - get the number of scratchpads |
| 900 | * @ntb: NTB device context. | 900 | * @ntb: NTB device context. |
| 901 | * | 901 | * |
| 902 | * Hardware and topology may support a different number of scratchpads. | 902 | * Hardware and topology may support a different number of scratchpads. |
| @@ -968,6 +968,9 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, | |||
| 968 | */ | 968 | */ |
| 969 | static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx) | 969 | static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx) |
| 970 | { | 970 | { |
| 971 | if (!ntb->ops->peer_spad_read) | ||
| 972 | return 0; | ||
| 973 | |||
| 971 | return ntb->ops->peer_spad_read(ntb, idx); | 974 | return ntb->ops->peer_spad_read(ntb, idx); |
| 972 | } | 975 | } |
| 973 | 976 | ||
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h new file mode 100644 index 000000000000..f21471f7ee40 --- /dev/null +++ b/include/linux/nvme-fc-driver.h | |||
| @@ -0,0 +1,851 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016, Avago Technologies | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef _NVME_FC_DRIVER_H | ||
| 15 | #define _NVME_FC_DRIVER_H 1 | ||
| 16 | |||
| 17 | |||
| 18 | /* | ||
| 19 | * ********************** LLDD FC-NVME Host API ******************** | ||
| 20 | * | ||
| 21 | * For FC LLDD's that are the NVME Host role. | ||
| 22 | * | ||
| 23 | * ****************************************************************** | ||
| 24 | */ | ||
| 25 | |||
| 26 | |||
| 27 | |||
| 28 | /* FC Port role bitmask - can merge with FC Port Roles in fc transport */ | ||
| 29 | #define FC_PORT_ROLE_NVME_INITIATOR 0x10 | ||
| 30 | #define FC_PORT_ROLE_NVME_TARGET 0x11 | ||
| 31 | #define FC_PORT_ROLE_NVME_DISCOVERY 0x12 | ||
| 32 | |||
| 33 | |||
| 34 | /** | ||
| 35 | * struct nvme_fc_port_info - port-specific ids and FC connection-specific | ||
| 36 | * data element used during NVME Host role | ||
| 37 | * registrations | ||
| 38 | * | ||
| 39 | * Static fields describing the port being registered: | ||
| 40 | * @node_name: FC WWNN for the port | ||
| 41 | * @port_name: FC WWPN for the port | ||
| 42 | * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) | ||
| 43 | * | ||
| 44 | * Initialization values for dynamic port fields: | ||
| 45 | * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must | ||
| 46 | * be set to 0. | ||
| 47 | */ | ||
| 48 | struct nvme_fc_port_info { | ||
| 49 | u64 node_name; | ||
| 50 | u64 port_name; | ||
| 51 | u32 port_role; | ||
| 52 | u32 port_id; | ||
| 53 | }; | ||
| 54 | |||
| 55 | |||
| 56 | /** | ||
| 57 | * struct nvmefc_ls_req - Request structure passed from NVME-FC transport | ||
| 58 | * to LLDD in order to perform a NVME FC-4 LS | ||
| 59 | * request and obtain a response. | ||
| 60 | * | ||
| 61 | * Values set by the NVME-FC layer prior to calling the LLDD ls_req | ||
| 62 | * entrypoint. | ||
| 63 | * @rqstaddr: pointer to request buffer | ||
| 64 | * @rqstdma: PCI DMA address of request buffer | ||
| 65 | * @rqstlen: Length, in bytes, of request buffer | ||
| 66 | * @rspaddr: pointer to response buffer | ||
| 67 | * @rspdma: PCI DMA address of response buffer | ||
| 68 | * @rsplen: Length, in bytes, of response buffer | ||
| 69 | * @timeout: Maximum amount of time, in seconds, to wait for the LS response. | ||
| 70 | * If timeout exceeded, LLDD to abort LS exchange and complete | ||
| 71 | * LS request with error status. | ||
| 72 | * @private: pointer to memory allocated alongside the ls request structure | ||
| 73 | * that is specifically for the LLDD to use while processing the | ||
| 74 | * request. The length of the buffer corresponds to the | ||
| 75 | * lsrqst_priv_sz value specified in the nvme_fc_port_template | ||
| 76 | * supplied by the LLDD. | ||
| 77 | * @done: The callback routine the LLDD is to invoke upon completion of | ||
| 78 | * the LS request. req argument is the pointer to the original LS | ||
| 79 | * request structure. Status argument must be 0 upon success, a | ||
| 80 | * negative errno on failure (example: -ENXIO). | ||
| 81 | */ | ||
| 82 | struct nvmefc_ls_req { | ||
| 83 | void *rqstaddr; | ||
| 84 | dma_addr_t rqstdma; | ||
| 85 | u32 rqstlen; | ||
| 86 | void *rspaddr; | ||
| 87 | dma_addr_t rspdma; | ||
| 88 | u32 rsplen; | ||
| 89 | u32 timeout; | ||
| 90 | |||
| 91 | void *private; | ||
| 92 | |||
| 93 | void (*done)(struct nvmefc_ls_req *req, int status); | ||
| 94 | |||
| 95 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | ||
| 96 | |||
| 97 | |||
| 98 | enum nvmefc_fcp_datadir { | ||
| 99 | NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ | ||
| 100 | NVMEFC_FCP_WRITE, | ||
| 101 | NVMEFC_FCP_READ, | ||
| 102 | }; | ||
| 103 | |||
| 104 | |||
| 105 | #define NVME_FC_MAX_SEGMENTS 256 | ||
| 106 | |||
| 107 | /** | ||
| 108 | * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport | ||
| 109 | * to LLDD in order to perform a NVME FCP IO operation. | ||
| 110 | * | ||
| 111 | * Values set by the NVME-FC layer prior to calling the LLDD fcp_io | ||
| 112 | * entrypoint. | ||
| 113 | * @cmdaddr: pointer to the FCP CMD IU buffer | ||
| 114 | * @rspaddr: pointer to the FCP RSP IU buffer | ||
| 115 | * @cmddma: PCI DMA address of the FCP CMD IU buffer | ||
| 116 | * @rspdma: PCI DMA address of the FCP RSP IU buffer | ||
| 117 | * @cmdlen: Length, in bytes, of the FCP CMD IU buffer | ||
| 118 | * @rsplen: Length, in bytes, of the FCP RSP IU buffer | ||
| 119 | * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer | ||
| 120 | * @sg_table: scatter/gather structure for payload data | ||
| 121 | * @first_sgl: memory for 1st scatter/gather list segment for payload data | ||
| 122 | * @sg_cnt: number of elements in the scatter/gather list | ||
| 123 | * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx) | ||
| 124 | * @sqid: The nvme SQID the command is being issued on | ||
| 125 | * @done: The callback routine the LLDD is to invoke upon completion of | ||
| 126 | * the FCP operation. req argument is the pointer to the original | ||
| 127 | * FCP IO operation. | ||
| 128 | * @private: pointer to memory allocated alongside the FCP operation | ||
| 129 | * request structure that is specifically for the LLDD to use | ||
| 130 | * while processing the operation. The length of the buffer | ||
| 131 | * corresponds to the fcprqst_priv_sz value specified in the | ||
| 132 | * nvme_fc_port_template supplied by the LLDD. | ||
| 133 | * | ||
| 134 | * Values set by the LLDD indicating completion status of the FCP operation. | ||
| 135 | * Must be set prior to calling the done() callback. | ||
| 136 | * @transferred_length: amount of payload data, in bytes, that were | ||
| 137 | * transferred. Should equal payload_length on success. | ||
| 138 | * @rcv_rsplen: length, in bytes, of the FCP RSP IU received. | ||
| 139 | * @status: Completion status of the FCP operation. must be 0 upon success, | ||
| 140 | * NVME_SC_FC_xxx value upon failure. Note: this is NOT a | ||
| 141 | * reflection of the NVME CQE completion status. Only the status | ||
| 142 | * of the FCP operation at the NVME-FC level. | ||
| 143 | */ | ||
| 144 | struct nvmefc_fcp_req { | ||
| 145 | void *cmdaddr; | ||
| 146 | void *rspaddr; | ||
| 147 | dma_addr_t cmddma; | ||
| 148 | dma_addr_t rspdma; | ||
| 149 | u16 cmdlen; | ||
| 150 | u16 rsplen; | ||
| 151 | |||
| 152 | u32 payload_length; | ||
| 153 | struct sg_table sg_table; | ||
| 154 | struct scatterlist *first_sgl; | ||
| 155 | int sg_cnt; | ||
| 156 | enum nvmefc_fcp_datadir io_dir; | ||
| 157 | |||
| 158 | __le16 sqid; | ||
| 159 | |||
| 160 | void (*done)(struct nvmefc_fcp_req *req); | ||
| 161 | |||
| 162 | void *private; | ||
| 163 | |||
| 164 | u32 transferred_length; | ||
| 165 | u16 rcv_rsplen; | ||
| 166 | u32 status; | ||
| 167 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | ||
| 168 | |||
| 169 | |||
| 170 | /* | ||
| 171 | * Direct copy of fc_port_state enum. For later merging | ||
| 172 | */ | ||
| 173 | enum nvme_fc_obj_state { | ||
| 174 | FC_OBJSTATE_UNKNOWN, | ||
| 175 | FC_OBJSTATE_NOTPRESENT, | ||
| 176 | FC_OBJSTATE_ONLINE, | ||
| 177 | FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */ | ||
| 178 | FC_OBJSTATE_BLOCKED, | ||
| 179 | FC_OBJSTATE_BYPASSED, | ||
| 180 | FC_OBJSTATE_DIAGNOSTICS, | ||
| 181 | FC_OBJSTATE_LINKDOWN, | ||
| 182 | FC_OBJSTATE_ERROR, | ||
| 183 | FC_OBJSTATE_LOOPBACK, | ||
| 184 | FC_OBJSTATE_DELETED, | ||
| 185 | }; | ||
| 186 | |||
| 187 | |||
| 188 | /** | ||
| 189 | * struct nvme_fc_local_port - structure used between NVME-FC transport and | ||
| 190 | * a LLDD to reference a local NVME host port. | ||
| 191 | * Allocated/created by the nvme_fc_register_localport() | ||
| 192 | * transport interface. | ||
| 193 | * | ||
| 194 | * Fields with static values for the port. Initialized by the | ||
| 195 | * port_info struct supplied to the registration call. | ||
| 196 | * @port_num: NVME-FC transport host port number | ||
| 197 | * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx) | ||
| 198 | * @node_name: FC WWNN for the port | ||
| 199 | * @port_name: FC WWPN for the port | ||
| 200 | * @private: pointer to memory allocated alongside the local port | ||
| 201 | * structure that is specifically for the LLDD to use. | ||
| 202 | * The length of the buffer corresponds to the local_priv_sz | ||
| 203 | * value specified in the nvme_fc_port_template supplied by | ||
| 204 | * the LLDD. | ||
| 205 | * | ||
| 206 | * Fields with dynamic values. Values may change based on link state. LLDD | ||
| 207 | * may reference fields directly to change them. Initialized by the | ||
| 208 | * port_info struct supplied to the registration call. | ||
| 209 | * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must | ||
| 210 | * be set to 0. | ||
| 211 | * @port_state: Operational state of the port. | ||
| 212 | */ | ||
| 213 | struct nvme_fc_local_port { | ||
| 214 | /* static/read-only fields */ | ||
| 215 | u32 port_num; | ||
| 216 | u32 port_role; | ||
| 217 | u64 node_name; | ||
| 218 | u64 port_name; | ||
| 219 | |||
| 220 | void *private; | ||
| 221 | |||
| 222 | /* dynamic fields */ | ||
| 223 | u32 port_id; | ||
| 224 | enum nvme_fc_obj_state port_state; | ||
| 225 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | ||
| 226 | |||
| 227 | |||
| 228 | /** | ||
| 229 | * struct nvme_fc_remote_port - structure used between NVME-FC transport and | ||
| 230 | * a LLDD to reference a remote NVME subsystem port. | ||
| 231 | * Allocated/created by the nvme_fc_register_remoteport() | ||
| 232 | * transport interface. | ||
| 233 | * | ||
| 234 | * Fields with static values for the port. Initialized by the | ||
| 235 | * port_info struct supplied to the registration call. | ||
| 236 | * @port_num: NVME-FC transport remote subsystem port number | ||
| 237 | * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx) | ||
| 238 | * @node_name: FC WWNN for the port | ||
| 239 | * @port_name: FC WWPN for the port | ||
| 240 | * @localport: pointer to the NVME-FC local host port the subsystem is | ||
| 241 | * connected to. | ||
| 242 | * @private: pointer to memory allocated alongside the remote port | ||
| 243 | * structure that is specifically for the LLDD to use. | ||
| 244 | * The length of the buffer corresponds to the remote_priv_sz | ||
| 245 | * value specified in the nvme_fc_port_template supplied by | ||
| 246 | * the LLDD. | ||
| 247 | * | ||
| 248 | * Fields with dynamic values. Values may change based on link or login | ||
| 249 | * state. LLDD may reference fields directly to change them. Initialized by | ||
| 250 | * the port_info struct supplied to the registration call. | ||
| 251 | * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must | ||
| 252 | * be set to 0. | ||
| 253 | * @port_state: Operational state of the remote port. Valid values are | ||
| 254 | * ONLINE or UNKNOWN. | ||
| 255 | */ | ||
| 256 | struct nvme_fc_remote_port { | ||
| 257 | /* static fields */ | ||
| 258 | u32 port_num; | ||
| 259 | u32 port_role; | ||
| 260 | u64 node_name; | ||
| 261 | u64 port_name; | ||
| 262 | |||
| 263 | struct nvme_fc_local_port *localport; | ||
| 264 | |||
| 265 | void *private; | ||
| 266 | |||
| 267 | /* dynamic fields */ | ||
| 268 | u32 port_id; | ||
| 269 | enum nvme_fc_obj_state port_state; | ||
| 270 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | ||
| 271 | |||
| 272 | |||
| 273 | /** | ||
| 274 | * struct nvme_fc_port_template - structure containing static entrypoints and | ||
| 275 | * operational parameters for an LLDD that supports NVME host | ||
| 276 | * behavior. Passed by reference in port registrations. | ||
| 277 | * NVME-FC transport remembers template reference and may | ||
| 278 | * access it during runtime operation. | ||
| 279 | * | ||
| 280 | * Host/Initiator Transport Entrypoints/Parameters: | ||
| 281 | * | ||
| 282 | * @localport_delete: The LLDD initiates deletion of a localport via | ||
| 283 | * nvme_fc_deregister_localport(). However, the teardown is | ||
| 284 | * asynchronous. This routine is called upon the completion of the | ||
| 285 | * teardown to inform the LLDD that the localport has been deleted. | ||
| 286 | * Entrypoint is Mandatory. | ||
| 287 | * | ||
| 288 | * @remoteport_delete: The LLDD initiates deletion of a remoteport via | ||
| 289 | * nvme_fc_deregister_remoteport(). However, the teardown is | ||
| 290 | * asynchronous. This routine is called upon the completion of the | ||
| 291 | * teardown to inform the LLDD that the remoteport has been deleted. | ||
| 292 | * Entrypoint is Mandatory. | ||
| 293 | * | ||
| 294 | * @create_queue: Upon creating a host<->controller association, queues are | ||
| 295 | * created such that they can be affinitized to cpus/cores. This | ||
| 296 | * callback into the LLDD to notify that a controller queue is being | ||
| 297 | * created. The LLDD may choose to allocate an associated hw queue | ||
| 298 | * or map it onto a shared hw queue. Upon return from the call, the | ||
| 299 | * LLDD specifies a handle that will be given back to it for any | ||
| 300 | * command that is posted to the controller queue. The handle can | ||
| 301 | * be used by the LLDD to map quickly to the proper hw queue for | ||
| 302 | * command execution. The mask of cpu's that will map to this queue | ||
| 303 | * at the block-level is also passed in. The LLDD should use the | ||
| 304 | * queue id and/or cpu masks to ensure proper affinitization of the | ||
| 305 | * controller queue to the hw queue. | ||
| 306 | * Entrypoint is Optional. | ||
| 307 | * | ||
| 308 | * @delete_queue: This is the inverse of the create_queue. During | ||
| 309 | * host<->controller association teardown, this routine is called | ||
| 310 | * when a controller queue is being terminated. Any association with | ||
| 311 | * a hw queue should be terminated. If there is a unique hw queue, the | ||
| 312 | * hw queue should be torn down. | ||
| 313 | * Entrypoint is Optional. | ||
| 314 | * | ||
| 315 | * @poll_queue: Called to poll for the completion of an io on a blk queue. | ||
| 316 | * Entrypoint is Optional. | ||
| 317 | * | ||
| 318 | * @ls_req: Called to issue a FC-NVME FC-4 LS service request. | ||
| 319 | * The nvme_fc_ls_req structure will fully describe the buffers for | ||
| 320 | * the request payload and where to place the response payload. The | ||
| 321 | * LLDD is to allocate an exchange, issue the LS request, obtain the | ||
| 322 | * LS response, and call the "done" routine specified in the request | ||
| 323 | * structure (argument to done is the ls request structure itself). | ||
| 324 | * Entrypoint is Mandatory. | ||
| 325 | * | ||
| 326 | * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for | ||
| 327 | * an admin queue or an i/o queue. The nvmefc_fcp_req structure will | ||
| 328 | * fully describe the io: the buffer containing the FC-NVME CMD IU | ||
| 329 | * (which contains the SQE), the sg list for the payload if applicable, | ||
| 330 | * and the buffer to place the FC-NVME RSP IU into. The LLDD will | ||
| 331 | * complete the i/o, indicating the amount of data transferred or | ||
| 332 | * any transport error, and call the "done" routine specified in the | ||
| 333 | * request structure (argument to done is the fcp request structure | ||
| 334 | * itself). | ||
| 335 | * Entrypoint is Mandatory. | ||
| 336 | * | ||
| 337 | * @ls_abort: called to request the LLDD to abort the indicated ls request. | ||
| 338 | * The call may return before the abort has completed. After aborting | ||
| 339 | * the request, the LLDD must still call the ls request done routine | ||
| 340 | * indicating an FC transport Aborted status. | ||
| 341 | * Entrypoint is Mandatory. | ||
| 342 | * | ||
| 343 | * @fcp_abort: called to request the LLDD to abort the indicated fcp request. | ||
| 344 | * The call may return before the abort has completed. After aborting | ||
| 345 | * the request, the LLDD must still call the fcp request done routine | ||
| 346 | * indicating an FC transport Aborted status. | ||
| 347 | * Entrypoint is Mandatory. | ||
| 348 | * | ||
| 349 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD | ||
| 350 | * supports for cpu affinitization. | ||
| 351 | * Value is Mandatory. Must be at least 1. | ||
| 352 | * | ||
| 353 | * @max_sgl_segments: indicates the maximum number of sgl segments supported | ||
| 354 | * by the LLDD | ||
| 355 | * Value is Mandatory. Must be at least 1. Recommend at least 256. | ||
| 356 | * | ||
| 357 | * @max_dif_sgl_segments: indicates the maximum number of sgl segments | ||
| 358 | * supported by the LLDD for DIF operations. | ||
| 359 | * Value is Mandatory. Must be at least 1. Recommend at least 256. | ||
| 360 | * | ||
| 361 | * @dma_boundary: indicates the dma address boundary where dma mappings | ||
| 362 | * will be split across. | ||
| 363 | * Value is Mandatory. Typical value is 0xFFFFFFFF to split across | ||
| 364 | * 4Gig address boundaries | ||
| 365 | * | ||
| 366 | * @local_priv_sz: The LLDD sets this field to the amount of additional | ||
| 367 | * memory that it would like fc nvme layer to allocate on the LLDD's | ||
| 368 | * behalf whenever a localport is allocated. The additional memory | ||
| 369 | * area solely for the use of the LLDD and its location is specified by | ||
| 370 | * the localport->private pointer. | ||
| 371 | * Value is Mandatory. Allowed to be zero. | ||
| 372 | * | ||
| 373 | * @remote_priv_sz: The LLDD sets this field to the amount of additional | ||
| 374 | * memory that it would like fc nvme layer to allocate on the LLDD's | ||
| 375 | * behalf whenever a remoteport is allocated. The additional memory | ||
| 376 | * area solely for the use of the LLDD and its location is specified by | ||
| 377 | * the remoteport->private pointer. | ||
| 378 | * Value is Mandatory. Allowed to be zero. | ||
| 379 | * | ||
| 380 | * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional | ||
| 381 | * memory that it would like fc nvme layer to allocate on the LLDD's | ||
| 382 | * behalf whenever a ls request structure is allocated. The additional | ||
| 383 | * memory area solely for the use of the LLDD and its location is | ||
| 384 | * specified by the ls_request->private pointer. | ||
| 385 | * Value is Mandatory. Allowed to be zero. | ||
| 386 | * | ||
| 387 | * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional | ||
| 388 | * memory that it would like fc nvme layer to allocate on the LLDD's | ||
| 389 | * behalf whenever a fcp request structure is allocated. The additional | ||
| 390 | * memory area solely for the use of the LLDD and its location is | ||
| 391 | * specified by the fcp_request->private pointer. | ||
| 392 | * Value is Mandatory. Allowed to be zero. | ||
| 393 | */ | ||
| 394 | struct nvme_fc_port_template { | ||
| 395 | /* initiator-based functions */ | ||
| 396 | void (*localport_delete)(struct nvme_fc_local_port *); | ||
| 397 | void (*remoteport_delete)(struct nvme_fc_remote_port *); | ||
| 398 | int (*create_queue)(struct nvme_fc_local_port *, | ||
| 399 | unsigned int qidx, u16 qsize, | ||
| 400 | void **handle); | ||
| 401 | void (*delete_queue)(struct nvme_fc_local_port *, | ||
| 402 | unsigned int qidx, void *handle); | ||
| 403 | void (*poll_queue)(struct nvme_fc_local_port *, void *handle); | ||
| 404 | int (*ls_req)(struct nvme_fc_local_port *, | ||
| 405 | struct nvme_fc_remote_port *, | ||
| 406 | struct nvmefc_ls_req *); | ||
| 407 | int (*fcp_io)(struct nvme_fc_local_port *, | ||
| 408 | struct nvme_fc_remote_port *, | ||
| 409 | void *hw_queue_handle, | ||
| 410 | struct nvmefc_fcp_req *); | ||
| 411 | void (*ls_abort)(struct nvme_fc_local_port *, | ||
| 412 | struct nvme_fc_remote_port *, | ||
| 413 | struct nvmefc_ls_req *); | ||
| 414 | void (*fcp_abort)(struct nvme_fc_local_port *, | ||
| 415 | struct nvme_fc_remote_port *, | ||
| 416 | void *hw_queue_handle, | ||
| 417 | struct nvmefc_fcp_req *); | ||
| 418 | |||
| 419 | u32 max_hw_queues; | ||
| 420 | u16 max_sgl_segments; | ||
| 421 | u16 max_dif_sgl_segments; | ||
| 422 | u64 dma_boundary; | ||
| 423 | |||
| 424 | /* sizes of additional private data for data structures */ | ||
| 425 | u32 local_priv_sz; | ||
| 426 | u32 remote_priv_sz; | ||
| 427 | u32 lsrqst_priv_sz; | ||
| 428 | u32 fcprqst_priv_sz; | ||
| 429 | }; | ||
| 430 | |||
| 431 | |||
| 432 | /* | ||
| 433 | * Initiator/Host functions | ||
| 434 | */ | ||
| 435 | |||
| 436 | int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, | ||
| 437 | struct nvme_fc_port_template *template, | ||
| 438 | struct device *dev, | ||
| 439 | struct nvme_fc_local_port **lport_p); | ||
| 440 | |||
| 441 | int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport); | ||
| 442 | |||
| 443 | int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, | ||
| 444 | struct nvme_fc_port_info *pinfo, | ||
| 445 | struct nvme_fc_remote_port **rport_p); | ||
| 446 | |||
| 447 | int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport); | ||
| 448 | |||
| 449 | |||
| 450 | |||
| 451 | /* | ||
| 452 | * *************** LLDD FC-NVME Target/Subsystem API *************** | ||
| 453 | * | ||
| 454 | * For FC LLDD's that are the NVME Subsystem role | ||
| 455 | * | ||
| 456 | * ****************************************************************** | ||
| 457 | */ | ||
| 458 | |||
| 459 | /** | ||
| 460 | * struct nvmet_fc_port_info - port-specific ids and FC connection-specific | ||
| 461 | * data element used during NVME Subsystem role | ||
| 462 | * registrations | ||
| 463 | * | ||
| 464 | * Static fields describing the port being registered: | ||
| 465 | * @node_name: FC WWNN for the port | ||
| 466 | * @port_name: FC WWPN for the port | ||
| 467 | * | ||
| 468 | * Initialization values for dynamic port fields: | ||
| 469 | * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must | ||
| 470 | * be set to 0. | ||
| 471 | */ | ||
| 472 | struct nvmet_fc_port_info { | ||
| 473 | u64 node_name; | ||
| 474 | u64 port_name; | ||
| 475 | u32 port_id; | ||
| 476 | }; | ||
| 477 | |||
| 478 | |||
| 479 | /** | ||
| 480 | * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC | ||
| 481 | * layer to represent the exchange context for | ||
| 482 | * a FC-NVME Link Service (LS). | ||
| 483 | * | ||
| 484 | * The structure is allocated by the LLDD whenever a LS Request is received | ||
| 485 | * from the FC link. The address of the structure is passed to the nvmet-fc | ||
| 486 | * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure | ||
| 487 | * will be passed back to the LLDD when the response is to be transmit. | ||
| 488 | * The LLDD is to use the address to map back to the LLDD exchange structure | ||
| 489 | * which maintains information such as the targetport the LS was received | ||
| 490 | * on, the remote FC NVME initiator that sent the LS, and any FC exchange | ||
| 491 | * context. Upon completion of the LS response transmit, the address of the | ||
| 492 | * structure will be passed back to the LS rsp done() routine, allowing the | ||
| 493 | * nvmet-fc layer to release dma resources. Upon completion of the done() | ||
| 494 | * routine, no further access will be made by the nvmet-fc layer and the | ||
| 495 | * LLDD can de-allocate the structure. | ||
| 496 | * | ||
| 497 | * Field initialization: | ||
| 498 | * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that | ||
| 499 | * is valid in the structure. | ||
| 500 | * | ||
| 501 | * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc | ||
| 502 | * layer will fully set the fields in order to specify the response | ||
| 503 | * payload buffer and its length as well as the done routine to be called | ||
| 504 | * upon completion of the transmit. The nvmet-fc layer will also set a | ||
| 505 | * private pointer for its own use in the done routine. | ||
| 506 | * | ||
| 507 | * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp | ||
| 508 | * entrypoint. | ||
| 509 | * @rspbuf: pointer to the LS response buffer | ||
| 510 | * @rspdma: PCI DMA address of the LS response buffer | ||
| 511 | * @rsplen: Length, in bytes, of the LS response buffer | ||
| 512 | * @done: The callback routine the LLDD is to invoke upon completion of | ||
| 513 | * transmitting the LS response. req argument is the pointer to | ||
| 514 | * the original ls request. | ||
| 515 | * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used | ||
| 516 | * as part of the NVMET-FC processing. The LLDD is not to access | ||
| 517 | * this pointer. | ||
| 518 | */ | ||
| 519 | struct nvmefc_tgt_ls_req { | ||
| 520 | void *rspbuf; | ||
| 521 | dma_addr_t rspdma; | ||
| 522 | u16 rsplen; | ||
| 523 | |||
| 524 | void (*done)(struct nvmefc_tgt_ls_req *req); | ||
| 525 | void *nvmet_fc_private; /* LLDD is not to access !! */ | ||
| 526 | }; | ||
| 527 | |||
| 528 | /* Operations that NVME-FC layer may request the LLDD to perform for FCP */ | ||
| 529 | enum { | ||
| 530 | NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ | ||
| 531 | NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */ | ||
| 532 | NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send | ||
| 533 | * rsp as well | ||
| 534 | */ | ||
| 535 | NVMET_FCOP_RSP = 4, /* send rsp frame */ | ||
| 536 | NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */ | ||
| 537 | NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */ | ||
| 538 | NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */ | ||
| 539 | }; | ||
| 540 | |||
| 541 | /** | ||
| 542 | * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC | ||
| 543 | * layer to represent the exchange context and | ||
| 544 | * the specific FC-NVME IU operation(s) to perform | ||
| 545 | * for a FC-NVME FCP IO. | ||
| 546 | * | ||
| 547 | * Structure used between LLDD and nvmet-fc layer to represent the exchange | ||
| 548 | * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related | ||
| 549 | * memory transfers, and its associated cqe transfer). | ||
| 550 | * | ||
| 551 | * The structure is allocated by the LLDD whenever a FCP CMD IU is received | ||
| 552 | * from the FC link. The address of the structure is passed to the nvmet-fc | ||
| 553 | * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure | ||
| 554 | * will be passed back to the LLDD for the data operations and transmit of | ||
| 555 | * the response. The LLDD is to use the address to map back to the LLDD | ||
| 556 | * exchange structure which maintains information such as the targetport | ||
| 557 | * the FCP I/O was received on, the remote FC NVME initiator that sent the | ||
| 558 | * FCP I/O, and any FC exchange context. Upon completion of the FCP target | ||
| 559 | * operation, the address of the structure will be passed back to the FCP | ||
| 560 | * op done() routine, allowing the nvmet-fc layer to release dma resources. | ||
| 561 | * Upon completion of the done() routine for either RSP or ABORT ops, no | ||
| 562 | * further access will be made by the nvmet-fc layer and the LLDD can | ||
| 563 | * de-allocate the structure. | ||
| 564 | * | ||
| 565 | * Field initialization: | ||
| 566 | * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that | ||
| 567 | * is valid in the structure. | ||
| 568 | * | ||
| 569 | * When the structure is used for an FCP target operation, the nvmet-fc | ||
| 570 | * layer will fully set the fields in order to specify the scattergather | ||
| 571 | * list, the transfer length, as well as the done routine to be called | ||
| 572 | * upon completion of the operation. The nvmet-fc layer will also set a | ||
| 573 | * private pointer for its own use in the done routine. | ||
| 574 | * | ||
| 575 | * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !! | ||
| 576 | * | ||
| 577 | * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op | ||
| 578 | * entrypoint. | ||
| 579 | * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx) | ||
| 580 | * @hwqid: Specifies the hw queue index (0..N-1, where N is the | ||
| 581 | * max_hw_queues value from the LLD's nvmet_fc_target_template) | ||
| 582 | * that the operation is to use. | ||
| 583 | * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred. | ||
| 584 | * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops. | ||
| 585 | * @timeout: amount of time, in seconds, to wait for a response from the NVME | ||
| 586 | * host. A value of 0 is an infinite wait. | ||
| 587 | * Valid only for the following ops: | ||
| 588 | * WRITEDATA: caps the wait for data reception | ||
| 589 | * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used) | ||
| 590 | * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload | ||
| 591 | * that is to be transferred. | ||
| 592 | * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. | ||
| 593 | * @ba_rjt: Contains the BA_RJT payload that is to be transferred. | ||
| 594 | * Valid only for the NVMET_FCOP_BA_RJT op. | ||
| 595 | * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data. | ||
| 596 | * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. | ||
| 597 | * @sg_cnt: Number of valid entries in the scatter/gather list. | ||
| 598 | * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. | ||
| 599 | * @rspaddr: pointer to the FCP RSP IU buffer to be transmit | ||
| 600 | * Used by RSP and READDATA_RSP ops | ||
| 601 | * @rspdma: PCI DMA address of the FCP RSP IU buffer | ||
| 602 | * Used by RSP and READDATA_RSP ops | ||
| 603 | * @rsplen: Length, in bytes, of the FCP RSP IU buffer | ||
| 604 | * Used by RSP and READDATA_RSP ops | ||
| 605 | * @done: The callback routine the LLDD is to invoke upon completion of | ||
| 606 | * the operation. req argument is the pointer to the original | ||
| 607 | * FCP subsystem op request. | ||
| 608 | * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used | ||
| 609 | * as part of the NVMET-FC processing. The LLDD is not to | ||
| 610 | * reference this field. | ||
| 611 | * | ||
| 612 | * Values set by the LLDD indicating completion status of the FCP operation. | ||
| 613 | * Must be set prior to calling the done() callback. | ||
| 614 | * @transferred_length: amount of DATA_OUT payload data received by a | ||
| 615 | * a WRITEDATA operation. If not a WRITEDATA operation, value must | ||
| 616 | * be set to 0. Should equal transfer_length on success. | ||
| 617 | * @fcp_error: status of the FCP operation. Must be 0 on success; on failure | ||
| 618 | * must be a NVME_SC_FC_xxxx value. | ||
| 619 | */ | ||
| 620 | struct nvmefc_tgt_fcp_req { | ||
| 621 | u8 op; | ||
| 622 | u16 hwqid; | ||
| 623 | u32 offset; | ||
| 624 | u32 timeout; | ||
| 625 | u32 transfer_length; | ||
| 626 | struct fc_ba_rjt ba_rjt; | ||
| 627 | struct scatterlist sg[NVME_FC_MAX_SEGMENTS]; | ||
| 628 | int sg_cnt; | ||
| 629 | void *rspaddr; | ||
| 630 | dma_addr_t rspdma; | ||
| 631 | u16 rsplen; | ||
| 632 | |||
| 633 | void (*done)(struct nvmefc_tgt_fcp_req *); | ||
| 634 | |||
| 635 | void *nvmet_fc_private; /* LLDD is not to access !! */ | ||
| 636 | |||
| 637 | u32 transferred_length; | ||
| 638 | int fcp_error; | ||
| 639 | }; | ||
| 640 | |||
| 641 | |||
| 642 | /* Target Features (Bit fields) LLDD supports */ | ||
| 643 | enum { | ||
| 644 | NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0), | ||
| 645 | /* Bit 0: supports the NVMET_FCPOP_READDATA_RSP op, which | ||
| 646 | * sends (the last) Read Data sequence followed by the RSP | ||
| 647 | * sequence in one LLDD operation. Errors during Data | ||
| 648 | * sequence transmit must not allow RSP sequence to be sent. | ||
| 649 | */ | ||
| 650 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), | ||
| 651 | /* Bit 1: When 0, the LLDD will deliver FCP CMD | ||
| 652 | * on the CPU it should be affinitized to. Thus work will | ||
| 653 | * be scheduled on the cpu received on. When 1, the LLDD | ||
| 654 | * may not deliver the CMD on the CPU it should be worked | ||
| 655 | * on. The transport should pick a cpu to schedule the work | ||
| 656 | * on. | ||
| 657 | */ | ||
| 658 | }; | ||
| 659 | |||
| 660 | |||
| 661 | /** | ||
| 662 | * struct nvmet_fc_target_port - structure used between NVME-FC transport and | ||
| 663 | * a LLDD to reference a local NVME subsystem port. | ||
| 664 | * Allocated/created by the nvme_fc_register_targetport() | ||
| 665 | * transport interface. | ||
| 666 | * | ||
| 667 | * Fields with static values for the port. Initialized by the | ||
| 668 | * port_info struct supplied to the registration call. | ||
| 669 | * @port_num: NVME-FC transport subsystem port number | ||
| 670 | * @node_name: FC WWNN for the port | ||
| 671 | * @port_name: FC WWPN for the port | ||
| 672 | * @private: pointer to memory allocated alongside the local port | ||
| 673 | * structure that is specifically for the LLDD to use. | ||
| 674 | * The length of the buffer corresponds to the target_priv_sz | ||
| 675 | * value specified in the nvme_fc_target_template supplied by | ||
| 676 | * the LLDD. | ||
| 677 | * | ||
| 678 | * Fields with dynamic values. Values may change base on link state. LLDD | ||
| 679 | * may reference fields directly to change them. Initialized by the | ||
| 680 | * port_info struct supplied to the registration call. | ||
| 681 | * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must | ||
| 682 | * be set to 0. | ||
| 683 | * @port_state: Operational state of the port. | ||
| 684 | */ | ||
| 685 | struct nvmet_fc_target_port { | ||
| 686 | /* static/read-only fields */ | ||
| 687 | u32 port_num; | ||
| 688 | u64 node_name; | ||
| 689 | u64 port_name; | ||
| 690 | |||
| 691 | void *private; | ||
| 692 | |||
| 693 | /* dynamic fields */ | ||
| 694 | u32 port_id; | ||
| 695 | enum nvme_fc_obj_state port_state; | ||
| 696 | } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ | ||
| 697 | |||
| 698 | |||
| 699 | /** | ||
| 700 | * struct nvmet_fc_target_template - structure containing static entrypoints | ||
| 701 | * and operational parameters for an LLDD that supports NVME | ||
| 702 | * subsystem behavior. Passed by reference in port | ||
| 703 | * registrations. NVME-FC transport remembers template | ||
| 704 | * reference and may access it during runtime operation. | ||
| 705 | * | ||
| 706 | * Subsystem/Target Transport Entrypoints/Parameters: | ||
| 707 | * | ||
| 708 | * @targetport_delete: The LLDD initiates deletion of a targetport via | ||
| 709 | * nvmet_fc_unregister_targetport(). However, the teardown is | ||
| 710 | * asynchronous. This routine is called upon the completion of the | ||
| 711 | * teardown to inform the LLDD that the targetport has been deleted. | ||
| 712 | * Entrypoint is Mandatory. | ||
| 713 | * | ||
| 714 | * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. | ||
| 715 | * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange | ||
| 716 | * structure specified in the nvmet_fc_rcv_ls_req() call made when | ||
| 717 | * the LS request was received. The structure will fully describe | ||
| 718 | * the buffers for the response payload and the dma address of the | ||
| 719 | * payload. The LLDD is to transmit the response (or return a non-zero | ||
| 720 | * errno status), and upon completion of the transmit, call the | ||
| 721 | * "done" routine specified in the nvmefc_tgt_ls_req structure | ||
| 722 | * (argument to done is the ls request structure itself). | ||
| 723 | * After calling the done routine, the LLDD shall consider the | ||
| 724 | * LS handling complete and the nvmefc_tgt_ls_req structure may | ||
| 725 | * be freed/released. | ||
| 726 | * Entrypoint is Mandatory. | ||
| 727 | * | ||
| 728 | * @fcp_op: Called to perform a data transfer, transmit a response, or | ||
| 729 | * abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same | ||
| 730 | * LLDD-supplied exchange structure specified in the | ||
| 731 | * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received. | ||
| 732 | * The op field in the structure shall indicate the operation for | ||
| 733 | * the LLDD to perform relative to the io. | ||
| 734 | * NVMET_FCOP_READDATA operation: the LLDD is to send the | ||
| 735 | * payload data (described by sglist) to the host in 1 or | ||
| 736 | * more FC sequences (preferably 1). Note: the fc-nvme layer | ||
| 737 | * may call the READDATA operation multiple times for longer | ||
| 738 | * payloads. | ||
| 739 | * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the | ||
| 740 | * payload data (described by sglist) from the host via 1 or | ||
| 741 | * more FC sequences (preferably 1). The LLDD is to generate | ||
| 742 | * the XFER_RDY IU(s) corresponding to the data being requested. | ||
| 743 | * Note: the FC-NVME layer may call the WRITEDATA operation | ||
| 744 | * multiple times for longer payloads. | ||
| 745 | * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the | ||
| 746 | * payload data (described by sglist) to the host in 1 or | ||
| 747 | * more FC sequences (preferably 1). If an error occurs during | ||
| 748 | * payload data transmission, the LLDD is to set the | ||
| 749 | * nvmefc_tgt_fcp_req fcp_error and transferred_length field, then | ||
| 750 | * consider the operation complete. On error, the LLDD is to not | ||
| 751 | * transmit the FCP_RSP iu. If all payload data is transferred | ||
| 752 | * successfully, the LLDD is to update the nvmefc_tgt_fcp_req | ||
| 753 | * transferred_length field and may subsequently transmit the | ||
| 754 | * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen). | ||
| 755 | * The LLDD is to await FCP_CONF reception to confirm the RSP | ||
| 756 | * reception by the host. The LLDD may retransmit the FCP_RSP iu | ||
| 757 | * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon | ||
| 758 | * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req | ||
| 759 | * fcp_error field and consider the operation complete. | ||
| 760 | * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload | ||
| 761 | * (described by rspbuf, rspdma, rsplen). The LLDD is to await | ||
| 762 | * FCP_CONF reception to confirm the RSP reception by the host. | ||
| 763 | * The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME. | ||
| 764 | * Upon reception of FCP_CONF, or upon FCP_CONF failure, the | ||
| 765 | * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and | ||
| 766 | * consider the operation complete. | ||
| 767 | * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange | ||
| 768 | * corresponding to the fcp operation. The LLDD shall send | ||
| 769 | * ABTS and follow FC exchange abort-multi rules, including | ||
| 770 | * ABTS retries and possible logout. | ||
| 771 | * Upon completing the indicated operation, the LLDD is to set the | ||
| 772 | * status fields for the operation (transferred_length and fcp_error | ||
| 773 | * status) in the request, then call the "done" routine | ||
| 774 | * indicated in the fcp request. Upon return from the "done" | ||
| 775 | * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation | ||
| 776 | * the fc-nvme layer will no longer reference the fcp request, | ||
| 777 | * allowing the LLDD to free/release the fcp request. | ||
| 778 | * Note: when calling the done routine for READDATA or WRITEDATA | ||
| 779 | * operations, the fc-nvme layer may immediately convert, in the same | ||
| 780 | * thread and before returning to the LLDD, the fcp operation to | ||
| 781 | * the next operation for the fcp io and call the LLDDs fcp_op | ||
| 782 | * call again. If fields in the fcp request are to be accessed post | ||
| 783 | * the done call, the LLDD should save their values prior to calling | ||
| 784 | * the done routine, and inspect the saved values after the done | ||
| 785 | * routine. | ||
| 786 | * Returns 0 on success, -<errno> on failure (Ex: -EIO) | ||
| 787 | * Entrypoint is Mandatory. | ||
| 788 | * | ||
| 789 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD | ||
| 790 | * supports for cpu affinitization. | ||
| 791 | * Value is Mandatory. Must be at least 1. | ||
| 792 | * | ||
| 793 | * @max_sgl_segments: indicates the maximum number of sgl segments supported | ||
| 794 | * by the LLDD | ||
| 795 | * Value is Mandatory. Must be at least 1. Recommend at least 256. | ||
| 796 | * | ||
| 797 | * @max_dif_sgl_segments: indicates the maximum number of sgl segments | ||
| 798 | * supported by the LLDD for DIF operations. | ||
| 799 | * Value is Mandatory. Must be at least 1. Recommend at least 256. | ||
| 800 | * | ||
| 801 | * @dma_boundary: indicates the dma address boundary where dma mappings | ||
| 802 | * will be split across. | ||
| 803 | * Value is Mandatory. Typical value is 0xFFFFFFFF to split across | ||
| 804 | * 4Gig address boundaries | ||
| 805 | * | ||
| 806 | * @target_features: The LLDD sets bits in this field to correspond to | ||
| 807 | * optional features that are supported by the LLDD. | ||
| 808 | * Refer to the NVMET_FCTGTFEAT_xxx values. | ||
| 809 | * Value is Mandatory. Allowed to be zero. | ||
| 810 | * | ||
| 811 | * @target_priv_sz: The LLDD sets this field to the amount of additional | ||
| 812 | * memory that it would like fc nvme layer to allocate on the LLDD's | ||
| 813 | * behalf whenever a targetport is allocated. The additional memory | ||
| 814 | * area is solely for the use of the LLDD and its location is specified by | ||
| 815 | * the targetport->private pointer. | ||
| 816 | * Value is Mandatory. Allowed to be zero. | ||
| 817 | */ | ||
| 818 | struct nvmet_fc_target_template { | ||
| 819 | void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); | ||
| 820 | int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, | ||
| 821 | struct nvmefc_tgt_ls_req *tls_req); | ||
| 822 | int (*fcp_op)(struct nvmet_fc_target_port *tgtport, | ||
| 823 | struct nvmefc_tgt_fcp_req *); | ||
| 824 | |||
| 825 | u32 max_hw_queues; | ||
| 826 | u16 max_sgl_segments; | ||
| 827 | u16 max_dif_sgl_segments; | ||
| 828 | u64 dma_boundary; | ||
| 829 | |||
| 830 | u32 target_features; | ||
| 831 | |||
| 832 | u32 target_priv_sz; | ||
| 833 | }; | ||
| 834 | |||
| 835 | |||
| 836 | int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, | ||
| 837 | struct nvmet_fc_target_template *template, | ||
| 838 | struct device *dev, | ||
| 839 | struct nvmet_fc_target_port **tgtport_p); | ||
| 840 | |||
| 841 | int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); | ||
| 842 | |||
| 843 | int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, | ||
| 844 | struct nvmefc_tgt_ls_req *lsreq, | ||
| 845 | void *lsreqbuf, u32 lsreqbuf_len); | ||
| 846 | |||
| 847 | int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, | ||
| 848 | struct nvmefc_tgt_fcp_req *fcpreq, | ||
| 849 | void *cmdiubuf, u32 cmdiubuf_len); | ||
| 850 | |||
| 851 | #endif /* _NVME_FC_DRIVER_H */ | ||
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h new file mode 100644 index 000000000000..4b45226bd604 --- /dev/null +++ b/include/linux/nvme-fc.h | |||
| @@ -0,0 +1,268 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Avago Technologies. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of version 2 of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful. | ||
| 9 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | ||
| 10 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | ||
| 11 | * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO | ||
| 12 | * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. | ||
| 13 | * See the GNU General Public License for more details, a copy of which | ||
| 14 | * can be found in the file COPYING included with this package | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | /* | ||
| 19 | * This file contains definitions relative to FC-NVME r1.11 and a few | ||
| 20 | * newer items | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef _NVME_FC_H | ||
| 24 | #define _NVME_FC_H 1 | ||
| 25 | |||
| 26 | |||
| 27 | #define NVME_CMD_SCSI_ID 0xFD | ||
| 28 | #define NVME_CMD_FC_ID FC_TYPE_NVME | ||
| 29 | |||
| 30 | /* FC-NVME Cmd IU Flags */ | ||
| 31 | #define FCNVME_CMD_FLAGS_DIRMASK 0x03 | ||
| 32 | #define FCNVME_CMD_FLAGS_WRITE 0x01 | ||
| 33 | #define FCNVME_CMD_FLAGS_READ 0x02 | ||
| 34 | |||
| 35 | struct nvme_fc_cmd_iu { | ||
| 36 | __u8 scsi_id; | ||
| 37 | __u8 fc_id; | ||
| 38 | __be16 iu_len; | ||
| 39 | __u8 rsvd4[3]; | ||
| 40 | __u8 flags; | ||
| 41 | __be64 connection_id; | ||
| 42 | __be32 csn; | ||
| 43 | __be32 data_len; | ||
| 44 | struct nvme_command sqe; | ||
| 45 | __be32 rsvd88[2]; | ||
| 46 | }; | ||
| 47 | |||
| 48 | #define NVME_FC_SIZEOF_ZEROS_RSP 12 | ||
| 49 | |||
| 50 | struct nvme_fc_ersp_iu { | ||
| 51 | __u8 rsvd0[2]; | ||
| 52 | __be16 iu_len; | ||
| 53 | __be32 rsn; | ||
| 54 | __be32 xfrd_len; | ||
| 55 | __be32 rsvd12; | ||
| 56 | struct nvme_completion cqe; | ||
| 57 | /* for now - no additional payload */ | ||
| 58 | }; | ||
| 59 | |||
| 60 | |||
| 61 | /* FC-NVME r1.03/16-119v0 NVME Link Services */ | ||
| 62 | enum { | ||
| 63 | FCNVME_LS_RSVD = 0, | ||
| 64 | FCNVME_LS_RJT = 1, | ||
| 65 | FCNVME_LS_ACC = 2, | ||
| 66 | FCNVME_LS_CREATE_ASSOCIATION = 3, | ||
| 67 | FCNVME_LS_CREATE_CONNECTION = 4, | ||
| 68 | FCNVME_LS_DISCONNECT = 5, | ||
| 69 | }; | ||
| 70 | |||
| 71 | /* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */ | ||
| 72 | enum { | ||
| 73 | FCNVME_LSDESC_RSVD = 0x0, | ||
| 74 | FCNVME_LSDESC_RQST = 0x1, | ||
| 75 | FCNVME_LSDESC_RJT = 0x2, | ||
| 76 | FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3, | ||
| 77 | FCNVME_LSDESC_CREATE_CONN_CMD = 0x4, | ||
| 78 | FCNVME_LSDESC_DISCONN_CMD = 0x5, | ||
| 79 | FCNVME_LSDESC_CONN_ID = 0x6, | ||
| 80 | FCNVME_LSDESC_ASSOC_ID = 0x7, | ||
| 81 | }; | ||
| 82 | |||
| 83 | |||
| 84 | /* ********** start of Link Service Descriptors ********** */ | ||
| 85 | |||
| 86 | |||
| 87 | /* | ||
| 88 | * fills in length of a descriptor. Structure minus descriptor header | ||
| 89 | */ | ||
| 90 | static inline __be32 fcnvme_lsdesc_len(size_t sz) | ||
| 91 | { | ||
| 92 | return cpu_to_be32(sz - (2 * sizeof(u32))); | ||
| 93 | } | ||
| 94 | |||
| 95 | |||
| 96 | struct fcnvme_ls_rqst_w0 { | ||
| 97 | u8 ls_cmd; /* FCNVME_LS_xxx */ | ||
| 98 | u8 zeros[3]; | ||
| 99 | }; | ||
| 100 | |||
| 101 | /* FCNVME_LSDESC_RQST */ | ||
| 102 | struct fcnvme_lsdesc_rqst { | ||
| 103 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 104 | __be32 desc_len; | ||
| 105 | struct fcnvme_ls_rqst_w0 w0; | ||
| 106 | __be32 rsvd12; | ||
| 107 | }; | ||
| 108 | |||
| 109 | |||
| 110 | |||
| 111 | |||
| 112 | /* FCNVME_LSDESC_RJT */ | ||
| 113 | struct fcnvme_lsdesc_rjt { | ||
| 114 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 115 | __be32 desc_len; | ||
| 116 | u8 rsvd8; | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Reject reason and explanation codes are generic | ||
| 120 | * to ELS's from LS-3. | ||
| 121 | */ | ||
| 122 | u8 reason_code; | ||
| 123 | u8 reason_explanation; | ||
| 124 | |||
| 125 | u8 vendor; | ||
| 126 | __be32 rsvd12; | ||
| 127 | }; | ||
| 128 | |||
| 129 | |||
| 130 | #define FCNVME_ASSOC_HOSTID_LEN 64 | ||
| 131 | #define FCNVME_ASSOC_HOSTNQN_LEN 256 | ||
| 132 | #define FCNVME_ASSOC_SUBNQN_LEN 256 | ||
| 133 | |||
| 134 | /* FCNVME_LSDESC_CREATE_ASSOC_CMD */ | ||
| 135 | struct fcnvme_lsdesc_cr_assoc_cmd { | ||
| 136 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 137 | __be32 desc_len; | ||
| 138 | __be16 ersp_ratio; | ||
| 139 | __be16 rsvd10; | ||
| 140 | __be32 rsvd12[9]; | ||
| 141 | __be16 cntlid; | ||
| 142 | __be16 sqsize; | ||
| 143 | __be32 rsvd52; | ||
| 144 | u8 hostid[FCNVME_ASSOC_HOSTID_LEN]; | ||
| 145 | u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN]; | ||
| 146 | u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN]; | ||
| 147 | u8 rsvd632[384]; | ||
| 148 | }; | ||
| 149 | |||
| 150 | /* FCNVME_LSDESC_CREATE_CONN_CMD */ | ||
| 151 | struct fcnvme_lsdesc_cr_conn_cmd { | ||
| 152 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 153 | __be32 desc_len; | ||
| 154 | __be16 ersp_ratio; | ||
| 155 | __be16 rsvd10; | ||
| 156 | __be32 rsvd12[9]; | ||
| 157 | __be16 qid; | ||
| 158 | __be16 sqsize; | ||
| 159 | __be32 rsvd52; | ||
| 160 | }; | ||
| 161 | |||
| 162 | /* Disconnect Scope Values */ | ||
| 163 | enum { | ||
| 164 | FCNVME_DISCONN_ASSOCIATION = 0, | ||
| 165 | FCNVME_DISCONN_CONNECTION = 1, | ||
| 166 | }; | ||
| 167 | |||
| 168 | /* FCNVME_LSDESC_DISCONN_CMD */ | ||
| 169 | struct fcnvme_lsdesc_disconn_cmd { | ||
| 170 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 171 | __be32 desc_len; | ||
| 172 | u8 rsvd8[3]; | ||
| 173 | /* note: scope is really a 1 bit field */ | ||
| 174 | u8 scope; /* FCNVME_DISCONN_xxx */ | ||
| 175 | __be32 rsvd12; | ||
| 176 | __be64 id; | ||
| 177 | }; | ||
| 178 | |||
| 179 | /* FCNVME_LSDESC_CONN_ID */ | ||
| 180 | struct fcnvme_lsdesc_conn_id { | ||
| 181 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 182 | __be32 desc_len; | ||
| 183 | __be64 connection_id; | ||
| 184 | }; | ||
| 185 | |||
| 186 | /* FCNVME_LSDESC_ASSOC_ID */ | ||
| 187 | struct fcnvme_lsdesc_assoc_id { | ||
| 188 | __be32 desc_tag; /* FCNVME_LSDESC_xxx */ | ||
| 189 | __be32 desc_len; | ||
| 190 | __be64 association_id; | ||
| 191 | }; | ||
| 192 | |||
| 193 | /* r_ctl values */ | ||
| 194 | enum { | ||
| 195 | FCNVME_RS_RCTL_DATA = 1, | ||
| 196 | FCNVME_RS_RCTL_XFER_RDY = 5, | ||
| 197 | FCNVME_RS_RCTL_RSP = 8, | ||
| 198 | }; | ||
| 199 | |||
| 200 | |||
| 201 | /* ********** start of Link Services ********** */ | ||
| 202 | |||
| 203 | |||
| 204 | /* FCNVME_LS_RJT */ | ||
| 205 | struct fcnvme_ls_rjt { | ||
| 206 | struct fcnvme_ls_rqst_w0 w0; | ||
| 207 | __be32 desc_list_len; | ||
| 208 | struct fcnvme_lsdesc_rqst rqst; | ||
| 209 | struct fcnvme_lsdesc_rjt rjt; | ||
| 210 | }; | ||
| 211 | |||
| 212 | /* FCNVME_LS_ACC */ | ||
| 213 | struct fcnvme_ls_acc_hdr { | ||
| 214 | struct fcnvme_ls_rqst_w0 w0; | ||
| 215 | __be32 desc_list_len; | ||
| 216 | struct fcnvme_lsdesc_rqst rqst; | ||
| 217 | /* Followed by cmd-specific ACC descriptors, see next definitions */ | ||
| 218 | }; | ||
| 219 | |||
| 220 | /* FCNVME_LS_CREATE_ASSOCIATION */ | ||
| 221 | struct fcnvme_ls_cr_assoc_rqst { | ||
| 222 | struct fcnvme_ls_rqst_w0 w0; | ||
| 223 | __be32 desc_list_len; | ||
| 224 | struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd; | ||
| 225 | }; | ||
| 226 | |||
| 227 | struct fcnvme_ls_cr_assoc_acc { | ||
| 228 | struct fcnvme_ls_acc_hdr hdr; | ||
| 229 | struct fcnvme_lsdesc_assoc_id associd; | ||
| 230 | struct fcnvme_lsdesc_conn_id connectid; | ||
| 231 | }; | ||
| 232 | |||
| 233 | |||
| 234 | /* FCNVME_LS_CREATE_CONNECTION */ | ||
| 235 | struct fcnvme_ls_cr_conn_rqst { | ||
| 236 | struct fcnvme_ls_rqst_w0 w0; | ||
| 237 | __be32 desc_list_len; | ||
| 238 | struct fcnvme_lsdesc_assoc_id associd; | ||
| 239 | struct fcnvme_lsdesc_cr_conn_cmd connect_cmd; | ||
| 240 | }; | ||
| 241 | |||
| 242 | struct fcnvme_ls_cr_conn_acc { | ||
| 243 | struct fcnvme_ls_acc_hdr hdr; | ||
| 244 | struct fcnvme_lsdesc_conn_id connectid; | ||
| 245 | }; | ||
| 246 | |||
| 247 | /* FCNVME_LS_DISCONNECT */ | ||
| 248 | struct fcnvme_ls_disconnect_rqst { | ||
| 249 | struct fcnvme_ls_rqst_w0 w0; | ||
| 250 | __be32 desc_list_len; | ||
| 251 | struct fcnvme_lsdesc_assoc_id associd; | ||
| 252 | struct fcnvme_lsdesc_disconn_cmd discon_cmd; | ||
| 253 | }; | ||
| 254 | |||
| 255 | struct fcnvme_ls_disconnect_acc { | ||
| 256 | struct fcnvme_ls_acc_hdr hdr; | ||
| 257 | }; | ||
| 258 | |||
| 259 | |||
| 260 | /* | ||
| 261 | * Yet to be defined in FC-NVME: | ||
| 262 | */ | ||
| 263 | #define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */ | ||
| 264 | #define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */ | ||
| 265 | #define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */ | ||
| 266 | |||
| 267 | |||
| 268 | #endif /* _NVME_FC_H */ | ||
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index fc3c24206593..3d1c6f1b15c9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -242,6 +242,7 @@ enum { | |||
| 242 | NVME_CTRL_ONCS_COMPARE = 1 << 0, | 242 | NVME_CTRL_ONCS_COMPARE = 1 << 0, |
| 243 | NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, | 243 | NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, |
| 244 | NVME_CTRL_ONCS_DSM = 1 << 2, | 244 | NVME_CTRL_ONCS_DSM = 1 << 2, |
| 245 | NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, | ||
| 245 | NVME_CTRL_VWC_PRESENT = 1 << 0, | 246 | NVME_CTRL_VWC_PRESENT = 1 << 0, |
| 246 | }; | 247 | }; |
| 247 | 248 | ||
| @@ -558,6 +559,23 @@ struct nvme_dsm_range { | |||
| 558 | __le64 slba; | 559 | __le64 slba; |
| 559 | }; | 560 | }; |
| 560 | 561 | ||
| 562 | struct nvme_write_zeroes_cmd { | ||
| 563 | __u8 opcode; | ||
| 564 | __u8 flags; | ||
| 565 | __u16 command_id; | ||
| 566 | __le32 nsid; | ||
| 567 | __u64 rsvd2; | ||
| 568 | __le64 metadata; | ||
| 569 | union nvme_data_ptr dptr; | ||
| 570 | __le64 slba; | ||
| 571 | __le16 length; | ||
| 572 | __le16 control; | ||
| 573 | __le32 dsmgmt; | ||
| 574 | __le32 reftag; | ||
| 575 | __le16 apptag; | ||
| 576 | __le16 appmask; | ||
| 577 | }; | ||
| 578 | |||
| 561 | /* Admin commands */ | 579 | /* Admin commands */ |
| 562 | 580 | ||
| 563 | enum nvme_admin_opcode { | 581 | enum nvme_admin_opcode { |
| @@ -857,6 +875,7 @@ struct nvme_command { | |||
| 857 | struct nvme_download_firmware dlfw; | 875 | struct nvme_download_firmware dlfw; |
| 858 | struct nvme_format_cmd format; | 876 | struct nvme_format_cmd format; |
| 859 | struct nvme_dsm_cmd dsm; | 877 | struct nvme_dsm_cmd dsm; |
| 878 | struct nvme_write_zeroes_cmd write_zeroes; | ||
| 860 | struct nvme_abort_cmd abort; | 879 | struct nvme_abort_cmd abort; |
| 861 | struct nvme_get_log_page_command get_log_page; | 880 | struct nvme_get_log_page_command get_log_page; |
| 862 | struct nvmf_common_command fabrics; | 881 | struct nvmf_common_command fabrics; |
| @@ -947,6 +966,7 @@ enum { | |||
| 947 | NVME_SC_BAD_ATTRIBUTES = 0x180, | 966 | NVME_SC_BAD_ATTRIBUTES = 0x180, |
| 948 | NVME_SC_INVALID_PI = 0x181, | 967 | NVME_SC_INVALID_PI = 0x181, |
| 949 | NVME_SC_READ_ONLY = 0x182, | 968 | NVME_SC_READ_ONLY = 0x182, |
| 969 | NVME_SC_ONCS_NOT_SUPPORTED = 0x183, | ||
| 950 | 970 | ||
| 951 | /* | 971 | /* |
| 952 | * I/O Command Set Specific - Fabrics commands: | 972 | * I/O Command Set Specific - Fabrics commands: |
| @@ -973,17 +993,30 @@ enum { | |||
| 973 | NVME_SC_UNWRITTEN_BLOCK = 0x287, | 993 | NVME_SC_UNWRITTEN_BLOCK = 0x287, |
| 974 | 994 | ||
| 975 | NVME_SC_DNR = 0x4000, | 995 | NVME_SC_DNR = 0x4000, |
| 996 | |||
| 997 | |||
| 998 | /* | ||
| 999 | * FC Transport-specific error status values for NVME commands | ||
| 1000 | * | ||
| 1001 | * Transport-specific status code values must be in the range 0xB0..0xBF | ||
| 1002 | */ | ||
| 1003 | |||
| 1004 | /* Generic FC failure - catchall */ | ||
| 1005 | NVME_SC_FC_TRANSPORT_ERROR = 0x00B0, | ||
| 1006 | |||
| 1007 | /* I/O failure due to FC ABTS'd */ | ||
| 1008 | NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1, | ||
| 976 | }; | 1009 | }; |
| 977 | 1010 | ||
| 978 | struct nvme_completion { | 1011 | struct nvme_completion { |
| 979 | /* | 1012 | /* |
| 980 | * Used by Admin and Fabrics commands to return data: | 1013 | * Used by Admin and Fabrics commands to return data: |
| 981 | */ | 1014 | */ |
| 982 | union { | 1015 | union nvme_result { |
| 983 | __le16 result16; | 1016 | __le16 u16; |
| 984 | __le32 result; | 1017 | __le32 u32; |
| 985 | __le64 result64; | 1018 | __le64 u64; |
| 986 | }; | 1019 | } result; |
| 987 | __le16 sq_head; /* how much of this queue may be reclaimed */ | 1020 | __le16 sq_head; /* how much of this queue may be reclaimed */ |
| 988 | __le16 sq_id; /* submission queue that generated this entry */ | 1021 | __le16 sq_id; /* submission queue that generated this entry */ |
| 989 | __u16 command_id; /* of the command which completed */ | 1022 | __u16 command_id; /* of the command which completed */ |
diff --git a/include/linux/of.h b/include/linux/of.h index 299aeb192727..d72f01009297 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node | |||
| 1266 | * Overlay support | 1266 | * Overlay support |
| 1267 | */ | 1267 | */ |
| 1268 | 1268 | ||
| 1269 | enum of_overlay_notify_action { | ||
| 1270 | OF_OVERLAY_PRE_APPLY, | ||
| 1271 | OF_OVERLAY_POST_APPLY, | ||
| 1272 | OF_OVERLAY_PRE_REMOVE, | ||
| 1273 | OF_OVERLAY_POST_REMOVE, | ||
| 1274 | }; | ||
| 1275 | |||
| 1276 | struct of_overlay_notify_data { | ||
| 1277 | struct device_node *overlay; | ||
| 1278 | struct device_node *target; | ||
| 1279 | }; | ||
| 1280 | |||
| 1269 | #ifdef CONFIG_OF_OVERLAY | 1281 | #ifdef CONFIG_OF_OVERLAY |
| 1270 | 1282 | ||
| 1271 | /* ID based overlays; the API for external users */ | 1283 | /* ID based overlays; the API for external users */ |
| @@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree); | |||
| 1273 | int of_overlay_destroy(int id); | 1285 | int of_overlay_destroy(int id); |
| 1274 | int of_overlay_destroy_all(void); | 1286 | int of_overlay_destroy_all(void); |
| 1275 | 1287 | ||
| 1288 | int of_overlay_notifier_register(struct notifier_block *nb); | ||
| 1289 | int of_overlay_notifier_unregister(struct notifier_block *nb); | ||
| 1290 | |||
| 1276 | #else | 1291 | #else |
| 1277 | 1292 | ||
| 1278 | static inline int of_overlay_create(struct device_node *tree) | 1293 | static inline int of_overlay_create(struct device_node *tree) |
| @@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void) | |||
| 1290 | return -ENOTSUPP; | 1305 | return -ENOTSUPP; |
| 1291 | } | 1306 | } |
| 1292 | 1307 | ||
| 1308 | static inline int of_overlay_notifier_register(struct notifier_block *nb) | ||
| 1309 | { | ||
| 1310 | return 0; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static inline int of_overlay_notifier_unregister(struct notifier_block *nb) | ||
| 1314 | { | ||
| 1315 | return 0; | ||
| 1316 | } | ||
| 1317 | |||
| 1293 | #endif | 1318 | #endif |
| 1294 | 1319 | ||
| 1295 | #endif /* _LINUX_OF_H */ | 1320 | #endif /* _LINUX_OF_H */ |
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 4341f32516d8..271b3fdf0070 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h | |||
| @@ -71,6 +71,7 @@ extern int early_init_dt_scan_chosen_stdout(void); | |||
| 71 | extern void early_init_fdt_scan_reserved_mem(void); | 71 | extern void early_init_fdt_scan_reserved_mem(void); |
| 72 | extern void early_init_fdt_reserve_self(void); | 72 | extern void early_init_fdt_reserve_self(void); |
| 73 | extern void early_init_dt_add_memory_arch(u64 base, u64 size); | 73 | extern void early_init_dt_add_memory_arch(u64 base, u64 size); |
| 74 | extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); | ||
| 74 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, | 75 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, |
| 75 | bool no_map); | 76 | bool no_map); |
| 76 | extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); | 77 | extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); |
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index e80b9c762a03..6a7fc5051099 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h | |||
| @@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
| 31 | 31 | ||
| 32 | #endif /* CONFIG_OF_IOMMU */ | 32 | #endif /* CONFIG_OF_IOMMU */ |
| 33 | 33 | ||
| 34 | void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); | 34 | static inline void of_iommu_set_ops(struct device_node *np, |
| 35 | const struct iommu_ops *of_iommu_get_ops(struct device_node *np); | 35 | const struct iommu_ops *ops) |
| 36 | { | ||
| 37 | iommu_register_instance(&np->fwnode, ops); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np) | ||
| 41 | { | ||
| 42 | return iommu_get_instance(&np->fwnode); | ||
| 43 | } | ||
| 36 | 44 | ||
| 37 | extern struct of_device_id __iommu_of_table; | 45 | extern struct of_device_id __iommu_of_table; |
| 38 | 46 | ||
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 7fd5cfce9140..0e0974eceb80 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
| @@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np); | |||
| 16 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); | 16 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); |
| 17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); | 17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); |
| 18 | int of_get_pci_domain_nr(struct device_node *node); | 18 | int of_get_pci_domain_nr(struct device_node *node); |
| 19 | int of_pci_get_max_link_speed(struct device_node *node); | ||
| 19 | void of_pci_check_probe_only(void); | 20 | void of_pci_check_probe_only(void); |
| 20 | int of_pci_map_rid(struct device_node *np, u32 rid, | 21 | int of_pci_map_rid(struct device_node *np, u32 rid, |
| 21 | const char *map_name, const char *map_mask_name, | 22 | const char *map_name, const char *map_mask_name, |
| @@ -62,6 +63,12 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid, | |||
| 62 | return -EINVAL; | 63 | return -EINVAL; |
| 63 | } | 64 | } |
| 64 | 65 | ||
| 66 | static inline int | ||
| 67 | of_pci_get_max_link_speed(struct device_node *node) | ||
| 68 | { | ||
| 69 | return -EINVAL; | ||
| 70 | } | ||
| 71 | |||
| 65 | static inline void of_pci_check_probe_only(void) { } | 72 | static inline void of_pci_check_probe_only(void) { } |
| 66 | #endif | 73 | #endif |
| 67 | 74 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 74e4dda91238..c56b39890a41 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -73,6 +73,7 @@ | |||
| 73 | */ | 73 | */ |
| 74 | enum pageflags { | 74 | enum pageflags { |
| 75 | PG_locked, /* Page is locked. Don't touch. */ | 75 | PG_locked, /* Page is locked. Don't touch. */ |
| 76 | PG_waiters, /* Page has waiters, check its waitqueue */ | ||
| 76 | PG_error, | 77 | PG_error, |
| 77 | PG_referenced, | 78 | PG_referenced, |
| 78 | PG_uptodate, | 79 | PG_uptodate, |
| @@ -87,7 +88,6 @@ enum pageflags { | |||
| 87 | PG_private_2, /* If pagecache, has fs aux data */ | 88 | PG_private_2, /* If pagecache, has fs aux data */ |
| 88 | PG_writeback, /* Page is under writeback */ | 89 | PG_writeback, /* Page is under writeback */ |
| 89 | PG_head, /* A head page */ | 90 | PG_head, /* A head page */ |
| 90 | PG_swapcache, /* Swap page: swp_entry_t in private */ | ||
| 91 | PG_mappedtodisk, /* Has blocks allocated on-disk */ | 91 | PG_mappedtodisk, /* Has blocks allocated on-disk */ |
| 92 | PG_reclaim, /* To be reclaimed asap */ | 92 | PG_reclaim, /* To be reclaimed asap */ |
| 93 | PG_swapbacked, /* Page is backed by RAM/swap */ | 93 | PG_swapbacked, /* Page is backed by RAM/swap */ |
| @@ -110,6 +110,9 @@ enum pageflags { | |||
| 110 | /* Filesystems */ | 110 | /* Filesystems */ |
| 111 | PG_checked = PG_owner_priv_1, | 111 | PG_checked = PG_owner_priv_1, |
| 112 | 112 | ||
| 113 | /* SwapBacked */ | ||
| 114 | PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ | ||
| 115 | |||
| 113 | /* Two page bits are conscripted by FS-Cache to maintain local caching | 116 | /* Two page bits are conscripted by FS-Cache to maintain local caching |
| 114 | * state. These bits are set on pages belonging to the netfs's inodes | 117 | * state. These bits are set on pages belonging to the netfs's inodes |
| 115 | * when those inodes are being locally cached. | 118 | * when those inodes are being locally cached. |
| @@ -167,6 +170,9 @@ static __always_inline int PageCompound(struct page *page) | |||
| 167 | * for compound page all operations related to the page flag applied to | 170 | * for compound page all operations related to the page flag applied to |
| 168 | * head page. | 171 | * head page. |
| 169 | * | 172 | * |
| 173 | * PF_ONLY_HEAD: | ||
| 174 | * for compound page, callers only ever operate on the head page. | ||
| 175 | * | ||
| 170 | * PF_NO_TAIL: | 176 | * PF_NO_TAIL: |
| 171 | * modifications of the page flag must be done on small or head pages, | 177 | * modifications of the page flag must be done on small or head pages, |
| 172 | * checks can be done on tail pages too. | 178 | * checks can be done on tail pages too. |
| @@ -176,6 +182,9 @@ static __always_inline int PageCompound(struct page *page) | |||
| 176 | */ | 182 | */ |
| 177 | #define PF_ANY(page, enforce) page | 183 | #define PF_ANY(page, enforce) page |
| 178 | #define PF_HEAD(page, enforce) compound_head(page) | 184 | #define PF_HEAD(page, enforce) compound_head(page) |
| 185 | #define PF_ONLY_HEAD(page, enforce) ({ \ | ||
| 186 | VM_BUG_ON_PGFLAGS(PageTail(page), page); \ | ||
| 187 | page;}) | ||
| 179 | #define PF_NO_TAIL(page, enforce) ({ \ | 188 | #define PF_NO_TAIL(page, enforce) ({ \ |
| 180 | VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ | 189 | VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ |
| 181 | compound_head(page);}) | 190 | compound_head(page);}) |
| @@ -253,6 +262,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } | |||
| 253 | TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) | 262 | TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) |
| 254 | 263 | ||
| 255 | __PAGEFLAG(Locked, locked, PF_NO_TAIL) | 264 | __PAGEFLAG(Locked, locked, PF_NO_TAIL) |
| 265 | PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) | ||
| 256 | PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) | 266 | PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) |
| 257 | PAGEFLAG(Referenced, referenced, PF_HEAD) | 267 | PAGEFLAG(Referenced, referenced, PF_HEAD) |
| 258 | TESTCLEARFLAG(Referenced, referenced, PF_HEAD) | 268 | TESTCLEARFLAG(Referenced, referenced, PF_HEAD) |
| @@ -314,7 +324,13 @@ PAGEFLAG_FALSE(HighMem) | |||
| 314 | #endif | 324 | #endif |
| 315 | 325 | ||
| 316 | #ifdef CONFIG_SWAP | 326 | #ifdef CONFIG_SWAP |
| 317 | PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) | 327 | static __always_inline int PageSwapCache(struct page *page) |
| 328 | { | ||
| 329 | return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags); | ||
| 330 | |||
| 331 | } | ||
| 332 | SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) | ||
| 333 | CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) | ||
| 318 | #else | 334 | #else |
| 319 | PAGEFLAG_FALSE(SwapCache) | 335 | PAGEFLAG_FALSE(SwapCache) |
| 320 | #endif | 336 | #endif |
| @@ -701,12 +717,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) | |||
| 701 | * Flags checked when a page is freed. Pages being freed should not have | 717 | * Flags checked when a page is freed. Pages being freed should not have |
| 702 | * these flags set. It they are, there is a problem. | 718 | * these flags set. It they are, there is a problem. |
| 703 | */ | 719 | */ |
| 704 | #define PAGE_FLAGS_CHECK_AT_FREE \ | 720 | #define PAGE_FLAGS_CHECK_AT_FREE \ |
| 705 | (1UL << PG_lru | 1UL << PG_locked | \ | 721 | (1UL << PG_lru | 1UL << PG_locked | \ |
| 706 | 1UL << PG_private | 1UL << PG_private_2 | \ | 722 | 1UL << PG_private | 1UL << PG_private_2 | \ |
| 707 | 1UL << PG_writeback | 1UL << PG_reserved | \ | 723 | 1UL << PG_writeback | 1UL << PG_reserved | \ |
| 708 | 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ | 724 | 1UL << PG_slab | 1UL << PG_active | \ |
| 709 | 1UL << PG_unevictable | __PG_MLOCKED) | 725 | 1UL << PG_unevictable | __PG_MLOCKED) |
| 710 | 726 | ||
| 711 | /* | 727 | /* |
| 712 | * Flags checked when a page is prepped for return by the page allocator. | 728 | * Flags checked when a page is prepped for return by the page allocator. |
| @@ -735,6 +751,7 @@ static inline int page_has_private(struct page *page) | |||
| 735 | 751 | ||
| 736 | #undef PF_ANY | 752 | #undef PF_ANY |
| 737 | #undef PF_HEAD | 753 | #undef PF_HEAD |
| 754 | #undef PF_ONLY_HEAD | ||
| 738 | #undef PF_NO_TAIL | 755 | #undef PF_NO_TAIL |
| 739 | #undef PF_NO_COMPOUND | 756 | #undef PF_NO_COMPOUND |
| 740 | #endif /* !__GENERATING_BOUNDS_H */ | 757 | #endif /* !__GENERATING_BOUNDS_H */ |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7dbe9148b2f8..324c8dbad1e1 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
| 10 | #include <linux/highmem.h> | 10 | #include <linux/highmem.h> |
| 11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
| 12 | #include <asm/uaccess.h> | 12 | #include <linux/uaccess.h> |
| 13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
| 14 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
| 15 | #include <linux/hardirq.h> /* for in_interrupt() */ | 15 | #include <linux/hardirq.h> /* for in_interrupt() */ |
| @@ -486,22 +486,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, | |||
| 486 | * and for filesystems which need to wait on PG_private. | 486 | * and for filesystems which need to wait on PG_private. |
| 487 | */ | 487 | */ |
| 488 | extern void wait_on_page_bit(struct page *page, int bit_nr); | 488 | extern void wait_on_page_bit(struct page *page, int bit_nr); |
| 489 | |||
| 490 | extern int wait_on_page_bit_killable(struct page *page, int bit_nr); | 489 | extern int wait_on_page_bit_killable(struct page *page, int bit_nr); |
| 491 | extern int wait_on_page_bit_killable_timeout(struct page *page, | 490 | extern void wake_up_page_bit(struct page *page, int bit_nr); |
| 492 | int bit_nr, unsigned long timeout); | ||
| 493 | |||
| 494 | static inline int wait_on_page_locked_killable(struct page *page) | ||
| 495 | { | ||
| 496 | if (!PageLocked(page)) | ||
| 497 | return 0; | ||
| 498 | return wait_on_page_bit_killable(compound_head(page), PG_locked); | ||
| 499 | } | ||
| 500 | 491 | ||
| 501 | extern wait_queue_head_t *page_waitqueue(struct page *page); | ||
| 502 | static inline void wake_up_page(struct page *page, int bit) | 492 | static inline void wake_up_page(struct page *page, int bit) |
| 503 | { | 493 | { |
| 504 | __wake_up_bit(page_waitqueue(page), &page->flags, bit); | 494 | if (!PageWaiters(page)) |
| 495 | return; | ||
| 496 | wake_up_page_bit(page, bit); | ||
| 505 | } | 497 | } |
| 506 | 498 | ||
| 507 | /* | 499 | /* |
| @@ -517,6 +509,13 @@ static inline void wait_on_page_locked(struct page *page) | |||
| 517 | wait_on_page_bit(compound_head(page), PG_locked); | 509 | wait_on_page_bit(compound_head(page), PG_locked); |
| 518 | } | 510 | } |
| 519 | 511 | ||
| 512 | static inline int wait_on_page_locked_killable(struct page *page) | ||
| 513 | { | ||
| 514 | if (!PageLocked(page)) | ||
| 515 | return 0; | ||
| 516 | return wait_on_page_bit_killable(compound_head(page), PG_locked); | ||
| 517 | } | ||
| 518 | |||
| 520 | /* | 519 | /* |
| 521 | * Wait for a page to complete writeback | 520 | * Wait for a page to complete writeback |
| 522 | */ | 521 | */ |
diff --git a/include/linux/parser.h b/include/linux/parser.h index 39d5b7955b23..884c1e6eb3fe 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h | |||
| @@ -27,6 +27,7 @@ typedef struct { | |||
| 27 | 27 | ||
| 28 | int match_token(char *, const match_table_t table, substring_t args[]); | 28 | int match_token(char *, const match_table_t table, substring_t args[]); |
| 29 | int match_int(substring_t *, int *result); | 29 | int match_int(substring_t *, int *result); |
| 30 | int match_u64(substring_t *, u64 *result); | ||
| 30 | int match_octal(substring_t *, int *result); | 31 | int match_octal(substring_t *, int *result); |
| 31 | int match_hex(substring_t *, int *result); | 32 | int match_hex(substring_t *, int *result); |
| 32 | bool match_wildcard(const char *pattern, const char *str); | 33 | bool match_wildcard(const char *pattern, const char *str); |
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 7d63a66e8ed4..7a4e83a8c89c 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h | |||
| @@ -24,7 +24,9 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) | |||
| 24 | } | 24 | } |
| 25 | extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); | 25 | extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); |
| 26 | 26 | ||
| 27 | extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res); | 27 | struct pci_ecam_ops; |
| 28 | extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, | ||
| 29 | struct pci_ecam_ops **ecam_ops); | ||
| 28 | 30 | ||
| 29 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) | 31 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) |
| 30 | { | 32 | { |
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 7adad206b1f4..f0d2b9451270 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h | |||
| @@ -59,6 +59,15 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
| 59 | /* default ECAM ops */ | 59 | /* default ECAM ops */ |
| 60 | extern struct pci_ecam_ops pci_generic_ecam_ops; | 60 | extern struct pci_ecam_ops pci_generic_ecam_ops; |
| 61 | 61 | ||
| 62 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
| 63 | extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ | ||
| 64 | extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ | ||
| 65 | extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ | ||
| 66 | extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ | ||
| 67 | extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ | ||
| 68 | extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ | ||
| 69 | #endif | ||
| 70 | |||
| 62 | #ifdef CONFIG_PCI_HOST_GENERIC | 71 | #ifdef CONFIG_PCI_HOST_GENERIC |
| 63 | /* for DT-based PCI controllers that support ECAM */ | 72 | /* for DT-based PCI controllers that support ECAM */ |
| 64 | int pci_host_common_probe(struct platform_device *pdev, | 73 | int pci_host_common_probe(struct platform_device *pdev, |
diff --git a/include/linux/pci.h b/include/linux/pci.h index a38772a85588..e2d1a124216a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -244,6 +244,7 @@ struct pci_cap_saved_state { | |||
| 244 | struct pci_cap_saved_data cap; | 244 | struct pci_cap_saved_data cap; |
| 245 | }; | 245 | }; |
| 246 | 246 | ||
| 247 | struct irq_affinity; | ||
| 247 | struct pcie_link_state; | 248 | struct pcie_link_state; |
| 248 | struct pci_vpd; | 249 | struct pci_vpd; |
| 249 | struct pci_sriov; | 250 | struct pci_sriov; |
| @@ -332,7 +333,6 @@ struct pci_dev { | |||
| 332 | * directly, use the values stored here. They might be different! | 333 | * directly, use the values stored here. They might be different! |
| 333 | */ | 334 | */ |
| 334 | unsigned int irq; | 335 | unsigned int irq; |
| 335 | struct cpumask *irq_affinity; | ||
| 336 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ | 336 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ |
| 337 | 337 | ||
| 338 | bool match_driver; /* Skip attaching driver */ | 338 | bool match_driver; /* Skip attaching driver */ |
| @@ -420,9 +420,13 @@ static inline int pci_channel_offline(struct pci_dev *pdev) | |||
| 420 | struct pci_host_bridge { | 420 | struct pci_host_bridge { |
| 421 | struct device dev; | 421 | struct device dev; |
| 422 | struct pci_bus *bus; /* root bus */ | 422 | struct pci_bus *bus; /* root bus */ |
| 423 | struct pci_ops *ops; | ||
| 424 | void *sysdata; | ||
| 425 | int busnr; | ||
| 423 | struct list_head windows; /* resource_entry */ | 426 | struct list_head windows; /* resource_entry */ |
| 424 | void (*release_fn)(struct pci_host_bridge *); | 427 | void (*release_fn)(struct pci_host_bridge *); |
| 425 | void *release_data; | 428 | void *release_data; |
| 429 | struct msi_controller *msi; | ||
| 426 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ | 430 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ |
| 427 | /* Resource alignment requirements */ | 431 | /* Resource alignment requirements */ |
| 428 | resource_size_t (*align_resource)(struct pci_dev *dev, | 432 | resource_size_t (*align_resource)(struct pci_dev *dev, |
| @@ -430,10 +434,23 @@ struct pci_host_bridge { | |||
| 430 | resource_size_t start, | 434 | resource_size_t start, |
| 431 | resource_size_t size, | 435 | resource_size_t size, |
| 432 | resource_size_t align); | 436 | resource_size_t align); |
| 437 | unsigned long private[0] ____cacheline_aligned; | ||
| 433 | }; | 438 | }; |
| 434 | 439 | ||
| 435 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) | 440 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) |
| 436 | 441 | ||
| 442 | static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge) | ||
| 443 | { | ||
| 444 | return (void *)bridge->private; | ||
| 445 | } | ||
| 446 | |||
| 447 | static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv) | ||
| 448 | { | ||
| 449 | return container_of(priv, struct pci_host_bridge, private); | ||
| 450 | } | ||
| 451 | |||
| 452 | struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); | ||
| 453 | int pci_register_host_bridge(struct pci_host_bridge *bridge); | ||
| 437 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); | 454 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); |
| 438 | 455 | ||
| 439 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | 456 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, |
| @@ -1310,8 +1327,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, | |||
| 1310 | return rc; | 1327 | return rc; |
| 1311 | return 0; | 1328 | return 0; |
| 1312 | } | 1329 | } |
| 1313 | int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, | 1330 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1314 | unsigned int max_vecs, unsigned int flags); | 1331 | unsigned int max_vecs, unsigned int flags, |
| 1332 | const struct irq_affinity *affd); | ||
| 1333 | |||
| 1315 | void pci_free_irq_vectors(struct pci_dev *dev); | 1334 | void pci_free_irq_vectors(struct pci_dev *dev); |
| 1316 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); | 1335 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); |
| 1317 | const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); | 1336 | const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); |
| @@ -1339,14 +1358,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev, | |||
| 1339 | static inline int pci_enable_msix_exact(struct pci_dev *dev, | 1358 | static inline int pci_enable_msix_exact(struct pci_dev *dev, |
| 1340 | struct msix_entry *entries, int nvec) | 1359 | struct msix_entry *entries, int nvec) |
| 1341 | { return -ENOSYS; } | 1360 | { return -ENOSYS; } |
| 1342 | static inline int pci_alloc_irq_vectors(struct pci_dev *dev, | 1361 | |
| 1343 | unsigned int min_vecs, unsigned int max_vecs, | 1362 | static inline int |
| 1344 | unsigned int flags) | 1363 | pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1364 | unsigned int max_vecs, unsigned int flags, | ||
| 1365 | const struct irq_affinity *aff_desc) | ||
| 1345 | { | 1366 | { |
| 1346 | if (min_vecs > 1) | 1367 | if (min_vecs > 1) |
| 1347 | return -EINVAL; | 1368 | return -EINVAL; |
| 1348 | return 1; | 1369 | return 1; |
| 1349 | } | 1370 | } |
| 1371 | |||
| 1350 | static inline void pci_free_irq_vectors(struct pci_dev *dev) | 1372 | static inline void pci_free_irq_vectors(struct pci_dev *dev) |
| 1351 | { | 1373 | { |
| 1352 | } | 1374 | } |
| @@ -1364,6 +1386,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, | |||
| 1364 | } | 1386 | } |
| 1365 | #endif | 1387 | #endif |
| 1366 | 1388 | ||
| 1389 | static inline int | ||
| 1390 | pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, | ||
| 1391 | unsigned int max_vecs, unsigned int flags) | ||
| 1392 | { | ||
| 1393 | return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, | ||
| 1394 | NULL); | ||
| 1395 | } | ||
| 1396 | |||
| 1367 | #ifdef CONFIG_PCIEPORTBUS | 1397 | #ifdef CONFIG_PCIEPORTBUS |
| 1368 | extern bool pcie_ports_disabled; | 1398 | extern bool pcie_ports_disabled; |
| 1369 | extern bool pcie_ports_auto; | 1399 | extern bool pcie_ports_auto; |
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 8c7895061121..2e855afa0212 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
| @@ -176,6 +176,7 @@ struct hotplug_params { | |||
| 176 | #ifdef CONFIG_ACPI | 176 | #ifdef CONFIG_ACPI |
| 177 | #include <linux/acpi.h> | 177 | #include <linux/acpi.h> |
| 178 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); | 178 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); |
| 179 | bool pciehp_is_native(struct pci_dev *pdev); | ||
| 179 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); | 180 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); |
| 180 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); | 181 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); |
| 181 | int acpi_pci_detect_ejectable(acpi_handle handle); | 182 | int acpi_pci_detect_ejectable(acpi_handle handle); |
| @@ -185,5 +186,6 @@ static inline int pci_get_hp_params(struct pci_dev *dev, | |||
| 185 | { | 186 | { |
| 186 | return -ENODEV; | 187 | return -ENODEV; |
| 187 | } | 188 | } |
| 189 | static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; } | ||
| 188 | #endif | 190 | #endif |
| 189 | #endif | 191 | #endif |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c58752fe16c4..73dda0edcb97 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -23,8 +23,10 @@ | |||
| 23 | #define PCI_CLASS_STORAGE_SATA 0x0106 | 23 | #define PCI_CLASS_STORAGE_SATA 0x0106 |
| 24 | #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 | 24 | #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 |
| 25 | #define PCI_CLASS_STORAGE_SAS 0x0107 | 25 | #define PCI_CLASS_STORAGE_SAS 0x0107 |
| 26 | #define PCI_CLASS_STORAGE_EXPRESS 0x010802 | ||
| 26 | #define PCI_CLASS_STORAGE_OTHER 0x0180 | 27 | #define PCI_CLASS_STORAGE_OTHER 0x0180 |
| 27 | 28 | ||
| 29 | |||
| 28 | #define PCI_BASE_CLASS_NETWORK 0x02 | 30 | #define PCI_BASE_CLASS_NETWORK 0x02 |
| 29 | #define PCI_CLASS_NETWORK_ETHERNET 0x0200 | 31 | #define PCI_CLASS_NETWORK_ETHERNET 0x0200 |
| 30 | #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 | 32 | #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 |
| @@ -2251,17 +2253,35 @@ | |||
| 2251 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 | 2253 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 |
| 2252 | 2254 | ||
| 2253 | #define PCI_VENDOR_ID_VMWARE 0x15ad | 2255 | #define PCI_VENDOR_ID_VMWARE 0x15ad |
| 2256 | #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 | ||
| 2254 | 2257 | ||
| 2255 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 | 2258 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 |
| 2256 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 | 2259 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 |
| 2257 | 2260 | ||
| 2258 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 | 2261 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 |
| 2259 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 | 2262 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 |
| 2263 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 | ||
| 2264 | #define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 | ||
| 2265 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 | ||
| 2266 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 | ||
| 2267 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 | ||
| 2260 | #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 | 2268 | #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 |
| 2261 | #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 | 2269 | #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c |
| 2262 | #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 | 2270 | #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 |
| 2263 | #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c | 2271 | #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 |
| 2264 | #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 | 2272 | #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 |
| 2273 | #define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 | ||
| 2274 | #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a | ||
| 2275 | #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 | ||
| 2276 | #define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 | ||
| 2277 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 | ||
| 2278 | #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 | ||
| 2279 | #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c | ||
| 2280 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 | ||
| 2281 | #define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 | ||
| 2282 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a | ||
| 2283 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 | ||
| 2284 | #define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e | ||
| 2265 | 2285 | ||
| 2266 | #define PCI_VENDOR_ID_DFI 0x15bd | 2286 | #define PCI_VENDOR_ID_DFI 0x15bd |
| 2267 | 2287 | ||
diff --git a/include/linux/phy.h b/include/linux/phy.h index e25f1830fbcf..f7d95f644eed 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
| 26 | #include <linux/workqueue.h> | 26 | #include <linux/workqueue.h> |
| 27 | #include <linux/mod_devicetable.h> | 27 | #include <linux/mod_devicetable.h> |
| 28 | #include <linux/phy_led_triggers.h> | ||
| 28 | 29 | ||
| 29 | #include <linux/atomic.h> | 30 | #include <linux/atomic.h> |
| 30 | 31 | ||
| @@ -85,6 +86,21 @@ typedef enum { | |||
| 85 | } phy_interface_t; | 86 | } phy_interface_t; |
| 86 | 87 | ||
| 87 | /** | 88 | /** |
| 89 | * phy_supported_speeds - return all speeds currently supported by a phy device | ||
| 90 | * @phy: The phy device to return supported speeds of. | ||
| 91 | * @speeds: buffer to store supported speeds in. | ||
| 92 | * @size: size of speeds buffer. | ||
| 93 | * | ||
| 94 | * Description: Returns the number of supported speeds, and | ||
| 95 | * fills the speeds * buffer with the supported speeds. If speeds buffer is | ||
| 96 | * too small to contain * all currently supported speeds, will return as | ||
| 97 | * many speeds as can fit. | ||
| 98 | */ | ||
| 99 | unsigned int phy_supported_speeds(struct phy_device *phy, | ||
| 100 | unsigned int *speeds, | ||
| 101 | unsigned int size); | ||
| 102 | |||
| 103 | /** | ||
| 88 | * It maps 'enum phy_interface_t' found in include/linux/phy.h | 104 | * It maps 'enum phy_interface_t' found in include/linux/phy.h |
| 89 | * into the device tree binding of 'phy-mode', so that Ethernet | 105 | * into the device tree binding of 'phy-mode', so that Ethernet |
| 90 | * device driver can get phy interface from device tree. | 106 | * device driver can get phy interface from device tree. |
| @@ -343,7 +359,7 @@ struct phy_c45_device_ids { | |||
| 343 | * giving up on the current attempt at acquiring a link | 359 | * giving up on the current attempt at acquiring a link |
| 344 | * irq: IRQ number of the PHY's interrupt (-1 if none) | 360 | * irq: IRQ number of the PHY's interrupt (-1 if none) |
| 345 | * phy_timer: The timer for handling the state machine | 361 | * phy_timer: The timer for handling the state machine |
| 346 | * phy_queue: A work_queue for the interrupt | 362 | * phy_queue: A work_queue for the phy_mac_interrupt |
| 347 | * attached_dev: The attached enet driver's device instance ptr | 363 | * attached_dev: The attached enet driver's device instance ptr |
| 348 | * adjust_link: Callback for the enet controller to respond to | 364 | * adjust_link: Callback for the enet controller to respond to |
| 349 | * changes in the link state. | 365 | * changes in the link state. |
| @@ -401,10 +417,19 @@ struct phy_device { | |||
| 401 | u32 advertising; | 417 | u32 advertising; |
| 402 | u32 lp_advertising; | 418 | u32 lp_advertising; |
| 403 | 419 | ||
| 420 | /* Energy efficient ethernet modes which should be prohibited */ | ||
| 421 | u32 eee_broken_modes; | ||
| 422 | |||
| 404 | int autoneg; | 423 | int autoneg; |
| 405 | 424 | ||
| 406 | int link_timeout; | 425 | int link_timeout; |
| 407 | 426 | ||
| 427 | #ifdef CONFIG_LED_TRIGGER_PHY | ||
| 428 | struct phy_led_trigger *phy_led_triggers; | ||
| 429 | unsigned int phy_num_led_triggers; | ||
| 430 | struct phy_led_trigger *last_triggered; | ||
| 431 | #endif | ||
| 432 | |||
| 408 | /* | 433 | /* |
| 409 | * Interrupt number for this PHY | 434 | * Interrupt number for this PHY |
| 410 | * -1 means no interrupt | 435 | * -1 means no interrupt |
| @@ -425,6 +450,7 @@ struct phy_device { | |||
| 425 | struct net_device *attached_dev; | 450 | struct net_device *attached_dev; |
| 426 | 451 | ||
| 427 | u8 mdix; | 452 | u8 mdix; |
| 453 | u8 mdix_ctrl; | ||
| 428 | 454 | ||
| 429 | void (*adjust_link)(struct net_device *dev); | 455 | void (*adjust_link)(struct net_device *dev); |
| 430 | }; | 456 | }; |
| @@ -589,6 +615,13 @@ struct phy_driver { | |||
| 589 | void (*get_strings)(struct phy_device *dev, u8 *data); | 615 | void (*get_strings)(struct phy_device *dev, u8 *data); |
| 590 | void (*get_stats)(struct phy_device *dev, | 616 | void (*get_stats)(struct phy_device *dev, |
| 591 | struct ethtool_stats *stats, u64 *data); | 617 | struct ethtool_stats *stats, u64 *data); |
| 618 | |||
| 619 | /* Get and Set PHY tunables */ | ||
| 620 | int (*get_tunable)(struct phy_device *dev, | ||
| 621 | struct ethtool_tunable *tuna, void *data); | ||
| 622 | int (*set_tunable)(struct phy_device *dev, | ||
| 623 | struct ethtool_tunable *tuna, | ||
| 624 | const void *data); | ||
| 592 | }; | 625 | }; |
| 593 | #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ | 626 | #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ |
| 594 | struct phy_driver, mdiodrv) | 627 | struct phy_driver, mdiodrv) |
| @@ -764,6 +797,7 @@ void phy_detach(struct phy_device *phydev); | |||
| 764 | void phy_start(struct phy_device *phydev); | 797 | void phy_start(struct phy_device *phydev); |
| 765 | void phy_stop(struct phy_device *phydev); | 798 | void phy_stop(struct phy_device *phydev); |
| 766 | int phy_start_aneg(struct phy_device *phydev); | 799 | int phy_start_aneg(struct phy_device *phydev); |
| 800 | int phy_aneg_done(struct phy_device *phydev); | ||
| 767 | 801 | ||
| 768 | int phy_stop_interrupts(struct phy_device *phydev); | 802 | int phy_stop_interrupts(struct phy_device *phydev); |
| 769 | 803 | ||
| @@ -802,7 +836,8 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); | |||
| 802 | int phy_drivers_register(struct phy_driver *new_driver, int n, | 836 | int phy_drivers_register(struct phy_driver *new_driver, int n, |
| 803 | struct module *owner); | 837 | struct module *owner); |
| 804 | void phy_state_machine(struct work_struct *work); | 838 | void phy_state_machine(struct work_struct *work); |
| 805 | void phy_change(struct work_struct *work); | 839 | void phy_change(struct phy_device *phydev); |
| 840 | void phy_change_work(struct work_struct *work); | ||
| 806 | void phy_mac_interrupt(struct phy_device *phydev, int new_link); | 841 | void phy_mac_interrupt(struct phy_device *phydev, int new_link); |
| 807 | void phy_start_machine(struct phy_device *phydev); | 842 | void phy_start_machine(struct phy_device *phydev); |
| 808 | void phy_stop_machine(struct phy_device *phydev); | 843 | void phy_stop_machine(struct phy_device *phydev); |
| @@ -825,6 +860,10 @@ int phy_register_fixup_for_id(const char *bus_id, | |||
| 825 | int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, | 860 | int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, |
| 826 | int (*run)(struct phy_device *)); | 861 | int (*run)(struct phy_device *)); |
| 827 | 862 | ||
| 863 | int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); | ||
| 864 | int phy_unregister_fixup_for_id(const char *bus_id); | ||
| 865 | int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); | ||
| 866 | |||
| 828 | int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); | 867 | int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); |
| 829 | int phy_get_eee_err(struct phy_device *phydev); | 868 | int phy_get_eee_err(struct phy_device *phydev); |
| 830 | int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); | 869 | int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); |
| @@ -836,6 +875,7 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, | |||
| 836 | struct ethtool_link_ksettings *cmd); | 875 | struct ethtool_link_ksettings *cmd); |
| 837 | int phy_ethtool_set_link_ksettings(struct net_device *ndev, | 876 | int phy_ethtool_set_link_ksettings(struct net_device *ndev, |
| 838 | const struct ethtool_link_ksettings *cmd); | 877 | const struct ethtool_link_ksettings *cmd); |
| 878 | int phy_ethtool_nway_reset(struct net_device *ndev); | ||
| 839 | 879 | ||
| 840 | int __init mdio_bus_init(void); | 880 | int __init mdio_bus_init(void); |
| 841 | void mdio_bus_exit(void); | 881 | void mdio_bus_exit(void); |
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h index 9d18e9f948e9..35c070ea6ea3 100644 --- a/include/linux/phy/phy-qcom-ufs.h +++ b/include/linux/phy/phy-qcom-ufs.h | |||
| @@ -18,22 +18,6 @@ | |||
| 18 | #include "phy.h" | 18 | #include "phy.h" |
| 19 | 19 | ||
| 20 | /** | 20 | /** |
| 21 | * ufs_qcom_phy_enable_ref_clk() - Enable the phy | ||
| 22 | * ref clock. | ||
| 23 | * @phy: reference to a generic phy | ||
| 24 | * | ||
| 25 | * returns 0 for success, and non-zero for error. | ||
| 26 | */ | ||
| 27 | int ufs_qcom_phy_enable_ref_clk(struct phy *phy); | ||
| 28 | |||
| 29 | /** | ||
| 30 | * ufs_qcom_phy_disable_ref_clk() - Disable the phy | ||
| 31 | * ref clock. | ||
| 32 | * @phy: reference to a generic phy. | ||
| 33 | */ | ||
| 34 | void ufs_qcom_phy_disable_ref_clk(struct phy *phy); | ||
| 35 | |||
| 36 | /** | ||
| 37 | * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device | 21 | * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device |
| 38 | * ref clock. | 22 | * ref clock. |
| 39 | * @phy: reference to a generic phy. | 23 | * @phy: reference to a generic phy. |
| @@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); | |||
| 47 | */ | 31 | */ |
| 48 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); | 32 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); |
| 49 | 33 | ||
| 50 | int ufs_qcom_phy_enable_iface_clk(struct phy *phy); | ||
| 51 | void ufs_qcom_phy_disable_iface_clk(struct phy *phy); | ||
| 52 | int ufs_qcom_phy_start_serdes(struct phy *phy); | 34 | int ufs_qcom_phy_start_serdes(struct phy *phy); |
| 53 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); | 35 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); |
| 54 | int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B); | 36 | int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B); |
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h new file mode 100644 index 000000000000..a2daea0a37d2 --- /dev/null +++ b/include/linux/phy_led_triggers.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | /* Copyright (C) 2016 National Instruments Corp. | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License as published by | ||
| 5 | * the Free Software Foundation; either version 2 of the License, or | ||
| 6 | * (at your option) any later version. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | #ifndef __PHY_LED_TRIGGERS | ||
| 14 | #define __PHY_LED_TRIGGERS | ||
| 15 | |||
| 16 | struct phy_device; | ||
| 17 | |||
| 18 | #ifdef CONFIG_LED_TRIGGER_PHY | ||
| 19 | |||
| 20 | #include <linux/leds.h> | ||
| 21 | |||
| 22 | #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 | ||
| 23 | #define PHY_MII_BUS_ID_SIZE (20 - 3) | ||
| 24 | |||
| 25 | #define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ | ||
| 26 | FIELD_SIZEOF(struct mdio_device, addr)+\ | ||
| 27 | PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) | ||
| 28 | |||
| 29 | struct phy_led_trigger { | ||
| 30 | struct led_trigger trigger; | ||
| 31 | char name[PHY_LINK_LED_TRIGGER_NAME_SIZE]; | ||
| 32 | unsigned int speed; | ||
| 33 | }; | ||
| 34 | |||
| 35 | |||
| 36 | extern int phy_led_triggers_register(struct phy_device *phy); | ||
| 37 | extern void phy_led_triggers_unregister(struct phy_device *phy); | ||
| 38 | extern void phy_led_trigger_change_speed(struct phy_device *phy); | ||
| 39 | |||
| 40 | #else | ||
| 41 | |||
| 42 | static inline int phy_led_triggers_register(struct phy_device *phy) | ||
| 43 | { | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | static inline void phy_led_triggers_unregister(struct phy_device *phy) { } | ||
| 47 | static inline void phy_led_trigger_change_speed(struct phy_device *phy) { } | ||
| 48 | |||
| 49 | #endif | ||
| 50 | |||
| 51 | #endif | ||
diff --git a/include/linux/pim.h b/include/linux/pim.h index e1d756f81348..0e81b2778ae0 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef __LINUX_PIM_H | 1 | #ifndef __LINUX_PIM_H |
| 2 | #define __LINUX_PIM_H | 2 | #define __LINUX_PIM_H |
| 3 | 3 | ||
| 4 | #include <linux/skbuff.h> | ||
| 4 | #include <asm/byteorder.h> | 5 | #include <asm/byteorder.h> |
| 5 | 6 | ||
| 6 | /* Message types - V1 */ | 7 | /* Message types - V1 */ |
| @@ -9,24 +10,86 @@ | |||
| 9 | 10 | ||
| 10 | /* Message types - V2 */ | 11 | /* Message types - V2 */ |
| 11 | #define PIM_VERSION 2 | 12 | #define PIM_VERSION 2 |
| 12 | #define PIM_REGISTER 1 | 13 | |
| 14 | /* RFC7761, sec 4.9: | ||
| 15 | * Type | ||
| 16 | * Types for specific PIM messages. PIM Types are: | ||
| 17 | * | ||
| 18 | * Message Type Destination | ||
| 19 | * --------------------------------------------------------------------- | ||
| 20 | * 0 = Hello Multicast to ALL-PIM-ROUTERS | ||
| 21 | * 1 = Register Unicast to RP | ||
| 22 | * 2 = Register-Stop Unicast to source of Register | ||
| 23 | * packet | ||
| 24 | * 3 = Join/Prune Multicast to ALL-PIM-ROUTERS | ||
| 25 | * 4 = Bootstrap Multicast to ALL-PIM-ROUTERS | ||
| 26 | * 5 = Assert Multicast to ALL-PIM-ROUTERS | ||
| 27 | * 6 = Graft (used in PIM-DM only) Unicast to RPF'(S) | ||
| 28 | * 7 = Graft-Ack (used in PIM-DM only) Unicast to source of Graft | ||
| 29 | * packet | ||
| 30 | * 8 = Candidate-RP-Advertisement Unicast to Domain's BSR | ||
| 31 | */ | ||
| 32 | enum { | ||
| 33 | PIM_TYPE_HELLO, | ||
| 34 | PIM_TYPE_REGISTER, | ||
| 35 | PIM_TYPE_REGISTER_STOP, | ||
| 36 | PIM_TYPE_JOIN_PRUNE, | ||
| 37 | PIM_TYPE_BOOTSTRAP, | ||
| 38 | PIM_TYPE_ASSERT, | ||
| 39 | PIM_TYPE_GRAFT, | ||
| 40 | PIM_TYPE_GRAFT_ACK, | ||
| 41 | PIM_TYPE_CANDIDATE_RP_ADV | ||
| 42 | }; | ||
| 13 | 43 | ||
| 14 | #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) | 44 | #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) |
| 15 | 45 | ||
| 16 | static inline bool ipmr_pimsm_enabled(void) | 46 | /* RFC7761, sec 4.9: |
| 17 | { | 47 | * The PIM header common to all PIM messages is: |
| 18 | return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); | 48 | * 0 1 2 3 |
| 19 | } | 49 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
| 50 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
| 51 | * |PIM Ver| Type | Reserved | Checksum | | ||
| 52 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
| 53 | */ | ||
| 54 | struct pimhdr { | ||
| 55 | __u8 type; | ||
| 56 | __u8 reserved; | ||
| 57 | __be16 csum; | ||
| 58 | }; | ||
| 20 | 59 | ||
| 21 | /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ | 60 | /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ |
| 22 | struct pimreghdr | 61 | struct pimreghdr { |
| 23 | { | ||
| 24 | __u8 type; | 62 | __u8 type; |
| 25 | __u8 reserved; | 63 | __u8 reserved; |
| 26 | __be16 csum; | 64 | __be16 csum; |
| 27 | __be32 flags; | 65 | __be32 flags; |
| 28 | }; | 66 | }; |
| 29 | 67 | ||
| 30 | struct sk_buff; | 68 | int pim_rcv_v1(struct sk_buff *skb); |
| 31 | extern int pim_rcv_v1(struct sk_buff *); | 69 | |
| 70 | static inline bool ipmr_pimsm_enabled(void) | ||
| 71 | { | ||
| 72 | return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); | ||
| 73 | } | ||
| 74 | |||
| 75 | static inline struct pimhdr *pim_hdr(const struct sk_buff *skb) | ||
| 76 | { | ||
| 77 | return (struct pimhdr *)skb_transport_header(skb); | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline u8 pim_hdr_version(const struct pimhdr *pimhdr) | ||
| 81 | { | ||
| 82 | return pimhdr->type >> 4; | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline u8 pim_hdr_type(const struct pimhdr *pimhdr) | ||
| 86 | { | ||
| 87 | return pimhdr->type & 0xf; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */ | ||
| 91 | static inline bool pim_ipv4_all_pim_routers(__be32 addr) | ||
| 92 | { | ||
| 93 | return addr == htonl(0xE000000D); | ||
| 94 | } | ||
| 32 | #endif | 95 | #endif |
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 5f0e11e7354c..e69e415d0d98 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 15 | 15 | ||
| 16 | #define DW_DMA_MAX_NR_MASTERS 4 | 16 | #define DW_DMA_MAX_NR_MASTERS 4 |
| 17 | #define DW_DMA_MAX_NR_CHANNELS 8 | ||
| 17 | 18 | ||
| 18 | /** | 19 | /** |
| 19 | * struct dw_dma_slave - Controller-specific information about a slave | 20 | * struct dw_dma_slave - Controller-specific information about a slave |
| @@ -40,19 +41,18 @@ struct dw_dma_slave { | |||
| 40 | * @is_private: The device channels should be marked as private and not for | 41 | * @is_private: The device channels should be marked as private and not for |
| 41 | * by the general purpose DMA channel allocator. | 42 | * by the general purpose DMA channel allocator. |
| 42 | * @is_memcpy: The device channels do support memory-to-memory transfers. | 43 | * @is_memcpy: The device channels do support memory-to-memory transfers. |
| 43 | * @is_nollp: The device channels does not support multi block transfers. | ||
| 44 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 44 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
| 45 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 45 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
| 46 | * @block_size: Maximum block size supported by the controller | 46 | * @block_size: Maximum block size supported by the controller |
| 47 | * @nr_masters: Number of AHB masters supported by the controller | 47 | * @nr_masters: Number of AHB masters supported by the controller |
| 48 | * @data_width: Maximum data width supported by hardware per AHB master | 48 | * @data_width: Maximum data width supported by hardware per AHB master |
| 49 | * (in bytes, power of 2) | 49 | * (in bytes, power of 2) |
| 50 | * @multi_block: Multi block transfers supported by hardware per channel. | ||
| 50 | */ | 51 | */ |
| 51 | struct dw_dma_platform_data { | 52 | struct dw_dma_platform_data { |
| 52 | unsigned int nr_channels; | 53 | unsigned int nr_channels; |
| 53 | bool is_private; | 54 | bool is_private; |
| 54 | bool is_memcpy; | 55 | bool is_memcpy; |
| 55 | bool is_nollp; | ||
| 56 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 56 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
| 57 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 57 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
| 58 | unsigned char chan_allocation_order; | 58 | unsigned char chan_allocation_order; |
| @@ -62,6 +62,7 @@ struct dw_dma_platform_data { | |||
| 62 | unsigned int block_size; | 62 | unsigned int block_size; |
| 63 | unsigned char nr_masters; | 63 | unsigned char nr_masters; |
| 64 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | 64 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; |
| 65 | unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; | ||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | #endif /* _PLATFORM_DATA_DMA_DW_H */ | 68 | #endif /* _PLATFORM_DATA_DMA_DW_H */ |
diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h deleted file mode 100644 index 0a03b0944411..000000000000 --- a/include/linux/platform_data/drv260x-pdata.h +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Platform data for DRV260X haptics driver family | ||
| 3 | * | ||
| 4 | * Author: Dan Murphy <dmurphy@ti.com> | ||
| 5 | * | ||
| 6 | * Copyright: (C) 2014 Texas Instruments, Inc. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef _LINUX_DRV260X_PDATA_H | ||
| 19 | #define _LINUX_DRV260X_PDATA_H | ||
| 20 | |||
| 21 | struct drv260x_platform_data { | ||
| 22 | u32 library_selection; | ||
| 23 | u32 mode; | ||
| 24 | u32 vib_rated_voltage; | ||
| 25 | u32 vib_overdrive_voltage; | ||
| 26 | }; | ||
| 27 | |||
| 28 | #endif | ||
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h index 21b15f6fee25..7815d50c26ff 100644 --- a/include/linux/platform_data/macb.h +++ b/include/linux/platform_data/macb.h | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | #ifndef __MACB_PDATA_H__ | 8 | #ifndef __MACB_PDATA_H__ |
| 9 | #define __MACB_PDATA_H__ | 9 | #define __MACB_PDATA_H__ |
| 10 | 10 | ||
| 11 | #include <linux/clk.h> | ||
| 12 | |||
| 11 | /** | 13 | /** |
| 12 | * struct macb_platform_data - platform data for MACB Ethernet | 14 | * struct macb_platform_data - platform data for MACB Ethernet |
| 13 | * @phy_mask: phy mask passed when register the MDIO bus | 15 | * @phy_mask: phy mask passed when register the MDIO bus |
| @@ -15,12 +17,16 @@ | |||
| 15 | * @phy_irq_pin: PHY IRQ | 17 | * @phy_irq_pin: PHY IRQ |
| 16 | * @is_rmii: using RMII interface? | 18 | * @is_rmii: using RMII interface? |
| 17 | * @rev_eth_addr: reverse Ethernet address byte order | 19 | * @rev_eth_addr: reverse Ethernet address byte order |
| 20 | * @pclk: platform clock | ||
| 21 | * @hclk: AHB clock | ||
| 18 | */ | 22 | */ |
| 19 | struct macb_platform_data { | 23 | struct macb_platform_data { |
| 20 | u32 phy_mask; | 24 | u32 phy_mask; |
| 21 | int phy_irq_pin; | 25 | int phy_irq_pin; |
| 22 | u8 is_rmii; | 26 | u8 is_rmii; |
| 23 | u8 rev_eth_addr; | 27 | u8 rev_eth_addr; |
| 28 | struct clk *pclk; | ||
| 29 | struct clk *hclk; | ||
| 24 | }; | 30 | }; |
| 25 | 31 | ||
| 26 | #endif /* __MACB_PDATA_H__ */ | 32 | #endif /* __MACB_PDATA_H__ */ |
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h new file mode 100644 index 000000000000..e4cfcffaa6f4 --- /dev/null +++ b/include/linux/platform_data/mlxcpld-hotplug.h | |||
| @@ -0,0 +1,99 @@ | |||
| 1 | /* | ||
| 2 | * include/linux/platform_data/mlxcpld-hotplug.h | ||
| 3 | * Copyright (c) 2016 Mellanox Technologies. All rights reserved. | ||
| 4 | * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> | ||
| 5 | * | ||
| 6 | * Redistribution and use in source and binary forms, with or without | ||
| 7 | * modification, are permitted provided that the following conditions are met: | ||
| 8 | * | ||
| 9 | * 1. Redistributions of source code must retain the above copyright | ||
| 10 | * notice, this list of conditions and the following disclaimer. | ||
| 11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 12 | * notice, this list of conditions and the following disclaimer in the | ||
| 13 | * documentation and/or other materials provided with the distribution. | ||
| 14 | * 3. Neither the names of the copyright holders nor the names of its | ||
| 15 | * contributors may be used to endorse or promote products derived from | ||
| 16 | * this software without specific prior written permission. | ||
| 17 | * | ||
| 18 | * Alternatively, this software may be distributed under the terms of the | ||
| 19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
| 20 | * Software Foundation. | ||
| 21 | * | ||
| 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
| 26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
| 27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
| 28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
| 29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
| 30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
| 31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 32 | * POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | */ | ||
| 34 | |||
| 35 | #ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H | ||
| 36 | #define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H | ||
| 37 | |||
| 38 | /** | ||
| 39 | * struct mlxcpld_hotplug_device - I2C device data: | ||
| 40 | * @adapter: I2C device adapter; | ||
| 41 | * @client: I2C device client; | ||
| 42 | * @brdinfo: device board information; | ||
| 43 | * @bus: I2C bus, where device is attached; | ||
| 44 | * | ||
| 45 | * Structure represents I2C hotplug device static data (board topology) and | ||
| 46 | * dynamic data (related kernel objects handles). | ||
| 47 | */ | ||
| 48 | struct mlxcpld_hotplug_device { | ||
| 49 | struct i2c_adapter *adapter; | ||
| 50 | struct i2c_client *client; | ||
| 51 | struct i2c_board_info brdinfo; | ||
| 52 | u16 bus; | ||
| 53 | }; | ||
| 54 | |||
| 55 | /** | ||
| 56 | * struct mlxcpld_hotplug_platform_data - device platform data: | ||
| 57 | * @top_aggr_offset: offset of top aggregation interrupt register; | ||
| 58 | * @top_aggr_mask: top aggregation interrupt common mask; | ||
| 59 | * @top_aggr_psu_mask: top aggregation interrupt PSU mask; | ||
| 60 | * @psu_reg_offset: offset of PSU interrupt register; | ||
| 61 | * @psu_mask: PSU interrupt mask; | ||
| 62 | * @psu_count: number of equipped replaceable PSUs; | ||
| 63 | * @psu: pointer to PSU devices data array; | ||
| 64 | * @top_aggr_pwr_mask: top aggregation interrupt power mask; | ||
| 65 | * @pwr_reg_offset: offset of power interrupt register | ||
| 66 | * @pwr_mask: power interrupt mask; | ||
| 67 | * @pwr_count: number of power sources; | ||
| 68 | * @pwr: pointer to power devices data array; | ||
| 69 | * @top_aggr_fan_mask: top aggregation interrupt FAN mask; | ||
| 70 | * @fan_reg_offset: offset of FAN interrupt register; | ||
| 71 | * @fan_mask: FAN interrupt mask; | ||
| 72 | * @fan_count: number of equipped replaceable FANs; | ||
| 73 | * @fan: pointer to FAN devices data array; | ||
| 74 | * | ||
| 75 | * Structure represents board platform data, related to system hotplug events, | ||
| 76 | * like FAN, PSU, power cable insertion and removing. This data provides the | ||
| 77 | * number of hot-pluggable devices and hardware description for event handling. | ||
| 78 | */ | ||
| 79 | struct mlxcpld_hotplug_platform_data { | ||
| 80 | u16 top_aggr_offset; | ||
| 81 | u8 top_aggr_mask; | ||
| 82 | u8 top_aggr_psu_mask; | ||
| 83 | u16 psu_reg_offset; | ||
| 84 | u8 psu_mask; | ||
| 85 | u8 psu_count; | ||
| 86 | struct mlxcpld_hotplug_device *psu; | ||
| 87 | u8 top_aggr_pwr_mask; | ||
| 88 | u16 pwr_reg_offset; | ||
| 89 | u8 pwr_mask; | ||
| 90 | u8 pwr_count; | ||
| 91 | struct mlxcpld_hotplug_device *pwr; | ||
| 92 | u8 top_aggr_fan_mask; | ||
| 93 | u16 fan_reg_offset; | ||
| 94 | u8 fan_mask; | ||
| 95 | u8 fan_count; | ||
| 96 | struct mlxcpld_hotplug_device *fan; | ||
| 97 | }; | ||
| 98 | |||
| 99 | #endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */ | ||
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index c55e42ee57fa..f01659026b26 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h | |||
| @@ -12,9 +12,10 @@ | |||
| 12 | #ifndef __MTD_NAND_S3C2410_H | 12 | #ifndef __MTD_NAND_S3C2410_H |
| 13 | #define __MTD_NAND_S3C2410_H | 13 | #define __MTD_NAND_S3C2410_H |
| 14 | 14 | ||
| 15 | #include <linux/mtd/nand.h> | ||
| 16 | |||
| 15 | /** | 17 | /** |
| 16 | * struct s3c2410_nand_set - define a set of one or more nand chips | 18 | * struct s3c2410_nand_set - define a set of one or more nand chips |
| 17 | * @disable_ecc: Entirely disable ECC - Dangerous | ||
| 18 | * @flash_bbt: Openmoko u-boot can create a Bad Block Table | 19 | * @flash_bbt: Openmoko u-boot can create a Bad Block Table |
| 19 | * Setting this flag will allow the kernel to | 20 | * Setting this flag will allow the kernel to |
| 20 | * look for it at boot time and also skip the NAND | 21 | * look for it at boot time and also skip the NAND |
| @@ -31,7 +32,6 @@ | |||
| 31 | * a warning at boot time. | 32 | * a warning at boot time. |
| 32 | */ | 33 | */ |
| 33 | struct s3c2410_nand_set { | 34 | struct s3c2410_nand_set { |
| 34 | unsigned int disable_ecc:1; | ||
| 35 | unsigned int flash_bbt:1; | 35 | unsigned int flash_bbt:1; |
| 36 | 36 | ||
| 37 | unsigned int options; | 37 | unsigned int options; |
| @@ -40,6 +40,7 @@ struct s3c2410_nand_set { | |||
| 40 | char *name; | 40 | char *name; |
| 41 | int *nr_map; | 41 | int *nr_map; |
| 42 | struct mtd_partition *partitions; | 42 | struct mtd_partition *partitions; |
| 43 | struct device_node *of_node; | ||
| 43 | }; | 44 | }; |
| 44 | 45 | ||
| 45 | struct s3c2410_platform_nand { | 46 | struct s3c2410_platform_nand { |
| @@ -51,6 +52,8 @@ struct s3c2410_platform_nand { | |||
| 51 | 52 | ||
| 52 | unsigned int ignore_unset_ecc:1; | 53 | unsigned int ignore_unset_ecc:1; |
| 53 | 54 | ||
| 55 | nand_ecc_modes_t ecc_mode; | ||
| 56 | |||
| 54 | int nr_sets; | 57 | int nr_sets; |
| 55 | struct s3c2410_nand_set *sets; | 58 | struct s3c2410_nand_set *sets; |
| 56 | 59 | ||
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h index 9bb63ac13f04..171a271c2cbd 100644 --- a/include/linux/platform_data/spi-ep93xx.h +++ b/include/linux/platform_data/spi-ep93xx.h | |||
| @@ -5,25 +5,14 @@ struct spi_device; | |||
| 5 | 5 | ||
| 6 | /** | 6 | /** |
| 7 | * struct ep93xx_spi_info - EP93xx specific SPI descriptor | 7 | * struct ep93xx_spi_info - EP93xx specific SPI descriptor |
| 8 | * @num_chipselect: number of chip selects on this board, must be | 8 | * @chipselect: array of gpio numbers to use as chip selects |
| 9 | * at least one | 9 | * @num_chipselect: ARRAY_SIZE(chipselect) |
| 10 | * @use_dma: use DMA for the transfers | 10 | * @use_dma: use DMA for the transfers |
| 11 | */ | 11 | */ |
| 12 | struct ep93xx_spi_info { | 12 | struct ep93xx_spi_info { |
| 13 | int *chipselect; | ||
| 13 | int num_chipselect; | 14 | int num_chipselect; |
| 14 | bool use_dma; | 15 | bool use_dma; |
| 15 | }; | 16 | }; |
| 16 | 17 | ||
| 17 | /** | ||
| 18 | * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device | ||
| 19 | * @setup: setup the chip select mechanism | ||
| 20 | * @cleanup: cleanup the chip select mechanism | ||
| 21 | * @cs_control: control the device chip select | ||
| 22 | */ | ||
| 23 | struct ep93xx_spi_chip_ops { | ||
| 24 | int (*setup)(struct spi_device *spi); | ||
| 25 | void (*cleanup)(struct spi_device *spi); | ||
| 26 | void (*cs_control)(struct spi_device *spi, int value); | ||
| 27 | }; | ||
| 28 | |||
| 29 | #endif /* __ASM_MACH_EP93XX_SPI_H */ | 18 | #endif /* __ASM_MACH_EP93XX_SPI_H */ |
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 5c1e21c87270..da79774078a7 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h | |||
| @@ -40,9 +40,6 @@ struct s3c64xx_spi_info { | |||
| 40 | int num_cs; | 40 | int num_cs; |
| 41 | bool no_cs; | 41 | bool no_cs; |
| 42 | int (*cfg_gpio)(void); | 42 | int (*cfg_gpio)(void); |
| 43 | dma_filter_fn filter; | ||
| 44 | void *dma_tx; | ||
| 45 | void *dma_rx; | ||
| 46 | }; | 43 | }; |
| 47 | 44 | ||
| 48 | /** | 45 | /** |
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h index e0bc4abe69c2..0926e99f2e8f 100644 --- a/include/linux/platform_data/usb-davinci.h +++ b/include/linux/platform_data/usb-davinci.h | |||
| @@ -11,29 +11,6 @@ | |||
| 11 | #ifndef __ASM_ARCH_USB_H | 11 | #ifndef __ASM_ARCH_USB_H |
| 12 | #define __ASM_ARCH_USB_H | 12 | #define __ASM_ARCH_USB_H |
| 13 | 13 | ||
| 14 | /* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */ | ||
| 15 | #define CFGCHIP2_PHYCLKGD (1 << 17) | ||
| 16 | #define CFGCHIP2_VBUSSENSE (1 << 16) | ||
| 17 | #define CFGCHIP2_RESET (1 << 15) | ||
| 18 | #define CFGCHIP2_OTGMODE (3 << 13) | ||
| 19 | #define CFGCHIP2_NO_OVERRIDE (0 << 13) | ||
| 20 | #define CFGCHIP2_FORCE_HOST (1 << 13) | ||
| 21 | #define CFGCHIP2_FORCE_DEVICE (2 << 13) | ||
| 22 | #define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13) | ||
| 23 | #define CFGCHIP2_USB1PHYCLKMUX (1 << 12) | ||
| 24 | #define CFGCHIP2_USB2PHYCLKMUX (1 << 11) | ||
| 25 | #define CFGCHIP2_PHYPWRDN (1 << 10) | ||
| 26 | #define CFGCHIP2_OTGPWRDN (1 << 9) | ||
| 27 | #define CFGCHIP2_DATPOL (1 << 8) | ||
| 28 | #define CFGCHIP2_USB1SUSPENDM (1 << 7) | ||
| 29 | #define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */ | ||
| 30 | #define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */ | ||
| 31 | #define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus comparator */ | ||
| 32 | #define CFGCHIP2_REFFREQ (0xf << 0) | ||
| 33 | #define CFGCHIP2_REFFREQ_12MHZ (1 << 0) | ||
| 34 | #define CFGCHIP2_REFFREQ_24MHZ (2 << 0) | ||
| 35 | #define CFGCHIP2_REFFREQ_48MHZ (3 << 0) | ||
| 36 | |||
| 37 | struct da8xx_ohci_root_hub; | 14 | struct da8xx_ohci_root_hub; |
| 38 | 15 | ||
| 39 | typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, | 16 | typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, |
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h index ecbde7a5548e..7b78793f07d7 100644 --- a/include/linux/pm-trace.h +++ b/include/linux/pm-trace.h | |||
| @@ -1,11 +1,17 @@ | |||
| 1 | #ifndef PM_TRACE_H | 1 | #ifndef PM_TRACE_H |
| 2 | #define PM_TRACE_H | 2 | #define PM_TRACE_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | ||
| 4 | #ifdef CONFIG_PM_TRACE | 5 | #ifdef CONFIG_PM_TRACE |
| 5 | #include <asm/pm-trace.h> | 6 | #include <asm/pm-trace.h> |
| 6 | #include <linux/types.h> | ||
| 7 | 7 | ||
| 8 | extern int pm_trace_enabled; | 8 | extern int pm_trace_enabled; |
| 9 | extern bool pm_trace_rtc_abused; | ||
| 10 | |||
| 11 | static inline bool pm_trace_rtc_valid(void) | ||
| 12 | { | ||
| 13 | return !pm_trace_rtc_abused; | ||
| 14 | } | ||
| 9 | 15 | ||
| 10 | static inline int pm_trace_is_enabled(void) | 16 | static inline int pm_trace_is_enabled(void) |
| 11 | { | 17 | { |
| @@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size); | |||
| 24 | 30 | ||
| 25 | #else | 31 | #else |
| 26 | 32 | ||
| 33 | static inline bool pm_trace_rtc_valid(void) { return true; } | ||
| 27 | static inline int pm_trace_is_enabled(void) { return 0; } | 34 | static inline int pm_trace_is_enabled(void) { return 0; } |
| 28 | 35 | ||
| 29 | #define TRACE_DEVICE(dev) do { } while (0) | 36 | #define TRACE_DEVICE(dev) do { } while (0) |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 06eb353182ab..f926af41e122 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -258,7 +258,7 @@ typedef struct pm_message { | |||
| 258 | * example, if it detects that a child was unplugged while the system was | 258 | * example, if it detects that a child was unplugged while the system was |
| 259 | * asleep). | 259 | * asleep). |
| 260 | * | 260 | * |
| 261 | * Refer to Documentation/power/devices.txt for more information about the role | 261 | * Refer to Documentation/power/admin-guide/devices.rst for more information about the role |
| 262 | * of the above callbacks in the system suspend process. | 262 | * of the above callbacks in the system suspend process. |
| 263 | * | 263 | * |
| 264 | * There also are callbacks related to runtime power management of devices. | 264 | * There also are callbacks related to runtime power management of devices. |
| @@ -559,6 +559,7 @@ struct dev_pm_info { | |||
| 559 | pm_message_t power_state; | 559 | pm_message_t power_state; |
| 560 | unsigned int can_wakeup:1; | 560 | unsigned int can_wakeup:1; |
| 561 | unsigned int async_suspend:1; | 561 | unsigned int async_suspend:1; |
| 562 | bool in_dpm_list:1; /* Owned by the PM core */ | ||
| 562 | bool is_prepared:1; /* Owned by the PM core */ | 563 | bool is_prepared:1; /* Owned by the PM core */ |
| 563 | bool is_suspended:1; /* Ditto */ | 564 | bool is_suspended:1; /* Ditto */ |
| 564 | bool is_noirq_suspended:1; | 565 | bool is_noirq_suspended:1; |
| @@ -596,6 +597,7 @@ struct dev_pm_info { | |||
| 596 | unsigned int use_autosuspend:1; | 597 | unsigned int use_autosuspend:1; |
| 597 | unsigned int timer_autosuspends:1; | 598 | unsigned int timer_autosuspends:1; |
| 598 | unsigned int memalloc_noio:1; | 599 | unsigned int memalloc_noio:1; |
| 600 | unsigned int links_count; | ||
| 599 | enum rpm_request request; | 601 | enum rpm_request request; |
| 600 | enum rpm_status runtime_status; | 602 | enum rpm_status runtime_status; |
| 601 | int runtime_error; | 603 | int runtime_error; |
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index a09fe5c009c8..81ece61075df 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
| @@ -15,11 +15,11 @@ | |||
| 15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
| 17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
| 18 | #include <linux/spinlock.h> | ||
| 18 | 19 | ||
| 19 | /* Defines used for the flags field in the struct generic_pm_domain */ | 20 | /* Defines used for the flags field in the struct generic_pm_domain */ |
| 20 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ | 21 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ |
| 21 | 22 | #define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ | |
| 22 | #define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ | ||
| 23 | 23 | ||
| 24 | enum gpd_status { | 24 | enum gpd_status { |
| 25 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 25 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
| @@ -40,15 +40,18 @@ struct gpd_dev_ops { | |||
| 40 | struct genpd_power_state { | 40 | struct genpd_power_state { |
| 41 | s64 power_off_latency_ns; | 41 | s64 power_off_latency_ns; |
| 42 | s64 power_on_latency_ns; | 42 | s64 power_on_latency_ns; |
| 43 | s64 residency_ns; | ||
| 44 | struct fwnode_handle *fwnode; | ||
| 43 | }; | 45 | }; |
| 44 | 46 | ||
| 47 | struct genpd_lock_ops; | ||
| 48 | |||
| 45 | struct generic_pm_domain { | 49 | struct generic_pm_domain { |
| 46 | struct dev_pm_domain domain; /* PM domain operations */ | 50 | struct dev_pm_domain domain; /* PM domain operations */ |
| 47 | struct list_head gpd_list_node; /* Node in the global PM domains list */ | 51 | struct list_head gpd_list_node; /* Node in the global PM domains list */ |
| 48 | struct list_head master_links; /* Links with PM domain as a master */ | 52 | struct list_head master_links; /* Links with PM domain as a master */ |
| 49 | struct list_head slave_links; /* Links with PM domain as a slave */ | 53 | struct list_head slave_links; /* Links with PM domain as a slave */ |
| 50 | struct list_head dev_list; /* List of devices */ | 54 | struct list_head dev_list; /* List of devices */ |
| 51 | struct mutex lock; | ||
| 52 | struct dev_power_governor *gov; | 55 | struct dev_power_governor *gov; |
| 53 | struct work_struct power_off_work; | 56 | struct work_struct power_off_work; |
| 54 | struct fwnode_handle *provider; /* Identity of the domain provider */ | 57 | struct fwnode_handle *provider; /* Identity of the domain provider */ |
| @@ -70,9 +73,18 @@ struct generic_pm_domain { | |||
| 70 | void (*detach_dev)(struct generic_pm_domain *domain, | 73 | void (*detach_dev)(struct generic_pm_domain *domain, |
| 71 | struct device *dev); | 74 | struct device *dev); |
| 72 | unsigned int flags; /* Bit field of configs for genpd */ | 75 | unsigned int flags; /* Bit field of configs for genpd */ |
| 73 | struct genpd_power_state states[GENPD_MAX_NUM_STATES]; | 76 | struct genpd_power_state *states; |
| 74 | unsigned int state_count; /* number of states */ | 77 | unsigned int state_count; /* number of states */ |
| 75 | unsigned int state_idx; /* state that genpd will go to when off */ | 78 | unsigned int state_idx; /* state that genpd will go to when off */ |
| 79 | void *free; /* Free the state that was allocated for default */ | ||
| 80 | const struct genpd_lock_ops *lock_ops; | ||
| 81 | union { | ||
| 82 | struct mutex mlock; | ||
| 83 | struct { | ||
| 84 | spinlock_t slock; | ||
| 85 | unsigned long lock_flags; | ||
| 86 | }; | ||
| 87 | }; | ||
| 76 | 88 | ||
| 77 | }; | 89 | }; |
| 78 | 90 | ||
| @@ -205,6 +217,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args, | |||
| 205 | extern int of_genpd_add_subdomain(struct of_phandle_args *parent, | 217 | extern int of_genpd_add_subdomain(struct of_phandle_args *parent, |
| 206 | struct of_phandle_args *new_subdomain); | 218 | struct of_phandle_args *new_subdomain); |
| 207 | extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); | 219 | extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); |
| 220 | extern int of_genpd_parse_idle_states(struct device_node *dn, | ||
| 221 | struct genpd_power_state **states, int *n); | ||
| 208 | 222 | ||
| 209 | int genpd_dev_pm_attach(struct device *dev); | 223 | int genpd_dev_pm_attach(struct device *dev); |
| 210 | #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ | 224 | #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ |
| @@ -234,6 +248,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, | |||
| 234 | return -ENODEV; | 248 | return -ENODEV; |
| 235 | } | 249 | } |
| 236 | 250 | ||
| 251 | static inline int of_genpd_parse_idle_states(struct device_node *dn, | ||
| 252 | struct genpd_power_state **states, int *n) | ||
| 253 | { | ||
| 254 | return -ENODEV; | ||
| 255 | } | ||
| 256 | |||
| 237 | static inline int genpd_dev_pm_attach(struct device *dev) | 257 | static inline int genpd_dev_pm_attach(struct device *dev) |
| 238 | { | 258 | { |
| 239 | return -ENODEV; | 259 | return -ENODEV; |
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index bca26157f5b6..0edd88f93904 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
| @@ -17,13 +17,65 @@ | |||
| 17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
| 18 | #include <linux/notifier.h> | 18 | #include <linux/notifier.h> |
| 19 | 19 | ||
| 20 | struct clk; | ||
| 21 | struct regulator; | ||
| 20 | struct dev_pm_opp; | 22 | struct dev_pm_opp; |
| 21 | struct device; | 23 | struct device; |
| 24 | struct opp_table; | ||
| 22 | 25 | ||
| 23 | enum dev_pm_opp_event { | 26 | enum dev_pm_opp_event { |
| 24 | OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, | 27 | OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, |
| 25 | }; | 28 | }; |
| 26 | 29 | ||
| 30 | /** | ||
| 31 | * struct dev_pm_opp_supply - Power supply voltage/current values | ||
| 32 | * @u_volt: Target voltage in microvolts corresponding to this OPP | ||
| 33 | * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP | ||
| 34 | * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP | ||
| 35 | * @u_amp: Maximum current drawn by the device in microamperes | ||
| 36 | * | ||
| 37 | * This structure stores the voltage/current values for a single power supply. | ||
| 38 | */ | ||
| 39 | struct dev_pm_opp_supply { | ||
| 40 | unsigned long u_volt; | ||
| 41 | unsigned long u_volt_min; | ||
| 42 | unsigned long u_volt_max; | ||
| 43 | unsigned long u_amp; | ||
| 44 | }; | ||
| 45 | |||
| 46 | /** | ||
| 47 | * struct dev_pm_opp_info - OPP freq/voltage/current values | ||
| 48 | * @rate: Target clk rate in hz | ||
| 49 | * @supplies: Array of voltage/current values for all power supplies | ||
| 50 | * | ||
| 51 | * This structure stores the freq/voltage/current values for a single OPP. | ||
| 52 | */ | ||
| 53 | struct dev_pm_opp_info { | ||
| 54 | unsigned long rate; | ||
| 55 | struct dev_pm_opp_supply *supplies; | ||
| 56 | }; | ||
| 57 | |||
| 58 | /** | ||
| 59 | * struct dev_pm_set_opp_data - Set OPP data | ||
| 60 | * @old_opp: Old OPP info | ||
| 61 | * @new_opp: New OPP info | ||
| 62 | * @regulators: Array of regulator pointers | ||
| 63 | * @regulator_count: Number of regulators | ||
| 64 | * @clk: Pointer to clk | ||
| 65 | * @dev: Pointer to the struct device | ||
| 66 | * | ||
| 67 | * This structure contains all information required for setting an OPP. | ||
| 68 | */ | ||
| 69 | struct dev_pm_set_opp_data { | ||
| 70 | struct dev_pm_opp_info old_opp; | ||
| 71 | struct dev_pm_opp_info new_opp; | ||
| 72 | |||
| 73 | struct regulator **regulators; | ||
| 74 | unsigned int regulator_count; | ||
| 75 | struct clk *clk; | ||
| 76 | struct device *dev; | ||
| 77 | }; | ||
| 78 | |||
| 27 | #if defined(CONFIG_PM_OPP) | 79 | #if defined(CONFIG_PM_OPP) |
| 28 | 80 | ||
| 29 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); | 81 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); |
| @@ -62,8 +114,10 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, | |||
| 62 | void dev_pm_opp_put_supported_hw(struct device *dev); | 114 | void dev_pm_opp_put_supported_hw(struct device *dev); |
| 63 | int dev_pm_opp_set_prop_name(struct device *dev, const char *name); | 115 | int dev_pm_opp_set_prop_name(struct device *dev, const char *name); |
| 64 | void dev_pm_opp_put_prop_name(struct device *dev); | 116 | void dev_pm_opp_put_prop_name(struct device *dev); |
| 65 | int dev_pm_opp_set_regulator(struct device *dev, const char *name); | 117 | struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); |
| 66 | void dev_pm_opp_put_regulator(struct device *dev); | 118 | void dev_pm_opp_put_regulators(struct opp_table *opp_table); |
| 119 | int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); | ||
| 120 | void dev_pm_opp_register_put_opp_helper(struct device *dev); | ||
| 67 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); | 121 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); |
| 68 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); | 122 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); |
| 69 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); | 123 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); |
| @@ -163,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev, | |||
| 163 | 217 | ||
| 164 | static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} | 218 | static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} |
| 165 | 219 | ||
| 220 | static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, | ||
| 221 | int (*set_opp)(struct dev_pm_set_opp_data *data)) | ||
| 222 | { | ||
| 223 | return -ENOTSUPP; | ||
| 224 | } | ||
| 225 | |||
| 226 | static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} | ||
| 227 | |||
| 166 | static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) | 228 | static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) |
| 167 | { | 229 | { |
| 168 | return -ENOTSUPP; | 230 | return -ENOTSUPP; |
| @@ -170,12 +232,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) | |||
| 170 | 232 | ||
| 171 | static inline void dev_pm_opp_put_prop_name(struct device *dev) {} | 233 | static inline void dev_pm_opp_put_prop_name(struct device *dev) {} |
| 172 | 234 | ||
| 173 | static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) | 235 | static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) |
| 174 | { | 236 | { |
| 175 | return -ENOTSUPP; | 237 | return ERR_PTR(-ENOTSUPP); |
| 176 | } | 238 | } |
| 177 | 239 | ||
| 178 | static inline void dev_pm_opp_put_regulator(struct device *dev) {} | 240 | static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} |
| 179 | 241 | ||
| 180 | static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | 242 | static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) |
| 181 | { | 243 | { |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2e14d2667b6c..ca4823e675e2 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
| @@ -55,18 +55,17 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); | |||
| 55 | extern void pm_runtime_update_max_time_suspended(struct device *dev, | 55 | extern void pm_runtime_update_max_time_suspended(struct device *dev, |
| 56 | s64 delta_ns); | 56 | s64 delta_ns); |
| 57 | extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); | 57 | extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); |
| 58 | extern void pm_runtime_clean_up_links(struct device *dev); | ||
| 59 | extern void pm_runtime_get_suppliers(struct device *dev); | ||
| 60 | extern void pm_runtime_put_suppliers(struct device *dev); | ||
| 61 | extern void pm_runtime_new_link(struct device *dev); | ||
| 62 | extern void pm_runtime_drop_link(struct device *dev); | ||
| 58 | 63 | ||
| 59 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) | 64 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) |
| 60 | { | 65 | { |
| 61 | dev->power.ignore_children = enable; | 66 | dev->power.ignore_children = enable; |
| 62 | } | 67 | } |
| 63 | 68 | ||
| 64 | static inline bool pm_children_suspended(struct device *dev) | ||
| 65 | { | ||
| 66 | return dev->power.ignore_children | ||
| 67 | || !atomic_read(&dev->power.child_count); | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline void pm_runtime_get_noresume(struct device *dev) | 69 | static inline void pm_runtime_get_noresume(struct device *dev) |
| 71 | { | 70 | { |
| 72 | atomic_inc(&dev->power.usage_count); | 71 | atomic_inc(&dev->power.usage_count); |
| @@ -162,7 +161,6 @@ static inline void pm_runtime_allow(struct device *dev) {} | |||
| 162 | static inline void pm_runtime_forbid(struct device *dev) {} | 161 | static inline void pm_runtime_forbid(struct device *dev) {} |
| 163 | 162 | ||
| 164 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} | 163 | static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} |
| 165 | static inline bool pm_children_suspended(struct device *dev) { return false; } | ||
| 166 | static inline void pm_runtime_get_noresume(struct device *dev) {} | 164 | static inline void pm_runtime_get_noresume(struct device *dev) {} |
| 167 | static inline void pm_runtime_put_noidle(struct device *dev) {} | 165 | static inline void pm_runtime_put_noidle(struct device *dev) {} |
| 168 | static inline bool device_run_wake(struct device *dev) { return false; } | 166 | static inline bool device_run_wake(struct device *dev) { return false; } |
| @@ -186,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration( | |||
| 186 | struct device *dev) { return 0; } | 184 | struct device *dev) { return 0; } |
| 187 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, | 185 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, |
| 188 | bool enable){} | 186 | bool enable){} |
| 187 | static inline void pm_runtime_clean_up_links(struct device *dev) {} | ||
| 188 | static inline void pm_runtime_get_suppliers(struct device *dev) {} | ||
| 189 | static inline void pm_runtime_put_suppliers(struct device *dev) {} | ||
| 190 | static inline void pm_runtime_new_link(struct device *dev) {} | ||
| 191 | static inline void pm_runtime_drop_link(struct device *dev) {} | ||
| 189 | 192 | ||
| 190 | #endif /* !CONFIG_PM */ | 193 | #endif /* !CONFIG_PM */ |
| 191 | 194 | ||
| @@ -265,9 +268,9 @@ static inline int pm_runtime_set_active(struct device *dev) | |||
| 265 | return __pm_runtime_set_status(dev, RPM_ACTIVE); | 268 | return __pm_runtime_set_status(dev, RPM_ACTIVE); |
| 266 | } | 269 | } |
| 267 | 270 | ||
| 268 | static inline void pm_runtime_set_suspended(struct device *dev) | 271 | static inline int pm_runtime_set_suspended(struct device *dev) |
| 269 | { | 272 | { |
| 270 | __pm_runtime_set_status(dev, RPM_SUSPENDED); | 273 | return __pm_runtime_set_status(dev, RPM_SUSPENDED); |
| 271 | } | 274 | } |
| 272 | 275 | ||
| 273 | static inline void pm_runtime_disable(struct device *dev) | 276 | static inline void pm_runtime_disable(struct device *dev) |
diff --git a/include/linux/poll.h b/include/linux/poll.h index 37b057b63b46..a46d6755035e 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | #include <linux/string.h> | 8 | #include <linux/string.h> |
| 9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
| 10 | #include <linux/sysctl.h> | 10 | #include <linux/sysctl.h> |
| 11 | #include <asm/uaccess.h> | 11 | #include <linux/uaccess.h> |
| 12 | #include <uapi/linux/poll.h> | 12 | #include <uapi/linux/poll.h> |
| 13 | 13 | ||
| 14 | extern struct ctl_table epoll_table[]; /* for sysctl */ | 14 | extern struct ctl_table epoll_table[]; /* for sysctl */ |
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e30deb046156..bed9557b69e7 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h | |||
| @@ -4,7 +4,8 @@ | |||
| 4 | enum bq27xxx_chip { | 4 | enum bq27xxx_chip { |
| 5 | BQ27000 = 1, /* bq27000, bq27200 */ | 5 | BQ27000 = 1, /* bq27000, bq27200 */ |
| 6 | BQ27010, /* bq27010, bq27210 */ | 6 | BQ27010, /* bq27010, bq27210 */ |
| 7 | BQ27500, /* bq27500, bq27510, bq27520 */ | 7 | BQ27500, /* bq27500 */ |
| 8 | BQ27510, /* bq27510, bq27520 */ | ||
| 8 | BQ27530, /* bq27530, bq27531 */ | 9 | BQ27530, /* bq27530, bq27531 */ |
| 9 | BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ | 10 | BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ |
| 10 | BQ27545, /* bq27545 */ | 11 | BQ27545, /* bq27545 */ |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..7eeceac52dea 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -65,19 +65,24 @@ | |||
| 65 | 65 | ||
| 66 | /* | 66 | /* |
| 67 | * Are we doing bottom half or hardware interrupt processing? | 67 | * Are we doing bottom half or hardware interrupt processing? |
| 68 | * Are we in a softirq context? Interrupt context? | 68 | * |
| 69 | * in_softirq - Are we currently processing softirq or have bh disabled? | 69 | * in_irq() - We're in (hard) IRQ context |
| 70 | * in_serving_softirq - Are we currently processing softirq? | 70 | * in_softirq() - We have BH disabled, or are processing softirqs |
| 71 | * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled | ||
| 72 | * in_serving_softirq() - We're in softirq context | ||
| 73 | * in_nmi() - We're in NMI context | ||
| 74 | * in_task() - We're in task context | ||
| 75 | * | ||
| 76 | * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really | ||
| 77 | * should not be used in new code. | ||
| 71 | */ | 78 | */ |
| 72 | #define in_irq() (hardirq_count()) | 79 | #define in_irq() (hardirq_count()) |
| 73 | #define in_softirq() (softirq_count()) | 80 | #define in_softirq() (softirq_count()) |
| 74 | #define in_interrupt() (irq_count()) | 81 | #define in_interrupt() (irq_count()) |
| 75 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | 82 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) |
| 76 | 83 | #define in_nmi() (preempt_count() & NMI_MASK) | |
| 77 | /* | 84 | #define in_task() (!(preempt_count() & \ |
| 78 | * Are we in NMI context? | 85 | (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) |
| 79 | */ | ||
| 80 | #define in_nmi() (preempt_count() & NMI_MASK) | ||
| 81 | 86 | ||
| 82 | /* | 87 | /* |
| 83 | * The preempt_count offset after preempt_disable(); | 88 | * The preempt_count offset after preempt_disable(); |
diff --git a/include/linux/printk.h b/include/linux/printk.h index eac1af8502bb..3472cc6b7a60 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | extern const char linux_banner[]; | 10 | extern const char linux_banner[]; |
| 11 | extern const char linux_proc_banner[]; | 11 | extern const char linux_proc_banner[]; |
| 12 | 12 | ||
| 13 | #define PRINTK_MAX_SINGLE_HEADER_LEN 2 | ||
| 14 | |||
| 13 | static inline int printk_get_level(const char *buffer) | 15 | static inline int printk_get_level(const char *buffer) |
| 14 | { | 16 | { |
| 15 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { | 17 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { |
| @@ -31,6 +33,14 @@ static inline const char *printk_skip_level(const char *buffer) | |||
| 31 | return buffer; | 33 | return buffer; |
| 32 | } | 34 | } |
| 33 | 35 | ||
| 36 | static inline const char *printk_skip_headers(const char *buffer) | ||
| 37 | { | ||
| 38 | while (printk_get_level(buffer)) | ||
| 39 | buffer = printk_skip_level(buffer); | ||
| 40 | |||
| 41 | return buffer; | ||
| 42 | } | ||
| 43 | |||
| 34 | #define CONSOLE_EXT_LOG_MAX 8192 | 44 | #define CONSOLE_EXT_LOG_MAX 8192 |
| 35 | 45 | ||
| 36 | /* printk's without a loglevel use this.. */ | 46 | /* printk's without a loglevel use this.. */ |
| @@ -40,10 +50,15 @@ static inline const char *printk_skip_level(const char *buffer) | |||
| 40 | #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ | 50 | #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ |
| 41 | #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ | 51 | #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ |
| 42 | #define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ | 52 | #define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ |
| 43 | #define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ | ||
| 44 | #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ | 53 | #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ |
| 45 | #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ | 54 | #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ |
| 46 | 55 | ||
| 56 | /* | ||
| 57 | * Default used to be hard-coded at 7, we're now allowing it to be set from | ||
| 58 | * kernel config. | ||
| 59 | */ | ||
| 60 | #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT | ||
| 61 | |||
| 47 | extern int console_printk[]; | 62 | extern int console_printk[]; |
| 48 | 63 | ||
| 49 | #define console_loglevel (console_printk[0]) | 64 | #define console_loglevel (console_printk[0]) |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b97bf2ef996e..2d2bf592d9db 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
| @@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, | |||
| 21 | struct proc_dir_entry *, void *); | 21 | struct proc_dir_entry *, void *); |
| 22 | extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, | 22 | extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, |
| 23 | struct proc_dir_entry *); | 23 | struct proc_dir_entry *); |
| 24 | struct proc_dir_entry *proc_create_mount_point(const char *name); | ||
| 24 | 25 | ||
| 25 | extern struct proc_dir_entry *proc_create_data(const char *, umode_t, | 26 | extern struct proc_dir_entry *proc_create_data(const char *, umode_t, |
| 26 | struct proc_dir_entry *, | 27 | struct proc_dir_entry *, |
| @@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, | |||
| 56 | struct proc_dir_entry *parent,const char *dest) { return NULL;} | 57 | struct proc_dir_entry *parent,const char *dest) { return NULL;} |
| 57 | static inline struct proc_dir_entry *proc_mkdir(const char *name, | 58 | static inline struct proc_dir_entry *proc_mkdir(const char *name, |
| 58 | struct proc_dir_entry *parent) {return NULL;} | 59 | struct proc_dir_entry *parent) {return NULL;} |
| 60 | static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } | ||
| 59 | static inline struct proc_dir_entry *proc_mkdir_data(const char *name, | 61 | static inline struct proc_dir_entry *proc_mkdir_data(const char *name, |
| 60 | umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } | 62 | umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } |
| 61 | static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, | 63 | static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, |
| @@ -82,4 +84,8 @@ static inline struct proc_dir_entry *proc_net_mkdir( | |||
| 82 | return proc_mkdir_data(name, 0, parent, net); | 84 | return proc_mkdir_data(name, 0, parent, net); |
| 83 | } | 85 | } |
| 84 | 86 | ||
| 87 | struct ns_common; | ||
| 88 | int open_related_ns(struct ns_common *ns, | ||
| 89 | struct ns_common *(*get_ns)(struct ns_common *ns)); | ||
| 90 | |||
| 85 | #endif /* _LINUX_PROC_FS_H */ | 91 | #endif /* _LINUX_PROC_FS_H */ |
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 92013cc9cc8c..0da29cae009b 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
| @@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *); | |||
| 89 | extern void pstore_unregister(struct pstore_info *); | 89 | extern void pstore_unregister(struct pstore_info *); |
| 90 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); | 90 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); |
| 91 | 91 | ||
| 92 | struct pstore_ftrace_record { | ||
| 93 | unsigned long ip; | ||
| 94 | unsigned long parent_ip; | ||
| 95 | u64 ts; | ||
| 96 | }; | ||
| 97 | |||
| 98 | /* | ||
| 99 | * ftrace related stuff: Both backends and frontends need these so expose | ||
| 100 | * them here. | ||
| 101 | */ | ||
| 102 | |||
| 103 | #if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB) | ||
| 104 | #define PSTORE_CPU_IN_IP 0x1 | ||
| 105 | #elif NR_CPUS <= 4 && defined(CONFIG_ARM) | ||
| 106 | #define PSTORE_CPU_IN_IP 0x3 | ||
| 107 | #endif | ||
| 108 | |||
| 109 | #define TS_CPU_SHIFT 8 | ||
| 110 | #define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1) | ||
| 111 | |||
| 112 | /* | ||
| 113 | * If CPU number can be stored in IP, store it there, otherwise store it in | ||
| 114 | * the time stamp. This means more timestamp resolution is available when | ||
| 115 | * the CPU can be stored in the IP. | ||
| 116 | */ | ||
| 117 | #ifdef PSTORE_CPU_IN_IP | ||
| 118 | static inline void | ||
| 119 | pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) | ||
| 120 | { | ||
| 121 | rec->ip |= cpu; | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline unsigned int | ||
| 125 | pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) | ||
| 126 | { | ||
| 127 | return rec->ip & PSTORE_CPU_IN_IP; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline u64 | ||
| 131 | pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) | ||
| 132 | { | ||
| 133 | return rec->ts; | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline void | ||
| 137 | pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) | ||
| 138 | { | ||
| 139 | rec->ts = val; | ||
| 140 | } | ||
| 141 | #else | ||
| 142 | static inline void | ||
| 143 | pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu) | ||
| 144 | { | ||
| 145 | rec->ts &= ~(TS_CPU_MASK); | ||
| 146 | rec->ts |= cpu; | ||
| 147 | } | ||
| 148 | |||
| 149 | static inline unsigned int | ||
| 150 | pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec) | ||
| 151 | { | ||
| 152 | return rec->ts & TS_CPU_MASK; | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline u64 | ||
| 156 | pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec) | ||
| 157 | { | ||
| 158 | return rec->ts >> TS_CPU_SHIFT; | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline void | ||
| 162 | pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val) | ||
| 163 | { | ||
| 164 | rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT); | ||
| 165 | } | ||
| 166 | #endif | ||
| 167 | |||
| 92 | #endif /*_LINUX_PSTORE_H*/ | 168 | #endif /*_LINUX_PSTORE_H*/ |
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index c668c861c96c..9395f06e8372 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h | |||
| @@ -24,6 +24,13 @@ | |||
| 24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
| 25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
| 26 | 26 | ||
| 27 | /* | ||
| 28 | * Choose whether access to the RAM zone requires locking or not. If a zone | ||
| 29 | * can be written to from different CPUs like with ftrace for example, then | ||
| 30 | * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. | ||
| 31 | */ | ||
| 32 | #define PRZ_FLAG_NO_LOCK BIT(0) | ||
| 33 | |||
| 27 | struct persistent_ram_buffer; | 34 | struct persistent_ram_buffer; |
| 28 | struct rs_control; | 35 | struct rs_control; |
| 29 | 36 | ||
| @@ -40,6 +47,8 @@ struct persistent_ram_zone { | |||
| 40 | void *vaddr; | 47 | void *vaddr; |
| 41 | struct persistent_ram_buffer *buffer; | 48 | struct persistent_ram_buffer *buffer; |
| 42 | size_t buffer_size; | 49 | size_t buffer_size; |
| 50 | u32 flags; | ||
| 51 | raw_spinlock_t buffer_lock; | ||
| 43 | 52 | ||
| 44 | /* ECC correction */ | 53 | /* ECC correction */ |
| 45 | char *par_buffer; | 54 | char *par_buffer; |
| @@ -55,7 +64,7 @@ struct persistent_ram_zone { | |||
| 55 | 64 | ||
| 56 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, | 65 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
| 57 | u32 sig, struct persistent_ram_ecc_info *ecc_info, | 66 | u32 sig, struct persistent_ram_ecc_info *ecc_info, |
| 58 | unsigned int memtype); | 67 | unsigned int memtype, u32 flags); |
| 59 | void persistent_ram_free(struct persistent_ram_zone *prz); | 68 | void persistent_ram_free(struct persistent_ram_zone *prz); |
| 60 | void persistent_ram_zap(struct persistent_ram_zone *prz); | 69 | void persistent_ram_zap(struct persistent_ram_zone *prz); |
| 61 | 70 | ||
| @@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, | |||
| 77 | * @mem_address physical memory address to contain ramoops | 86 | * @mem_address physical memory address to contain ramoops |
| 78 | */ | 87 | */ |
| 79 | 88 | ||
| 89 | #define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) | ||
| 90 | |||
| 80 | struct ramoops_platform_data { | 91 | struct ramoops_platform_data { |
| 81 | unsigned long mem_size; | 92 | unsigned long mem_size; |
| 82 | phys_addr_t mem_address; | 93 | phys_addr_t mem_address; |
| @@ -86,6 +97,7 @@ struct ramoops_platform_data { | |||
| 86 | unsigned long ftrace_size; | 97 | unsigned long ftrace_size; |
| 87 | unsigned long pmsg_size; | 98 | unsigned long pmsg_size; |
| 88 | int dump_oops; | 99 | int dump_oops; |
| 100 | u32 flags; | ||
| 89 | struct persistent_ram_ecc_info ecc_info; | 101 | struct persistent_ram_ecc_info ecc_info; |
| 90 | }; | 102 | }; |
| 91 | 103 | ||
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 5ad54fc66cf0..a026bfd089db 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h | |||
| @@ -58,7 +58,14 @@ struct system_device_crosststamp; | |||
| 58 | * | 58 | * |
| 59 | * clock operations | 59 | * clock operations |
| 60 | * | 60 | * |
| 61 | * @adjfine: Adjusts the frequency of the hardware clock. | ||
| 62 | * parameter scaled_ppm: Desired frequency offset from | ||
| 63 | * nominal frequency in parts per million, but with a | ||
| 64 | * 16 bit binary fractional field. | ||
| 65 | * | ||
| 61 | * @adjfreq: Adjusts the frequency of the hardware clock. | 66 | * @adjfreq: Adjusts the frequency of the hardware clock. |
| 67 | * This method is deprecated. New drivers should implement | ||
| 68 | * the @adjfine method instead. | ||
| 62 | * parameter delta: Desired frequency offset from nominal frequency | 69 | * parameter delta: Desired frequency offset from nominal frequency |
| 63 | * in parts per billion | 70 | * in parts per billion |
| 64 | * | 71 | * |
| @@ -108,6 +115,7 @@ struct ptp_clock_info { | |||
| 108 | int n_pins; | 115 | int n_pins; |
| 109 | int pps; | 116 | int pps; |
| 110 | struct ptp_pin_desc *pin_config; | 117 | struct ptp_pin_desc *pin_config; |
| 118 | int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); | ||
| 111 | int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); | 119 | int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); |
| 112 | int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); | 120 | int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); |
| 113 | int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); | 121 | int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); |
| @@ -122,30 +130,6 @@ struct ptp_clock_info { | |||
| 122 | 130 | ||
| 123 | struct ptp_clock; | 131 | struct ptp_clock; |
| 124 | 132 | ||
| 125 | /** | ||
| 126 | * ptp_clock_register() - register a PTP hardware clock driver | ||
| 127 | * | ||
| 128 | * @info: Structure describing the new clock. | ||
| 129 | * @parent: Pointer to the parent device of the new clock. | ||
| 130 | * | ||
| 131 | * Returns a valid pointer on success or PTR_ERR on failure. If PHC | ||
| 132 | * support is missing at the configuration level, this function | ||
| 133 | * returns NULL, and drivers are expected to gracefully handle that | ||
| 134 | * case separately. | ||
| 135 | */ | ||
| 136 | |||
| 137 | extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | ||
| 138 | struct device *parent); | ||
| 139 | |||
| 140 | /** | ||
| 141 | * ptp_clock_unregister() - unregister a PTP hardware clock driver | ||
| 142 | * | ||
| 143 | * @ptp: The clock to remove from service. | ||
| 144 | */ | ||
| 145 | |||
| 146 | extern int ptp_clock_unregister(struct ptp_clock *ptp); | ||
| 147 | |||
| 148 | |||
| 149 | enum ptp_clock_events { | 133 | enum ptp_clock_events { |
| 150 | PTP_CLOCK_ALARM, | 134 | PTP_CLOCK_ALARM, |
| 151 | PTP_CLOCK_EXTTS, | 135 | PTP_CLOCK_EXTTS, |
| @@ -171,6 +155,31 @@ struct ptp_clock_event { | |||
| 171 | }; | 155 | }; |
| 172 | }; | 156 | }; |
| 173 | 157 | ||
| 158 | #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) | ||
| 159 | |||
| 160 | /** | ||
| 161 | * ptp_clock_register() - register a PTP hardware clock driver | ||
| 162 | * | ||
| 163 | * @info: Structure describing the new clock. | ||
| 164 | * @parent: Pointer to the parent device of the new clock. | ||
| 165 | * | ||
| 166 | * Returns a valid pointer on success or PTR_ERR on failure. If PHC | ||
| 167 | * support is missing at the configuration level, this function | ||
| 168 | * returns NULL, and drivers are expected to gracefully handle that | ||
| 169 | * case separately. | ||
| 170 | */ | ||
| 171 | |||
| 172 | extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | ||
| 173 | struct device *parent); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * ptp_clock_unregister() - unregister a PTP hardware clock driver | ||
| 177 | * | ||
| 178 | * @ptp: The clock to remove from service. | ||
| 179 | */ | ||
| 180 | |||
| 181 | extern int ptp_clock_unregister(struct ptp_clock *ptp); | ||
| 182 | |||
| 174 | /** | 183 | /** |
| 175 | * ptp_clock_event() - notify the PTP layer about an event | 184 | * ptp_clock_event() - notify the PTP layer about an event |
| 176 | * | 185 | * |
| @@ -202,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp); | |||
| 202 | int ptp_find_pin(struct ptp_clock *ptp, | 211 | int ptp_find_pin(struct ptp_clock *ptp, |
| 203 | enum ptp_pin_function func, unsigned int chan); | 212 | enum ptp_pin_function func, unsigned int chan); |
| 204 | 213 | ||
| 214 | #else | ||
| 215 | static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | ||
| 216 | struct device *parent) | ||
| 217 | { return NULL; } | ||
| 218 | static inline int ptp_clock_unregister(struct ptp_clock *ptp) | ||
| 219 | { return 0; } | ||
| 220 | static inline void ptp_clock_event(struct ptp_clock *ptp, | ||
| 221 | struct ptp_clock_event *event) | ||
| 222 | { } | ||
| 223 | static inline int ptp_clock_index(struct ptp_clock *ptp) | ||
| 224 | { return -1; } | ||
| 225 | static inline int ptp_find_pin(struct ptp_clock *ptp, | ||
| 226 | enum ptp_pin_function func, unsigned int chan) | ||
| 227 | { return -1; } | ||
| 228 | #endif | ||
| 229 | |||
| 205 | #endif | 230 | #endif |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 504c98a278d4..e0e539321ab9 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -8,6 +8,9 @@ | |||
| 8 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ | 8 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ |
| 9 | #include <uapi/linux/ptrace.h> | 9 | #include <uapi/linux/ptrace.h> |
| 10 | 10 | ||
| 11 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, | ||
| 12 | void *buf, int len, unsigned int gup_flags); | ||
| 13 | |||
| 11 | /* | 14 | /* |
| 12 | * Ptrace flags | 15 | * Ptrace flags |
| 13 | * | 16 | * |
| @@ -19,7 +22,6 @@ | |||
| 19 | #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ | 22 | #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ |
| 20 | #define PT_PTRACED 0x00000001 | 23 | #define PT_PTRACED 0x00000001 |
| 21 | #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ | 24 | #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ |
| 22 | #define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */ | ||
| 23 | 25 | ||
| 24 | #define PT_OPT_FLAG_SHIFT 3 | 26 | #define PT_OPT_FLAG_SHIFT 3 |
| 25 | /* PT_TRACE_* event enable flags */ | 27 | /* PT_TRACE_* event enable flags */ |
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 2d6f0c39ed68..a0522328d7aa 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
| @@ -90,9 +90,9 @@ | |||
| 90 | #define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ | 90 | #define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ |
| 91 | 91 | ||
| 92 | #define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ | 92 | #define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ |
| 93 | #define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ | 93 | #define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ |
| 94 | #define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ | 94 | #define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ |
| 95 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ | 95 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ |
| 96 | 96 | ||
| 97 | #define RX_THRESH_CE4100_DFLT 2 | 97 | #define RX_THRESH_CE4100_DFLT 2 |
| 98 | #define TX_THRESH_CE4100_DFLT 2 | 98 | #define TX_THRESH_CE4100_DFLT 2 |
| @@ -106,9 +106,9 @@ | |||
| 106 | #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ | 106 | #define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ |
| 107 | 107 | ||
| 108 | /* QUARK_X1000 SSCR0 bit definition */ | 108 | /* QUARK_X1000 SSCR0 bit definition */ |
| 109 | #define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ | 109 | #define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */ |
| 110 | #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ | 110 | #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ |
| 111 | #define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ | 111 | #define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ |
| 112 | #define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ | 112 | #define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ |
| 113 | 113 | ||
| 114 | #define RX_THRESH_QUARK_X1000_DFLT 1 | 114 | #define RX_THRESH_QUARK_X1000_DFLT 1 |
| @@ -121,8 +121,8 @@ | |||
| 121 | #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ | 121 | #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ |
| 122 | #define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ | 122 | #define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ |
| 123 | #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ | 123 | #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ |
| 124 | #define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ | 124 | #define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ |
| 125 | #define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ | 125 | #define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ |
| 126 | 126 | ||
| 127 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ | 127 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ |
| 128 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ | 128 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ |
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 72d88cf3ca25..37dfba101c6c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
| @@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 { | |||
| 56 | u32 cons_page_idx; | 56 | u32 cons_page_idx; |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | struct qed_chain_pbl { | ||
| 60 | /* Base address of a pre-allocated buffer for pbl */ | ||
| 61 | dma_addr_t p_phys_table; | ||
| 62 | void *p_virt_table; | ||
| 63 | |||
| 64 | /* Table for keeping the virtual addresses of the chain pages, | ||
| 65 | * respectively to the physical addresses in the pbl table. | ||
| 66 | */ | ||
| 67 | void **pp_virt_addr_tbl; | ||
| 68 | |||
| 69 | /* Index to current used page by producer/consumer */ | ||
| 70 | union { | ||
| 71 | struct qed_chain_pbl_u16 pbl16; | ||
| 72 | struct qed_chain_pbl_u32 pbl32; | ||
| 73 | } u; | ||
| 74 | }; | ||
| 75 | |||
| 76 | struct qed_chain_u16 { | 59 | struct qed_chain_u16 { |
| 77 | /* Cyclic index of next element to produce/consme */ | 60 | /* Cyclic index of next element to produce/consme */ |
| 78 | u16 prod_idx; | 61 | u16 prod_idx; |
| @@ -86,46 +69,78 @@ struct qed_chain_u32 { | |||
| 86 | }; | 69 | }; |
| 87 | 70 | ||
| 88 | struct qed_chain { | 71 | struct qed_chain { |
| 89 | void *p_virt_addr; | 72 | /* fastpath portion of the chain - required for commands such |
| 90 | dma_addr_t p_phys_addr; | 73 | * as produce / consume. |
| 91 | void *p_prod_elem; | 74 | */ |
| 92 | void *p_cons_elem; | 75 | /* Point to next element to produce/consume */ |
| 76 | void *p_prod_elem; | ||
| 77 | void *p_cons_elem; | ||
| 78 | |||
| 79 | /* Fastpath portions of the PBL [if exists] */ | ||
| 80 | struct { | ||
| 81 | /* Table for keeping the virtual addresses of the chain pages, | ||
| 82 | * respectively to the physical addresses in the pbl table. | ||
| 83 | */ | ||
| 84 | void **pp_virt_addr_tbl; | ||
| 93 | 85 | ||
| 94 | enum qed_chain_mode mode; | 86 | union { |
| 95 | enum qed_chain_use_mode intended_use; /* used to produce/consume */ | 87 | struct qed_chain_pbl_u16 u16; |
| 96 | enum qed_chain_cnt_type cnt_type; | 88 | struct qed_chain_pbl_u32 u32; |
| 89 | } c; | ||
| 90 | } pbl; | ||
| 97 | 91 | ||
| 98 | union { | 92 | union { |
| 99 | struct qed_chain_u16 chain16; | 93 | struct qed_chain_u16 chain16; |
| 100 | struct qed_chain_u32 chain32; | 94 | struct qed_chain_u32 chain32; |
| 101 | } u; | 95 | } u; |
| 102 | 96 | ||
| 97 | /* Capacity counts only usable elements */ | ||
| 98 | u32 capacity; | ||
| 103 | u32 page_cnt; | 99 | u32 page_cnt; |
| 104 | 100 | ||
| 105 | /* Number of elements - capacity is for usable elements only, | 101 | enum qed_chain_mode mode; |
| 106 | * while size will contain total number of elements [for entire chain]. | 102 | |
| 103 | /* Elements information for fast calculations */ | ||
| 104 | u16 elem_per_page; | ||
| 105 | u16 elem_per_page_mask; | ||
| 106 | u16 elem_size; | ||
| 107 | u16 next_page_mask; | ||
| 108 | u16 usable_per_page; | ||
| 109 | u8 elem_unusable; | ||
| 110 | |||
| 111 | u8 cnt_type; | ||
| 112 | |||
| 113 | /* Slowpath of the chain - required for initialization and destruction, | ||
| 114 | * but isn't involved in regular functionality. | ||
| 107 | */ | 115 | */ |
| 108 | u32 capacity; | 116 | |
| 117 | /* Base address of a pre-allocated buffer for pbl */ | ||
| 118 | struct { | ||
| 119 | dma_addr_t p_phys_table; | ||
| 120 | void *p_virt_table; | ||
| 121 | } pbl_sp; | ||
| 122 | |||
| 123 | /* Address of first page of the chain - the address is required | ||
| 124 | * for fastpath operation [consume/produce] but only for the the SINGLE | ||
| 125 | * flavour which isn't considered fastpath [== SPQ]. | ||
| 126 | */ | ||
| 127 | void *p_virt_addr; | ||
| 128 | dma_addr_t p_phys_addr; | ||
| 129 | |||
| 130 | /* Total number of elements [for entire chain] */ | ||
| 109 | u32 size; | 131 | u32 size; |
| 110 | 132 | ||
| 111 | /* Elements information for fast calculations */ | 133 | u8 intended_use; |
| 112 | u16 elem_per_page; | ||
| 113 | u16 elem_per_page_mask; | ||
| 114 | u16 elem_unusable; | ||
| 115 | u16 usable_per_page; | ||
| 116 | u16 elem_size; | ||
| 117 | u16 next_page_mask; | ||
| 118 | struct qed_chain_pbl pbl; | ||
| 119 | }; | 134 | }; |
| 120 | 135 | ||
| 121 | #define QED_CHAIN_PBL_ENTRY_SIZE (8) | 136 | #define QED_CHAIN_PBL_ENTRY_SIZE (8) |
| 122 | #define QED_CHAIN_PAGE_SIZE (0x1000) | 137 | #define QED_CHAIN_PAGE_SIZE (0x1000) |
| 123 | #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) | 138 | #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) |
| 124 | 139 | ||
| 125 | #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ | 140 | #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ |
| 126 | ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \ | 141 | (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ |
| 127 | (1 + ((sizeof(struct qed_chain_next) - 1) / \ | 142 | (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ |
| 128 | (elem_size))) : 0) | 143 | (elem_size))) : 0) |
| 129 | 144 | ||
| 130 | #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ | 145 | #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ |
| 131 | ((u32)(ELEMS_PER_PAGE(elem_size) - \ | 146 | ((u32)(ELEMS_PER_PAGE(elem_size) - \ |
| @@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) | |||
| 186 | return p_chain->usable_per_page; | 201 | return p_chain->usable_per_page; |
| 187 | } | 202 | } |
| 188 | 203 | ||
| 189 | static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) | 204 | static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) |
| 190 | { | 205 | { |
| 191 | return p_chain->elem_unusable; | 206 | return p_chain->elem_unusable; |
| 192 | } | 207 | } |
| @@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) | |||
| 198 | 213 | ||
| 199 | static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) | 214 | static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) |
| 200 | { | 215 | { |
| 201 | return p_chain->pbl.p_phys_table; | 216 | return p_chain->pbl_sp.p_phys_table; |
| 202 | } | 217 | } |
| 203 | 218 | ||
| 204 | /** | 219 | /** |
| @@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) | |||
| 214 | static inline void | 229 | static inline void |
| 215 | qed_chain_advance_page(struct qed_chain *p_chain, | 230 | qed_chain_advance_page(struct qed_chain *p_chain, |
| 216 | void **p_next_elem, void *idx_to_inc, void *page_to_inc) | 231 | void **p_next_elem, void *idx_to_inc, void *page_to_inc) |
| 217 | |||
| 218 | { | 232 | { |
| 219 | struct qed_chain_next *p_next = NULL; | 233 | struct qed_chain_next *p_next = NULL; |
| 220 | u32 page_index = 0; | 234 | u32 page_index = 0; |
| 235 | |||
| 221 | switch (p_chain->mode) { | 236 | switch (p_chain->mode) { |
| 222 | case QED_CHAIN_MODE_NEXT_PTR: | 237 | case QED_CHAIN_MODE_NEXT_PTR: |
| 223 | p_next = *p_next_elem; | 238 | p_next = *p_next_elem; |
| @@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) | |||
| 305 | if ((p_chain->u.chain16.prod_idx & | 320 | if ((p_chain->u.chain16.prod_idx & |
| 306 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { | 321 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { |
| 307 | p_prod_idx = &p_chain->u.chain16.prod_idx; | 322 | p_prod_idx = &p_chain->u.chain16.prod_idx; |
| 308 | p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; | 323 | p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; |
| 309 | qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, | 324 | qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, |
| 310 | p_prod_idx, p_prod_page_idx); | 325 | p_prod_idx, p_prod_page_idx); |
| 311 | } | 326 | } |
| @@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) | |||
| 314 | if ((p_chain->u.chain32.prod_idx & | 329 | if ((p_chain->u.chain32.prod_idx & |
| 315 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { | 330 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { |
| 316 | p_prod_idx = &p_chain->u.chain32.prod_idx; | 331 | p_prod_idx = &p_chain->u.chain32.prod_idx; |
| 317 | p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; | 332 | p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; |
| 318 | qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, | 333 | qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, |
| 319 | p_prod_idx, p_prod_page_idx); | 334 | p_prod_idx, p_prod_page_idx); |
| 320 | } | 335 | } |
| @@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) | |||
| 378 | if ((p_chain->u.chain16.cons_idx & | 393 | if ((p_chain->u.chain16.cons_idx & |
| 379 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { | 394 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { |
| 380 | p_cons_idx = &p_chain->u.chain16.cons_idx; | 395 | p_cons_idx = &p_chain->u.chain16.cons_idx; |
| 381 | p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; | 396 | p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; |
| 382 | qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, | 397 | qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, |
| 383 | p_cons_idx, p_cons_page_idx); | 398 | p_cons_idx, p_cons_page_idx); |
| 384 | } | 399 | } |
| @@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) | |||
| 387 | if ((p_chain->u.chain32.cons_idx & | 402 | if ((p_chain->u.chain32.cons_idx & |
| 388 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { | 403 | p_chain->elem_per_page_mask) == p_chain->next_page_mask) { |
| 389 | p_cons_idx = &p_chain->u.chain32.cons_idx; | 404 | p_cons_idx = &p_chain->u.chain32.cons_idx; |
| 390 | p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; | 405 | p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; |
| 391 | qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, | 406 | qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, |
| 392 | p_cons_idx, p_cons_page_idx); | 407 | p_cons_idx, p_cons_page_idx); |
| 393 | } | 408 | } |
| 394 | p_chain->u.chain32.cons_idx++; | 409 | p_chain->u.chain32.cons_idx++; |
| @@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) | |||
| 429 | u32 reset_val = p_chain->page_cnt - 1; | 444 | u32 reset_val = p_chain->page_cnt - 1; |
| 430 | 445 | ||
| 431 | if (is_chain_u16(p_chain)) { | 446 | if (is_chain_u16(p_chain)) { |
| 432 | p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; | 447 | p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; |
| 433 | p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; | 448 | p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; |
| 434 | } else { | 449 | } else { |
| 435 | p_chain->pbl.u.pbl32.prod_page_idx = reset_val; | 450 | p_chain->pbl.c.u32.prod_page_idx = reset_val; |
| 436 | p_chain->pbl.u.pbl32.cons_page_idx = reset_val; | 451 | p_chain->pbl.c.u32.cons_page_idx = reset_val; |
| 437 | } | 452 | } |
| 438 | } | 453 | } |
| 439 | 454 | ||
| 440 | switch (p_chain->intended_use) { | 455 | switch (p_chain->intended_use) { |
| 441 | case QED_CHAIN_USE_TO_CONSUME_PRODUCE: | ||
| 442 | case QED_CHAIN_USE_TO_PRODUCE: | ||
| 443 | /* Do nothing */ | ||
| 444 | break; | ||
| 445 | |||
| 446 | case QED_CHAIN_USE_TO_CONSUME: | 456 | case QED_CHAIN_USE_TO_CONSUME: |
| 447 | /* produce empty elements */ | 457 | /* produce empty elements */ |
| 448 | for (i = 0; i < p_chain->capacity; i++) | 458 | for (i = 0; i < p_chain->capacity; i++) |
| 449 | qed_chain_recycle_consumed(p_chain); | 459 | qed_chain_recycle_consumed(p_chain); |
| 450 | break; | 460 | break; |
| 461 | |||
| 462 | case QED_CHAIN_USE_TO_CONSUME_PRODUCE: | ||
| 463 | case QED_CHAIN_USE_TO_PRODUCE: | ||
| 464 | default: | ||
| 465 | /* Do nothing */ | ||
| 466 | break; | ||
| 451 | } | 467 | } |
| 452 | } | 468 | } |
| 453 | 469 | ||
| @@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, | |||
| 473 | p_chain->p_virt_addr = NULL; | 489 | p_chain->p_virt_addr = NULL; |
| 474 | p_chain->p_phys_addr = 0; | 490 | p_chain->p_phys_addr = 0; |
| 475 | p_chain->elem_size = elem_size; | 491 | p_chain->elem_size = elem_size; |
| 476 | p_chain->intended_use = intended_use; | 492 | p_chain->intended_use = (u8)intended_use; |
| 477 | p_chain->mode = mode; | 493 | p_chain->mode = mode; |
| 478 | p_chain->cnt_type = cnt_type; | 494 | p_chain->cnt_type = (u8)cnt_type; |
| 479 | 495 | ||
| 480 | p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); | 496 | p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); |
| 481 | p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); | 497 | p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); |
| 482 | p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; | 498 | p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; |
| 483 | p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); | 499 | p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); |
| 484 | p_chain->next_page_mask = (p_chain->usable_per_page & | 500 | p_chain->next_page_mask = (p_chain->usable_per_page & |
| 485 | p_chain->elem_per_page_mask); | 501 | p_chain->elem_per_page_mask); |
| @@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, | |||
| 488 | p_chain->capacity = p_chain->usable_per_page * page_cnt; | 504 | p_chain->capacity = p_chain->usable_per_page * page_cnt; |
| 489 | p_chain->size = p_chain->elem_per_page * page_cnt; | 505 | p_chain->size = p_chain->elem_per_page * page_cnt; |
| 490 | 506 | ||
| 491 | p_chain->pbl.p_phys_table = 0; | 507 | p_chain->pbl_sp.p_phys_table = 0; |
| 492 | p_chain->pbl.p_virt_table = NULL; | 508 | p_chain->pbl_sp.p_virt_table = NULL; |
| 493 | p_chain->pbl.pp_virt_addr_tbl = NULL; | 509 | p_chain->pbl.pp_virt_addr_tbl = NULL; |
| 494 | } | 510 | } |
| 495 | 511 | ||
| @@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, | |||
| 530 | dma_addr_t p_phys_pbl, | 546 | dma_addr_t p_phys_pbl, |
| 531 | void **pp_virt_addr_tbl) | 547 | void **pp_virt_addr_tbl) |
| 532 | { | 548 | { |
| 533 | p_chain->pbl.p_phys_table = p_phys_pbl; | 549 | p_chain->pbl_sp.p_phys_table = p_phys_pbl; |
| 534 | p_chain->pbl.p_virt_table = p_virt_pbl; | 550 | p_chain->pbl_sp.p_virt_table = p_virt_pbl; |
| 535 | p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; | 551 | p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; |
| 536 | } | 552 | } |
| 537 | 553 | ||
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 33c24ebc9b7f..7a52f7c58c37 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h | |||
| @@ -15,6 +15,29 @@ | |||
| 15 | #include <linux/qed/qed_if.h> | 15 | #include <linux/qed/qed_if.h> |
| 16 | #include <linux/qed/qed_iov_if.h> | 16 | #include <linux/qed/qed_iov_if.h> |
| 17 | 17 | ||
| 18 | struct qed_queue_start_common_params { | ||
| 19 | /* Should always be relative to entity sending this. */ | ||
| 20 | u8 vport_id; | ||
| 21 | u16 queue_id; | ||
| 22 | |||
| 23 | /* Relative, but relevant only for PFs */ | ||
| 24 | u8 stats_id; | ||
| 25 | |||
| 26 | /* These are always absolute */ | ||
| 27 | u16 sb; | ||
| 28 | u8 sb_idx; | ||
| 29 | }; | ||
| 30 | |||
| 31 | struct qed_rxq_start_ret_params { | ||
| 32 | void __iomem *p_prod; | ||
| 33 | void *p_handle; | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct qed_txq_start_ret_params { | ||
| 37 | void __iomem *p_doorbell; | ||
| 38 | void *p_handle; | ||
| 39 | }; | ||
| 40 | |||
| 18 | struct qed_dev_eth_info { | 41 | struct qed_dev_eth_info { |
| 19 | struct qed_dev_info common; | 42 | struct qed_dev_info common; |
| 20 | 43 | ||
| @@ -22,7 +45,8 @@ struct qed_dev_eth_info { | |||
| 22 | u8 num_tc; | 45 | u8 num_tc; |
| 23 | 46 | ||
| 24 | u8 port_mac[ETH_ALEN]; | 47 | u8 port_mac[ETH_ALEN]; |
| 25 | u8 num_vlan_filters; | 48 | u16 num_vlan_filters; |
| 49 | u16 num_mac_filters; | ||
| 26 | 50 | ||
| 27 | /* Legacy VF - this affects the datapath, so qede has to know */ | 51 | /* Legacy VF - this affects the datapath, so qede has to know */ |
| 28 | bool is_legacy; | 52 | bool is_legacy; |
| @@ -55,18 +79,6 @@ struct qed_start_vport_params { | |||
| 55 | bool clear_stats; | 79 | bool clear_stats; |
| 56 | }; | 80 | }; |
| 57 | 81 | ||
| 58 | struct qed_stop_rxq_params { | ||
| 59 | u8 rss_id; | ||
| 60 | u8 rx_queue_id; | ||
| 61 | u8 vport_id; | ||
| 62 | bool eq_completion_only; | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct qed_stop_txq_params { | ||
| 66 | u8 rss_id; | ||
| 67 | u8 tx_queue_id; | ||
| 68 | }; | ||
| 69 | |||
| 70 | enum qed_filter_rx_mode_type { | 82 | enum qed_filter_rx_mode_type { |
| 71 | QED_FILTER_RX_MODE_TYPE_REGULAR, | 83 | QED_FILTER_RX_MODE_TYPE_REGULAR, |
| 72 | QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, | 84 | QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, |
| @@ -111,15 +123,6 @@ struct qed_filter_params { | |||
| 111 | union qed_filter_type_params filter; | 123 | union qed_filter_type_params filter; |
| 112 | }; | 124 | }; |
| 113 | 125 | ||
| 114 | struct qed_queue_start_common_params { | ||
| 115 | u8 rss_id; | ||
| 116 | u8 queue_id; | ||
| 117 | u8 vport_id; | ||
| 118 | u16 sb; | ||
| 119 | u16 sb_idx; | ||
| 120 | u16 vf_qid; | ||
| 121 | }; | ||
| 122 | |||
| 123 | struct qed_tunn_params { | 126 | struct qed_tunn_params { |
| 124 | u16 vxlan_port; | 127 | u16 vxlan_port; |
| 125 | u8 update_vxlan_port; | 128 | u8 update_vxlan_port; |
| @@ -129,7 +132,7 @@ struct qed_tunn_params { | |||
| 129 | 132 | ||
| 130 | struct qed_eth_cb_ops { | 133 | struct qed_eth_cb_ops { |
| 131 | struct qed_common_cb_ops common; | 134 | struct qed_common_cb_ops common; |
| 132 | void (*force_mac) (void *dev, u8 *mac); | 135 | void (*force_mac) (void *dev, u8 *mac, bool forced); |
| 133 | }; | 136 | }; |
| 134 | 137 | ||
| 135 | #ifdef CONFIG_DCB | 138 | #ifdef CONFIG_DCB |
| @@ -219,24 +222,24 @@ struct qed_eth_ops { | |||
| 219 | struct qed_update_vport_params *params); | 222 | struct qed_update_vport_params *params); |
| 220 | 223 | ||
| 221 | int (*q_rx_start)(struct qed_dev *cdev, | 224 | int (*q_rx_start)(struct qed_dev *cdev, |
| 225 | u8 rss_num, | ||
| 222 | struct qed_queue_start_common_params *params, | 226 | struct qed_queue_start_common_params *params, |
| 223 | u16 bd_max_bytes, | 227 | u16 bd_max_bytes, |
| 224 | dma_addr_t bd_chain_phys_addr, | 228 | dma_addr_t bd_chain_phys_addr, |
| 225 | dma_addr_t cqe_pbl_addr, | 229 | dma_addr_t cqe_pbl_addr, |
| 226 | u16 cqe_pbl_size, | 230 | u16 cqe_pbl_size, |
| 227 | void __iomem **pp_prod); | 231 | struct qed_rxq_start_ret_params *ret_params); |
| 228 | 232 | ||
| 229 | int (*q_rx_stop)(struct qed_dev *cdev, | 233 | int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); |
| 230 | struct qed_stop_rxq_params *params); | ||
| 231 | 234 | ||
| 232 | int (*q_tx_start)(struct qed_dev *cdev, | 235 | int (*q_tx_start)(struct qed_dev *cdev, |
| 236 | u8 rss_num, | ||
| 233 | struct qed_queue_start_common_params *params, | 237 | struct qed_queue_start_common_params *params, |
| 234 | dma_addr_t pbl_addr, | 238 | dma_addr_t pbl_addr, |
| 235 | u16 pbl_size, | 239 | u16 pbl_size, |
| 236 | void __iomem **pp_doorbell); | 240 | struct qed_txq_start_ret_params *ret_params); |
| 237 | 241 | ||
| 238 | int (*q_tx_stop)(struct qed_dev *cdev, | 242 | int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); |
| 239 | struct qed_stop_txq_params *params); | ||
| 240 | 243 | ||
| 241 | int (*filter_config)(struct qed_dev *cdev, | 244 | int (*filter_config)(struct qed_dev *cdev, |
| 242 | struct qed_filter_params *params); | 245 | struct qed_filter_params *params); |
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8978a60371f4..4b454f4f5b25 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
| @@ -166,6 +166,7 @@ struct qed_iscsi_pf_params { | |||
| 166 | u32 max_cwnd; | 166 | u32 max_cwnd; |
| 167 | u16 cq_num_entries; | 167 | u16 cq_num_entries; |
| 168 | u16 cmdq_num_entries; | 168 | u16 cmdq_num_entries; |
| 169 | u32 two_msl_timer; | ||
| 169 | u16 dup_ack_threshold; | 170 | u16 dup_ack_threshold; |
| 170 | u16 tx_sws_timer; | 171 | u16 tx_sws_timer; |
| 171 | u16 min_rto; | 172 | u16 min_rto; |
| @@ -267,11 +268,15 @@ struct qed_dev_info { | |||
| 267 | u8 mf_mode; | 268 | u8 mf_mode; |
| 268 | bool tx_switching; | 269 | bool tx_switching; |
| 269 | bool rdma_supported; | 270 | bool rdma_supported; |
| 271 | u16 mtu; | ||
| 272 | |||
| 273 | bool wol_support; | ||
| 270 | }; | 274 | }; |
| 271 | 275 | ||
| 272 | enum qed_sb_type { | 276 | enum qed_sb_type { |
| 273 | QED_SB_TYPE_L2_QUEUE, | 277 | QED_SB_TYPE_L2_QUEUE, |
| 274 | QED_SB_TYPE_CNQ, | 278 | QED_SB_TYPE_CNQ, |
| 279 | QED_SB_TYPE_STORAGE, | ||
| 275 | }; | 280 | }; |
| 276 | 281 | ||
| 277 | enum qed_protocol { | 282 | enum qed_protocol { |
| @@ -401,6 +406,15 @@ struct qed_selftest_ops { | |||
| 401 | * @return 0 on success, error otherwise. | 406 | * @return 0 on success, error otherwise. |
| 402 | */ | 407 | */ |
| 403 | int (*selftest_clock)(struct qed_dev *cdev); | 408 | int (*selftest_clock)(struct qed_dev *cdev); |
| 409 | |||
| 410 | /** | ||
| 411 | * @brief selftest_nvram - Perform nvram test | ||
| 412 | * | ||
| 413 | * @param cdev | ||
| 414 | * | ||
| 415 | * @return 0 on success, error otherwise. | ||
| 416 | */ | ||
| 417 | int (*selftest_nvram) (struct qed_dev *cdev); | ||
| 404 | }; | 418 | }; |
| 405 | 419 | ||
| 406 | struct qed_common_ops { | 420 | struct qed_common_ops { |
| @@ -554,6 +568,41 @@ struct qed_common_ops { | |||
| 554 | */ | 568 | */ |
| 555 | int (*set_led)(struct qed_dev *cdev, | 569 | int (*set_led)(struct qed_dev *cdev, |
| 556 | enum qed_led_mode mode); | 570 | enum qed_led_mode mode); |
| 571 | |||
| 572 | /** | ||
| 573 | * @brief update_drv_state - API to inform the change in the driver state. | ||
| 574 | * | ||
| 575 | * @param cdev | ||
| 576 | * @param active | ||
| 577 | * | ||
| 578 | */ | ||
| 579 | int (*update_drv_state)(struct qed_dev *cdev, bool active); | ||
| 580 | |||
| 581 | /** | ||
| 582 | * @brief update_mac - API to inform the change in the mac address | ||
| 583 | * | ||
| 584 | * @param cdev | ||
| 585 | * @param mac | ||
| 586 | * | ||
| 587 | */ | ||
| 588 | int (*update_mac)(struct qed_dev *cdev, u8 *mac); | ||
| 589 | |||
| 590 | /** | ||
| 591 | * @brief update_mtu - API to inform the change in the mtu | ||
| 592 | * | ||
| 593 | * @param cdev | ||
| 594 | * @param mtu | ||
| 595 | * | ||
| 596 | */ | ||
| 597 | int (*update_mtu)(struct qed_dev *cdev, u16 mtu); | ||
| 598 | |||
| 599 | /** | ||
| 600 | * @brief update_wol - update of changes in the WoL configuration | ||
| 601 | * | ||
| 602 | * @param cdev | ||
| 603 | * @param enabled - true iff WoL should be enabled. | ||
| 604 | */ | ||
| 605 | int (*update_wol) (struct qed_dev *cdev, bool enabled); | ||
| 557 | }; | 606 | }; |
| 558 | 607 | ||
| 559 | #define MASK_FIELD(_name, _value) \ | 608 | #define MASK_FIELD(_name, _value) \ |
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h new file mode 100644 index 000000000000..d27912480cb3 --- /dev/null +++ b/include/linux/qed/qed_iscsi_if.h | |||
| @@ -0,0 +1,229 @@ | |||
| 1 | /* QLogic qed NIC Driver | ||
| 2 | * Copyright (c) 2015 QLogic Corporation | ||
| 3 | * | ||
| 4 | * This software is available under the terms of the GNU General Public License | ||
| 5 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
| 6 | * this source tree. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef _QED_ISCSI_IF_H | ||
| 10 | #define _QED_ISCSI_IF_H | ||
| 11 | #include <linux/types.h> | ||
| 12 | #include <linux/qed/qed_if.h> | ||
| 13 | |||
| 14 | typedef int (*iscsi_event_cb_t) (void *context, | ||
| 15 | u8 fw_event_code, void *fw_handle); | ||
| 16 | struct qed_iscsi_stats { | ||
| 17 | u64 iscsi_rx_bytes_cnt; | ||
| 18 | u64 iscsi_rx_packet_cnt; | ||
| 19 | u64 iscsi_rx_new_ooo_isle_events_cnt; | ||
| 20 | u32 iscsi_cmdq_threshold_cnt; | ||
| 21 | u32 iscsi_rq_threshold_cnt; | ||
| 22 | u32 iscsi_immq_threshold_cnt; | ||
| 23 | |||
| 24 | u64 iscsi_rx_dropped_pdus_task_not_valid; | ||
| 25 | |||
| 26 | u64 iscsi_rx_data_pdu_cnt; | ||
| 27 | u64 iscsi_rx_r2t_pdu_cnt; | ||
| 28 | u64 iscsi_rx_total_pdu_cnt; | ||
| 29 | |||
| 30 | u64 iscsi_tx_go_to_slow_start_event_cnt; | ||
| 31 | u64 iscsi_tx_fast_retransmit_event_cnt; | ||
| 32 | |||
| 33 | u64 iscsi_tx_data_pdu_cnt; | ||
| 34 | u64 iscsi_tx_r2t_pdu_cnt; | ||
| 35 | u64 iscsi_tx_total_pdu_cnt; | ||
| 36 | |||
| 37 | u64 iscsi_tx_bytes_cnt; | ||
| 38 | u64 iscsi_tx_packet_cnt; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct qed_dev_iscsi_info { | ||
| 42 | struct qed_dev_info common; | ||
| 43 | |||
| 44 | void __iomem *primary_dbq_rq_addr; | ||
| 45 | void __iomem *secondary_bdq_rq_addr; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct qed_iscsi_id_params { | ||
| 49 | u8 mac[ETH_ALEN]; | ||
| 50 | u32 ip[4]; | ||
| 51 | u16 port; | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct qed_iscsi_params_offload { | ||
| 55 | u8 layer_code; | ||
| 56 | dma_addr_t sq_pbl_addr; | ||
| 57 | u32 initial_ack; | ||
| 58 | |||
| 59 | struct qed_iscsi_id_params src; | ||
| 60 | struct qed_iscsi_id_params dst; | ||
| 61 | u16 vlan_id; | ||
| 62 | u8 tcp_flags; | ||
| 63 | u8 ip_version; | ||
| 64 | u8 default_cq; | ||
| 65 | |||
| 66 | u8 ka_max_probe_cnt; | ||
| 67 | u8 dup_ack_theshold; | ||
| 68 | u32 rcv_next; | ||
| 69 | u32 snd_una; | ||
| 70 | u32 snd_next; | ||
| 71 | u32 snd_max; | ||
| 72 | u32 snd_wnd; | ||
| 73 | u32 rcv_wnd; | ||
| 74 | u32 snd_wl1; | ||
| 75 | u32 cwnd; | ||
| 76 | u32 ss_thresh; | ||
| 77 | u16 srtt; | ||
| 78 | u16 rtt_var; | ||
| 79 | u32 ts_time; | ||
| 80 | u32 ts_recent; | ||
| 81 | u32 ts_recent_age; | ||
| 82 | u32 total_rt; | ||
| 83 | u32 ka_timeout_delta; | ||
| 84 | u32 rt_timeout_delta; | ||
| 85 | u8 dup_ack_cnt; | ||
| 86 | u8 snd_wnd_probe_cnt; | ||
| 87 | u8 ka_probe_cnt; | ||
| 88 | u8 rt_cnt; | ||
| 89 | u32 flow_label; | ||
| 90 | u32 ka_timeout; | ||
| 91 | u32 ka_interval; | ||
| 92 | u32 max_rt_time; | ||
| 93 | u32 initial_rcv_wnd; | ||
| 94 | u8 ttl; | ||
| 95 | u8 tos_or_tc; | ||
| 96 | u16 remote_port; | ||
| 97 | u16 local_port; | ||
| 98 | u16 mss; | ||
| 99 | u8 snd_wnd_scale; | ||
| 100 | u8 rcv_wnd_scale; | ||
| 101 | u32 ts_ticks_per_second; | ||
| 102 | u16 da_timeout_value; | ||
| 103 | u8 ack_frequency; | ||
| 104 | }; | ||
| 105 | |||
| 106 | struct qed_iscsi_params_update { | ||
| 107 | u8 update_flag; | ||
| 108 | #define QED_ISCSI_CONN_HD_EN BIT(0) | ||
| 109 | #define QED_ISCSI_CONN_DD_EN BIT(1) | ||
| 110 | #define QED_ISCSI_CONN_INITIAL_R2T BIT(2) | ||
| 111 | #define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3) | ||
| 112 | |||
| 113 | u32 max_seq_size; | ||
| 114 | u32 max_recv_pdu_length; | ||
| 115 | u32 max_send_pdu_length; | ||
| 116 | u32 first_seq_length; | ||
| 117 | u32 exp_stat_sn; | ||
| 118 | }; | ||
| 119 | |||
| 120 | #define MAX_TID_BLOCKS_ISCSI (512) | ||
| 121 | struct qed_iscsi_tid { | ||
| 122 | u32 size; /* In bytes per task */ | ||
| 123 | u32 num_tids_per_block; | ||
| 124 | u8 *blocks[MAX_TID_BLOCKS_ISCSI]; | ||
| 125 | }; | ||
| 126 | |||
| 127 | struct qed_iscsi_cb_ops { | ||
| 128 | struct qed_common_cb_ops common; | ||
| 129 | }; | ||
| 130 | |||
| 131 | /** | ||
| 132 | * struct qed_iscsi_ops - qed iSCSI operations. | ||
| 133 | * @common: common operations pointer | ||
| 134 | * @ll2: light L2 operations pointer | ||
| 135 | * @fill_dev_info: fills iSCSI specific information | ||
| 136 | * @param cdev | ||
| 137 | * @param info | ||
| 138 | * @return 0 on sucesss, otherwise error value. | ||
| 139 | * @register_ops: register iscsi operations | ||
| 140 | * @param cdev | ||
| 141 | * @param ops - specified using qed_iscsi_cb_ops | ||
| 142 | * @param cookie - driver private | ||
| 143 | * @start: iscsi in FW | ||
| 144 | * @param cdev | ||
| 145 | * @param tasks - qed will fill information about tasks | ||
| 146 | * return 0 on success, otherwise error value. | ||
| 147 | * @stop: iscsi in FW | ||
| 148 | * @param cdev | ||
| 149 | * return 0 on success, otherwise error value. | ||
| 150 | * @acquire_conn: acquire a new iscsi connection | ||
| 151 | * @param cdev | ||
| 152 | * @param handle - qed will fill handle that should be | ||
| 153 | * used henceforth as identifier of the | ||
| 154 | * connection. | ||
| 155 | * @param p_doorbell - qed will fill the address of the | ||
| 156 | * doorbell. | ||
| 157 | * @return 0 on sucesss, otherwise error value. | ||
| 158 | * @release_conn: release a previously acquired iscsi connection | ||
| 159 | * @param cdev | ||
| 160 | * @param handle - the connection handle. | ||
| 161 | * @return 0 on success, otherwise error value. | ||
| 162 | * @offload_conn: configures an offloaded connection | ||
| 163 | * @param cdev | ||
| 164 | * @param handle - the connection handle. | ||
| 165 | * @param conn_info - the configuration to use for the | ||
| 166 | * offload. | ||
| 167 | * @return 0 on success, otherwise error value. | ||
| 168 | * @update_conn: updates an offloaded connection | ||
| 169 | * @param cdev | ||
| 170 | * @param handle - the connection handle. | ||
| 171 | * @param conn_info - the configuration to use for the | ||
| 172 | * offload. | ||
| 173 | * @return 0 on success, otherwise error value. | ||
| 174 | * @destroy_conn: stops an offloaded connection | ||
| 175 | * @param cdev | ||
| 176 | * @param handle - the connection handle. | ||
| 177 | * @return 0 on success, otherwise error value. | ||
| 178 | * @clear_sq: clear all task in sq | ||
| 179 | * @param cdev | ||
| 180 | * @param handle - the connection handle. | ||
| 181 | * @return 0 on success, otherwise error value. | ||
| 182 | * @get_stats: iSCSI related statistics | ||
| 183 | * @param cdev | ||
| 184 | * @param stats - pointer to struck that would be filled | ||
| 185 | * we stats | ||
| 186 | * @return 0 on success, error otherwise. | ||
| 187 | */ | ||
| 188 | struct qed_iscsi_ops { | ||
| 189 | const struct qed_common_ops *common; | ||
| 190 | |||
| 191 | const struct qed_ll2_ops *ll2; | ||
| 192 | |||
| 193 | int (*fill_dev_info)(struct qed_dev *cdev, | ||
| 194 | struct qed_dev_iscsi_info *info); | ||
| 195 | |||
| 196 | void (*register_ops)(struct qed_dev *cdev, | ||
| 197 | struct qed_iscsi_cb_ops *ops, void *cookie); | ||
| 198 | |||
| 199 | int (*start)(struct qed_dev *cdev, | ||
| 200 | struct qed_iscsi_tid *tasks, | ||
| 201 | void *event_context, iscsi_event_cb_t async_event_cb); | ||
| 202 | |||
| 203 | int (*stop)(struct qed_dev *cdev); | ||
| 204 | |||
| 205 | int (*acquire_conn)(struct qed_dev *cdev, | ||
| 206 | u32 *handle, | ||
| 207 | u32 *fw_cid, void __iomem **p_doorbell); | ||
| 208 | |||
| 209 | int (*release_conn)(struct qed_dev *cdev, u32 handle); | ||
| 210 | |||
| 211 | int (*offload_conn)(struct qed_dev *cdev, | ||
| 212 | u32 handle, | ||
| 213 | struct qed_iscsi_params_offload *conn_info); | ||
| 214 | |||
| 215 | int (*update_conn)(struct qed_dev *cdev, | ||
| 216 | u32 handle, | ||
| 217 | struct qed_iscsi_params_update *conn_info); | ||
| 218 | |||
| 219 | int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); | ||
| 220 | |||
| 221 | int (*clear_sq)(struct qed_dev *cdev, u32 handle); | ||
| 222 | |||
| 223 | int (*get_stats)(struct qed_dev *cdev, | ||
| 224 | struct qed_iscsi_stats *stats); | ||
| 225 | }; | ||
| 226 | |||
| 227 | const struct qed_iscsi_ops *qed_get_iscsi_ops(void); | ||
| 228 | void qed_put_iscsi_ops(void); | ||
| 229 | #endif | ||
diff --git a/include/linux/quota.h b/include/linux/quota.h index 55107a8ff887..3434eef2a5aa 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
| @@ -431,7 +431,7 @@ struct qc_info { | |||
| 431 | 431 | ||
| 432 | /* Operations handling requests from userspace */ | 432 | /* Operations handling requests from userspace */ |
| 433 | struct quotactl_ops { | 433 | struct quotactl_ops { |
| 434 | int (*quota_on)(struct super_block *, int, int, struct path *); | 434 | int (*quota_on)(struct super_block *, int, int, const struct path *); |
| 435 | int (*quota_off)(struct super_block *, int); | 435 | int (*quota_off)(struct super_block *, int); |
| 436 | int (*quota_enable)(struct super_block *, unsigned int); | 436 | int (*quota_enable)(struct super_block *, unsigned int); |
| 437 | int (*quota_disable)(struct super_block *, unsigned int); | 437 | int (*quota_disable)(struct super_block *, unsigned int); |
| @@ -520,7 +520,6 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev, | |||
| 520 | struct quota_info { | 520 | struct quota_info { |
| 521 | unsigned int flags; /* Flags for diskquotas on this device */ | 521 | unsigned int flags; /* Flags for diskquotas on this device */ |
| 522 | struct mutex dqio_mutex; /* lock device while I/O in progress */ | 522 | struct mutex dqio_mutex; /* lock device while I/O in progress */ |
| 523 | struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ | ||
| 524 | struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ | 523 | struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ |
| 525 | struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ | 524 | struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ |
| 526 | const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ | 525 | const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f00fa86ac966..799a63d0e1a8 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
| @@ -90,7 +90,7 @@ int dquot_file_open(struct inode *inode, struct file *file); | |||
| 90 | int dquot_enable(struct inode *inode, int type, int format_id, | 90 | int dquot_enable(struct inode *inode, int type, int format_id, |
| 91 | unsigned int flags); | 91 | unsigned int flags); |
| 92 | int dquot_quota_on(struct super_block *sb, int type, int format_id, | 92 | int dquot_quota_on(struct super_block *sb, int type, int format_id, |
| 93 | struct path *path); | 93 | const struct path *path); |
| 94 | int dquot_quota_on_mount(struct super_block *sb, char *qf_name, | 94 | int dquot_quota_on_mount(struct super_block *sb, char *qf_name, |
| 95 | int format_id, int type); | 95 | int format_id, int type); |
| 96 | int dquot_quota_off(struct super_block *sb, int type); | 96 | int dquot_quota_off(struct super_block *sb, int type); |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index af3581b8a451..5dea8f6440e4 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -80,26 +80,25 @@ static inline bool radix_tree_is_internal_node(void *ptr) | |||
| 80 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ | 80 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ |
| 81 | RADIX_TREE_MAP_SHIFT)) | 81 | RADIX_TREE_MAP_SHIFT)) |
| 82 | 82 | ||
| 83 | /* Internally used bits of node->count */ | 83 | /* |
| 84 | #define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1) | 84 | * @count is the count of every non-NULL element in the ->slots array |
| 85 | #define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1) | 85 | * whether that is an exceptional entry, a retry entry, a user pointer, |
| 86 | 86 | * a sibling entry or a pointer to the next level of the tree. | |
| 87 | * @exceptional is the count of every element in ->slots which is | ||
| 88 | * either radix_tree_exceptional_entry() or is a sibling entry for an | ||
| 89 | * exceptional entry. | ||
| 90 | */ | ||
| 87 | struct radix_tree_node { | 91 | struct radix_tree_node { |
| 88 | unsigned char shift; /* Bits remaining in each slot */ | 92 | unsigned char shift; /* Bits remaining in each slot */ |
| 89 | unsigned char offset; /* Slot offset in parent */ | 93 | unsigned char offset; /* Slot offset in parent */ |
| 90 | unsigned int count; | 94 | unsigned char count; /* Total entry count */ |
| 95 | unsigned char exceptional; /* Exceptional entry count */ | ||
| 96 | struct radix_tree_node *parent; /* Used when ascending tree */ | ||
| 97 | void *private_data; /* For tree user */ | ||
| 91 | union { | 98 | union { |
| 92 | struct { | 99 | struct list_head private_list; /* For tree user */ |
| 93 | /* Used when ascending tree */ | 100 | struct rcu_head rcu_head; /* Used when freeing node */ |
| 94 | struct radix_tree_node *parent; | ||
| 95 | /* For tree user */ | ||
| 96 | void *private_data; | ||
| 97 | }; | ||
| 98 | /* Used when freeing node */ | ||
| 99 | struct rcu_head rcu_head; | ||
| 100 | }; | 101 | }; |
| 101 | /* For tree user */ | ||
| 102 | struct list_head private_list; | ||
| 103 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; | 102 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; |
| 104 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | 103 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; |
| 105 | }; | 104 | }; |
| @@ -130,6 +129,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root) | |||
| 130 | } | 129 | } |
| 131 | 130 | ||
| 132 | /** | 131 | /** |
| 132 | * struct radix_tree_iter - radix tree iterator state | ||
| 133 | * | ||
| 134 | * @index: index of current slot | ||
| 135 | * @next_index: one beyond the last index for this chunk | ||
| 136 | * @tags: bit-mask for tag-iterating | ||
| 137 | * @node: node that contains current slot | ||
| 138 | * @shift: shift for the node that holds our slots | ||
| 139 | * | ||
| 140 | * This radix tree iterator works in terms of "chunks" of slots. A chunk is a | ||
| 141 | * subinterval of slots contained within one radix tree leaf node. It is | ||
| 142 | * described by a pointer to its first slot and a struct radix_tree_iter | ||
| 143 | * which holds the chunk's position in the tree and its size. For tagged | ||
| 144 | * iteration radix_tree_iter also holds the slots' bit-mask for one chosen | ||
| 145 | * radix tree tag. | ||
| 146 | */ | ||
| 147 | struct radix_tree_iter { | ||
| 148 | unsigned long index; | ||
| 149 | unsigned long next_index; | ||
| 150 | unsigned long tags; | ||
| 151 | struct radix_tree_node *node; | ||
| 152 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | ||
| 153 | unsigned int shift; | ||
| 154 | #endif | ||
| 155 | }; | ||
| 156 | |||
| 157 | static inline unsigned int iter_shift(const struct radix_tree_iter *iter) | ||
| 158 | { | ||
| 159 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | ||
| 160 | return iter->shift; | ||
| 161 | #else | ||
| 162 | return 0; | ||
| 163 | #endif | ||
| 164 | } | ||
| 165 | |||
| 166 | /** | ||
| 133 | * Radix-tree synchronization | 167 | * Radix-tree synchronization |
| 134 | * | 168 | * |
| 135 | * The radix-tree API requires that users provide all synchronisation (with | 169 | * The radix-tree API requires that users provide all synchronisation (with |
| @@ -248,20 +282,6 @@ static inline int radix_tree_exception(void *arg) | |||
| 248 | return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); | 282 | return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); |
| 249 | } | 283 | } |
| 250 | 284 | ||
| 251 | /** | ||
| 252 | * radix_tree_replace_slot - replace item in a slot | ||
| 253 | * @pslot: pointer to slot, returned by radix_tree_lookup_slot | ||
| 254 | * @item: new item to store in the slot. | ||
| 255 | * | ||
| 256 | * For use with radix_tree_lookup_slot(). Caller must hold tree write locked | ||
| 257 | * across slot lookup and replacement. | ||
| 258 | */ | ||
| 259 | static inline void radix_tree_replace_slot(void **pslot, void *item) | ||
| 260 | { | ||
| 261 | BUG_ON(radix_tree_is_internal_node(item)); | ||
| 262 | rcu_assign_pointer(*pslot, item); | ||
| 263 | } | ||
| 264 | |||
| 265 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, | 285 | int __radix_tree_create(struct radix_tree_root *root, unsigned long index, |
| 266 | unsigned order, struct radix_tree_node **nodep, | 286 | unsigned order, struct radix_tree_node **nodep, |
| 267 | void ***slotp); | 287 | void ***slotp); |
| @@ -276,7 +296,16 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, | |||
| 276 | struct radix_tree_node **nodep, void ***slotp); | 296 | struct radix_tree_node **nodep, void ***slotp); |
| 277 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); | 297 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); |
| 278 | void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); | 298 | void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); |
| 279 | bool __radix_tree_delete_node(struct radix_tree_root *root, | 299 | typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); |
| 300 | void __radix_tree_replace(struct radix_tree_root *root, | ||
| 301 | struct radix_tree_node *node, | ||
| 302 | void **slot, void *item, | ||
| 303 | radix_tree_update_node_t update_node, void *private); | ||
| 304 | void radix_tree_iter_replace(struct radix_tree_root *, | ||
| 305 | const struct radix_tree_iter *, void **slot, void *item); | ||
| 306 | void radix_tree_replace_slot(struct radix_tree_root *root, | ||
| 307 | void **slot, void *item); | ||
| 308 | void __radix_tree_delete_node(struct radix_tree_root *root, | ||
| 280 | struct radix_tree_node *node); | 309 | struct radix_tree_node *node); |
| 281 | void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); | 310 | void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); |
| 282 | void *radix_tree_delete(struct radix_tree_root *, unsigned long); | 311 | void *radix_tree_delete(struct radix_tree_root *, unsigned long); |
| @@ -299,6 +328,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, | |||
| 299 | unsigned long index, unsigned int tag); | 328 | unsigned long index, unsigned int tag); |
| 300 | int radix_tree_tag_get(struct radix_tree_root *root, | 329 | int radix_tree_tag_get(struct radix_tree_root *root, |
| 301 | unsigned long index, unsigned int tag); | 330 | unsigned long index, unsigned int tag); |
| 331 | void radix_tree_iter_tag_set(struct radix_tree_root *root, | ||
| 332 | const struct radix_tree_iter *iter, unsigned int tag); | ||
| 302 | unsigned int | 333 | unsigned int |
| 303 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | 334 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, |
| 304 | unsigned long first_index, unsigned int max_items, | 335 | unsigned long first_index, unsigned int max_items, |
| @@ -307,50 +338,18 @@ unsigned int | |||
| 307 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | 338 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, |
| 308 | unsigned long first_index, unsigned int max_items, | 339 | unsigned long first_index, unsigned int max_items, |
| 309 | unsigned int tag); | 340 | unsigned int tag); |
| 310 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | ||
| 311 | unsigned long *first_indexp, unsigned long last_index, | ||
| 312 | unsigned long nr_to_tag, | ||
| 313 | unsigned int fromtag, unsigned int totag); | ||
| 314 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); | 341 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); |
| 315 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); | ||
| 316 | 342 | ||
| 317 | static inline void radix_tree_preload_end(void) | 343 | static inline void radix_tree_preload_end(void) |
| 318 | { | 344 | { |
| 319 | preempt_enable(); | 345 | preempt_enable(); |
| 320 | } | 346 | } |
| 321 | 347 | ||
| 322 | /** | 348 | int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); |
| 323 | * struct radix_tree_iter - radix tree iterator state | 349 | int radix_tree_split(struct radix_tree_root *, unsigned long index, |
| 324 | * | 350 | unsigned new_order); |
| 325 | * @index: index of current slot | 351 | int radix_tree_join(struct radix_tree_root *, unsigned long index, |
| 326 | * @next_index: one beyond the last index for this chunk | 352 | unsigned new_order, void *); |
| 327 | * @tags: bit-mask for tag-iterating | ||
| 328 | * @shift: shift for the node that holds our slots | ||
| 329 | * | ||
| 330 | * This radix tree iterator works in terms of "chunks" of slots. A chunk is a | ||
| 331 | * subinterval of slots contained within one radix tree leaf node. It is | ||
| 332 | * described by a pointer to its first slot and a struct radix_tree_iter | ||
| 333 | * which holds the chunk's position in the tree and its size. For tagged | ||
| 334 | * iteration radix_tree_iter also holds the slots' bit-mask for one chosen | ||
| 335 | * radix tree tag. | ||
| 336 | */ | ||
| 337 | struct radix_tree_iter { | ||
| 338 | unsigned long index; | ||
| 339 | unsigned long next_index; | ||
| 340 | unsigned long tags; | ||
| 341 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | ||
| 342 | unsigned int shift; | ||
| 343 | #endif | ||
| 344 | }; | ||
| 345 | |||
| 346 | static inline unsigned int iter_shift(struct radix_tree_iter *iter) | ||
| 347 | { | ||
| 348 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | ||
| 349 | return iter->shift; | ||
| 350 | #else | ||
| 351 | return 0; | ||
| 352 | #endif | ||
| 353 | } | ||
| 354 | 353 | ||
| 355 | #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ | 354 | #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ |
| 356 | #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ | 355 | #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ |
| @@ -419,20 +418,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) | |||
| 419 | } | 418 | } |
| 420 | 419 | ||
| 421 | /** | 420 | /** |
| 422 | * radix_tree_iter_next - resume iterating when the chunk may be invalid | 421 | * radix_tree_iter_resume - resume iterating when the chunk may be invalid |
| 423 | * @iter: iterator state | 422 | * @slot: pointer to current slot |
| 423 | * @iter: iterator state | ||
| 424 | * Returns: New slot pointer | ||
| 424 | * | 425 | * |
| 425 | * If the iterator needs to release then reacquire a lock, the chunk may | 426 | * If the iterator needs to release then reacquire a lock, the chunk may |
| 426 | * have been invalidated by an insertion or deletion. Call this function | 427 | * have been invalidated by an insertion or deletion. Call this function |
| 427 | * to continue the iteration from the next index. | 428 | * before releasing the lock to continue the iteration from the next index. |
| 428 | */ | 429 | */ |
| 429 | static inline __must_check | 430 | void **__must_check radix_tree_iter_resume(void **slot, |
| 430 | void **radix_tree_iter_next(struct radix_tree_iter *iter) | 431 | struct radix_tree_iter *iter); |
| 431 | { | ||
| 432 | iter->next_index = __radix_tree_iter_add(iter, 1); | ||
| 433 | iter->tags = 0; | ||
| 434 | return NULL; | ||
| 435 | } | ||
| 436 | 432 | ||
| 437 | /** | 433 | /** |
| 438 | * radix_tree_chunk_size - get current chunk size | 434 | * radix_tree_chunk_size - get current chunk size |
| @@ -446,10 +442,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) | |||
| 446 | return (iter->next_index - iter->index) >> iter_shift(iter); | 442 | return (iter->next_index - iter->index) >> iter_shift(iter); |
| 447 | } | 443 | } |
| 448 | 444 | ||
| 449 | static inline struct radix_tree_node *entry_to_node(void *ptr) | 445 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 446 | void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, | ||
| 447 | unsigned flags); | ||
| 448 | #else | ||
| 449 | /* Can't happen without sibling entries, but the compiler can't tell that */ | ||
| 450 | static inline void ** __radix_tree_next_slot(void **slot, | ||
| 451 | struct radix_tree_iter *iter, unsigned flags) | ||
| 450 | { | 452 | { |
| 451 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); | 453 | return slot; |
| 452 | } | 454 | } |
| 455 | #endif | ||
| 453 | 456 | ||
| 454 | /** | 457 | /** |
| 455 | * radix_tree_next_slot - find next slot in chunk | 458 | * radix_tree_next_slot - find next slot in chunk |
| @@ -463,7 +466,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr) | |||
| 463 | * For tagged lookup it also eats @iter->tags. | 466 | * For tagged lookup it also eats @iter->tags. |
| 464 | * | 467 | * |
| 465 | * There are several cases where 'slot' can be passed in as NULL to this | 468 | * There are several cases where 'slot' can be passed in as NULL to this |
| 466 | * function. These cases result from the use of radix_tree_iter_next() or | 469 | * function. These cases result from the use of radix_tree_iter_resume() or |
| 467 | * radix_tree_iter_retry(). In these cases we don't end up dereferencing | 470 | * radix_tree_iter_retry(). In these cases we don't end up dereferencing |
| 468 | * 'slot' because either: | 471 | * 'slot' because either: |
| 469 | * a) we are doing tagged iteration and iter->tags has been set to 0, or | 472 | * a) we are doing tagged iteration and iter->tags has been set to 0, or |
| @@ -474,51 +477,31 @@ static __always_inline void ** | |||
| 474 | radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | 477 | radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) |
| 475 | { | 478 | { |
| 476 | if (flags & RADIX_TREE_ITER_TAGGED) { | 479 | if (flags & RADIX_TREE_ITER_TAGGED) { |
| 477 | void *canon = slot; | ||
| 478 | |||
| 479 | iter->tags >>= 1; | 480 | iter->tags >>= 1; |
| 480 | if (unlikely(!iter->tags)) | 481 | if (unlikely(!iter->tags)) |
| 481 | return NULL; | 482 | return NULL; |
| 482 | while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && | ||
| 483 | radix_tree_is_internal_node(slot[1])) { | ||
| 484 | if (entry_to_node(slot[1]) == canon) { | ||
| 485 | iter->tags >>= 1; | ||
| 486 | iter->index = __radix_tree_iter_add(iter, 1); | ||
| 487 | slot++; | ||
| 488 | continue; | ||
| 489 | } | ||
| 490 | iter->next_index = __radix_tree_iter_add(iter, 1); | ||
| 491 | return NULL; | ||
| 492 | } | ||
| 493 | if (likely(iter->tags & 1ul)) { | 483 | if (likely(iter->tags & 1ul)) { |
| 494 | iter->index = __radix_tree_iter_add(iter, 1); | 484 | iter->index = __radix_tree_iter_add(iter, 1); |
| 495 | return slot + 1; | 485 | slot++; |
| 486 | goto found; | ||
| 496 | } | 487 | } |
| 497 | if (!(flags & RADIX_TREE_ITER_CONTIG)) { | 488 | if (!(flags & RADIX_TREE_ITER_CONTIG)) { |
| 498 | unsigned offset = __ffs(iter->tags); | 489 | unsigned offset = __ffs(iter->tags); |
| 499 | 490 | ||
| 500 | iter->tags >>= offset; | 491 | iter->tags >>= offset++; |
| 501 | iter->index = __radix_tree_iter_add(iter, offset + 1); | 492 | iter->index = __radix_tree_iter_add(iter, offset); |
| 502 | return slot + offset + 1; | 493 | slot += offset; |
| 494 | goto found; | ||
| 503 | } | 495 | } |
| 504 | } else { | 496 | } else { |
| 505 | long count = radix_tree_chunk_size(iter); | 497 | long count = radix_tree_chunk_size(iter); |
| 506 | void *canon = slot; | ||
| 507 | 498 | ||
| 508 | while (--count > 0) { | 499 | while (--count > 0) { |
| 509 | slot++; | 500 | slot++; |
| 510 | iter->index = __radix_tree_iter_add(iter, 1); | 501 | iter->index = __radix_tree_iter_add(iter, 1); |
| 511 | 502 | ||
| 512 | if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && | ||
| 513 | radix_tree_is_internal_node(*slot)) { | ||
| 514 | if (entry_to_node(*slot) == canon) | ||
| 515 | continue; | ||
| 516 | iter->next_index = iter->index; | ||
| 517 | break; | ||
| 518 | } | ||
| 519 | |||
| 520 | if (likely(*slot)) | 503 | if (likely(*slot)) |
| 521 | return slot; | 504 | goto found; |
| 522 | if (flags & RADIX_TREE_ITER_CONTIG) { | 505 | if (flags & RADIX_TREE_ITER_CONTIG) { |
| 523 | /* forbid switching to the next chunk */ | 506 | /* forbid switching to the next chunk */ |
| 524 | iter->next_index = 0; | 507 | iter->next_index = 0; |
| @@ -527,6 +510,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | |||
| 527 | } | 510 | } |
| 528 | } | 511 | } |
| 529 | return NULL; | 512 | return NULL; |
| 513 | |||
| 514 | found: | ||
| 515 | if (unlikely(radix_tree_is_internal_node(*slot))) | ||
| 516 | return __radix_tree_next_slot(slot, iter, flags); | ||
| 517 | return slot; | ||
| 530 | } | 518 | } |
| 531 | 519 | ||
| 532 | /** | 520 | /** |
| @@ -577,6 +565,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | |||
| 577 | slot || (slot = radix_tree_next_chunk(root, iter, \ | 565 | slot || (slot = radix_tree_next_chunk(root, iter, \ |
| 578 | RADIX_TREE_ITER_TAGGED | tag)) ; \ | 566 | RADIX_TREE_ITER_TAGGED | tag)) ; \ |
| 579 | slot = radix_tree_next_slot(slot, iter, \ | 567 | slot = radix_tree_next_slot(slot, iter, \ |
| 580 | RADIX_TREE_ITER_TAGGED)) | 568 | RADIX_TREE_ITER_TAGGED | tag)) |
| 581 | 569 | ||
| 582 | #endif /* _LINUX_RADIX_TREE_H */ | 570 | #endif /* _LINUX_RADIX_TREE_H */ |
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 57c9e0622a38..56375edf2ed2 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
| @@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func); | |||
| 77 | 77 | ||
| 78 | #ifdef CONFIG_PRINTK | 78 | #ifdef CONFIG_PRINTK |
| 79 | 79 | ||
| 80 | #define WARN_ON_RATELIMIT(condition, state) \ | 80 | #define WARN_ON_RATELIMIT(condition, state) ({ \ |
| 81 | WARN_ON((condition) && __ratelimit(state)) | 81 | bool __rtn_cond = !!(condition); \ |
| 82 | WARN_ON(__rtn_cond && __ratelimit(state)); \ | ||
| 83 | __rtn_cond; \ | ||
| 84 | }) | ||
| 82 | 85 | ||
| 83 | #define WARN_RATELIMIT(condition, format, ...) \ | 86 | #define WARN_RATELIMIT(condition, format, ...) \ |
| 84 | ({ \ | 87 | ({ \ |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 8beb98dcf14f..4f7a9561b8c4 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) | |||
| 45 | * This is only for internal list manipulation where we know | 45 | * This is only for internal list manipulation where we know |
| 46 | * the prev/next entries already! | 46 | * the prev/next entries already! |
| 47 | */ | 47 | */ |
| 48 | #ifndef CONFIG_DEBUG_LIST | ||
| 49 | static inline void __list_add_rcu(struct list_head *new, | 48 | static inline void __list_add_rcu(struct list_head *new, |
| 50 | struct list_head *prev, struct list_head *next) | 49 | struct list_head *prev, struct list_head *next) |
| 51 | { | 50 | { |
| 51 | if (!__list_add_valid(new, prev, next)) | ||
| 52 | return; | ||
| 53 | |||
| 52 | new->next = next; | 54 | new->next = next; |
| 53 | new->prev = prev; | 55 | new->prev = prev; |
| 54 | rcu_assign_pointer(list_next_rcu(prev), new); | 56 | rcu_assign_pointer(list_next_rcu(prev), new); |
| 55 | next->prev = new; | 57 | next->prev = new; |
| 56 | } | 58 | } |
| 57 | #else | ||
| 58 | void __list_add_rcu(struct list_head *new, | ||
| 59 | struct list_head *prev, struct list_head *next); | ||
| 60 | #endif | ||
| 61 | 59 | ||
| 62 | /** | 60 | /** |
| 63 | * list_add_rcu - add a new entry to rcu-protected list | 61 | * list_add_rcu - add a new entry to rcu-protected list |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 692108222271..ea0fffa5faeb 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
| @@ -120,6 +120,25 @@ struct regmap; | |||
| 120 | #define REGULATOR_EVENT_PRE_DISABLE 0x400 | 120 | #define REGULATOR_EVENT_PRE_DISABLE 0x400 |
| 121 | #define REGULATOR_EVENT_ABORT_DISABLE 0x800 | 121 | #define REGULATOR_EVENT_ABORT_DISABLE 0x800 |
| 122 | 122 | ||
| 123 | /* | ||
| 124 | * Regulator errors that can be queried using regulator_get_error_flags | ||
| 125 | * | ||
| 126 | * UNDER_VOLTAGE Regulator output is under voltage. | ||
| 127 | * OVER_CURRENT Regulator output current is too high. | ||
| 128 | * REGULATION_OUT Regulator output is out of regulation. | ||
| 129 | * FAIL Regulator output has failed. | ||
| 130 | * OVER_TEMP Regulator over temp. | ||
| 131 | * | ||
| 132 | * NOTE: These errors can be OR'ed together. | ||
| 133 | */ | ||
| 134 | |||
| 135 | #define REGULATOR_ERROR_UNDER_VOLTAGE BIT(1) | ||
| 136 | #define REGULATOR_ERROR_OVER_CURRENT BIT(2) | ||
| 137 | #define REGULATOR_ERROR_REGULATION_OUT BIT(3) | ||
| 138 | #define REGULATOR_ERROR_FAIL BIT(4) | ||
| 139 | #define REGULATOR_ERROR_OVER_TEMP BIT(5) | ||
| 140 | |||
| 141 | |||
| 123 | /** | 142 | /** |
| 124 | * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event | 143 | * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event |
| 125 | * | 144 | * |
| @@ -237,6 +256,8 @@ int regulator_get_current_limit(struct regulator *regulator); | |||
| 237 | 256 | ||
| 238 | int regulator_set_mode(struct regulator *regulator, unsigned int mode); | 257 | int regulator_set_mode(struct regulator *regulator, unsigned int mode); |
| 239 | unsigned int regulator_get_mode(struct regulator *regulator); | 258 | unsigned int regulator_get_mode(struct regulator *regulator); |
| 259 | int regulator_get_error_flags(struct regulator *regulator, | ||
| 260 | unsigned int *flags); | ||
| 240 | int regulator_set_load(struct regulator *regulator, int load_uA); | 261 | int regulator_set_load(struct regulator *regulator, int load_uA); |
| 241 | 262 | ||
| 242 | int regulator_allow_bypass(struct regulator *regulator, bool allow); | 263 | int regulator_allow_bypass(struct regulator *regulator, bool allow); |
| @@ -477,6 +498,12 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator) | |||
| 477 | return REGULATOR_MODE_NORMAL; | 498 | return REGULATOR_MODE_NORMAL; |
| 478 | } | 499 | } |
| 479 | 500 | ||
| 501 | static inline int regulator_get_error_flags(struct regulator *regulator, | ||
| 502 | unsigned int *flags) | ||
| 503 | { | ||
| 504 | return -EINVAL; | ||
| 505 | } | ||
| 506 | |||
| 480 | static inline int regulator_set_load(struct regulator *regulator, int load_uA) | 507 | static inline int regulator_set_load(struct regulator *regulator, int load_uA) |
| 481 | { | 508 | { |
| 482 | return REGULATOR_MODE_NORMAL; | 509 | return REGULATOR_MODE_NORMAL; |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 37b532410528..dac8e7b16bc6 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
| @@ -100,6 +100,7 @@ struct regulator_linear_range { | |||
| 100 | * | 100 | * |
| 101 | * @set_mode: Set the configured operating mode for the regulator. | 101 | * @set_mode: Set the configured operating mode for the regulator. |
| 102 | * @get_mode: Get the configured operating mode for the regulator. | 102 | * @get_mode: Get the configured operating mode for the regulator. |
| 103 | * @get_error_flags: Get the current error(s) for the regulator. | ||
| 103 | * @get_status: Return actual (not as-configured) status of regulator, as a | 104 | * @get_status: Return actual (not as-configured) status of regulator, as a |
| 104 | * REGULATOR_STATUS value (or negative errno) | 105 | * REGULATOR_STATUS value (or negative errno) |
| 105 | * @get_optimum_mode: Get the most efficient operating mode for the regulator | 106 | * @get_optimum_mode: Get the most efficient operating mode for the regulator |
| @@ -169,6 +170,9 @@ struct regulator_ops { | |||
| 169 | int (*set_mode) (struct regulator_dev *, unsigned int mode); | 170 | int (*set_mode) (struct regulator_dev *, unsigned int mode); |
| 170 | unsigned int (*get_mode) (struct regulator_dev *); | 171 | unsigned int (*get_mode) (struct regulator_dev *); |
| 171 | 172 | ||
| 173 | /* retrieve current error flags on the regulator */ | ||
| 174 | int (*get_error_flags)(struct regulator_dev *, unsigned int *flags); | ||
| 175 | |||
| 172 | /* Time taken to enable or set voltage on the regulator */ | 176 | /* Time taken to enable or set voltage on the regulator */ |
| 173 | int (*enable_time) (struct regulator_dev *); | 177 | int (*enable_time) (struct regulator_dev *); |
| 174 | int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); | 178 | int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); |
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 930023b7c825..e2f3a3281d8f 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h | |||
| @@ -400,6 +400,7 @@ enum rproc_crash_type { | |||
| 400 | * @firmware_loading_complete: marks e/o asynchronous firmware loading | 400 | * @firmware_loading_complete: marks e/o asynchronous firmware loading |
| 401 | * @bootaddr: address of first instruction to boot rproc with (optional) | 401 | * @bootaddr: address of first instruction to boot rproc with (optional) |
| 402 | * @rvdevs: list of remote virtio devices | 402 | * @rvdevs: list of remote virtio devices |
| 403 | * @subdevs: list of subdevices, to following the running state | ||
| 403 | * @notifyids: idr for dynamically assigning rproc-wide unique notify ids | 404 | * @notifyids: idr for dynamically assigning rproc-wide unique notify ids |
| 404 | * @index: index of this rproc device | 405 | * @index: index of this rproc device |
| 405 | * @crash_handler: workqueue for handling a crash | 406 | * @crash_handler: workqueue for handling a crash |
| @@ -407,15 +408,14 @@ enum rproc_crash_type { | |||
| 407 | * @crash_comp: completion used to sync crash handler and the rproc reload | 408 | * @crash_comp: completion used to sync crash handler and the rproc reload |
| 408 | * @recovery_disabled: flag that state if recovery was disabled | 409 | * @recovery_disabled: flag that state if recovery was disabled |
| 409 | * @max_notifyid: largest allocated notify id. | 410 | * @max_notifyid: largest allocated notify id. |
| 410 | * @table_ptr: pointer to the resource table in effect | 411 | * @table_ptr: our copy of the resource table |
| 411 | * @cached_table: copy of the resource table | ||
| 412 | * @has_iommu: flag to indicate if remote processor is behind an MMU | 412 | * @has_iommu: flag to indicate if remote processor is behind an MMU |
| 413 | */ | 413 | */ |
| 414 | struct rproc { | 414 | struct rproc { |
| 415 | struct list_head node; | 415 | struct list_head node; |
| 416 | struct iommu_domain *domain; | 416 | struct iommu_domain *domain; |
| 417 | const char *name; | 417 | const char *name; |
| 418 | const char *firmware; | 418 | char *firmware; |
| 419 | void *priv; | 419 | void *priv; |
| 420 | const struct rproc_ops *ops; | 420 | const struct rproc_ops *ops; |
| 421 | struct device dev; | 421 | struct device dev; |
| @@ -431,6 +431,7 @@ struct rproc { | |||
| 431 | struct completion firmware_loading_complete; | 431 | struct completion firmware_loading_complete; |
| 432 | u32 bootaddr; | 432 | u32 bootaddr; |
| 433 | struct list_head rvdevs; | 433 | struct list_head rvdevs; |
| 434 | struct list_head subdevs; | ||
| 434 | struct idr notifyids; | 435 | struct idr notifyids; |
| 435 | int index; | 436 | int index; |
| 436 | struct work_struct crash_handler; | 437 | struct work_struct crash_handler; |
| @@ -439,11 +440,23 @@ struct rproc { | |||
| 439 | bool recovery_disabled; | 440 | bool recovery_disabled; |
| 440 | int max_notifyid; | 441 | int max_notifyid; |
| 441 | struct resource_table *table_ptr; | 442 | struct resource_table *table_ptr; |
| 442 | struct resource_table *cached_table; | ||
| 443 | bool has_iommu; | 443 | bool has_iommu; |
| 444 | bool auto_boot; | 444 | bool auto_boot; |
| 445 | }; | 445 | }; |
| 446 | 446 | ||
| 447 | /** | ||
| 448 | * struct rproc_subdev - subdevice tied to a remoteproc | ||
| 449 | * @node: list node related to the rproc subdevs list | ||
| 450 | * @probe: probe function, called as the rproc is started | ||
| 451 | * @remove: remove function, called as the rproc is stopped | ||
| 452 | */ | ||
| 453 | struct rproc_subdev { | ||
| 454 | struct list_head node; | ||
| 455 | |||
| 456 | int (*probe)(struct rproc_subdev *subdev); | ||
| 457 | void (*remove)(struct rproc_subdev *subdev); | ||
| 458 | }; | ||
| 459 | |||
| 447 | /* we currently support only two vrings per rvdev */ | 460 | /* we currently support only two vrings per rvdev */ |
| 448 | 461 | ||
| 449 | #define RVDEV_NUM_VRINGS 2 | 462 | #define RVDEV_NUM_VRINGS 2 |
| @@ -472,6 +485,9 @@ struct rproc_vring { | |||
| 472 | 485 | ||
| 473 | /** | 486 | /** |
| 474 | * struct rproc_vdev - remoteproc state for a supported virtio device | 487 | * struct rproc_vdev - remoteproc state for a supported virtio device |
| 488 | * @refcount: reference counter for the vdev and vring allocations | ||
| 489 | * @subdev: handle for registering the vdev as a rproc subdevice | ||
| 490 | * @id: virtio device id (as in virtio_ids.h) | ||
| 475 | * @node: list node | 491 | * @node: list node |
| 476 | * @rproc: the rproc handle | 492 | * @rproc: the rproc handle |
| 477 | * @vdev: the virio device | 493 | * @vdev: the virio device |
| @@ -479,6 +495,11 @@ struct rproc_vring { | |||
| 479 | * @rsc_offset: offset of the vdev's resource entry | 495 | * @rsc_offset: offset of the vdev's resource entry |
| 480 | */ | 496 | */ |
| 481 | struct rproc_vdev { | 497 | struct rproc_vdev { |
| 498 | struct kref refcount; | ||
| 499 | |||
| 500 | struct rproc_subdev subdev; | ||
| 501 | |||
| 502 | unsigned int id; | ||
| 482 | struct list_head node; | 503 | struct list_head node; |
| 483 | struct rproc *rproc; | 504 | struct rproc *rproc; |
| 484 | struct virtio_device vdev; | 505 | struct virtio_device vdev; |
| @@ -511,4 +532,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) | |||
| 511 | return rvdev->rproc; | 532 | return rvdev->rproc; |
| 512 | } | 533 | } |
| 513 | 534 | ||
| 535 | void rproc_add_subdev(struct rproc *rproc, | ||
| 536 | struct rproc_subdev *subdev, | ||
| 537 | int (*probe)(struct rproc_subdev *subdev), | ||
| 538 | void (*remove)(struct rproc_subdev *subdev)); | ||
| 539 | |||
| 540 | void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); | ||
| 541 | |||
| 514 | #endif /* REMOTEPROC_H */ | 542 | #endif /* REMOTEPROC_H */ |
diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h new file mode 100644 index 000000000000..4155556fa4b2 --- /dev/null +++ b/include/linux/remoteproc/st_slim_rproc.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | /* | ||
| 2 | * SLIM core rproc driver header | ||
| 3 | * | ||
| 4 | * Copyright (C) 2016 STMicroelectronics | ||
| 5 | * | ||
| 6 | * Author: Peter Griffin <peter.griffin@linaro.org> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | */ | ||
| 13 | #ifndef _ST_REMOTEPROC_SLIM_H | ||
| 14 | #define _ST_REMOTEPROC_SLIM_H | ||
| 15 | |||
| 16 | #define ST_SLIM_MEM_MAX 2 | ||
| 17 | #define ST_SLIM_MAX_CLK 4 | ||
| 18 | |||
| 19 | enum { | ||
| 20 | ST_SLIM_DMEM, | ||
| 21 | ST_SLIM_IMEM, | ||
| 22 | }; | ||
| 23 | |||
| 24 | /** | ||
| 25 | * struct st_slim_mem - slim internal memory structure | ||
| 26 | * @cpu_addr: MPU virtual address of the memory region | ||
| 27 | * @bus_addr: Bus address used to access the memory region | ||
| 28 | * @size: Size of the memory region | ||
| 29 | */ | ||
| 30 | struct st_slim_mem { | ||
| 31 | void __iomem *cpu_addr; | ||
| 32 | phys_addr_t bus_addr; | ||
| 33 | size_t size; | ||
| 34 | }; | ||
| 35 | |||
| 36 | /** | ||
| 37 | * struct st_slim_rproc - SLIM slim core | ||
| 38 | * @rproc: rproc handle | ||
| 39 | * @mem: slim memory information | ||
| 40 | * @slimcore: slim slimcore regs | ||
| 41 | * @peri: slim peripheral regs | ||
| 42 | * @clks: slim clocks | ||
| 43 | */ | ||
| 44 | struct st_slim_rproc { | ||
| 45 | struct rproc *rproc; | ||
| 46 | struct st_slim_mem mem[ST_SLIM_MEM_MAX]; | ||
| 47 | void __iomem *slimcore; | ||
| 48 | void __iomem *peri; | ||
| 49 | |||
| 50 | /* st_slim_rproc private */ | ||
| 51 | struct clk *clks[ST_SLIM_MAX_CLK]; | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, | ||
| 55 | char *fw_name); | ||
| 56 | void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); | ||
| 57 | |||
| 58 | #endif | ||
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index b0f305e77b7f..d9706a6f5ae2 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #define _LINUX_RESERVATION_H | 40 | #define _LINUX_RESERVATION_H |
| 41 | 41 | ||
| 42 | #include <linux/ww_mutex.h> | 42 | #include <linux/ww_mutex.h> |
| 43 | #include <linux/fence.h> | 43 | #include <linux/dma-fence.h> |
| 44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
| 45 | #include <linux/seqlock.h> | 45 | #include <linux/seqlock.h> |
| 46 | #include <linux/rcupdate.h> | 46 | #include <linux/rcupdate.h> |
| @@ -59,7 +59,7 @@ extern const char reservation_seqcount_string[]; | |||
| 59 | struct reservation_object_list { | 59 | struct reservation_object_list { |
| 60 | struct rcu_head rcu; | 60 | struct rcu_head rcu; |
| 61 | u32 shared_count, shared_max; | 61 | u32 shared_count, shared_max; |
| 62 | struct fence __rcu *shared[]; | 62 | struct dma_fence __rcu *shared[]; |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | /** | 65 | /** |
| @@ -74,7 +74,7 @@ struct reservation_object { | |||
| 74 | struct ww_mutex lock; | 74 | struct ww_mutex lock; |
| 75 | seqcount_t seq; | 75 | seqcount_t seq; |
| 76 | 76 | ||
| 77 | struct fence __rcu *fence_excl; | 77 | struct dma_fence __rcu *fence_excl; |
| 78 | struct reservation_object_list __rcu *fence; | 78 | struct reservation_object_list __rcu *fence; |
| 79 | struct reservation_object_list *staged; | 79 | struct reservation_object_list *staged; |
| 80 | }; | 80 | }; |
| @@ -107,7 +107,7 @@ reservation_object_fini(struct reservation_object *obj) | |||
| 107 | { | 107 | { |
| 108 | int i; | 108 | int i; |
| 109 | struct reservation_object_list *fobj; | 109 | struct reservation_object_list *fobj; |
| 110 | struct fence *excl; | 110 | struct dma_fence *excl; |
| 111 | 111 | ||
| 112 | /* | 112 | /* |
| 113 | * This object should be dead and all references must have | 113 | * This object should be dead and all references must have |
| @@ -115,12 +115,12 @@ reservation_object_fini(struct reservation_object *obj) | |||
| 115 | */ | 115 | */ |
| 116 | excl = rcu_dereference_protected(obj->fence_excl, 1); | 116 | excl = rcu_dereference_protected(obj->fence_excl, 1); |
| 117 | if (excl) | 117 | if (excl) |
| 118 | fence_put(excl); | 118 | dma_fence_put(excl); |
| 119 | 119 | ||
| 120 | fobj = rcu_dereference_protected(obj->fence, 1); | 120 | fobj = rcu_dereference_protected(obj->fence, 1); |
| 121 | if (fobj) { | 121 | if (fobj) { |
| 122 | for (i = 0; i < fobj->shared_count; ++i) | 122 | for (i = 0; i < fobj->shared_count; ++i) |
| 123 | fence_put(rcu_dereference_protected(fobj->shared[i], 1)); | 123 | dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
| 124 | 124 | ||
| 125 | kfree(fobj); | 125 | kfree(fobj); |
| 126 | } | 126 | } |
| @@ -155,7 +155,7 @@ reservation_object_get_list(struct reservation_object *obj) | |||
| 155 | * RETURNS | 155 | * RETURNS |
| 156 | * The exclusive fence or NULL | 156 | * The exclusive fence or NULL |
| 157 | */ | 157 | */ |
| 158 | static inline struct fence * | 158 | static inline struct dma_fence * |
| 159 | reservation_object_get_excl(struct reservation_object *obj) | 159 | reservation_object_get_excl(struct reservation_object *obj) |
| 160 | { | 160 | { |
| 161 | return rcu_dereference_protected(obj->fence_excl, | 161 | return rcu_dereference_protected(obj->fence_excl, |
| @@ -173,35 +173,32 @@ reservation_object_get_excl(struct reservation_object *obj) | |||
| 173 | * RETURNS | 173 | * RETURNS |
| 174 | * The exclusive fence or NULL if none | 174 | * The exclusive fence or NULL if none |
| 175 | */ | 175 | */ |
| 176 | static inline struct fence * | 176 | static inline struct dma_fence * |
| 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) | 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) |
| 178 | { | 178 | { |
| 179 | struct fence *fence; | 179 | struct dma_fence *fence; |
| 180 | unsigned seq; | 180 | |
| 181 | retry: | 181 | if (!rcu_access_pointer(obj->fence_excl)) |
| 182 | seq = read_seqcount_begin(&obj->seq); | 182 | return NULL; |
| 183 | |||
| 183 | rcu_read_lock(); | 184 | rcu_read_lock(); |
| 184 | fence = rcu_dereference(obj->fence_excl); | 185 | fence = dma_fence_get_rcu_safe(&obj->fence_excl); |
| 185 | if (read_seqcount_retry(&obj->seq, seq)) { | ||
| 186 | rcu_read_unlock(); | ||
| 187 | goto retry; | ||
| 188 | } | ||
| 189 | fence = fence_get(fence); | ||
| 190 | rcu_read_unlock(); | 186 | rcu_read_unlock(); |
| 187 | |||
| 191 | return fence; | 188 | return fence; |
| 192 | } | 189 | } |
| 193 | 190 | ||
| 194 | int reservation_object_reserve_shared(struct reservation_object *obj); | 191 | int reservation_object_reserve_shared(struct reservation_object *obj); |
| 195 | void reservation_object_add_shared_fence(struct reservation_object *obj, | 192 | void reservation_object_add_shared_fence(struct reservation_object *obj, |
| 196 | struct fence *fence); | 193 | struct dma_fence *fence); |
| 197 | 194 | ||
| 198 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 195 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
| 199 | struct fence *fence); | 196 | struct dma_fence *fence); |
| 200 | 197 | ||
| 201 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 198 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
| 202 | struct fence **pfence_excl, | 199 | struct dma_fence **pfence_excl, |
| 203 | unsigned *pshared_count, | 200 | unsigned *pshared_count, |
| 204 | struct fence ***pshared); | 201 | struct dma_fence ***pshared); |
| 205 | 202 | ||
| 206 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 203 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
| 207 | bool wait_all, bool intr, | 204 | bool wait_all, bool intr, |
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | /* | ||
| 2 | * Common syscall restarting data | ||
| 3 | */ | ||
| 4 | #ifndef __LINUX_RESTART_BLOCK_H | ||
| 5 | #define __LINUX_RESTART_BLOCK_H | ||
| 6 | |||
| 7 | #include <linux/compiler.h> | ||
| 8 | #include <linux/types.h> | ||
| 9 | |||
| 10 | struct timespec; | ||
| 11 | struct compat_timespec; | ||
| 12 | struct pollfd; | ||
| 13 | |||
| 14 | /* | ||
| 15 | * System call restart block. | ||
| 16 | */ | ||
| 17 | struct restart_block { | ||
| 18 | long (*fn)(struct restart_block *); | ||
| 19 | union { | ||
| 20 | /* For futex_wait and futex_wait_requeue_pi */ | ||
| 21 | struct { | ||
| 22 | u32 __user *uaddr; | ||
| 23 | u32 val; | ||
| 24 | u32 flags; | ||
| 25 | u32 bitset; | ||
| 26 | u64 time; | ||
| 27 | u32 __user *uaddr2; | ||
| 28 | } futex; | ||
| 29 | /* For nanosleep */ | ||
| 30 | struct { | ||
| 31 | clockid_t clockid; | ||
| 32 | struct timespec __user *rmtp; | ||
| 33 | #ifdef CONFIG_COMPAT | ||
| 34 | struct compat_timespec __user *compat_rmtp; | ||
| 35 | #endif | ||
| 36 | u64 expires; | ||
| 37 | } nanosleep; | ||
| 38 | /* For poll */ | ||
| 39 | struct { | ||
| 40 | struct pollfd __user *ufds; | ||
| 41 | int nfds; | ||
| 42 | int has_timeout; | ||
| 43 | unsigned long tv_sec; | ||
| 44 | unsigned long tv_nsec; | ||
| 45 | } poll; | ||
| 46 | }; | ||
| 47 | }; | ||
| 48 | |||
| 49 | extern long do_no_restart_syscall(struct restart_block *parm); | ||
| 50 | |||
| 51 | #endif /* __LINUX_RESTART_BLOCK_H */ | ||
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 4acc552e9279..b6d4568795a7 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -198,4 +198,10 @@ enum ring_buffer_flags { | |||
| 198 | RB_FL_OVERWRITE = 1 << 0, | 198 | RB_FL_OVERWRITE = 1 << 0, |
| 199 | }; | 199 | }; |
| 200 | 200 | ||
| 201 | #ifdef CONFIG_RING_BUFFER | ||
| 202 | int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); | ||
| 203 | #else | ||
| 204 | #define trace_rb_cpu_prepare NULL | ||
| 205 | #endif | ||
| 206 | |||
| 201 | #endif /* _LINUX_RING_BUFFER_H */ | 207 | #endif /* _LINUX_RING_BUFFER_H */ |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b46bb5620a76..15321fb1df6b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -137,11 +137,19 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) | |||
| 137 | * anon_vma helper functions. | 137 | * anon_vma helper functions. |
| 138 | */ | 138 | */ |
| 139 | void anon_vma_init(void); /* create anon_vma_cachep */ | 139 | void anon_vma_init(void); /* create anon_vma_cachep */ |
| 140 | int anon_vma_prepare(struct vm_area_struct *); | 140 | int __anon_vma_prepare(struct vm_area_struct *); |
| 141 | void unlink_anon_vmas(struct vm_area_struct *); | 141 | void unlink_anon_vmas(struct vm_area_struct *); |
| 142 | int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); | 142 | int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); |
| 143 | int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); | 143 | int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); |
| 144 | 144 | ||
| 145 | static inline int anon_vma_prepare(struct vm_area_struct *vma) | ||
| 146 | { | ||
| 147 | if (likely(vma->anon_vma)) | ||
| 148 | return 0; | ||
| 149 | |||
| 150 | return __anon_vma_prepare(vma); | ||
| 151 | } | ||
| 152 | |||
| 145 | static inline void anon_vma_merge(struct vm_area_struct *vma, | 153 | static inline void anon_vma_merge(struct vm_area_struct *vma, |
| 146 | struct vm_area_struct *next) | 154 | struct vm_area_struct *next) |
| 147 | { | 155 | { |
diff --git a/include/linux/rmi.h b/include/linux/rmi.h index e0aca1476001..64125443f8a6 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/input.h> | 15 | #include <linux/input.h> |
| 16 | #include <linux/kfifo.h> | ||
| 16 | #include <linux/list.h> | 17 | #include <linux/list.h> |
| 17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| @@ -99,6 +100,8 @@ struct rmi_2d_sensor_platform_data { | |||
| 99 | bool topbuttonpad; | 100 | bool topbuttonpad; |
| 100 | bool kernel_tracking; | 101 | bool kernel_tracking; |
| 101 | int dmax; | 102 | int dmax; |
| 103 | int dribble; | ||
| 104 | int palm_detect; | ||
| 102 | }; | 105 | }; |
| 103 | 106 | ||
| 104 | /** | 107 | /** |
| @@ -106,7 +109,7 @@ struct rmi_2d_sensor_platform_data { | |||
| 106 | * @buttonpad - the touchpad is a buttonpad, so enable only the first actual | 109 | * @buttonpad - the touchpad is a buttonpad, so enable only the first actual |
| 107 | * button that is found. | 110 | * button that is found. |
| 108 | * @trackstick_buttons - Set when the function 30 is handling the physical | 111 | * @trackstick_buttons - Set when the function 30 is handling the physical |
| 109 | * buttons of the trackstick (as a PD/2 passthrough device. | 112 | * buttons of the trackstick (as a PS/2 passthrough device). |
| 110 | * @disable - the touchpad incorrectly reports F30 and it should be ignored. | 113 | * @disable - the touchpad incorrectly reports F30 and it should be ignored. |
| 111 | * This is a special case which is due to misconfigured firmware. | 114 | * This is a special case which is due to misconfigured firmware. |
| 112 | */ | 115 | */ |
| @@ -116,14 +119,17 @@ struct rmi_f30_data { | |||
| 116 | bool disable; | 119 | bool disable; |
| 117 | }; | 120 | }; |
| 118 | 121 | ||
| 119 | /** | 122 | |
| 120 | * struct rmi_f01_power - override default power management settings. | 123 | /* |
| 121 | * | 124 | * Set the state of a register |
| 125 | * DEFAULT - use the default value set by the firmware config | ||
| 126 | * OFF - explicitly disable the register | ||
| 127 | * ON - explicitly enable the register | ||
| 122 | */ | 128 | */ |
| 123 | enum rmi_f01_nosleep { | 129 | enum rmi_reg_state { |
| 124 | RMI_F01_NOSLEEP_DEFAULT = 0, | 130 | RMI_REG_STATE_DEFAULT = 0, |
| 125 | RMI_F01_NOSLEEP_OFF = 1, | 131 | RMI_REG_STATE_OFF = 1, |
| 126 | RMI_F01_NOSLEEP_ON = 2 | 132 | RMI_REG_STATE_ON = 2 |
| 127 | }; | 133 | }; |
| 128 | 134 | ||
| 129 | /** | 135 | /** |
| @@ -143,7 +149,7 @@ enum rmi_f01_nosleep { | |||
| 143 | * when the touch sensor is in doze mode, in units of 10ms. | 149 | * when the touch sensor is in doze mode, in units of 10ms. |
| 144 | */ | 150 | */ |
| 145 | struct rmi_f01_power_management { | 151 | struct rmi_f01_power_management { |
| 146 | enum rmi_f01_nosleep nosleep; | 152 | enum rmi_reg_state nosleep; |
| 147 | u8 wakeup_threshold; | 153 | u8 wakeup_threshold; |
| 148 | u8 doze_holdoff; | 154 | u8 doze_holdoff; |
| 149 | u8 doze_interval; | 155 | u8 doze_interval; |
| @@ -204,16 +210,18 @@ struct rmi_device_platform_data_spi { | |||
| 204 | * @reset_delay_ms - after issuing a reset command to the touch sensor, the | 210 | * @reset_delay_ms - after issuing a reset command to the touch sensor, the |
| 205 | * driver waits a few milliseconds to give the firmware a chance to | 211 | * driver waits a few milliseconds to give the firmware a chance to |
| 206 | * to re-initialize. You can override the default wait period here. | 212 | * to re-initialize. You can override the default wait period here. |
| 213 | * @irq: irq associated with the attn gpio line, or negative | ||
| 207 | */ | 214 | */ |
| 208 | struct rmi_device_platform_data { | 215 | struct rmi_device_platform_data { |
| 209 | int reset_delay_ms; | 216 | int reset_delay_ms; |
| 217 | int irq; | ||
| 210 | 218 | ||
| 211 | struct rmi_device_platform_data_spi spi_data; | 219 | struct rmi_device_platform_data_spi spi_data; |
| 212 | 220 | ||
| 213 | /* function handler pdata */ | 221 | /* function handler pdata */ |
| 214 | struct rmi_2d_sensor_platform_data *sensor_pdata; | 222 | struct rmi_2d_sensor_platform_data sensor_pdata; |
| 215 | struct rmi_f01_power_management power_management; | 223 | struct rmi_f01_power_management power_management; |
| 216 | struct rmi_f30_data *f30_data; | 224 | struct rmi_f30_data f30_data; |
| 217 | }; | 225 | }; |
| 218 | 226 | ||
| 219 | /** | 227 | /** |
| @@ -264,9 +272,6 @@ struct rmi_transport_dev { | |||
| 264 | struct rmi_device_platform_data pdata; | 272 | struct rmi_device_platform_data pdata; |
| 265 | 273 | ||
| 266 | struct input_dev *input; | 274 | struct input_dev *input; |
| 267 | |||
| 268 | void *attn_data; | ||
| 269 | int attn_size; | ||
| 270 | }; | 275 | }; |
| 271 | 276 | ||
| 272 | /** | 277 | /** |
| @@ -324,17 +329,24 @@ struct rmi_device { | |||
| 324 | 329 | ||
| 325 | }; | 330 | }; |
| 326 | 331 | ||
| 332 | struct rmi4_attn_data { | ||
| 333 | unsigned long irq_status; | ||
| 334 | size_t size; | ||
| 335 | void *data; | ||
| 336 | }; | ||
| 337 | |||
| 327 | struct rmi_driver_data { | 338 | struct rmi_driver_data { |
| 328 | struct list_head function_list; | 339 | struct list_head function_list; |
| 329 | 340 | ||
| 330 | struct rmi_device *rmi_dev; | 341 | struct rmi_device *rmi_dev; |
| 331 | 342 | ||
| 332 | struct rmi_function *f01_container; | 343 | struct rmi_function *f01_container; |
| 333 | bool f01_bootloader_mode; | 344 | struct rmi_function *f34_container; |
| 345 | bool bootloader_mode; | ||
| 334 | 346 | ||
| 335 | u32 attn_count; | ||
| 336 | int num_of_irq_regs; | 347 | int num_of_irq_regs; |
| 337 | int irq_count; | 348 | int irq_count; |
| 349 | void *irq_memory; | ||
| 338 | unsigned long *irq_status; | 350 | unsigned long *irq_status; |
| 339 | unsigned long *fn_irq_bits; | 351 | unsigned long *fn_irq_bits; |
| 340 | unsigned long *current_irq_mask; | 352 | unsigned long *current_irq_mask; |
| @@ -343,17 +355,23 @@ struct rmi_driver_data { | |||
| 343 | struct input_dev *input; | 355 | struct input_dev *input; |
| 344 | 356 | ||
| 345 | u8 pdt_props; | 357 | u8 pdt_props; |
| 346 | u8 bsr; | 358 | |
| 359 | u8 num_rx_electrodes; | ||
| 360 | u8 num_tx_electrodes; | ||
| 347 | 361 | ||
| 348 | bool enabled; | 362 | bool enabled; |
| 363 | struct mutex enabled_mutex; | ||
| 349 | 364 | ||
| 350 | void *data; | 365 | struct rmi4_attn_data attn_data; |
| 366 | DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); | ||
| 351 | }; | 367 | }; |
| 352 | 368 | ||
| 353 | int rmi_register_transport_device(struct rmi_transport_dev *xport); | 369 | int rmi_register_transport_device(struct rmi_transport_dev *xport); |
| 354 | void rmi_unregister_transport_device(struct rmi_transport_dev *xport); | 370 | void rmi_unregister_transport_device(struct rmi_transport_dev *xport); |
| 355 | int rmi_process_interrupt_requests(struct rmi_device *rmi_dev); | ||
| 356 | 371 | ||
| 357 | int rmi_driver_suspend(struct rmi_device *rmi_dev); | 372 | void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, |
| 358 | int rmi_driver_resume(struct rmi_device *rmi_dev); | 373 | void *data, size_t size); |
| 374 | |||
| 375 | int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); | ||
| 376 | int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); | ||
| 359 | #endif | 377 | #endif |
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 452d393cc8dd..18f9e1ae4b7e 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
| 39 | #include <linux/device.h> | 39 | #include <linux/device.h> |
| 40 | #include <linux/err.h> | ||
| 40 | #include <linux/mod_devicetable.h> | 41 | #include <linux/mod_devicetable.h> |
| 41 | #include <linux/kref.h> | 42 | #include <linux/kref.h> |
| 42 | #include <linux/mutex.h> | 43 | #include <linux/mutex.h> |
| @@ -64,6 +65,7 @@ struct rpmsg_channel_info { | |||
| 64 | * rpmsg_device - device that belong to the rpmsg bus | 65 | * rpmsg_device - device that belong to the rpmsg bus |
| 65 | * @dev: the device struct | 66 | * @dev: the device struct |
| 66 | * @id: device id (used to match between rpmsg drivers and devices) | 67 | * @id: device id (used to match between rpmsg drivers and devices) |
| 68 | * @driver_override: driver name to force a match | ||
| 67 | * @src: local address | 69 | * @src: local address |
| 68 | * @dst: destination address | 70 | * @dst: destination address |
| 69 | * @ept: the rpmsg endpoint of this channel | 71 | * @ept: the rpmsg endpoint of this channel |
| @@ -72,6 +74,7 @@ struct rpmsg_channel_info { | |||
| 72 | struct rpmsg_device { | 74 | struct rpmsg_device { |
| 73 | struct device dev; | 75 | struct device dev; |
| 74 | struct rpmsg_device_id id; | 76 | struct rpmsg_device_id id; |
| 77 | char *driver_override; | ||
| 75 | u32 src; | 78 | u32 src; |
| 76 | u32 dst; | 79 | u32 dst; |
| 77 | struct rpmsg_endpoint *ept; | 80 | struct rpmsg_endpoint *ept; |
| @@ -132,6 +135,8 @@ struct rpmsg_driver { | |||
| 132 | int (*callback)(struct rpmsg_device *, void *, int, void *, u32); | 135 | int (*callback)(struct rpmsg_device *, void *, int, void *, u32); |
| 133 | }; | 136 | }; |
| 134 | 137 | ||
| 138 | #if IS_ENABLED(CONFIG_RPMSG) | ||
| 139 | |||
| 135 | int register_rpmsg_device(struct rpmsg_device *dev); | 140 | int register_rpmsg_device(struct rpmsg_device *dev); |
| 136 | void unregister_rpmsg_device(struct rpmsg_device *dev); | 141 | void unregister_rpmsg_device(struct rpmsg_device *dev); |
| 137 | int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); | 142 | int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); |
| @@ -141,6 +146,116 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, | |||
| 141 | rpmsg_rx_cb_t cb, void *priv, | 146 | rpmsg_rx_cb_t cb, void *priv, |
| 142 | struct rpmsg_channel_info chinfo); | 147 | struct rpmsg_channel_info chinfo); |
| 143 | 148 | ||
| 149 | int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); | ||
| 150 | int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); | ||
| 151 | int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, | ||
| 152 | void *data, int len); | ||
| 153 | |||
| 154 | int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); | ||
| 155 | int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); | ||
| 156 | int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, | ||
| 157 | void *data, int len); | ||
| 158 | |||
| 159 | #else | ||
| 160 | |||
| 161 | static inline int register_rpmsg_device(struct rpmsg_device *dev) | ||
| 162 | { | ||
| 163 | return -ENXIO; | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void unregister_rpmsg_device(struct rpmsg_device *dev) | ||
| 167 | { | ||
| 168 | /* This shouldn't be possible */ | ||
| 169 | WARN_ON(1); | ||
| 170 | } | ||
| 171 | |||
| 172 | static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, | ||
| 173 | struct module *owner) | ||
| 174 | { | ||
| 175 | /* This shouldn't be possible */ | ||
| 176 | WARN_ON(1); | ||
| 177 | |||
| 178 | return -ENXIO; | ||
| 179 | } | ||
| 180 | |||
| 181 | static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) | ||
| 182 | { | ||
| 183 | /* This shouldn't be possible */ | ||
| 184 | WARN_ON(1); | ||
| 185 | } | ||
| 186 | |||
| 187 | static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) | ||
| 188 | { | ||
| 189 | /* This shouldn't be possible */ | ||
| 190 | WARN_ON(1); | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, | ||
| 194 | rpmsg_rx_cb_t cb, | ||
| 195 | void *priv, | ||
| 196 | struct rpmsg_channel_info chinfo) | ||
| 197 | { | ||
| 198 | /* This shouldn't be possible */ | ||
| 199 | WARN_ON(1); | ||
| 200 | |||
| 201 | return ERR_PTR(-ENXIO); | ||
| 202 | } | ||
| 203 | |||
| 204 | static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) | ||
| 205 | { | ||
| 206 | /* This shouldn't be possible */ | ||
| 207 | WARN_ON(1); | ||
| 208 | |||
| 209 | return -ENXIO; | ||
| 210 | } | ||
| 211 | |||
| 212 | static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, | ||
| 213 | u32 dst) | ||
| 214 | { | ||
| 215 | /* This shouldn't be possible */ | ||
| 216 | WARN_ON(1); | ||
| 217 | |||
| 218 | return -ENXIO; | ||
| 219 | |||
| 220 | } | ||
| 221 | |||
| 222 | static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, | ||
| 223 | u32 dst, void *data, int len) | ||
| 224 | { | ||
| 225 | /* This shouldn't be possible */ | ||
| 226 | WARN_ON(1); | ||
| 227 | |||
| 228 | return -ENXIO; | ||
| 229 | } | ||
| 230 | |||
| 231 | static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) | ||
| 232 | { | ||
| 233 | /* This shouldn't be possible */ | ||
| 234 | WARN_ON(1); | ||
| 235 | |||
| 236 | return -ENXIO; | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, | ||
| 240 | int len, u32 dst) | ||
| 241 | { | ||
| 242 | /* This shouldn't be possible */ | ||
| 243 | WARN_ON(1); | ||
| 244 | |||
| 245 | return -ENXIO; | ||
| 246 | } | ||
| 247 | |||
| 248 | static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, | ||
| 249 | u32 dst, void *data, int len) | ||
| 250 | { | ||
| 251 | /* This shouldn't be possible */ | ||
| 252 | WARN_ON(1); | ||
| 253 | |||
| 254 | return -ENXIO; | ||
| 255 | } | ||
| 256 | |||
| 257 | #endif /* IS_ENABLED(CONFIG_RPMSG) */ | ||
| 258 | |||
| 144 | /* use a macro to avoid include chaining to get THIS_MODULE */ | 259 | /* use a macro to avoid include chaining to get THIS_MODULE */ |
| 145 | #define register_rpmsg_driver(drv) \ | 260 | #define register_rpmsg_driver(drv) \ |
| 146 | __register_rpmsg_driver(drv, THIS_MODULE) | 261 | __register_rpmsg_driver(drv, THIS_MODULE) |
| @@ -157,14 +272,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, | |||
| 157 | module_driver(__rpmsg_driver, register_rpmsg_driver, \ | 272 | module_driver(__rpmsg_driver, register_rpmsg_driver, \ |
| 158 | unregister_rpmsg_driver) | 273 | unregister_rpmsg_driver) |
| 159 | 274 | ||
| 160 | int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); | ||
| 161 | int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); | ||
| 162 | int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, | ||
| 163 | void *data, int len); | ||
| 164 | |||
| 165 | int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); | ||
| 166 | int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); | ||
| 167 | int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, | ||
| 168 | void *data, int len); | ||
| 169 | |||
| 170 | #endif /* _LINUX_RPMSG_H */ | 275 | #endif /* _LINUX_RPMSG_H */ |
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h new file mode 100644 index 000000000000..e674b2e3074b --- /dev/null +++ b/include/linux/rpmsg/qcom_smd.h | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | |||
| 2 | #ifndef _LINUX_RPMSG_QCOM_SMD_H | ||
| 3 | #define _LINUX_RPMSG_QCOM_SMD_H | ||
| 4 | |||
| 5 | #include <linux/device.h> | ||
| 6 | |||
| 7 | struct qcom_smd_edge; | ||
| 8 | |||
| 9 | #if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD) | ||
| 10 | |||
| 11 | struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, | ||
| 12 | struct device_node *node); | ||
| 13 | int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); | ||
| 14 | |||
| 15 | #else | ||
| 16 | |||
| 17 | static inline struct qcom_smd_edge * | ||
| 18 | qcom_smd_register_edge(struct device *parent, | ||
| 19 | struct device_node *node) | ||
| 20 | { | ||
| 21 | return ERR_PTR(-ENXIO); | ||
| 22 | } | ||
| 23 | |||
| 24 | static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) | ||
| 25 | { | ||
| 26 | /* This shouldn't be possible */ | ||
| 27 | WARN_ON(1); | ||
| 28 | return -ENXIO; | ||
| 29 | } | ||
| 30 | |||
| 31 | #endif | ||
| 32 | |||
| 33 | #endif | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index e9c009dc3a4a..4d1905245c7a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 262 | #define set_task_state(tsk, state_value) \ | 262 | #define set_task_state(tsk, state_value) \ |
| 263 | do { \ | 263 | do { \ |
| 264 | (tsk)->task_state_change = _THIS_IP_; \ | 264 | (tsk)->task_state_change = _THIS_IP_; \ |
| 265 | smp_store_mb((tsk)->state, (state_value)); \ | 265 | smp_store_mb((tsk)->state, (state_value)); \ |
| 266 | } while (0) | 266 | } while (0) |
| 267 | 267 | ||
| 268 | /* | ||
| 269 | * set_current_state() includes a barrier so that the write of current->state | ||
| 270 | * is correctly serialised wrt the caller's subsequent test of whether to | ||
| 271 | * actually sleep: | ||
| 272 | * | ||
| 273 | * set_current_state(TASK_UNINTERRUPTIBLE); | ||
| 274 | * if (do_i_need_to_sleep()) | ||
| 275 | * schedule(); | ||
| 276 | * | ||
| 277 | * If the caller does not need such serialisation then use __set_current_state() | ||
| 278 | */ | ||
| 279 | #define __set_current_state(state_value) \ | 268 | #define __set_current_state(state_value) \ |
| 280 | do { \ | 269 | do { \ |
| 281 | current->task_state_change = _THIS_IP_; \ | 270 | current->task_state_change = _THIS_IP_; \ |
| @@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 284 | #define set_current_state(state_value) \ | 273 | #define set_current_state(state_value) \ |
| 285 | do { \ | 274 | do { \ |
| 286 | current->task_state_change = _THIS_IP_; \ | 275 | current->task_state_change = _THIS_IP_; \ |
| 287 | smp_store_mb(current->state, (state_value)); \ | 276 | smp_store_mb(current->state, (state_value)); \ |
| 288 | } while (0) | 277 | } while (0) |
| 289 | 278 | ||
| 290 | #else | 279 | #else |
| 291 | 280 | ||
| 281 | /* | ||
| 282 | * @tsk had better be current, or you get to keep the pieces. | ||
| 283 | * | ||
| 284 | * The only reason is that computing current can be more expensive than | ||
| 285 | * using a pointer that's already available. | ||
| 286 | * | ||
| 287 | * Therefore, see set_current_state(). | ||
| 288 | */ | ||
| 292 | #define __set_task_state(tsk, state_value) \ | 289 | #define __set_task_state(tsk, state_value) \ |
| 293 | do { (tsk)->state = (state_value); } while (0) | 290 | do { (tsk)->state = (state_value); } while (0) |
| 294 | #define set_task_state(tsk, state_value) \ | 291 | #define set_task_state(tsk, state_value) \ |
| @@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 299 | * is correctly serialised wrt the caller's subsequent test of whether to | 296 | * is correctly serialised wrt the caller's subsequent test of whether to |
| 300 | * actually sleep: | 297 | * actually sleep: |
| 301 | * | 298 | * |
| 299 | * for (;;) { | ||
| 302 | * set_current_state(TASK_UNINTERRUPTIBLE); | 300 | * set_current_state(TASK_UNINTERRUPTIBLE); |
| 303 | * if (do_i_need_to_sleep()) | 301 | * if (!need_sleep) |
| 304 | * schedule(); | 302 | * break; |
| 305 | * | 303 | * |
| 306 | * If the caller does not need such serialisation then use __set_current_state() | 304 | * schedule(); |
| 305 | * } | ||
| 306 | * __set_current_state(TASK_RUNNING); | ||
| 307 | * | ||
| 308 | * If the caller does not need such serialisation (because, for instance, the | ||
| 309 | * condition test and condition change and wakeup are under the same lock) then | ||
| 310 | * use __set_current_state(). | ||
| 311 | * | ||
| 312 | * The above is typically ordered against the wakeup, which does: | ||
| 313 | * | ||
| 314 | * need_sleep = false; | ||
| 315 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); | ||
| 316 | * | ||
| 317 | * Where wake_up_state() (and all other wakeup primitives) imply enough | ||
| 318 | * barriers to order the store of the variable against wakeup. | ||
| 319 | * | ||
| 320 | * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, | ||
| 321 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a | ||
| 322 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). | ||
| 323 | * | ||
| 324 | * This is obviously fine, since they both store the exact same value. | ||
| 325 | * | ||
| 326 | * Also see the comments of try_to_wake_up(). | ||
| 307 | */ | 327 | */ |
| 308 | #define __set_current_state(state_value) \ | 328 | #define __set_current_state(state_value) \ |
| 309 | do { current->state = (state_value); } while (0) | 329 | do { current->state = (state_value); } while (0) |
| @@ -520,7 +540,11 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
| 520 | /* leave room for more dump flags */ | 540 | /* leave room for more dump flags */ |
| 521 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ | 541 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ |
| 522 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ | 542 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ |
| 523 | #define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ | 543 | /* |
| 544 | * This one-shot flag is dropped due to necessity of changing exe once again | ||
| 545 | * on NFS restore | ||
| 546 | */ | ||
| 547 | //#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ | ||
| 524 | 548 | ||
| 525 | #define MMF_HAS_UPROBES 19 /* has uprobes */ | 549 | #define MMF_HAS_UPROBES 19 /* has uprobes */ |
| 526 | #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ | 550 | #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ |
| @@ -989,7 +1013,7 @@ enum cpu_idle_type { | |||
| 989 | * already in a wake queue, the wakeup will happen soon and the second | 1013 | * already in a wake queue, the wakeup will happen soon and the second |
| 990 | * waker can just skip it. | 1014 | * waker can just skip it. |
| 991 | * | 1015 | * |
| 992 | * The WAKE_Q macro declares and initializes the list head. | 1016 | * The DEFINE_WAKE_Q macro declares and initializes the list head. |
| 993 | * wake_up_q() does NOT reinitialize the list; it's expected to be | 1017 | * wake_up_q() does NOT reinitialize the list; it's expected to be |
| 994 | * called near the end of a function, where the fact that the queue is | 1018 | * called near the end of a function, where the fact that the queue is |
| 995 | * not used again will be easy to see by inspection. | 1019 | * not used again will be easy to see by inspection. |
| @@ -1009,7 +1033,7 @@ struct wake_q_head { | |||
| 1009 | 1033 | ||
| 1010 | #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) | 1034 | #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) |
| 1011 | 1035 | ||
| 1012 | #define WAKE_Q(name) \ | 1036 | #define DEFINE_WAKE_Q(name) \ |
| 1013 | struct wake_q_head name = { WAKE_Q_TAIL, &name.first } | 1037 | struct wake_q_head name = { WAKE_Q_TAIL, &name.first } |
| 1014 | 1038 | ||
| 1015 | extern void wake_q_add(struct wake_q_head *head, | 1039 | extern void wake_q_add(struct wake_q_head *head, |
| @@ -1057,6 +1081,8 @@ static inline int cpu_numa_flags(void) | |||
| 1057 | } | 1081 | } |
| 1058 | #endif | 1082 | #endif |
| 1059 | 1083 | ||
| 1084 | extern int arch_asym_cpu_priority(int cpu); | ||
| 1085 | |||
| 1060 | struct sched_domain_attr { | 1086 | struct sched_domain_attr { |
| 1061 | int relax_domain_level; | 1087 | int relax_domain_level; |
| 1062 | }; | 1088 | }; |
| @@ -1627,7 +1653,10 @@ struct task_struct { | |||
| 1627 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ | 1653 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ |
| 1628 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ | 1654 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ |
| 1629 | 1655 | ||
| 1630 | cputime_t utime, stime, utimescaled, stimescaled; | 1656 | cputime_t utime, stime; |
| 1657 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME | ||
| 1658 | cputime_t utimescaled, stimescaled; | ||
| 1659 | #endif | ||
| 1631 | cputime_t gtime; | 1660 | cputime_t gtime; |
| 1632 | struct prev_cputime prev_cputime; | 1661 | struct prev_cputime prev_cputime; |
| 1633 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN | 1662 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
| @@ -1656,6 +1685,7 @@ struct task_struct { | |||
| 1656 | struct list_head cpu_timers[3]; | 1685 | struct list_head cpu_timers[3]; |
| 1657 | 1686 | ||
| 1658 | /* process credentials */ | 1687 | /* process credentials */ |
| 1688 | const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ | ||
| 1659 | const struct cred __rcu *real_cred; /* objective and real subjective task | 1689 | const struct cred __rcu *real_cred; /* objective and real subjective task |
| 1660 | * credentials (COW) */ | 1690 | * credentials (COW) */ |
| 1661 | const struct cred __rcu *cred; /* effective (overridable) subjective task | 1691 | const struct cred __rcu *cred; /* effective (overridable) subjective task |
| @@ -1791,6 +1821,9 @@ struct task_struct { | |||
| 1791 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ | 1821 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ |
| 1792 | struct list_head cg_list; | 1822 | struct list_head cg_list; |
| 1793 | #endif | 1823 | #endif |
| 1824 | #ifdef CONFIG_INTEL_RDT_A | ||
| 1825 | int closid; | ||
| 1826 | #endif | ||
| 1794 | #ifdef CONFIG_FUTEX | 1827 | #ifdef CONFIG_FUTEX |
| 1795 | struct robust_list_head __user *robust_list; | 1828 | struct robust_list_head __user *robust_list; |
| 1796 | #ifdef CONFIG_COMPAT | 1829 | #ifdef CONFIG_COMPAT |
| @@ -2220,40 +2253,45 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask); | |||
| 2220 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN | 2253 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
| 2221 | extern void task_cputime(struct task_struct *t, | 2254 | extern void task_cputime(struct task_struct *t, |
| 2222 | cputime_t *utime, cputime_t *stime); | 2255 | cputime_t *utime, cputime_t *stime); |
| 2223 | extern void task_cputime_scaled(struct task_struct *t, | ||
| 2224 | cputime_t *utimescaled, cputime_t *stimescaled); | ||
| 2225 | extern cputime_t task_gtime(struct task_struct *t); | 2256 | extern cputime_t task_gtime(struct task_struct *t); |
| 2226 | #else | 2257 | #else |
| 2227 | static inline void task_cputime(struct task_struct *t, | 2258 | static inline void task_cputime(struct task_struct *t, |
| 2228 | cputime_t *utime, cputime_t *stime) | 2259 | cputime_t *utime, cputime_t *stime) |
| 2229 | { | 2260 | { |
| 2230 | if (utime) | 2261 | *utime = t->utime; |
| 2231 | *utime = t->utime; | 2262 | *stime = t->stime; |
| 2232 | if (stime) | ||
| 2233 | *stime = t->stime; | ||
| 2234 | } | 2263 | } |
| 2235 | 2264 | ||
| 2265 | static inline cputime_t task_gtime(struct task_struct *t) | ||
| 2266 | { | ||
| 2267 | return t->gtime; | ||
| 2268 | } | ||
| 2269 | #endif | ||
| 2270 | |||
| 2271 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME | ||
| 2236 | static inline void task_cputime_scaled(struct task_struct *t, | 2272 | static inline void task_cputime_scaled(struct task_struct *t, |
| 2237 | cputime_t *utimescaled, | 2273 | cputime_t *utimescaled, |
| 2238 | cputime_t *stimescaled) | 2274 | cputime_t *stimescaled) |
| 2239 | { | 2275 | { |
| 2240 | if (utimescaled) | 2276 | *utimescaled = t->utimescaled; |
| 2241 | *utimescaled = t->utimescaled; | 2277 | *stimescaled = t->stimescaled; |
| 2242 | if (stimescaled) | ||
| 2243 | *stimescaled = t->stimescaled; | ||
| 2244 | } | 2278 | } |
| 2245 | 2279 | #else | |
| 2246 | static inline cputime_t task_gtime(struct task_struct *t) | 2280 | static inline void task_cputime_scaled(struct task_struct *t, |
| 2281 | cputime_t *utimescaled, | ||
| 2282 | cputime_t *stimescaled) | ||
| 2247 | { | 2283 | { |
| 2248 | return t->gtime; | 2284 | task_cputime(t, utimescaled, stimescaled); |
| 2249 | } | 2285 | } |
| 2250 | #endif | 2286 | #endif |
| 2287 | |||
| 2251 | extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); | 2288 | extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); |
| 2252 | extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); | 2289 | extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); |
| 2253 | 2290 | ||
| 2254 | /* | 2291 | /* |
| 2255 | * Per process flags | 2292 | * Per process flags |
| 2256 | */ | 2293 | */ |
| 2294 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ | ||
| 2257 | #define PF_EXITING 0x00000004 /* getting shut down */ | 2295 | #define PF_EXITING 0x00000004 /* getting shut down */ |
| 2258 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 2296 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
| 2259 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ | 2297 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
| @@ -2444,6 +2482,10 @@ static inline void calc_load_enter_idle(void) { } | |||
| 2444 | static inline void calc_load_exit_idle(void) { } | 2482 | static inline void calc_load_exit_idle(void) { } |
| 2445 | #endif /* CONFIG_NO_HZ_COMMON */ | 2483 | #endif /* CONFIG_NO_HZ_COMMON */ |
| 2446 | 2484 | ||
| 2485 | #ifndef cpu_relax_yield | ||
| 2486 | #define cpu_relax_yield() cpu_relax() | ||
| 2487 | #endif | ||
| 2488 | |||
| 2447 | /* | 2489 | /* |
| 2448 | * Do not use outside of architecture code which knows its limitations. | 2490 | * Do not use outside of architecture code which knows its limitations. |
| 2449 | * | 2491 | * |
| @@ -2611,7 +2653,7 @@ extern struct task_struct *idle_task(int cpu); | |||
| 2611 | */ | 2653 | */ |
| 2612 | static inline bool is_idle_task(const struct task_struct *p) | 2654 | static inline bool is_idle_task(const struct task_struct *p) |
| 2613 | { | 2655 | { |
| 2614 | return p->pid == 0; | 2656 | return !!(p->flags & PF_IDLE); |
| 2615 | } | 2657 | } |
| 2616 | extern struct task_struct *curr_task(int cpu); | 2658 | extern struct task_struct *curr_task(int cpu); |
| 2617 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); | 2659 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); |
| @@ -3508,6 +3550,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
| 3508 | 3550 | ||
| 3509 | #endif /* CONFIG_SMP */ | 3551 | #endif /* CONFIG_SMP */ |
| 3510 | 3552 | ||
| 3553 | /* | ||
| 3554 | * In order to reduce various lock holder preemption latencies provide an | ||
| 3555 | * interface to see if a vCPU is currently running or not. | ||
| 3556 | * | ||
| 3557 | * This allows us to terminate optimistic spin loops and block, analogous to | ||
| 3558 | * the native optimistic spin heuristic of testing if the lock owner task is | ||
| 3559 | * running or not. | ||
| 3560 | */ | ||
| 3561 | #ifndef vcpu_is_preempted | ||
| 3562 | # define vcpu_is_preempted(cpu) false | ||
| 3563 | #endif | ||
| 3564 | |||
| 3511 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); | 3565 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
| 3512 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); | 3566 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
| 3513 | 3567 | ||
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 22db1e63707e..441145351301 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size; | |||
| 36 | extern unsigned int sysctl_sched_migration_cost; | 36 | extern unsigned int sysctl_sched_migration_cost; |
| 37 | extern unsigned int sysctl_sched_nr_migrate; | 37 | extern unsigned int sysctl_sched_nr_migrate; |
| 38 | extern unsigned int sysctl_sched_time_avg; | 38 | extern unsigned int sysctl_sched_time_avg; |
| 39 | extern unsigned int sysctl_sched_shares_window; | ||
| 40 | 39 | ||
| 41 | int sched_proc_update_handler(struct ctl_table *table, int write, | 40 | int sched_proc_update_handler(struct ctl_table *table, int write, |
| 42 | void __user *buffer, size_t *length, | 41 | void __user *buffer, size_t *length, |
diff --git a/include/linux/seg6.h b/include/linux/seg6.h new file mode 100644 index 000000000000..7a66d2b4c5a6 --- /dev/null +++ b/include/linux/seg6.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef _LINUX_SEG6_H | ||
| 2 | #define _LINUX_SEG6_H | ||
| 3 | |||
| 4 | #include <uapi/linux/seg6.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h new file mode 100644 index 000000000000..d6c3fb4f3734 --- /dev/null +++ b/include/linux/seg6_genl.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef _LINUX_SEG6_GENL_H | ||
| 2 | #define _LINUX_SEG6_GENL_H | ||
| 3 | |||
| 4 | #include <uapi/linux/seg6_genl.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h new file mode 100644 index 000000000000..da437ebdc6cd --- /dev/null +++ b/include/linux/seg6_hmac.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef _LINUX_SEG6_HMAC_H | ||
| 2 | #define _LINUX_SEG6_HMAC_H | ||
| 3 | |||
| 4 | #include <uapi/linux/seg6_hmac.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h new file mode 100644 index 000000000000..5377cf6a5a02 --- /dev/null +++ b/include/linux/seg6_iptunnel.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef _LINUX_SEG6_IPTUNNEL_H | ||
| 2 | #define _LINUX_SEG6_IPTUNNEL_H | ||
| 3 | |||
| 4 | #include <uapi/linux/seg6_iptunnel.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h index a1ba6a5ccdd6..c58c535d12a8 100644 --- a/include/linux/seqno-fence.h +++ b/include/linux/seqno-fence.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #ifndef __LINUX_SEQNO_FENCE_H | 20 | #ifndef __LINUX_SEQNO_FENCE_H |
| 21 | #define __LINUX_SEQNO_FENCE_H | 21 | #define __LINUX_SEQNO_FENCE_H |
| 22 | 22 | ||
| 23 | #include <linux/fence.h> | 23 | #include <linux/dma-fence.h> |
| 24 | #include <linux/dma-buf.h> | 24 | #include <linux/dma-buf.h> |
| 25 | 25 | ||
| 26 | enum seqno_fence_condition { | 26 | enum seqno_fence_condition { |
| @@ -29,15 +29,15 @@ enum seqno_fence_condition { | |||
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | struct seqno_fence { | 31 | struct seqno_fence { |
| 32 | struct fence base; | 32 | struct dma_fence base; |
| 33 | 33 | ||
| 34 | const struct fence_ops *ops; | 34 | const struct dma_fence_ops *ops; |
| 35 | struct dma_buf *sync_buf; | 35 | struct dma_buf *sync_buf; |
| 36 | uint32_t seqno_ofs; | 36 | uint32_t seqno_ofs; |
| 37 | enum seqno_fence_condition condition; | 37 | enum seqno_fence_condition condition; |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | extern const struct fence_ops seqno_fence_ops; | 40 | extern const struct dma_fence_ops seqno_fence_ops; |
| 41 | 41 | ||
| 42 | /** | 42 | /** |
| 43 | * to_seqno_fence - cast a fence to a seqno_fence | 43 | * to_seqno_fence - cast a fence to a seqno_fence |
| @@ -47,7 +47,7 @@ extern const struct fence_ops seqno_fence_ops; | |||
| 47 | * or the seqno_fence otherwise. | 47 | * or the seqno_fence otherwise. |
| 48 | */ | 48 | */ |
| 49 | static inline struct seqno_fence * | 49 | static inline struct seqno_fence * |
| 50 | to_seqno_fence(struct fence *fence) | 50 | to_seqno_fence(struct dma_fence *fence) |
| 51 | { | 51 | { |
| 52 | if (fence->ops != &seqno_fence_ops) | 52 | if (fence->ops != &seqno_fence_ops) |
| 53 | return NULL; | 53 | return NULL; |
| @@ -83,9 +83,9 @@ to_seqno_fence(struct fence *fence) | |||
| 83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the | 83 | * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the |
| 84 | * device's vm can be expensive. | 84 | * device's vm can be expensive. |
| 85 | * | 85 | * |
| 86 | * It is recommended for creators of seqno_fence to call fence_signal | 86 | * It is recommended for creators of seqno_fence to call dma_fence_signal() |
| 87 | * before destruction. This will prevent possible issues from wraparound at | 87 | * before destruction. This will prevent possible issues from wraparound at |
| 88 | * time of issue vs time of check, since users can check fence_is_signaled | 88 | * time of issue vs time of check, since users can check dma_fence_is_signaled() |
| 89 | * before submitting instructions for the hardware to wait on the fence. | 89 | * before submitting instructions for the hardware to wait on the fence. |
| 90 | * However, when ops.enable_signaling is not called, it doesn't have to be | 90 | * However, when ops.enable_signaling is not called, it doesn't have to be |
| 91 | * done as soon as possible, just before there's any real danger of seqno | 91 | * done as soon as possible, just before there's any real danger of seqno |
| @@ -96,18 +96,18 @@ seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, | |||
| 96 | struct dma_buf *sync_buf, uint32_t context, | 96 | struct dma_buf *sync_buf, uint32_t context, |
| 97 | uint32_t seqno_ofs, uint32_t seqno, | 97 | uint32_t seqno_ofs, uint32_t seqno, |
| 98 | enum seqno_fence_condition cond, | 98 | enum seqno_fence_condition cond, |
| 99 | const struct fence_ops *ops) | 99 | const struct dma_fence_ops *ops) |
| 100 | { | 100 | { |
| 101 | BUG_ON(!fence || !sync_buf || !ops); | 101 | BUG_ON(!fence || !sync_buf || !ops); |
| 102 | BUG_ON(!ops->wait || !ops->enable_signaling || | 102 | BUG_ON(!ops->wait || !ops->enable_signaling || |
| 103 | !ops->get_driver_name || !ops->get_timeline_name); | 103 | !ops->get_driver_name || !ops->get_timeline_name); |
| 104 | 104 | ||
| 105 | /* | 105 | /* |
| 106 | * ops is used in fence_init for get_driver_name, so needs to be | 106 | * ops is used in dma_fence_init for get_driver_name, so needs to be |
| 107 | * initialized first | 107 | * initialized first |
| 108 | */ | 108 | */ |
| 109 | fence->ops = ops; | 109 | fence->ops = ops; |
| 110 | fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); | 110 | dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); |
| 111 | get_dma_buf(sync_buf); | 111 | get_dma_buf(sync_buf); |
| 112 | fence->sync_buf = sync_buf; | 112 | fence->sync_buf = sync_buf; |
| 113 | fence->seqno_ofs = seqno_ofs; | 113 | fence->seqno_ofs = seqno_ofs; |
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 48ec7651989b..61fbb440449c 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
| @@ -36,6 +36,8 @@ struct plat_serial8250_port { | |||
| 36 | void (*set_termios)(struct uart_port *, | 36 | void (*set_termios)(struct uart_port *, |
| 37 | struct ktermios *new, | 37 | struct ktermios *new, |
| 38 | struct ktermios *old); | 38 | struct ktermios *old); |
| 39 | void (*set_ldisc)(struct uart_port *, | ||
| 40 | struct ktermios *); | ||
| 39 | unsigned int (*get_mctrl)(struct uart_port *); | 41 | unsigned int (*get_mctrl)(struct uart_port *); |
| 40 | int (*handle_irq)(struct uart_port *); | 42 | int (*handle_irq)(struct uart_port *); |
| 41 | void (*pm)(struct uart_port *, unsigned int state, | 43 | void (*pm)(struct uart_port *, unsigned int state, |
| @@ -94,7 +96,7 @@ struct uart_8250_port { | |||
| 94 | struct uart_port port; | 96 | struct uart_port port; |
| 95 | struct timer_list timer; /* "no irq" timer */ | 97 | struct timer_list timer; /* "no irq" timer */ |
| 96 | struct list_head list; /* ports on this IRQ */ | 98 | struct list_head list; /* ports on this IRQ */ |
| 97 | unsigned short capabilities; /* port capabilities */ | 99 | u32 capabilities; /* port capabilities */ |
| 98 | unsigned short bugs; /* port bugs */ | 100 | unsigned short bugs; /* port bugs */ |
| 99 | bool fifo_bug; /* min RX trigger if enabled */ | 101 | bool fifo_bug; /* min RX trigger if enabled */ |
| 100 | unsigned int tx_loadsz; /* transmit fifo load size */ | 102 | unsigned int tx_loadsz; /* transmit fifo load size */ |
| @@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device, | |||
| 149 | const char *options); | 151 | const char *options); |
| 150 | extern void serial8250_do_set_termios(struct uart_port *port, | 152 | extern void serial8250_do_set_termios(struct uart_port *port, |
| 151 | struct ktermios *termios, struct ktermios *old); | 153 | struct ktermios *termios, struct ktermios *old); |
| 154 | extern void serial8250_do_set_ldisc(struct uart_port *port, | ||
| 155 | struct ktermios *termios); | ||
| 152 | extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); | 156 | extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); |
| 153 | extern int serial8250_do_startup(struct uart_port *port); | 157 | extern int serial8250_do_startup(struct uart_port *port); |
| 154 | extern void serial8250_do_shutdown(struct uart_port *port); | 158 | extern void serial8250_do_shutdown(struct uart_port *port); |
| @@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe); | |||
| 168 | 172 | ||
| 169 | extern void serial8250_set_isa_configurator(void (*v) | 173 | extern void serial8250_set_isa_configurator(void (*v) |
| 170 | (int port, struct uart_port *up, | 174 | (int port, struct uart_port *up, |
| 171 | unsigned short *capabilities)); | 175 | u32 *capabilities)); |
| 172 | 176 | ||
| 173 | #endif | 177 | #endif |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 344201437017..5def8e830fb0 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -111,8 +111,8 @@ struct uart_icount { | |||
| 111 | __u32 buf_overrun; | 111 | __u32 buf_overrun; |
| 112 | }; | 112 | }; |
| 113 | 113 | ||
| 114 | typedef unsigned int __bitwise__ upf_t; | 114 | typedef unsigned int __bitwise upf_t; |
| 115 | typedef unsigned int __bitwise__ upstat_t; | 115 | typedef unsigned int __bitwise upstat_t; |
| 116 | 116 | ||
| 117 | struct uart_port { | 117 | struct uart_port { |
| 118 | spinlock_t lock; /* port lock */ | 118 | spinlock_t lock; /* port lock */ |
| @@ -123,6 +123,8 @@ struct uart_port { | |||
| 123 | void (*set_termios)(struct uart_port *, | 123 | void (*set_termios)(struct uart_port *, |
| 124 | struct ktermios *new, | 124 | struct ktermios *new, |
| 125 | struct ktermios *old); | 125 | struct ktermios *old); |
| 126 | void (*set_ldisc)(struct uart_port *, | ||
| 127 | struct ktermios *); | ||
| 126 | unsigned int (*get_mctrl)(struct uart_port *); | 128 | unsigned int (*get_mctrl)(struct uart_port *); |
| 127 | void (*set_mctrl)(struct uart_port *, unsigned int); | 129 | void (*set_mctrl)(struct uart_port *, unsigned int); |
| 128 | int (*startup)(struct uart_port *port); | 130 | int (*startup)(struct uart_port *port); |
diff --git a/include/linux/signal.h b/include/linux/signal.h index b63f63eaa39c..5308304993be 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set) | |||
| 97 | } | 97 | } |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) | ||
| 101 | { | ||
| 102 | switch (_NSIG_WORDS) { | ||
| 103 | case 4: | ||
| 104 | return (set1->sig[3] == set2->sig[3]) && | ||
| 105 | (set1->sig[2] == set2->sig[2]) && | ||
| 106 | (set1->sig[1] == set2->sig[1]) && | ||
| 107 | (set1->sig[0] == set2->sig[0]); | ||
| 108 | case 2: | ||
| 109 | return (set1->sig[1] == set2->sig[1]) && | ||
| 110 | (set1->sig[0] == set2->sig[0]); | ||
| 111 | case 1: | ||
| 112 | return set1->sig[0] == set2->sig[0]; | ||
| 113 | } | ||
| 114 | return 0; | ||
| 115 | } | ||
| 116 | |||
| 100 | #define sigmask(sig) (1UL << ((sig) - 1)) | 117 | #define sigmask(sig) (1UL << ((sig) - 1)) |
| 101 | 118 | ||
| 102 | #ifndef __HAVE_ARCH_SIG_SETOPS | 119 | #ifndef __HAVE_ARCH_SIG_SETOPS |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 32810f279f8e..b53c0cfd417e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -645,8 +645,15 @@ struct sk_buff { | |||
| 645 | struct rb_node rbnode; /* used in netem & tcp stack */ | 645 | struct rb_node rbnode; /* used in netem & tcp stack */ |
| 646 | }; | 646 | }; |
| 647 | struct sock *sk; | 647 | struct sock *sk; |
| 648 | struct net_device *dev; | ||
| 649 | 648 | ||
| 649 | union { | ||
| 650 | struct net_device *dev; | ||
| 651 | /* Some protocols might use this space to store information, | ||
| 652 | * while device pointer would be NULL. | ||
| 653 | * UDP receive path is one user. | ||
| 654 | */ | ||
| 655 | unsigned long dev_scratch; | ||
| 656 | }; | ||
| 650 | /* | 657 | /* |
| 651 | * This is the control buffer. It is free to use for every | 658 | * This is the control buffer. It is free to use for every |
| 652 | * layer. Please put your private variables there. If you | 659 | * layer. Please put your private variables there. If you |
| @@ -1087,7 +1094,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) | |||
| 1087 | } | 1094 | } |
| 1088 | 1095 | ||
| 1089 | void __skb_get_hash(struct sk_buff *skb); | 1096 | void __skb_get_hash(struct sk_buff *skb); |
| 1090 | u32 __skb_get_hash_symmetric(struct sk_buff *skb); | 1097 | u32 __skb_get_hash_symmetric(const struct sk_buff *skb); |
| 1091 | u32 skb_get_poff(const struct sk_buff *skb); | 1098 | u32 skb_get_poff(const struct sk_buff *skb); |
| 1092 | u32 __skb_get_poff(const struct sk_buff *skb, void *data, | 1099 | u32 __skb_get_poff(const struct sk_buff *skb, void *data, |
| 1093 | const struct flow_keys *keys, int hlen); | 1100 | const struct flow_keys *keys, int hlen); |
| @@ -1799,11 +1806,11 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb) | |||
| 1799 | return skb->len - skb->data_len; | 1806 | return skb->len - skb->data_len; |
| 1800 | } | 1807 | } |
| 1801 | 1808 | ||
| 1802 | static inline int skb_pagelen(const struct sk_buff *skb) | 1809 | static inline unsigned int skb_pagelen(const struct sk_buff *skb) |
| 1803 | { | 1810 | { |
| 1804 | int i, len = 0; | 1811 | unsigned int i, len = 0; |
| 1805 | 1812 | ||
| 1806 | for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) | 1813 | for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) |
| 1807 | len += skb_frag_size(&skb_shinfo(skb)->frags[i]); | 1814 | len += skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1808 | return len + skb_headlen(skb); | 1815 | return len + skb_headlen(skb); |
| 1809 | } | 1816 | } |
| @@ -1966,6 +1973,8 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) | |||
| 1966 | return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; | 1973 | return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; |
| 1967 | } | 1974 | } |
| 1968 | 1975 | ||
| 1976 | void skb_condense(struct sk_buff *skb); | ||
| 1977 | |||
| 1969 | /** | 1978 | /** |
| 1970 | * skb_headroom - bytes at buffer head | 1979 | * skb_headroom - bytes at buffer head |
| 1971 | * @skb: buffer to check | 1980 | * @skb: buffer to check |
| @@ -2809,12 +2818,12 @@ static inline int skb_add_data(struct sk_buff *skb, | |||
| 2809 | 2818 | ||
| 2810 | if (skb->ip_summed == CHECKSUM_NONE) { | 2819 | if (skb->ip_summed == CHECKSUM_NONE) { |
| 2811 | __wsum csum = 0; | 2820 | __wsum csum = 0; |
| 2812 | if (csum_and_copy_from_iter(skb_put(skb, copy), copy, | 2821 | if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, |
| 2813 | &csum, from) == copy) { | 2822 | &csum, from)) { |
| 2814 | skb->csum = csum_block_add(skb->csum, csum, off); | 2823 | skb->csum = csum_block_add(skb->csum, csum, off); |
| 2815 | return 0; | 2824 | return 0; |
| 2816 | } | 2825 | } |
| 2817 | } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) | 2826 | } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) |
| 2818 | return 0; | 2827 | return 0; |
| 2819 | 2828 | ||
| 2820 | __skb_trim(skb, off); | 2829 | __skb_trim(skb, off); |
| @@ -3033,9 +3042,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb) | |||
| 3033 | int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, | 3042 | int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, |
| 3034 | const struct sk_buff *skb); | 3043 | const struct sk_buff *skb); |
| 3035 | struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, | 3044 | struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, |
| 3045 | void (*destructor)(struct sock *sk, | ||
| 3046 | struct sk_buff *skb), | ||
| 3036 | int *peeked, int *off, int *err, | 3047 | int *peeked, int *off, int *err, |
| 3037 | struct sk_buff **last); | 3048 | struct sk_buff **last); |
| 3038 | struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, | 3049 | struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, |
| 3050 | void (*destructor)(struct sock *sk, | ||
| 3051 | struct sk_buff *skb), | ||
| 3039 | int *peeked, int *off, int *err); | 3052 | int *peeked, int *off, int *err); |
| 3040 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, | 3053 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, |
| 3041 | int *err); | 3054 | int *err); |
| @@ -3214,7 +3227,7 @@ static inline ktime_t net_timedelta(ktime_t t) | |||
| 3214 | 3227 | ||
| 3215 | static inline ktime_t net_invalid_timestamp(void) | 3228 | static inline ktime_t net_invalid_timestamp(void) |
| 3216 | { | 3229 | { |
| 3217 | return ktime_set(0, 0); | 3230 | return 0; |
| 3218 | } | 3231 | } |
| 3219 | 3232 | ||
| 3220 | struct sk_buff *skb_clone_sk(struct sk_buff *skb); | 3233 | struct sk_buff *skb_clone_sk(struct sk_buff *skb); |
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index e302c447e057..129bc674dcf5 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h | |||
| @@ -39,6 +39,7 @@ struct smc91x_platdata { | |||
| 39 | unsigned long flags; | 39 | unsigned long flags; |
| 40 | unsigned char leda; | 40 | unsigned char leda; |
| 41 | unsigned char ledb; | 41 | unsigned char ledb; |
| 42 | bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | #endif /* __SMC91X_H__ */ | 45 | #endif /* __SMC91X_H__ */ |
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h index a37bc5538f19..eab64976a73b 100644 --- a/include/linux/soc/qcom/wcnss_ctrl.h +++ b/include/linux/soc/qcom/wcnss_ctrl.h | |||
| @@ -3,6 +3,19 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/soc/qcom/smd.h> | 4 | #include <linux/soc/qcom/smd.h> |
| 5 | 5 | ||
| 6 | #if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) | ||
| 7 | |||
| 6 | struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); | 8 | struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); |
| 7 | 9 | ||
| 10 | #else | ||
| 11 | |||
| 12 | static inline struct qcom_smd_channel* | ||
| 13 | qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) | ||
| 14 | { | ||
| 15 | WARN_ON(1); | ||
| 16 | return ERR_PTR(-ENXIO); | ||
| 17 | } | ||
| 18 | |||
| 19 | #endif | ||
| 20 | |||
| 8 | #endif | 21 | #endif |
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h new file mode 100644 index 000000000000..a18e0783946b --- /dev/null +++ b/include/linux/soc/renesas/rcar-rst.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ | ||
| 2 | #define __LINUX_SOC_RENESAS_RCAR_RST_H__ | ||
| 3 | |||
| 4 | int rcar_rst_read_mode_pins(u32 *mode); | ||
| 5 | |||
| 6 | #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ | ||
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h new file mode 100644 index 000000000000..0ccbc138c26a --- /dev/null +++ b/include/linux/soc/ti/ti_sci_protocol.h | |||
| @@ -0,0 +1,249 @@ | |||
| 1 | /* | ||
| 2 | * Texas Instruments System Control Interface Protocol | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ | ||
| 5 | * Nishanth Menon | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 12 | * kind, whether express or implied; without even the implied warranty | ||
| 13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef __TISCI_PROTOCOL_H | ||
| 18 | #define __TISCI_PROTOCOL_H | ||
| 19 | |||
| 20 | /** | ||
| 21 | * struct ti_sci_version_info - version information structure | ||
| 22 | * @abi_major: Major ABI version. Change here implies risk of backward | ||
| 23 | * compatibility break. | ||
| 24 | * @abi_minor: Minor ABI version. Change here implies new feature addition, | ||
| 25 | * or compatible change in ABI. | ||
| 26 | * @firmware_revision: Firmware revision (not usually used). | ||
| 27 | * @firmware_description: Firmware description (not usually used). | ||
| 28 | */ | ||
| 29 | struct ti_sci_version_info { | ||
| 30 | u8 abi_major; | ||
| 31 | u8 abi_minor; | ||
| 32 | u16 firmware_revision; | ||
| 33 | char firmware_description[32]; | ||
| 34 | }; | ||
| 35 | |||
| 36 | struct ti_sci_handle; | ||
| 37 | |||
| 38 | /** | ||
| 39 | * struct ti_sci_core_ops - SoC Core Operations | ||
| 40 | * @reboot_device: Reboot the SoC | ||
| 41 | * Returns 0 for successful request(ideally should never return), | ||
| 42 | * else returns corresponding error value. | ||
| 43 | */ | ||
| 44 | struct ti_sci_core_ops { | ||
| 45 | int (*reboot_device)(const struct ti_sci_handle *handle); | ||
| 46 | }; | ||
| 47 | |||
| 48 | /** | ||
| 49 | * struct ti_sci_dev_ops - Device control operations | ||
| 50 | * @get_device: Command to request for device managed by TISCI | ||
| 51 | * Returns 0 for successful exclusive request, else returns | ||
| 52 | * corresponding error message. | ||
| 53 | * @idle_device: Command to idle a device managed by TISCI | ||
| 54 | * Returns 0 for successful exclusive request, else returns | ||
| 55 | * corresponding error message. | ||
| 56 | * @put_device: Command to release a device managed by TISCI | ||
| 57 | * Returns 0 for successful release, else returns corresponding | ||
| 58 | * error message. | ||
| 59 | * @is_valid: Check if the device ID is a valid ID. | ||
| 60 | * Returns 0 if the ID is valid, else returns corresponding error. | ||
| 61 | * @get_context_loss_count: Command to retrieve context loss counter - this | ||
| 62 | * increments every time the device looses context. Overflow | ||
| 63 | * is possible. | ||
| 64 | * - count: pointer to u32 which will retrieve counter | ||
| 65 | * Returns 0 for successful information request and count has | ||
| 66 | * proper data, else returns corresponding error message. | ||
| 67 | * @is_idle: Reports back about device idle state | ||
| 68 | * - req_state: Returns requested idle state | ||
| 69 | * Returns 0 for successful information request and req_state and | ||
| 70 | * current_state has proper data, else returns corresponding error | ||
| 71 | * message. | ||
| 72 | * @is_stop: Reports back about device stop state | ||
| 73 | * - req_state: Returns requested stop state | ||
| 74 | * - current_state: Returns current stop state | ||
| 75 | * Returns 0 for successful information request and req_state and | ||
| 76 | * current_state has proper data, else returns corresponding error | ||
| 77 | * message. | ||
| 78 | * @is_on: Reports back about device ON(or active) state | ||
| 79 | * - req_state: Returns requested ON state | ||
| 80 | * - current_state: Returns current ON state | ||
| 81 | * Returns 0 for successful information request and req_state and | ||
| 82 | * current_state has proper data, else returns corresponding error | ||
| 83 | * message. | ||
| 84 | * @is_transitioning: Reports back if the device is in the middle of transition | ||
| 85 | * of state. | ||
| 86 | * -current_state: Returns 'true' if currently transitioning. | ||
| 87 | * @set_device_resets: Command to configure resets for device managed by TISCI. | ||
| 88 | * -reset_state: Device specific reset bit field | ||
| 89 | * Returns 0 for successful request, else returns | ||
| 90 | * corresponding error message. | ||
| 91 | * @get_device_resets: Command to read state of resets for device managed | ||
| 92 | * by TISCI. | ||
| 93 | * -reset_state: pointer to u32 which will retrieve resets | ||
| 94 | * Returns 0 for successful request, else returns | ||
| 95 | * corresponding error message. | ||
| 96 | * | ||
| 97 | * NOTE: for all these functions, the following parameters are generic in | ||
| 98 | * nature: | ||
| 99 | * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | ||
| 100 | * -id: Device Identifier | ||
| 101 | * | ||
| 102 | * Request for the device - NOTE: the client MUST maintain integrity of | ||
| 103 | * usage count by balancing get_device with put_device. No refcounting is | ||
| 104 | * managed by driver for that purpose. | ||
| 105 | */ | ||
| 106 | struct ti_sci_dev_ops { | ||
| 107 | int (*get_device)(const struct ti_sci_handle *handle, u32 id); | ||
| 108 | int (*idle_device)(const struct ti_sci_handle *handle, u32 id); | ||
| 109 | int (*put_device)(const struct ti_sci_handle *handle, u32 id); | ||
| 110 | int (*is_valid)(const struct ti_sci_handle *handle, u32 id); | ||
| 111 | int (*get_context_loss_count)(const struct ti_sci_handle *handle, | ||
| 112 | u32 id, u32 *count); | ||
| 113 | int (*is_idle)(const struct ti_sci_handle *handle, u32 id, | ||
| 114 | bool *requested_state); | ||
| 115 | int (*is_stop)(const struct ti_sci_handle *handle, u32 id, | ||
| 116 | bool *req_state, bool *current_state); | ||
| 117 | int (*is_on)(const struct ti_sci_handle *handle, u32 id, | ||
| 118 | bool *req_state, bool *current_state); | ||
| 119 | int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id, | ||
| 120 | bool *current_state); | ||
| 121 | int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id, | ||
| 122 | u32 reset_state); | ||
| 123 | int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id, | ||
| 124 | u32 *reset_state); | ||
| 125 | }; | ||
| 126 | |||
| 127 | /** | ||
| 128 | * struct ti_sci_clk_ops - Clock control operations | ||
| 129 | * @get_clock: Request for activation of clock and manage by processor | ||
| 130 | * - needs_ssc: 'true' if Spread Spectrum clock is desired. | ||
| 131 | * - can_change_freq: 'true' if frequency change is desired. | ||
| 132 | * - enable_input_term: 'true' if input termination is desired. | ||
| 133 | * @idle_clock: Request for Idling a clock managed by processor | ||
| 134 | * @put_clock: Release the clock to be auto managed by TISCI | ||
| 135 | * @is_auto: Is the clock being auto managed | ||
| 136 | * - req_state: state indicating if the clock is auto managed | ||
| 137 | * @is_on: Is the clock ON | ||
| 138 | * - req_state: if the clock is requested to be forced ON | ||
| 139 | * - current_state: if the clock is currently ON | ||
| 140 | * @is_off: Is the clock OFF | ||
| 141 | * - req_state: if the clock is requested to be forced OFF | ||
| 142 | * - current_state: if the clock is currently Gated | ||
| 143 | * @set_parent: Set the clock source of a specific device clock | ||
| 144 | * - parent_id: Parent clock identifier to set. | ||
| 145 | * @get_parent: Get the current clock source of a specific device clock | ||
| 146 | * - parent_id: Parent clock identifier which is the parent. | ||
| 147 | * @get_num_parents: Get the number of parents of the current clock source | ||
| 148 | * - num_parents: returns the number of parent clocks. | ||
| 149 | * @get_best_match_freq: Find a best matching frequency for a frequency | ||
| 150 | * range. | ||
| 151 | * - match_freq: Best matching frequency in Hz. | ||
| 152 | * @set_freq: Set the Clock frequency | ||
| 153 | * @get_freq: Get the Clock frequency | ||
| 154 | * - current_freq: Frequency in Hz that the clock is at. | ||
| 155 | * | ||
| 156 | * NOTE: for all these functions, the following parameters are generic in | ||
| 157 | * nature: | ||
| 158 | * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | ||
| 159 | * -did: Device identifier this request is for | ||
| 160 | * -cid: Clock identifier for the device for this request. | ||
| 161 | * Each device has it's own set of clock inputs. This indexes | ||
| 162 | * which clock input to modify. | ||
| 163 | * -min_freq: The minimum allowable frequency in Hz. This is the minimum | ||
| 164 | * allowable programmed frequency and does not account for clock | ||
| 165 | * tolerances and jitter. | ||
| 166 | * -target_freq: The target clock frequency in Hz. A frequency will be | ||
| 167 | * processed as close to this target frequency as possible. | ||
| 168 | * -max_freq: The maximum allowable frequency in Hz. This is the maximum | ||
| 169 | * allowable programmed frequency and does not account for clock | ||
| 170 | * tolerances and jitter. | ||
| 171 | * | ||
| 172 | * Request for the clock - NOTE: the client MUST maintain integrity of | ||
| 173 | * usage count by balancing get_clock with put_clock. No refcounting is | ||
| 174 | * managed by driver for that purpose. | ||
| 175 | */ | ||
| 176 | struct ti_sci_clk_ops { | ||
| 177 | int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 178 | bool needs_ssc, bool can_change_freq, | ||
| 179 | bool enable_input_term); | ||
| 180 | int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); | ||
| 181 | int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); | ||
| 182 | int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 183 | bool *req_state); | ||
| 184 | int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 185 | bool *req_state, bool *current_state); | ||
| 186 | int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 187 | bool *req_state, bool *current_state); | ||
| 188 | int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 189 | u8 parent_id); | ||
| 190 | int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 191 | u8 *parent_id); | ||
| 192 | int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, | ||
| 193 | u8 cid, u8 *num_parents); | ||
| 194 | int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, | ||
| 195 | u8 cid, u64 min_freq, u64 target_freq, | ||
| 196 | u64 max_freq, u64 *match_freq); | ||
| 197 | int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 198 | u64 min_freq, u64 target_freq, u64 max_freq); | ||
| 199 | int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, | ||
| 200 | u64 *current_freq); | ||
| 201 | }; | ||
| 202 | |||
| 203 | /** | ||
| 204 | * struct ti_sci_ops - Function support for TI SCI | ||
| 205 | * @dev_ops: Device specific operations | ||
| 206 | * @clk_ops: Clock specific operations | ||
| 207 | */ | ||
| 208 | struct ti_sci_ops { | ||
| 209 | struct ti_sci_core_ops core_ops; | ||
| 210 | struct ti_sci_dev_ops dev_ops; | ||
| 211 | struct ti_sci_clk_ops clk_ops; | ||
| 212 | }; | ||
| 213 | |||
| 214 | /** | ||
| 215 | * struct ti_sci_handle - Handle returned to TI SCI clients for usage. | ||
| 216 | * @version: structure containing version information | ||
| 217 | * @ops: operations that are made available to TI SCI clients | ||
| 218 | */ | ||
| 219 | struct ti_sci_handle { | ||
| 220 | struct ti_sci_version_info version; | ||
| 221 | struct ti_sci_ops ops; | ||
| 222 | }; | ||
| 223 | |||
| 224 | #if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) | ||
| 225 | const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); | ||
| 226 | int ti_sci_put_handle(const struct ti_sci_handle *handle); | ||
| 227 | const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); | ||
| 228 | |||
| 229 | #else /* CONFIG_TI_SCI_PROTOCOL */ | ||
| 230 | |||
| 231 | static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) | ||
| 232 | { | ||
| 233 | return ERR_PTR(-EINVAL); | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline int ti_sci_put_handle(const struct ti_sci_handle *handle) | ||
| 237 | { | ||
| 238 | return -EINVAL; | ||
| 239 | } | ||
| 240 | |||
| 241 | static inline | ||
| 242 | const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) | ||
| 243 | { | ||
| 244 | return ERR_PTR(-EINVAL); | ||
| 245 | } | ||
| 246 | |||
| 247 | #endif /* CONFIG_TI_SCI_PROTOCOL */ | ||
| 248 | |||
| 249 | #endif /* __TISCI_PROTOCOL_H */ | ||
diff --git a/include/linux/stm.h b/include/linux/stm.h index 8369d8a8cabd..210ff2292361 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h | |||
| @@ -133,7 +133,7 @@ int stm_source_register_device(struct device *parent, | |||
| 133 | struct stm_source_data *data); | 133 | struct stm_source_data *data); |
| 134 | void stm_source_unregister_device(struct stm_source_data *data); | 134 | void stm_source_unregister_device(struct stm_source_data *data); |
| 135 | 135 | ||
| 136 | int stm_source_write(struct stm_source_data *data, unsigned int chan, | 136 | int notrace stm_source_write(struct stm_source_data *data, unsigned int chan, |
| 137 | const char *buf, size_t count); | 137 | const char *buf, size_t count); |
| 138 | 138 | ||
| 139 | #endif /* _STM_H_ */ | 139 | #endif /* _STM_H_ */ |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 705840e0438f..266dab9ad782 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
| @@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data { | |||
| 88 | 88 | ||
| 89 | struct stmmac_dma_cfg { | 89 | struct stmmac_dma_cfg { |
| 90 | int pbl; | 90 | int pbl; |
| 91 | int txpbl; | ||
| 92 | int rxpbl; | ||
| 93 | bool pblx8; | ||
| 91 | int fixed_burst; | 94 | int fixed_burst; |
| 92 | int mixed_burst; | 95 | int mixed_burst; |
| 93 | bool aal; | 96 | bool aal; |
| @@ -135,8 +138,6 @@ struct plat_stmmacenet_data { | |||
| 135 | void (*bus_setup)(void __iomem *ioaddr); | 138 | void (*bus_setup)(void __iomem *ioaddr); |
| 136 | int (*init)(struct platform_device *pdev, void *priv); | 139 | int (*init)(struct platform_device *pdev, void *priv); |
| 137 | void (*exit)(struct platform_device *pdev, void *priv); | 140 | void (*exit)(struct platform_device *pdev, void *priv); |
| 138 | void (*suspend)(struct platform_device *pdev, void *priv); | ||
| 139 | void (*resume)(struct platform_device *pdev, void *priv); | ||
| 140 | void *bsp_priv; | 141 | void *bsp_priv; |
| 141 | struct stmmac_axi *axi; | 142 | struct stmmac_axi *axi; |
| 142 | int has_gmac4; | 143 | int has_gmac4; |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index cc3ae16eac68..757fb963696c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
| @@ -79,7 +79,6 @@ struct svc_rdma_op_ctxt { | |||
| 79 | struct ib_cqe reg_cqe; | 79 | struct ib_cqe reg_cqe; |
| 80 | struct ib_cqe inv_cqe; | 80 | struct ib_cqe inv_cqe; |
| 81 | struct list_head dto_q; | 81 | struct list_head dto_q; |
| 82 | enum ib_wc_status wc_status; | ||
| 83 | u32 byte_len; | 82 | u32 byte_len; |
| 84 | u32 position; | 83 | u32 position; |
| 85 | struct svcxprt_rdma *xprt; | 84 | struct svcxprt_rdma *xprt; |
| @@ -139,7 +138,7 @@ struct svcxprt_rdma { | |||
| 139 | int sc_max_sge_rd; /* max sge for read target */ | 138 | int sc_max_sge_rd; /* max sge for read target */ |
| 140 | bool sc_snd_w_inv; /* OK to use Send With Invalidate */ | 139 | bool sc_snd_w_inv; /* OK to use Send With Invalidate */ |
| 141 | 140 | ||
| 142 | atomic_t sc_sq_count; /* Number of SQ WR on queue */ | 141 | atomic_t sc_sq_avail; /* SQEs ready to be consumed */ |
| 143 | unsigned int sc_sq_depth; /* Depth of SQ */ | 142 | unsigned int sc_sq_depth; /* Depth of SQ */ |
| 144 | unsigned int sc_rq_depth; /* Depth of RQ */ | 143 | unsigned int sc_rq_depth; /* Depth of RQ */ |
| 145 | u32 sc_max_requests; /* Forward credits */ | 144 | u32 sc_max_requests; /* Forward credits */ |
| @@ -148,7 +147,6 @@ struct svcxprt_rdma { | |||
| 148 | 147 | ||
| 149 | struct ib_pd *sc_pd; | 148 | struct ib_pd *sc_pd; |
| 150 | 149 | ||
| 151 | atomic_t sc_dma_used; | ||
| 152 | spinlock_t sc_ctxt_lock; | 150 | spinlock_t sc_ctxt_lock; |
| 153 | struct list_head sc_ctxts; | 151 | struct list_head sc_ctxts; |
| 154 | int sc_ctxt_used; | 152 | int sc_ctxt_used; |
| @@ -200,7 +198,6 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma, | |||
| 200 | struct svc_rdma_op_ctxt *ctxt) | 198 | struct svc_rdma_op_ctxt *ctxt) |
| 201 | { | 199 | { |
| 202 | ctxt->mapped_sges++; | 200 | ctxt->mapped_sges++; |
| 203 | atomic_inc(&rdma->sc_dma_used); | ||
| 204 | } | 201 | } |
| 205 | 202 | ||
| 206 | /* svc_rdma_backchannel.c */ | 203 | /* svc_rdma_backchannel.c */ |
| @@ -236,8 +233,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, | |||
| 236 | extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, | 233 | extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, |
| 237 | struct svc_rdma_req_map *, bool); | 234 | struct svc_rdma_req_map *, bool); |
| 238 | extern int svc_rdma_sendto(struct svc_rqst *); | 235 | extern int svc_rdma_sendto(struct svc_rqst *); |
| 239 | extern struct rpcrdma_read_chunk * | ||
| 240 | svc_rdma_get_read_chunk(struct rpcrdma_msg *); | ||
| 241 | extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, | 236 | extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, |
| 242 | int); | 237 | int); |
| 243 | 238 | ||
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d9718378a8be..0c729c3c8549 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
| @@ -194,6 +194,8 @@ struct platform_freeze_ops { | |||
| 194 | }; | 194 | }; |
| 195 | 195 | ||
| 196 | #ifdef CONFIG_SUSPEND | 196 | #ifdef CONFIG_SUSPEND |
| 197 | extern suspend_state_t mem_sleep_default; | ||
| 198 | |||
| 197 | /** | 199 | /** |
| 198 | * suspend_set_ops - set platform dependent suspend operations | 200 | * suspend_set_ops - set platform dependent suspend operations |
| 199 | * @ops: The new suspend operations to set. | 201 | * @ops: The new suspend operations to set. |
diff --git a/include/linux/swap.h b/include/linux/swap.h index a56523cefb9b..09f4be179ff3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -246,39 +246,7 @@ struct swap_info_struct { | |||
| 246 | void *workingset_eviction(struct address_space *mapping, struct page *page); | 246 | void *workingset_eviction(struct address_space *mapping, struct page *page); |
| 247 | bool workingset_refault(void *shadow); | 247 | bool workingset_refault(void *shadow); |
| 248 | void workingset_activation(struct page *page); | 248 | void workingset_activation(struct page *page); |
| 249 | extern struct list_lru workingset_shadow_nodes; | 249 | void workingset_update_node(struct radix_tree_node *node, void *private); |
| 250 | |||
| 251 | static inline unsigned int workingset_node_pages(struct radix_tree_node *node) | ||
| 252 | { | ||
| 253 | return node->count & RADIX_TREE_COUNT_MASK; | ||
| 254 | } | ||
| 255 | |||
| 256 | static inline void workingset_node_pages_inc(struct radix_tree_node *node) | ||
| 257 | { | ||
| 258 | node->count++; | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline void workingset_node_pages_dec(struct radix_tree_node *node) | ||
| 262 | { | ||
| 263 | VM_WARN_ON_ONCE(!workingset_node_pages(node)); | ||
| 264 | node->count--; | ||
| 265 | } | ||
| 266 | |||
| 267 | static inline unsigned int workingset_node_shadows(struct radix_tree_node *node) | ||
| 268 | { | ||
| 269 | return node->count >> RADIX_TREE_COUNT_SHIFT; | ||
| 270 | } | ||
| 271 | |||
| 272 | static inline void workingset_node_shadows_inc(struct radix_tree_node *node) | ||
| 273 | { | ||
| 274 | node->count += 1U << RADIX_TREE_COUNT_SHIFT; | ||
| 275 | } | ||
| 276 | |||
| 277 | static inline void workingset_node_shadows_dec(struct radix_tree_node *node) | ||
| 278 | { | ||
| 279 | VM_WARN_ON_ONCE(!workingset_node_shadows(node)); | ||
| 280 | node->count -= 1U << RADIX_TREE_COUNT_SHIFT; | ||
| 281 | } | ||
| 282 | 250 | ||
| 283 | /* linux/mm/page_alloc.c */ | 251 | /* linux/mm/page_alloc.c */ |
| 284 | extern unsigned long totalram_pages; | 252 | extern unsigned long totalram_pages; |
| @@ -351,6 +319,9 @@ extern int kswapd_run(int nid); | |||
| 351 | extern void kswapd_stop(int nid); | 319 | extern void kswapd_stop(int nid); |
| 352 | 320 | ||
| 353 | #ifdef CONFIG_SWAP | 321 | #ifdef CONFIG_SWAP |
| 322 | |||
| 323 | #include <linux/blk_types.h> /* for bio_end_io_t */ | ||
| 324 | |||
| 354 | /* linux/mm/page_io.c */ | 325 | /* linux/mm/page_io.c */ |
| 355 | extern int swap_readpage(struct page *); | 326 | extern int swap_readpage(struct page *); |
| 356 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | 327 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5f81f8a187f2..183f37c8a5e1 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
| @@ -44,11 +44,13 @@ enum dma_sync_target { | |||
| 44 | extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | 44 | extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, |
| 45 | dma_addr_t tbl_dma_addr, | 45 | dma_addr_t tbl_dma_addr, |
| 46 | phys_addr_t phys, size_t size, | 46 | phys_addr_t phys, size_t size, |
| 47 | enum dma_data_direction dir); | 47 | enum dma_data_direction dir, |
| 48 | unsigned long attrs); | ||
| 48 | 49 | ||
| 49 | extern void swiotlb_tbl_unmap_single(struct device *hwdev, | 50 | extern void swiotlb_tbl_unmap_single(struct device *hwdev, |
| 50 | phys_addr_t tlb_addr, | 51 | phys_addr_t tlb_addr, |
| 51 | size_t size, enum dma_data_direction dir); | 52 | size_t size, enum dma_data_direction dir, |
| 53 | unsigned long attrs); | ||
| 52 | 54 | ||
| 53 | extern void swiotlb_tbl_sync_single(struct device *hwdev, | 55 | extern void swiotlb_tbl_sync_single(struct device *hwdev, |
| 54 | phys_addr_t tlb_addr, | 56 | phys_addr_t tlb_addr, |
| @@ -73,14 +75,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | |||
| 73 | unsigned long attrs); | 75 | unsigned long attrs); |
| 74 | 76 | ||
| 75 | extern int | 77 | extern int |
| 76 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, | ||
| 77 | enum dma_data_direction dir); | ||
| 78 | |||
| 79 | extern void | ||
| 80 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | ||
| 81 | enum dma_data_direction dir); | ||
| 82 | |||
| 83 | extern int | ||
| 84 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 78 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
| 85 | enum dma_data_direction dir, | 79 | enum dma_data_direction dir, |
| 86 | unsigned long attrs); | 80 | unsigned long attrs); |
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index aa17ccfc2f57..3e3ab84fc4cd 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h | |||
| @@ -18,8 +18,8 @@ | |||
| 18 | #include <linux/ktime.h> | 18 | #include <linux/ktime.h> |
| 19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
| 20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
| 21 | #include <linux/fence.h> | 21 | #include <linux/dma-fence.h> |
| 22 | #include <linux/fence-array.h> | 22 | #include <linux/dma-fence-array.h> |
| 23 | 23 | ||
| 24 | /** | 24 | /** |
| 25 | * struct sync_file - sync file to export to the userspace | 25 | * struct sync_file - sync file to export to the userspace |
| @@ -41,13 +41,13 @@ struct sync_file { | |||
| 41 | 41 | ||
| 42 | wait_queue_head_t wq; | 42 | wait_queue_head_t wq; |
| 43 | 43 | ||
| 44 | struct fence *fence; | 44 | struct dma_fence *fence; |
| 45 | struct fence_cb cb; | 45 | struct dma_fence_cb cb; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | #define POLL_ENABLED FENCE_FLAG_USER_BITS | 48 | #define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS |
| 49 | 49 | ||
| 50 | struct sync_file *sync_file_create(struct fence *fence); | 50 | struct sync_file *sync_file_create(struct dma_fence *fence); |
| 51 | struct fence *sync_file_get_fence(int fd); | 51 | struct dma_fence *sync_file_get_fence(int fd); |
| 52 | 52 | ||
| 53 | #endif /* _LINUX_SYNC_H */ | 53 | #endif /* _LINUX_SYNC_H */ |
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index 2739ccb69571..bed223b70217 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h | |||
| @@ -13,6 +13,7 @@ struct soc_device_attribute { | |||
| 13 | const char *family; | 13 | const char *family; |
| 14 | const char *revision; | 14 | const char *revision; |
| 15 | const char *soc_id; | 15 | const char *soc_id; |
| 16 | const void *data; | ||
| 16 | }; | 17 | }; |
| 17 | 18 | ||
| 18 | /** | 19 | /** |
| @@ -34,4 +35,12 @@ void soc_device_unregister(struct soc_device *soc_dev); | |||
| 34 | */ | 35 | */ |
| 35 | struct device *soc_device_to_device(struct soc_device *soc); | 36 | struct device *soc_device_to_device(struct soc_device *soc); |
| 36 | 37 | ||
| 38 | #ifdef CONFIG_SOC_BUS | ||
| 39 | const struct soc_device_attribute *soc_device_match( | ||
| 40 | const struct soc_device_attribute *matches); | ||
| 41 | #else | ||
| 42 | static inline const struct soc_device_attribute *soc_device_match( | ||
| 43 | const struct soc_device_attribute *matches) { return NULL; } | ||
| 44 | #endif | ||
| 45 | |||
| 37 | #endif /* __SOC_BUS_H */ | 46 | #endif /* __SOC_BUS_H */ |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a17ae7b85218..fc5848dad7a4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -123,6 +123,7 @@ struct tcp_request_sock { | |||
| 123 | u32 txhash; | 123 | u32 txhash; |
| 124 | u32 rcv_isn; | 124 | u32 rcv_isn; |
| 125 | u32 snt_isn; | 125 | u32 snt_isn; |
| 126 | u32 ts_off; | ||
| 126 | u32 last_oow_ack_time; /* last SYNACK */ | 127 | u32 last_oow_ack_time; /* last SYNACK */ |
| 127 | u32 rcv_nxt; /* the ack # by SYNACK. For | 128 | u32 rcv_nxt; /* the ack # by SYNACK. For |
| 128 | * FastOpen it's the seq# | 129 | * FastOpen it's the seq# |
| @@ -176,8 +177,6 @@ struct tcp_sock { | |||
| 176 | * sum(delta(snd_una)), or how many bytes | 177 | * sum(delta(snd_una)), or how many bytes |
| 177 | * were acked. | 178 | * were acked. |
| 178 | */ | 179 | */ |
| 179 | struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */ | ||
| 180 | |||
| 181 | u32 snd_una; /* First byte we want an ack for */ | 180 | u32 snd_una; /* First byte we want an ack for */ |
| 182 | u32 snd_sml; /* Last byte of the most recently transmitted small packet */ | 181 | u32 snd_sml; /* Last byte of the most recently transmitted small packet */ |
| 183 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ | 182 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ |
| @@ -187,7 +186,6 @@ struct tcp_sock { | |||
| 187 | u32 tsoffset; /* timestamp offset */ | 186 | u32 tsoffset; /* timestamp offset */ |
| 188 | 187 | ||
| 189 | struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ | 188 | struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ |
| 190 | unsigned long tsq_flags; | ||
| 191 | 189 | ||
| 192 | /* Data for direct copy to user */ | 190 | /* Data for direct copy to user */ |
| 193 | struct { | 191 | struct { |
| @@ -213,8 +211,11 @@ struct tcp_sock { | |||
| 213 | u8 reord; /* reordering detected */ | 211 | u8 reord; /* reordering detected */ |
| 214 | } rack; | 212 | } rack; |
| 215 | u16 advmss; /* Advertised MSS */ | 213 | u16 advmss; /* Advertised MSS */ |
| 216 | u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ | 214 | u32 chrono_start; /* Start time in jiffies of a TCP chrono */ |
| 217 | unused:7; | 215 | u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ |
| 216 | u8 chrono_type:2, /* current chronograph type */ | ||
| 217 | rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ | ||
| 218 | unused:5; | ||
| 218 | u8 nonagle : 4,/* Disable Nagle algorithm? */ | 219 | u8 nonagle : 4,/* Disable Nagle algorithm? */ |
| 219 | thin_lto : 1,/* Use linear timeouts for thin streams */ | 220 | thin_lto : 1,/* Use linear timeouts for thin streams */ |
| 220 | thin_dupack : 1,/* Fast retransmit on first dupack */ | 221 | thin_dupack : 1,/* Fast retransmit on first dupack */ |
| @@ -362,7 +363,7 @@ struct tcp_sock { | |||
| 362 | u32 *saved_syn; | 363 | u32 *saved_syn; |
| 363 | }; | 364 | }; |
| 364 | 365 | ||
| 365 | enum tsq_flags { | 366 | enum tsq_enum { |
| 366 | TSQ_THROTTLED, | 367 | TSQ_THROTTLED, |
| 367 | TSQ_QUEUED, | 368 | TSQ_QUEUED, |
| 368 | TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ | 369 | TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */ |
| @@ -373,6 +374,15 @@ enum tsq_flags { | |||
| 373 | */ | 374 | */ |
| 374 | }; | 375 | }; |
| 375 | 376 | ||
| 377 | enum tsq_flags { | ||
| 378 | TSQF_THROTTLED = (1UL << TSQ_THROTTLED), | ||
| 379 | TSQF_QUEUED = (1UL << TSQ_QUEUED), | ||
| 380 | TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), | ||
| 381 | TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), | ||
| 382 | TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), | ||
| 383 | TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), | ||
| 384 | }; | ||
| 385 | |||
| 376 | static inline struct tcp_sock *tcp_sk(const struct sock *sk) | 386 | static inline struct tcp_sock *tcp_sk(const struct sock *sk) |
| 377 | { | 387 | { |
| 378 | return (struct tcp_sock *)sk; | 388 | return (struct tcp_sock *)sk; |
| @@ -427,4 +437,6 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) | |||
| 427 | tp->saved_syn = NULL; | 437 | tp->saved_syn = NULL; |
| 428 | } | 438 | } |
| 429 | 439 | ||
| 440 | struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); | ||
| 441 | |||
| 430 | #endif /* _LINUX_TCP_H */ | 442 | #endif /* _LINUX_TCP_H */ |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 511182a88e76..e275e98bdceb 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/of.h> | 28 | #include <linux/of.h> |
| 29 | #include <linux/idr.h> | 29 | #include <linux/idr.h> |
| 30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
| 31 | #include <linux/sysfs.h> | ||
| 31 | #include <linux/workqueue.h> | 32 | #include <linux/workqueue.h> |
| 32 | #include <uapi/linux/thermal.h> | 33 | #include <uapi/linux/thermal.h> |
| 33 | 34 | ||
| @@ -204,6 +205,7 @@ struct thermal_zone_device { | |||
| 204 | int id; | 205 | int id; |
| 205 | char type[THERMAL_NAME_LENGTH]; | 206 | char type[THERMAL_NAME_LENGTH]; |
| 206 | struct device device; | 207 | struct device device; |
| 208 | struct attribute_group trips_attribute_group; | ||
| 207 | struct thermal_attr *trip_temp_attrs; | 209 | struct thermal_attr *trip_temp_attrs; |
| 208 | struct thermal_attr *trip_type_attrs; | 210 | struct thermal_attr *trip_type_attrs; |
| 209 | struct thermal_attr *trip_hyst_attrs; | 211 | struct thermal_attr *trip_hyst_attrs; |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 2873baf5372a..58373875e8ee 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -9,50 +9,17 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
| 11 | #include <linux/bug.h> | 11 | #include <linux/bug.h> |
| 12 | 12 | #include <linux/restart_block.h> | |
| 13 | struct timespec; | ||
| 14 | struct compat_timespec; | ||
| 15 | 13 | ||
| 16 | #ifdef CONFIG_THREAD_INFO_IN_TASK | 14 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 17 | #define current_thread_info() ((struct thread_info *)current) | ||
| 18 | #endif | ||
| 19 | |||
| 20 | /* | 15 | /* |
| 21 | * System call restart block. | 16 | * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the |
| 17 | * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, | ||
| 18 | * including <asm/current.h> can cause a circular dependency on some platforms. | ||
| 22 | */ | 19 | */ |
| 23 | struct restart_block { | 20 | #include <asm/current.h> |
| 24 | long (*fn)(struct restart_block *); | 21 | #define current_thread_info() ((struct thread_info *)current) |
| 25 | union { | ||
| 26 | /* For futex_wait and futex_wait_requeue_pi */ | ||
| 27 | struct { | ||
| 28 | u32 __user *uaddr; | ||
| 29 | u32 val; | ||
| 30 | u32 flags; | ||
| 31 | u32 bitset; | ||
| 32 | u64 time; | ||
| 33 | u32 __user *uaddr2; | ||
| 34 | } futex; | ||
| 35 | /* For nanosleep */ | ||
| 36 | struct { | ||
| 37 | clockid_t clockid; | ||
| 38 | struct timespec __user *rmtp; | ||
| 39 | #ifdef CONFIG_COMPAT | ||
| 40 | struct compat_timespec __user *compat_rmtp; | ||
| 41 | #endif | 22 | #endif |
| 42 | u64 expires; | ||
| 43 | } nanosleep; | ||
| 44 | /* For poll */ | ||
| 45 | struct { | ||
| 46 | struct pollfd __user *ufds; | ||
| 47 | int nfds; | ||
| 48 | int has_timeout; | ||
| 49 | unsigned long tv_sec; | ||
| 50 | unsigned long tv_nsec; | ||
| 51 | } poll; | ||
| 52 | }; | ||
| 53 | }; | ||
| 54 | |||
| 55 | extern long do_no_restart_syscall(struct restart_block *parm); | ||
| 56 | 23 | ||
| 57 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
| 58 | #include <asm/thread_info.h> | 25 | #include <asm/thread_info.h> |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 62be0786d6d0..a04fea19676f 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -127,9 +127,7 @@ static inline void tick_nohz_idle_exit(void) { } | |||
| 127 | 127 | ||
| 128 | static inline ktime_t tick_nohz_get_sleep_length(void) | 128 | static inline ktime_t tick_nohz_get_sleep_length(void) |
| 129 | { | 129 | { |
| 130 | ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; | 130 | return NSEC_PER_SEC / HZ; |
| 131 | |||
| 132 | return len; | ||
| 133 | } | 131 | } |
| 134 | static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } | 132 | static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } |
| 135 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } | 133 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } |
diff --git a/include/linux/time.h b/include/linux/time.h index 4cea09d94208..23f0f5ce3090 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value, | |||
| 172 | struct itimerval *ovalue); | 172 | struct itimerval *ovalue); |
| 173 | extern int do_getitimer(int which, struct itimerval *value); | 173 | extern int do_getitimer(int which, struct itimerval *value); |
| 174 | 174 | ||
| 175 | extern unsigned int alarm_setitimer(unsigned int seconds); | ||
| 176 | |||
| 177 | extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); | 175 | extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
| 178 | 176 | ||
| 179 | struct tms; | 177 | struct tms; |
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 4382035a75bb..2496ad4cfc99 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
| 21 | 21 | ||
| 22 | /* simplify initialization of mask field */ | 22 | /* simplify initialization of mask field */ |
| 23 | #define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) | 23 | #define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
| 24 | 24 | ||
| 25 | /** | 25 | /** |
| 26 | * struct cyclecounter - hardware abstraction for a free running counter | 26 | * struct cyclecounter - hardware abstraction for a free running counter |
| @@ -37,8 +37,8 @@ | |||
| 37 | * @shift: cycle to nanosecond divisor (power of two) | 37 | * @shift: cycle to nanosecond divisor (power of two) |
| 38 | */ | 38 | */ |
| 39 | struct cyclecounter { | 39 | struct cyclecounter { |
| 40 | cycle_t (*read)(const struct cyclecounter *cc); | 40 | u64 (*read)(const struct cyclecounter *cc); |
| 41 | cycle_t mask; | 41 | u64 mask; |
| 42 | u32 mult; | 42 | u32 mult; |
| 43 | u32 shift; | 43 | u32 shift; |
| 44 | }; | 44 | }; |
| @@ -63,7 +63,7 @@ struct cyclecounter { | |||
| 63 | */ | 63 | */ |
| 64 | struct timecounter { | 64 | struct timecounter { |
| 65 | const struct cyclecounter *cc; | 65 | const struct cyclecounter *cc; |
| 66 | cycle_t cycle_last; | 66 | u64 cycle_last; |
| 67 | u64 nsec; | 67 | u64 nsec; |
| 68 | u64 mask; | 68 | u64 mask; |
| 69 | u64 frac; | 69 | u64 frac; |
| @@ -77,7 +77,7 @@ struct timecounter { | |||
| 77 | * @frac: pointer to storage for the fractional nanoseconds. | 77 | * @frac: pointer to storage for the fractional nanoseconds. |
| 78 | */ | 78 | */ |
| 79 | static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, | 79 | static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, |
| 80 | cycle_t cycles, u64 mask, u64 *frac) | 80 | u64 cycles, u64 mask, u64 *frac) |
| 81 | { | 81 | { |
| 82 | u64 ns = (u64) cycles; | 82 | u64 ns = (u64) cycles; |
| 83 | 83 | ||
| @@ -134,6 +134,6 @@ extern u64 timecounter_read(struct timecounter *tc); | |||
| 134 | * in the past. | 134 | * in the past. |
| 135 | */ | 135 | */ |
| 136 | extern u64 timecounter_cyc2time(struct timecounter *tc, | 136 | extern u64 timecounter_cyc2time(struct timecounter *tc, |
| 137 | cycle_t cycle_tstamp); | 137 | u64 cycle_tstamp); |
| 138 | 138 | ||
| 139 | #endif | 139 | #endif |
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e88005459035..110f4532188c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h | |||
| @@ -29,9 +29,9 @@ | |||
| 29 | */ | 29 | */ |
| 30 | struct tk_read_base { | 30 | struct tk_read_base { |
| 31 | struct clocksource *clock; | 31 | struct clocksource *clock; |
| 32 | cycle_t (*read)(struct clocksource *cs); | 32 | u64 (*read)(struct clocksource *cs); |
| 33 | cycle_t mask; | 33 | u64 mask; |
| 34 | cycle_t cycle_last; | 34 | u64 cycle_last; |
| 35 | u32 mult; | 35 | u32 mult; |
| 36 | u32 shift; | 36 | u32 shift; |
| 37 | u64 xtime_nsec; | 37 | u64 xtime_nsec; |
| @@ -97,7 +97,7 @@ struct timekeeper { | |||
| 97 | struct timespec64 raw_time; | 97 | struct timespec64 raw_time; |
| 98 | 98 | ||
| 99 | /* The following members are for timekeeping internal use */ | 99 | /* The following members are for timekeeping internal use */ |
| 100 | cycle_t cycle_interval; | 100 | u64 cycle_interval; |
| 101 | u64 xtime_interval; | 101 | u64 xtime_interval; |
| 102 | s64 xtime_remainder; | 102 | s64 xtime_remainder; |
| 103 | u32 raw_interval; | 103 | u32 raw_interval; |
| @@ -136,7 +136,7 @@ extern void update_vsyscall_tz(void); | |||
| 136 | 136 | ||
| 137 | extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, | 137 | extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, |
| 138 | struct clocksource *c, u32 mult, | 138 | struct clocksource *c, u32 mult, |
| 139 | cycle_t cycle_last); | 139 | u64 cycle_last); |
| 140 | extern void update_vsyscall_tz(void); | 140 | extern void update_vsyscall_tz(void); |
| 141 | 141 | ||
| 142 | #else | 142 | #else |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 09168c52ab64..d2e804e15c3e 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
| @@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void) | |||
| 249 | 249 | ||
| 250 | extern u64 ktime_get_mono_fast_ns(void); | 250 | extern u64 ktime_get_mono_fast_ns(void); |
| 251 | extern u64 ktime_get_raw_fast_ns(void); | 251 | extern u64 ktime_get_raw_fast_ns(void); |
| 252 | extern u64 ktime_get_boot_fast_ns(void); | ||
| 252 | 253 | ||
| 253 | /* | 254 | /* |
| 254 | * Timespec interfaces utilizing the ktime based ones | 255 | * Timespec interfaces utilizing the ktime based ones |
| @@ -292,7 +293,7 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, | |||
| 292 | * @cs_was_changed_seq: The sequence number of clocksource change events | 293 | * @cs_was_changed_seq: The sequence number of clocksource change events |
| 293 | */ | 294 | */ |
| 294 | struct system_time_snapshot { | 295 | struct system_time_snapshot { |
| 295 | cycle_t cycles; | 296 | u64 cycles; |
| 296 | ktime_t real; | 297 | ktime_t real; |
| 297 | ktime_t raw; | 298 | ktime_t raw; |
| 298 | unsigned int clock_was_set_seq; | 299 | unsigned int clock_was_set_seq; |
| @@ -320,7 +321,7 @@ struct system_device_crosststamp { | |||
| 320 | * timekeeping code to verify comparibility of two cycle values | 321 | * timekeeping code to verify comparibility of two cycle values |
| 321 | */ | 322 | */ |
| 322 | struct system_counterval_t { | 323 | struct system_counterval_t { |
| 323 | cycle_t cycles; | 324 | u64 cycles; |
| 324 | struct clocksource *cs; | 325 | struct clocksource *cs; |
| 325 | }; | 326 | }; |
| 326 | 327 | ||
diff --git a/include/linux/trace.h b/include/linux/trace.h new file mode 100644 index 000000000000..9330a58e2651 --- /dev/null +++ b/include/linux/trace.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | #ifndef _LINUX_TRACE_H | ||
| 2 | #define _LINUX_TRACE_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_TRACING | ||
| 5 | /* | ||
| 6 | * The trace export - an export of Ftrace output. The trace_export | ||
| 7 | * can process traces and export them to a registered destination as | ||
| 8 | * an addition to the current only output of Ftrace - i.e. ring buffer. | ||
| 9 | * | ||
| 10 | * If you want traces to be sent to some other place rather than ring | ||
| 11 | * buffer only, just need to register a new trace_export and implement | ||
| 12 | * its own .write() function for writing traces to the storage. | ||
| 13 | * | ||
| 14 | * next - pointer to the next trace_export | ||
| 15 | * write - copy traces which have been delt with ->commit() to | ||
| 16 | * the destination | ||
| 17 | */ | ||
| 18 | struct trace_export { | ||
| 19 | struct trace_export __rcu *next; | ||
| 20 | void (*write)(const void *, unsigned int); | ||
| 21 | }; | ||
| 22 | |||
| 23 | int register_ftrace_export(struct trace_export *export); | ||
| 24 | int unregister_ftrace_export(struct trace_export *export); | ||
| 25 | |||
| 26 | #endif /* CONFIG_TRACING */ | ||
| 27 | |||
| 28 | #endif /* _LINUX_TRACE_H */ | ||
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4ac89acb6136..a03192052066 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h | |||
| @@ -29,7 +29,7 @@ struct tracepoint_func { | |||
| 29 | struct tracepoint { | 29 | struct tracepoint { |
| 30 | const char *name; /* Tracepoint name */ | 30 | const char *name; /* Tracepoint name */ |
| 31 | struct static_key key; | 31 | struct static_key key; |
| 32 | void (*regfunc)(void); | 32 | int (*regfunc)(void); |
| 33 | void (*unregfunc)(void); | 33 | void (*unregfunc)(void); |
| 34 | struct tracepoint_func __rcu *funcs; | 34 | struct tracepoint_func __rcu *funcs; |
| 35 | }; | 35 | }; |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index be586c632a0c..f72fcfe0e66a 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
| @@ -81,7 +81,7 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | 83 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
| 84 | extern void syscall_regfunc(void); | 84 | extern int syscall_regfunc(void); |
| 85 | extern void syscall_unregfunc(void); | 85 | extern void syscall_unregfunc(void); |
| 86 | #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ | 86 | #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ |
| 87 | 87 | ||
diff --git a/include/linux/types.h b/include/linux/types.h index baf718324f4a..1e7bd24848fc 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -154,8 +154,8 @@ typedef u64 dma_addr_t; | |||
| 154 | typedef u32 dma_addr_t; | 154 | typedef u32 dma_addr_t; |
| 155 | #endif | 155 | #endif |
| 156 | 156 | ||
| 157 | typedef unsigned __bitwise__ gfp_t; | 157 | typedef unsigned __bitwise gfp_t; |
| 158 | typedef unsigned __bitwise__ fmode_t; | 158 | typedef unsigned __bitwise fmode_t; |
| 159 | 159 | ||
| 160 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 160 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 161 | typedef u64 phys_addr_t; | 161 | typedef u64 phys_addr_t; |
| @@ -228,8 +228,5 @@ struct callback_head { | |||
| 228 | typedef void (*rcu_callback_t)(struct rcu_head *head); | 228 | typedef void (*rcu_callback_t)(struct rcu_head *head); |
| 229 | typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); | 229 | typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); |
| 230 | 230 | ||
| 231 | /* clocksource cycle base type */ | ||
| 232 | typedef u64 cycle_t; | ||
| 233 | |||
| 234 | #endif /* __ASSEMBLY__ */ | 231 | #endif /* __ASSEMBLY__ */ |
| 235 | #endif /* _LINUX_TYPES_H */ | 232 | #endif /* _LINUX_TYPES_H */ |
diff --git a/include/linux/udp.h b/include/linux/udp.h index d1fd8cd39478..c0f530809d1f 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h | |||
| @@ -79,6 +79,9 @@ struct udp_sock { | |||
| 79 | int (*gro_complete)(struct sock *sk, | 79 | int (*gro_complete)(struct sock *sk, |
| 80 | struct sk_buff *skb, | 80 | struct sk_buff *skb, |
| 81 | int nhoff); | 81 | int nhoff); |
| 82 | |||
| 83 | /* This field is dirtied by udp_recvmsg() */ | ||
| 84 | int forward_deficit; | ||
| 82 | }; | 85 | }; |
| 83 | 86 | ||
| 84 | static inline struct udp_sock *udp_sk(const struct sock *sk) | 87 | static inline struct udp_sock *udp_sk(const struct sock *sk) |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 6e22b544d039..804e34c6f981 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |||
| 89 | struct iov_iter *i); | 89 | struct iov_iter *i); |
| 90 | size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); | 90 | size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); |
| 91 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); | 91 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); |
| 92 | bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); | ||
| 92 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); | 93 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); |
| 94 | bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); | ||
| 93 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); | 95 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); |
| 94 | unsigned long iov_iter_alignment(const struct iov_iter *i); | 96 | unsigned long iov_iter_alignment(const struct iov_iter *i); |
| 95 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i); | 97 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i); |
| @@ -125,7 +127,7 @@ static inline bool iter_is_iovec(const struct iov_iter *i) | |||
| 125 | * | 127 | * |
| 126 | * The ?: is just for type safety. | 128 | * The ?: is just for type safety. |
| 127 | */ | 129 | */ |
| 128 | #define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK) | 130 | #define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE)) |
| 129 | 131 | ||
| 130 | /* | 132 | /* |
| 131 | * Cap the iov_iter by given limit; note that the second argument is | 133 | * Cap the iov_iter by given limit; note that the second argument is |
| @@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) | |||
| 155 | } | 157 | } |
| 156 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | 158 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); |
| 157 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | 159 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); |
| 160 | bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | ||
| 158 | 161 | ||
| 159 | int import_iovec(int type, const struct iovec __user * uvector, | 162 | int import_iovec(int type, const struct iovec __user * uvector, |
| 160 | unsigned nr_segs, unsigned fast_segs, | 163 | unsigned nr_segs, unsigned fast_segs, |
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4a29c75b146e..0a294e950df8 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
| 28 | #include <linux/rbtree.h> | 28 | #include <linux/rbtree.h> |
| 29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| 30 | #include <linux/wait.h> | ||
| 30 | 31 | ||
| 31 | struct vm_area_struct; | 32 | struct vm_area_struct; |
| 32 | struct mm_struct; | 33 | struct mm_struct; |
diff --git a/include/linux/usb.h b/include/linux/usb.h index eba1f10e8cfd..7e68259360de 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type; | |||
| 1160 | * @minor_base: the start of the minor range for this driver. | 1160 | * @minor_base: the start of the minor range for this driver. |
| 1161 | * | 1161 | * |
| 1162 | * This structure is used for the usb_register_dev() and | 1162 | * This structure is used for the usb_register_dev() and |
| 1163 | * usb_unregister_dev() functions, to consolidate a number of the | 1163 | * usb_deregister_dev() functions, to consolidate a number of the |
| 1164 | * parameters used for them. | 1164 | * parameters used for them. |
| 1165 | */ | 1165 | */ |
| 1166 | struct usb_class_driver { | 1166 | struct usb_class_driver { |
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 8e81f9eb95e4..e4516e9ded0f 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
| @@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) | |||
| 429 | */ | 429 | */ |
| 430 | static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) | 430 | static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) |
| 431 | { | 431 | { |
| 432 | return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize)); | 432 | int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; |
| 433 | |||
| 434 | return round_up(len, max_packet_size); | ||
| 433 | } | 435 | } |
| 434 | 436 | ||
| 435 | /** | 437 | /** |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 66fc13705ab7..40edf6a8533e 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *); | |||
| 566 | ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) | 566 | ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) |
| 567 | 567 | ||
| 568 | /* class requests from the USB 2.0 hub spec, table 11-15 */ | 568 | /* class requests from the USB 2.0 hub spec, table 11-15 */ |
| 569 | #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) | ||
| 569 | /* GetBusState and SetHubDescriptor are optional, omitted */ | 570 | /* GetBusState and SetHubDescriptor are optional, omitted */ |
| 570 | #define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) | 571 | #define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) |
| 571 | #define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) | 572 | #define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) |
| 572 | #define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) | 573 | #define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) |
| 573 | #define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) | 574 | #define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) |
| 574 | #define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) | 575 | #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) |
| 575 | #define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) | 576 | #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) |
| 576 | #define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) | 577 | #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) |
| 577 | 578 | ||
| 578 | 579 | ||
| 579 | /*-------------------------------------------------------------------------*/ | 580 | /*-------------------------------------------------------------------------*/ |
| 580 | 581 | ||
| 581 | /* class requests from USB 3.1 hub spec, table 10-7 */ | 582 | /* class requests from USB 3.1 hub spec, table 10-7 */ |
| 582 | #define SetHubDepth (0x2000 | HUB_SET_DEPTH) | 583 | #define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) |
| 583 | #define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT) | 584 | #define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) |
| 584 | 585 | ||
| 585 | /* | 586 | /* |
| 586 | * Generic bandwidth allocation constants/support | 587 | * Generic bandwidth allocation constants/support |
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index dd66a952e8cd..11b92b047a1e 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) | 27 | #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) |
| 28 | #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) | 28 | #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) |
| 29 | 29 | ||
| 30 | extern int handle_userfault(struct fault_env *fe, unsigned long reason); | 30 | extern int handle_userfault(struct vm_fault *vmf, unsigned long reason); |
| 31 | 31 | ||
| 32 | extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, | 32 | extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, |
| 33 | unsigned long src_start, unsigned long len); | 33 | unsigned long src_start, unsigned long len); |
| @@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) | |||
| 55 | #else /* CONFIG_USERFAULTFD */ | 55 | #else /* CONFIG_USERFAULTFD */ |
| 56 | 56 | ||
| 57 | /* mm helpers */ | 57 | /* mm helpers */ |
| 58 | static inline int handle_userfault(struct fault_env *fe, unsigned long reason) | 58 | static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason) |
| 59 | { | 59 | { |
| 60 | return VM_FAULT_SIGBUS; | 60 | return VM_FAULT_SIGBUS; |
| 61 | } | 61 | } |
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 0ecae0b1cd34..edf9b2cad277 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h | |||
| @@ -75,7 +75,16 @@ struct vfio_iommu_driver_ops { | |||
| 75 | struct iommu_group *group); | 75 | struct iommu_group *group); |
| 76 | void (*detach_group)(void *iommu_data, | 76 | void (*detach_group)(void *iommu_data, |
| 77 | struct iommu_group *group); | 77 | struct iommu_group *group); |
| 78 | 78 | int (*pin_pages)(void *iommu_data, unsigned long *user_pfn, | |
| 79 | int npage, int prot, | ||
| 80 | unsigned long *phys_pfn); | ||
| 81 | int (*unpin_pages)(void *iommu_data, | ||
| 82 | unsigned long *user_pfn, int npage); | ||
| 83 | int (*register_notifier)(void *iommu_data, | ||
| 84 | unsigned long *events, | ||
| 85 | struct notifier_block *nb); | ||
| 86 | int (*unregister_notifier)(void *iommu_data, | ||
| 87 | struct notifier_block *nb); | ||
| 79 | }; | 88 | }; |
| 80 | 89 | ||
| 81 | extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); | 90 | extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); |
| @@ -92,6 +101,36 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group); | |||
| 92 | extern long vfio_external_check_extension(struct vfio_group *group, | 101 | extern long vfio_external_check_extension(struct vfio_group *group, |
| 93 | unsigned long arg); | 102 | unsigned long arg); |
| 94 | 103 | ||
| 104 | #define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) | ||
| 105 | |||
| 106 | extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, | ||
| 107 | int npage, int prot, unsigned long *phys_pfn); | ||
| 108 | extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, | ||
| 109 | int npage); | ||
| 110 | |||
| 111 | /* each type has independent events */ | ||
| 112 | enum vfio_notify_type { | ||
| 113 | VFIO_IOMMU_NOTIFY = 0, | ||
| 114 | VFIO_GROUP_NOTIFY = 1, | ||
| 115 | }; | ||
| 116 | |||
| 117 | /* events for VFIO_IOMMU_NOTIFY */ | ||
| 118 | #define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) | ||
| 119 | |||
| 120 | /* events for VFIO_GROUP_NOTIFY */ | ||
| 121 | #define VFIO_GROUP_NOTIFY_SET_KVM BIT(0) | ||
| 122 | |||
| 123 | extern int vfio_register_notifier(struct device *dev, | ||
| 124 | enum vfio_notify_type type, | ||
| 125 | unsigned long *required_events, | ||
| 126 | struct notifier_block *nb); | ||
| 127 | extern int vfio_unregister_notifier(struct device *dev, | ||
| 128 | enum vfio_notify_type type, | ||
| 129 | struct notifier_block *nb); | ||
| 130 | |||
| 131 | struct kvm; | ||
| 132 | extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm); | ||
| 133 | |||
| 95 | /* | 134 | /* |
| 96 | * Sub-module helpers | 135 | * Sub-module helpers |
| 97 | */ | 136 | */ |
| @@ -103,6 +142,13 @@ extern struct vfio_info_cap_header *vfio_info_cap_add( | |||
| 103 | struct vfio_info_cap *caps, size_t size, u16 id, u16 version); | 142 | struct vfio_info_cap *caps, size_t size, u16 id, u16 version); |
| 104 | extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); | 143 | extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); |
| 105 | 144 | ||
| 145 | extern int vfio_info_add_capability(struct vfio_info_cap *caps, | ||
| 146 | int cap_type_id, void *cap_type); | ||
| 147 | |||
| 148 | extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, | ||
| 149 | int num_irqs, int max_irq_type, | ||
| 150 | size_t *data_size); | ||
| 151 | |||
| 106 | struct pci_dev; | 152 | struct pci_dev; |
| 107 | #ifdef CONFIG_EEH | 153 | #ifdef CONFIG_EEH |
| 108 | extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); | 154 | extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 1c912f85e041..66204007d7ac 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
| @@ -58,7 +58,7 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, | |||
| 58 | struct virtio_net_hdr *hdr, | 58 | struct virtio_net_hdr *hdr, |
| 59 | bool little_endian) | 59 | bool little_endian) |
| 60 | { | 60 | { |
| 61 | memset(hdr, 0, sizeof(*hdr)); | 61 | memset(hdr, 0, sizeof(*hdr)); /* no info leak */ |
| 62 | 62 | ||
| 63 | if (skb_is_gso(skb)) { | 63 | if (skb_is_gso(skb)) { |
| 64 | struct skb_shared_info *sinfo = skb_shinfo(skb); | 64 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
| @@ -98,4 +98,4 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, | |||
| 98 | return 0; | 98 | return 0; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | #endif /* _LINUX_VIRTIO_BYTEORDER */ | 101 | #endif /* _LINUX_VIRTIO_NET_H */ |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3d9d786a943c..d68edffbf142 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
| @@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, | |||
| 82 | const void *caller); | 82 | const void *caller); |
| 83 | 83 | ||
| 84 | extern void vfree(const void *addr); | 84 | extern void vfree(const void *addr); |
| 85 | extern void vfree_atomic(const void *addr); | ||
| 85 | 86 | ||
| 86 | extern void *vmap(struct page **pages, unsigned int count, | 87 | extern void *vmap(struct page **pages, unsigned int count, |
| 87 | unsigned long flags, pgprot_t prot); | 88 | unsigned long flags, pgprot_t prot); |
diff --git a/include/linux/vme.h b/include/linux/vme.h index ea6095deba20..8c589176c2f8 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h | |||
| @@ -113,7 +113,6 @@ struct vme_driver { | |||
| 113 | int (*match)(struct vme_dev *); | 113 | int (*match)(struct vme_dev *); |
| 114 | int (*probe)(struct vme_dev *); | 114 | int (*probe)(struct vme_dev *); |
| 115 | int (*remove)(struct vme_dev *); | 115 | int (*remove)(struct vme_dev *); |
| 116 | void (*shutdown)(void); | ||
| 117 | struct device_driver driver; | 116 | struct device_driver driver; |
| 118 | struct list_head devices; | 117 | struct list_head devices; |
| 119 | }; | 118 | }; |
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 6abd24f258bc..833fdd4794a0 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h | |||
| @@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds); | |||
| 191 | extern void vt_kbd_con_start(int console); | 191 | extern void vt_kbd_con_start(int console); |
| 192 | extern void vt_kbd_con_stop(int console); | 192 | extern void vt_kbd_con_stop(int console); |
| 193 | 193 | ||
| 194 | void vc_scrolldelta_helper(struct vc_data *c, int lines, | ||
| 195 | unsigned int rolled_over, void *_base, unsigned int size); | ||
| 194 | 196 | ||
| 195 | #endif /* _VT_KERN_H */ | 197 | #endif /* _VT_KERN_H */ |
diff --git a/include/linux/wait.h b/include/linux/wait.h index 2408e8d5c05c..1421132e9086 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -510,7 +510,7 @@ do { \ | |||
| 510 | hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ | 510 | hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ |
| 511 | HRTIMER_MODE_REL); \ | 511 | HRTIMER_MODE_REL); \ |
| 512 | hrtimer_init_sleeper(&__t, current); \ | 512 | hrtimer_init_sleeper(&__t, current); \ |
| 513 | if ((timeout).tv64 != KTIME_MAX) \ | 513 | if ((timeout) != KTIME_MAX) \ |
| 514 | hrtimer_start_range_ns(&__t.timer, timeout, \ | 514 | hrtimer_start_range_ns(&__t.timer, timeout, \ |
| 515 | current->timer_slack_ns, \ | 515 | current->timer_slack_ns, \ |
| 516 | HRTIMER_MODE_REL); \ | 516 | HRTIMER_MODE_REL); \ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index fc6e22186405..a26cc437293c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -119,18 +119,30 @@ struct delayed_work { | |||
| 119 | int cpu; | 119 | int cpu; |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | /* | 122 | /** |
| 123 | * A struct for workqueue attributes. This can be used to change | 123 | * struct workqueue_attrs - A struct for workqueue attributes. |
| 124 | * attributes of an unbound workqueue. | ||
| 125 | * | 124 | * |
| 126 | * Unlike other fields, ->no_numa isn't a property of a worker_pool. It | 125 | * This can be used to change attributes of an unbound workqueue. |
| 127 | * only modifies how apply_workqueue_attrs() select pools and thus doesn't | ||
| 128 | * participate in pool hash calculations or equality comparisons. | ||
| 129 | */ | 126 | */ |
| 130 | struct workqueue_attrs { | 127 | struct workqueue_attrs { |
| 131 | int nice; /* nice level */ | 128 | /** |
| 132 | cpumask_var_t cpumask; /* allowed CPUs */ | 129 | * @nice: nice level |
| 133 | bool no_numa; /* disable NUMA affinity */ | 130 | */ |
| 131 | int nice; | ||
| 132 | |||
| 133 | /** | ||
| 134 | * @cpumask: allowed CPUs | ||
| 135 | */ | ||
| 136 | cpumask_var_t cpumask; | ||
| 137 | |||
| 138 | /** | ||
| 139 | * @no_numa: disable NUMA affinity | ||
| 140 | * | ||
| 141 | * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It | ||
| 142 | * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus | ||
| 143 | * doesn't participate in pool hash calculations or equality comparisons. | ||
| 144 | */ | ||
| 145 | bool no_numa; | ||
| 134 | }; | 146 | }; |
| 135 | 147 | ||
| 136 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) | 148 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) |
| @@ -272,7 +284,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
| 272 | 284 | ||
| 273 | /* | 285 | /* |
| 274 | * Workqueue flags and constants. For details, please refer to | 286 | * Workqueue flags and constants. For details, please refer to |
| 275 | * Documentation/workqueue.txt. | 287 | * Documentation/core-api/workqueue.rst. |
| 276 | */ | 288 | */ |
| 277 | enum { | 289 | enum { |
| 278 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 290 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
| @@ -370,7 +382,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
| 370 | * @args...: args for @fmt | 382 | * @args...: args for @fmt |
| 371 | * | 383 | * |
| 372 | * Allocate a workqueue with the specified parameters. For detailed | 384 | * Allocate a workqueue with the specified parameters. For detailed |
| 373 | * information on WQ_* flags, please refer to Documentation/workqueue.txt. | 385 | * information on WQ_* flags, please refer to |
| 386 | * Documentation/core-api/workqueue.rst. | ||
| 374 | * | 387 | * |
| 375 | * The __lock_name macro dance is to guarantee that single lock_class_key | 388 | * The __lock_name macro dance is to guarantee that single lock_class_key |
| 376 | * doesn't end up with different namesm, which isn't allowed by lockdep. | 389 | * doesn't end up with different namesm, which isn't allowed by lockdep. |
| @@ -590,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork, | |||
| 590 | return queue_delayed_work(system_wq, dwork, delay); | 603 | return queue_delayed_work(system_wq, dwork, delay); |
| 591 | } | 604 | } |
| 592 | 605 | ||
| 593 | /** | ||
| 594 | * keventd_up - is workqueue initialized yet? | ||
| 595 | */ | ||
| 596 | static inline bool keventd_up(void) | ||
| 597 | { | ||
| 598 | return system_wq != NULL; | ||
| 599 | } | ||
| 600 | |||
| 601 | #ifndef CONFIG_SMP | 606 | #ifndef CONFIG_SMP |
| 602 | static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | 607 | static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) |
| 603 | { | 608 | { |
| @@ -632,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu); | |||
| 632 | int workqueue_offline_cpu(unsigned int cpu); | 637 | int workqueue_offline_cpu(unsigned int cpu); |
| 633 | #endif | 638 | #endif |
| 634 | 639 | ||
| 640 | int __init workqueue_init_early(void); | ||
| 641 | int __init workqueue_init(void); | ||
| 642 | |||
| 635 | #endif | 643 | #endif |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 797100e10010..5527d910ba3d 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -9,6 +9,9 @@ | |||
| 9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
| 10 | #include <linux/flex_proportions.h> | 10 | #include <linux/flex_proportions.h> |
| 11 | #include <linux/backing-dev-defs.h> | 11 | #include <linux/backing-dev-defs.h> |
| 12 | #include <linux/blk_types.h> | ||
| 13 | |||
| 14 | struct bio; | ||
| 12 | 15 | ||
| 13 | DECLARE_PER_CPU(int, dirty_throttle_leaks); | 16 | DECLARE_PER_CPU(int, dirty_throttle_leaks); |
| 14 | 17 | ||
| @@ -100,6 +103,16 @@ struct writeback_control { | |||
| 100 | #endif | 103 | #endif |
| 101 | }; | 104 | }; |
| 102 | 105 | ||
| 106 | static inline int wbc_to_write_flags(struct writeback_control *wbc) | ||
| 107 | { | ||
| 108 | if (wbc->sync_mode == WB_SYNC_ALL) | ||
| 109 | return REQ_SYNC; | ||
| 110 | else if (wbc->for_kupdate || wbc->for_background) | ||
| 111 | return REQ_BACKGROUND; | ||
| 112 | |||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 103 | /* | 116 | /* |
| 104 | * A wb_domain represents a domain that wb's (bdi_writeback's) belong to | 117 | * A wb_domain represents a domain that wb's (bdi_writeback's) belong to |
| 105 | * and are measured against each other in. There always is one global | 118 | * and are measured against each other in. There always is one global |
| @@ -362,7 +375,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); | |||
| 362 | unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); | 375 | unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); |
| 363 | 376 | ||
| 364 | void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); | 377 | void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); |
| 365 | void page_writeback_init(void); | ||
| 366 | void balance_dirty_pages_ratelimited(struct address_space *mapping); | 378 | void balance_dirty_pages_ratelimited(struct address_space *mapping); |
| 367 | bool wb_over_bg_thresh(struct bdi_writeback *wb); | 379 | bool wb_over_bg_thresh(struct bdi_writeback *wb); |
| 368 | 380 | ||
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 2bb5deb0012e..7b0066814fa0 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h | |||
| @@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, | |||
| 120 | struct ww_class *ww_class) | 120 | struct ww_class *ww_class) |
| 121 | { | 121 | { |
| 122 | ctx->task = current; | 122 | ctx->task = current; |
| 123 | ctx->stamp = atomic_long_inc_return(&ww_class->stamp); | 123 | ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); |
| 124 | ctx->acquired = 0; | 124 | ctx->acquired = 0; |
| 125 | #ifdef CONFIG_DEBUG_MUTEXES | 125 | #ifdef CONFIG_DEBUG_MUTEXES |
| 126 | ctx->ww_class = ww_class; | 126 | ctx->ww_class = ww_class; |
