diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 03:04:05 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 03:04:05 -0500 |
commit | 92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch) | |
tree | 15626ff9287e37c3cb81c7286d6db5a7fd77c854 /include/linux | |
parent | 15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff) | |
parent | 1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff) |
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date: Thu Nov 5 23:04:11 2015 +0200
drm/i915/skl: disable display side power well support for now
so that we can proplery re-eanble skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changs. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'include/linux')
319 files changed, 8662 insertions, 3102 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..054833939995 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -49,7 +49,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | |||
49 | return adev ? adev->handle : NULL; | 49 | return adev ? adev->handle : NULL; |
50 | } | 50 | } |
51 | 51 | ||
52 | #define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode) | 52 | #define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) |
53 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ | 53 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ |
54 | acpi_fwnode_handle(adev) : NULL) | 54 | acpi_fwnode_handle(adev) : NULL) |
55 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) | 55 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
@@ -69,7 +69,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | |||
69 | 69 | ||
70 | static inline bool has_acpi_companion(struct device *dev) | 70 | static inline bool has_acpi_companion(struct device *dev) |
71 | { | 71 | { |
72 | return is_acpi_node(dev->fwnode); | 72 | return is_acpi_device_node(dev->fwnode); |
73 | } | 73 | } |
74 | 74 | ||
75 | static inline void acpi_preset_companion(struct device *dev, | 75 | static inline void acpi_preset_companion(struct device *dev, |
@@ -131,6 +131,12 @@ static inline void acpi_initrd_override(void *data, size_t size) | |||
131 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ | 131 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ |
132 | ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) | 132 | ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) |
133 | 133 | ||
134 | struct acpi_subtable_proc { | ||
135 | int id; | ||
136 | acpi_tbl_entry_handler handler; | ||
137 | int count; | ||
138 | }; | ||
139 | |||
134 | char * __acpi_map_table (unsigned long phys_addr, unsigned long size); | 140 | char * __acpi_map_table (unsigned long phys_addr, unsigned long size); |
135 | void __acpi_unmap_table(char *map, unsigned long size); | 141 | void __acpi_unmap_table(char *map, unsigned long size); |
136 | int early_acpi_boot_init(void); | 142 | int early_acpi_boot_init(void); |
@@ -146,9 +152,16 @@ int __init acpi_parse_entries(char *id, unsigned long table_size, | |||
146 | struct acpi_table_header *table_header, | 152 | struct acpi_table_header *table_header, |
147 | int entry_id, unsigned int max_entries); | 153 | int entry_id, unsigned int max_entries); |
148 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | 154 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, |
149 | int entry_id, | 155 | int entry_id, |
150 | acpi_tbl_entry_handler handler, | 156 | acpi_tbl_entry_handler handler, |
151 | unsigned int max_entries); | 157 | unsigned int max_entries); |
158 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | ||
159 | int entry_id, | ||
160 | acpi_tbl_entry_handler handler, | ||
161 | unsigned int max_entries); | ||
162 | int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, | ||
163 | struct acpi_subtable_proc *proc, int proc_num, | ||
164 | unsigned int max_entries); | ||
152 | int acpi_table_parse_madt(enum acpi_madt_type id, | 165 | int acpi_table_parse_madt(enum acpi_madt_type id, |
153 | acpi_tbl_entry_handler handler, | 166 | acpi_tbl_entry_handler handler, |
154 | unsigned int max_entries); | 167 | unsigned int max_entries); |
@@ -193,6 +206,12 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); | |||
193 | void acpi_irq_stats_init(void); | 206 | void acpi_irq_stats_init(void); |
194 | extern u32 acpi_irq_handled; | 207 | extern u32 acpi_irq_handled; |
195 | extern u32 acpi_irq_not_handled; | 208 | extern u32 acpi_irq_not_handled; |
209 | extern unsigned int acpi_sci_irq; | ||
210 | #define INVALID_ACPI_IRQ ((unsigned)-1) | ||
211 | static inline bool acpi_sci_irq_valid(void) | ||
212 | { | ||
213 | return acpi_sci_irq != INVALID_ACPI_IRQ; | ||
214 | } | ||
196 | 215 | ||
197 | extern int sbf_port; | 216 | extern int sbf_port; |
198 | extern unsigned long acpi_realmode_flags; | 217 | extern unsigned long acpi_realmode_flags; |
@@ -201,6 +220,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity | |||
201 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); | 220 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
202 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); | 221 | int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); |
203 | 222 | ||
223 | void acpi_set_irq_model(enum acpi_irq_model_id model, | ||
224 | struct fwnode_handle *fwnode); | ||
225 | |||
204 | #ifdef CONFIG_X86_IO_APIC | 226 | #ifdef CONFIG_X86_IO_APIC |
205 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); | 227 | extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); |
206 | #else | 228 | #else |
@@ -217,6 +239,7 @@ struct pci_dev; | |||
217 | 239 | ||
218 | int acpi_pci_irq_enable (struct pci_dev *dev); | 240 | int acpi_pci_irq_enable (struct pci_dev *dev); |
219 | void acpi_penalize_isa_irq(int irq, int active); | 241 | void acpi_penalize_isa_irq(int irq, int active); |
242 | bool acpi_isa_irq_available(int irq); | ||
220 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); | 243 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
221 | void acpi_pci_irq_disable (struct pci_dev *dev); | 244 | void acpi_pci_irq_disable (struct pci_dev *dev); |
222 | 245 | ||
@@ -461,7 +484,22 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) | |||
461 | return false; | 484 | return false; |
462 | } | 485 | } |
463 | 486 | ||
464 | static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) | 487 | static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) |
488 | { | ||
489 | return false; | ||
490 | } | ||
491 | |||
492 | static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) | ||
493 | { | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
497 | static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) | ||
498 | { | ||
499 | return false; | ||
500 | } | ||
501 | |||
502 | static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) | ||
465 | { | 503 | { |
466 | return NULL; | 504 | return NULL; |
467 | } | 505 | } |
@@ -476,6 +514,11 @@ static inline bool has_acpi_companion(struct device *dev) | |||
476 | return false; | 514 | return false; |
477 | } | 515 | } |
478 | 516 | ||
517 | static inline void acpi_preset_companion(struct device *dev, | ||
518 | struct acpi_device *parent, u64 addr) | ||
519 | { | ||
520 | } | ||
521 | |||
479 | static inline const char *acpi_dev_name(struct acpi_device *adev) | 522 | static inline const char *acpi_dev_name(struct acpi_device *adev) |
480 | { | 523 | { |
481 | return NULL; | 524 | return NULL; |
@@ -558,11 +601,16 @@ static inline int acpi_device_modalias(struct device *dev, | |||
558 | return -ENODEV; | 601 | return -ENODEV; |
559 | } | 602 | } |
560 | 603 | ||
561 | static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) | 604 | static inline bool acpi_dma_supported(struct acpi_device *adev) |
562 | { | 605 | { |
563 | return false; | 606 | return false; |
564 | } | 607 | } |
565 | 608 | ||
609 | static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) | ||
610 | { | ||
611 | return DEV_DMA_NOT_SUPPORTED; | ||
612 | } | ||
613 | |||
566 | #define ACPI_PTR(_ptr) (NULL) | 614 | #define ACPI_PTR(_ptr) (NULL) |
567 | 615 | ||
568 | #endif /* !CONFIG_ACPI */ | 616 | #endif /* !CONFIG_ACPI */ |
@@ -743,22 +791,76 @@ struct acpi_reference_args { | |||
743 | #ifdef CONFIG_ACPI | 791 | #ifdef CONFIG_ACPI |
744 | int acpi_dev_get_property(struct acpi_device *adev, const char *name, | 792 | int acpi_dev_get_property(struct acpi_device *adev, const char *name, |
745 | acpi_object_type type, const union acpi_object **obj); | 793 | acpi_object_type type, const union acpi_object **obj); |
746 | int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, | 794 | int acpi_node_get_property_reference(struct fwnode_handle *fwnode, |
747 | acpi_object_type type, | 795 | const char *name, size_t index, |
748 | const union acpi_object **obj); | 796 | struct acpi_reference_args *args); |
749 | int acpi_dev_get_property_reference(struct acpi_device *adev, | 797 | |
750 | const char *name, size_t index, | 798 | int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, |
751 | struct acpi_reference_args *args); | 799 | void **valptr); |
752 | |||
753 | int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, | ||
754 | void **valptr); | ||
755 | int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, | 800 | int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, |
756 | enum dev_prop_type proptype, void *val); | 801 | enum dev_prop_type proptype, void *val); |
802 | int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, | ||
803 | enum dev_prop_type proptype, void *val, size_t nval); | ||
757 | int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, | 804 | int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, |
758 | enum dev_prop_type proptype, void *val, size_t nval); | 805 | enum dev_prop_type proptype, void *val, size_t nval); |
759 | 806 | ||
760 | struct acpi_device *acpi_get_next_child(struct device *dev, | 807 | struct fwnode_handle *acpi_get_next_subnode(struct device *dev, |
761 | struct acpi_device *child); | 808 | struct fwnode_handle *subnode); |
809 | |||
810 | struct acpi_probe_entry; | ||
811 | typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, | ||
812 | struct acpi_probe_entry *); | ||
813 | |||
814 | #define ACPI_TABLE_ID_LEN 5 | ||
815 | |||
816 | /** | ||
817 | * struct acpi_probe_entry - boot-time probing entry | ||
818 | * @id: ACPI table name | ||
819 | * @type: Optional subtable type to match | ||
820 | * (if @id contains subtables) | ||
821 | * @subtable_valid: Optional callback to check the validity of | ||
822 | * the subtable | ||
823 | * @probe_table: Callback to the driver being probed when table | ||
824 | * match is successful | ||
825 | * @probe_subtbl: Callback to the driver being probed when table and | ||
826 | * subtable match (and optional callback is successful) | ||
827 | * @driver_data: Sideband data provided back to the driver | ||
828 | */ | ||
829 | struct acpi_probe_entry { | ||
830 | __u8 id[ACPI_TABLE_ID_LEN]; | ||
831 | __u8 type; | ||
832 | acpi_probe_entry_validate_subtbl subtable_valid; | ||
833 | union { | ||
834 | acpi_tbl_table_handler probe_table; | ||
835 | acpi_tbl_entry_handler probe_subtbl; | ||
836 | }; | ||
837 | kernel_ulong_t driver_data; | ||
838 | }; | ||
839 | |||
840 | #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ | ||
841 | static const struct acpi_probe_entry __acpi_probe_##name \ | ||
842 | __used __section(__##table##_acpi_probe_table) \ | ||
843 | = { \ | ||
844 | .id = table_id, \ | ||
845 | .type = subtable, \ | ||
846 | .subtable_valid = valid, \ | ||
847 | .probe_table = (acpi_tbl_table_handler)fn, \ | ||
848 | .driver_data = data, \ | ||
849 | } | ||
850 | |||
851 | #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table | ||
852 | #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end | ||
853 | |||
854 | int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); | ||
855 | |||
856 | #define acpi_probe_device_table(t) \ | ||
857 | ({ \ | ||
858 | extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ | ||
859 | ACPI_PROBE_TABLE_END(t); \ | ||
860 | __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ | ||
861 | (&ACPI_PROBE_TABLE_END(t) - \ | ||
862 | &ACPI_PROBE_TABLE(t))); \ | ||
863 | }) | ||
762 | #else | 864 | #else |
763 | static inline int acpi_dev_get_property(struct acpi_device *adev, | 865 | static inline int acpi_dev_get_property(struct acpi_device *adev, |
764 | const char *name, acpi_object_type type, | 866 | const char *name, acpi_object_type type, |
@@ -766,16 +868,17 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, | |||
766 | { | 868 | { |
767 | return -ENXIO; | 869 | return -ENXIO; |
768 | } | 870 | } |
769 | static inline int acpi_dev_get_property_array(struct acpi_device *adev, | 871 | |
770 | const char *name, | 872 | static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, |
771 | acpi_object_type type, | 873 | const char *name, const char *cells_name, |
772 | const union acpi_object **obj) | 874 | size_t index, struct acpi_reference_args *args) |
773 | { | 875 | { |
774 | return -ENXIO; | 876 | return -ENXIO; |
775 | } | 877 | } |
776 | static inline int acpi_dev_get_property_reference(struct acpi_device *adev, | 878 | |
777 | const char *name, const char *cells_name, | 879 | static inline int acpi_node_prop_get(struct fwnode_handle *fwnode, |
778 | size_t index, struct acpi_reference_args *args) | 880 | const char *propname, |
881 | void **valptr) | ||
779 | { | 882 | { |
780 | return -ENXIO; | 883 | return -ENXIO; |
781 | } | 884 | } |
@@ -795,6 +898,14 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev, | |||
795 | return -ENXIO; | 898 | return -ENXIO; |
796 | } | 899 | } |
797 | 900 | ||
901 | static inline int acpi_node_prop_read(struct fwnode_handle *fwnode, | ||
902 | const char *propname, | ||
903 | enum dev_prop_type proptype, | ||
904 | void *val, size_t nval) | ||
905 | { | ||
906 | return -ENXIO; | ||
907 | } | ||
908 | |||
798 | static inline int acpi_dev_prop_read(struct acpi_device *adev, | 909 | static inline int acpi_dev_prop_read(struct acpi_device *adev, |
799 | const char *propname, | 910 | const char *propname, |
800 | enum dev_prop_type proptype, | 911 | enum dev_prop_type proptype, |
@@ -803,12 +914,22 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev, | |||
803 | return -ENXIO; | 914 | return -ENXIO; |
804 | } | 915 | } |
805 | 916 | ||
806 | static inline struct acpi_device *acpi_get_next_child(struct device *dev, | 917 | static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, |
807 | struct acpi_device *child) | 918 | struct fwnode_handle *subnode) |
808 | { | 919 | { |
809 | return NULL; | 920 | return NULL; |
810 | } | 921 | } |
811 | 922 | ||
923 | #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ | ||
924 | static const void * __acpi_table_##name[] \ | ||
925 | __attribute__((unused)) \ | ||
926 | = { (void *) table_id, \ | ||
927 | (void *) subtable, \ | ||
928 | (void *) valid, \ | ||
929 | (void *) fn, \ | ||
930 | (void *) data } | ||
931 | |||
932 | #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) | ||
812 | #endif | 933 | #endif |
813 | 934 | ||
814 | #endif /*_LINUX_ACPI_H*/ | 935 | #endif /*_LINUX_ACPI_H*/ |
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h deleted file mode 100644 index f10c87265855..000000000000 --- a/include/linux/acpi_irq.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef _LINUX_ACPI_IRQ_H | ||
2 | #define _LINUX_ACPI_IRQ_H | ||
3 | |||
4 | #include <linux/irq.h> | ||
5 | |||
6 | #ifndef acpi_irq_init | ||
7 | static inline void acpi_irq_init(void) { } | ||
8 | #endif | ||
9 | |||
10 | #endif /* _LINUX_ACPI_IRQ_H */ | ||
diff --git a/include/linux/aer.h b/include/linux/aer.h index 4fef65e57023..744b997d6a94 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h | |||
@@ -42,6 +42,7 @@ struct aer_capability_regs { | |||
42 | int pci_enable_pcie_error_reporting(struct pci_dev *dev); | 42 | int pci_enable_pcie_error_reporting(struct pci_dev *dev); |
43 | int pci_disable_pcie_error_reporting(struct pci_dev *dev); | 43 | int pci_disable_pcie_error_reporting(struct pci_dev *dev); |
44 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); | 44 | int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); |
45 | int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); | ||
45 | #else | 46 | #else |
46 | static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) | 47 | static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) |
47 | { | 48 | { |
@@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) | |||
55 | { | 56 | { |
56 | return -EINVAL; | 57 | return -EINVAL; |
57 | } | 58 | } |
59 | static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) | ||
60 | { | ||
61 | return -EINVAL; | ||
62 | } | ||
58 | #endif | 63 | #endif |
59 | 64 | ||
60 | void cper_print_aer(struct pci_dev *dev, int cper_severity, | 65 | void cper_print_aer(struct pci_dev *dev, int cper_severity, |
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 50fc66868402..9006c4e75cf7 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h | |||
@@ -41,8 +41,6 @@ struct amba_driver { | |||
41 | int (*probe)(struct amba_device *, const struct amba_id *); | 41 | int (*probe)(struct amba_device *, const struct amba_id *); |
42 | int (*remove)(struct amba_device *); | 42 | int (*remove)(struct amba_device *); |
43 | void (*shutdown)(struct amba_device *); | 43 | void (*shutdown)(struct amba_device *); |
44 | int (*suspend)(struct amba_device *, pm_message_t); | ||
45 | int (*resume)(struct amba_device *); | ||
46 | const struct amba_id *id_table; | 44 | const struct amba_id *id_table; |
47 | }; | 45 | }; |
48 | 46 | ||
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h deleted file mode 100644 index df0356220730..000000000000 --- a/include/linux/arcdevice.h +++ /dev/null | |||
@@ -1,342 +0,0 @@ | |||
1 | /* | ||
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
3 | * operating system. NET is implemented using the BSD Socket | ||
4 | * interface as the means of communication with the user level. | ||
5 | * | ||
6 | * Definitions used by the ARCnet driver. | ||
7 | * | ||
8 | * Authors: Avery Pennarun and David Woodhouse | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | #ifndef _LINUX_ARCDEVICE_H | ||
17 | #define _LINUX_ARCDEVICE_H | ||
18 | |||
19 | #include <asm/timex.h> | ||
20 | #include <linux/if_arcnet.h> | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | #include <linux/irqreturn.h> | ||
24 | |||
25 | /* | ||
26 | * RECON_THRESHOLD is the maximum number of RECON messages to receive | ||
27 | * within one minute before printing a "cabling problem" warning. The | ||
28 | * default value should be fine. | ||
29 | * | ||
30 | * After that, a "cabling restored" message will be printed on the next IRQ | ||
31 | * if no RECON messages have been received for 10 seconds. | ||
32 | * | ||
33 | * Do not define RECON_THRESHOLD at all if you want to disable this feature. | ||
34 | */ | ||
35 | #define RECON_THRESHOLD 30 | ||
36 | |||
37 | |||
38 | /* | ||
39 | * Define this to the minimum "timeout" value. If a transmit takes longer | ||
40 | * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large | ||
41 | * network, or one with heavy network traffic, this timeout may need to be | ||
42 | * increased. The larger it is, though, the longer it will be between | ||
43 | * necessary transmits - don't set this too high. | ||
44 | */ | ||
45 | #define TX_TIMEOUT (HZ * 200 / 1000) | ||
46 | |||
47 | |||
48 | /* Display warnings about the driver being an ALPHA version. */ | ||
49 | #undef ALPHA_WARNING | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Debugging bitflags: each option can be enabled individually. | ||
54 | * | ||
55 | * Note: only debug flags included in the ARCNET_DEBUG_MAX define will | ||
56 | * actually be available. GCC will (at least, GCC 2.7.0 will) notice | ||
57 | * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize | ||
58 | * them out. | ||
59 | */ | ||
60 | #define D_NORMAL 1 /* important operational info */ | ||
61 | #define D_EXTRA 2 /* useful, but non-vital information */ | ||
62 | #define D_INIT 4 /* show init/probe messages */ | ||
63 | #define D_INIT_REASONS 8 /* show reasons for discarding probes */ | ||
64 | #define D_RECON 32 /* print a message whenever token is lost */ | ||
65 | #define D_PROTO 64 /* debug auto-protocol support */ | ||
66 | /* debug levels below give LOTS of output during normal operation! */ | ||
67 | #define D_DURING 128 /* trace operations (including irq's) */ | ||
68 | #define D_TX 256 /* show tx packets */ | ||
69 | #define D_RX 512 /* show rx packets */ | ||
70 | #define D_SKB 1024 /* show skb's */ | ||
71 | #define D_SKB_SIZE 2048 /* show skb sizes */ | ||
72 | #define D_TIMING 4096 /* show time needed to copy buffers to card */ | ||
73 | #define D_DEBUG 8192 /* Very detailed debug line for line */ | ||
74 | |||
75 | #ifndef ARCNET_DEBUG_MAX | ||
76 | #define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */ | ||
77 | #endif | ||
78 | |||
79 | #ifndef ARCNET_DEBUG | ||
80 | #define ARCNET_DEBUG (D_NORMAL|D_EXTRA) | ||
81 | #endif | ||
82 | extern int arcnet_debug; | ||
83 | |||
84 | /* macros to simplify debug checking */ | ||
85 | #define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x)) | ||
86 | #define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0) | ||
87 | #define BUGMSG(x,msg,args...) \ | ||
88 | BUGMSG2(x, "%s%6s: " msg, \ | ||
89 | x==D_NORMAL ? KERN_WARNING \ | ||
90 | : x < D_DURING ? KERN_INFO : KERN_DEBUG, \ | ||
91 | dev->name , ## args) | ||
92 | |||
93 | /* see how long a function call takes to run, expressed in CPU cycles */ | ||
94 | #define TIME(name, bytes, call) BUGLVL(D_TIMING) { \ | ||
95 | unsigned long _x, _y; \ | ||
96 | _x = get_cycles(); \ | ||
97 | call; \ | ||
98 | _y = get_cycles(); \ | ||
99 | BUGMSG(D_TIMING, \ | ||
100 | "%s: %d bytes in %lu cycles == " \ | ||
101 | "%lu Kbytes/100Mcycle\n",\ | ||
102 | name, bytes, _y - _x, \ | ||
103 | 100000000 / 1024 * bytes / (_y - _x + 1));\ | ||
104 | } \ | ||
105 | else { \ | ||
106 | call;\ | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * Time needed to reset the card - in ms (milliseconds). This works on my | ||
112 | * SMC PC100. I can't find a reference that tells me just how long I | ||
113 | * should wait. | ||
114 | */ | ||
115 | #define RESETtime (300) | ||
116 | |||
117 | /* | ||
118 | * These are the max/min lengths of packet payload, not including the | ||
119 | * arc_hardware header, but definitely including the soft header. | ||
120 | * | ||
121 | * Note: packet sizes 254, 255, 256 are impossible because of the way | ||
122 | * ARCnet registers work That's why RFC1201 defines "exception" packets. | ||
123 | * In non-RFC1201 protocols, we have to just tack some extra bytes on the | ||
124 | * end. | ||
125 | */ | ||
126 | #define MTU 253 /* normal packet max size */ | ||
127 | #define MinTU 257 /* extended packet min size */ | ||
128 | #define XMTU 508 /* extended packet max size */ | ||
129 | |||
130 | /* status/interrupt mask bit fields */ | ||
131 | #define TXFREEflag 0x01 /* transmitter available */ | ||
132 | #define TXACKflag 0x02 /* transmitted msg. ackd */ | ||
133 | #define RECONflag 0x04 /* network reconfigured */ | ||
134 | #define TESTflag 0x08 /* test flag */ | ||
135 | #define EXCNAKflag 0x08 /* excesive nak flag */ | ||
136 | #define RESETflag 0x10 /* power-on-reset */ | ||
137 | #define RES1flag 0x20 /* reserved - usually set by jumper */ | ||
138 | #define RES2flag 0x40 /* reserved - usually set by jumper */ | ||
139 | #define NORXflag 0x80 /* receiver inhibited */ | ||
140 | |||
141 | /* Flags used for IO-mapped memory operations */ | ||
142 | #define AUTOINCflag 0x40 /* Increase location with each access */ | ||
143 | #define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */ | ||
144 | #define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */ | ||
145 | |||
146 | /* in the command register, the following bits have these meanings: | ||
147 | * 0-2 command | ||
148 | * 3-4 page number (for enable rcv/xmt command) | ||
149 | * 7 receive broadcasts | ||
150 | */ | ||
151 | #define NOTXcmd 0x01 /* disable transmitter */ | ||
152 | #define NORXcmd 0x02 /* disable receiver */ | ||
153 | #define TXcmd 0x03 /* enable transmitter */ | ||
154 | #define RXcmd 0x04 /* enable receiver */ | ||
155 | #define CONFIGcmd 0x05 /* define configuration */ | ||
156 | #define CFLAGScmd 0x06 /* clear flags */ | ||
157 | #define TESTcmd 0x07 /* load test flags */ | ||
158 | |||
159 | /* flags for "clear flags" command */ | ||
160 | #define RESETclear 0x08 /* power-on-reset */ | ||
161 | #define CONFIGclear 0x10 /* system reconfigured */ | ||
162 | |||
163 | #define EXCNAKclear 0x0E /* Clear and acknowledge the excive nak bit */ | ||
164 | |||
165 | /* flags for "load test flags" command */ | ||
166 | #define TESTload 0x08 /* test flag (diagnostic) */ | ||
167 | |||
168 | /* byte deposited into first address of buffers on reset */ | ||
169 | #define TESTvalue 0321 /* that's octal for 0xD1 :) */ | ||
170 | |||
171 | /* for "enable receiver" command */ | ||
172 | #define RXbcasts 0x80 /* receive broadcasts */ | ||
173 | |||
174 | /* flags for "define configuration" command */ | ||
175 | #define NORMALconf 0x00 /* 1-249 byte packets */ | ||
176 | #define EXTconf 0x08 /* 250-504 byte packets */ | ||
177 | |||
178 | /* card feature flags, set during auto-detection. | ||
179 | * (currently only used by com20020pci) | ||
180 | */ | ||
181 | #define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ | ||
182 | #define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, | ||
183 | but default is 2.5MBit. */ | ||
184 | |||
185 | |||
186 | /* information needed to define an encapsulation driver */ | ||
187 | struct ArcProto { | ||
188 | char suffix; /* a for RFC1201, e for ether-encap, etc. */ | ||
189 | int mtu; /* largest possible packet */ | ||
190 | int is_ip; /* This is a ip plugin - not a raw thing */ | ||
191 | |||
192 | void (*rx) (struct net_device * dev, int bufnum, | ||
193 | struct archdr * pkthdr, int length); | ||
194 | int (*build_header) (struct sk_buff * skb, struct net_device *dev, | ||
195 | unsigned short ethproto, uint8_t daddr); | ||
196 | |||
197 | /* these functions return '1' if the skb can now be freed */ | ||
198 | int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length, | ||
199 | int bufnum); | ||
200 | int (*continue_tx) (struct net_device * dev, int bufnum); | ||
201 | int (*ack_tx) (struct net_device * dev, int acked); | ||
202 | }; | ||
203 | |||
204 | extern struct ArcProto *arc_proto_map[256], *arc_proto_default, | ||
205 | *arc_bcast_proto, *arc_raw_proto; | ||
206 | |||
207 | |||
208 | /* | ||
209 | * "Incoming" is information needed for each address that could be sending | ||
210 | * to us. Mostly for partially-received split packets. | ||
211 | */ | ||
212 | struct Incoming { | ||
213 | struct sk_buff *skb; /* packet data buffer */ | ||
214 | __be16 sequence; /* sequence number of assembly */ | ||
215 | uint8_t lastpacket, /* number of last packet (from 1) */ | ||
216 | numpackets; /* number of packets in split */ | ||
217 | }; | ||
218 | |||
219 | |||
220 | /* only needed for RFC1201 */ | ||
221 | struct Outgoing { | ||
222 | struct ArcProto *proto; /* protocol driver that owns this: | ||
223 | * if NULL, no packet is pending. | ||
224 | */ | ||
225 | struct sk_buff *skb; /* buffer from upper levels */ | ||
226 | struct archdr *pkt; /* a pointer into the skb */ | ||
227 | uint16_t length, /* bytes total */ | ||
228 | dataleft, /* bytes left */ | ||
229 | segnum, /* segment being sent */ | ||
230 | numsegs; /* number of segments */ | ||
231 | }; | ||
232 | |||
233 | |||
234 | struct arcnet_local { | ||
235 | uint8_t config, /* current value of CONFIG register */ | ||
236 | timeout, /* Extended timeout for COM20020 */ | ||
237 | backplane, /* Backplane flag for COM20020 */ | ||
238 | clockp, /* COM20020 clock divider */ | ||
239 | clockm, /* COM20020 clock multiplier flag */ | ||
240 | setup, /* Contents of setup1 register */ | ||
241 | setup2, /* Contents of setup2 register */ | ||
242 | intmask; /* current value of INTMASK register */ | ||
243 | uint8_t default_proto[256]; /* default encap to use for each host */ | ||
244 | int cur_tx, /* buffer used by current transmit, or -1 */ | ||
245 | next_tx, /* buffer where a packet is ready to send */ | ||
246 | cur_rx; /* current receive buffer */ | ||
247 | int lastload_dest, /* can last loaded packet be acked? */ | ||
248 | lasttrans_dest; /* can last TX'd packet be acked? */ | ||
249 | int timed_out; /* need to process TX timeout and drop packet */ | ||
250 | unsigned long last_timeout; /* time of last reported timeout */ | ||
251 | char *card_name; /* card ident string */ | ||
252 | int card_flags; /* special card features */ | ||
253 | |||
254 | |||
255 | /* On preemtive and SMB a lock is needed */ | ||
256 | spinlock_t lock; | ||
257 | |||
258 | /* | ||
259 | * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of | ||
260 | * which can be used for either sending or receiving. The new dynamic | ||
261 | * buffer management routines use a simple circular queue of available | ||
262 | * buffers, and take them as they're needed. This way, we simplify | ||
263 | * situations in which we (for example) want to pre-load a transmit | ||
264 | * buffer, or start receiving while we copy a received packet to | ||
265 | * memory. | ||
266 | * | ||
267 | * The rules: only the interrupt handler is allowed to _add_ buffers to | ||
268 | * the queue; thus, this doesn't require a lock. Both the interrupt | ||
269 | * handler and the transmit function will want to _remove_ buffers, so | ||
270 | * we need to handle the situation where they try to do it at the same | ||
271 | * time. | ||
272 | * | ||
273 | * If next_buf == first_free_buf, the queue is empty. Since there are | ||
274 | * only four possible buffers, the queue should never be full. | ||
275 | */ | ||
276 | atomic_t buf_lock; | ||
277 | int buf_queue[5]; | ||
278 | int next_buf, first_free_buf; | ||
279 | |||
280 | /* network "reconfiguration" handling */ | ||
281 | unsigned long first_recon; /* time of "first" RECON message to count */ | ||
282 | unsigned long last_recon; /* time of most recent RECON */ | ||
283 | int num_recons; /* number of RECONs between first and last. */ | ||
284 | int network_down; /* do we think the network is down? */ | ||
285 | |||
286 | int excnak_pending; /* We just got an excesive nak interrupt */ | ||
287 | |||
288 | struct { | ||
289 | uint16_t sequence; /* sequence number (incs with each packet) */ | ||
290 | __be16 aborted_seq; | ||
291 | |||
292 | struct Incoming incoming[256]; /* one from each address */ | ||
293 | } rfc1201; | ||
294 | |||
295 | /* really only used by rfc1201, but we'll pretend it's not */ | ||
296 | struct Outgoing outgoing; /* packet currently being sent */ | ||
297 | |||
298 | /* hardware-specific functions */ | ||
299 | struct { | ||
300 | struct module *owner; | ||
301 | void (*command) (struct net_device * dev, int cmd); | ||
302 | int (*status) (struct net_device * dev); | ||
303 | void (*intmask) (struct net_device * dev, int mask); | ||
304 | int (*reset) (struct net_device * dev, int really_reset); | ||
305 | void (*open) (struct net_device * dev); | ||
306 | void (*close) (struct net_device * dev); | ||
307 | |||
308 | void (*copy_to_card) (struct net_device * dev, int bufnum, int offset, | ||
309 | void *buf, int count); | ||
310 | void (*copy_from_card) (struct net_device * dev, int bufnum, int offset, | ||
311 | void *buf, int count); | ||
312 | } hw; | ||
313 | |||
314 | void __iomem *mem_start; /* pointer to ioremap'ed MMIO */ | ||
315 | }; | ||
316 | |||
317 | |||
318 | #define ARCRESET(x) (lp->hw.reset(dev, (x))) | ||
319 | #define ACOMMAND(x) (lp->hw.command(dev, (x))) | ||
320 | #define ASTATUS() (lp->hw.status(dev)) | ||
321 | #define AINTMASK(x) (lp->hw.intmask(dev, (x))) | ||
322 | |||
323 | |||
324 | |||
325 | #if ARCNET_DEBUG_MAX & D_SKB | ||
326 | void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); | ||
327 | #else | ||
328 | #define arcnet_dump_skb(dev,skb,desc) ; | ||
329 | #endif | ||
330 | |||
331 | void arcnet_unregister_proto(struct ArcProto *proto); | ||
332 | irqreturn_t arcnet_interrupt(int irq, void *dev_id); | ||
333 | struct net_device *alloc_arcdev(const char *name); | ||
334 | |||
335 | int arcnet_open(struct net_device *dev); | ||
336 | int arcnet_close(struct net_device *dev); | ||
337 | netdev_tx_t arcnet_send_packet(struct sk_buff *skb, | ||
338 | struct net_device *dev); | ||
339 | void arcnet_timeout(struct net_device *dev); | ||
340 | |||
341 | #endif /* __KERNEL__ */ | ||
342 | #endif /* _LINUX_ARCDEVICE_H */ | ||
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h index b87c1c7c242a..468fdfa643f0 100644 --- a/include/linux/atmel_tc.h +++ b/include/linux/atmel_tc.h | |||
@@ -67,6 +67,7 @@ struct atmel_tc { | |||
67 | const struct atmel_tcb_config *tcb_config; | 67 | const struct atmel_tcb_config *tcb_config; |
68 | int irq[3]; | 68 | int irq[3]; |
69 | struct clk *clk[3]; | 69 | struct clk *clk[3]; |
70 | struct clk *slow_clk; | ||
70 | struct list_head node; | 71 | struct list_head node; |
71 | bool allocated; | 72 | bool allocated; |
72 | }; | 73 | }; |
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 00a5763e850e..301de78d65f7 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
@@ -81,6 +81,30 @@ | |||
81 | #endif | 81 | #endif |
82 | #endif /* atomic_add_return_relaxed */ | 82 | #endif /* atomic_add_return_relaxed */ |
83 | 83 | ||
84 | /* atomic_inc_return_relaxed */ | ||
85 | #ifndef atomic_inc_return_relaxed | ||
86 | #define atomic_inc_return_relaxed atomic_inc_return | ||
87 | #define atomic_inc_return_acquire atomic_inc_return | ||
88 | #define atomic_inc_return_release atomic_inc_return | ||
89 | |||
90 | #else /* atomic_inc_return_relaxed */ | ||
91 | |||
92 | #ifndef atomic_inc_return_acquire | ||
93 | #define atomic_inc_return_acquire(...) \ | ||
94 | __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) | ||
95 | #endif | ||
96 | |||
97 | #ifndef atomic_inc_return_release | ||
98 | #define atomic_inc_return_release(...) \ | ||
99 | __atomic_op_release(atomic_inc_return, __VA_ARGS__) | ||
100 | #endif | ||
101 | |||
102 | #ifndef atomic_inc_return | ||
103 | #define atomic_inc_return(...) \ | ||
104 | __atomic_op_fence(atomic_inc_return, __VA_ARGS__) | ||
105 | #endif | ||
106 | #endif /* atomic_inc_return_relaxed */ | ||
107 | |||
84 | /* atomic_sub_return_relaxed */ | 108 | /* atomic_sub_return_relaxed */ |
85 | #ifndef atomic_sub_return_relaxed | 109 | #ifndef atomic_sub_return_relaxed |
86 | #define atomic_sub_return_relaxed atomic_sub_return | 110 | #define atomic_sub_return_relaxed atomic_sub_return |
@@ -105,6 +129,30 @@ | |||
105 | #endif | 129 | #endif |
106 | #endif /* atomic_sub_return_relaxed */ | 130 | #endif /* atomic_sub_return_relaxed */ |
107 | 131 | ||
132 | /* atomic_dec_return_relaxed */ | ||
133 | #ifndef atomic_dec_return_relaxed | ||
134 | #define atomic_dec_return_relaxed atomic_dec_return | ||
135 | #define atomic_dec_return_acquire atomic_dec_return | ||
136 | #define atomic_dec_return_release atomic_dec_return | ||
137 | |||
138 | #else /* atomic_dec_return_relaxed */ | ||
139 | |||
140 | #ifndef atomic_dec_return_acquire | ||
141 | #define atomic_dec_return_acquire(...) \ | ||
142 | __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) | ||
143 | #endif | ||
144 | |||
145 | #ifndef atomic_dec_return_release | ||
146 | #define atomic_dec_return_release(...) \ | ||
147 | __atomic_op_release(atomic_dec_return, __VA_ARGS__) | ||
148 | #endif | ||
149 | |||
150 | #ifndef atomic_dec_return | ||
151 | #define atomic_dec_return(...) \ | ||
152 | __atomic_op_fence(atomic_dec_return, __VA_ARGS__) | ||
153 | #endif | ||
154 | #endif /* atomic_dec_return_relaxed */ | ||
155 | |||
108 | /* atomic_xchg_relaxed */ | 156 | /* atomic_xchg_relaxed */ |
109 | #ifndef atomic_xchg_relaxed | 157 | #ifndef atomic_xchg_relaxed |
110 | #define atomic_xchg_relaxed atomic_xchg | 158 | #define atomic_xchg_relaxed atomic_xchg |
@@ -185,6 +233,31 @@ | |||
185 | #endif | 233 | #endif |
186 | #endif /* atomic64_add_return_relaxed */ | 234 | #endif /* atomic64_add_return_relaxed */ |
187 | 235 | ||
236 | /* atomic64_inc_return_relaxed */ | ||
237 | #ifndef atomic64_inc_return_relaxed | ||
238 | #define atomic64_inc_return_relaxed atomic64_inc_return | ||
239 | #define atomic64_inc_return_acquire atomic64_inc_return | ||
240 | #define atomic64_inc_return_release atomic64_inc_return | ||
241 | |||
242 | #else /* atomic64_inc_return_relaxed */ | ||
243 | |||
244 | #ifndef atomic64_inc_return_acquire | ||
245 | #define atomic64_inc_return_acquire(...) \ | ||
246 | __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) | ||
247 | #endif | ||
248 | |||
249 | #ifndef atomic64_inc_return_release | ||
250 | #define atomic64_inc_return_release(...) \ | ||
251 | __atomic_op_release(atomic64_inc_return, __VA_ARGS__) | ||
252 | #endif | ||
253 | |||
254 | #ifndef atomic64_inc_return | ||
255 | #define atomic64_inc_return(...) \ | ||
256 | __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) | ||
257 | #endif | ||
258 | #endif /* atomic64_inc_return_relaxed */ | ||
259 | |||
260 | |||
188 | /* atomic64_sub_return_relaxed */ | 261 | /* atomic64_sub_return_relaxed */ |
189 | #ifndef atomic64_sub_return_relaxed | 262 | #ifndef atomic64_sub_return_relaxed |
190 | #define atomic64_sub_return_relaxed atomic64_sub_return | 263 | #define atomic64_sub_return_relaxed atomic64_sub_return |
@@ -209,6 +282,30 @@ | |||
209 | #endif | 282 | #endif |
210 | #endif /* atomic64_sub_return_relaxed */ | 283 | #endif /* atomic64_sub_return_relaxed */ |
211 | 284 | ||
285 | /* atomic64_dec_return_relaxed */ | ||
286 | #ifndef atomic64_dec_return_relaxed | ||
287 | #define atomic64_dec_return_relaxed atomic64_dec_return | ||
288 | #define atomic64_dec_return_acquire atomic64_dec_return | ||
289 | #define atomic64_dec_return_release atomic64_dec_return | ||
290 | |||
291 | #else /* atomic64_dec_return_relaxed */ | ||
292 | |||
293 | #ifndef atomic64_dec_return_acquire | ||
294 | #define atomic64_dec_return_acquire(...) \ | ||
295 | __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) | ||
296 | #endif | ||
297 | |||
298 | #ifndef atomic64_dec_return_release | ||
299 | #define atomic64_dec_return_release(...) \ | ||
300 | __atomic_op_release(atomic64_dec_return, __VA_ARGS__) | ||
301 | #endif | ||
302 | |||
303 | #ifndef atomic64_dec_return | ||
304 | #define atomic64_dec_return(...) \ | ||
305 | __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) | ||
306 | #endif | ||
307 | #endif /* atomic64_dec_return_relaxed */ | ||
308 | |||
212 | /* atomic64_xchg_relaxed */ | 309 | /* atomic64_xchg_relaxed */ |
213 | #ifndef atomic64_xchg_relaxed | 310 | #ifndef atomic64_xchg_relaxed |
214 | #define atomic64_xchg_relaxed atomic64_xchg | 311 | #define atomic64_xchg_relaxed atomic64_xchg |
@@ -451,7 +548,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
451 | } | 548 | } |
452 | #endif | 549 | #endif |
453 | 550 | ||
454 | #include <asm-generic/atomic-long.h> | ||
455 | #ifdef CONFIG_GENERIC_ATOMIC64 | 551 | #ifdef CONFIG_GENERIC_ATOMIC64 |
456 | #include <asm-generic/atomic64.h> | 552 | #include <asm-generic/atomic64.h> |
457 | #endif | 553 | #endif |
@@ -463,4 +559,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v) | |||
463 | } | 559 | } |
464 | #endif | 560 | #endif |
465 | 561 | ||
562 | #include <asm-generic/atomic-long.h> | ||
563 | |||
466 | #endif /* _LINUX_ATOMIC_H */ | 564 | #endif /* _LINUX_ATOMIC_H */ |
diff --git a/include/linux/audit.h b/include/linux/audit.h index b2abc996c25d..20eba1eb0a3c 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -143,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent, | |||
143 | extern void __audit_seccomp(unsigned long syscall, long signr, int code); | 143 | extern void __audit_seccomp(unsigned long syscall, long signr, int code); |
144 | extern void __audit_ptrace(struct task_struct *t); | 144 | extern void __audit_ptrace(struct task_struct *t); |
145 | 145 | ||
146 | static inline int audit_dummy_context(void) | 146 | static inline bool audit_dummy_context(void) |
147 | { | 147 | { |
148 | void *p = current->audit_context; | 148 | void *p = current->audit_context; |
149 | return !p || *(int *)p; | 149 | return !p || *(int *)p; |
@@ -345,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0, | |||
345 | { } | 345 | { } |
346 | static inline void audit_syscall_exit(void *pt_regs) | 346 | static inline void audit_syscall_exit(void *pt_regs) |
347 | { } | 347 | { } |
348 | static inline int audit_dummy_context(void) | 348 | static inline bool audit_dummy_context(void) |
349 | { | 349 | { |
350 | return 1; | 350 | return true; |
351 | } | 351 | } |
352 | static inline struct filename *audit_reusename(const __user char *name) | 352 | static inline struct filename *audit_reusename(const __user char *name) |
353 | { | 353 | { |
@@ -457,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp | |||
457 | extern __printf(2, 3) | 457 | extern __printf(2, 3) |
458 | void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); | 458 | void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); |
459 | extern void audit_log_end(struct audit_buffer *ab); | 459 | extern void audit_log_end(struct audit_buffer *ab); |
460 | extern int audit_string_contains_control(const char *string, | 460 | extern bool audit_string_contains_control(const char *string, |
461 | size_t len); | 461 | size_t len); |
462 | extern void audit_log_n_hex(struct audit_buffer *ab, | 462 | extern void audit_log_n_hex(struct audit_buffer *ab, |
463 | const unsigned char *buf, | 463 | const unsigned char *buf, |
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
@@ -116,6 +116,8 @@ struct bdi_writeback { | |||
116 | struct list_head work_list; | 116 | struct list_head work_list; |
117 | struct delayed_work dwork; /* work item used for writeback */ | 117 | struct delayed_work dwork; /* work item used for writeback */ |
118 | 118 | ||
119 | struct list_head bdi_node; /* anchored at bdi->wb_list */ | ||
120 | |||
119 | #ifdef CONFIG_CGROUP_WRITEBACK | 121 | #ifdef CONFIG_CGROUP_WRITEBACK |
120 | struct percpu_ref refcnt; /* used only for !root wb's */ | 122 | struct percpu_ref refcnt; /* used only for !root wb's */ |
121 | struct fprop_local_percpu memcg_completions; | 123 | struct fprop_local_percpu memcg_completions; |
@@ -150,6 +152,7 @@ struct backing_dev_info { | |||
150 | atomic_long_t tot_write_bandwidth; | 152 | atomic_long_t tot_write_bandwidth; |
151 | 153 | ||
152 | struct bdi_writeback wb; /* the root writeback info for this bdi */ | 154 | struct bdi_writeback wb; /* the root writeback info for this bdi */ |
155 | struct list_head wb_list; /* list of all wbs */ | ||
153 | #ifdef CONFIG_CGROUP_WRITEBACK | 156 | #ifdef CONFIG_CGROUP_WRITEBACK |
154 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | 157 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
155 | struct rb_root cgwb_congested_tree; /* their congested states */ | 158 | struct rb_root cgwb_congested_tree; /* their congested states */ |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index d5eb4ad1c534..c82794f20110 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -13,19 +13,22 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/blkdev.h> | 14 | #include <linux/blkdev.h> |
15 | #include <linux/writeback.h> | 15 | #include <linux/writeback.h> |
16 | #include <linux/memcontrol.h> | ||
17 | #include <linux/blk-cgroup.h> | 16 | #include <linux/blk-cgroup.h> |
18 | #include <linux/backing-dev-defs.h> | 17 | #include <linux/backing-dev-defs.h> |
19 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
20 | 19 | ||
21 | int __must_check bdi_init(struct backing_dev_info *bdi); | 20 | int __must_check bdi_init(struct backing_dev_info *bdi); |
22 | void bdi_destroy(struct backing_dev_info *bdi); | 21 | void bdi_exit(struct backing_dev_info *bdi); |
23 | 22 | ||
24 | __printf(3, 4) | 23 | __printf(3, 4) |
25 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, | 24 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, |
26 | const char *fmt, ...); | 25 | const char *fmt, ...); |
27 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); | 26 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
27 | void bdi_unregister(struct backing_dev_info *bdi); | ||
28 | |||
28 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); | 29 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); |
30 | void bdi_destroy(struct backing_dev_info *bdi); | ||
31 | |||
29 | void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, | 32 | void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, |
30 | bool range_cyclic, enum wb_reason reason); | 33 | bool range_cyclic, enum wb_reason reason); |
31 | void wb_start_background_writeback(struct bdi_writeback *wb); | 34 | void wb_start_background_writeback(struct bdi_writeback *wb); |
@@ -263,8 +266,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode) | |||
263 | { | 266 | { |
264 | struct backing_dev_info *bdi = inode_to_bdi(inode); | 267 | struct backing_dev_info *bdi = inode_to_bdi(inode); |
265 | 268 | ||
266 | return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && | 269 | return cgroup_subsys_on_dfl(memory_cgrp_subsys) && |
267 | cgroup_on_dfl(blkcg_root_css->cgroup) && | 270 | cgroup_subsys_on_dfl(io_cgrp_subsys) && |
268 | bdi_cap_account_dirty(bdi) && | 271 | bdi_cap_account_dirty(bdi) && |
269 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && | 272 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && |
270 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); | 273 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); |
@@ -408,61 +411,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | |||
408 | rcu_read_unlock(); | 411 | rcu_read_unlock(); |
409 | } | 412 | } |
410 | 413 | ||
411 | struct wb_iter { | ||
412 | int start_memcg_id; | ||
413 | struct radix_tree_iter tree_iter; | ||
414 | void **slot; | ||
415 | }; | ||
416 | |||
417 | static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, | ||
418 | struct backing_dev_info *bdi) | ||
419 | { | ||
420 | struct radix_tree_iter *titer = &iter->tree_iter; | ||
421 | |||
422 | WARN_ON_ONCE(!rcu_read_lock_held()); | ||
423 | |||
424 | if (iter->start_memcg_id >= 0) { | ||
425 | iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); | ||
426 | iter->start_memcg_id = -1; | ||
427 | } else { | ||
428 | iter->slot = radix_tree_next_slot(iter->slot, titer, 0); | ||
429 | } | ||
430 | |||
431 | if (!iter->slot) | ||
432 | iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); | ||
433 | if (iter->slot) | ||
434 | return *iter->slot; | ||
435 | return NULL; | ||
436 | } | ||
437 | |||
438 | static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, | ||
439 | struct backing_dev_info *bdi, | ||
440 | int start_memcg_id) | ||
441 | { | ||
442 | iter->start_memcg_id = start_memcg_id; | ||
443 | |||
444 | if (start_memcg_id) | ||
445 | return __wb_iter_next(iter, bdi); | ||
446 | else | ||
447 | return &bdi->wb; | ||
448 | } | ||
449 | |||
450 | /** | ||
451 | * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order | ||
452 | * @wb_cur: cursor struct bdi_writeback pointer | ||
453 | * @bdi: bdi to walk wb's of | ||
454 | * @iter: pointer to struct wb_iter to be used as iteration buffer | ||
455 | * @start_memcg_id: memcg ID to start iteration from | ||
456 | * | ||
457 | * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending | ||
458 | * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter | ||
459 | * to be used as temp storage during iteration. rcu_read_lock() must be | ||
460 | * held throughout iteration. | ||
461 | */ | ||
462 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ | ||
463 | for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ | ||
464 | (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) | ||
465 | |||
466 | #else /* CONFIG_CGROUP_WRITEBACK */ | 414 | #else /* CONFIG_CGROUP_WRITEBACK */ |
467 | 415 | ||
468 | static inline bool inode_cgwb_enabled(struct inode *inode) | 416 | static inline bool inode_cgwb_enabled(struct inode *inode) |
@@ -522,14 +470,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) | |||
522 | { | 470 | { |
523 | } | 471 | } |
524 | 472 | ||
525 | struct wb_iter { | ||
526 | int next_id; | ||
527 | }; | ||
528 | |||
529 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ | ||
530 | for ((iter)->next_id = (start_blkcg_id); \ | ||
531 | ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); ) | ||
532 | |||
533 | static inline int inode_congested(struct inode *inode, int cong_bits) | 473 | static inline int inode_congested(struct inode *inode, int cong_bits) |
534 | { | 474 | { |
535 | return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); | 475 | return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); |
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 2ff4a9961e1d..3feb1b2d75d8 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h | |||
@@ -151,6 +151,8 @@ struct bcma_host_ops { | |||
151 | #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ | 151 | #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ |
152 | #define BCMA_CORE_USB30_DEV 0x83D | 152 | #define BCMA_CORE_USB30_DEV 0x83D |
153 | #define BCMA_CORE_ARM_CR4 0x83E | 153 | #define BCMA_CORE_ARM_CR4 0x83E |
154 | #define BCMA_CORE_ARM_CA7 0x847 | ||
155 | #define BCMA_CORE_SYS_MEM 0x849 | ||
154 | #define BCMA_CORE_DEFAULT 0xFFF | 156 | #define BCMA_CORE_DEFAULT 0xFFF |
155 | 157 | ||
156 | #define BCMA_MAX_NR_CORES 16 | 158 | #define BCMA_MAX_NR_CORES 16 |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index e63553386ae7..2b8ed123ad36 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift) | |||
164 | * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit | 164 | * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit |
165 | * @value: value to sign extend | 165 | * @value: value to sign extend |
166 | * @index: 0 based bit index (0<=index<32) to sign bit | 166 | * @index: 0 based bit index (0<=index<32) to sign bit |
167 | * | ||
168 | * This is safe to use for 16- and 8-bit types as well. | ||
167 | */ | 169 | */ |
168 | static inline __s32 sign_extend32(__u32 value, int index) | 170 | static inline __s32 sign_extend32(__u32 value, int index) |
169 | { | 171 | { |
@@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index) | |||
171 | return (__s32)(value << shift) >> shift; | 173 | return (__s32)(value << shift) >> shift; |
172 | } | 174 | } |
173 | 175 | ||
176 | /** | ||
177 | * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit | ||
178 | * @value: value to sign extend | ||
179 | * @index: 0 based bit index (0<=index<64) to sign bit | ||
180 | */ | ||
181 | static inline __s64 sign_extend64(__u64 value, int index) | ||
182 | { | ||
183 | __u8 shift = 63 - index; | ||
184 | return (__s64)(value << shift) >> shift; | ||
185 | } | ||
186 | |||
174 | static inline unsigned fls_long(unsigned long l) | 187 | static inline unsigned fls_long(unsigned long l) |
175 | { | 188 | { |
176 | if (sizeof(l) == 4) | 189 | if (sizeof(l) == 4) |
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 0a5cc7a1109b..c02e669945e9 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
@@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, | |||
713 | 713 | ||
714 | if (!throtl) { | 714 | if (!throtl) { |
715 | blkg = blkg ?: q->root_blkg; | 715 | blkg = blkg ?: q->root_blkg; |
716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, | 716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, |
717 | bio->bi_iter.bi_size); | 717 | bio->bi_iter.bi_size); |
718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); | 718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); |
719 | } | 719 | } |
720 | 720 | ||
721 | rcu_read_unlock(); | 721 | rcu_read_unlock(); |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..daf17d70aeca 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -59,6 +59,9 @@ struct blk_mq_hw_ctx { | |||
59 | 59 | ||
60 | struct blk_mq_cpu_notifier cpu_notifier; | 60 | struct blk_mq_cpu_notifier cpu_notifier; |
61 | struct kobject kobj; | 61 | struct kobject kobj; |
62 | |||
63 | unsigned long poll_invoked; | ||
64 | unsigned long poll_success; | ||
62 | }; | 65 | }; |
63 | 66 | ||
64 | struct blk_mq_tag_set { | 67 | struct blk_mq_tag_set { |
@@ -97,6 +100,8 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int, | |||
97 | typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, | 100 | typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, |
98 | bool); | 101 | bool); |
99 | typedef void (busy_tag_iter_fn)(struct request *, void *, bool); | 102 | typedef void (busy_tag_iter_fn)(struct request *, void *, bool); |
103 | typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int); | ||
104 | |||
100 | 105 | ||
101 | struct blk_mq_ops { | 106 | struct blk_mq_ops { |
102 | /* | 107 | /* |
@@ -114,6 +119,11 @@ struct blk_mq_ops { | |||
114 | */ | 119 | */ |
115 | timeout_fn *timeout; | 120 | timeout_fn *timeout; |
116 | 121 | ||
122 | /* | ||
123 | * Called to poll for completion of a specific tag. | ||
124 | */ | ||
125 | poll_fn *poll; | ||
126 | |||
117 | softirq_done_fn *complete; | 127 | softirq_done_fn *complete; |
118 | 128 | ||
119 | /* | 129 | /* |
@@ -145,7 +155,6 @@ enum { | |||
145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | 155 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
146 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 156 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
147 | BLK_MQ_F_SG_MERGE = 1 << 2, | 157 | BLK_MQ_F_SG_MERGE = 1 << 2, |
148 | BLK_MQ_F_SYSFS_UP = 1 << 3, | ||
149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | 158 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, |
150 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, | 159 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
151 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, | 160 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, |
@@ -167,7 +176,6 @@ enum { | |||
167 | struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); | 176 | struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); |
168 | struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | 177 | struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, |
169 | struct request_queue *q); | 178 | struct request_queue *q); |
170 | void blk_mq_finish_init(struct request_queue *q); | ||
171 | int blk_mq_register_disk(struct gendisk *); | 179 | int blk_mq_register_disk(struct gendisk *); |
172 | void blk_mq_unregister_disk(struct gendisk *); | 180 | void blk_mq_unregister_disk(struct gendisk *); |
173 | 181 | ||
@@ -215,7 +223,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | |||
215 | void blk_mq_cancel_requeue_work(struct request_queue *q); | 223 | void blk_mq_cancel_requeue_work(struct request_queue *q); |
216 | void blk_mq_kick_requeue_list(struct request_queue *q); | 224 | void blk_mq_kick_requeue_list(struct request_queue *q); |
217 | void blk_mq_abort_requeue_list(struct request_queue *q); | 225 | void blk_mq_abort_requeue_list(struct request_queue *q); |
218 | void blk_mq_complete_request(struct request *rq); | 226 | void blk_mq_complete_request(struct request *rq, int error); |
219 | 227 | ||
220 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 228 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
221 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 229 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
@@ -224,8 +232,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); | |||
224 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | 232 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
225 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); | 233 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
226 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 234 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
227 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | ||
228 | void *priv); | ||
229 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | 235 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, |
230 | void *priv); | 236 | void *priv); |
231 | void blk_mq_freeze_queue(struct request_queue *q); | 237 | void blk_mq_freeze_queue(struct request_queue *q); |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index e8130138f29d..0fb65843ec1e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -244,4 +244,28 @@ enum rq_flag_bits { | |||
244 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) | 244 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) |
245 | #define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) | 245 | #define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) |
246 | 246 | ||
247 | typedef unsigned int blk_qc_t; | ||
248 | #define BLK_QC_T_NONE -1U | ||
249 | #define BLK_QC_T_SHIFT 16 | ||
250 | |||
251 | static inline bool blk_qc_t_valid(blk_qc_t cookie) | ||
252 | { | ||
253 | return cookie != BLK_QC_T_NONE; | ||
254 | } | ||
255 | |||
256 | static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num) | ||
257 | { | ||
258 | return tag | (queue_num << BLK_QC_T_SHIFT); | ||
259 | } | ||
260 | |||
261 | static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) | ||
262 | { | ||
263 | return cookie >> BLK_QC_T_SHIFT; | ||
264 | } | ||
265 | |||
266 | static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) | ||
267 | { | ||
268 | return cookie & ((1u << BLK_QC_T_SHIFT) - 1); | ||
269 | } | ||
270 | |||
247 | #endif /* __LINUX_BLK_TYPES_H */ | 271 | #endif /* __LINUX_BLK_TYPES_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..c0d2b7927c1f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -35,6 +35,7 @@ struct sg_io_hdr; | |||
35 | struct bsg_job; | 35 | struct bsg_job; |
36 | struct blkcg_gq; | 36 | struct blkcg_gq; |
37 | struct blk_flush_queue; | 37 | struct blk_flush_queue; |
38 | struct pr_ops; | ||
38 | 39 | ||
39 | #define BLKDEV_MIN_RQ 4 | 40 | #define BLKDEV_MIN_RQ 4 |
40 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | 41 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
@@ -208,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req) | |||
208 | struct blk_queue_ctx; | 209 | struct blk_queue_ctx; |
209 | 210 | ||
210 | typedef void (request_fn_proc) (struct request_queue *q); | 211 | typedef void (request_fn_proc) (struct request_queue *q); |
211 | typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); | 212 | typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); |
212 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 213 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
213 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 214 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
214 | 215 | ||
@@ -369,6 +370,10 @@ struct request_queue { | |||
369 | */ | 370 | */ |
370 | struct kobject mq_kobj; | 371 | struct kobject mq_kobj; |
371 | 372 | ||
373 | #ifdef CONFIG_BLK_DEV_INTEGRITY | ||
374 | struct blk_integrity integrity; | ||
375 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | ||
376 | |||
372 | #ifdef CONFIG_PM | 377 | #ifdef CONFIG_PM |
373 | struct device *dev; | 378 | struct device *dev; |
374 | int rpm_status; | 379 | int rpm_status; |
@@ -450,12 +455,14 @@ struct request_queue { | |||
450 | #endif | 455 | #endif |
451 | struct rcu_head rcu_head; | 456 | struct rcu_head rcu_head; |
452 | wait_queue_head_t mq_freeze_wq; | 457 | wait_queue_head_t mq_freeze_wq; |
453 | struct percpu_ref mq_usage_counter; | 458 | struct percpu_ref q_usage_counter; |
454 | struct list_head all_q_node; | 459 | struct list_head all_q_node; |
455 | 460 | ||
456 | struct blk_mq_tag_set *tag_set; | 461 | struct blk_mq_tag_set *tag_set; |
457 | struct list_head tag_set_list; | 462 | struct list_head tag_set_list; |
458 | struct bio_set *bio_split; | 463 | struct bio_set *bio_split; |
464 | |||
465 | bool mq_sysfs_init_done; | ||
459 | }; | 466 | }; |
460 | 467 | ||
461 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 468 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
@@ -480,6 +487,7 @@ struct request_queue { | |||
480 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ | 487 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ |
481 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ | 488 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ |
482 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ | 489 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ |
490 | #define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ | ||
483 | 491 | ||
484 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 492 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
485 | (1 << QUEUE_FLAG_STACKABLE) | \ | 493 | (1 << QUEUE_FLAG_STACKABLE) | \ |
@@ -754,7 +762,7 @@ static inline void rq_flush_dcache_pages(struct request *rq) | |||
754 | 762 | ||
755 | extern int blk_register_queue(struct gendisk *disk); | 763 | extern int blk_register_queue(struct gendisk *disk); |
756 | extern void blk_unregister_queue(struct gendisk *disk); | 764 | extern void blk_unregister_queue(struct gendisk *disk); |
757 | extern void generic_make_request(struct bio *bio); | 765 | extern blk_qc_t generic_make_request(struct bio *bio); |
758 | extern void blk_rq_init(struct request_queue *q, struct request *rq); | 766 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
759 | extern void blk_put_request(struct request *); | 767 | extern void blk_put_request(struct request *); |
760 | extern void __blk_put_request(struct request_queue *, struct request *); | 768 | extern void __blk_put_request(struct request_queue *, struct request *); |
@@ -786,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
786 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
787 | struct scsi_ioctl_command __user *); | 795 | struct scsi_ioctl_command __user *); |
788 | 796 | ||
797 | extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
798 | extern void blk_queue_exit(struct request_queue *q); | ||
789 | extern void blk_start_queue(struct request_queue *q); | 799 | extern void blk_start_queue(struct request_queue *q); |
790 | extern void blk_stop_queue(struct request_queue *q); | 800 | extern void blk_stop_queue(struct request_queue *q); |
791 | extern void blk_sync_queue(struct request_queue *q); | 801 | extern void blk_sync_queue(struct request_queue *q); |
@@ -807,6 +817,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, | |||
807 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 817 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
808 | struct request *, int, rq_end_io_fn *); | 818 | struct request *, int, rq_end_io_fn *); |
809 | 819 | ||
820 | bool blk_poll(struct request_queue *q, blk_qc_t cookie); | ||
821 | |||
810 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 822 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
811 | { | 823 | { |
812 | return bdev->bd_disk->queue; /* this is never NULL */ | 824 | return bdev->bd_disk->queue; /* this is never NULL */ |
@@ -1460,22 +1472,13 @@ struct blk_integrity_iter { | |||
1460 | 1472 | ||
1461 | typedef int (integrity_processing_fn) (struct blk_integrity_iter *); | 1473 | typedef int (integrity_processing_fn) (struct blk_integrity_iter *); |
1462 | 1474 | ||
1463 | struct blk_integrity { | 1475 | struct blk_integrity_profile { |
1464 | integrity_processing_fn *generate_fn; | 1476 | integrity_processing_fn *generate_fn; |
1465 | integrity_processing_fn *verify_fn; | 1477 | integrity_processing_fn *verify_fn; |
1466 | 1478 | const char *name; | |
1467 | unsigned short flags; | ||
1468 | unsigned short tuple_size; | ||
1469 | unsigned short interval; | ||
1470 | unsigned short tag_size; | ||
1471 | |||
1472 | const char *name; | ||
1473 | |||
1474 | struct kobject kobj; | ||
1475 | }; | 1479 | }; |
1476 | 1480 | ||
1477 | extern bool blk_integrity_is_initialized(struct gendisk *); | 1481 | extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1478 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | ||
1479 | extern void blk_integrity_unregister(struct gendisk *); | 1482 | extern void blk_integrity_unregister(struct gendisk *); |
1480 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1483 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1481 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 1484 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
@@ -1486,15 +1489,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, | |||
1486 | extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, | 1489 | extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, |
1487 | struct bio *); | 1490 | struct bio *); |
1488 | 1491 | ||
1489 | static inline | 1492 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
1490 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | ||
1491 | { | 1493 | { |
1492 | return bdev->bd_disk->integrity; | 1494 | struct blk_integrity *bi = &disk->queue->integrity; |
1495 | |||
1496 | if (!bi->profile) | ||
1497 | return NULL; | ||
1498 | |||
1499 | return bi; | ||
1493 | } | 1500 | } |
1494 | 1501 | ||
1495 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 1502 | static inline |
1503 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | ||
1496 | { | 1504 | { |
1497 | return disk->integrity; | 1505 | return blk_get_integrity(bdev->bd_disk); |
1498 | } | 1506 | } |
1499 | 1507 | ||
1500 | static inline bool blk_integrity_rq(struct request *rq) | 1508 | static inline bool blk_integrity_rq(struct request *rq) |
@@ -1568,10 +1576,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) | |||
1568 | { | 1576 | { |
1569 | return 0; | 1577 | return 0; |
1570 | } | 1578 | } |
1571 | static inline int blk_integrity_register(struct gendisk *d, | 1579 | static inline void blk_integrity_register(struct gendisk *d, |
1572 | struct blk_integrity *b) | 1580 | struct blk_integrity *b) |
1573 | { | 1581 | { |
1574 | return 0; | ||
1575 | } | 1582 | } |
1576 | static inline void blk_integrity_unregister(struct gendisk *d) | 1583 | static inline void blk_integrity_unregister(struct gendisk *d) |
1577 | { | 1584 | { |
@@ -1596,10 +1603,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq, | |||
1596 | { | 1603 | { |
1597 | return true; | 1604 | return true; |
1598 | } | 1605 | } |
1599 | static inline bool blk_integrity_is_initialized(struct gendisk *g) | 1606 | |
1600 | { | ||
1601 | return 0; | ||
1602 | } | ||
1603 | static inline bool integrity_req_gap_back_merge(struct request *req, | 1607 | static inline bool integrity_req_gap_back_merge(struct request *req, |
1604 | struct bio *next) | 1608 | struct bio *next) |
1605 | { | 1609 | { |
@@ -1631,6 +1635,7 @@ struct block_device_operations { | |||
1631 | /* this callback is with swap_lock and sometimes page table lock held */ | 1635 | /* this callback is with swap_lock and sometimes page table lock held */ |
1632 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 1636 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); |
1633 | struct module *owner; | 1637 | struct module *owner; |
1638 | const struct pr_ops *pr_ops; | ||
1634 | }; | 1639 | }; |
1635 | 1640 | ||
1636 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 1641 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h new file mode 100644 index 000000000000..bef124fde61e --- /dev/null +++ b/include/linux/blkpg.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _LINUX_BLKPG_H | ||
2 | #define _LINUX_BLKPG_H | ||
3 | |||
4 | /* | ||
5 | * Partition table and disk geometry handling | ||
6 | */ | ||
7 | |||
8 | #include <linux/compat.h> | ||
9 | #include <uapi/linux/blkpg.h> | ||
10 | |||
11 | #ifdef CONFIG_COMPAT | ||
12 | /* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */ | ||
13 | struct blkpg_compat_ioctl_arg { | ||
14 | compat_int_t op; | ||
15 | compat_int_t flags; | ||
16 | compat_int_t datalen; | ||
17 | compat_uptr_t data; | ||
18 | }; | ||
19 | #endif | ||
20 | |||
21 | #endif /* _LINUX_BLKPG_H */ | ||
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f57d7fed9ec3..de464e6683b6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <uapi/linux/bpf.h> | 10 | #include <uapi/linux/bpf.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/file.h> | 12 | #include <linux/file.h> |
13 | #include <linux/perf_event.h> | ||
14 | 13 | ||
15 | struct bpf_map; | 14 | struct bpf_map; |
16 | 15 | ||
@@ -37,6 +36,8 @@ struct bpf_map { | |||
37 | u32 key_size; | 36 | u32 key_size; |
38 | u32 value_size; | 37 | u32 value_size; |
39 | u32 max_entries; | 38 | u32 max_entries; |
39 | u32 pages; | ||
40 | struct user_struct *user; | ||
40 | const struct bpf_map_ops *ops; | 41 | const struct bpf_map_ops *ops; |
41 | struct work_struct work; | 42 | struct work_struct work; |
42 | }; | 43 | }; |
@@ -101,6 +102,8 @@ enum bpf_access_type { | |||
101 | BPF_WRITE = 2 | 102 | BPF_WRITE = 2 |
102 | }; | 103 | }; |
103 | 104 | ||
105 | struct bpf_prog; | ||
106 | |||
104 | struct bpf_verifier_ops { | 107 | struct bpf_verifier_ops { |
105 | /* return eBPF function prototype for verification */ | 108 | /* return eBPF function prototype for verification */ |
106 | const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); | 109 | const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); |
@@ -112,7 +115,7 @@ struct bpf_verifier_ops { | |||
112 | 115 | ||
113 | u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, | 116 | u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, |
114 | int src_reg, int ctx_off, | 117 | int src_reg, int ctx_off, |
115 | struct bpf_insn *insn); | 118 | struct bpf_insn *insn, struct bpf_prog *prog); |
116 | }; | 119 | }; |
117 | 120 | ||
118 | struct bpf_prog_type_list { | 121 | struct bpf_prog_type_list { |
@@ -121,14 +124,13 @@ struct bpf_prog_type_list { | |||
121 | enum bpf_prog_type type; | 124 | enum bpf_prog_type type; |
122 | }; | 125 | }; |
123 | 126 | ||
124 | struct bpf_prog; | ||
125 | |||
126 | struct bpf_prog_aux { | 127 | struct bpf_prog_aux { |
127 | atomic_t refcnt; | 128 | atomic_t refcnt; |
128 | u32 used_map_cnt; | 129 | u32 used_map_cnt; |
129 | const struct bpf_verifier_ops *ops; | 130 | const struct bpf_verifier_ops *ops; |
130 | struct bpf_map **used_maps; | 131 | struct bpf_map **used_maps; |
131 | struct bpf_prog *prog; | 132 | struct bpf_prog *prog; |
133 | struct user_struct *user; | ||
132 | union { | 134 | union { |
133 | struct work_struct work; | 135 | struct work_struct work; |
134 | struct rcu_head rcu; | 136 | struct rcu_head rcu; |
@@ -165,9 +167,18 @@ struct bpf_prog *bpf_prog_get(u32 ufd); | |||
165 | void bpf_prog_put(struct bpf_prog *prog); | 167 | void bpf_prog_put(struct bpf_prog *prog); |
166 | void bpf_prog_put_rcu(struct bpf_prog *prog); | 168 | void bpf_prog_put_rcu(struct bpf_prog *prog); |
167 | 169 | ||
168 | struct bpf_map *bpf_map_get(struct fd f); | 170 | struct bpf_map *bpf_map_get(u32 ufd); |
171 | struct bpf_map *__bpf_map_get(struct fd f); | ||
169 | void bpf_map_put(struct bpf_map *map); | 172 | void bpf_map_put(struct bpf_map *map); |
170 | 173 | ||
174 | extern int sysctl_unprivileged_bpf_disabled; | ||
175 | |||
176 | int bpf_map_new_fd(struct bpf_map *map); | ||
177 | int bpf_prog_new_fd(struct bpf_prog *prog); | ||
178 | |||
179 | int bpf_obj_pin_user(u32 ufd, const char __user *pathname); | ||
180 | int bpf_obj_get_user(const char __user *pathname); | ||
181 | |||
171 | /* verify correctness of eBPF program */ | 182 | /* verify correctness of eBPF program */ |
172 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); | 183 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); |
173 | #else | 184 | #else |
@@ -190,7 +201,6 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; | |||
190 | extern const struct bpf_func_proto bpf_map_update_elem_proto; | 201 | extern const struct bpf_func_proto bpf_map_update_elem_proto; |
191 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; | 202 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; |
192 | 203 | ||
193 | extern const struct bpf_func_proto bpf_perf_event_read_proto; | ||
194 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; | 204 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
195 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; | 205 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
196 | extern const struct bpf_func_proto bpf_tail_call_proto; | 206 | extern const struct bpf_func_proto bpf_tail_call_proto; |
@@ -201,4 +211,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; | |||
201 | extern const struct bpf_func_proto bpf_skb_vlan_push_proto; | 211 | extern const struct bpf_func_proto bpf_skb_vlan_push_proto; |
202 | extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; | 212 | extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; |
203 | 213 | ||
214 | /* Shared helpers among cBPF and eBPF. */ | ||
215 | void bpf_user_rnd_init_once(void); | ||
216 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | ||
217 | |||
204 | #endif /* _LINUX_BPF_H */ | 218 | #endif /* _LINUX_BPF_H */ |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 697ca7795bd9..59f4a7304419 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #define PHY_ID_BCM7439_2 0xae025080 | 30 | #define PHY_ID_BCM7439_2 0xae025080 |
31 | #define PHY_ID_BCM7445 0x600d8510 | 31 | #define PHY_ID_BCM7445 0x600d8510 |
32 | 32 | ||
33 | #define PHY_ID_BCM_CYGNUS 0xae025200 | ||
34 | |||
33 | #define PHY_BCM_OUI_MASK 0xfffffc00 | 35 | #define PHY_BCM_OUI_MASK 0xfffffc00 |
34 | #define PHY_BCM_OUI_1 0x00206000 | 36 | #define PHY_BCM_OUI_1 0x00206000 |
35 | #define PHY_BCM_OUI_2 0x0143bc00 | 37 | #define PHY_BCM_OUI_2 0x0143bc00 |
@@ -138,7 +140,10 @@ | |||
138 | 140 | ||
139 | /* 01010: Auto Power-Down */ | 141 | /* 01010: Auto Power-Down */ |
140 | #define BCM54XX_SHD_APD 0x0a | 142 | #define BCM54XX_SHD_APD 0x0a |
143 | #define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ | ||
141 | #define BCM54XX_SHD_APD_EN 0x0020 | 144 | #define BCM54XX_SHD_APD_EN 0x0020 |
145 | #define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ | ||
146 | #define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ | ||
142 | 147 | ||
143 | #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ | 148 | #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ |
144 | /* LED3 / ~LINKSPD[2] selector */ | 149 | /* LED3 / ~LINKSPD[2] selector */ |
@@ -209,27 +214,13 @@ | |||
209 | #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ | 214 | #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ |
210 | #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ | 215 | #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ |
211 | 216 | ||
212 | /* | ||
213 | * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T | ||
214 | * 0x1c shadow registers. | ||
215 | */ | ||
216 | static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) | ||
217 | { | ||
218 | phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); | ||
219 | return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); | ||
220 | } | ||
221 | |||
222 | static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, | ||
223 | u16 val) | ||
224 | { | ||
225 | return phy_write(phydev, MII_BCM54XX_SHD, | ||
226 | MII_BCM54XX_SHD_WRITE | | ||
227 | MII_BCM54XX_SHD_VAL(shadow) | | ||
228 | MII_BCM54XX_SHD_DATA(val)); | ||
229 | } | ||
230 | |||
231 | #define BRCM_CL45VEN_EEE_CONTROL 0x803d | 217 | #define BRCM_CL45VEN_EEE_CONTROL 0x803d |
232 | #define LPI_FEATURE_EN 0x8000 | 218 | #define LPI_FEATURE_EN 0x8000 |
233 | #define LPI_FEATURE_EN_DIG1000X 0x4000 | 219 | #define LPI_FEATURE_EN_DIG1000X 0x4000 |
234 | 220 | ||
221 | /* Core register definitions*/ | ||
222 | #define MII_BRCM_CORE_BASE1E 0x1E | ||
223 | #define MII_BRCM_CORE_EXPB0 0xB0 | ||
224 | #define MII_BRCM_CORE_EXPB1 0xB1 | ||
225 | |||
235 | #endif /* _LINUX_BRCMPHY_H */ | 226 | #endif /* _LINUX_BRCMPHY_H */ |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e6797ded700e..89d9aa9e79bf 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -227,8 +227,6 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, | |||
227 | get_block_t *, loff_t *); | 227 | get_block_t *, loff_t *); |
228 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | 228 | int generic_cont_expand_simple(struct inode *inode, loff_t size); |
229 | int block_commit_write(struct page *page, unsigned from, unsigned to); | 229 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
230 | int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | ||
231 | get_block_t get_block); | ||
232 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 230 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
233 | get_block_t get_block); | 231 | get_block_t get_block); |
234 | /* Convert errno to return value from ->page_mkwrite() call */ | 232 | /* Convert errno to return value from ->page_mkwrite() call */ |
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c3a9c8fc60fa..735f9f8c4e43 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h | |||
@@ -14,9 +14,10 @@ | |||
14 | #define _CAN_DEV_H | 14 | #define _CAN_DEV_H |
15 | 15 | ||
16 | #include <linux/can.h> | 16 | #include <linux/can.h> |
17 | #include <linux/can/netlink.h> | ||
18 | #include <linux/can/error.h> | 17 | #include <linux/can/error.h> |
19 | #include <linux/can/led.h> | 18 | #include <linux/can/led.h> |
19 | #include <linux/can/netlink.h> | ||
20 | #include <linux/netdevice.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * CAN mode | 23 | * CAN mode |
@@ -77,7 +78,7 @@ struct can_priv { | |||
77 | #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) | 78 | #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) |
78 | 79 | ||
79 | /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ | 80 | /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ |
80 | static inline int can_dropped_invalid_skb(struct net_device *dev, | 81 | static inline bool can_dropped_invalid_skb(struct net_device *dev, |
81 | struct sk_buff *skb) | 82 | struct sk_buff *skb) |
82 | { | 83 | { |
83 | const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; | 84 | const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; |
@@ -93,12 +94,12 @@ static inline int can_dropped_invalid_skb(struct net_device *dev, | |||
93 | } else | 94 | } else |
94 | goto inval_skb; | 95 | goto inval_skb; |
95 | 96 | ||
96 | return 0; | 97 | return false; |
97 | 98 | ||
98 | inval_skb: | 99 | inval_skb: |
99 | kfree_skb(skb); | 100 | kfree_skb(skb); |
100 | dev->stats.tx_dropped++; | 101 | dev->stats.tx_dropped++; |
101 | return 1; | 102 | return true; |
102 | } | 103 | } |
103 | 104 | ||
104 | static inline bool can_is_canfd_skb(const struct sk_buff *skb) | 105 | static inline bool can_is_canfd_skb(const struct sk_buff *skb) |
diff --git a/include/linux/can/led.h b/include/linux/can/led.h index 146de4506d21..2746f7c2f87d 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/if.h> | 12 | #include <linux/if.h> |
13 | #include <linux/leds.h> | 13 | #include <linux/leds.h> |
14 | #include <linux/netdevice.h> | ||
14 | 15 | ||
15 | enum can_led_event { | 16 | enum can_led_event { |
16 | CAN_LED_EVENT_OPEN, | 17 | CAN_LED_EVENT_OPEN, |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 397c5cd09794..3e3799cdc6e6 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
@@ -29,8 +29,9 @@ | |||
29 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | 29 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ |
30 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | 30 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ |
31 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ | 31 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ |
32 | #define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */ | 32 | #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ |
33 | #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ | 33 | #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ |
34 | #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ | ||
34 | 35 | ||
35 | #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) | 36 | #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) |
36 | 37 | ||
@@ -137,6 +138,7 @@ struct ceph_client { | |||
137 | #endif | 138 | #endif |
138 | }; | 139 | }; |
139 | 140 | ||
141 | #define from_msgr(ms) container_of(ms, struct ceph_client, msgr) | ||
140 | 142 | ||
141 | 143 | ||
142 | /* | 144 | /* |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index b2371d9b51fa..71b1d6cdcb5d 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
@@ -43,10 +43,9 @@ struct ceph_connection_operations { | |||
43 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, | 43 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, |
44 | struct ceph_msg_header *hdr, | 44 | struct ceph_msg_header *hdr, |
45 | int *skip); | 45 | int *skip); |
46 | int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg); | ||
47 | 46 | ||
48 | int (*check_message_signature) (struct ceph_connection *con, | 47 | int (*sign_message) (struct ceph_msg *msg); |
49 | struct ceph_msg *msg); | 48 | int (*check_message_signature) (struct ceph_msg *msg); |
50 | }; | 49 | }; |
51 | 50 | ||
52 | /* use format string %s%d */ | 51 | /* use format string %s%d */ |
@@ -58,8 +57,6 @@ struct ceph_messenger { | |||
58 | 57 | ||
59 | atomic_t stopping; | 58 | atomic_t stopping; |
60 | possible_net_t net; | 59 | possible_net_t net; |
61 | bool nocrc; | ||
62 | bool tcp_nodelay; | ||
63 | 60 | ||
64 | /* | 61 | /* |
65 | * the global_seq counts connections i (attempt to) initiate | 62 | * the global_seq counts connections i (attempt to) initiate |
@@ -67,9 +64,6 @@ struct ceph_messenger { | |||
67 | */ | 64 | */ |
68 | u32 global_seq; | 65 | u32 global_seq; |
69 | spinlock_t global_seq_lock; | 66 | spinlock_t global_seq_lock; |
70 | |||
71 | u64 supported_features; | ||
72 | u64 required_features; | ||
73 | }; | 67 | }; |
74 | 68 | ||
75 | enum ceph_msg_data_type { | 69 | enum ceph_msg_data_type { |
@@ -268,11 +262,7 @@ extern void ceph_msgr_exit(void); | |||
268 | extern void ceph_msgr_flush(void); | 262 | extern void ceph_msgr_flush(void); |
269 | 263 | ||
270 | extern void ceph_messenger_init(struct ceph_messenger *msgr, | 264 | extern void ceph_messenger_init(struct ceph_messenger *msgr, |
271 | struct ceph_entity_addr *myaddr, | 265 | struct ceph_entity_addr *myaddr); |
272 | u64 supported_features, | ||
273 | u64 required_features, | ||
274 | bool nocrc, | ||
275 | bool tcp_nodelay); | ||
276 | extern void ceph_messenger_fini(struct ceph_messenger *msgr); | 266 | extern void ceph_messenger_fini(struct ceph_messenger *msgr); |
277 | 267 | ||
278 | extern void ceph_con_init(struct ceph_connection *con, void *private, | 268 | extern void ceph_con_init(struct ceph_connection *con, void *private, |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8492721b39be..60d44b26276d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -76,6 +76,7 @@ enum { | |||
76 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ | 76 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ |
77 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ | 77 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ |
78 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ | 78 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ |
79 | CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ | ||
79 | 80 | ||
80 | /* internal flags, do not use outside cgroup core proper */ | 81 | /* internal flags, do not use outside cgroup core proper */ |
81 | __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ | 82 | __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ |
@@ -83,6 +84,17 @@ enum { | |||
83 | }; | 84 | }; |
84 | 85 | ||
85 | /* | 86 | /* |
87 | * cgroup_file is the handle for a file instance created in a cgroup which | ||
88 | * is used, for example, to generate file changed notifications. This can | ||
89 | * be obtained by setting cftype->file_offset. | ||
90 | */ | ||
91 | struct cgroup_file { | ||
92 | /* do not access any fields from outside cgroup core */ | ||
93 | struct list_head node; /* anchored at css->files */ | ||
94 | struct kernfs_node *kn; | ||
95 | }; | ||
96 | |||
97 | /* | ||
86 | * Per-subsystem/per-cgroup state maintained by the system. This is the | 98 | * Per-subsystem/per-cgroup state maintained by the system. This is the |
87 | * fundamental structural building block that controllers deal with. | 99 | * fundamental structural building block that controllers deal with. |
88 | * | 100 | * |
@@ -122,6 +134,9 @@ struct cgroup_subsys_state { | |||
122 | */ | 134 | */ |
123 | u64 serial_nr; | 135 | u64 serial_nr; |
124 | 136 | ||
137 | /* all cgroup_files associated with this css */ | ||
138 | struct list_head files; | ||
139 | |||
125 | /* percpu_ref killing and RCU release */ | 140 | /* percpu_ref killing and RCU release */ |
126 | struct rcu_head rcu_head; | 141 | struct rcu_head rcu_head; |
127 | struct work_struct destroy_work; | 142 | struct work_struct destroy_work; |
@@ -196,6 +211,9 @@ struct css_set { | |||
196 | */ | 211 | */ |
197 | struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; | 212 | struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; |
198 | 213 | ||
214 | /* all css_task_iters currently walking this cset */ | ||
215 | struct list_head task_iters; | ||
216 | |||
199 | /* For RCU-protected deletion */ | 217 | /* For RCU-protected deletion */ |
200 | struct rcu_head rcu_head; | 218 | struct rcu_head rcu_head; |
201 | }; | 219 | }; |
@@ -217,16 +235,16 @@ struct cgroup { | |||
217 | int id; | 235 | int id; |
218 | 236 | ||
219 | /* | 237 | /* |
220 | * If this cgroup contains any tasks, it contributes one to | 238 | * Each non-empty css_set associated with this cgroup contributes |
221 | * populated_cnt. All children with non-zero popuplated_cnt of | 239 | * one to populated_cnt. All children with non-zero popuplated_cnt |
222 | * their own contribute one. The count is zero iff there's no task | 240 | * of their own contribute one. The count is zero iff there's no |
223 | * in this cgroup or its subtree. | 241 | * task in this cgroup or its subtree. |
224 | */ | 242 | */ |
225 | int populated_cnt; | 243 | int populated_cnt; |
226 | 244 | ||
227 | struct kernfs_node *kn; /* cgroup kernfs entry */ | 245 | struct kernfs_node *kn; /* cgroup kernfs entry */ |
228 | struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */ | 246 | struct cgroup_file procs_file; /* handle for "cgroup.procs" */ |
229 | struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ | 247 | struct cgroup_file events_file; /* handle for "cgroup.events" */ |
230 | 248 | ||
231 | /* | 249 | /* |
232 | * The bitmask of subsystems enabled on the child cgroups. | 250 | * The bitmask of subsystems enabled on the child cgroups. |
@@ -324,11 +342,6 @@ struct cftype { | |||
324 | */ | 342 | */ |
325 | char name[MAX_CFTYPE_NAME]; | 343 | char name[MAX_CFTYPE_NAME]; |
326 | unsigned long private; | 344 | unsigned long private; |
327 | /* | ||
328 | * If not 0, file mode is set to this value, otherwise it will | ||
329 | * be figured out automatically | ||
330 | */ | ||
331 | umode_t mode; | ||
332 | 345 | ||
333 | /* | 346 | /* |
334 | * The maximum length of string, excluding trailing nul, that can | 347 | * The maximum length of string, excluding trailing nul, that can |
@@ -340,6 +353,14 @@ struct cftype { | |||
340 | unsigned int flags; | 353 | unsigned int flags; |
341 | 354 | ||
342 | /* | 355 | /* |
356 | * If non-zero, should contain the offset from the start of css to | ||
357 | * a struct cgroup_file field. cgroup will record the handle of | ||
358 | * the created file into it. The recorded handle can be used as | ||
359 | * long as the containing css remains accessible. | ||
360 | */ | ||
361 | unsigned int file_offset; | ||
362 | |||
363 | /* | ||
343 | * Fields used for internal bookkeeping. Initialized automatically | 364 | * Fields used for internal bookkeeping. Initialized automatically |
344 | * during registration. | 365 | * during registration. |
345 | */ | 366 | */ |
@@ -414,12 +435,10 @@ struct cgroup_subsys { | |||
414 | int (*can_fork)(struct task_struct *task, void **priv_p); | 435 | int (*can_fork)(struct task_struct *task, void **priv_p); |
415 | void (*cancel_fork)(struct task_struct *task, void *priv); | 436 | void (*cancel_fork)(struct task_struct *task, void *priv); |
416 | void (*fork)(struct task_struct *task, void *priv); | 437 | void (*fork)(struct task_struct *task, void *priv); |
417 | void (*exit)(struct cgroup_subsys_state *css, | 438 | void (*exit)(struct task_struct *task); |
418 | struct cgroup_subsys_state *old_css, | 439 | void (*free)(struct task_struct *task); |
419 | struct task_struct *task); | ||
420 | void (*bind)(struct cgroup_subsys_state *root_css); | 440 | void (*bind)(struct cgroup_subsys_state *root_css); |
421 | 441 | ||
422 | int disabled; | ||
423 | int early_init; | 442 | int early_init; |
424 | 443 | ||
425 | /* | 444 | /* |
@@ -473,8 +492,31 @@ struct cgroup_subsys { | |||
473 | unsigned int depends_on; | 492 | unsigned int depends_on; |
474 | }; | 493 | }; |
475 | 494 | ||
476 | void cgroup_threadgroup_change_begin(struct task_struct *tsk); | 495 | extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; |
477 | void cgroup_threadgroup_change_end(struct task_struct *tsk); | 496 | |
497 | /** | ||
498 | * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups | ||
499 | * @tsk: target task | ||
500 | * | ||
501 | * Called from threadgroup_change_begin() and allows cgroup operations to | ||
502 | * synchronize against threadgroup changes using a percpu_rw_semaphore. | ||
503 | */ | ||
504 | static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) | ||
505 | { | ||
506 | percpu_down_read(&cgroup_threadgroup_rwsem); | ||
507 | } | ||
508 | |||
509 | /** | ||
510 | * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups | ||
511 | * @tsk: target task | ||
512 | * | ||
513 | * Called from threadgroup_change_end(). Counterpart of | ||
514 | * cgroup_threadcgroup_change_begin(). | ||
515 | */ | ||
516 | static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) | ||
517 | { | ||
518 | percpu_up_read(&cgroup_threadgroup_rwsem); | ||
519 | } | ||
478 | 520 | ||
479 | #else /* CONFIG_CGROUPS */ | 521 | #else /* CONFIG_CGROUPS */ |
480 | 522 | ||
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index eb7ca55f72ef..22e3754f89c5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -13,10 +13,10 @@ | |||
13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
14 | #include <linux/rculist.h> | 14 | #include <linux/rculist.h> |
15 | #include <linux/cgroupstats.h> | 15 | #include <linux/cgroupstats.h> |
16 | #include <linux/rwsem.h> | ||
17 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
18 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
19 | #include <linux/kernfs.h> | 18 | #include <linux/kernfs.h> |
19 | #include <linux/jump_label.h> | ||
20 | 20 | ||
21 | #include <linux/cgroup-defs.h> | 21 | #include <linux/cgroup-defs.h> |
22 | 22 | ||
@@ -41,6 +41,10 @@ struct css_task_iter { | |||
41 | struct list_head *task_pos; | 41 | struct list_head *task_pos; |
42 | struct list_head *tasks_head; | 42 | struct list_head *tasks_head; |
43 | struct list_head *mg_tasks_head; | 43 | struct list_head *mg_tasks_head; |
44 | |||
45 | struct css_set *cur_cset; | ||
46 | struct task_struct *cur_task; | ||
47 | struct list_head iters_node; /* css_set->task_iters */ | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | extern struct cgroup_root cgrp_dfl_root; | 50 | extern struct cgroup_root cgrp_dfl_root; |
@@ -50,6 +54,26 @@ extern struct css_set init_css_set; | |||
50 | #include <linux/cgroup_subsys.h> | 54 | #include <linux/cgroup_subsys.h> |
51 | #undef SUBSYS | 55 | #undef SUBSYS |
52 | 56 | ||
57 | #define SUBSYS(_x) \ | ||
58 | extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \ | ||
59 | extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key; | ||
60 | #include <linux/cgroup_subsys.h> | ||
61 | #undef SUBSYS | ||
62 | |||
63 | /** | ||
64 | * cgroup_subsys_enabled - fast test on whether a subsys is enabled | ||
65 | * @ss: subsystem in question | ||
66 | */ | ||
67 | #define cgroup_subsys_enabled(ss) \ | ||
68 | static_branch_likely(&ss ## _enabled_key) | ||
69 | |||
70 | /** | ||
71 | * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy | ||
72 | * @ss: subsystem in question | ||
73 | */ | ||
74 | #define cgroup_subsys_on_dfl(ss) \ | ||
75 | static_branch_likely(&ss ## _on_dfl_key) | ||
76 | |||
53 | bool css_has_online_children(struct cgroup_subsys_state *css); | 77 | bool css_has_online_children(struct cgroup_subsys_state *css); |
54 | struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); | 78 | struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); |
55 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, | 79 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, |
@@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p, | |||
78 | extern void cgroup_post_fork(struct task_struct *p, | 102 | extern void cgroup_post_fork(struct task_struct *p, |
79 | void *old_ss_priv[CGROUP_CANFORK_COUNT]); | 103 | void *old_ss_priv[CGROUP_CANFORK_COUNT]); |
80 | void cgroup_exit(struct task_struct *p); | 104 | void cgroup_exit(struct task_struct *p); |
105 | void cgroup_free(struct task_struct *p); | ||
81 | 106 | ||
82 | int cgroup_init_early(void); | 107 | int cgroup_init_early(void); |
83 | int cgroup_init(void); | 108 | int cgroup_init(void); |
@@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it); | |||
211 | * cgroup_taskset_for_each - iterate cgroup_taskset | 236 | * cgroup_taskset_for_each - iterate cgroup_taskset |
212 | * @task: the loop cursor | 237 | * @task: the loop cursor |
213 | * @tset: taskset to iterate | 238 | * @tset: taskset to iterate |
239 | * | ||
240 | * @tset may contain multiple tasks and they may belong to multiple | ||
241 | * processes. When there are multiple tasks in @tset, if a task of a | ||
242 | * process is in @tset, all tasks of the process are in @tset. Also, all | ||
243 | * are guaranteed to share the same source and destination csses. | ||
244 | * | ||
245 | * Iteration is not in any specific order. | ||
214 | */ | 246 | */ |
215 | #define cgroup_taskset_for_each(task, tset) \ | 247 | #define cgroup_taskset_for_each(task, tset) \ |
216 | for ((task) = cgroup_taskset_first((tset)); (task); \ | 248 | for ((task) = cgroup_taskset_first((tset)); (task); \ |
217 | (task) = cgroup_taskset_next((tset))) | 249 | (task) = cgroup_taskset_next((tset))) |
218 | 250 | ||
251 | /** | ||
252 | * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset | ||
253 | * @leader: the loop cursor | ||
254 | * @tset: takset to iterate | ||
255 | * | ||
256 | * Iterate threadgroup leaders of @tset. For single-task migrations, @tset | ||
257 | * may not contain any. | ||
258 | */ | ||
259 | #define cgroup_taskset_for_each_leader(leader, tset) \ | ||
260 | for ((leader) = cgroup_taskset_first((tset)); (leader); \ | ||
261 | (leader) = cgroup_taskset_next((tset))) \ | ||
262 | if ((leader) != (leader)->group_leader) \ | ||
263 | ; \ | ||
264 | else | ||
265 | |||
219 | /* | 266 | /* |
220 | * Inline functions. | 267 | * Inline functions. |
221 | */ | 268 | */ |
@@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) | |||
320 | */ | 367 | */ |
321 | #ifdef CONFIG_PROVE_RCU | 368 | #ifdef CONFIG_PROVE_RCU |
322 | extern struct mutex cgroup_mutex; | 369 | extern struct mutex cgroup_mutex; |
323 | extern struct rw_semaphore css_set_rwsem; | 370 | extern spinlock_t css_set_lock; |
324 | #define task_css_set_check(task, __c) \ | 371 | #define task_css_set_check(task, __c) \ |
325 | rcu_dereference_check((task)->cgroups, \ | 372 | rcu_dereference_check((task)->cgroups, \ |
326 | lockdep_is_held(&cgroup_mutex) || \ | 373 | lockdep_is_held(&cgroup_mutex) || \ |
327 | lockdep_is_held(&css_set_rwsem) || \ | 374 | lockdep_is_held(&css_set_lock) || \ |
328 | ((task)->flags & PF_EXITING) || (__c)) | 375 | ((task)->flags & PF_EXITING) || (__c)) |
329 | #else | 376 | #else |
330 | #define task_css_set_check(task, __c) \ | 377 | #define task_css_set_check(task, __c) \ |
@@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, | |||
412 | return task_css(task, subsys_id)->cgroup; | 459 | return task_css(task, subsys_id)->cgroup; |
413 | } | 460 | } |
414 | 461 | ||
415 | /** | ||
416 | * cgroup_on_dfl - test whether a cgroup is on the default hierarchy | ||
417 | * @cgrp: the cgroup of interest | ||
418 | * | ||
419 | * The default hierarchy is the v2 interface of cgroup and this function | ||
420 | * can be used to test whether a cgroup is on the default hierarchy for | ||
421 | * cases where a subsystem should behave differnetly depending on the | ||
422 | * interface version. | ||
423 | * | ||
424 | * The set of behaviors which change on the default hierarchy are still | ||
425 | * being determined and the mount option is prefixed with __DEVEL__. | ||
426 | * | ||
427 | * List of changed behaviors: | ||
428 | * | ||
429 | * - Mount options "noprefix", "xattr", "clone_children", "release_agent" | ||
430 | * and "name" are disallowed. | ||
431 | * | ||
432 | * - When mounting an existing superblock, mount options should match. | ||
433 | * | ||
434 | * - Remount is disallowed. | ||
435 | * | ||
436 | * - rename(2) is disallowed. | ||
437 | * | ||
438 | * - "tasks" is removed. Everything should be at process granularity. Use | ||
439 | * "cgroup.procs" instead. | ||
440 | * | ||
441 | * - "cgroup.procs" is not sorted. pids will be unique unless they got | ||
442 | * recycled inbetween reads. | ||
443 | * | ||
444 | * - "release_agent" and "notify_on_release" are removed. Replacement | ||
445 | * notification mechanism will be implemented. | ||
446 | * | ||
447 | * - "cgroup.clone_children" is removed. | ||
448 | * | ||
449 | * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup | ||
450 | * and its descendants contain no task; otherwise, 1. The file also | ||
451 | * generates kernfs notification which can be monitored through poll and | ||
452 | * [di]notify when the value of the file changes. | ||
453 | * | ||
454 | * - cpuset: tasks will be kept in empty cpusets when hotplug happens and | ||
455 | * take masks of ancestors with non-empty cpus/mems, instead of being | ||
456 | * moved to an ancestor. | ||
457 | * | ||
458 | * - cpuset: a task can be moved into an empty cpuset, and again it takes | ||
459 | * masks of ancestors. | ||
460 | * | ||
461 | * - memcg: use_hierarchy is on by default and the cgroup file for the flag | ||
462 | * is not created. | ||
463 | * | ||
464 | * - blkcg: blk-throttle becomes properly hierarchical. | ||
465 | * | ||
466 | * - debug: disallowed on the default hierarchy. | ||
467 | */ | ||
468 | static inline bool cgroup_on_dfl(const struct cgroup *cgrp) | ||
469 | { | ||
470 | return cgrp->root == &cgrp_dfl_root; | ||
471 | } | ||
472 | |||
473 | /* no synchronization, the result can only be used as a hint */ | 462 | /* no synchronization, the result can only be used as a hint */ |
474 | static inline bool cgroup_has_tasks(struct cgroup *cgrp) | 463 | static inline bool cgroup_is_populated(struct cgroup *cgrp) |
475 | { | 464 | { |
476 | return !list_empty(&cgrp->cset_links); | 465 | return cgrp->populated_cnt; |
477 | } | 466 | } |
478 | 467 | ||
479 | /* returns ino associated with a cgroup */ | 468 | /* returns ino associated with a cgroup */ |
@@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) | |||
527 | pr_cont_kernfs_path(cgrp->kn); | 516 | pr_cont_kernfs_path(cgrp->kn); |
528 | } | 517 | } |
529 | 518 | ||
519 | /** | ||
520 | * cgroup_file_notify - generate a file modified event for a cgroup_file | ||
521 | * @cfile: target cgroup_file | ||
522 | * | ||
523 | * @cfile must have been obtained by setting cftype->file_offset. | ||
524 | */ | ||
525 | static inline void cgroup_file_notify(struct cgroup_file *cfile) | ||
526 | { | ||
527 | /* might not have been created due to one of the CFTYPE selector flags */ | ||
528 | if (cfile->kn) | ||
529 | kernfs_notify(cfile->kn); | ||
530 | } | ||
531 | |||
530 | #else /* !CONFIG_CGROUPS */ | 532 | #else /* !CONFIG_CGROUPS */ |
531 | 533 | ||
532 | struct cgroup_subsys_state; | 534 | struct cgroup_subsys_state; |
@@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p, | |||
546 | static inline void cgroup_post_fork(struct task_struct *p, | 548 | static inline void cgroup_post_fork(struct task_struct *p, |
547 | void *ss_priv[CGROUP_CANFORK_COUNT]) {} | 549 | void *ss_priv[CGROUP_CANFORK_COUNT]) {} |
548 | static inline void cgroup_exit(struct task_struct *p) {} | 550 | static inline void cgroup_exit(struct task_struct *p) {} |
551 | static inline void cgroup_free(struct task_struct *p) {} | ||
549 | 552 | ||
550 | static inline int cgroup_init_early(void) { return 0; } | 553 | static inline int cgroup_init_early(void) { return 0; } |
551 | static inline int cgroup_init(void) { return 0; } | 554 | static inline int cgroup_init(void) { return 0; } |
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 3ecc07d0da77..c56988ac63f7 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
@@ -500,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, | |||
500 | * | 500 | * |
501 | * Clock with adjustable fractional divider affecting its output frequency. | 501 | * Clock with adjustable fractional divider affecting its output frequency. |
502 | */ | 502 | */ |
503 | |||
504 | struct clk_fractional_divider { | 503 | struct clk_fractional_divider { |
505 | struct clk_hw hw; | 504 | struct clk_hw hw; |
506 | void __iomem *reg; | 505 | void __iomem *reg; |
507 | u8 mshift; | 506 | u8 mshift; |
507 | u8 mwidth; | ||
508 | u32 mmask; | 508 | u32 mmask; |
509 | u8 nshift; | 509 | u8 nshift; |
510 | u8 nwidth; | ||
510 | u32 nmask; | 511 | u32 nmask; |
511 | u8 flags; | 512 | u8 flags; |
512 | spinlock_t *lock; | 513 | spinlock_t *lock; |
@@ -518,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev, | |||
518 | void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, | 519 | void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, |
519 | u8 clk_divider_flags, spinlock_t *lock); | 520 | u8 clk_divider_flags, spinlock_t *lock); |
520 | 521 | ||
522 | /** | ||
523 | * struct clk_multiplier - adjustable multiplier clock | ||
524 | * | ||
525 | * @hw: handle between common and hardware-specific interfaces | ||
526 | * @reg: register containing the multiplier | ||
527 | * @shift: shift to the multiplier bit field | ||
528 | * @width: width of the multiplier bit field | ||
529 | * @lock: register lock | ||
530 | * | ||
531 | * Clock with an adjustable multiplier affecting its output frequency. | ||
532 | * Implements .recalc_rate, .set_rate and .round_rate | ||
533 | * | ||
534 | * Flags: | ||
535 | * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read | ||
536 | * from the register, with 0 being a valid value effectively | ||
537 | * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is | ||
538 | * set, then a null multiplier will be considered as a bypass, | ||
539 | * leaving the parent rate unmodified. | ||
540 | * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be | ||
541 | * rounded to the closest integer instead of the down one. | ||
542 | */ | ||
543 | struct clk_multiplier { | ||
544 | struct clk_hw hw; | ||
545 | void __iomem *reg; | ||
546 | u8 shift; | ||
547 | u8 width; | ||
548 | u8 flags; | ||
549 | spinlock_t *lock; | ||
550 | }; | ||
551 | |||
552 | #define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) | ||
553 | #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) | ||
554 | |||
555 | extern const struct clk_ops clk_multiplier_ops; | ||
556 | |||
521 | /*** | 557 | /*** |
522 | * struct clk_composite - aggregate clock of mux, divider and gate clocks | 558 | * struct clk_composite - aggregate clock of mux, divider and gate clocks |
523 | * | 559 | * |
@@ -606,7 +642,7 @@ void clk_unregister(struct clk *clk); | |||
606 | void devm_clk_unregister(struct device *dev, struct clk *clk); | 642 | void devm_clk_unregister(struct device *dev, struct clk *clk); |
607 | 643 | ||
608 | /* helper functions */ | 644 | /* helper functions */ |
609 | const char *__clk_get_name(struct clk *clk); | 645 | const char *__clk_get_name(const struct clk *clk); |
610 | const char *clk_hw_get_name(const struct clk_hw *hw); | 646 | const char *clk_hw_get_name(const struct clk_hw *hw); |
611 | struct clk_hw *__clk_get_hw(struct clk *clk); | 647 | struct clk_hw *__clk_get_hw(struct clk *clk); |
612 | unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); | 648 | unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); |
@@ -618,6 +654,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); | |||
618 | unsigned long __clk_get_flags(struct clk *clk); | 654 | unsigned long __clk_get_flags(struct clk *clk); |
619 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); | 655 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); |
620 | bool clk_hw_is_prepared(const struct clk_hw *hw); | 656 | bool clk_hw_is_prepared(const struct clk_hw *hw); |
657 | bool clk_hw_is_enabled(const struct clk_hw *hw); | ||
621 | bool __clk_is_enabled(struct clk *clk); | 658 | bool __clk_is_enabled(struct clk *clk); |
622 | struct clk *__clk_lookup(const char *name); | 659 | struct clk *__clk_lookup(const char *name); |
623 | int __clk_mux_determine_rate(struct clk_hw *hw, | 660 | int __clk_mux_determine_rate(struct clk_hw *hw, |
@@ -690,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get( | |||
690 | { | 727 | { |
691 | return ERR_PTR(-ENOENT); | 728 | return ERR_PTR(-ENOENT); |
692 | } | 729 | } |
730 | static inline int of_clk_get_parent_count(struct device_node *np) | ||
731 | { | ||
732 | return 0; | ||
733 | } | ||
734 | static inline int of_clk_parent_fill(struct device_node *np, | ||
735 | const char **parents, unsigned int size) | ||
736 | { | ||
737 | return 0; | ||
738 | } | ||
693 | static inline const char *of_clk_get_parent_name(struct device_node *np, | 739 | static inline const char *of_clk_get_parent_name(struct device_node *np, |
694 | int index) | 740 | int index) |
695 | { | 741 | { |
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 7669f7618f39..1e6932222e11 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h | |||
@@ -164,6 +164,7 @@ extern void __iomem *at91_pmc_base; | |||
164 | #define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ | 164 | #define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ |
165 | #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ | 165 | #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ |
166 | #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ | 166 | #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ |
167 | #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ | ||
167 | #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ | 168 | #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ |
168 | 169 | ||
169 | #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ | 170 | #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ |
@@ -182,13 +183,18 @@ extern void __iomem *at91_pmc_base; | |||
182 | #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ | 183 | #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ |
183 | 184 | ||
184 | #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ | 185 | #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ |
185 | #define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */ | 186 | #define AT91_PMC_PCR_PID_MASK 0x3f |
186 | #define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ | 187 | #define AT91_PMC_PCR_GCKCSS_OFFSET 8 |
187 | #define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */ | 188 | #define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET) |
188 | #define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */ | 189 | #define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */ |
189 | #define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */ | 190 | #define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ |
190 | #define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */ | 191 | #define AT91_PMC_PCR_DIV_OFFSET 16 |
191 | #define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */ | 192 | #define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET) |
192 | #define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ | 193 | #define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */ |
194 | #define AT91_PMC_PCR_GCKDIV_OFFSET 20 | ||
195 | #define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET) | ||
196 | #define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */ | ||
197 | #define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ | ||
198 | #define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ | ||
193 | 199 | ||
194 | #endif | 200 | #endif |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 278dd279a7a8..7784b597e959 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -246,16 +246,13 @@ extern int clocksource_i8253_init(void); | |||
246 | #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ | 246 | #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ |
247 | OF_DECLARE_1(clksrc, name, compat, fn) | 247 | OF_DECLARE_1(clksrc, name, compat, fn) |
248 | 248 | ||
249 | #ifdef CONFIG_CLKSRC_OF | 249 | #ifdef CONFIG_CLKSRC_PROBE |
250 | extern void clocksource_of_init(void); | 250 | extern void clocksource_probe(void); |
251 | #else | 251 | #else |
252 | static inline void clocksource_of_init(void) {} | 252 | static inline void clocksource_probe(void) {} |
253 | #endif | 253 | #endif |
254 | 254 | ||
255 | #ifdef CONFIG_ACPI | 255 | #define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \ |
256 | void acpi_generic_timer_init(void); | 256 | ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn) |
257 | #else | ||
258 | static inline void acpi_generic_timer_init(void) { } | ||
259 | #endif | ||
260 | 257 | ||
261 | #endif /* _LINUX_CLOCKSOURCE_H */ | 258 | #endif /* _LINUX_CLOCKSOURCE_H */ |
diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, | |||
26 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, | 26 | extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, |
27 | unsigned int order_per_bit, | 27 | unsigned int order_per_bit, |
28 | struct cma **res_cma); | 28 | struct cma **res_cma); |
29 | extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); | 29 | extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); |
30 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); | 30 | extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); |
31 | #endif | 31 | #endif |
diff --git a/include/linux/com20020.h b/include/linux/com20020.h deleted file mode 100644 index 85898995b234..000000000000 --- a/include/linux/com20020.h +++ /dev/null | |||
@@ -1,145 +0,0 @@ | |||
1 | /* | ||
2 | * Linux ARCnet driver - COM20020 chipset support - function declarations | ||
3 | * | ||
4 | * Written 1997 by David Woodhouse. | ||
5 | * Written 1994-1999 by Avery Pennarun. | ||
6 | * Derived from skeleton.c by Donald Becker. | ||
7 | * | ||
8 | * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) | ||
9 | * for sponsoring the further development of this driver. | ||
10 | * | ||
11 | * ********************** | ||
12 | * | ||
13 | * The original copyright of skeleton.c was as follows: | ||
14 | * | ||
15 | * skeleton.c Written 1993 by Donald Becker. | ||
16 | * Copyright 1993 United States Government as represented by the | ||
17 | * Director, National Security Agency. This software may only be used | ||
18 | * and distributed according to the terms of the GNU General Public License as | ||
19 | * modified by SRC, incorporated herein by reference. | ||
20 | * | ||
21 | * ********************** | ||
22 | * | ||
23 | * For more details, see drivers/net/arcnet.c | ||
24 | * | ||
25 | * ********************** | ||
26 | */ | ||
27 | #ifndef __COM20020_H | ||
28 | #define __COM20020_H | ||
29 | |||
30 | int com20020_check(struct net_device *dev); | ||
31 | int com20020_found(struct net_device *dev, int shared); | ||
32 | extern const struct net_device_ops com20020_netdev_ops; | ||
33 | |||
34 | /* The number of low I/O ports used by the card. */ | ||
35 | #define ARCNET_TOTAL_SIZE 8 | ||
36 | |||
37 | /* various register addresses */ | ||
38 | #ifdef CONFIG_SA1100_CT6001 | ||
39 | #define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */ | ||
40 | #else | ||
41 | #define BUS_ALIGN 1 | ||
42 | #endif | ||
43 | |||
44 | #define PLX_PCI_MAX_CARDS 2 | ||
45 | |||
46 | struct com20020_pci_channel_map { | ||
47 | u32 bar; | ||
48 | u32 offset; | ||
49 | u32 size; /* 0x00 - auto, e.g. length of entire bar */ | ||
50 | }; | ||
51 | |||
52 | struct com20020_pci_card_info { | ||
53 | const char *name; | ||
54 | int devcount; | ||
55 | |||
56 | struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; | ||
57 | |||
58 | unsigned int flags; | ||
59 | }; | ||
60 | |||
61 | struct com20020_priv { | ||
62 | struct com20020_pci_card_info *ci; | ||
63 | struct list_head list_dev; | ||
64 | }; | ||
65 | |||
66 | struct com20020_dev { | ||
67 | struct list_head list; | ||
68 | struct net_device *dev; | ||
69 | |||
70 | struct com20020_priv *pci_priv; | ||
71 | int index; | ||
72 | }; | ||
73 | |||
74 | #define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ | ||
75 | #define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ | ||
76 | #define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */ | ||
77 | #define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */ | ||
78 | #define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */ | ||
79 | #define _ADDR_LO (ioaddr+BUS_ALIGN*3) | ||
80 | #define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */ | ||
81 | #define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */ | ||
82 | #define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */ | ||
83 | #define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG | ||
84 | or _SUBADR) */ | ||
85 | |||
86 | /* in the ADDR_HI register */ | ||
87 | #define RDDATAflag 0x80 /* next access is a read (not a write) */ | ||
88 | |||
89 | /* in the DIAGSTAT register */ | ||
90 | #define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */ | ||
91 | |||
92 | /* in the CONFIG register */ | ||
93 | #define RESETcfg 0x80 /* put card in reset state */ | ||
94 | #define TXENcfg 0x20 /* enable TX */ | ||
95 | |||
96 | /* in SETUP register */ | ||
97 | #define PROMISCset 0x10 /* enable RCV_ALL */ | ||
98 | #define P1MODE 0x80 /* enable P1-MODE for Backplane */ | ||
99 | #define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */ | ||
100 | |||
101 | /* COM2002x */ | ||
102 | #define SUB_TENTATIVE 0 /* tentative node ID */ | ||
103 | #define SUB_NODE 1 /* node ID */ | ||
104 | #define SUB_SETUP1 2 /* various options */ | ||
105 | #define SUB_TEST 3 /* test/diag register */ | ||
106 | |||
107 | /* COM20022 only */ | ||
108 | #define SUB_SETUP2 4 /* sundry options */ | ||
109 | #define SUB_BUSCTL 5 /* bus control options */ | ||
110 | #define SUB_DMACOUNT 6 /* DMA count options */ | ||
111 | |||
112 | #define SET_SUBADR(x) do { \ | ||
113 | if ((x) < 4) \ | ||
114 | { \ | ||
115 | lp->config = (lp->config & ~0x03) | (x); \ | ||
116 | SETCONF; \ | ||
117 | } \ | ||
118 | else \ | ||
119 | { \ | ||
120 | outb(x, _SUBADR); \ | ||
121 | } \ | ||
122 | } while (0) | ||
123 | |||
124 | #undef ARCRESET | ||
125 | #undef ASTATUS | ||
126 | #undef ACOMMAND | ||
127 | #undef AINTMASK | ||
128 | |||
129 | #define ARCRESET { outb(lp->config | 0x80, _CONFIG); \ | ||
130 | udelay(5); \ | ||
131 | outb(lp->config , _CONFIG); \ | ||
132 | } | ||
133 | #define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \ | ||
134 | udelay(5); \ | ||
135 | outb(0x18 , _CONFIG); \ | ||
136 | } | ||
137 | |||
138 | #define ASTATUS() inb(_STATUS) | ||
139 | #define ADIAGSTATUS() inb(_DIAGSTAT) | ||
140 | #define ACOMMAND(cmd) outb((cmd),_COMMAND) | ||
141 | #define AINTMASK(msk) outb((msk),_INTMASK) | ||
142 | |||
143 | #define SETCONF outb(lp->config, _CONFIG) | ||
144 | |||
145 | #endif /* __COM20020_H */ | ||
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index aa8f61cf3a19..4cd4ddf64cc7 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
@@ -15,7 +15,8 @@ | |||
15 | /* For more detailed tracepoint output */ | 15 | /* For more detailed tracepoint output */ |
16 | #define COMPACT_NO_SUITABLE_PAGE 5 | 16 | #define COMPACT_NO_SUITABLE_PAGE 5 |
17 | #define COMPACT_NOT_SUITABLE_ZONE 6 | 17 | #define COMPACT_NOT_SUITABLE_ZONE 6 |
18 | /* When adding new state, please change compaction_status_string, too */ | 18 | #define COMPACT_CONTENDED 7 |
19 | /* When adding new states, please adjust include/trace/events/compaction.h */ | ||
19 | 20 | ||
20 | /* Used to signal whether compaction detected need_sched() or lock contention */ | 21 | /* Used to signal whether compaction detected need_sched() or lock contention */ |
21 | /* No contention detected */ | 22 | /* No contention detected */ |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..22ab246feed3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -205,11 +205,31 @@ | |||
205 | 205 | ||
206 | #if GCC_VERSION >= 40600 | 206 | #if GCC_VERSION >= 40600 |
207 | /* | 207 | /* |
208 | * Tell the optimizer that something else uses this function or variable. | 208 | * When used with Link Time Optimization, gcc can optimize away C functions or |
209 | * variables which are referenced only from assembly code. __visible tells the | ||
210 | * optimizer that something else uses this function or variable, thus preventing | ||
211 | * this. | ||
209 | */ | 212 | */ |
210 | #define __visible __attribute__((externally_visible)) | 213 | #define __visible __attribute__((externally_visible)) |
211 | #endif | 214 | #endif |
212 | 215 | ||
216 | |||
217 | #if GCC_VERSION >= 40900 && !defined(__CHECKER__) | ||
218 | /* | ||
219 | * __assume_aligned(n, k): Tell the optimizer that the returned | ||
220 | * pointer can be assumed to be k modulo n. The second argument is | ||
221 | * optional (default 0), so we use a variadic macro to make the | ||
222 | * shorthand. | ||
223 | * | ||
224 | * Beware: Do not apply this to functions which may return | ||
225 | * ERR_PTRs. Also, it is probably unwise to apply it to functions | ||
226 | * returning extra information in the low bits (but in that case the | ||
227 | * compiler should see some alignment anyway, when the return value is | ||
228 | * massaged by 'flags = ptr & 3; ptr &= ~3;'). | ||
229 | */ | ||
230 | #define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) | ||
231 | #endif | ||
232 | |||
213 | /* | 233 | /* |
214 | * GCC 'asm goto' miscompiles certain code sequences: | 234 | * GCC 'asm goto' miscompiles certain code sequences: |
215 | * | 235 | * |
@@ -237,12 +257,25 @@ | |||
237 | #define KASAN_ABI_VERSION 3 | 257 | #define KASAN_ABI_VERSION 3 |
238 | #endif | 258 | #endif |
239 | 259 | ||
260 | #if GCC_VERSION >= 40902 | ||
261 | /* | ||
262 | * Tell the compiler that address safety instrumentation (KASAN) | ||
263 | * should not be applied to that function. | ||
264 | * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 | ||
265 | */ | ||
266 | #define __no_sanitize_address __attribute__((no_sanitize_address)) | ||
267 | #endif | ||
268 | |||
240 | #endif /* gcc version >= 40000 specific checks */ | 269 | #endif /* gcc version >= 40000 specific checks */ |
241 | 270 | ||
242 | #if !defined(__noclone) | 271 | #if !defined(__noclone) |
243 | #define __noclone /* not needed */ | 272 | #define __noclone /* not needed */ |
244 | #endif | 273 | #endif |
245 | 274 | ||
275 | #if !defined(__no_sanitize_address) | ||
276 | #define __no_sanitize_address | ||
277 | #endif | ||
278 | |||
246 | /* | 279 | /* |
247 | * A trick to suppress uninitialized variable warning without generating any | 280 | * A trick to suppress uninitialized variable warning without generating any |
248 | * code | 281 | * code |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..4dac1036594f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -56,7 +56,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
56 | #include <linux/compiler-gcc.h> | 56 | #include <linux/compiler-gcc.h> |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #ifdef CC_USING_HOTPATCH | 59 | #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) |
60 | #define notrace __attribute__((hotpatch(0,0))) | 60 | #define notrace __attribute__((hotpatch(0,0))) |
61 | #else | 61 | #else |
62 | #define notrace __attribute__((no_instrument_function)) | 62 | #define notrace __attribute__((no_instrument_function)) |
@@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
198 | 198 | ||
199 | #include <uapi/linux/types.h> | 199 | #include <uapi/linux/types.h> |
200 | 200 | ||
201 | static __always_inline void __read_once_size(const volatile void *p, void *res, int size) | 201 | #define __READ_ONCE_SIZE \ |
202 | ({ \ | ||
203 | switch (size) { \ | ||
204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ | ||
205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ | ||
206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ | ||
207 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ | ||
208 | default: \ | ||
209 | barrier(); \ | ||
210 | __builtin_memcpy((void *)res, (const void *)p, size); \ | ||
211 | barrier(); \ | ||
212 | } \ | ||
213 | }) | ||
214 | |||
215 | static __always_inline | ||
216 | void __read_once_size(const volatile void *p, void *res, int size) | ||
202 | { | 217 | { |
203 | switch (size) { | 218 | __READ_ONCE_SIZE; |
204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | 219 | } |
205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | 220 | |
206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | 221 | #ifdef CONFIG_KASAN |
207 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | 222 | /* |
208 | default: | 223 | * This function is not 'inline' because __no_sanitize_address confilcts |
209 | barrier(); | 224 | * with inlining. Attempt to inline it may cause a build failure. |
210 | __builtin_memcpy((void *)res, (const void *)p, size); | 225 | * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 |
211 | barrier(); | 226 | * '__maybe_unused' allows us to avoid defined-but-not-used warnings. |
212 | } | 227 | */ |
228 | static __no_sanitize_address __maybe_unused | ||
229 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | ||
230 | { | ||
231 | __READ_ONCE_SIZE; | ||
213 | } | 232 | } |
233 | #else | ||
234 | static __always_inline | ||
235 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | ||
236 | { | ||
237 | __READ_ONCE_SIZE; | ||
238 | } | ||
239 | #endif | ||
214 | 240 | ||
215 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) | 241 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) |
216 | { | 242 | { |
@@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
248 | * required ordering. | 274 | * required ordering. |
249 | */ | 275 | */ |
250 | 276 | ||
251 | #define READ_ONCE(x) \ | 277 | #define __READ_ONCE(x, check) \ |
252 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | 278 | ({ \ |
279 | union { typeof(x) __val; char __c[1]; } __u; \ | ||
280 | if (check) \ | ||
281 | __read_once_size(&(x), __u.__c, sizeof(x)); \ | ||
282 | else \ | ||
283 | __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ | ||
284 | __u.__val; \ | ||
285 | }) | ||
286 | #define READ_ONCE(x) __READ_ONCE(x, 1) | ||
287 | |||
288 | /* | ||
289 | * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need | ||
290 | * to hide memory access from KASAN. | ||
291 | */ | ||
292 | #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) | ||
253 | 293 | ||
254 | #define WRITE_ONCE(x, val) \ | 294 | #define WRITE_ONCE(x, val) \ |
255 | ({ \ | 295 | ({ \ |
@@ -259,22 +299,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
259 | __u.__val; \ | 299 | __u.__val; \ |
260 | }) | 300 | }) |
261 | 301 | ||
262 | /** | ||
263 | * READ_ONCE_CTRL - Read a value heading a control dependency | ||
264 | * @x: The value to be read, heading the control dependency | ||
265 | * | ||
266 | * Control dependencies are tricky. See Documentation/memory-barriers.txt | ||
267 | * for important information on how to use them. Note that in many cases, | ||
268 | * use of smp_load_acquire() will be much simpler. Control dependencies | ||
269 | * should be avoided except on the hottest of hotpaths. | ||
270 | */ | ||
271 | #define READ_ONCE_CTRL(x) \ | ||
272 | ({ \ | ||
273 | typeof(x) __val = READ_ONCE(x); \ | ||
274 | smp_read_barrier_depends(); /* Enforce control dependency. */ \ | ||
275 | __val; \ | ||
276 | }) | ||
277 | |||
278 | #endif /* __KERNEL__ */ | 302 | #endif /* __KERNEL__ */ |
279 | 303 | ||
280 | #endif /* __ASSEMBLY__ */ | 304 | #endif /* __ASSEMBLY__ */ |
@@ -393,6 +417,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
393 | #define __visible | 417 | #define __visible |
394 | #endif | 418 | #endif |
395 | 419 | ||
420 | /* | ||
421 | * Assume alignment of return value. | ||
422 | */ | ||
423 | #ifndef __assume_aligned | ||
424 | #define __assume_aligned(a, ...) | ||
425 | #endif | ||
426 | |||
427 | |||
396 | /* Are two types/vars the same type (ignoring qualifiers)? */ | 428 | /* Are two types/vars the same type (ignoring qualifiers)? */ |
397 | #ifndef __same_type | 429 | #ifndef __same_type |
398 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | 430 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 63a36e89d0eb..758a029011b1 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
@@ -125,86 +125,33 @@ struct configfs_attribute { | |||
125 | const char *ca_name; | 125 | const char *ca_name; |
126 | struct module *ca_owner; | 126 | struct module *ca_owner; |
127 | umode_t ca_mode; | 127 | umode_t ca_mode; |
128 | ssize_t (*show)(struct config_item *, char *); | ||
129 | ssize_t (*store)(struct config_item *, const char *, size_t); | ||
128 | }; | 130 | }; |
129 | 131 | ||
130 | /* | 132 | #define CONFIGFS_ATTR(_pfx, _name) \ |
131 | * Users often need to create attribute structures for their configurable | 133 | static struct configfs_attribute _pfx##attr_##_name = { \ |
132 | * attributes, containing a configfs_attribute member and function pointers | 134 | .ca_name = __stringify(_name), \ |
133 | * for the show() and store() operations on that attribute. If they don't | 135 | .ca_mode = S_IRUGO | S_IWUSR, \ |
134 | * need anything else on the extended attribute structure, they can use | 136 | .ca_owner = THIS_MODULE, \ |
135 | * this macro to define it The argument _item is the name of the | 137 | .show = _pfx##_name##_show, \ |
136 | * config_item structure. | 138 | .store = _pfx##_name##_store, \ |
137 | */ | ||
138 | #define CONFIGFS_ATTR_STRUCT(_item) \ | ||
139 | struct _item##_attribute { \ | ||
140 | struct configfs_attribute attr; \ | ||
141 | ssize_t (*show)(struct _item *, char *); \ | ||
142 | ssize_t (*store)(struct _item *, const char *, size_t); \ | ||
143 | } | 139 | } |
144 | 140 | ||
145 | /* | 141 | #define CONFIGFS_ATTR_RO(_pfx, _name) \ |
146 | * With the extended attribute structure, users can use this macro | 142 | static struct configfs_attribute _pfx##attr_##_name = { \ |
147 | * (similar to sysfs' __ATTR) to make defining attributes easier. | 143 | .ca_name = __stringify(_name), \ |
148 | * An example: | 144 | .ca_mode = S_IRUGO, \ |
149 | * #define MYITEM_ATTR(_name, _mode, _show, _store) \ | 145 | .ca_owner = THIS_MODULE, \ |
150 | * struct myitem_attribute childless_attr_##_name = \ | 146 | .show = _pfx##_name##_show, \ |
151 | * __CONFIGFS_ATTR(_name, _mode, _show, _store) | ||
152 | */ | ||
153 | #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ | ||
154 | { \ | ||
155 | .attr = { \ | ||
156 | .ca_name = __stringify(_name), \ | ||
157 | .ca_mode = _mode, \ | ||
158 | .ca_owner = THIS_MODULE, \ | ||
159 | }, \ | ||
160 | .show = _show, \ | ||
161 | .store = _store, \ | ||
162 | } | ||
163 | /* Here is a readonly version, only requiring a show() operation */ | ||
164 | #define __CONFIGFS_ATTR_RO(_name, _show) \ | ||
165 | { \ | ||
166 | .attr = { \ | ||
167 | .ca_name = __stringify(_name), \ | ||
168 | .ca_mode = 0444, \ | ||
169 | .ca_owner = THIS_MODULE, \ | ||
170 | }, \ | ||
171 | .show = _show, \ | ||
172 | } | 147 | } |
173 | 148 | ||
174 | /* | 149 | #define CONFIGFS_ATTR_WO(_pfx, _name) \ |
175 | * With these extended attributes, the simple show_attribute() and | 150 | static struct configfs_attribute _pfx##attr_##_name = { \ |
176 | * store_attribute() operations need to call the show() and store() of the | 151 | .ca_name = __stringify(_name), \ |
177 | * attributes. This is a common pattern, so we provide a macro to define | 152 | .ca_mode = S_IWUSR, \ |
178 | * them. The argument _item is the name of the config_item structure. | 153 | .ca_owner = THIS_MODULE, \ |
179 | * This macro expects the attributes to be named "struct <name>_attribute" | 154 | .store = _pfx##_name##_store, \ |
180 | * and the function to_<name>() to exist; | ||
181 | */ | ||
182 | #define CONFIGFS_ATTR_OPS(_item) \ | ||
183 | static ssize_t _item##_attr_show(struct config_item *item, \ | ||
184 | struct configfs_attribute *attr, \ | ||
185 | char *page) \ | ||
186 | { \ | ||
187 | struct _item *_item = to_##_item(item); \ | ||
188 | struct _item##_attribute *_item##_attr = \ | ||
189 | container_of(attr, struct _item##_attribute, attr); \ | ||
190 | ssize_t ret = 0; \ | ||
191 | \ | ||
192 | if (_item##_attr->show) \ | ||
193 | ret = _item##_attr->show(_item, page); \ | ||
194 | return ret; \ | ||
195 | } \ | ||
196 | static ssize_t _item##_attr_store(struct config_item *item, \ | ||
197 | struct configfs_attribute *attr, \ | ||
198 | const char *page, size_t count) \ | ||
199 | { \ | ||
200 | struct _item *_item = to_##_item(item); \ | ||
201 | struct _item##_attribute *_item##_attr = \ | ||
202 | container_of(attr, struct _item##_attribute, attr); \ | ||
203 | ssize_t ret = -EINVAL; \ | ||
204 | \ | ||
205 | if (_item##_attr->store) \ | ||
206 | ret = _item##_attr->store(_item, page, count); \ | ||
207 | return ret; \ | ||
208 | } | 155 | } |
209 | 156 | ||
210 | /* | 157 | /* |
@@ -223,8 +170,6 @@ static ssize_t _item##_attr_store(struct config_item *item, \ | |||
223 | */ | 170 | */ |
224 | struct configfs_item_operations { | 171 | struct configfs_item_operations { |
225 | void (*release)(struct config_item *); | 172 | void (*release)(struct config_item *); |
226 | ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *); | ||
227 | ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t); | ||
228 | int (*allow_link)(struct config_item *src, struct config_item *target); | 173 | int (*allow_link)(struct config_item *src, struct config_item *target); |
229 | int (*drop_link)(struct config_item *src, struct config_item *target); | 174 | int (*drop_link)(struct config_item *src, struct config_item *target); |
230 | }; | 175 | }; |
@@ -252,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro | |||
252 | int configfs_register_subsystem(struct configfs_subsystem *subsys); | 197 | int configfs_register_subsystem(struct configfs_subsystem *subsys); |
253 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); | 198 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); |
254 | 199 | ||
200 | int configfs_register_group(struct config_group *parent_group, | ||
201 | struct config_group *group); | ||
202 | void configfs_unregister_group(struct config_group *group); | ||
203 | |||
204 | struct config_group * | ||
205 | configfs_register_default_group(struct config_group *parent_group, | ||
206 | const char *name, | ||
207 | struct config_item_type *item_type); | ||
208 | void configfs_unregister_default_group(struct config_group *group); | ||
209 | |||
255 | /* These functions can sleep and can alloc with GFP_KERNEL */ | 210 | /* These functions can sleep and can alloc with GFP_KERNEL */ |
256 | /* WARNING: These cannot be called underneath configfs callbacks!! */ | 211 | /* WARNING: These cannot be called underneath configfs callbacks!! */ |
257 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); | 212 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); |
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 008fc67d0d96..68b575afe5f5 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h | |||
@@ -10,6 +10,10 @@ | |||
10 | #ifdef CONFIG_CONTEXT_TRACKING | 10 | #ifdef CONFIG_CONTEXT_TRACKING |
11 | extern void context_tracking_cpu_set(int cpu); | 11 | extern void context_tracking_cpu_set(int cpu); |
12 | 12 | ||
13 | /* Called with interrupts disabled. */ | ||
14 | extern void __context_tracking_enter(enum ctx_state state); | ||
15 | extern void __context_tracking_exit(enum ctx_state state); | ||
16 | |||
13 | extern void context_tracking_enter(enum ctx_state state); | 17 | extern void context_tracking_enter(enum ctx_state state); |
14 | extern void context_tracking_exit(enum ctx_state state); | 18 | extern void context_tracking_exit(enum ctx_state state); |
15 | extern void context_tracking_user_enter(void); | 19 | extern void context_tracking_user_enter(void); |
@@ -18,13 +22,13 @@ extern void context_tracking_user_exit(void); | |||
18 | static inline void user_enter(void) | 22 | static inline void user_enter(void) |
19 | { | 23 | { |
20 | if (context_tracking_is_enabled()) | 24 | if (context_tracking_is_enabled()) |
21 | context_tracking_user_enter(); | 25 | context_tracking_enter(CONTEXT_USER); |
22 | 26 | ||
23 | } | 27 | } |
24 | static inline void user_exit(void) | 28 | static inline void user_exit(void) |
25 | { | 29 | { |
26 | if (context_tracking_is_enabled()) | 30 | if (context_tracking_is_enabled()) |
27 | context_tracking_user_exit(); | 31 | context_tracking_exit(CONTEXT_USER); |
28 | } | 32 | } |
29 | 33 | ||
30 | static inline enum ctx_state exception_enter(void) | 34 | static inline enum ctx_state exception_enter(void) |
@@ -88,13 +92,13 @@ static inline void guest_enter(void) | |||
88 | current->flags |= PF_VCPU; | 92 | current->flags |= PF_VCPU; |
89 | 93 | ||
90 | if (context_tracking_is_enabled()) | 94 | if (context_tracking_is_enabled()) |
91 | context_tracking_enter(CONTEXT_GUEST); | 95 | __context_tracking_enter(CONTEXT_GUEST); |
92 | } | 96 | } |
93 | 97 | ||
94 | static inline void guest_exit(void) | 98 | static inline void guest_exit(void) |
95 | { | 99 | { |
96 | if (context_tracking_is_enabled()) | 100 | if (context_tracking_is_enabled()) |
97 | context_tracking_exit(CONTEXT_GUEST); | 101 | __context_tracking_exit(CONTEXT_GUEST); |
98 | 102 | ||
99 | if (vtime_accounting_enabled()) | 103 | if (vtime_accounting_enabled()) |
100 | vtime_guest_exit(current); | 104 | vtime_guest_exit(current); |
diff --git a/include/linux/coresight.h b/include/linux/coresight.h index c69e1b932809..a7cabfa23b55 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h | |||
@@ -207,7 +207,7 @@ struct coresight_ops_link { | |||
207 | * Operations available for sources. | 207 | * Operations available for sources. |
208 | * @trace_id: returns the value of the component's trace ID as known | 208 | * @trace_id: returns the value of the component's trace ID as known |
209 | to the HW. | 209 | to the HW. |
210 | * @enable: enables tracing from a source. | 210 | * @enable: enables tracing for a source. |
211 | * @disable: disables tracing for a source. | 211 | * @disable: disables tracing for a source. |
212 | */ | 212 | */ |
213 | struct coresight_ops_source { | 213 | struct coresight_ops_source { |
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h new file mode 100644 index 000000000000..363da78c4f64 --- /dev/null +++ b/include/linux/count_zeros.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* Count leading and trailing zeros functions | ||
2 | * | ||
3 | * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_BITOPS_COUNT_ZEROS_H_ | ||
13 | #define _LINUX_BITOPS_COUNT_ZEROS_H_ | ||
14 | |||
15 | #include <asm/bitops.h> | ||
16 | |||
17 | /** | ||
18 | * count_leading_zeros - Count the number of zeros from the MSB back | ||
19 | * @x: The value | ||
20 | * | ||
21 | * Count the number of leading zeros from the MSB going towards the LSB in @x. | ||
22 | * | ||
23 | * If the MSB of @x is set, the result is 0. | ||
24 | * If only the LSB of @x is set, then the result is BITS_PER_LONG-1. | ||
25 | * If @x is 0 then the result is COUNT_LEADING_ZEROS_0. | ||
26 | */ | ||
27 | static inline int count_leading_zeros(unsigned long x) | ||
28 | { | ||
29 | if (sizeof(x) == 4) | ||
30 | return BITS_PER_LONG - fls(x); | ||
31 | else | ||
32 | return BITS_PER_LONG - fls64(x); | ||
33 | } | ||
34 | |||
35 | #define COUNT_LEADING_ZEROS_0 BITS_PER_LONG | ||
36 | |||
37 | /** | ||
38 | * count_trailing_zeros - Count the number of zeros from the LSB forwards | ||
39 | * @x: The value | ||
40 | * | ||
41 | * Count the number of trailing zeros from the LSB going towards the MSB in @x. | ||
42 | * | ||
43 | * If the LSB of @x is set, the result is 0. | ||
44 | * If only the MSB of @x is set, then the result is BITS_PER_LONG-1. | ||
45 | * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0. | ||
46 | */ | ||
47 | static inline int count_trailing_zeros(unsigned long x) | ||
48 | { | ||
49 | #define COUNT_TRAILING_ZEROS_0 (-1) | ||
50 | |||
51 | if (sizeof(x) == 4) | ||
52 | return ffs(x); | ||
53 | else | ||
54 | return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0; | ||
55 | } | ||
56 | |||
57 | #endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */ | ||
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 23c30bdcca86..d2ca8c38f9c4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -228,7 +228,6 @@ extern struct bus_type cpu_subsys; | |||
228 | extern void cpu_hotplug_begin(void); | 228 | extern void cpu_hotplug_begin(void); |
229 | extern void cpu_hotplug_done(void); | 229 | extern void cpu_hotplug_done(void); |
230 | extern void get_online_cpus(void); | 230 | extern void get_online_cpus(void); |
231 | extern bool try_get_online_cpus(void); | ||
232 | extern void put_online_cpus(void); | 231 | extern void put_online_cpus(void); |
233 | extern void cpu_hotplug_disable(void); | 232 | extern void cpu_hotplug_disable(void); |
234 | extern void cpu_hotplug_enable(void); | 233 | extern void cpu_hotplug_enable(void); |
@@ -246,7 +245,6 @@ int cpu_down(unsigned int cpu); | |||
246 | static inline void cpu_hotplug_begin(void) {} | 245 | static inline void cpu_hotplug_begin(void) {} |
247 | static inline void cpu_hotplug_done(void) {} | 246 | static inline void cpu_hotplug_done(void) {} |
248 | #define get_online_cpus() do { } while (0) | 247 | #define get_online_cpus() do { } while (0) |
249 | #define try_get_online_cpus() true | ||
250 | #define put_online_cpus() do { } while (0) | 248 | #define put_online_cpus() do { } while (0) |
251 | #define cpu_hotplug_disable() do { } while (0) | 249 | #define cpu_hotplug_disable() do { } while (0) |
252 | #define cpu_hotplug_enable() do { } while (0) | 250 | #define cpu_hotplug_enable() do { } while (0) |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index dca22de98d94..ef4c5b1a860f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -65,7 +65,6 @@ struct cpufreq_policy { | |||
65 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs | 65 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs |
66 | should set cpufreq */ | 66 | should set cpufreq */ |
67 | unsigned int cpu; /* cpu managing this policy, must be online */ | 67 | unsigned int cpu; /* cpu managing this policy, must be online */ |
68 | unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */ | ||
69 | 68 | ||
70 | struct clk *clk; | 69 | struct clk *clk; |
71 | struct cpufreq_cpuinfo cpuinfo;/* see above */ | 70 | struct cpufreq_cpuinfo cpuinfo;/* see above */ |
@@ -149,10 +148,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy) | |||
149 | 148 | ||
150 | /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ | 149 | /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ |
151 | extern struct kobject *cpufreq_global_kobject; | 150 | extern struct kobject *cpufreq_global_kobject; |
152 | int cpufreq_get_global_kobject(void); | ||
153 | void cpufreq_put_global_kobject(void); | ||
154 | int cpufreq_sysfs_create_file(const struct attribute *attr); | ||
155 | void cpufreq_sysfs_remove_file(const struct attribute *attr); | ||
156 | 151 | ||
157 | #ifdef CONFIG_CPU_FREQ | 152 | #ifdef CONFIG_CPU_FREQ |
158 | unsigned int cpufreq_get(unsigned int cpu); | 153 | unsigned int cpufreq_get(unsigned int cpu); |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b357997cac5..85a868ccb493 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -93,7 +93,7 @@ extern int current_cpuset_is_being_rebound(void); | |||
93 | 93 | ||
94 | extern void rebuild_sched_domains(void); | 94 | extern void rebuild_sched_domains(void); |
95 | 95 | ||
96 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | 96 | extern void cpuset_print_current_mems_allowed(void); |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * read_mems_allowed_begin is required when making decisions involving | 99 | * read_mems_allowed_begin is required when making decisions involving |
@@ -104,6 +104,9 @@ extern void cpuset_print_task_mems_allowed(struct task_struct *p); | |||
104 | */ | 104 | */ |
105 | static inline unsigned int read_mems_allowed_begin(void) | 105 | static inline unsigned int read_mems_allowed_begin(void) |
106 | { | 106 | { |
107 | if (!cpusets_enabled()) | ||
108 | return 0; | ||
109 | |||
107 | return read_seqcount_begin(¤t->mems_allowed_seq); | 110 | return read_seqcount_begin(¤t->mems_allowed_seq); |
108 | } | 111 | } |
109 | 112 | ||
@@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void) | |||
115 | */ | 118 | */ |
116 | static inline bool read_mems_allowed_retry(unsigned int seq) | 119 | static inline bool read_mems_allowed_retry(unsigned int seq) |
117 | { | 120 | { |
121 | if (!cpusets_enabled()) | ||
122 | return false; | ||
123 | |||
118 | return read_seqcount_retry(¤t->mems_allowed_seq, seq); | 124 | return read_seqcount_retry(¤t->mems_allowed_seq, seq); |
119 | } | 125 | } |
120 | 126 | ||
@@ -219,7 +225,7 @@ static inline void rebuild_sched_domains(void) | |||
219 | partition_sched_domains(1, NULL, NULL); | 225 | partition_sched_domains(1, NULL, NULL); |
220 | } | 226 | } |
221 | 227 | ||
222 | static inline void cpuset_print_task_mems_allowed(struct task_struct *p) | 228 | static inline void cpuset_print_current_mems_allowed(void) |
223 | { | 229 | { |
224 | } | 230 | } |
225 | 231 | ||
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 221025423e6c..61d042bbbf60 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -202,16 +202,16 @@ struct dccp_service_list { | |||
202 | #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) | 202 | #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) |
203 | #define DCCP_SERVICE_CODE_IS_ABSENT 0 | 203 | #define DCCP_SERVICE_CODE_IS_ABSENT 0 |
204 | 204 | ||
205 | static inline int dccp_list_has_service(const struct dccp_service_list *sl, | 205 | static inline bool dccp_list_has_service(const struct dccp_service_list *sl, |
206 | const __be32 service) | 206 | const __be32 service) |
207 | { | 207 | { |
208 | if (likely(sl != NULL)) { | 208 | if (likely(sl != NULL)) { |
209 | u32 i = sl->dccpsl_nr; | 209 | u32 i = sl->dccpsl_nr; |
210 | while (i--) | 210 | while (i--) |
211 | if (sl->dccpsl_list[i] == service) | 211 | if (sl->dccpsl_list[i] == service) |
212 | return 1; | 212 | return true; |
213 | } | 213 | } |
214 | return 0; | 214 | return false; |
215 | } | 215 | } |
216 | 216 | ||
217 | struct dccp_ackvec; | 217 | struct dccp_ackvec; |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 9beb636b97eb..19c066dce1da 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -79,6 +79,8 @@ struct dentry *debugfs_create_u32(const char *name, umode_t mode, | |||
79 | struct dentry *parent, u32 *value); | 79 | struct dentry *parent, u32 *value); |
80 | struct dentry *debugfs_create_u64(const char *name, umode_t mode, | 80 | struct dentry *debugfs_create_u64(const char *name, umode_t mode, |
81 | struct dentry *parent, u64 *value); | 81 | struct dentry *parent, u64 *value); |
82 | struct dentry *debugfs_create_ulong(const char *name, umode_t mode, | ||
83 | struct dentry *parent, unsigned long *value); | ||
82 | struct dentry *debugfs_create_x8(const char *name, umode_t mode, | 84 | struct dentry *debugfs_create_x8(const char *name, umode_t mode, |
83 | struct dentry *parent, u8 *value); | 85 | struct dentry *parent, u8 *value); |
84 | struct dentry *debugfs_create_x16(const char *name, umode_t mode, | 86 | struct dentry *debugfs_create_x16(const char *name, umode_t mode, |
@@ -92,7 +94,7 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode, | |||
92 | struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, | 94 | struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, |
93 | struct dentry *parent, atomic_t *value); | 95 | struct dentry *parent, atomic_t *value); |
94 | struct dentry *debugfs_create_bool(const char *name, umode_t mode, | 96 | struct dentry *debugfs_create_bool(const char *name, umode_t mode, |
95 | struct dentry *parent, u32 *value); | 97 | struct dentry *parent, bool *value); |
96 | 98 | ||
97 | struct dentry *debugfs_create_blob(const char *name, umode_t mode, | 99 | struct dentry *debugfs_create_blob(const char *name, umode_t mode, |
98 | struct dentry *parent, | 100 | struct dentry *parent, |
@@ -243,7 +245,7 @@ static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t m | |||
243 | 245 | ||
244 | static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, | 246 | static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, |
245 | struct dentry *parent, | 247 | struct dentry *parent, |
246 | u32 *value) | 248 | bool *value) |
247 | { | 249 | { |
248 | return ERR_PTR(-ENODEV); | 250 | return ERR_PTR(-ENODEV); |
249 | } | 251 | } |
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h new file mode 100644 index 000000000000..7adf6cc4b305 --- /dev/null +++ b/include/linux/devfreq_cooling.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * devfreq_cooling: Thermal cooling device implementation for devices using | ||
3 | * devfreq | ||
4 | * | ||
5 | * Copyright (C) 2014-2015 ARM Limited | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #ifndef __DEVFREQ_COOLING_H__ | ||
18 | #define __DEVFREQ_COOLING_H__ | ||
19 | |||
20 | #include <linux/devfreq.h> | ||
21 | #include <linux/thermal.h> | ||
22 | |||
23 | #ifdef CONFIG_DEVFREQ_THERMAL | ||
24 | |||
25 | /** | ||
26 | * struct devfreq_cooling_power - Devfreq cooling power ops | ||
27 | * @get_static_power: Take voltage, in mV, and return the static power | ||
28 | * in mW. If NULL, the static power is assumed | ||
29 | * to be 0. | ||
30 | * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and | ||
31 | * return the dynamic power draw in mW. If NULL, | ||
32 | * a simple power model is used. | ||
33 | * @dyn_power_coeff: Coefficient for the simple dynamic power model in | ||
34 | * mW/(MHz mV mV). | ||
35 | * If get_dynamic_power() is NULL, then the | ||
36 | * dynamic power is calculated as | ||
37 | * @dyn_power_coeff * frequency * voltage^2 | ||
38 | */ | ||
39 | struct devfreq_cooling_power { | ||
40 | unsigned long (*get_static_power)(unsigned long voltage); | ||
41 | unsigned long (*get_dynamic_power)(unsigned long freq, | ||
42 | unsigned long voltage); | ||
43 | unsigned long dyn_power_coeff; | ||
44 | }; | ||
45 | |||
46 | struct thermal_cooling_device * | ||
47 | of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, | ||
48 | struct devfreq_cooling_power *dfc_power); | ||
49 | struct thermal_cooling_device * | ||
50 | of_devfreq_cooling_register(struct device_node *np, struct devfreq *df); | ||
51 | struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df); | ||
52 | void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); | ||
53 | |||
54 | #else /* !CONFIG_DEVFREQ_THERMAL */ | ||
55 | |||
56 | struct thermal_cooling_device * | ||
57 | of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, | ||
58 | struct devfreq_cooling_power *dfc_power) | ||
59 | { | ||
60 | return ERR_PTR(-EINVAL); | ||
61 | } | ||
62 | |||
63 | static inline struct thermal_cooling_device * | ||
64 | of_devfreq_cooling_register(struct device_node *np, struct devfreq *df) | ||
65 | { | ||
66 | return ERR_PTR(-EINVAL); | ||
67 | } | ||
68 | |||
69 | static inline struct thermal_cooling_device * | ||
70 | devfreq_cooling_register(struct devfreq *df) | ||
71 | { | ||
72 | return ERR_PTR(-EINVAL); | ||
73 | } | ||
74 | |||
75 | static inline void | ||
76 | devfreq_cooling_unregister(struct thermal_cooling_device *dfc) | ||
77 | { | ||
78 | } | ||
79 | |||
80 | #endif /* CONFIG_DEVFREQ_THERMAL */ | ||
81 | #endif /* __DEVFREQ_COOLING_H__ */ | ||
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 76d23fa8c7d3..ec1c61c87d89 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -79,8 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, | |||
79 | 79 | ||
80 | typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); | 80 | typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); |
81 | 81 | ||
82 | typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, | 82 | typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, |
83 | unsigned long arg); | 83 | struct block_device **bdev, fmode_t *mode); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * These iteration functions are typically used to check (and combine) | 86 | * These iteration functions are typically used to check (and combine) |
@@ -156,7 +156,7 @@ struct target_type { | |||
156 | dm_resume_fn resume; | 156 | dm_resume_fn resume; |
157 | dm_status_fn status; | 157 | dm_status_fn status; |
158 | dm_message_fn message; | 158 | dm_message_fn message; |
159 | dm_ioctl_fn ioctl; | 159 | dm_prepare_ioctl_fn prepare_ioctl; |
160 | dm_busy_fn busy; | 160 | dm_busy_fn busy; |
161 | dm_iterate_devices_fn iterate_devices; | 161 | dm_iterate_devices_fn iterate_devices; |
162 | dm_io_hints_fn io_hints; | 162 | dm_io_hints_fn io_hints; |
diff --git a/include/linux/device.h b/include/linux/device.h index 5d7bc6349930..b8f411b57dcb 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -604,13 +604,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res); | |||
604 | typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); | 604 | typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); |
605 | 605 | ||
606 | #ifdef CONFIG_DEBUG_DEVRES | 606 | #ifdef CONFIG_DEBUG_DEVRES |
607 | extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, | 607 | extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, |
608 | const char *name); | 608 | int nid, const char *name); |
609 | #define devres_alloc(release, size, gfp) \ | 609 | #define devres_alloc(release, size, gfp) \ |
610 | __devres_alloc(release, size, gfp, #release) | 610 | __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) |
611 | #define devres_alloc_node(release, size, gfp, nid) \ | ||
612 | __devres_alloc_node(release, size, gfp, nid, #release) | ||
611 | #else | 613 | #else |
612 | extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); | 614 | extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, |
615 | int nid); | ||
616 | static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) | ||
617 | { | ||
618 | return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); | ||
619 | } | ||
613 | #endif | 620 | #endif |
621 | |||
614 | extern void devres_for_each_res(struct device *dev, dr_release_t release, | 622 | extern void devres_for_each_res(struct device *dev, dr_release_t release, |
615 | dr_match_t match, void *match_data, | 623 | dr_match_t match, void *match_data, |
616 | void (*fn)(struct device *, void *, void *), | 624 | void (*fn)(struct device *, void *, void *), |
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h | |||
@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
111 | return ret; | 111 | return ret; |
112 | } | 112 | } |
113 | 113 | ||
114 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, | 114 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
115 | unsigned int order); | 115 | unsigned int order); |
116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, | 116 | bool dma_release_from_contiguous(struct device *dev, struct page *pages, |
117 | int count); | 117 | int count); |
@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, | |||
144 | } | 144 | } |
145 | 145 | ||
146 | static inline | 146 | static inline |
147 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, | 147 | struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, |
148 | unsigned int order) | 148 | unsigned int order) |
149 | { | 149 | { |
150 | return NULL; | 150 | return NULL; |
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h new file mode 100644 index 000000000000..fc481037478a --- /dev/null +++ b/include/linux/dma-iommu.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014-2015 ARM Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef __DMA_IOMMU_H | ||
17 | #define __DMA_IOMMU_H | ||
18 | |||
19 | #ifdef __KERNEL__ | ||
20 | #include <asm/errno.h> | ||
21 | |||
22 | #ifdef CONFIG_IOMMU_DMA | ||
23 | #include <linux/iommu.h> | ||
24 | |||
25 | int iommu_dma_init(void); | ||
26 | |||
27 | /* Domain management interface for IOMMU drivers */ | ||
28 | int iommu_get_dma_cookie(struct iommu_domain *domain); | ||
29 | void iommu_put_dma_cookie(struct iommu_domain *domain); | ||
30 | |||
31 | /* Setup call for arch DMA mapping code */ | ||
32 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size); | ||
33 | |||
34 | /* General helpers for DMA-API <-> IOMMU-API interaction */ | ||
35 | int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); | ||
36 | |||
37 | /* | ||
38 | * These implement the bulk of the relevant DMA mapping callbacks, but require | ||
39 | * the arch code to take care of attributes and cache maintenance | ||
40 | */ | ||
41 | struct page **iommu_dma_alloc(struct device *dev, size_t size, | ||
42 | gfp_t gfp, int prot, dma_addr_t *handle, | ||
43 | void (*flush_page)(struct device *, const void *, phys_addr_t)); | ||
44 | void iommu_dma_free(struct device *dev, struct page **pages, size_t size, | ||
45 | dma_addr_t *handle); | ||
46 | |||
47 | int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); | ||
48 | |||
49 | dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | ||
50 | unsigned long offset, size_t size, int prot); | ||
51 | int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
52 | int nents, int prot); | ||
53 | |||
54 | /* | ||
55 | * Arch code with no special attribute handling may use these | ||
56 | * directly as DMA mapping callbacks for simplicity | ||
57 | */ | ||
58 | void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, | ||
59 | enum dma_data_direction dir, struct dma_attrs *attrs); | ||
60 | void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
61 | enum dma_data_direction dir, struct dma_attrs *attrs); | ||
62 | int iommu_dma_supported(struct device *dev, u64 mask); | ||
63 | int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | ||
64 | |||
65 | #else | ||
66 | |||
67 | struct iommu_domain; | ||
68 | |||
69 | static inline int iommu_dma_init(void) | ||
70 | { | ||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static inline int iommu_get_dma_cookie(struct iommu_domain *domain) | ||
75 | { | ||
76 | return -ENODEV; | ||
77 | } | ||
78 | |||
79 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) | ||
80 | { | ||
81 | } | ||
82 | |||
83 | #endif /* CONFIG_IOMMU_DMA */ | ||
84 | #endif /* __KERNEL__ */ | ||
85 | #endif /* __DMA_IOMMU_H */ | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ac07ff090919..2e551e2d2d03 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _LINUX_DMA_MAPPING_H | 1 | #ifndef _LINUX_DMA_MAPPING_H |
2 | #define _LINUX_DMA_MAPPING_H | 2 | #define _LINUX_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/sizes.h> | ||
4 | #include <linux/string.h> | 5 | #include <linux/string.h> |
5 | #include <linux/device.h> | 6 | #include <linux/device.h> |
6 | #include <linux/err.h> | 7 | #include <linux/err.h> |
@@ -145,7 +146,9 @@ static inline void arch_teardown_dma_ops(struct device *dev) { } | |||
145 | 146 | ||
146 | static inline unsigned int dma_get_max_seg_size(struct device *dev) | 147 | static inline unsigned int dma_get_max_seg_size(struct device *dev) |
147 | { | 148 | { |
148 | return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536; | 149 | if (dev->dma_parms && dev->dma_parms->max_segment_size) |
150 | return dev->dma_parms->max_segment_size; | ||
151 | return SZ_64K; | ||
149 | } | 152 | } |
150 | 153 | ||
151 | static inline unsigned int dma_set_max_seg_size(struct device *dev, | 154 | static inline unsigned int dma_set_max_seg_size(struct device *dev, |
@@ -154,14 +157,15 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev, | |||
154 | if (dev->dma_parms) { | 157 | if (dev->dma_parms) { |
155 | dev->dma_parms->max_segment_size = size; | 158 | dev->dma_parms->max_segment_size = size; |
156 | return 0; | 159 | return 0; |
157 | } else | 160 | } |
158 | return -EIO; | 161 | return -EIO; |
159 | } | 162 | } |
160 | 163 | ||
161 | static inline unsigned long dma_get_seg_boundary(struct device *dev) | 164 | static inline unsigned long dma_get_seg_boundary(struct device *dev) |
162 | { | 165 | { |
163 | return dev->dma_parms ? | 166 | if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) |
164 | dev->dma_parms->segment_boundary_mask : 0xffffffff; | 167 | return dev->dma_parms->segment_boundary_mask; |
168 | return DMA_BIT_MASK(32); | ||
165 | } | 169 | } |
166 | 170 | ||
167 | static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) | 171 | static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) |
@@ -169,8 +173,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) | |||
169 | if (dev->dma_parms) { | 173 | if (dev->dma_parms) { |
170 | dev->dma_parms->segment_boundary_mask = mask; | 174 | dev->dma_parms->segment_boundary_mask = mask; |
171 | return 0; | 175 | return 0; |
172 | } else | 176 | } |
173 | return -EIO; | 177 | return -EIO; |
174 | } | 178 | } |
175 | 179 | ||
176 | #ifndef dma_max_pfn | 180 | #ifndef dma_max_pfn |
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 234393a6997b..79df69dc629c 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h | |||
@@ -35,14 +35,23 @@ struct hsu_dma_chip { | |||
35 | unsigned int length; | 35 | unsigned int length; |
36 | unsigned int offset; | 36 | unsigned int offset; |
37 | struct hsu_dma *hsu; | 37 | struct hsu_dma *hsu; |
38 | struct hsu_dma_platform_data *pdata; | ||
39 | }; | 38 | }; |
40 | 39 | ||
40 | #if IS_ENABLED(CONFIG_HSU_DMA) | ||
41 | /* Export to the internal users */ | 41 | /* Export to the internal users */ |
42 | irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); | 42 | irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); |
43 | 43 | ||
44 | /* Export to the platform drivers */ | 44 | /* Export to the platform drivers */ |
45 | int hsu_dma_probe(struct hsu_dma_chip *chip); | 45 | int hsu_dma_probe(struct hsu_dma_chip *chip); |
46 | int hsu_dma_remove(struct hsu_dma_chip *chip); | 46 | int hsu_dma_remove(struct hsu_dma_chip *chip); |
47 | #else | ||
48 | static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, | ||
49 | unsigned short nr) | ||
50 | { | ||
51 | return IRQ_NONE; | ||
52 | } | ||
53 | static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; } | ||
54 | static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; } | ||
55 | #endif /* CONFIG_HSU_DMA */ | ||
47 | 56 | ||
48 | #endif /* _DMA_HSU_H */ | 57 | #endif /* _DMA_HSU_H */ |
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 7ac17f57250e..187c10299722 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h | |||
@@ -20,6 +20,14 @@ | |||
20 | #define CONTEXT_TT_MULTI_LEVEL 0 | 20 | #define CONTEXT_TT_MULTI_LEVEL 0 |
21 | #define CONTEXT_TT_DEV_IOTLB 1 | 21 | #define CONTEXT_TT_DEV_IOTLB 1 |
22 | #define CONTEXT_TT_PASS_THROUGH 2 | 22 | #define CONTEXT_TT_PASS_THROUGH 2 |
23 | /* Extended context entry types */ | ||
24 | #define CONTEXT_TT_PT_PASID 4 | ||
25 | #define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 | ||
26 | #define CONTEXT_TT_MASK (7ULL << 2) | ||
27 | |||
28 | #define CONTEXT_DINVE (1ULL << 8) | ||
29 | #define CONTEXT_PRS (1ULL << 9) | ||
30 | #define CONTEXT_PASIDE (1ULL << 11) | ||
23 | 31 | ||
24 | struct intel_iommu; | 32 | struct intel_iommu; |
25 | struct dmar_domain; | 33 | struct dmar_domain; |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 7ea9184eaa13..c47c68e535e8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -645,6 +645,7 @@ enum dmaengine_alignment { | |||
645 | * The function takes a buffer of size buf_len. The callback function will | 645 | * The function takes a buffer of size buf_len. The callback function will |
646 | * be called after period_len bytes have been transferred. | 646 | * be called after period_len bytes have been transferred. |
647 | * @device_prep_interleaved_dma: Transfer expression in a generic way. | 647 | * @device_prep_interleaved_dma: Transfer expression in a generic way. |
648 | * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address | ||
648 | * @device_config: Pushes a new configuration to a channel, return 0 or an error | 649 | * @device_config: Pushes a new configuration to a channel, return 0 or an error |
649 | * code | 650 | * code |
650 | * @device_pause: Pauses any transfer happening on a channel. Returns | 651 | * @device_pause: Pauses any transfer happening on a channel. Returns |
@@ -727,6 +728,9 @@ struct dma_device { | |||
727 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | 728 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( |
728 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 729 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
729 | unsigned long flags); | 730 | unsigned long flags); |
731 | struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( | ||
732 | struct dma_chan *chan, dma_addr_t dst, u64 data, | ||
733 | unsigned long flags); | ||
730 | 734 | ||
731 | int (*device_config)(struct dma_chan *chan, | 735 | int (*device_config)(struct dma_chan *chan, |
732 | struct dma_slave_config *config); | 736 | struct dma_slave_config *config); |
diff --git a/include/linux/edac.h b/include/linux/edac.h index da3b72e95db3..4fe67b853de0 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -769,12 +769,10 @@ struct mem_ctl_info { | |||
769 | /* the internal state of this controller instance */ | 769 | /* the internal state of this controller instance */ |
770 | int op_state; | 770 | int op_state; |
771 | 771 | ||
772 | #ifdef CONFIG_EDAC_DEBUG | ||
773 | struct dentry *debugfs; | 772 | struct dentry *debugfs; |
774 | u8 fake_inject_layer[EDAC_MAX_LAYERS]; | 773 | u8 fake_inject_layer[EDAC_MAX_LAYERS]; |
775 | u32 fake_inject_ue; | 774 | bool fake_inject_ue; |
776 | u16 fake_inject_count; | 775 | u16 fake_inject_count; |
777 | #endif | ||
778 | }; | 776 | }; |
779 | 777 | ||
780 | /* | 778 | /* |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 85ef051ac6fb..569b5a866bb1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -99,6 +99,7 @@ typedef struct { | |||
99 | #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ | 99 | #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ |
100 | #define EFI_MEMORY_MORE_RELIABLE \ | 100 | #define EFI_MEMORY_MORE_RELIABLE \ |
101 | ((u64)0x0000000000010000ULL) /* higher reliability */ | 101 | ((u64)0x0000000000010000ULL) /* higher reliability */ |
102 | #define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ | ||
102 | #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ | 103 | #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ |
103 | #define EFI_MEMORY_DESCRIPTOR_VERSION 1 | 104 | #define EFI_MEMORY_DESCRIPTOR_VERSION 1 |
104 | 105 | ||
@@ -595,6 +596,9 @@ void efi_native_runtime_setup(void); | |||
595 | #define DEVICE_TREE_GUID \ | 596 | #define DEVICE_TREE_GUID \ |
596 | EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) | 597 | EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) |
597 | 598 | ||
599 | #define EFI_PROPERTIES_TABLE_GUID \ | ||
600 | EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 ) | ||
601 | |||
598 | typedef struct { | 602 | typedef struct { |
599 | efi_guid_t guid; | 603 | efi_guid_t guid; |
600 | u64 table; | 604 | u64 table; |
@@ -676,7 +680,7 @@ typedef struct { | |||
676 | } efi_system_table_t; | 680 | } efi_system_table_t; |
677 | 681 | ||
678 | struct efi_memory_map { | 682 | struct efi_memory_map { |
679 | void *phys_map; | 683 | phys_addr_t phys_map; |
680 | void *map; | 684 | void *map; |
681 | void *map_end; | 685 | void *map_end; |
682 | int nr_map; | 686 | int nr_map; |
@@ -808,6 +812,15 @@ typedef struct _efi_file_io_interface { | |||
808 | #define EFI_FILE_MODE_WRITE 0x0000000000000002 | 812 | #define EFI_FILE_MODE_WRITE 0x0000000000000002 |
809 | #define EFI_FILE_MODE_CREATE 0x8000000000000000 | 813 | #define EFI_FILE_MODE_CREATE 0x8000000000000000 |
810 | 814 | ||
815 | typedef struct { | ||
816 | u32 version; | ||
817 | u32 length; | ||
818 | u64 memory_protection_attribute; | ||
819 | } efi_properties_table_t; | ||
820 | |||
821 | #define EFI_PROPERTIES_TABLE_VERSION 0x00010000 | ||
822 | #define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 | ||
823 | |||
811 | #define EFI_INVALID_TABLE_ADDR (~0UL) | 824 | #define EFI_INVALID_TABLE_ADDR (~0UL) |
812 | 825 | ||
813 | /* | 826 | /* |
@@ -830,6 +843,7 @@ extern struct efi { | |||
830 | unsigned long runtime; /* runtime table */ | 843 | unsigned long runtime; /* runtime table */ |
831 | unsigned long config_table; /* config tables */ | 844 | unsigned long config_table; /* config tables */ |
832 | unsigned long esrt; /* ESRT table */ | 845 | unsigned long esrt; /* ESRT table */ |
846 | unsigned long properties_table; /* properties table */ | ||
833 | efi_get_time_t *get_time; | 847 | efi_get_time_t *get_time; |
834 | efi_set_time_t *set_time; | 848 | efi_set_time_t *set_time; |
835 | efi_get_wakeup_time_t *get_wakeup_time; | 849 | efi_get_wakeup_time_t *get_wakeup_time; |
@@ -901,13 +915,19 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource, | |||
901 | struct resource *data_resource, struct resource *bss_resource); | 915 | struct resource *data_resource, struct resource *bss_resource); |
902 | extern void efi_get_time(struct timespec *now); | 916 | extern void efi_get_time(struct timespec *now); |
903 | extern void efi_reserve_boot_services(void); | 917 | extern void efi_reserve_boot_services(void); |
904 | extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); | 918 | extern int efi_get_fdt_params(struct efi_fdt_params *params); |
905 | extern struct efi_memory_map memmap; | 919 | extern struct efi_memory_map memmap; |
906 | extern struct kobject *efi_kobj; | 920 | extern struct kobject *efi_kobj; |
907 | 921 | ||
908 | extern int efi_reboot_quirk_mode; | 922 | extern int efi_reboot_quirk_mode; |
909 | extern bool efi_poweroff_required(void); | 923 | extern bool efi_poweroff_required(void); |
910 | 924 | ||
925 | #ifdef CONFIG_EFI_FAKE_MEMMAP | ||
926 | extern void __init efi_fake_memmap(void); | ||
927 | #else | ||
928 | static inline void efi_fake_memmap(void) { } | ||
929 | #endif | ||
930 | |||
911 | /* Iterate through an efi_memory_map */ | 931 | /* Iterate through an efi_memory_map */ |
912 | #define for_each_efi_memory_desc(m, md) \ | 932 | #define for_each_efi_memory_desc(m, md) \ |
913 | for ((md) = (m)->map; \ | 933 | for ((md) = (m)->map; \ |
@@ -959,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *); | |||
959 | #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ | 979 | #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ |
960 | #define EFI_ARCH_1 7 /* First arch-specific bit */ | 980 | #define EFI_ARCH_1 7 /* First arch-specific bit */ |
961 | #define EFI_DBG 8 /* Print additional debug info at runtime */ | 981 | #define EFI_DBG 8 /* Print additional debug info at runtime */ |
982 | #define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ | ||
962 | 983 | ||
963 | #ifdef CONFIG_EFI | 984 | #ifdef CONFIG_EFI |
964 | /* | 985 | /* |
diff --git a/include/linux/extcon.h b/include/linux/extcon.h index c0f8c4fc5d45..7abf674c388c 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h | |||
@@ -31,32 +31,42 @@ | |||
31 | /* | 31 | /* |
32 | * Define the unique id of supported external connectors | 32 | * Define the unique id of supported external connectors |
33 | */ | 33 | */ |
34 | #define EXTCON_NONE 0 | 34 | #define EXTCON_NONE 0 |
35 | 35 | ||
36 | #define EXTCON_USB 1 /* USB connector */ | 36 | /* USB external connector */ |
37 | #define EXTCON_USB_HOST 2 | 37 | #define EXTCON_USB 1 |
38 | 38 | #define EXTCON_USB_HOST 2 | |
39 | #define EXTCON_TA 3 /* Charger connector */ | 39 | |
40 | #define EXTCON_FAST_CHARGER 4 | 40 | /* Charging external connector */ |
41 | #define EXTCON_SLOW_CHARGER 5 | 41 | #define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ |
42 | #define EXTCON_CHARGE_DOWNSTREAM 6 | 42 | #define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ |
43 | 43 | #define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ | |
44 | #define EXTCON_LINE_IN 7 /* Audio/Video connector */ | 44 | #define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */ |
45 | #define EXTCON_LINE_OUT 8 | 45 | #define EXTCON_CHG_USB_FAST 9 |
46 | #define EXTCON_MICROPHONE 9 | 46 | #define EXTCON_CHG_USB_SLOW 10 |
47 | #define EXTCON_HEADPHONE 10 | 47 | |
48 | #define EXTCON_HDMI 11 | 48 | /* Jack external connector */ |
49 | #define EXTCON_MHL 12 | 49 | #define EXTCON_JACK_MICROPHONE 20 |
50 | #define EXTCON_DVI 13 | 50 | #define EXTCON_JACK_HEADPHONE 21 |
51 | #define EXTCON_VGA 14 | 51 | #define EXTCON_JACK_LINE_IN 22 |
52 | #define EXTCON_SPDIF_IN 15 | 52 | #define EXTCON_JACK_LINE_OUT 23 |
53 | #define EXTCON_SPDIF_OUT 16 | 53 | #define EXTCON_JACK_VIDEO_IN 24 |
54 | #define EXTCON_VIDEO_IN 17 | 54 | #define EXTCON_JACK_VIDEO_OUT 25 |
55 | #define EXTCON_VIDEO_OUT 18 | 55 | #define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */ |
56 | 56 | #define EXTCON_JACK_SPDIF_OUT 27 | |
57 | #define EXTCON_DOCK 19 /* Misc connector */ | 57 | |
58 | #define EXTCON_JIG 20 | 58 | /* Display external connector */ |
59 | #define EXTCON_MECHANICAL 21 | 59 | #define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ |
60 | #define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ | ||
61 | #define EXTCON_DISP_DVI 42 /* Digital Visual Interface */ | ||
62 | #define EXTCON_DISP_VGA 43 /* Video Graphics Array */ | ||
63 | |||
64 | /* Miscellaneous external connector */ | ||
65 | #define EXTCON_DOCK 60 | ||
66 | #define EXTCON_JIG 61 | ||
67 | #define EXTCON_MECHANICAL 62 | ||
68 | |||
69 | #define EXTCON_NUM 63 | ||
60 | 70 | ||
61 | struct extcon_cable; | 71 | struct extcon_cable; |
62 | 72 | ||
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h index 0b17ad43fbfc..7cacafb78b09 100644 --- a/include/linux/extcon/extcon-gpio.h +++ b/include/linux/extcon/extcon-gpio.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * External connector (extcon) class generic GPIO driver | 2 | * Single-state GPIO extcon driver based on extcon class |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Samsung Electronics | 4 | * Copyright (C) 2012 Samsung Electronics |
5 | * Author: MyungJoo Ham <myungjoo.ham@samsung.com> | 5 | * Author: MyungJoo Ham <myungjoo.ham@samsung.com> |
@@ -16,43 +16,31 @@ | |||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
19 | * | 19 | */ |
20 | */ | ||
21 | #ifndef __EXTCON_GPIO_H__ | 20 | #ifndef __EXTCON_GPIO_H__ |
22 | #define __EXTCON_GPIO_H__ __FILE__ | 21 | #define __EXTCON_GPIO_H__ __FILE__ |
23 | 22 | ||
24 | #include <linux/extcon.h> | 23 | #include <linux/extcon.h> |
25 | 24 | ||
26 | /** | 25 | /** |
27 | * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device. | 26 | * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device. |
28 | * @name: The name of this GPIO extcon device. | 27 | * @extcon_id: The unique id of specific external connector. |
29 | * @gpio: Corresponding GPIO. | 28 | * @gpio: Corresponding GPIO. |
30 | * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 | 29 | * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 |
31 | * If true, low state of gpio means active. | 30 | * If true, low state of gpio means active. |
32 | * If false, high state of gpio means active. | 31 | * If false, high state of gpio means active. |
33 | * @debounce: Debounce time for GPIO IRQ in ms. | 32 | * @debounce: Debounce time for GPIO IRQ in ms. |
34 | * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). | 33 | * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). |
35 | * @state_on: print_state is overriden with state_on if attached. | ||
36 | * If NULL, default method of extcon class is used. | ||
37 | * @state_off: print_state is overriden with state_off if detached. | ||
38 | * If NUll, default method of extcon class is used. | ||
39 | * @check_on_resume: Boolean describing whether to check the state of gpio | 34 | * @check_on_resume: Boolean describing whether to check the state of gpio |
40 | * while resuming from sleep. | 35 | * while resuming from sleep. |
41 | * | ||
42 | * Note that in order for state_on or state_off to be valid, both state_on | ||
43 | * and state_off should be not NULL. If at least one of them is NULL, | ||
44 | * the print_state is not overriden. | ||
45 | */ | 36 | */ |
46 | struct gpio_extcon_platform_data { | 37 | struct gpio_extcon_pdata { |
47 | const char *name; | 38 | unsigned int extcon_id; |
48 | unsigned gpio; | 39 | unsigned gpio; |
49 | bool gpio_active_low; | 40 | bool gpio_active_low; |
50 | unsigned long debounce; | 41 | unsigned long debounce; |
51 | unsigned long irq_flags; | 42 | unsigned long irq_flags; |
52 | 43 | ||
53 | /* if NULL, "0" or "1" will be printed */ | ||
54 | const char *state_on; | ||
55 | const char *state_off; | ||
56 | bool check_on_resume; | 44 | bool check_on_resume; |
57 | }; | 45 | }; |
58 | 46 | ||
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 798fad9e420d..3159a7dba034 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h | |||
@@ -18,7 +18,7 @@ struct fault_attr { | |||
18 | atomic_t times; | 18 | atomic_t times; |
19 | atomic_t space; | 19 | atomic_t space; |
20 | unsigned long verbose; | 20 | unsigned long verbose; |
21 | u32 task_filter; | 21 | bool task_filter; |
22 | unsigned long stacktrace_depth; | 22 | unsigned long stacktrace_depth; |
23 | unsigned long require_start; | 23 | unsigned long require_start; |
24 | unsigned long require_end; | 24 | unsigned long require_end; |
diff --git a/include/linux/fb.h b/include/linux/fb.h index be40dbaed11e..3d003805aac3 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -483,7 +483,10 @@ struct fb_info { | |||
483 | #ifdef CONFIG_FB_TILEBLITTING | 483 | #ifdef CONFIG_FB_TILEBLITTING |
484 | struct fb_tile_ops *tileops; /* Tile Blitting */ | 484 | struct fb_tile_ops *tileops; /* Tile Blitting */ |
485 | #endif | 485 | #endif |
486 | char __iomem *screen_base; /* Virtual address */ | 486 | union { |
487 | char __iomem *screen_base; /* Virtual address */ | ||
488 | char *screen_buffer; | ||
489 | }; | ||
487 | unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ | 490 | unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ |
488 | void *pseudo_palette; /* Fake palette of 16 colors */ | 491 | void *pseudo_palette; /* Fake palette of 16 colors */ |
489 | #define FBINFO_STATE_RUNNING 0 | 492 | #define FBINFO_STATE_RUNNING 0 |
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 674e3e226465..5295535b60c6 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -26,6 +26,7 @@ struct fdtable { | |||
26 | struct file __rcu **fd; /* current fd array */ | 26 | struct file __rcu **fd; /* current fd array */ |
27 | unsigned long *close_on_exec; | 27 | unsigned long *close_on_exec; |
28 | unsigned long *open_fds; | 28 | unsigned long *open_fds; |
29 | unsigned long *full_fds_bits; | ||
29 | struct rcu_head rcu; | 30 | struct rcu_head rcu; |
30 | }; | 31 | }; |
31 | 32 | ||
@@ -59,6 +60,7 @@ struct files_struct { | |||
59 | int next_fd; | 60 | int next_fd; |
60 | unsigned long close_on_exec_init[1]; | 61 | unsigned long close_on_exec_init[1]; |
61 | unsigned long open_fds_init[1]; | 62 | unsigned long open_fds_init[1]; |
63 | unsigned long full_fds_bits_init[1]; | ||
62 | struct file __rcu * fd_array[NR_OPEN_DEFAULT]; | 64 | struct file __rcu * fd_array[NR_OPEN_DEFAULT]; |
63 | }; | 65 | }; |
64 | 66 | ||
diff --git a/include/linux/fence.h b/include/linux/fence.h index 39efee130d2b..bb522011383b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
@@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence) | |||
280 | } | 280 | } |
281 | 281 | ||
282 | /** | 282 | /** |
283 | * fence_is_later - return if f1 is chronologically later than f2 | ||
284 | * @f1: [in] the first fence from the same context | ||
285 | * @f2: [in] the second fence from the same context | ||
286 | * | ||
287 | * Returns true if f1 is chronologically later than f2. Both fences must be | ||
288 | * from the same context, since a seqno is not re-used across contexts. | ||
289 | */ | ||
290 | static inline bool fence_is_later(struct fence *f1, struct fence *f2) | ||
291 | { | ||
292 | if (WARN_ON(f1->context != f2->context)) | ||
293 | return false; | ||
294 | |||
295 | return f1->seqno - f2->seqno < INT_MAX; | ||
296 | } | ||
297 | |||
298 | /** | ||
283 | * fence_later - return the chronologically later fence | 299 | * fence_later - return the chronologically later fence |
284 | * @f1: [in] the first fence from the same context | 300 | * @f1: [in] the first fence from the same context |
285 | * @f2: [in] the second fence from the same context | 301 | * @f2: [in] the second fence from the same context |
@@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2) | |||
298 | * set if enable_signaling wasn't called, and enabling that here is | 314 | * set if enable_signaling wasn't called, and enabling that here is |
299 | * overkill. | 315 | * overkill. |
300 | */ | 316 | */ |
301 | if (f2->seqno - f1->seqno <= INT_MAX) | 317 | if (fence_is_later(f1, f2)) |
302 | return fence_is_signaled(f2) ? NULL : f2; | ||
303 | else | ||
304 | return fence_is_signaled(f1) ? NULL : f1; | 318 | return fence_is_signaled(f1) ? NULL : f1; |
319 | else | ||
320 | return fence_is_signaled(f2) ? NULL : f2; | ||
305 | } | 321 | } |
306 | 322 | ||
307 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); | 323 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); |
308 | 324 | signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, | |
325 | bool intr, signed long timeout); | ||
309 | 326 | ||
310 | /** | 327 | /** |
311 | * fence_wait - sleep until the fence gets signaled | 328 | * fence_wait - sleep until the fence gets signaled |
diff --git a/include/linux/filter.h b/include/linux/filter.h index fa2cab985e57..4165e9ac9e36 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/printk.h> | 13 | #include <linux/printk.h> |
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <net/sch_generic.h> | ||
16 | 17 | ||
17 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
18 | 19 | ||
@@ -302,10 +303,6 @@ struct bpf_prog_aux; | |||
302 | bpf_size; \ | 303 | bpf_size; \ |
303 | }) | 304 | }) |
304 | 305 | ||
305 | /* Macro to invoke filter function. */ | ||
306 | #define SK_RUN_FILTER(filter, ctx) \ | ||
307 | (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) | ||
308 | |||
309 | #ifdef CONFIG_COMPAT | 306 | #ifdef CONFIG_COMPAT |
310 | /* A struct sock_filter is architecture independent. */ | 307 | /* A struct sock_filter is architecture independent. */ |
311 | struct compat_sock_fprog { | 308 | struct compat_sock_fprog { |
@@ -326,8 +323,12 @@ struct bpf_binary_header { | |||
326 | 323 | ||
327 | struct bpf_prog { | 324 | struct bpf_prog { |
328 | u16 pages; /* Number of allocated pages */ | 325 | u16 pages; /* Number of allocated pages */ |
329 | bool jited; /* Is our filter JIT'ed? */ | 326 | kmemcheck_bitfield_begin(meta); |
330 | bool gpl_compatible; /* Is our filter GPL compatible? */ | 327 | u16 jited:1, /* Is our filter JIT'ed? */ |
328 | gpl_compatible:1, /* Is filter GPL compatible? */ | ||
329 | cb_access:1, /* Is control block accessed? */ | ||
330 | dst_needed:1; /* Do we need dst entry? */ | ||
331 | kmemcheck_bitfield_end(meta); | ||
331 | u32 len; /* Number of filter blocks */ | 332 | u32 len; /* Number of filter blocks */ |
332 | enum bpf_prog_type type; /* Type of BPF program */ | 333 | enum bpf_prog_type type; /* Type of BPF program */ |
333 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | 334 | struct bpf_prog_aux *aux; /* Auxiliary fields */ |
@@ -349,6 +350,39 @@ struct sk_filter { | |||
349 | 350 | ||
350 | #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) | 351 | #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) |
351 | 352 | ||
353 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | ||
354 | struct sk_buff *skb) | ||
355 | { | ||
356 | u8 *cb_data = qdisc_skb_cb(skb)->data; | ||
357 | u8 saved_cb[QDISC_CB_PRIV_LEN]; | ||
358 | u32 res; | ||
359 | |||
360 | BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != | ||
361 | QDISC_CB_PRIV_LEN); | ||
362 | |||
363 | if (unlikely(prog->cb_access)) { | ||
364 | memcpy(saved_cb, cb_data, sizeof(saved_cb)); | ||
365 | memset(cb_data, 0, sizeof(saved_cb)); | ||
366 | } | ||
367 | |||
368 | res = BPF_PROG_RUN(prog, skb); | ||
369 | |||
370 | if (unlikely(prog->cb_access)) | ||
371 | memcpy(cb_data, saved_cb, sizeof(saved_cb)); | ||
372 | |||
373 | return res; | ||
374 | } | ||
375 | |||
376 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | ||
377 | struct sk_buff *skb) | ||
378 | { | ||
379 | u8 *cb_data = qdisc_skb_cb(skb)->data; | ||
380 | |||
381 | if (unlikely(prog->cb_access)) | ||
382 | memset(cb_data, 0, QDISC_CB_PRIV_LEN); | ||
383 | return BPF_PROG_RUN(prog, skb); | ||
384 | } | ||
385 | |||
352 | static inline unsigned int bpf_prog_size(unsigned int proglen) | 386 | static inline unsigned int bpf_prog_size(unsigned int proglen) |
353 | { | 387 | { |
354 | return max(sizeof(struct bpf_prog), | 388 | return max(sizeof(struct bpf_prog), |
@@ -408,7 +442,7 @@ typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, | |||
408 | 442 | ||
409 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); | 443 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); |
410 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, | 444 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, |
411 | bpf_aux_classic_check_t trans); | 445 | bpf_aux_classic_check_t trans, bool save_orig); |
412 | void bpf_prog_destroy(struct bpf_prog *fp); | 446 | void bpf_prog_destroy(struct bpf_prog *fp); |
413 | 447 | ||
414 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 448 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h new file mode 100644 index 000000000000..0940bf45e2f2 --- /dev/null +++ b/include/linux/fpga/fpga-mgr.h | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * FPGA Framework | ||
3 | * | ||
4 | * Copyright (C) 2013-2015 Altera Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/mutex.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | #ifndef _LINUX_FPGA_MGR_H | ||
22 | #define _LINUX_FPGA_MGR_H | ||
23 | |||
24 | struct fpga_manager; | ||
25 | |||
26 | /** | ||
27 | * enum fpga_mgr_states - fpga framework states | ||
28 | * @FPGA_MGR_STATE_UNKNOWN: can't determine state | ||
29 | * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off | ||
30 | * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up | ||
31 | * @FPGA_MGR_STATE_RESET: FPGA in reset state | ||
32 | * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress | ||
33 | * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed | ||
34 | * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming | ||
35 | * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage | ||
36 | * @FPGA_MGR_STATE_WRITE: writing image to FPGA | ||
37 | * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA | ||
38 | * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps | ||
39 | * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE | ||
40 | * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating | ||
41 | */ | ||
42 | enum fpga_mgr_states { | ||
43 | /* default FPGA states */ | ||
44 | FPGA_MGR_STATE_UNKNOWN, | ||
45 | FPGA_MGR_STATE_POWER_OFF, | ||
46 | FPGA_MGR_STATE_POWER_UP, | ||
47 | FPGA_MGR_STATE_RESET, | ||
48 | |||
49 | /* getting an image for loading */ | ||
50 | FPGA_MGR_STATE_FIRMWARE_REQ, | ||
51 | FPGA_MGR_STATE_FIRMWARE_REQ_ERR, | ||
52 | |||
53 | /* write sequence: init, write, complete */ | ||
54 | FPGA_MGR_STATE_WRITE_INIT, | ||
55 | FPGA_MGR_STATE_WRITE_INIT_ERR, | ||
56 | FPGA_MGR_STATE_WRITE, | ||
57 | FPGA_MGR_STATE_WRITE_ERR, | ||
58 | FPGA_MGR_STATE_WRITE_COMPLETE, | ||
59 | FPGA_MGR_STATE_WRITE_COMPLETE_ERR, | ||
60 | |||
61 | /* fpga is programmed and operating */ | ||
62 | FPGA_MGR_STATE_OPERATING, | ||
63 | }; | ||
64 | |||
65 | /* | ||
66 | * FPGA Manager flags | ||
67 | * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported | ||
68 | */ | ||
69 | #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) | ||
70 | |||
71 | /** | ||
72 | * struct fpga_manager_ops - ops for low level fpga manager drivers | ||
73 | * @state: returns an enum value of the FPGA's state | ||
74 | * @write_init: prepare the FPGA to receive confuration data | ||
75 | * @write: write count bytes of configuration data to the FPGA | ||
76 | * @write_complete: set FPGA to operating state after writing is done | ||
77 | * @fpga_remove: optional: Set FPGA into a specific state during driver remove | ||
78 | * | ||
79 | * fpga_manager_ops are the low level functions implemented by a specific | ||
80 | * fpga manager driver. The optional ones are tested for NULL before being | ||
81 | * called, so leaving them out is fine. | ||
82 | */ | ||
83 | struct fpga_manager_ops { | ||
84 | enum fpga_mgr_states (*state)(struct fpga_manager *mgr); | ||
85 | int (*write_init)(struct fpga_manager *mgr, u32 flags, | ||
86 | const char *buf, size_t count); | ||
87 | int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); | ||
88 | int (*write_complete)(struct fpga_manager *mgr, u32 flags); | ||
89 | void (*fpga_remove)(struct fpga_manager *mgr); | ||
90 | }; | ||
91 | |||
92 | /** | ||
93 | * struct fpga_manager - fpga manager structure | ||
94 | * @name: name of low level fpga manager | ||
95 | * @dev: fpga manager device | ||
96 | * @ref_mutex: only allows one reference to fpga manager | ||
97 | * @state: state of fpga manager | ||
98 | * @mops: pointer to struct of fpga manager ops | ||
99 | * @priv: low level driver private date | ||
100 | */ | ||
101 | struct fpga_manager { | ||
102 | const char *name; | ||
103 | struct device dev; | ||
104 | struct mutex ref_mutex; | ||
105 | enum fpga_mgr_states state; | ||
106 | const struct fpga_manager_ops *mops; | ||
107 | void *priv; | ||
108 | }; | ||
109 | |||
110 | #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) | ||
111 | |||
112 | int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, | ||
113 | const char *buf, size_t count); | ||
114 | |||
115 | int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, | ||
116 | const char *image_name); | ||
117 | |||
118 | struct fpga_manager *of_fpga_mgr_get(struct device_node *node); | ||
119 | |||
120 | void fpga_mgr_put(struct fpga_manager *mgr); | ||
121 | |||
122 | int fpga_mgr_register(struct device *dev, const char *name, | ||
123 | const struct fpga_manager_ops *mops, void *priv); | ||
124 | |||
125 | void fpga_mgr_unregister(struct device *dev); | ||
126 | |||
127 | #endif /*_LINUX_FPGA_MGR_H */ | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 72d8a844c692..3aa514254161 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1053,12 +1053,11 @@ extern void locks_remove_file(struct file *); | |||
1053 | extern void locks_release_private(struct file_lock *); | 1053 | extern void locks_release_private(struct file_lock *); |
1054 | extern void posix_test_lock(struct file *, struct file_lock *); | 1054 | extern void posix_test_lock(struct file *, struct file_lock *); |
1055 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); | 1055 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); |
1056 | extern int posix_lock_inode_wait(struct inode *, struct file_lock *); | ||
1057 | extern int posix_unblock_lock(struct file_lock *); | 1056 | extern int posix_unblock_lock(struct file_lock *); |
1058 | extern int vfs_test_lock(struct file *, struct file_lock *); | 1057 | extern int vfs_test_lock(struct file *, struct file_lock *); |
1059 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); | 1058 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); |
1060 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); | 1059 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); |
1061 | extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl); | 1060 | extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); |
1062 | extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); | 1061 | extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); |
1063 | extern void lease_get_mtime(struct inode *, struct timespec *time); | 1062 | extern void lease_get_mtime(struct inode *, struct timespec *time); |
1064 | extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); | 1063 | extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); |
@@ -1144,12 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, | |||
1144 | return -ENOLCK; | 1143 | return -ENOLCK; |
1145 | } | 1144 | } |
1146 | 1145 | ||
1147 | static inline int posix_lock_inode_wait(struct inode *inode, | ||
1148 | struct file_lock *fl) | ||
1149 | { | ||
1150 | return -ENOLCK; | ||
1151 | } | ||
1152 | |||
1153 | static inline int posix_unblock_lock(struct file_lock *waiter) | 1146 | static inline int posix_unblock_lock(struct file_lock *waiter) |
1154 | { | 1147 | { |
1155 | return -ENOENT; | 1148 | return -ENOENT; |
@@ -1171,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) | |||
1171 | return 0; | 1164 | return 0; |
1172 | } | 1165 | } |
1173 | 1166 | ||
1174 | static inline int flock_lock_inode_wait(struct inode *inode, | 1167 | static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) |
1175 | struct file_lock *request) | ||
1176 | { | 1168 | { |
1177 | return -ENOLCK; | 1169 | return -ENOLCK; |
1178 | } | 1170 | } |
@@ -1215,14 +1207,9 @@ static inline struct inode *file_inode(const struct file *f) | |||
1215 | return f->f_inode; | 1207 | return f->f_inode; |
1216 | } | 1208 | } |
1217 | 1209 | ||
1218 | static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) | 1210 | static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) |
1219 | { | 1211 | { |
1220 | return posix_lock_inode_wait(file_inode(filp), fl); | 1212 | return locks_lock_inode_wait(file_inode(filp), fl); |
1221 | } | ||
1222 | |||
1223 | static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl) | ||
1224 | { | ||
1225 | return flock_lock_inode_wait(file_inode(filp), fl); | ||
1226 | } | 1213 | } |
1227 | 1214 | ||
1228 | struct fasync_struct { | 1215 | struct fasync_struct { |
@@ -1678,8 +1665,6 @@ struct inode_operations { | |||
1678 | umode_t create_mode, int *opened); | 1665 | umode_t create_mode, int *opened); |
1679 | int (*tmpfile) (struct inode *, struct dentry *, umode_t); | 1666 | int (*tmpfile) (struct inode *, struct dentry *, umode_t); |
1680 | int (*set_acl)(struct inode *, struct posix_acl *, int); | 1667 | int (*set_acl)(struct inode *, struct posix_acl *, int); |
1681 | |||
1682 | /* WARNING: probably going away soon, do not use! */ | ||
1683 | } ____cacheline_aligned; | 1668 | } ____cacheline_aligned; |
1684 | 1669 | ||
1685 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | 1670 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, |
@@ -2422,6 +2407,7 @@ extern int write_inode_now(struct inode *, int); | |||
2422 | extern int filemap_fdatawrite(struct address_space *); | 2407 | extern int filemap_fdatawrite(struct address_space *); |
2423 | extern int filemap_flush(struct address_space *); | 2408 | extern int filemap_flush(struct address_space *); |
2424 | extern int filemap_fdatawait(struct address_space *); | 2409 | extern int filemap_fdatawait(struct address_space *); |
2410 | extern void filemap_fdatawait_keep_errors(struct address_space *); | ||
2425 | extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, | 2411 | extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, |
2426 | loff_t lend); | 2412 | loff_t lend); |
2427 | extern int filemap_write_and_wait(struct address_space *mapping); | 2413 | extern int filemap_write_and_wait(struct address_space *mapping); |
@@ -2625,7 +2611,7 @@ static inline void remove_inode_hash(struct inode *inode) | |||
2625 | extern void inode_sb_list_add(struct inode *inode); | 2611 | extern void inode_sb_list_add(struct inode *inode); |
2626 | 2612 | ||
2627 | #ifdef CONFIG_BLOCK | 2613 | #ifdef CONFIG_BLOCK |
2628 | extern void submit_bio(int, struct bio *); | 2614 | extern blk_qc_t submit_bio(int, struct bio *); |
2629 | extern int bdev_read_only(struct block_device *); | 2615 | extern int bdev_read_only(struct block_device *); |
2630 | #endif | 2616 | #endif |
2631 | extern int set_blocksize(struct block_device *, int); | 2617 | extern int set_blocksize(struct block_device *, int); |
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h new file mode 100644 index 000000000000..84d971ff3fba --- /dev/null +++ b/include/linux/fsl/guts.h | |||
@@ -0,0 +1,192 @@ | |||
1 | /** | ||
2 | * Freecale 85xx and 86xx Global Utilties register set | ||
3 | * | ||
4 | * Authors: Jeff Brown | ||
5 | * Timur Tabi <timur@freescale.com> | ||
6 | * | ||
7 | * Copyright 2004,2007,2012 Freescale Semiconductor, Inc | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | */ | ||
14 | |||
15 | #ifndef __FSL_GUTS_H__ | ||
16 | #define __FSL_GUTS_H__ | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | |||
20 | /** | ||
21 | * Global Utility Registers. | ||
22 | * | ||
23 | * Not all registers defined in this structure are available on all chips, so | ||
24 | * you are expected to know whether a given register actually exists on your | ||
25 | * chip before you access it. | ||
26 | * | ||
27 | * Also, some registers are similar on different chips but have slightly | ||
28 | * different names. In these cases, one name is chosen to avoid extraneous | ||
29 | * #ifdefs. | ||
30 | */ | ||
31 | struct ccsr_guts { | ||
32 | __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ | ||
33 | __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ | ||
34 | __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ | ||
35 | __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ | ||
36 | __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ | ||
37 | __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ | ||
38 | u8 res018[0x20 - 0x18]; | ||
39 | __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ | ||
40 | u8 res024[0x30 - 0x24]; | ||
41 | __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ | ||
42 | u8 res034[0x40 - 0x34]; | ||
43 | __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ | ||
44 | u8 res044[0x50 - 0x44]; | ||
45 | __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ | ||
46 | u8 res054[0x60 - 0x54]; | ||
47 | __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ | ||
48 | __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ | ||
49 | __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ | ||
50 | u8 res06c[0x70 - 0x6c]; | ||
51 | __be32 devdisr; /* 0x.0070 - Device Disable Control */ | ||
52 | #define CCSR_GUTS_DEVDISR_TB1 0x00001000 | ||
53 | #define CCSR_GUTS_DEVDISR_TB0 0x00004000 | ||
54 | __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ | ||
55 | u8 res078[0x7c - 0x78]; | ||
56 | __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ | ||
57 | __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ | ||
58 | __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ | ||
59 | __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ | ||
60 | __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ | ||
61 | __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ | ||
62 | __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ | ||
63 | __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ | ||
64 | __be32 autorstsr; /* 0x.009c - Automatic reset status register */ | ||
65 | __be32 pvr; /* 0x.00a0 - Processor Version Register */ | ||
66 | __be32 svr; /* 0x.00a4 - System Version Register */ | ||
67 | u8 res0a8[0xb0 - 0xa8]; | ||
68 | __be32 rstcr; /* 0x.00b0 - Reset Control Register */ | ||
69 | u8 res0b4[0xc0 - 0xb4]; | ||
70 | __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register | ||
71 | Called 'elbcvselcr' on 86xx SOCs */ | ||
72 | u8 res0c4[0x100 - 0xc4]; | ||
73 | __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers | ||
74 | There are 16 registers */ | ||
75 | u8 res140[0x224 - 0x140]; | ||
76 | __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ | ||
77 | __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ | ||
78 | u8 res22c[0x604 - 0x22c]; | ||
79 | __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ | ||
80 | u8 res608[0x800 - 0x608]; | ||
81 | __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ | ||
82 | u8 res804[0x900 - 0x804]; | ||
83 | __be32 ircr; /* 0x.0900 - Infrared Control Register */ | ||
84 | u8 res904[0x908 - 0x904]; | ||
85 | __be32 dmacr; /* 0x.0908 - DMA Control Register */ | ||
86 | u8 res90c[0x914 - 0x90c]; | ||
87 | __be32 elbccr; /* 0x.0914 - eLBC Control Register */ | ||
88 | u8 res918[0xb20 - 0x918]; | ||
89 | __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ | ||
90 | __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ | ||
91 | __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ | ||
92 | u8 resb2c[0xe00 - 0xb2c]; | ||
93 | __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ | ||
94 | u8 rese04[0xe10 - 0xe04]; | ||
95 | __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ | ||
96 | u8 rese14[0xe20 - 0xe14]; | ||
97 | __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ | ||
98 | __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ | ||
99 | u8 rese28[0xf04 - 0xe28]; | ||
100 | __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ | ||
101 | __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ | ||
102 | u8 resf0c[0xf2c - 0xf0c]; | ||
103 | __be32 itcr; /* 0x.0f2c - Internal transaction control register */ | ||
104 | u8 resf30[0xf40 - 0xf30]; | ||
105 | __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ | ||
106 | __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ | ||
107 | } __attribute__ ((packed)); | ||
108 | |||
109 | |||
110 | /* Alternate function signal multiplex control */ | ||
111 | #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) | ||
112 | |||
113 | #ifdef CONFIG_PPC_86xx | ||
114 | |||
115 | #define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ | ||
116 | #define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ | ||
117 | |||
118 | /* | ||
119 | * Set the DMACR register in the GUTS | ||
120 | * | ||
121 | * The DMACR register determines the source of initiated transfers for each | ||
122 | * channel on each DMA controller. Rather than have a bunch of repetitive | ||
123 | * macros for the bit patterns, we just have a function that calculates | ||
124 | * them. | ||
125 | * | ||
126 | * guts: Pointer to GUTS structure | ||
127 | * co: The DMA controller (0 or 1) | ||
128 | * ch: The channel on the DMA controller (0, 1, 2, or 3) | ||
129 | * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) | ||
130 | */ | ||
131 | static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, | ||
132 | unsigned int co, unsigned int ch, unsigned int device) | ||
133 | { | ||
134 | unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); | ||
135 | |||
136 | clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); | ||
137 | } | ||
138 | |||
139 | #define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 | ||
140 | #define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ | ||
141 | #define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ | ||
142 | #define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ | ||
143 | #define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ | ||
144 | #define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ | ||
145 | #define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ | ||
146 | #define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ | ||
147 | #define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ | ||
148 | #define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ | ||
149 | #define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ | ||
150 | #define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ | ||
151 | #define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 | ||
152 | #define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 | ||
153 | #define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 | ||
154 | #define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 | ||
155 | |||
156 | /* | ||
157 | * Set the DMA external control bits in the GUTS | ||
158 | * | ||
159 | * The DMA external control bits in the PMUXCR are only meaningful for | ||
160 | * channels 0 and 3. Any other channels are ignored. | ||
161 | * | ||
162 | * guts: Pointer to GUTS structure | ||
163 | * co: The DMA controller (0 or 1) | ||
164 | * ch: The channel on the DMA controller (0, 1, 2, or 3) | ||
165 | * value: the new value for the bit (0 or 1) | ||
166 | */ | ||
167 | static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, | ||
168 | unsigned int co, unsigned int ch, unsigned int value) | ||
169 | { | ||
170 | if ((ch == 0) || (ch == 3)) { | ||
171 | unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; | ||
172 | |||
173 | clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); | ||
174 | } | ||
175 | } | ||
176 | |||
177 | #define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 | ||
178 | #define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 | ||
179 | #define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 | ||
180 | #define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 | ||
181 | #define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 | ||
182 | #define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ | ||
183 | (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) | ||
184 | #define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 | ||
185 | #define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 | ||
186 | #define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) | ||
187 | #define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF | ||
188 | #define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) | ||
189 | |||
190 | #endif | ||
191 | |||
192 | #endif | ||
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6cd8c0ee4b6f..eae6548efbf0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { } | |||
263 | #endif /* CONFIG_FUNCTION_TRACER */ | 263 | #endif /* CONFIG_FUNCTION_TRACER */ |
264 | 264 | ||
265 | #ifdef CONFIG_STACK_TRACER | 265 | #ifdef CONFIG_STACK_TRACER |
266 | |||
267 | #define STACK_TRACE_ENTRIES 500 | ||
268 | |||
269 | struct stack_trace; | ||
270 | |||
271 | extern unsigned stack_trace_index[]; | ||
272 | extern struct stack_trace stack_trace_max; | ||
273 | extern unsigned long stack_trace_max_size; | ||
274 | extern arch_spinlock_t stack_trace_max_lock; | ||
275 | |||
266 | extern int stack_tracer_enabled; | 276 | extern int stack_tracer_enabled; |
277 | void stack_trace_print(void); | ||
267 | int | 278 | int |
268 | stack_trace_sysctl(struct ctl_table *table, int write, | 279 | stack_trace_sysctl(struct ctl_table *table, int write, |
269 | void __user *buffer, size_t *lenp, | 280 | void __user *buffer, size_t *lenp, |
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0408545bce42..851671742790 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h | |||
@@ -16,7 +16,9 @@ enum fwnode_type { | |||
16 | FWNODE_INVALID = 0, | 16 | FWNODE_INVALID = 0, |
17 | FWNODE_OF, | 17 | FWNODE_OF, |
18 | FWNODE_ACPI, | 18 | FWNODE_ACPI, |
19 | FWNODE_ACPI_DATA, | ||
19 | FWNODE_PDATA, | 20 | FWNODE_PDATA, |
21 | FWNODE_IRQCHIP, | ||
20 | }; | 22 | }; |
21 | 23 | ||
22 | struct fwnode_handle { | 24 | struct fwnode_handle { |
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 09460d6d6682..a4c61cbce777 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h | |||
@@ -8,7 +8,7 @@ | |||
8 | extern void genl_lock(void); | 8 | extern void genl_lock(void); |
9 | extern void genl_unlock(void); | 9 | extern void genl_unlock(void); |
10 | #ifdef CONFIG_LOCKDEP | 10 | #ifdef CONFIG_LOCKDEP |
11 | extern int lockdep_genl_is_held(void); | 11 | extern bool lockdep_genl_is_held(void); |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | /* for synchronisation between af_netlink and genetlink */ | 14 | /* for synchronisation between af_netlink and genetlink */ |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 2adbfa6d02bc..847cc1d91634 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -163,6 +163,18 @@ struct disk_part_tbl { | |||
163 | 163 | ||
164 | struct disk_events; | 164 | struct disk_events; |
165 | 165 | ||
166 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
167 | |||
168 | struct blk_integrity { | ||
169 | struct blk_integrity_profile *profile; | ||
170 | unsigned char flags; | ||
171 | unsigned char tuple_size; | ||
172 | unsigned char interval_exp; | ||
173 | unsigned char tag_size; | ||
174 | }; | ||
175 | |||
176 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | ||
177 | |||
166 | struct gendisk { | 178 | struct gendisk { |
167 | /* major, first_minor and minors are input parameters only, | 179 | /* major, first_minor and minors are input parameters only, |
168 | * don't use directly. Use disk_devt() and disk_max_parts(). | 180 | * don't use directly. Use disk_devt() and disk_max_parts(). |
@@ -198,8 +210,8 @@ struct gendisk { | |||
198 | atomic_t sync_io; /* RAID */ | 210 | atomic_t sync_io; /* RAID */ |
199 | struct disk_events *ev; | 211 | struct disk_events *ev; |
200 | #ifdef CONFIG_BLK_DEV_INTEGRITY | 212 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
201 | struct blk_integrity *integrity; | 213 | struct kobject integrity_kobj; |
202 | #endif | 214 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
203 | int node_id; | 215 | int node_id; |
204 | }; | 216 | }; |
205 | 217 | ||
@@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) | |||
727 | #endif | 739 | #endif |
728 | } | 740 | } |
729 | 741 | ||
742 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
743 | extern void blk_integrity_add(struct gendisk *); | ||
744 | extern void blk_integrity_del(struct gendisk *); | ||
745 | extern void blk_integrity_revalidate(struct gendisk *); | ||
746 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | ||
747 | static inline void blk_integrity_add(struct gendisk *disk) { } | ||
748 | static inline void blk_integrity_del(struct gendisk *disk) { } | ||
749 | static inline void blk_integrity_revalidate(struct gendisk *disk) { } | ||
750 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | ||
751 | |||
730 | #else /* CONFIG_BLOCK */ | 752 | #else /* CONFIG_BLOCK */ |
731 | 753 | ||
732 | static inline void printk_all_partitions(void) { } | 754 | static inline void printk_all_partitions(void) { } |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f92cbd2f4450..8942af0813e3 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -14,7 +14,7 @@ struct vm_area_struct; | |||
14 | #define ___GFP_HIGHMEM 0x02u | 14 | #define ___GFP_HIGHMEM 0x02u |
15 | #define ___GFP_DMA32 0x04u | 15 | #define ___GFP_DMA32 0x04u |
16 | #define ___GFP_MOVABLE 0x08u | 16 | #define ___GFP_MOVABLE 0x08u |
17 | #define ___GFP_WAIT 0x10u | 17 | #define ___GFP_RECLAIMABLE 0x10u |
18 | #define ___GFP_HIGH 0x20u | 18 | #define ___GFP_HIGH 0x20u |
19 | #define ___GFP_IO 0x40u | 19 | #define ___GFP_IO 0x40u |
20 | #define ___GFP_FS 0x80u | 20 | #define ___GFP_FS 0x80u |
@@ -29,18 +29,17 @@ struct vm_area_struct; | |||
29 | #define ___GFP_NOMEMALLOC 0x10000u | 29 | #define ___GFP_NOMEMALLOC 0x10000u |
30 | #define ___GFP_HARDWALL 0x20000u | 30 | #define ___GFP_HARDWALL 0x20000u |
31 | #define ___GFP_THISNODE 0x40000u | 31 | #define ___GFP_THISNODE 0x40000u |
32 | #define ___GFP_RECLAIMABLE 0x80000u | 32 | #define ___GFP_ATOMIC 0x80000u |
33 | #define ___GFP_NOACCOUNT 0x100000u | 33 | #define ___GFP_NOACCOUNT 0x100000u |
34 | #define ___GFP_NOTRACK 0x200000u | 34 | #define ___GFP_NOTRACK 0x200000u |
35 | #define ___GFP_NO_KSWAPD 0x400000u | 35 | #define ___GFP_DIRECT_RECLAIM 0x400000u |
36 | #define ___GFP_OTHER_NODE 0x800000u | 36 | #define ___GFP_OTHER_NODE 0x800000u |
37 | #define ___GFP_WRITE 0x1000000u | 37 | #define ___GFP_WRITE 0x1000000u |
38 | #define ___GFP_KSWAPD_RECLAIM 0x2000000u | ||
38 | /* If the above are modified, __GFP_BITS_SHIFT may need updating */ | 39 | /* If the above are modified, __GFP_BITS_SHIFT may need updating */ |
39 | 40 | ||
40 | /* | 41 | /* |
41 | * GFP bitmasks.. | 42 | * Physical address zone modifiers (see linux/mmzone.h - low four bits) |
42 | * | ||
43 | * Zone modifiers (see linux/mmzone.h - low three bits) | ||
44 | * | 43 | * |
45 | * Do not put any conditional on these. If necessary modify the definitions | 44 | * Do not put any conditional on these. If necessary modify the definitions |
46 | * without the underscores and use them consistently. The definitions here may | 45 | * without the underscores and use them consistently. The definitions here may |
@@ -50,116 +49,229 @@ struct vm_area_struct; | |||
50 | #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) | 49 | #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) |
51 | #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) | 50 | #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) |
52 | #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ | 51 | #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ |
52 | #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ | ||
53 | #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) | 53 | #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) |
54 | |||
55 | /* | ||
56 | * Page mobility and placement hints | ||
57 | * | ||
58 | * These flags provide hints about how mobile the page is. Pages with similar | ||
59 | * mobility are placed within the same pageblocks to minimise problems due | ||
60 | * to external fragmentation. | ||
61 | * | ||
62 | * __GFP_MOVABLE (also a zone modifier) indicates that the page can be | ||
63 | * moved by page migration during memory compaction or can be reclaimed. | ||
64 | * | ||
65 | * __GFP_RECLAIMABLE is used for slab allocations that specify | ||
66 | * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. | ||
67 | * | ||
68 | * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, | ||
69 | * these pages will be spread between local zones to avoid all the dirty | ||
70 | * pages being in one zone (fair zone allocation policy). | ||
71 | * | ||
72 | * __GFP_HARDWALL enforces the cpuset memory allocation policy. | ||
73 | * | ||
74 | * __GFP_THISNODE forces the allocation to be satisified from the requested | ||
75 | * node with no fallbacks or placement policy enforcements. | ||
76 | */ | ||
77 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) | ||
78 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) | ||
79 | #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) | ||
80 | #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) | ||
81 | |||
54 | /* | 82 | /* |
55 | * Action modifiers - doesn't change the zoning | 83 | * Watermark modifiers -- controls access to emergency reserves |
84 | * | ||
85 | * __GFP_HIGH indicates that the caller is high-priority and that granting | ||
86 | * the request is necessary before the system can make forward progress. | ||
87 | * For example, creating an IO context to clean pages. | ||
88 | * | ||
89 | * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is | ||
90 | * high priority. Users are typically interrupt handlers. This may be | ||
91 | * used in conjunction with __GFP_HIGH | ||
92 | * | ||
93 | * __GFP_MEMALLOC allows access to all memory. This should only be used when | ||
94 | * the caller guarantees the allocation will allow more memory to be freed | ||
95 | * very shortly e.g. process exiting or swapping. Users either should | ||
96 | * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). | ||
97 | * | ||
98 | * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. | ||
99 | * This takes precedence over the __GFP_MEMALLOC flag if both are set. | ||
100 | * | ||
101 | * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. | ||
102 | */ | ||
103 | #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) | ||
104 | #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) | ||
105 | #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) | ||
106 | #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) | ||
107 | #define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) | ||
108 | |||
109 | /* | ||
110 | * Reclaim modifiers | ||
111 | * | ||
112 | * __GFP_IO can start physical IO. | ||
113 | * | ||
114 | * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the | ||
115 | * allocator recursing into the filesystem which might already be holding | ||
116 | * locks. | ||
117 | * | ||
118 | * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. | ||
119 | * This flag can be cleared to avoid unnecessary delays when a fallback | ||
120 | * option is available. | ||
121 | * | ||
122 | * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when | ||
123 | * the low watermark is reached and have it reclaim pages until the high | ||
124 | * watermark is reached. A caller may wish to clear this flag when fallback | ||
125 | * options are available and the reclaim is likely to disrupt the system. The | ||
126 | * canonical example is THP allocation where a fallback is cheap but | ||
127 | * reclaim/compaction may cause indirect stalls. | ||
128 | * | ||
129 | * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. | ||
56 | * | 130 | * |
57 | * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt | 131 | * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt |
58 | * _might_ fail. This depends upon the particular VM implementation. | 132 | * _might_ fail. This depends upon the particular VM implementation. |
59 | * | 133 | * |
60 | * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller | 134 | * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller |
61 | * cannot handle allocation failures. New users should be evaluated carefully | 135 | * cannot handle allocation failures. New users should be evaluated carefully |
62 | * (and the flag should be used only when there is no reasonable failure policy) | 136 | * (and the flag should be used only when there is no reasonable failure |
63 | * but it is definitely preferable to use the flag rather than opencode endless | 137 | * policy) but it is definitely preferable to use the flag rather than |
64 | * loop around allocator. | 138 | * opencode endless loop around allocator. |
65 | * | 139 | * |
66 | * __GFP_NORETRY: The VM implementation must not retry indefinitely and will | 140 | * __GFP_NORETRY: The VM implementation must not retry indefinitely and will |
67 | * return NULL when direct reclaim and memory compaction have failed to allow | 141 | * return NULL when direct reclaim and memory compaction have failed to allow |
68 | * the allocation to succeed. The OOM killer is not called with the current | 142 | * the allocation to succeed. The OOM killer is not called with the current |
69 | * implementation. | 143 | * implementation. |
70 | * | ||
71 | * __GFP_MOVABLE: Flag that this page will be movable by the page migration | ||
72 | * mechanism or reclaimed | ||
73 | */ | 144 | */ |
74 | #define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ | 145 | #define __GFP_IO ((__force gfp_t)___GFP_IO) |
75 | #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ | 146 | #define __GFP_FS ((__force gfp_t)___GFP_FS) |
76 | #define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ | 147 | #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ |
77 | #define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ | 148 | #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ |
78 | #define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ | 149 | #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) |
79 | #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ | 150 | #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) |
80 | #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ | 151 | #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) |
81 | #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ | 152 | #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) |
82 | #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ | ||
83 | #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ | ||
84 | #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ | ||
85 | #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ | ||
86 | #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. | ||
87 | * This takes precedence over the | ||
88 | * __GFP_MEMALLOC flag if both are | ||
89 | * set | ||
90 | */ | ||
91 | #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ | ||
92 | #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ | ||
93 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ | ||
94 | #define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */ | ||
95 | #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ | ||
96 | |||
97 | #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) | ||
98 | #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ | ||
99 | #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ | ||
100 | 153 | ||
101 | /* | 154 | /* |
102 | * This may seem redundant, but it's a way of annotating false positives vs. | 155 | * Action modifiers |
103 | * allocations that simply cannot be supported (e.g. page tables). | 156 | * |
157 | * __GFP_COLD indicates that the caller does not expect to be used in the near | ||
158 | * future. Where possible, a cache-cold page will be returned. | ||
159 | * | ||
160 | * __GFP_NOWARN suppresses allocation failure reports. | ||
161 | * | ||
162 | * __GFP_COMP address compound page metadata. | ||
163 | * | ||
164 | * __GFP_ZERO returns a zeroed page on success. | ||
165 | * | ||
166 | * __GFP_NOTRACK avoids tracking with kmemcheck. | ||
167 | * | ||
168 | * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of | ||
169 | * distinguishing in the source between false positives and allocations that | ||
170 | * cannot be supported (e.g. page tables). | ||
171 | * | ||
172 | * __GFP_OTHER_NODE is for allocations that are on a remote node but that | ||
173 | * should not be accounted for as a remote allocation in vmstat. A | ||
174 | * typical user would be khugepaged collapsing a huge page on a remote | ||
175 | * node. | ||
104 | */ | 176 | */ |
177 | #define __GFP_COLD ((__force gfp_t)___GFP_COLD) | ||
178 | #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) | ||
179 | #define __GFP_COMP ((__force gfp_t)___GFP_COMP) | ||
180 | #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) | ||
181 | #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) | ||
105 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) | 182 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) |
183 | #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) | ||
106 | 184 | ||
107 | #define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ | 185 | /* Room for N __GFP_FOO bits */ |
186 | #define __GFP_BITS_SHIFT 26 | ||
108 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) | 187 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
109 | 188 | ||
110 | /* This equals 0, but use constants in case they ever change */ | 189 | /* |
111 | #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) | 190 | * Useful GFP flag combinations that are commonly used. It is recommended |
112 | /* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ | 191 | * that subsystems start with one of these combinations and then set/clear |
113 | #define GFP_ATOMIC (__GFP_HIGH) | 192 | * __GFP_FOO flags as necessary. |
114 | #define GFP_NOIO (__GFP_WAIT) | 193 | * |
115 | #define GFP_NOFS (__GFP_WAIT | __GFP_IO) | 194 | * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower |
116 | #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) | 195 | * watermark is applied to allow access to "atomic reserves" |
117 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | 196 | * |
197 | * GFP_KERNEL is typical for kernel-internal allocations. The caller requires | ||
198 | * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. | ||
199 | * | ||
200 | * GFP_NOWAIT is for kernel allocations that should not stall for direct | ||
201 | * reclaim, start physical IO or use any filesystem callback. | ||
202 | * | ||
203 | * GFP_NOIO will use direct reclaim to discard clean pages or slab pages | ||
204 | * that do not require the starting of any physical IO. | ||
205 | * | ||
206 | * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. | ||
207 | * | ||
208 | * GFP_USER is for userspace allocations that also need to be directly | ||
209 | * accessibly by the kernel or hardware. It is typically used by hardware | ||
210 | * for buffers that are mapped to userspace (e.g. graphics) that hardware | ||
211 | * still must DMA to. cpuset limits are enforced for these allocations. | ||
212 | * | ||
213 | * GFP_DMA exists for historical reasons and should be avoided where possible. | ||
214 | * The flags indicates that the caller requires that the lowest zone be | ||
215 | * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but | ||
216 | * it would require careful auditing as some users really require it and | ||
217 | * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the | ||
218 | * lowest zone as a type of emergency reserve. | ||
219 | * | ||
220 | * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit | ||
221 | * address. | ||
222 | * | ||
223 | * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, | ||
224 | * do not need to be directly accessible by the kernel but that cannot | ||
225 | * move once in use. An example may be a hardware allocation that maps | ||
226 | * data directly into userspace but has no addressing limitations. | ||
227 | * | ||
228 | * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not | ||
229 | * need direct access to but can use kmap() when access is required. They | ||
230 | * are expected to be movable via page reclaim or page migration. Typically, | ||
231 | * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. | ||
232 | * | ||
233 | * GFP_TRANSHUGE is used for THP allocations. They are compound allocations | ||
234 | * that will fail quickly if memory is not available and will not wake | ||
235 | * kswapd on failure. | ||
236 | */ | ||
237 | #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) | ||
238 | #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) | ||
239 | #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) | ||
240 | #define GFP_NOIO (__GFP_RECLAIM) | ||
241 | #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) | ||
242 | #define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ | ||
118 | __GFP_RECLAIMABLE) | 243 | __GFP_RECLAIMABLE) |
119 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) | 244 | #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
245 | #define GFP_DMA __GFP_DMA | ||
246 | #define GFP_DMA32 __GFP_DMA32 | ||
120 | #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) | 247 | #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) |
121 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) | 248 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
122 | #define GFP_IOFS (__GFP_IO | __GFP_FS) | 249 | #define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
123 | #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ | 250 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ |
124 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 251 | ~__GFP_KSWAPD_RECLAIM) |
125 | __GFP_NO_KSWAPD) | ||
126 | 252 | ||
127 | /* This mask makes up all the page movable related flags */ | 253 | /* Convert GFP flags to their corresponding migrate type */ |
128 | #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) | 254 | #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
255 | #define GFP_MOVABLE_SHIFT 3 | ||
129 | 256 | ||
130 | /* Control page allocator reclaim behavior */ | ||
131 | #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | ||
132 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | ||
133 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) | ||
134 | |||
135 | /* Control slab gfp mask during early boot */ | ||
136 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) | ||
137 | |||
138 | /* Control allocation constraints */ | ||
139 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | ||
140 | |||
141 | /* Do not use these with a slab allocator */ | ||
142 | #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) | ||
143 | |||
144 | /* Flag - indicates that the buffer will be suitable for DMA. Ignored on some | ||
145 | platforms, used as appropriate on others */ | ||
146 | |||
147 | #define GFP_DMA __GFP_DMA | ||
148 | |||
149 | /* 4GB DMA on some platforms */ | ||
150 | #define GFP_DMA32 __GFP_DMA32 | ||
151 | |||
152 | /* Convert GFP flags to their corresponding migrate type */ | ||
153 | static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) | 257 | static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) |
154 | { | 258 | { |
155 | WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); | 259 | VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); |
260 | BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); | ||
261 | BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); | ||
156 | 262 | ||
157 | if (unlikely(page_group_by_mobility_disabled)) | 263 | if (unlikely(page_group_by_mobility_disabled)) |
158 | return MIGRATE_UNMOVABLE; | 264 | return MIGRATE_UNMOVABLE; |
159 | 265 | ||
160 | /* Group based on mobility */ | 266 | /* Group based on mobility */ |
161 | return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | | 267 | return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; |
162 | ((gfp_flags & __GFP_RECLAIMABLE) != 0); | 268 | } |
269 | #undef GFP_MOVABLE_MASK | ||
270 | #undef GFP_MOVABLE_SHIFT | ||
271 | |||
272 | static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) | ||
273 | { | ||
274 | return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); | ||
163 | } | 275 | } |
164 | 276 | ||
165 | #ifdef CONFIG_HIGHMEM | 277 | #ifdef CONFIG_HIGHMEM |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 14cac67c2012..fb0fde686cb1 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
@@ -400,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) | |||
400 | { | 400 | { |
401 | return ERR_PTR(-EINVAL); | 401 | return ERR_PTR(-EINVAL); |
402 | } | 402 | } |
403 | |||
403 | static inline int desc_to_gpio(const struct gpio_desc *desc) | 404 | static inline int desc_to_gpio(const struct gpio_desc *desc) |
404 | { | 405 | { |
405 | /* GPIO can never have been requested */ | 406 | /* GPIO can never have been requested */ |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1aed31c5ffba..d1baebf350d8 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -206,6 +206,9 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, | |||
206 | 206 | ||
207 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ | 207 | #endif /* CONFIG_GPIOLIB_IRQCHIP */ |
208 | 208 | ||
209 | int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); | ||
210 | void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); | ||
211 | |||
209 | #ifdef CONFIG_PINCTRL | 212 | #ifdef CONFIG_PINCTRL |
210 | 213 | ||
211 | /** | 214 | /** |
diff --git a/include/linux/hid.h b/include/linux/hid.h index f17980de2662..251a1d382e23 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -698,8 +698,8 @@ struct hid_driver { | |||
698 | int (*input_mapped)(struct hid_device *hdev, | 698 | int (*input_mapped)(struct hid_device *hdev, |
699 | struct hid_input *hidinput, struct hid_field *field, | 699 | struct hid_input *hidinput, struct hid_field *field, |
700 | struct hid_usage *usage, unsigned long **bit, int *max); | 700 | struct hid_usage *usage, unsigned long **bit, int *max); |
701 | void (*input_configured)(struct hid_device *hdev, | 701 | int (*input_configured)(struct hid_device *hdev, |
702 | struct hid_input *hidinput); | 702 | struct hid_input *hidinput); |
703 | void (*feature_mapping)(struct hid_device *hdev, | 703 | void (*feature_mapping)(struct hid_device *hdev, |
704 | struct hid_field *field, | 704 | struct hid_field *field, |
705 | struct hid_usage *usage); | 705 | struct hid_usage *usage); |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6aefcd0031a6..bb3f3297062a 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -78,7 +78,6 @@ static inline void __kunmap_atomic(void *addr) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) | 80 | #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) |
81 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) | ||
82 | 81 | ||
83 | #define kmap_flush_unused() do {} while(0) | 82 | #define kmap_flush_unused() do {} while(0) |
84 | #endif | 83 | #endif |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5e35379f58a5..685c262e0be8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -483,6 +483,17 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | |||
483 | #define hugepages_supported() (HPAGE_SHIFT != 0) | 483 | #define hugepages_supported() (HPAGE_SHIFT != 0) |
484 | #endif | 484 | #endif |
485 | 485 | ||
486 | void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); | ||
487 | |||
488 | static inline void hugetlb_count_add(long l, struct mm_struct *mm) | ||
489 | { | ||
490 | atomic_long_add(l, &mm->hugetlb_usage); | ||
491 | } | ||
492 | |||
493 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) | ||
494 | { | ||
495 | atomic_long_sub(l, &mm->hugetlb_usage); | ||
496 | } | ||
486 | #else /* CONFIG_HUGETLB_PAGE */ | 497 | #else /* CONFIG_HUGETLB_PAGE */ |
487 | struct hstate {}; | 498 | struct hstate {}; |
488 | #define alloc_huge_page(v, a, r) NULL | 499 | #define alloc_huge_page(v, a, r) NULL |
@@ -519,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | |||
519 | { | 530 | { |
520 | return &mm->page_table_lock; | 531 | return &mm->page_table_lock; |
521 | } | 532 | } |
533 | |||
534 | static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) | ||
535 | { | ||
536 | } | ||
537 | |||
538 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) | ||
539 | { | ||
540 | } | ||
522 | #endif /* CONFIG_HUGETLB_PAGE */ | 541 | #endif /* CONFIG_HUGETLB_PAGE */ |
523 | 542 | ||
524 | static inline spinlock_t *huge_pte_lock(struct hstate *h, | 543 | static inline spinlock_t *huge_pte_lock(struct hstate *h, |
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index bcc853eccc85..24154c26d469 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h | |||
@@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) | |||
32 | 32 | ||
33 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | 33 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) |
34 | return NULL; | 34 | return NULL; |
35 | return (struct hugetlb_cgroup *)page[2].lru.next; | 35 | return (struct hugetlb_cgroup *)page[2].private; |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline | 38 | static inline |
@@ -42,15 +42,13 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) | |||
42 | 42 | ||
43 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) | 43 | if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) |
44 | return -1; | 44 | return -1; |
45 | page[2].lru.next = (void *)h_cg; | 45 | page[2].private = (unsigned long)h_cg; |
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline bool hugetlb_cgroup_disabled(void) | 49 | static inline bool hugetlb_cgroup_disabled(void) |
50 | { | 50 | { |
51 | if (hugetlb_cgrp_subsys.disabled) | 51 | return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); |
52 | return true; | ||
53 | return false; | ||
54 | } | 52 | } |
55 | 53 | ||
56 | extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, | 54 | extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 54733d5b503e..8fdc17b84739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define _HYPERV_H | 26 | #define _HYPERV_H |
27 | 27 | ||
28 | #include <uapi/linux/hyperv.h> | 28 | #include <uapi/linux/hyperv.h> |
29 | #include <uapi/asm/hyperv.h> | ||
29 | 30 | ||
30 | #include <linux/types.h> | 31 | #include <linux/types.h> |
31 | #include <linux/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h index 1c06b5c7c308..01edd96fe1f7 100644 --- a/include/linux/i2c-ocores.h +++ b/include/linux/i2c-ocores.h | |||
@@ -15,6 +15,7 @@ struct ocores_i2c_platform_data { | |||
15 | u32 reg_shift; /* register offset shift value */ | 15 | u32 reg_shift; /* register offset shift value */ |
16 | u32 reg_io_width; /* register io read/write width */ | 16 | u32 reg_io_width; /* register io read/write width */ |
17 | u32 clock_khz; /* input clock in kHz */ | 17 | u32 clock_khz; /* input clock in kHz */ |
18 | bool big_endian; /* registers are big endian */ | ||
18 | u8 num_devices; /* number of devices in the devices list */ | 19 | u8 num_devices; /* number of devices in the devices list */ |
19 | struct i2c_board_info const *devices; /* devices connected to the bus */ | 20 | struct i2c_board_info const *devices; /* devices connected to the bus */ |
20 | }; | 21 | }; |
diff --git a/include/linux/i2c/i2c-rcar.h b/include/linux/i2c/i2c-rcar.h deleted file mode 100644 index 496f5c2b23c9..000000000000 --- a/include/linux/i2c/i2c-rcar.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef __I2C_R_CAR_H__ | ||
2 | #define __I2C_R_CAR_H__ | ||
3 | |||
4 | #include <linux/platform_device.h> | ||
5 | |||
6 | struct i2c_rcar_platform_data { | ||
7 | u32 bus_speed; | ||
8 | }; | ||
9 | |||
10 | #endif /* __I2C_R_CAR_H__ */ | ||
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index cfa906f28b7a..452c0b0d2f32 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -121,7 +121,7 @@ | |||
121 | #define IEEE80211_MAX_SN IEEE80211_SN_MASK | 121 | #define IEEE80211_MAX_SN IEEE80211_SN_MASK |
122 | #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) | 122 | #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) |
123 | 123 | ||
124 | static inline int ieee80211_sn_less(u16 sn1, u16 sn2) | 124 | static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) |
125 | { | 125 | { |
126 | return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); | 126 | return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); |
127 | } | 127 | } |
@@ -250,7 +250,7 @@ struct ieee80211_qos_hdr { | |||
250 | * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set | 250 | * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set |
251 | * @fc: frame control bytes in little-endian byteorder | 251 | * @fc: frame control bytes in little-endian byteorder |
252 | */ | 252 | */ |
253 | static inline int ieee80211_has_tods(__le16 fc) | 253 | static inline bool ieee80211_has_tods(__le16 fc) |
254 | { | 254 | { |
255 | return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; | 255 | return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; |
256 | } | 256 | } |
@@ -259,7 +259,7 @@ static inline int ieee80211_has_tods(__le16 fc) | |||
259 | * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set | 259 | * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set |
260 | * @fc: frame control bytes in little-endian byteorder | 260 | * @fc: frame control bytes in little-endian byteorder |
261 | */ | 261 | */ |
262 | static inline int ieee80211_has_fromds(__le16 fc) | 262 | static inline bool ieee80211_has_fromds(__le16 fc) |
263 | { | 263 | { |
264 | return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; | 264 | return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; |
265 | } | 265 | } |
@@ -268,7 +268,7 @@ static inline int ieee80211_has_fromds(__le16 fc) | |||
268 | * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set | 268 | * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set |
269 | * @fc: frame control bytes in little-endian byteorder | 269 | * @fc: frame control bytes in little-endian byteorder |
270 | */ | 270 | */ |
271 | static inline int ieee80211_has_a4(__le16 fc) | 271 | static inline bool ieee80211_has_a4(__le16 fc) |
272 | { | 272 | { |
273 | __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); | 273 | __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); |
274 | return (fc & tmp) == tmp; | 274 | return (fc & tmp) == tmp; |
@@ -278,7 +278,7 @@ static inline int ieee80211_has_a4(__le16 fc) | |||
278 | * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set | 278 | * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set |
279 | * @fc: frame control bytes in little-endian byteorder | 279 | * @fc: frame control bytes in little-endian byteorder |
280 | */ | 280 | */ |
281 | static inline int ieee80211_has_morefrags(__le16 fc) | 281 | static inline bool ieee80211_has_morefrags(__le16 fc) |
282 | { | 282 | { |
283 | return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; | 283 | return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; |
284 | } | 284 | } |
@@ -287,7 +287,7 @@ static inline int ieee80211_has_morefrags(__le16 fc) | |||
287 | * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set | 287 | * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set |
288 | * @fc: frame control bytes in little-endian byteorder | 288 | * @fc: frame control bytes in little-endian byteorder |
289 | */ | 289 | */ |
290 | static inline int ieee80211_has_retry(__le16 fc) | 290 | static inline bool ieee80211_has_retry(__le16 fc) |
291 | { | 291 | { |
292 | return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; | 292 | return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; |
293 | } | 293 | } |
@@ -296,7 +296,7 @@ static inline int ieee80211_has_retry(__le16 fc) | |||
296 | * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set | 296 | * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set |
297 | * @fc: frame control bytes in little-endian byteorder | 297 | * @fc: frame control bytes in little-endian byteorder |
298 | */ | 298 | */ |
299 | static inline int ieee80211_has_pm(__le16 fc) | 299 | static inline bool ieee80211_has_pm(__le16 fc) |
300 | { | 300 | { |
301 | return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; | 301 | return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; |
302 | } | 302 | } |
@@ -305,7 +305,7 @@ static inline int ieee80211_has_pm(__le16 fc) | |||
305 | * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set | 305 | * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set |
306 | * @fc: frame control bytes in little-endian byteorder | 306 | * @fc: frame control bytes in little-endian byteorder |
307 | */ | 307 | */ |
308 | static inline int ieee80211_has_moredata(__le16 fc) | 308 | static inline bool ieee80211_has_moredata(__le16 fc) |
309 | { | 309 | { |
310 | return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; | 310 | return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; |
311 | } | 311 | } |
@@ -314,7 +314,7 @@ static inline int ieee80211_has_moredata(__le16 fc) | |||
314 | * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set | 314 | * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set |
315 | * @fc: frame control bytes in little-endian byteorder | 315 | * @fc: frame control bytes in little-endian byteorder |
316 | */ | 316 | */ |
317 | static inline int ieee80211_has_protected(__le16 fc) | 317 | static inline bool ieee80211_has_protected(__le16 fc) |
318 | { | 318 | { |
319 | return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; | 319 | return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; |
320 | } | 320 | } |
@@ -323,7 +323,7 @@ static inline int ieee80211_has_protected(__le16 fc) | |||
323 | * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set | 323 | * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set |
324 | * @fc: frame control bytes in little-endian byteorder | 324 | * @fc: frame control bytes in little-endian byteorder |
325 | */ | 325 | */ |
326 | static inline int ieee80211_has_order(__le16 fc) | 326 | static inline bool ieee80211_has_order(__le16 fc) |
327 | { | 327 | { |
328 | return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; | 328 | return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; |
329 | } | 329 | } |
@@ -332,7 +332,7 @@ static inline int ieee80211_has_order(__le16 fc) | |||
332 | * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT | 332 | * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT |
333 | * @fc: frame control bytes in little-endian byteorder | 333 | * @fc: frame control bytes in little-endian byteorder |
334 | */ | 334 | */ |
335 | static inline int ieee80211_is_mgmt(__le16 fc) | 335 | static inline bool ieee80211_is_mgmt(__le16 fc) |
336 | { | 336 | { |
337 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == | 337 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == |
338 | cpu_to_le16(IEEE80211_FTYPE_MGMT); | 338 | cpu_to_le16(IEEE80211_FTYPE_MGMT); |
@@ -342,7 +342,7 @@ static inline int ieee80211_is_mgmt(__le16 fc) | |||
342 | * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL | 342 | * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL |
343 | * @fc: frame control bytes in little-endian byteorder | 343 | * @fc: frame control bytes in little-endian byteorder |
344 | */ | 344 | */ |
345 | static inline int ieee80211_is_ctl(__le16 fc) | 345 | static inline bool ieee80211_is_ctl(__le16 fc) |
346 | { | 346 | { |
347 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == | 347 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == |
348 | cpu_to_le16(IEEE80211_FTYPE_CTL); | 348 | cpu_to_le16(IEEE80211_FTYPE_CTL); |
@@ -352,7 +352,7 @@ static inline int ieee80211_is_ctl(__le16 fc) | |||
352 | * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA | 352 | * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA |
353 | * @fc: frame control bytes in little-endian byteorder | 353 | * @fc: frame control bytes in little-endian byteorder |
354 | */ | 354 | */ |
355 | static inline int ieee80211_is_data(__le16 fc) | 355 | static inline bool ieee80211_is_data(__le16 fc) |
356 | { | 356 | { |
357 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == | 357 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == |
358 | cpu_to_le16(IEEE80211_FTYPE_DATA); | 358 | cpu_to_le16(IEEE80211_FTYPE_DATA); |
@@ -362,7 +362,7 @@ static inline int ieee80211_is_data(__le16 fc) | |||
362 | * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set | 362 | * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set |
363 | * @fc: frame control bytes in little-endian byteorder | 363 | * @fc: frame control bytes in little-endian byteorder |
364 | */ | 364 | */ |
365 | static inline int ieee80211_is_data_qos(__le16 fc) | 365 | static inline bool ieee80211_is_data_qos(__le16 fc) |
366 | { | 366 | { |
367 | /* | 367 | /* |
368 | * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need | 368 | * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need |
@@ -376,7 +376,7 @@ static inline int ieee80211_is_data_qos(__le16 fc) | |||
376 | * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data | 376 | * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data |
377 | * @fc: frame control bytes in little-endian byteorder | 377 | * @fc: frame control bytes in little-endian byteorder |
378 | */ | 378 | */ |
379 | static inline int ieee80211_is_data_present(__le16 fc) | 379 | static inline bool ieee80211_is_data_present(__le16 fc) |
380 | { | 380 | { |
381 | /* | 381 | /* |
382 | * mask with 0x40 and test that that bit is clear to only return true | 382 | * mask with 0x40 and test that that bit is clear to only return true |
@@ -390,7 +390,7 @@ static inline int ieee80211_is_data_present(__le16 fc) | |||
390 | * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ | 390 | * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ |
391 | * @fc: frame control bytes in little-endian byteorder | 391 | * @fc: frame control bytes in little-endian byteorder |
392 | */ | 392 | */ |
393 | static inline int ieee80211_is_assoc_req(__le16 fc) | 393 | static inline bool ieee80211_is_assoc_req(__le16 fc) |
394 | { | 394 | { |
395 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 395 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
396 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); | 396 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); |
@@ -400,7 +400,7 @@ static inline int ieee80211_is_assoc_req(__le16 fc) | |||
400 | * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP | 400 | * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP |
401 | * @fc: frame control bytes in little-endian byteorder | 401 | * @fc: frame control bytes in little-endian byteorder |
402 | */ | 402 | */ |
403 | static inline int ieee80211_is_assoc_resp(__le16 fc) | 403 | static inline bool ieee80211_is_assoc_resp(__le16 fc) |
404 | { | 404 | { |
405 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 405 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
406 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); | 406 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); |
@@ -410,7 +410,7 @@ static inline int ieee80211_is_assoc_resp(__le16 fc) | |||
410 | * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ | 410 | * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ |
411 | * @fc: frame control bytes in little-endian byteorder | 411 | * @fc: frame control bytes in little-endian byteorder |
412 | */ | 412 | */ |
413 | static inline int ieee80211_is_reassoc_req(__le16 fc) | 413 | static inline bool ieee80211_is_reassoc_req(__le16 fc) |
414 | { | 414 | { |
415 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 415 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
416 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); | 416 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); |
@@ -420,7 +420,7 @@ static inline int ieee80211_is_reassoc_req(__le16 fc) | |||
420 | * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP | 420 | * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP |
421 | * @fc: frame control bytes in little-endian byteorder | 421 | * @fc: frame control bytes in little-endian byteorder |
422 | */ | 422 | */ |
423 | static inline int ieee80211_is_reassoc_resp(__le16 fc) | 423 | static inline bool ieee80211_is_reassoc_resp(__le16 fc) |
424 | { | 424 | { |
425 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 425 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
426 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); | 426 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); |
@@ -430,7 +430,7 @@ static inline int ieee80211_is_reassoc_resp(__le16 fc) | |||
430 | * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ | 430 | * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ |
431 | * @fc: frame control bytes in little-endian byteorder | 431 | * @fc: frame control bytes in little-endian byteorder |
432 | */ | 432 | */ |
433 | static inline int ieee80211_is_probe_req(__le16 fc) | 433 | static inline bool ieee80211_is_probe_req(__le16 fc) |
434 | { | 434 | { |
435 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 435 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
436 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); | 436 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); |
@@ -440,7 +440,7 @@ static inline int ieee80211_is_probe_req(__le16 fc) | |||
440 | * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP | 440 | * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP |
441 | * @fc: frame control bytes in little-endian byteorder | 441 | * @fc: frame control bytes in little-endian byteorder |
442 | */ | 442 | */ |
443 | static inline int ieee80211_is_probe_resp(__le16 fc) | 443 | static inline bool ieee80211_is_probe_resp(__le16 fc) |
444 | { | 444 | { |
445 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 445 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
446 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); | 446 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); |
@@ -450,7 +450,7 @@ static inline int ieee80211_is_probe_resp(__le16 fc) | |||
450 | * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON | 450 | * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON |
451 | * @fc: frame control bytes in little-endian byteorder | 451 | * @fc: frame control bytes in little-endian byteorder |
452 | */ | 452 | */ |
453 | static inline int ieee80211_is_beacon(__le16 fc) | 453 | static inline bool ieee80211_is_beacon(__le16 fc) |
454 | { | 454 | { |
455 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 455 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
456 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); | 456 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); |
@@ -460,7 +460,7 @@ static inline int ieee80211_is_beacon(__le16 fc) | |||
460 | * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM | 460 | * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM |
461 | * @fc: frame control bytes in little-endian byteorder | 461 | * @fc: frame control bytes in little-endian byteorder |
462 | */ | 462 | */ |
463 | static inline int ieee80211_is_atim(__le16 fc) | 463 | static inline bool ieee80211_is_atim(__le16 fc) |
464 | { | 464 | { |
465 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 465 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
466 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); | 466 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); |
@@ -470,7 +470,7 @@ static inline int ieee80211_is_atim(__le16 fc) | |||
470 | * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC | 470 | * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC |
471 | * @fc: frame control bytes in little-endian byteorder | 471 | * @fc: frame control bytes in little-endian byteorder |
472 | */ | 472 | */ |
473 | static inline int ieee80211_is_disassoc(__le16 fc) | 473 | static inline bool ieee80211_is_disassoc(__le16 fc) |
474 | { | 474 | { |
475 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 475 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
476 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); | 476 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); |
@@ -480,7 +480,7 @@ static inline int ieee80211_is_disassoc(__le16 fc) | |||
480 | * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH | 480 | * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH |
481 | * @fc: frame control bytes in little-endian byteorder | 481 | * @fc: frame control bytes in little-endian byteorder |
482 | */ | 482 | */ |
483 | static inline int ieee80211_is_auth(__le16 fc) | 483 | static inline bool ieee80211_is_auth(__le16 fc) |
484 | { | 484 | { |
485 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 485 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
486 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); | 486 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); |
@@ -490,7 +490,7 @@ static inline int ieee80211_is_auth(__le16 fc) | |||
490 | * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH | 490 | * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH |
491 | * @fc: frame control bytes in little-endian byteorder | 491 | * @fc: frame control bytes in little-endian byteorder |
492 | */ | 492 | */ |
493 | static inline int ieee80211_is_deauth(__le16 fc) | 493 | static inline bool ieee80211_is_deauth(__le16 fc) |
494 | { | 494 | { |
495 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 495 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
496 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); | 496 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); |
@@ -500,7 +500,7 @@ static inline int ieee80211_is_deauth(__le16 fc) | |||
500 | * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION | 500 | * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION |
501 | * @fc: frame control bytes in little-endian byteorder | 501 | * @fc: frame control bytes in little-endian byteorder |
502 | */ | 502 | */ |
503 | static inline int ieee80211_is_action(__le16 fc) | 503 | static inline bool ieee80211_is_action(__le16 fc) |
504 | { | 504 | { |
505 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 505 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
506 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); | 506 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); |
@@ -510,7 +510,7 @@ static inline int ieee80211_is_action(__le16 fc) | |||
510 | * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ | 510 | * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ |
511 | * @fc: frame control bytes in little-endian byteorder | 511 | * @fc: frame control bytes in little-endian byteorder |
512 | */ | 512 | */ |
513 | static inline int ieee80211_is_back_req(__le16 fc) | 513 | static inline bool ieee80211_is_back_req(__le16 fc) |
514 | { | 514 | { |
515 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 515 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
516 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); | 516 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); |
@@ -520,7 +520,7 @@ static inline int ieee80211_is_back_req(__le16 fc) | |||
520 | * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK | 520 | * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK |
521 | * @fc: frame control bytes in little-endian byteorder | 521 | * @fc: frame control bytes in little-endian byteorder |
522 | */ | 522 | */ |
523 | static inline int ieee80211_is_back(__le16 fc) | 523 | static inline bool ieee80211_is_back(__le16 fc) |
524 | { | 524 | { |
525 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 525 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
526 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); | 526 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); |
@@ -530,7 +530,7 @@ static inline int ieee80211_is_back(__le16 fc) | |||
530 | * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL | 530 | * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL |
531 | * @fc: frame control bytes in little-endian byteorder | 531 | * @fc: frame control bytes in little-endian byteorder |
532 | */ | 532 | */ |
533 | static inline int ieee80211_is_pspoll(__le16 fc) | 533 | static inline bool ieee80211_is_pspoll(__le16 fc) |
534 | { | 534 | { |
535 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 535 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
536 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); | 536 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); |
@@ -540,7 +540,7 @@ static inline int ieee80211_is_pspoll(__le16 fc) | |||
540 | * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS | 540 | * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS |
541 | * @fc: frame control bytes in little-endian byteorder | 541 | * @fc: frame control bytes in little-endian byteorder |
542 | */ | 542 | */ |
543 | static inline int ieee80211_is_rts(__le16 fc) | 543 | static inline bool ieee80211_is_rts(__le16 fc) |
544 | { | 544 | { |
545 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 545 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
546 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); | 546 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); |
@@ -550,7 +550,7 @@ static inline int ieee80211_is_rts(__le16 fc) | |||
550 | * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS | 550 | * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS |
551 | * @fc: frame control bytes in little-endian byteorder | 551 | * @fc: frame control bytes in little-endian byteorder |
552 | */ | 552 | */ |
553 | static inline int ieee80211_is_cts(__le16 fc) | 553 | static inline bool ieee80211_is_cts(__le16 fc) |
554 | { | 554 | { |
555 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 555 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
556 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); | 556 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); |
@@ -560,7 +560,7 @@ static inline int ieee80211_is_cts(__le16 fc) | |||
560 | * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK | 560 | * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK |
561 | * @fc: frame control bytes in little-endian byteorder | 561 | * @fc: frame control bytes in little-endian byteorder |
562 | */ | 562 | */ |
563 | static inline int ieee80211_is_ack(__le16 fc) | 563 | static inline bool ieee80211_is_ack(__le16 fc) |
564 | { | 564 | { |
565 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 565 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
566 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); | 566 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); |
@@ -570,7 +570,7 @@ static inline int ieee80211_is_ack(__le16 fc) | |||
570 | * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND | 570 | * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND |
571 | * @fc: frame control bytes in little-endian byteorder | 571 | * @fc: frame control bytes in little-endian byteorder |
572 | */ | 572 | */ |
573 | static inline int ieee80211_is_cfend(__le16 fc) | 573 | static inline bool ieee80211_is_cfend(__le16 fc) |
574 | { | 574 | { |
575 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 575 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
576 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); | 576 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); |
@@ -580,7 +580,7 @@ static inline int ieee80211_is_cfend(__le16 fc) | |||
580 | * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK | 580 | * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK |
581 | * @fc: frame control bytes in little-endian byteorder | 581 | * @fc: frame control bytes in little-endian byteorder |
582 | */ | 582 | */ |
583 | static inline int ieee80211_is_cfendack(__le16 fc) | 583 | static inline bool ieee80211_is_cfendack(__le16 fc) |
584 | { | 584 | { |
585 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 585 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
586 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); | 586 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); |
@@ -590,7 +590,7 @@ static inline int ieee80211_is_cfendack(__le16 fc) | |||
590 | * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame | 590 | * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame |
591 | * @fc: frame control bytes in little-endian byteorder | 591 | * @fc: frame control bytes in little-endian byteorder |
592 | */ | 592 | */ |
593 | static inline int ieee80211_is_nullfunc(__le16 fc) | 593 | static inline bool ieee80211_is_nullfunc(__le16 fc) |
594 | { | 594 | { |
595 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 595 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
596 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); | 596 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); |
@@ -600,7 +600,7 @@ static inline int ieee80211_is_nullfunc(__le16 fc) | |||
600 | * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame | 600 | * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame |
601 | * @fc: frame control bytes in little-endian byteorder | 601 | * @fc: frame control bytes in little-endian byteorder |
602 | */ | 602 | */ |
603 | static inline int ieee80211_is_qos_nullfunc(__le16 fc) | 603 | static inline bool ieee80211_is_qos_nullfunc(__le16 fc) |
604 | { | 604 | { |
605 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == | 605 | return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == |
606 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); | 606 | cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); |
@@ -624,7 +624,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) | |||
624 | * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set | 624 | * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set |
625 | * @seq_ctrl: frame sequence control bytes in little-endian byteorder | 625 | * @seq_ctrl: frame sequence control bytes in little-endian byteorder |
626 | */ | 626 | */ |
627 | static inline int ieee80211_is_first_frag(__le16 seq_ctrl) | 627 | static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) |
628 | { | 628 | { |
629 | return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; | 629 | return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; |
630 | } | 630 | } |
@@ -1379,6 +1379,7 @@ struct ieee80211_ht_operation { | |||
1379 | 1379 | ||
1380 | 1380 | ||
1381 | /* block-ack parameters */ | 1381 | /* block-ack parameters */ |
1382 | #define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001 | ||
1382 | #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 | 1383 | #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 |
1383 | #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C | 1384 | #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C |
1384 | #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 | 1385 | #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 |
@@ -1745,8 +1746,7 @@ enum ieee80211_eid { | |||
1745 | WLAN_EID_TIM = 5, | 1746 | WLAN_EID_TIM = 5, |
1746 | WLAN_EID_IBSS_PARAMS = 6, | 1747 | WLAN_EID_IBSS_PARAMS = 6, |
1747 | WLAN_EID_COUNTRY = 7, | 1748 | WLAN_EID_COUNTRY = 7, |
1748 | WLAN_EID_HP_PARAMS = 8, | 1749 | /* 8, 9 reserved */ |
1749 | WLAN_EID_HP_TABLE = 9, | ||
1750 | WLAN_EID_REQUEST = 10, | 1750 | WLAN_EID_REQUEST = 10, |
1751 | WLAN_EID_QBSS_LOAD = 11, | 1751 | WLAN_EID_QBSS_LOAD = 11, |
1752 | WLAN_EID_EDCA_PARAM_SET = 12, | 1752 | WLAN_EID_EDCA_PARAM_SET = 12, |
@@ -1932,6 +1932,8 @@ enum ieee80211_category { | |||
1932 | WLAN_CATEGORY_HT = 7, | 1932 | WLAN_CATEGORY_HT = 7, |
1933 | WLAN_CATEGORY_SA_QUERY = 8, | 1933 | WLAN_CATEGORY_SA_QUERY = 8, |
1934 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, | 1934 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, |
1935 | WLAN_CATEGORY_WNM = 10, | ||
1936 | WLAN_CATEGORY_WNM_UNPROTECTED = 11, | ||
1935 | WLAN_CATEGORY_TDLS = 12, | 1937 | WLAN_CATEGORY_TDLS = 12, |
1936 | WLAN_CATEGORY_MESH_ACTION = 13, | 1938 | WLAN_CATEGORY_MESH_ACTION = 13, |
1937 | WLAN_CATEGORY_MULTIHOP_ACTION = 14, | 1939 | WLAN_CATEGORY_MULTIHOP_ACTION = 14, |
@@ -2396,7 +2398,10 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) | |||
2396 | category = ((u8 *) hdr) + 24; | 2398 | category = ((u8 *) hdr) + 24; |
2397 | return *category != WLAN_CATEGORY_PUBLIC && | 2399 | return *category != WLAN_CATEGORY_PUBLIC && |
2398 | *category != WLAN_CATEGORY_HT && | 2400 | *category != WLAN_CATEGORY_HT && |
2401 | *category != WLAN_CATEGORY_WNM_UNPROTECTED && | ||
2399 | *category != WLAN_CATEGORY_SELF_PROTECTED && | 2402 | *category != WLAN_CATEGORY_SELF_PROTECTED && |
2403 | *category != WLAN_CATEGORY_UNPROT_DMG && | ||
2404 | *category != WLAN_CATEGORY_VHT && | ||
2400 | *category != WLAN_CATEGORY_VENDOR_SPECIFIC; | 2405 | *category != WLAN_CATEGORY_VENDOR_SPECIFIC; |
2401 | } | 2406 | } |
2402 | 2407 | ||
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 1dc1f4ed4001..d3e415674dac 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h | |||
@@ -25,12 +25,22 @@ | |||
25 | 25 | ||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/random.h> | 27 | #include <linux/random.h> |
28 | #include <asm/byteorder.h> | ||
29 | 28 | ||
30 | #define IEEE802154_MTU 127 | 29 | #define IEEE802154_MTU 127 |
31 | #define IEEE802154_ACK_PSDU_LEN 5 | 30 | #define IEEE802154_ACK_PSDU_LEN 5 |
32 | #define IEEE802154_MIN_PSDU_LEN 9 | 31 | #define IEEE802154_MIN_PSDU_LEN 9 |
33 | #define IEEE802154_FCS_LEN 2 | 32 | #define IEEE802154_FCS_LEN 2 |
33 | #define IEEE802154_MAX_AUTH_TAG_LEN 16 | ||
34 | |||
35 | /* General MAC frame format: | ||
36 | * 2 bytes: Frame Control | ||
37 | * 1 byte: Sequence Number | ||
38 | * 20 bytes: Addressing fields | ||
39 | * 14 bytes: Auxiliary Security Header | ||
40 | */ | ||
41 | #define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14) | ||
42 | #define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \ | ||
43 | IEEE802154_FCS_LEN) | ||
34 | 44 | ||
35 | #define IEEE802154_PAN_ID_BROADCAST 0xffff | 45 | #define IEEE802154_PAN_ID_BROADCAST 0xffff |
36 | #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff | 46 | #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff |
@@ -205,6 +215,41 @@ enum { | |||
205 | IEEE802154_SCAN_IN_PROGRESS = 0xfc, | 215 | IEEE802154_SCAN_IN_PROGRESS = 0xfc, |
206 | }; | 216 | }; |
207 | 217 | ||
218 | /* frame control handling */ | ||
219 | #define IEEE802154_FCTL_FTYPE 0x0003 | ||
220 | #define IEEE802154_FCTL_ACKREQ 0x0020 | ||
221 | #define IEEE802154_FCTL_INTRA_PAN 0x0040 | ||
222 | |||
223 | #define IEEE802154_FTYPE_DATA 0x0001 | ||
224 | |||
225 | /* | ||
226 | * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA | ||
227 | * @fc: frame control bytes in little-endian byteorder | ||
228 | */ | ||
229 | static inline int ieee802154_is_data(__le16 fc) | ||
230 | { | ||
231 | return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) == | ||
232 | cpu_to_le16(IEEE802154_FTYPE_DATA); | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * ieee802154_is_ackreq - check if acknowledgment request bit is set | ||
237 | * @fc: frame control bytes in little-endian byteorder | ||
238 | */ | ||
239 | static inline bool ieee802154_is_ackreq(__le16 fc) | ||
240 | { | ||
241 | return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ); | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * ieee802154_is_intra_pan - check if intra pan id communication | ||
246 | * @fc: frame control bytes in little-endian byteorder | ||
247 | */ | ||
248 | static inline bool ieee802154_is_intra_pan(__le16 fc) | ||
249 | { | ||
250 | return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); | ||
251 | } | ||
252 | |||
208 | /** | 253 | /** |
209 | * ieee802154_is_valid_psdu_len - check if psdu len is valid | 254 | * ieee802154_is_valid_psdu_len - check if psdu len is valid |
210 | * available lengths: | 255 | * available lengths: |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index dad8b00beed2..a338a688ee4a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
@@ -46,6 +46,12 @@ struct br_ip_list { | |||
46 | #define BR_LEARNING_SYNC BIT(9) | 46 | #define BR_LEARNING_SYNC BIT(9) |
47 | #define BR_PROXYARP_WIFI BIT(10) | 47 | #define BR_PROXYARP_WIFI BIT(10) |
48 | 48 | ||
49 | /* values as per ieee8021QBridgeFdbAgingTime */ | ||
50 | #define BR_MIN_AGEING_TIME (10 * HZ) | ||
51 | #define BR_MAX_AGEING_TIME (1000000 * HZ) | ||
52 | |||
53 | #define BR_DEFAULT_AGEING_TIME (300 * HZ) | ||
54 | |||
49 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 55 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
50 | 56 | ||
51 | typedef int br_should_route_hook_t(struct sk_buff *skb); | 57 | typedef int br_should_route_hook_t(struct sk_buff *skb); |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index ae5d0d22955d..f923d15b432c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -24,5 +24,6 @@ struct ifla_vf_info { | |||
24 | __u32 min_tx_rate; | 24 | __u32 min_tx_rate; |
25 | __u32 max_tx_rate; | 25 | __u32 max_tx_rate; |
26 | __u32 rss_query_en; | 26 | __u32 rss_query_en; |
27 | __u32 trusted; | ||
27 | }; | 28 | }; |
28 | #endif /* _LINUX_IF_LINK_H */ | 29 | #endif /* _LINUX_IF_LINK_H */ |
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 908429216d9f..9c9de11549a7 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
@@ -110,7 +110,7 @@ struct ip_mc_list { | |||
110 | #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) | 110 | #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) |
111 | #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) | 111 | #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) |
112 | 112 | ||
113 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); | 113 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); |
114 | extern int igmp_rcv(struct sk_buff *); | 114 | extern int igmp_rcv(struct sk_buff *); |
115 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); | 115 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); |
116 | extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); | 116 | extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); |
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 3c17cd7fdf06..2fe939c73cd2 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h | |||
@@ -271,6 +271,10 @@ void st_sensors_power_enable(struct iio_dev *indio_dev); | |||
271 | 271 | ||
272 | void st_sensors_power_disable(struct iio_dev *indio_dev); | 272 | void st_sensors_power_disable(struct iio_dev *indio_dev); |
273 | 273 | ||
274 | int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, | ||
275 | unsigned reg, unsigned writeval, | ||
276 | unsigned *readval); | ||
277 | |||
274 | int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); | 278 | int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); |
275 | 279 | ||
276 | int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); | 280 | int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); |
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 7bb7f673cb3f..19c94c9acc81 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
@@ -294,6 +294,7 @@ static inline s64 iio_get_time_ns(void) | |||
294 | #define INDIO_BUFFER_TRIGGERED 0x02 | 294 | #define INDIO_BUFFER_TRIGGERED 0x02 |
295 | #define INDIO_BUFFER_SOFTWARE 0x04 | 295 | #define INDIO_BUFFER_SOFTWARE 0x04 |
296 | #define INDIO_BUFFER_HARDWARE 0x08 | 296 | #define INDIO_BUFFER_HARDWARE 0x08 |
297 | #define INDIO_EVENT_TRIGGERED 0x10 | ||
297 | 298 | ||
298 | #define INDIO_ALL_BUFFER_MODES \ | 299 | #define INDIO_ALL_BUFFER_MODES \ |
299 | (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) | 300 | (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) |
@@ -457,6 +458,7 @@ struct iio_buffer_setup_ops { | |||
457 | * @scan_index_timestamp:[INTERN] cache of the index to the timestamp | 458 | * @scan_index_timestamp:[INTERN] cache of the index to the timestamp |
458 | * @trig: [INTERN] current device trigger (buffer modes) | 459 | * @trig: [INTERN] current device trigger (buffer modes) |
459 | * @pollfunc: [DRIVER] function run on trigger being received | 460 | * @pollfunc: [DRIVER] function run on trigger being received |
461 | * @pollfunc_event: [DRIVER] function run on events trigger being received | ||
460 | * @channels: [DRIVER] channel specification structure table | 462 | * @channels: [DRIVER] channel specification structure table |
461 | * @num_channels: [DRIVER] number of channels specified in @channels. | 463 | * @num_channels: [DRIVER] number of channels specified in @channels. |
462 | * @channel_attr_list: [INTERN] keep track of automatically created channel | 464 | * @channel_attr_list: [INTERN] keep track of automatically created channel |
@@ -495,6 +497,7 @@ struct iio_dev { | |||
495 | unsigned scan_index_timestamp; | 497 | unsigned scan_index_timestamp; |
496 | struct iio_trigger *trig; | 498 | struct iio_trigger *trig; |
497 | struct iio_poll_func *pollfunc; | 499 | struct iio_poll_func *pollfunc; |
500 | struct iio_poll_func *pollfunc_event; | ||
498 | 501 | ||
499 | struct iio_chan_spec const *channels; | 502 | struct iio_chan_spec const *channels; |
500 | int num_channels; | 503 | int num_channels; |
diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h new file mode 100644 index 000000000000..8fe8537085bb --- /dev/null +++ b/include/linux/iio/triggered_event.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ | ||
2 | #define _LINUX_IIO_TRIGGERED_EVENT_H_ | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | int iio_triggered_event_setup(struct iio_dev *indio_dev, | ||
7 | irqreturn_t (*h)(int irq, void *p), | ||
8 | irqreturn_t (*thread)(int irq, void *p)); | ||
9 | void iio_triggered_event_cleanup(struct iio_dev *indio_dev); | ||
10 | |||
11 | #endif | ||
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a4328cea376a..ee971f335a8b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -171,7 +171,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, | |||
171 | __be32 local, int scope); | 171 | __be32 local, int scope); |
172 | struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, | 172 | struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, |
173 | __be32 mask); | 173 | __be32 mask); |
174 | static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) | 174 | static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) |
175 | { | 175 | { |
176 | return !((addr^ifa->ifa_address)&ifa->ifa_mask); | 176 | return !((addr^ifa->ifa_address)&ifa->ifa_mask); |
177 | } | 177 | } |
@@ -180,15 +180,15 @@ static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) | |||
180 | * Check if a mask is acceptable. | 180 | * Check if a mask is acceptable. |
181 | */ | 181 | */ |
182 | 182 | ||
183 | static __inline__ int bad_mask(__be32 mask, __be32 addr) | 183 | static __inline__ bool bad_mask(__be32 mask, __be32 addr) |
184 | { | 184 | { |
185 | __u32 hmask; | 185 | __u32 hmask; |
186 | if (addr & (mask = ~mask)) | 186 | if (addr & (mask = ~mask)) |
187 | return 1; | 187 | return true; |
188 | hmask = ntohl(mask); | 188 | hmask = ntohl(mask); |
189 | if (hmask & (hmask+1)) | 189 | if (hmask & (hmask+1)) |
190 | return 1; | 190 | return true; |
191 | return 0; | 191 | return false; |
192 | } | 192 | } |
193 | 193 | ||
194 | #define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ | 194 | #define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e38681f4912d..1c1ff7e4faa4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -25,13 +25,6 @@ | |||
25 | extern struct files_struct init_files; | 25 | extern struct files_struct init_files; |
26 | extern struct fs_struct init_fs; | 26 | extern struct fs_struct init_fs; |
27 | 27 | ||
28 | #ifdef CONFIG_CGROUPS | ||
29 | #define INIT_GROUP_RWSEM(sig) \ | ||
30 | .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), | ||
31 | #else | ||
32 | #define INIT_GROUP_RWSEM(sig) | ||
33 | #endif | ||
34 | |||
35 | #ifdef CONFIG_CPUSETS | 28 | #ifdef CONFIG_CPUSETS |
36 | #define INIT_CPUSET_SEQ(tsk) \ | 29 | #define INIT_CPUSET_SEQ(tsk) \ |
37 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), | 30 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), |
@@ -59,12 +52,12 @@ extern struct fs_struct init_fs; | |||
59 | .rlim = INIT_RLIMITS, \ | 52 | .rlim = INIT_RLIMITS, \ |
60 | .cputimer = { \ | 53 | .cputimer = { \ |
61 | .cputime_atomic = INIT_CPUTIME_ATOMIC, \ | 54 | .cputime_atomic = INIT_CPUTIME_ATOMIC, \ |
62 | .running = 0, \ | 55 | .running = false, \ |
56 | .checking_timer = false, \ | ||
63 | }, \ | 57 | }, \ |
64 | INIT_PREV_CPUTIME(sig) \ | 58 | INIT_PREV_CPUTIME(sig) \ |
65 | .cred_guard_mutex = \ | 59 | .cred_guard_mutex = \ |
66 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 60 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
67 | INIT_GROUP_RWSEM(sig) \ | ||
68 | } | 61 | } |
69 | 62 | ||
70 | extern struct nsproxy init_nsproxy; | 63 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/input.h b/include/linux/input.h index 82ce323b9986..1e967694e9a5 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -469,6 +469,8 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); | |||
469 | int input_set_keycode(struct input_dev *dev, | 469 | int input_set_keycode(struct input_dev *dev, |
470 | const struct input_keymap_entry *ke); | 470 | const struct input_keymap_entry *ke); |
471 | 471 | ||
472 | void input_enable_softrepeat(struct input_dev *dev, int delay, int period); | ||
473 | |||
472 | extern struct class input_class; | 474 | extern struct class input_class; |
473 | 475 | ||
474 | /** | 476 | /** |
diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h deleted file mode 100644 index 8a1e0d1a0124..000000000000 --- a/include/linux/input/edt-ft5x06.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | #ifndef _EDT_FT5X06_H | ||
2 | #define _EDT_FT5X06_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | struct edt_ft5x06_platform_data { | ||
13 | int irq_pin; | ||
14 | int reset_pin; | ||
15 | |||
16 | /* startup defaults for operational parameters */ | ||
17 | bool use_parameters; | ||
18 | u8 gain; | ||
19 | u8 threshold; | ||
20 | u8 offset; | ||
21 | u8 report_rate; | ||
22 | }; | ||
23 | |||
24 | #endif /* _EDT_FT5X06_H */ | ||
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6240063bdcac..821273ca4873 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -1,5 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, Intel Corporation. | 2 | * Copyright © 2006-2015, Intel Corporation. |
3 | * | ||
4 | * Authors: Ashok Raj <ashok.raj@intel.com> | ||
5 | * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | ||
6 | * David Woodhouse <David.Woodhouse@intel.com> | ||
3 | * | 7 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | 9 | * under the terms and conditions of the GNU General Public License, |
@@ -13,10 +17,6 @@ | |||
13 | * You should have received a copy of the GNU General Public License along with | 17 | * You should have received a copy of the GNU General Public License along with |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | 18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | 19 | * Place - Suite 330, Boston, MA 02111-1307 USA. |
16 | * | ||
17 | * Copyright (C) 2006-2008 Intel Corporation | ||
18 | * Author: Ashok Raj <ashok.raj@intel.com> | ||
19 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | ||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifndef _INTEL_IOMMU_H_ | 22 | #ifndef _INTEL_IOMMU_H_ |
@@ -25,7 +25,10 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/iova.h> | 26 | #include <linux/iova.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/idr.h> | ||
28 | #include <linux/dma_remapping.h> | 29 | #include <linux/dma_remapping.h> |
30 | #include <linux/mmu_notifier.h> | ||
31 | #include <linux/list.h> | ||
29 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
30 | #include <asm/iommu.h> | 33 | #include <asm/iommu.h> |
31 | 34 | ||
@@ -57,16 +60,21 @@ | |||
57 | #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ | 60 | #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ |
58 | #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ | 61 | #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ |
59 | #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ | 62 | #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ |
63 | #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ | ||
64 | #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ | ||
65 | #define DMAR_PQA_REG 0xd0 /* Page request queue address register */ | ||
66 | #define DMAR_PRS_REG 0xdc /* Page request status register */ | ||
67 | #define DMAR_PECTL_REG 0xe0 /* Page request event control register */ | ||
68 | #define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ | ||
69 | #define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ | ||
70 | #define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ | ||
60 | 71 | ||
61 | #define OFFSET_STRIDE (9) | 72 | #define OFFSET_STRIDE (9) |
62 | /* | 73 | |
63 | #define dmar_readl(dmar, reg) readl(dmar + reg) | 74 | #ifdef CONFIG_64BIT |
64 | #define dmar_readq(dmar, reg) ({ \ | 75 | #define dmar_readq(a) readq(a) |
65 | u32 lo, hi; \ | 76 | #define dmar_writeq(a,v) writeq(v,a) |
66 | lo = readl(dmar + reg); \ | 77 | #else |
67 | hi = readl(dmar + reg + 4); \ | ||
68 | (((u64) hi) << 32) + lo; }) | ||
69 | */ | ||
70 | static inline u64 dmar_readq(void __iomem *addr) | 78 | static inline u64 dmar_readq(void __iomem *addr) |
71 | { | 79 | { |
72 | u32 lo, hi; | 80 | u32 lo, hi; |
@@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
80 | writel((u32)val, addr); | 88 | writel((u32)val, addr); |
81 | writel((u32)(val >> 32), addr + 4); | 89 | writel((u32)(val >> 32), addr + 4); |
82 | } | 90 | } |
91 | #endif | ||
83 | 92 | ||
84 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) | 93 | #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) |
85 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) | 94 | #define DMAR_VER_MINOR(v) ((v) & 0x0f) |
@@ -123,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
123 | #define ecap_srs(e) ((e >> 31) & 0x1) | 132 | #define ecap_srs(e) ((e >> 31) & 0x1) |
124 | #define ecap_ers(e) ((e >> 30) & 0x1) | 133 | #define ecap_ers(e) ((e >> 30) & 0x1) |
125 | #define ecap_prs(e) ((e >> 29) & 0x1) | 134 | #define ecap_prs(e) ((e >> 29) & 0x1) |
126 | /* PASID support used to be on bit 28 */ | 135 | #define ecap_broken_pasid(e) ((e >> 28) & 0x1) |
127 | #define ecap_dis(e) ((e >> 27) & 0x1) | 136 | #define ecap_dis(e) ((e >> 27) & 0x1) |
128 | #define ecap_nest(e) ((e >> 26) & 0x1) | 137 | #define ecap_nest(e) ((e >> 26) & 0x1) |
129 | #define ecap_mts(e) ((e >> 25) & 0x1) | 138 | #define ecap_mts(e) ((e >> 25) & 0x1) |
@@ -253,6 +262,11 @@ enum { | |||
253 | #define QI_DIOTLB_TYPE 0x3 | 262 | #define QI_DIOTLB_TYPE 0x3 |
254 | #define QI_IEC_TYPE 0x4 | 263 | #define QI_IEC_TYPE 0x4 |
255 | #define QI_IWD_TYPE 0x5 | 264 | #define QI_IWD_TYPE 0x5 |
265 | #define QI_EIOTLB_TYPE 0x6 | ||
266 | #define QI_PC_TYPE 0x7 | ||
267 | #define QI_DEIOTLB_TYPE 0x8 | ||
268 | #define QI_PGRP_RESP_TYPE 0x9 | ||
269 | #define QI_PSTRM_RESP_TYPE 0xa | ||
256 | 270 | ||
257 | #define QI_IEC_SELECTIVE (((u64)1) << 4) | 271 | #define QI_IEC_SELECTIVE (((u64)1) << 4) |
258 | #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) | 272 | #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) |
@@ -280,6 +294,53 @@ enum { | |||
280 | #define QI_DEV_IOTLB_SIZE 1 | 294 | #define QI_DEV_IOTLB_SIZE 1 |
281 | #define QI_DEV_IOTLB_MAX_INVS 32 | 295 | #define QI_DEV_IOTLB_MAX_INVS 32 |
282 | 296 | ||
297 | #define QI_PC_PASID(pasid) (((u64)pasid) << 32) | ||
298 | #define QI_PC_DID(did) (((u64)did) << 16) | ||
299 | #define QI_PC_GRAN(gran) (((u64)gran) << 4) | ||
300 | |||
301 | #define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) | ||
302 | #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) | ||
303 | |||
304 | #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) | ||
305 | #define QI_EIOTLB_GL(gl) (((u64)gl) << 7) | ||
306 | #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) | ||
307 | #define QI_EIOTLB_AM(am) (((u64)am)) | ||
308 | #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) | ||
309 | #define QI_EIOTLB_DID(did) (((u64)did) << 16) | ||
310 | #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) | ||
311 | |||
312 | #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) | ||
313 | #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) | ||
314 | #define QI_DEV_EIOTLB_GLOB(g) ((u64)g) | ||
315 | #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) | ||
316 | #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) | ||
317 | #define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) | ||
318 | #define QI_DEV_EIOTLB_MAX_INVS 32 | ||
319 | |||
320 | #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) | ||
321 | #define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) | ||
322 | #define QI_PGRP_RESP_CODE(res) ((u64)(res)) | ||
323 | #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) | ||
324 | #define QI_PGRP_DID(did) (((u64)(did)) << 16) | ||
325 | #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) | ||
326 | |||
327 | #define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) | ||
328 | #define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) | ||
329 | #define QI_PSTRM_RESP_CODE(res) ((u64)(res)) | ||
330 | #define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) | ||
331 | #define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) | ||
332 | #define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) | ||
333 | #define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) | ||
334 | |||
335 | #define QI_RESP_SUCCESS 0x0 | ||
336 | #define QI_RESP_INVALID 0x1 | ||
337 | #define QI_RESP_FAILURE 0xf | ||
338 | |||
339 | #define QI_GRAN_ALL_ALL 0 | ||
340 | #define QI_GRAN_NONG_ALL 1 | ||
341 | #define QI_GRAN_NONG_PASID 2 | ||
342 | #define QI_GRAN_PSI_PASID 3 | ||
343 | |||
283 | struct qi_desc { | 344 | struct qi_desc { |
284 | u64 low, high; | 345 | u64 low, high; |
285 | }; | 346 | }; |
@@ -327,6 +388,10 @@ enum { | |||
327 | #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) | 388 | #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) |
328 | #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) | 389 | #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) |
329 | 390 | ||
391 | struct pasid_entry; | ||
392 | struct pasid_state_entry; | ||
393 | struct page_req_dsc; | ||
394 | |||
330 | struct intel_iommu { | 395 | struct intel_iommu { |
331 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 396 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
332 | u64 reg_phys; /* physical address of hw register set */ | 397 | u64 reg_phys; /* physical address of hw register set */ |
@@ -338,7 +403,7 @@ struct intel_iommu { | |||
338 | int seq_id; /* sequence id of the iommu */ | 403 | int seq_id; /* sequence id of the iommu */ |
339 | int agaw; /* agaw of this iommu */ | 404 | int agaw; /* agaw of this iommu */ |
340 | int msagaw; /* max sagaw of this iommu */ | 405 | int msagaw; /* max sagaw of this iommu */ |
341 | unsigned int irq; | 406 | unsigned int irq, pr_irq; |
342 | u16 segment; /* PCI segment# */ | 407 | u16 segment; /* PCI segment# */ |
343 | unsigned char name[13]; /* Device Name */ | 408 | unsigned char name[13]; /* Device Name */ |
344 | 409 | ||
@@ -350,6 +415,18 @@ struct intel_iommu { | |||
350 | 415 | ||
351 | struct iommu_flush flush; | 416 | struct iommu_flush flush; |
352 | #endif | 417 | #endif |
418 | #ifdef CONFIG_INTEL_IOMMU_SVM | ||
419 | /* These are large and need to be contiguous, so we allocate just | ||
420 | * one for now. We'll maybe want to rethink that if we truly give | ||
421 | * devices away to userspace processes (e.g. for DPDK) and don't | ||
422 | * want to trust that userspace will use *only* the PASID it was | ||
423 | * told to. But while it's all driver-arbitrated, we're fine. */ | ||
424 | struct pasid_entry *pasid_table; | ||
425 | struct pasid_state_entry *pasid_state_table; | ||
426 | struct page_req_dsc *prq; | ||
427 | unsigned char prq_name[16]; /* Name for PRQ interrupt */ | ||
428 | struct idr pasid_idr; | ||
429 | #endif | ||
353 | struct q_inval *qi; /* Queued invalidation info */ | 430 | struct q_inval *qi; /* Queued invalidation info */ |
354 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ | 431 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ |
355 | 432 | ||
@@ -389,6 +466,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | |||
389 | 466 | ||
390 | extern int dmar_ir_support(void); | 467 | extern int dmar_ir_support(void); |
391 | 468 | ||
469 | #ifdef CONFIG_INTEL_IOMMU_SVM | ||
470 | extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu); | ||
471 | extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); | ||
472 | extern int intel_svm_enable_prq(struct intel_iommu *iommu); | ||
473 | extern int intel_svm_finish_prq(struct intel_iommu *iommu); | ||
474 | |||
475 | struct svm_dev_ops; | ||
476 | |||
477 | struct intel_svm_dev { | ||
478 | struct list_head list; | ||
479 | struct rcu_head rcu; | ||
480 | struct device *dev; | ||
481 | struct svm_dev_ops *ops; | ||
482 | int users; | ||
483 | u16 did; | ||
484 | u16 dev_iotlb:1; | ||
485 | u16 sid, qdep; | ||
486 | }; | ||
487 | |||
488 | struct intel_svm { | ||
489 | struct mmu_notifier notifier; | ||
490 | struct mm_struct *mm; | ||
491 | struct intel_iommu *iommu; | ||
492 | int flags; | ||
493 | int pasid; | ||
494 | struct list_head devs; | ||
495 | }; | ||
496 | |||
497 | extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); | ||
498 | extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); | ||
499 | #endif | ||
500 | |||
392 | extern const struct attribute_group *intel_iommu_groups[]; | 501 | extern const struct attribute_group *intel_iommu_groups[]; |
393 | 502 | ||
394 | #endif | 503 | #endif |
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h new file mode 100644 index 000000000000..3c25794042f9 --- /dev/null +++ b/include/linux/intel-svm.h | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * Copyright © 2015 Intel Corporation. | ||
3 | * | ||
4 | * Authors: David Woodhouse <David.Woodhouse@intel.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef __INTEL_SVM_H__ | ||
17 | #define __INTEL_SVM_H__ | ||
18 | |||
19 | struct device; | ||
20 | |||
21 | struct svm_dev_ops { | ||
22 | void (*fault_cb)(struct device *dev, int pasid, u64 address, | ||
23 | u32 private, int rwxp, int response); | ||
24 | }; | ||
25 | |||
26 | /* Values for rxwp in fault_cb callback */ | ||
27 | #define SVM_REQ_READ (1<<3) | ||
28 | #define SVM_REQ_WRITE (1<<2) | ||
29 | #define SVM_REQ_EXEC (1<<1) | ||
30 | #define SVM_REQ_PRIV (1<<0) | ||
31 | |||
32 | |||
33 | /* | ||
34 | * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" | ||
35 | * PASID for the current process. Even if a PASID already exists, a new one | ||
36 | * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID | ||
37 | * will not be given to subsequent callers. This facility allows a driver to | ||
38 | * disambiguate between multiple device contexts which access the same MM, | ||
39 | * if there is no other way to do so. It should be used sparingly, if at all. | ||
40 | */ | ||
41 | #define SVM_FLAG_PRIVATE_PASID (1<<0) | ||
42 | |||
43 | /* | ||
44 | * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only | ||
45 | * for access to kernel addresses. No IOTLB flushes are automatically done | ||
46 | * for kernel mappings; it is valid only for access to the kernel's static | ||
47 | * 1:1 mapping of physical memory — not to vmalloc or even module mappings. | ||
48 | * A future API addition may permit the use of such ranges, by means of an | ||
49 | * explicit IOTLB flush call (akin to the DMA API's unmap method). | ||
50 | * | ||
51 | * It is unlikely that we will ever hook into flush_tlb_kernel_range() to | ||
52 | * do such IOTLB flushes automatically. | ||
53 | */ | ||
54 | #define SVM_FLAG_SUPERVISOR_MODE (1<<1) | ||
55 | |||
56 | #ifdef CONFIG_INTEL_IOMMU_SVM | ||
57 | |||
58 | /** | ||
59 | * intel_svm_bind_mm() - Bind the current process to a PASID | ||
60 | * @dev: Device to be granted acccess | ||
61 | * @pasid: Address for allocated PASID | ||
62 | * @flags: Flags. Later for requesting supervisor mode, etc. | ||
63 | * @ops: Callbacks to device driver | ||
64 | * | ||
65 | * This function attempts to enable PASID support for the given device. | ||
66 | * If the @pasid argument is non-%NULL, a PASID is allocated for access | ||
67 | * to the MM of the current process. | ||
68 | * | ||
69 | * By using a %NULL value for the @pasid argument, this function can | ||
70 | * be used to simply validate that PASID support is available for the | ||
71 | * given device — i.e. that it is behind an IOMMU which has the | ||
72 | * requisite support, and is enabled. | ||
73 | * | ||
74 | * Page faults are handled transparently by the IOMMU code, and there | ||
75 | * should be no need for the device driver to be involved. If a page | ||
76 | * fault cannot be handled (i.e. is an invalid address rather than | ||
77 | * just needs paging in), then the page request will be completed by | ||
78 | * the core IOMMU code with appropriate status, and the device itself | ||
79 | * can then report the resulting fault to its driver via whatever | ||
80 | * mechanism is appropriate. | ||
81 | * | ||
82 | * Multiple calls from the same process may result in the same PASID | ||
83 | * being re-used. A reference count is kept. | ||
84 | */ | ||
85 | extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, | ||
86 | struct svm_dev_ops *ops); | ||
87 | |||
88 | /** | ||
89 | * intel_svm_unbind_mm() - Unbind a specified PASID | ||
90 | * @dev: Device for which PASID was allocated | ||
91 | * @pasid: PASID value to be unbound | ||
92 | * | ||
93 | * This function allows a PASID to be retired when the device no | ||
94 | * longer requires access to the address space of a given process. | ||
95 | * | ||
96 | * If the use count for the PASID in question reaches zero, the | ||
97 | * PASID is revoked and may no longer be used by hardware. | ||
98 | * | ||
99 | * Device drivers are required to ensure that no access (including | ||
100 | * page requests) is currently outstanding for the PASID in question, | ||
101 | * before calling this function. | ||
102 | */ | ||
103 | extern int intel_svm_unbind_mm(struct device *dev, int pasid); | ||
104 | |||
105 | #else /* CONFIG_INTEL_IOMMU_SVM */ | ||
106 | |||
107 | static inline int intel_svm_bind_mm(struct device *dev, int *pasid, | ||
108 | int flags, struct svm_dev_ops *ops) | ||
109 | { | ||
110 | return -ENOSYS; | ||
111 | } | ||
112 | |||
113 | static inline int intel_svm_unbind_mm(struct device *dev, int pasid) | ||
114 | { | ||
115 | BUG(); | ||
116 | } | ||
117 | #endif /* CONFIG_INTEL_IOMMU_SVM */ | ||
118 | |||
119 | #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) | ||
120 | |||
121 | #endif /* __INTEL_SVM_H__ */ | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index be7e75c945e9..ad16809c8596 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
102 | * @flags: flags (see IRQF_* above) | 102 | * @flags: flags (see IRQF_* above) |
103 | * @thread_fn: interrupt handler function for threaded interrupts | 103 | * @thread_fn: interrupt handler function for threaded interrupts |
104 | * @thread: thread pointer for threaded interrupts | 104 | * @thread: thread pointer for threaded interrupts |
105 | * @secondary: pointer to secondary irqaction (force threading) | ||
105 | * @thread_flags: flags related to @thread | 106 | * @thread_flags: flags related to @thread |
106 | * @thread_mask: bitmask for keeping track of @thread activity | 107 | * @thread_mask: bitmask for keeping track of @thread activity |
107 | * @dir: pointer to the proc/irq/NN/name entry | 108 | * @dir: pointer to the proc/irq/NN/name entry |
@@ -113,6 +114,7 @@ struct irqaction { | |||
113 | struct irqaction *next; | 114 | struct irqaction *next; |
114 | irq_handler_t thread_fn; | 115 | irq_handler_t thread_fn; |
115 | struct task_struct *thread; | 116 | struct task_struct *thread; |
117 | struct irqaction *secondary; | ||
116 | unsigned int irq; | 118 | unsigned int irq; |
117 | unsigned int flags; | 119 | unsigned int flags; |
118 | unsigned long thread_flags; | 120 | unsigned long thread_flags; |
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h new file mode 100644 index 000000000000..11d7e840d913 --- /dev/null +++ b/include/linux/io-64-nonatomic-hi-lo.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ | ||
2 | #define _LINUX_IO_64_NONATOMIC_HI_LO_H_ | ||
3 | |||
4 | #include <linux/io.h> | ||
5 | #include <asm-generic/int-ll64.h> | ||
6 | |||
7 | static inline __u64 hi_lo_readq(const volatile void __iomem *addr) | ||
8 | { | ||
9 | const volatile u32 __iomem *p = addr; | ||
10 | u32 low, high; | ||
11 | |||
12 | high = readl(p + 1); | ||
13 | low = readl(p); | ||
14 | |||
15 | return low + ((u64)high << 32); | ||
16 | } | ||
17 | |||
18 | static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) | ||
19 | { | ||
20 | writel(val >> 32, addr + 4); | ||
21 | writel(val, addr); | ||
22 | } | ||
23 | |||
24 | #ifndef readq | ||
25 | #define readq hi_lo_readq | ||
26 | #endif | ||
27 | |||
28 | #ifndef writeq | ||
29 | #define writeq hi_lo_writeq | ||
30 | #endif | ||
31 | |||
32 | #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ | ||
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h new file mode 100644 index 000000000000..1a4315f97360 --- /dev/null +++ b/include/linux/io-64-nonatomic-lo-hi.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ | ||
2 | #define _LINUX_IO_64_NONATOMIC_LO_HI_H_ | ||
3 | |||
4 | #include <linux/io.h> | ||
5 | #include <asm-generic/int-ll64.h> | ||
6 | |||
7 | static inline __u64 lo_hi_readq(const volatile void __iomem *addr) | ||
8 | { | ||
9 | const volatile u32 __iomem *p = addr; | ||
10 | u32 low, high; | ||
11 | |||
12 | low = readl(p); | ||
13 | high = readl(p + 1); | ||
14 | |||
15 | return low + ((u64)high << 32); | ||
16 | } | ||
17 | |||
18 | static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) | ||
19 | { | ||
20 | writel(val, addr); | ||
21 | writel(val >> 32, addr + 4); | ||
22 | } | ||
23 | |||
24 | #ifndef readq | ||
25 | #define readq lo_hi_readq | ||
26 | #endif | ||
27 | |||
28 | #ifndef writeq | ||
29 | #define writeq lo_hi_writeq | ||
30 | #endif | ||
31 | |||
32 | #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ | ||
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h index bbced83b32ee..376a27c9cc6a 100644 --- a/include/linux/iommu-common.h +++ b/include/linux/iommu-common.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #define IOMMU_POOL_HASHBITS 4 | 8 | #define IOMMU_POOL_HASHBITS 4 |
9 | #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) | 9 | #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) |
10 | #define IOMMU_ERROR_CODE (~(unsigned long) 0) | ||
10 | 11 | ||
11 | struct iommu_pool { | 12 | struct iommu_pool { |
12 | unsigned long start; | 13 | unsigned long start; |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f9c1b6d0f2e4..f28dff313b07 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -81,6 +81,7 @@ struct iommu_domain { | |||
81 | iommu_fault_handler_t handler; | 81 | iommu_fault_handler_t handler; |
82 | void *handler_token; | 82 | void *handler_token; |
83 | struct iommu_domain_geometry geometry; | 83 | struct iommu_domain_geometry geometry; |
84 | void *iova_cookie; | ||
84 | }; | 85 | }; |
85 | 86 | ||
86 | enum iommu_cap { | 87 | enum iommu_cap { |
@@ -167,7 +168,7 @@ struct iommu_ops { | |||
167 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); | 168 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); |
168 | int (*add_device)(struct device *dev); | 169 | int (*add_device)(struct device *dev); |
169 | void (*remove_device)(struct device *dev); | 170 | void (*remove_device)(struct device *dev); |
170 | int (*device_group)(struct device *dev, unsigned int *groupid); | 171 | struct iommu_group *(*device_group)(struct device *dev); |
171 | int (*domain_get_attr)(struct iommu_domain *domain, | 172 | int (*domain_get_attr)(struct iommu_domain *domain, |
172 | enum iommu_attr attr, void *data); | 173 | enum iommu_attr attr, void *data); |
173 | int (*domain_set_attr)(struct iommu_domain *domain, | 174 | int (*domain_set_attr)(struct iommu_domain *domain, |
@@ -316,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, | |||
316 | return domain->ops->map_sg(domain, iova, sg, nents, prot); | 317 | return domain->ops->map_sg(domain, iova, sg, nents, prot); |
317 | } | 318 | } |
318 | 319 | ||
320 | /* PCI device grouping function */ | ||
321 | extern struct iommu_group *pci_device_group(struct device *dev); | ||
322 | /* Generic device grouping function */ | ||
323 | extern struct iommu_group *generic_device_group(struct device *dev); | ||
324 | |||
319 | #else /* CONFIG_IOMMU_API */ | 325 | #else /* CONFIG_IOMMU_API */ |
320 | 326 | ||
321 | struct iommu_ops {}; | 327 | struct iommu_ops {}; |
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 388e3ae94f7a..24bea087e7af 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -94,6 +94,7 @@ struct resource { | |||
94 | /* PnP I/O specific bits (IORESOURCE_BITS) */ | 94 | /* PnP I/O specific bits (IORESOURCE_BITS) */ |
95 | #define IORESOURCE_IO_16BIT_ADDR (1<<0) | 95 | #define IORESOURCE_IO_16BIT_ADDR (1<<0) |
96 | #define IORESOURCE_IO_FIXED (1<<1) | 96 | #define IORESOURCE_IO_FIXED (1<<1) |
97 | #define IORESOURCE_IO_SPARSE (1<<2) | ||
97 | 98 | ||
98 | /* PCI ROM control bits (IORESOURCE_BITS) */ | 99 | /* PCI ROM control bits (IORESOURCE_BITS) */ |
99 | #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ | 100 | #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | |||
68 | return iova >> iova_shift(iovad); | 68 | return iova >> iova_shift(iovad); |
69 | } | 69 | } |
70 | 70 | ||
71 | int iommu_iova_cache_init(void); | 71 | int iova_cache_get(void); |
72 | void iommu_iova_cache_destroy(void); | 72 | void iova_cache_put(void); |
73 | 73 | ||
74 | struct iova *alloc_iova_mem(void); | 74 | struct iova *alloc_iova_mem(void); |
75 | void free_iova_mem(struct iova *iova); | 75 | void free_iova_mem(struct iova *iova); |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index f1f32af6d9b9..0ef2a97ccdb5 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -264,9 +264,9 @@ struct tcp6_timewait_sock { | |||
264 | }; | 264 | }; |
265 | 265 | ||
266 | #if IS_ENABLED(CONFIG_IPV6) | 266 | #if IS_ENABLED(CONFIG_IPV6) |
267 | static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) | 267 | static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) |
268 | { | 268 | { |
269 | return inet_sk(__sk)->pinet6; | 269 | return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline struct raw6_sock *raw6_sk(const struct sock *sk) | 272 | static inline struct raw6_sock *raw6_sk(const struct sock *sk) |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 11bf09288ddb..3c1c96786248 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -67,11 +67,12 @@ enum irqchip_irq_state; | |||
67 | * request/setup_irq() | 67 | * request/setup_irq() |
68 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 68 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
69 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | 69 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
70 | * IRQ_NESTED_TRHEAD - Interrupt nests into another thread | 70 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
71 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable | 71 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
72 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | 72 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude |
73 | * it from the spurious interrupt detection | 73 | * it from the spurious interrupt detection |
74 | * mechanism and from core side polling. | 74 | * mechanism and from core side polling. |
75 | * IRQ_DISABLE_UNLAZY - Disable lazy irq disable | ||
75 | */ | 76 | */ |
76 | enum { | 77 | enum { |
77 | IRQ_TYPE_NONE = 0x00000000, | 78 | IRQ_TYPE_NONE = 0x00000000, |
@@ -97,13 +98,14 @@ enum { | |||
97 | IRQ_NOTHREAD = (1 << 16), | 98 | IRQ_NOTHREAD = (1 << 16), |
98 | IRQ_PER_CPU_DEVID = (1 << 17), | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
99 | IRQ_IS_POLLED = (1 << 18), | 100 | IRQ_IS_POLLED = (1 << 18), |
101 | IRQ_DISABLE_UNLAZY = (1 << 19), | ||
100 | }; | 102 | }; |
101 | 103 | ||
102 | #define IRQF_MODIFY_MASK \ | 104 | #define IRQF_MODIFY_MASK \ |
103 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 105 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
104 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 106 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
105 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | 107 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ |
106 | IRQ_IS_POLLED) | 108 | IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) |
107 | 109 | ||
108 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 110 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
109 | 111 | ||
@@ -297,21 +299,6 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) | |||
297 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; | 299 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; |
298 | } | 300 | } |
299 | 301 | ||
300 | /* | ||
301 | * Functions for chained handlers which can be enabled/disabled by the | ||
302 | * standard disable_irq/enable_irq calls. Must be called with | ||
303 | * irq_desc->lock held. | ||
304 | */ | ||
305 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) | ||
306 | { | ||
307 | __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS; | ||
308 | } | ||
309 | |||
310 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | ||
311 | { | ||
312 | __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS; | ||
313 | } | ||
314 | |||
315 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | 302 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
316 | { | 303 | { |
317 | return d->hwirq; | 304 | return d->hwirq; |
@@ -452,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data, | |||
452 | const struct cpumask *cpumask, bool force); | 439 | const struct cpumask *cpumask, bool force); |
453 | extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); | 440 | extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); |
454 | 441 | ||
442 | extern void irq_migrate_all_off_this_cpu(void); | ||
443 | |||
455 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 444 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
456 | void irq_move_irq(struct irq_data *data); | 445 | void irq_move_irq(struct irq_data *data); |
457 | void irq_move_masked_irq(struct irq_data *data); | 446 | void irq_move_masked_irq(struct irq_data *data); |
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h new file mode 100644 index 000000000000..1551b5b2f4c2 --- /dev/null +++ b/include/linux/irqbypass.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * IRQ offload/bypass manager | ||
3 | * | ||
4 | * Copyright (C) 2015 Red Hat, Inc. | ||
5 | * Copyright (c) 2015 Linaro Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #ifndef IRQBYPASS_H | ||
12 | #define IRQBYPASS_H | ||
13 | |||
14 | #include <linux/list.h> | ||
15 | |||
16 | struct irq_bypass_consumer; | ||
17 | |||
18 | /* | ||
19 | * Theory of operation | ||
20 | * | ||
21 | * The IRQ bypass manager is a simple set of lists and callbacks that allows | ||
22 | * IRQ producers (ex. physical interrupt sources) to be matched to IRQ | ||
23 | * consumers (ex. virtualization hardware that allows IRQ bypass or offload) | ||
24 | * via a shared token (ex. eventfd_ctx). Producers and consumers register | ||
25 | * independently. When a token match is found, the optional @stop callback | ||
26 | * will be called for each participant. The pair will then be connected via | ||
27 | * the @add_* callbacks, and finally the optional @start callback will allow | ||
28 | * any final coordination. When either participant is unregistered, the | ||
29 | * process is repeated using the @del_* callbacks in place of the @add_* | ||
30 | * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings | ||
31 | * are not supported. | ||
32 | */ | ||
33 | |||
34 | /** | ||
35 | * struct irq_bypass_producer - IRQ bypass producer definition | ||
36 | * @node: IRQ bypass manager private list management | ||
37 | * @token: opaque token to match between producer and consumer | ||
38 | * @irq: Linux IRQ number for the producer device | ||
39 | * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) | ||
40 | * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) | ||
41 | * @stop: Perform any quiesce operations necessary prior to add/del (optional) | ||
42 | * @start: Perform any startup operations necessary after add/del (optional) | ||
43 | * | ||
44 | * The IRQ bypass producer structure represents an interrupt source for | ||
45 | * participation in possible host bypass, for instance an interrupt vector | ||
46 | * for a physical device assigned to a VM. | ||
47 | */ | ||
48 | struct irq_bypass_producer { | ||
49 | struct list_head node; | ||
50 | void *token; | ||
51 | int irq; | ||
52 | int (*add_consumer)(struct irq_bypass_producer *, | ||
53 | struct irq_bypass_consumer *); | ||
54 | void (*del_consumer)(struct irq_bypass_producer *, | ||
55 | struct irq_bypass_consumer *); | ||
56 | void (*stop)(struct irq_bypass_producer *); | ||
57 | void (*start)(struct irq_bypass_producer *); | ||
58 | }; | ||
59 | |||
60 | /** | ||
61 | * struct irq_bypass_consumer - IRQ bypass consumer definition | ||
62 | * @node: IRQ bypass manager private list management | ||
63 | * @token: opaque token to match between producer and consumer | ||
64 | * @add_producer: Connect the IRQ consumer to an IRQ producer | ||
65 | * @del_producer: Disconnect the IRQ consumer from an IRQ producer | ||
66 | * @stop: Perform any quiesce operations necessary prior to add/del (optional) | ||
67 | * @start: Perform any startup operations necessary after add/del (optional) | ||
68 | * | ||
69 | * The IRQ bypass consumer structure represents an interrupt sink for | ||
70 | * participation in possible host bypass, for instance a hypervisor may | ||
71 | * support offloads to allow bypassing the host entirely or offload | ||
72 | * portions of the interrupt handling to the VM. | ||
73 | */ | ||
74 | struct irq_bypass_consumer { | ||
75 | struct list_head node; | ||
76 | void *token; | ||
77 | int (*add_producer)(struct irq_bypass_consumer *, | ||
78 | struct irq_bypass_producer *); | ||
79 | void (*del_producer)(struct irq_bypass_consumer *, | ||
80 | struct irq_bypass_producer *); | ||
81 | void (*stop)(struct irq_bypass_consumer *); | ||
82 | void (*start)(struct irq_bypass_consumer *); | ||
83 | }; | ||
84 | |||
85 | int irq_bypass_register_producer(struct irq_bypass_producer *); | ||
86 | void irq_bypass_unregister_producer(struct irq_bypass_producer *); | ||
87 | int irq_bypass_register_consumer(struct irq_bypass_consumer *); | ||
88 | void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); | ||
89 | |||
90 | #endif /* IRQBYPASS_H */ | ||
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 638887376e58..89c34b200671 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef _LINUX_IRQCHIP_H | 11 | #ifndef _LINUX_IRQCHIP_H |
12 | #define _LINUX_IRQCHIP_H | 12 | #define _LINUX_IRQCHIP_H |
13 | 13 | ||
14 | #include <linux/acpi.h> | ||
14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
15 | 16 | ||
16 | /* | 17 | /* |
@@ -25,6 +26,22 @@ | |||
25 | */ | 26 | */ |
26 | #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) | 27 | #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) |
27 | 28 | ||
29 | /* | ||
30 | * This macro must be used by the different irqchip drivers to declare | ||
31 | * the association between their version and their initialization function. | ||
32 | * | ||
33 | * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the | ||
34 | * same file. | ||
35 | * @subtable: Subtable to be identified in MADT | ||
36 | * @validate: Function to be called on that subtable to check its validity. | ||
37 | * Can be NULL. | ||
38 | * @data: data to be checked by the validate function. | ||
39 | * @fn: initialization function | ||
40 | */ | ||
41 | #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ | ||
42 | ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ | ||
43 | subtable, validate, data, fn) | ||
44 | |||
28 | #ifdef CONFIG_IRQCHIP | 45 | #ifdef CONFIG_IRQCHIP |
29 | void irqchip_init(void); | 46 | void irqchip_init(void); |
30 | #else | 47 | #else |
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h deleted file mode 100644 index de3419ed3937..000000000000 --- a/include/linux/irqchip/arm-gic-acpi.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014, Linaro Ltd. | ||
3 | * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef ARM_GIC_ACPI_H_ | ||
11 | #define ARM_GIC_ACPI_H_ | ||
12 | |||
13 | #ifdef CONFIG_ACPI | ||
14 | |||
15 | /* | ||
16 | * Hard code here, we can not get memory size from MADT (but FDT does), | ||
17 | * Actually no need to do that, because this size can be inferred | ||
18 | * from GIC spec. | ||
19 | */ | ||
20 | #define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K) | ||
21 | #define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K) | ||
22 | |||
23 | struct acpi_table_header; | ||
24 | |||
25 | int gic_v2_acpi_init(struct acpi_table_header *table); | ||
26 | void acpi_gic_init(void); | ||
27 | #else | ||
28 | static inline void acpi_gic_init(void) { } | ||
29 | #endif | ||
30 | |||
31 | #endif /* ARM_GIC_ACPI_H_ */ | ||
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9eeeb9589acf..c9ae0c6ec050 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -18,8 +18,6 @@ | |||
18 | #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H | 18 | #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H |
19 | #define __LINUX_IRQCHIP_ARM_GIC_V3_H | 19 | #define __LINUX_IRQCHIP_ARM_GIC_V3_H |
20 | 20 | ||
21 | #include <asm/sysreg.h> | ||
22 | |||
23 | /* | 21 | /* |
24 | * Distributor registers. We assume we're running non-secure, with ARE | 22 | * Distributor registers. We assume we're running non-secure, with ARE |
25 | * being set. Secure-only and non-ARE registers are not described. | 23 | * being set. Secure-only and non-ARE registers are not described. |
@@ -231,6 +229,7 @@ | |||
231 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) | 229 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) |
232 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) | 230 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) |
233 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) | 231 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) |
232 | #define GITS_BASER_PAGES_MAX 256 | ||
234 | 233 | ||
235 | #define GITS_BASER_TYPE_NONE 0 | 234 | #define GITS_BASER_TYPE_NONE 0 |
236 | #define GITS_BASER_TYPE_DEVICE 1 | 235 | #define GITS_BASER_TYPE_DEVICE 1 |
@@ -266,16 +265,16 @@ | |||
266 | /* | 265 | /* |
267 | * Hypervisor interface registers (SRE only) | 266 | * Hypervisor interface registers (SRE only) |
268 | */ | 267 | */ |
269 | #define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) | 268 | #define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) |
270 | 269 | ||
271 | #define ICH_LR_EOI (1UL << 41) | 270 | #define ICH_LR_EOI (1ULL << 41) |
272 | #define ICH_LR_GROUP (1UL << 60) | 271 | #define ICH_LR_GROUP (1ULL << 60) |
273 | #define ICH_LR_HW (1UL << 61) | 272 | #define ICH_LR_HW (1ULL << 61) |
274 | #define ICH_LR_STATE (3UL << 62) | 273 | #define ICH_LR_STATE (3ULL << 62) |
275 | #define ICH_LR_PENDING_BIT (1UL << 62) | 274 | #define ICH_LR_PENDING_BIT (1ULL << 62) |
276 | #define ICH_LR_ACTIVE_BIT (1UL << 63) | 275 | #define ICH_LR_ACTIVE_BIT (1ULL << 63) |
277 | #define ICH_LR_PHYS_ID_SHIFT 32 | 276 | #define ICH_LR_PHYS_ID_SHIFT 32 |
278 | #define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) | 277 | #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) |
279 | 278 | ||
280 | #define ICH_MISR_EOI (1 << 0) | 279 | #define ICH_MISR_EOI (1 << 0) |
281 | #define ICH_MISR_U (1 << 1) | 280 | #define ICH_MISR_U (1 << 1) |
@@ -292,19 +291,8 @@ | |||
292 | #define ICH_VMCR_PMR_SHIFT 24 | 291 | #define ICH_VMCR_PMR_SHIFT 24 |
293 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) | 292 | #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) |
294 | 293 | ||
295 | #define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) | ||
296 | #define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) | ||
297 | #define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) | ||
298 | #define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) | ||
299 | #define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) | ||
300 | #define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) | ||
301 | #define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) | ||
302 | #define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) | ||
303 | |||
304 | #define ICC_IAR1_EL1_SPURIOUS 0x3ff | 294 | #define ICC_IAR1_EL1_SPURIOUS 0x3ff |
305 | 295 | ||
306 | #define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) | ||
307 | |||
308 | #define ICC_SRE_EL2_SRE (1 << 0) | 296 | #define ICC_SRE_EL2_SRE (1 << 0) |
309 | #define ICC_SRE_EL2_ENABLE (1 << 3) | 297 | #define ICC_SRE_EL2_ENABLE (1 << 3) |
310 | 298 | ||
@@ -320,54 +308,10 @@ | |||
320 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 | 308 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 |
321 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 309 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) |
322 | 310 | ||
323 | /* | 311 | #include <asm/arch_gicv3.h> |
324 | * System register definitions | ||
325 | */ | ||
326 | #define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) | ||
327 | #define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) | ||
328 | #define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) | ||
329 | #define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) | ||
330 | #define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) | ||
331 | #define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) | ||
332 | #define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) | ||
333 | |||
334 | #define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) | ||
335 | #define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) | ||
336 | |||
337 | #define ICH_LR0_EL2 __LR0_EL2(0) | ||
338 | #define ICH_LR1_EL2 __LR0_EL2(1) | ||
339 | #define ICH_LR2_EL2 __LR0_EL2(2) | ||
340 | #define ICH_LR3_EL2 __LR0_EL2(3) | ||
341 | #define ICH_LR4_EL2 __LR0_EL2(4) | ||
342 | #define ICH_LR5_EL2 __LR0_EL2(5) | ||
343 | #define ICH_LR6_EL2 __LR0_EL2(6) | ||
344 | #define ICH_LR7_EL2 __LR0_EL2(7) | ||
345 | #define ICH_LR8_EL2 __LR8_EL2(0) | ||
346 | #define ICH_LR9_EL2 __LR8_EL2(1) | ||
347 | #define ICH_LR10_EL2 __LR8_EL2(2) | ||
348 | #define ICH_LR11_EL2 __LR8_EL2(3) | ||
349 | #define ICH_LR12_EL2 __LR8_EL2(4) | ||
350 | #define ICH_LR13_EL2 __LR8_EL2(5) | ||
351 | #define ICH_LR14_EL2 __LR8_EL2(6) | ||
352 | #define ICH_LR15_EL2 __LR8_EL2(7) | ||
353 | |||
354 | #define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) | ||
355 | #define ICH_AP0R0_EL2 __AP0Rx_EL2(0) | ||
356 | #define ICH_AP0R1_EL2 __AP0Rx_EL2(1) | ||
357 | #define ICH_AP0R2_EL2 __AP0Rx_EL2(2) | ||
358 | #define ICH_AP0R3_EL2 __AP0Rx_EL2(3) | ||
359 | |||
360 | #define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) | ||
361 | #define ICH_AP1R0_EL2 __AP1Rx_EL2(0) | ||
362 | #define ICH_AP1R1_EL2 __AP1Rx_EL2(1) | ||
363 | #define ICH_AP1R2_EL2 __AP1Rx_EL2(2) | ||
364 | #define ICH_AP1R3_EL2 __AP1Rx_EL2(3) | ||
365 | 312 | ||
366 | #ifndef __ASSEMBLY__ | 313 | #ifndef __ASSEMBLY__ |
367 | 314 | ||
368 | #include <linux/stringify.h> | ||
369 | #include <asm/msi.h> | ||
370 | |||
371 | /* | 315 | /* |
372 | * We need a value to serve as a irq-type for LPIs. Choose one that will | 316 | * We need a value to serve as a irq-type for LPIs. Choose one that will |
373 | * hopefully pique the interest of the reviewer. | 317 | * hopefully pique the interest of the reviewer. |
@@ -385,23 +329,26 @@ struct rdists { | |||
385 | u64 flags; | 329 | u64 flags; |
386 | }; | 330 | }; |
387 | 331 | ||
388 | static inline void gic_write_eoir(u64 irq) | ||
389 | { | ||
390 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); | ||
391 | isb(); | ||
392 | } | ||
393 | |||
394 | static inline void gic_write_dir(u64 irq) | ||
395 | { | ||
396 | asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); | ||
397 | isb(); | ||
398 | } | ||
399 | |||
400 | struct irq_domain; | 332 | struct irq_domain; |
401 | int its_cpu_init(void); | 333 | int its_cpu_init(void); |
402 | int its_init(struct device_node *node, struct rdists *rdists, | 334 | int its_init(struct device_node *node, struct rdists *rdists, |
403 | struct irq_domain *domain); | 335 | struct irq_domain *domain); |
404 | 336 | ||
337 | static inline bool gic_enable_sre(void) | ||
338 | { | ||
339 | u32 val; | ||
340 | |||
341 | val = gic_read_sre(); | ||
342 | if (val & ICC_SRE_EL1_SRE) | ||
343 | return true; | ||
344 | |||
345 | val |= ICC_SRE_EL1_SRE; | ||
346 | gic_write_sre(val); | ||
347 | val = gic_read_sre(); | ||
348 | |||
349 | return !!(val & ICC_SRE_EL1_SRE); | ||
350 | } | ||
351 | |||
405 | #endif | 352 | #endif |
406 | 353 | ||
407 | #endif | 354 | #endif |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index b8901dfd9e95..bae69e5d693c 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -100,16 +100,11 @@ | |||
100 | 100 | ||
101 | struct device_node; | 101 | struct device_node; |
102 | 102 | ||
103 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, | ||
104 | u32 offset, struct device_node *); | ||
105 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); | 103 | void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); |
106 | int gic_cpu_if_down(unsigned int gic_nr); | 104 | int gic_cpu_if_down(unsigned int gic_nr); |
107 | 105 | ||
108 | static inline void gic_init(unsigned int nr, int start, | 106 | void gic_init(unsigned int nr, int start, |
109 | void __iomem *dist , void __iomem *cpu) | 107 | void __iomem *dist , void __iomem *cpu); |
110 | { | ||
111 | gic_init_bases(nr, start, dist, cpu, 0, NULL); | ||
112 | } | ||
113 | 108 | ||
114 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); | 109 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); |
115 | 110 | ||
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 4e6861605050..ce824db48d64 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #define __LINUX_IRQCHIP_MIPS_GIC_H | 9 | #define __LINUX_IRQCHIP_MIPS_GIC_H |
10 | 10 | ||
11 | #include <linux/clocksource.h> | 11 | #include <linux/clocksource.h> |
12 | #include <linux/ioport.h> | ||
12 | 13 | ||
13 | #define GIC_MAX_INTRS 256 | 14 | #define GIC_MAX_INTRS 256 |
14 | 15 | ||
@@ -245,6 +246,8 @@ | |||
245 | #define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) | 246 | #define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) |
246 | #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) | 247 | #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) |
247 | 248 | ||
249 | #ifdef CONFIG_MIPS_GIC | ||
250 | |||
248 | extern unsigned int gic_present; | 251 | extern unsigned int gic_present; |
249 | 252 | ||
250 | extern void gic_init(unsigned long gic_base_addr, | 253 | extern void gic_init(unsigned long gic_base_addr, |
@@ -264,4 +267,18 @@ extern unsigned int plat_ipi_resched_int_xlate(unsigned int); | |||
264 | extern int gic_get_c0_compare_int(void); | 267 | extern int gic_get_c0_compare_int(void); |
265 | extern int gic_get_c0_perfcount_int(void); | 268 | extern int gic_get_c0_perfcount_int(void); |
266 | extern int gic_get_c0_fdc_int(void); | 269 | extern int gic_get_c0_fdc_int(void); |
270 | extern int gic_get_usm_range(struct resource *gic_usm_res); | ||
271 | |||
272 | #else /* CONFIG_MIPS_GIC */ | ||
273 | |||
274 | #define gic_present 0 | ||
275 | |||
276 | static inline int gic_get_usm_range(struct resource *gic_usm_res) | ||
277 | { | ||
278 | /* Shouldn't be called. */ | ||
279 | return -1; | ||
280 | } | ||
281 | |||
282 | #endif /* CONFIG_MIPS_GIC */ | ||
283 | |||
267 | #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ | 284 | #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..d5e5c5bef28c 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -5,9 +5,10 @@ | |||
5 | * helpful for interrupt controllers to implement mapping between hardware | 5 | * helpful for interrupt controllers to implement mapping between hardware |
6 | * irq numbers and the Linux irq number space. | 6 | * irq numbers and the Linux irq number space. |
7 | * | 7 | * |
8 | * irq_domains also have a hook for translating device tree interrupt | 8 | * irq_domains also have hooks for translating device tree or other |
9 | * representation into a hardware irq number that can be mapped back to a | 9 | * firmware interrupt representations into a hardware irq number that |
10 | * Linux irq number without any extra platform support code. | 10 | * can be mapped back to a Linux irq number without any extra platform |
11 | * support code. | ||
11 | * | 12 | * |
12 | * Interrupt controller "domain" data structure. This could be defined as a | 13 | * Interrupt controller "domain" data structure. This could be defined as a |
13 | * irq domain controller. That is, it handles the mapping between hardware | 14 | * irq domain controller. That is, it handles the mapping between hardware |
@@ -17,16 +18,12 @@ | |||
17 | * model). It's the domain callbacks that are responsible for setting the | 18 | * model). It's the domain callbacks that are responsible for setting the |
18 | * irq_chip on a given irq_desc after it's been mapped. | 19 | * irq_chip on a given irq_desc after it's been mapped. |
19 | * | 20 | * |
20 | * The host code and data structures are agnostic to whether or not | 21 | * The host code and data structures use a fwnode_handle pointer to |
21 | * we use an open firmware device-tree. We do have references to struct | 22 | * identify the domain. In some cases, and in order to preserve source |
22 | * device_node in two places: in irq_find_host() to find the host matching | 23 | * code compatibility, this fwnode pointer is "upgraded" to a DT |
23 | * a given interrupt controller node, and of course as an argument to its | 24 | * device_node. For those firmware infrastructures that do not provide |
24 | * counterpart domain->ops->match() callback. However, those are treated as | 25 | * a unique identifier for an interrupt controller, the irq_domain |
25 | * generic pointers by the core and the fact that it's actually a device-node | 26 | * code offers a fwnode allocator. |
26 | * pointer is purely a convention between callers and implementation. This | ||
27 | * code could thus be used on other architectures by replacing those two | ||
28 | * by some sort of arch-specific void * "token" used to identify interrupt | ||
29 | * controllers. | ||
30 | */ | 27 | */ |
31 | 28 | ||
32 | #ifndef _LINUX_IRQDOMAIN_H | 29 | #ifndef _LINUX_IRQDOMAIN_H |
@@ -34,6 +31,7 @@ | |||
34 | 31 | ||
35 | #include <linux/types.h> | 32 | #include <linux/types.h> |
36 | #include <linux/irqhandler.h> | 33 | #include <linux/irqhandler.h> |
34 | #include <linux/of.h> | ||
37 | #include <linux/radix-tree.h> | 35 | #include <linux/radix-tree.h> |
38 | 36 | ||
39 | struct device_node; | 37 | struct device_node; |
@@ -45,6 +43,24 @@ struct irq_data; | |||
45 | /* Number of irqs reserved for a legacy isa controller */ | 43 | /* Number of irqs reserved for a legacy isa controller */ |
46 | #define NUM_ISA_INTERRUPTS 16 | 44 | #define NUM_ISA_INTERRUPTS 16 |
47 | 45 | ||
46 | #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 | ||
47 | |||
48 | /** | ||
49 | * struct irq_fwspec - generic IRQ specifier structure | ||
50 | * | ||
51 | * @fwnode: Pointer to a firmware-specific descriptor | ||
52 | * @param_count: Number of device-specific parameters | ||
53 | * @param: Device-specific parameters | ||
54 | * | ||
55 | * This structure, directly modeled after of_phandle_args, is used to | ||
56 | * pass a device-specific description of an interrupt. | ||
57 | */ | ||
58 | struct irq_fwspec { | ||
59 | struct fwnode_handle *fwnode; | ||
60 | int param_count; | ||
61 | u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; | ||
62 | }; | ||
63 | |||
48 | /* | 64 | /* |
49 | * Should several domains have the same device node, but serve | 65 | * Should several domains have the same device node, but serve |
50 | * different purposes (for example one domain is for PCI/MSI, and the | 66 | * different purposes (for example one domain is for PCI/MSI, and the |
@@ -91,6 +107,8 @@ struct irq_domain_ops { | |||
91 | unsigned int nr_irqs); | 107 | unsigned int nr_irqs); |
92 | void (*activate)(struct irq_domain *d, struct irq_data *irq_data); | 108 | void (*activate)(struct irq_domain *d, struct irq_data *irq_data); |
93 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); | 109 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); |
110 | int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, | ||
111 | unsigned long *out_hwirq, unsigned int *out_type); | ||
94 | #endif | 112 | #endif |
95 | }; | 113 | }; |
96 | 114 | ||
@@ -130,7 +148,7 @@ struct irq_domain { | |||
130 | unsigned int flags; | 148 | unsigned int flags; |
131 | 149 | ||
132 | /* Optional data */ | 150 | /* Optional data */ |
133 | struct device_node *of_node; | 151 | struct fwnode_handle *fwnode; |
134 | enum irq_domain_bus_token bus_token; | 152 | enum irq_domain_bus_token bus_token; |
135 | struct irq_domain_chip_generic *gc; | 153 | struct irq_domain_chip_generic *gc; |
136 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 154 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
@@ -161,8 +179,15 @@ enum { | |||
161 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), | 179 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), |
162 | }; | 180 | }; |
163 | 181 | ||
182 | static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) | ||
183 | { | ||
184 | return to_of_node(d->fwnode); | ||
185 | } | ||
186 | |||
164 | #ifdef CONFIG_IRQ_DOMAIN | 187 | #ifdef CONFIG_IRQ_DOMAIN |
165 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 188 | struct fwnode_handle *irq_domain_alloc_fwnode(void *data); |
189 | void irq_domain_free_fwnode(struct fwnode_handle *fwnode); | ||
190 | struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, | ||
166 | irq_hw_number_t hwirq_max, int direct_max, | 191 | irq_hw_number_t hwirq_max, int direct_max, |
167 | const struct irq_domain_ops *ops, | 192 | const struct irq_domain_ops *ops, |
168 | void *host_data); | 193 | void *host_data); |
@@ -177,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, | |||
177 | irq_hw_number_t first_hwirq, | 202 | irq_hw_number_t first_hwirq, |
178 | const struct irq_domain_ops *ops, | 203 | const struct irq_domain_ops *ops, |
179 | void *host_data); | 204 | void *host_data); |
180 | extern struct irq_domain *irq_find_matching_host(struct device_node *node, | 205 | extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, |
181 | enum irq_domain_bus_token bus_token); | 206 | enum irq_domain_bus_token bus_token); |
182 | extern void irq_set_default_host(struct irq_domain *host); | 207 | extern void irq_set_default_host(struct irq_domain *host); |
183 | 208 | ||
209 | static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) | ||
210 | { | ||
211 | return node ? &node->fwnode : NULL; | ||
212 | } | ||
213 | |||
214 | static inline struct irq_domain *irq_find_matching_host(struct device_node *node, | ||
215 | enum irq_domain_bus_token bus_token) | ||
216 | { | ||
217 | return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); | ||
218 | } | ||
219 | |||
184 | static inline struct irq_domain *irq_find_host(struct device_node *node) | 220 | static inline struct irq_domain *irq_find_host(struct device_node *node) |
185 | { | 221 | { |
186 | return irq_find_matching_host(node, DOMAIN_BUS_ANY); | 222 | return irq_find_matching_host(node, DOMAIN_BUS_ANY); |
@@ -198,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no | |||
198 | const struct irq_domain_ops *ops, | 234 | const struct irq_domain_ops *ops, |
199 | void *host_data) | 235 | void *host_data) |
200 | { | 236 | { |
201 | return __irq_domain_add(of_node, size, size, 0, ops, host_data); | 237 | return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); |
202 | } | 238 | } |
203 | static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, | 239 | static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, |
204 | unsigned int max_irq, | 240 | unsigned int max_irq, |
205 | const struct irq_domain_ops *ops, | 241 | const struct irq_domain_ops *ops, |
206 | void *host_data) | 242 | void *host_data) |
207 | { | 243 | { |
208 | return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); | 244 | return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); |
209 | } | 245 | } |
210 | static inline struct irq_domain *irq_domain_add_legacy_isa( | 246 | static inline struct irq_domain *irq_domain_add_legacy_isa( |
211 | struct device_node *of_node, | 247 | struct device_node *of_node, |
@@ -219,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node | |||
219 | const struct irq_domain_ops *ops, | 255 | const struct irq_domain_ops *ops, |
220 | void *host_data) | 256 | void *host_data) |
221 | { | 257 | { |
222 | return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); | 258 | return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); |
259 | } | ||
260 | |||
261 | static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, | ||
262 | unsigned int size, | ||
263 | const struct irq_domain_ops *ops, | ||
264 | void *host_data) | ||
265 | { | ||
266 | return __irq_domain_add(fwnode, size, size, 0, ops, host_data); | ||
267 | } | ||
268 | |||
269 | static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, | ||
270 | const struct irq_domain_ops *ops, | ||
271 | void *host_data) | ||
272 | { | ||
273 | return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); | ||
223 | } | 274 | } |
224 | 275 | ||
225 | extern void irq_domain_remove(struct irq_domain *host); | 276 | extern void irq_domain_remove(struct irq_domain *host); |
@@ -234,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain, | |||
234 | 285 | ||
235 | extern unsigned int irq_create_mapping(struct irq_domain *host, | 286 | extern unsigned int irq_create_mapping(struct irq_domain *host, |
236 | irq_hw_number_t hwirq); | 287 | irq_hw_number_t hwirq); |
288 | extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); | ||
237 | extern void irq_dispose_mapping(unsigned int virq); | 289 | extern void irq_dispose_mapping(unsigned int virq); |
238 | 290 | ||
239 | /** | 291 | /** |
@@ -285,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, | |||
285 | void *chip_data, irq_flow_handler_t handler, | 337 | void *chip_data, irq_flow_handler_t handler, |
286 | void *handler_data, const char *handler_name); | 338 | void *handler_data, const char *handler_name); |
287 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | 339 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
288 | extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | 340 | extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, |
289 | unsigned int flags, unsigned int size, | 341 | unsigned int flags, unsigned int size, |
290 | struct device_node *node, | 342 | struct fwnode_handle *fwnode, |
291 | const struct irq_domain_ops *ops, void *host_data); | 343 | const struct irq_domain_ops *ops, void *host_data); |
344 | |||
345 | static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | ||
346 | unsigned int flags, | ||
347 | unsigned int size, | ||
348 | struct device_node *node, | ||
349 | const struct irq_domain_ops *ops, | ||
350 | void *host_data) | ||
351 | { | ||
352 | return irq_domain_create_hierarchy(parent, flags, size, | ||
353 | of_node_to_fwnode(node), | ||
354 | ops, host_data); | ||
355 | } | ||
356 | |||
292 | extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, | 357 | extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, |
293 | unsigned int nr_irqs, int node, void *arg, | 358 | unsigned int nr_irqs, int node, void *arg, |
294 | bool realloc); | 359 | bool realloc); |
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index e374e369fb2f..eb1bdcf95f2e 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | /** | 4 | /** |
5 | * enum irqreturn | 5 | * enum irqreturn |
6 | * @IRQ_NONE interrupt was not from this device | 6 | * @IRQ_NONE interrupt was not from this device or was not handled |
7 | * @IRQ_HANDLED interrupt was handled by this device | 7 | * @IRQ_HANDLED interrupt was handled by this device |
8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread | 8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread |
9 | */ | 9 | */ |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index df07e78487d5..65407f6c9120 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -278,6 +278,7 @@ typedef struct journal_superblock_s | |||
278 | /* 0x0400 */ | 278 | /* 0x0400 */ |
279 | } journal_superblock_t; | 279 | } journal_superblock_t; |
280 | 280 | ||
281 | /* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */ | ||
281 | #define JBD2_HAS_COMPAT_FEATURE(j,mask) \ | 282 | #define JBD2_HAS_COMPAT_FEATURE(j,mask) \ |
282 | ((j)->j_format_version >= 2 && \ | 283 | ((j)->j_format_version >= 2 && \ |
283 | ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) | 284 | ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) |
@@ -288,7 +289,7 @@ typedef struct journal_superblock_s | |||
288 | ((j)->j_format_version >= 2 && \ | 289 | ((j)->j_format_version >= 2 && \ |
289 | ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) | 290 | ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) |
290 | 291 | ||
291 | #define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 | 292 | #define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 |
292 | 293 | ||
293 | #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 | 294 | #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 |
294 | #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 | 295 | #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 |
@@ -296,6 +297,8 @@ typedef struct journal_superblock_s | |||
296 | #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 | 297 | #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 |
297 | #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 | 298 | #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 |
298 | 299 | ||
300 | /* See "journal feature predicate functions" below */ | ||
301 | |||
299 | /* Features known to this kernel version: */ | 302 | /* Features known to this kernel version: */ |
300 | #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM | 303 | #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM |
301 | #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 | 304 | #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 |
@@ -1034,6 +1037,69 @@ struct journal_s | |||
1034 | __u32 j_csum_seed; | 1037 | __u32 j_csum_seed; |
1035 | }; | 1038 | }; |
1036 | 1039 | ||
1040 | /* journal feature predicate functions */ | ||
1041 | #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ | ||
1042 | static inline bool jbd2_has_feature_##name(journal_t *j) \ | ||
1043 | { \ | ||
1044 | return ((j)->j_format_version >= 2 && \ | ||
1045 | ((j)->j_superblock->s_feature_compat & \ | ||
1046 | cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \ | ||
1047 | } \ | ||
1048 | static inline void jbd2_set_feature_##name(journal_t *j) \ | ||
1049 | { \ | ||
1050 | (j)->j_superblock->s_feature_compat |= \ | ||
1051 | cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ | ||
1052 | } \ | ||
1053 | static inline void jbd2_clear_feature_##name(journal_t *j) \ | ||
1054 | { \ | ||
1055 | (j)->j_superblock->s_feature_compat &= \ | ||
1056 | ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ | ||
1057 | } | ||
1058 | |||
1059 | #define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ | ||
1060 | static inline bool jbd2_has_feature_##name(journal_t *j) \ | ||
1061 | { \ | ||
1062 | return ((j)->j_format_version >= 2 && \ | ||
1063 | ((j)->j_superblock->s_feature_ro_compat & \ | ||
1064 | cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \ | ||
1065 | } \ | ||
1066 | static inline void jbd2_set_feature_##name(journal_t *j) \ | ||
1067 | { \ | ||
1068 | (j)->j_superblock->s_feature_ro_compat |= \ | ||
1069 | cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ | ||
1070 | } \ | ||
1071 | static inline void jbd2_clear_feature_##name(journal_t *j) \ | ||
1072 | { \ | ||
1073 | (j)->j_superblock->s_feature_ro_compat &= \ | ||
1074 | ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ | ||
1075 | } | ||
1076 | |||
1077 | #define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \ | ||
1078 | static inline bool jbd2_has_feature_##name(journal_t *j) \ | ||
1079 | { \ | ||
1080 | return ((j)->j_format_version >= 2 && \ | ||
1081 | ((j)->j_superblock->s_feature_incompat & \ | ||
1082 | cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \ | ||
1083 | } \ | ||
1084 | static inline void jbd2_set_feature_##name(journal_t *j) \ | ||
1085 | { \ | ||
1086 | (j)->j_superblock->s_feature_incompat |= \ | ||
1087 | cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ | ||
1088 | } \ | ||
1089 | static inline void jbd2_clear_feature_##name(journal_t *j) \ | ||
1090 | { \ | ||
1091 | (j)->j_superblock->s_feature_incompat &= \ | ||
1092 | ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ | ||
1093 | } | ||
1094 | |||
1095 | JBD2_FEATURE_COMPAT_FUNCS(checksum, CHECKSUM) | ||
1096 | |||
1097 | JBD2_FEATURE_INCOMPAT_FUNCS(revoke, REVOKE) | ||
1098 | JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) | ||
1099 | JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) | ||
1100 | JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) | ||
1101 | JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) | ||
1102 | |||
1037 | /* | 1103 | /* |
1038 | * Journal flag definitions | 1104 | * Journal flag definitions |
1039 | */ | 1105 | */ |
@@ -1046,6 +1112,7 @@ struct journal_s | |||
1046 | #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file | 1112 | #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file |
1047 | * data write error in ordered | 1113 | * data write error in ordered |
1048 | * mode */ | 1114 | * mode */ |
1115 | #define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ | ||
1049 | 1116 | ||
1050 | /* | 1117 | /* |
1051 | * Function declarations for the journaling transaction and buffer | 1118 | * Function declarations for the journaling transaction and buffer |
@@ -1338,13 +1405,17 @@ static inline int tid_geq(tid_t x, tid_t y) | |||
1338 | extern int jbd2_journal_blocks_per_page(struct inode *inode); | 1405 | extern int jbd2_journal_blocks_per_page(struct inode *inode); |
1339 | extern size_t journal_tag_bytes(journal_t *journal); | 1406 | extern size_t journal_tag_bytes(journal_t *journal); |
1340 | 1407 | ||
1408 | static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j) | ||
1409 | { | ||
1410 | return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j); | ||
1411 | } | ||
1412 | |||
1341 | static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) | 1413 | static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) |
1342 | { | 1414 | { |
1343 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) || | 1415 | WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) && |
1344 | JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) | 1416 | journal->j_chksum_driver == NULL); |
1345 | return 1; | ||
1346 | 1417 | ||
1347 | return 0; | 1418 | return journal->j_chksum_driver != NULL; |
1348 | } | 1419 | } |
1349 | 1420 | ||
1350 | /* | 1421 | /* |
@@ -1444,4 +1515,7 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal) | |||
1444 | 1515 | ||
1445 | #endif /* __KERNEL__ */ | 1516 | #endif /* __KERNEL__ */ |
1446 | 1517 | ||
1518 | #define EFSBADCRC EBADMSG /* Bad CRC detected */ | ||
1519 | #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ | ||
1520 | |||
1447 | #endif /* _LINUX_JBD2_H */ | 1521 | #endif /* _LINUX_JBD2_H */ |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f1094238ab2a..8dde55974f18 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -214,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod) | |||
214 | #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE | 214 | #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE |
215 | #define jump_label_enabled static_key_enabled | 215 | #define jump_label_enabled static_key_enabled |
216 | 216 | ||
217 | static inline bool static_key_enabled(struct static_key *key) | ||
218 | { | ||
219 | return static_key_count(key) > 0; | ||
220 | } | ||
221 | |||
222 | static inline void static_key_enable(struct static_key *key) | 217 | static inline void static_key_enable(struct static_key *key) |
223 | { | 218 | { |
224 | int count = static_key_count(key); | 219 | int count = static_key_count(key); |
@@ -265,6 +260,17 @@ struct static_key_false { | |||
265 | #define DEFINE_STATIC_KEY_FALSE(name) \ | 260 | #define DEFINE_STATIC_KEY_FALSE(name) \ |
266 | struct static_key_false name = STATIC_KEY_FALSE_INIT | 261 | struct static_key_false name = STATIC_KEY_FALSE_INIT |
267 | 262 | ||
263 | extern bool ____wrong_branch_error(void); | ||
264 | |||
265 | #define static_key_enabled(x) \ | ||
266 | ({ \ | ||
267 | if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ | ||
268 | !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ | ||
269 | !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ | ||
270 | ____wrong_branch_error(); \ | ||
271 | static_key_count((struct static_key *)x) > 0; \ | ||
272 | }) | ||
273 | |||
268 | #ifdef HAVE_JUMP_LABEL | 274 | #ifdef HAVE_JUMP_LABEL |
269 | 275 | ||
270 | /* | 276 | /* |
@@ -323,8 +329,6 @@ struct static_key_false { | |||
323 | * See jump_label_type() / jump_label_init_type(). | 329 | * See jump_label_type() / jump_label_init_type(). |
324 | */ | 330 | */ |
325 | 331 | ||
326 | extern bool ____wrong_branch_error(void); | ||
327 | |||
328 | #define static_branch_likely(x) \ | 332 | #define static_branch_likely(x) \ |
329 | ({ \ | 333 | ({ \ |
330 | bool branch; \ | 334 | bool branch; \ |
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h index c838abe3ee0a..052c7b32cc91 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h | |||
@@ -20,7 +20,7 @@ | |||
20 | }) | 20 | }) |
21 | 21 | ||
22 | /* acceptable for old filesystems */ | 22 | /* acceptable for old filesystems */ |
23 | static inline int old_valid_dev(dev_t dev) | 23 | static inline bool old_valid_dev(dev_t dev) |
24 | { | 24 | { |
25 | return MAJOR(dev) < 256 && MINOR(dev) < 256; | 25 | return MAJOR(dev) < 256 && MINOR(dev) < 256; |
26 | } | 26 | } |
@@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val) | |||
35 | return MKDEV((val >> 8) & 255, val & 255); | 35 | return MKDEV((val >> 8) & 255, val & 255); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline int new_valid_dev(dev_t dev) | 38 | static inline bool new_valid_dev(dev_t dev) |
39 | { | 39 | { |
40 | return 1; | 40 | return 1; |
41 | } | 41 | } |
@@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev) | |||
54 | return MKDEV(major, minor); | 54 | return MKDEV(major, minor); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int huge_valid_dev(dev_t dev) | ||
58 | { | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | static inline u64 huge_encode_dev(dev_t dev) | 57 | static inline u64 huge_encode_dev(dev_t dev) |
63 | { | 58 | { |
64 | return new_encode_dev(dev); | 59 | return new_encode_dev(dev); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5582410727cb..350dfb08aee3 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -200,28 +200,28 @@ extern int _cond_resched(void); | |||
200 | 200 | ||
201 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) | 201 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) |
202 | 202 | ||
203 | /* | 203 | /** |
204 | * abs() handles unsigned and signed longs, ints, shorts and chars. For all | 204 | * abs - return absolute value of an argument |
205 | * input types abs() returns a signed long. | 205 | * @x: the value. If it is unsigned type, it is converted to signed type first |
206 | * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() | 206 | * (s64, long or int depending on its size). |
207 | * for those. | 207 | * |
208 | * Return: an absolute value of x. If x is 64-bit, macro's return type is s64, | ||
209 | * otherwise it is signed long. | ||
208 | */ | 210 | */ |
209 | #define abs(x) ({ \ | 211 | #define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ |
210 | long ret; \ | 212 | s64 __x = (x); \ |
211 | if (sizeof(x) == sizeof(long)) { \ | 213 | (__x < 0) ? -__x : __x; \ |
212 | long __x = (x); \ | 214 | }), ({ \ |
213 | ret = (__x < 0) ? -__x : __x; \ | 215 | long ret; \ |
214 | } else { \ | 216 | if (sizeof(x) == sizeof(long)) { \ |
215 | int __x = (x); \ | 217 | long __x = (x); \ |
216 | ret = (__x < 0) ? -__x : __x; \ | 218 | ret = (__x < 0) ? -__x : __x; \ |
217 | } \ | 219 | } else { \ |
218 | ret; \ | 220 | int __x = (x); \ |
219 | }) | 221 | ret = (__x < 0) ? -__x : __x; \ |
220 | 222 | } \ | |
221 | #define abs64(x) ({ \ | 223 | ret; \ |
222 | s64 __x = (x); \ | 224 | })) |
223 | (__x < 0) ? -__x : __x; \ | ||
224 | }) | ||
225 | 225 | ||
226 | /** | 226 | /** |
227 | * reciprocal_scale - "scale" a value into range [0, ep_ro) | 227 | * reciprocal_scale - "scale" a value into range [0, ep_ro) |
@@ -413,6 +413,8 @@ extern __printf(2, 3) | |||
413 | char *kasprintf(gfp_t gfp, const char *fmt, ...); | 413 | char *kasprintf(gfp_t gfp, const char *fmt, ...); |
414 | extern __printf(2, 0) | 414 | extern __printf(2, 0) |
415 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); | 415 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
416 | extern __printf(2, 0) | ||
417 | const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); | ||
416 | 418 | ||
417 | extern __scanf(2, 3) | 419 | extern __scanf(2, 3) |
418 | int sscanf(const char *, const char *, ...); | 420 | int sscanf(const char *, const char *, ...); |
diff --git a/include/linux/key-type.h b/include/linux/key-type.h index ff9f1d394235..7463355a198b 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h | |||
@@ -40,8 +40,7 @@ struct key_construction { | |||
40 | */ | 40 | */ |
41 | struct key_preparsed_payload { | 41 | struct key_preparsed_payload { |
42 | char *description; /* Proposed key description (or NULL) */ | 42 | char *description; /* Proposed key description (or NULL) */ |
43 | void *type_data[2]; /* Private key-type data */ | 43 | union key_payload payload; /* Proposed payload */ |
44 | void *payload[2]; /* Proposed payload */ | ||
45 | const void *data; /* Raw data */ | 44 | const void *data; /* Raw data */ |
46 | size_t datalen; /* Raw datalen */ | 45 | size_t datalen; /* Raw datalen */ |
47 | size_t quotalen; /* Quota length for proposed payload */ | 46 | size_t quotalen; /* Quota length for proposed payload */ |
diff --git a/include/linux/key.h b/include/linux/key.h index e1d4715f3222..66f705243985 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -89,6 +89,11 @@ struct keyring_index_key { | |||
89 | size_t desc_len; | 89 | size_t desc_len; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | union key_payload { | ||
93 | void __rcu *rcu_data0; | ||
94 | void *data[4]; | ||
95 | }; | ||
96 | |||
92 | /*****************************************************************************/ | 97 | /*****************************************************************************/ |
93 | /* | 98 | /* |
94 | * key reference with possession attribute handling | 99 | * key reference with possession attribute handling |
@@ -186,28 +191,18 @@ struct key { | |||
186 | }; | 191 | }; |
187 | }; | 192 | }; |
188 | 193 | ||
189 | /* type specific data | ||
190 | * - this is used by the keyring type to index the name | ||
191 | */ | ||
192 | union { | ||
193 | struct list_head link; | ||
194 | unsigned long x[2]; | ||
195 | void *p[2]; | ||
196 | int reject_error; | ||
197 | } type_data; | ||
198 | |||
199 | /* key data | 194 | /* key data |
200 | * - this is used to hold the data actually used in cryptography or | 195 | * - this is used to hold the data actually used in cryptography or |
201 | * whatever | 196 | * whatever |
202 | */ | 197 | */ |
203 | union { | 198 | union { |
204 | union { | 199 | union key_payload payload; |
205 | unsigned long value; | 200 | struct { |
206 | void __rcu *rcudata; | 201 | /* Keyring bits */ |
207 | void *data; | 202 | struct list_head name_link; |
208 | void *data2[2]; | 203 | struct assoc_array keys; |
209 | } payload; | 204 | }; |
210 | struct assoc_array keys; | 205 | int reject_error; |
211 | }; | 206 | }; |
212 | }; | 207 | }; |
213 | 208 | ||
@@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key) | |||
336 | } | 331 | } |
337 | 332 | ||
338 | #define rcu_dereference_key(KEY) \ | 333 | #define rcu_dereference_key(KEY) \ |
339 | (rcu_dereference_protected((KEY)->payload.rcudata, \ | 334 | (rcu_dereference_protected((KEY)->payload.rcu_data0, \ |
340 | rwsem_is_locked(&((struct key *)(KEY))->sem))) | 335 | rwsem_is_locked(&((struct key *)(KEY))->sem))) |
341 | 336 | ||
342 | #define rcu_assign_keypointer(KEY, PAYLOAD) \ | 337 | #define rcu_assign_keypointer(KEY, PAYLOAD) \ |
343 | do { \ | 338 | do { \ |
344 | rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \ | 339 | rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \ |
345 | } while (0) | 340 | } while (0) |
346 | 341 | ||
347 | #ifdef CONFIG_SYSCTL | 342 | #ifdef CONFIG_SYSCTL |
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 637f67002c5a..e6284591599e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
@@ -66,7 +66,7 @@ struct kobject { | |||
66 | struct kobject *parent; | 66 | struct kobject *parent; |
67 | struct kset *kset; | 67 | struct kset *kset; |
68 | struct kobj_type *ktype; | 68 | struct kobj_type *ktype; |
69 | struct kernfs_node *sd; | 69 | struct kernfs_node *sd; /* sysfs directory entry */ |
70 | struct kref kref; | 70 | struct kref kref; |
71 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE | 71 | #ifdef CONFIG_DEBUG_KOBJECT_RELEASE |
72 | struct delayed_work release; | 72 | struct delayed_work release; |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1bef9e21e725..5706a2108f0a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/irqflags.h> | 25 | #include <linux/irqflags.h> |
26 | #include <linux/context_tracking.h> | 26 | #include <linux/context_tracking.h> |
27 | #include <linux/irqbypass.h> | ||
27 | #include <asm/signal.h> | 28 | #include <asm/signal.h> |
28 | 29 | ||
29 | #include <linux/kvm.h> | 30 | #include <linux/kvm.h> |
@@ -140,6 +141,8 @@ static inline bool is_error_page(struct page *page) | |||
140 | #define KVM_REQ_APIC_PAGE_RELOAD 25 | 141 | #define KVM_REQ_APIC_PAGE_RELOAD 25 |
141 | #define KVM_REQ_SMI 26 | 142 | #define KVM_REQ_SMI 26 |
142 | #define KVM_REQ_HV_CRASH 27 | 143 | #define KVM_REQ_HV_CRASH 27 |
144 | #define KVM_REQ_IOAPIC_EOI_EXIT 28 | ||
145 | #define KVM_REQ_HV_RESET 29 | ||
143 | 146 | ||
144 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 147 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
145 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 | 148 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
@@ -231,6 +234,9 @@ struct kvm_vcpu { | |||
231 | unsigned long requests; | 234 | unsigned long requests; |
232 | unsigned long guest_debug; | 235 | unsigned long guest_debug; |
233 | 236 | ||
237 | int pre_pcpu; | ||
238 | struct list_head blocked_vcpu_list; | ||
239 | |||
234 | struct mutex mutex; | 240 | struct mutex mutex; |
235 | struct kvm_run *run; | 241 | struct kvm_run *run; |
236 | 242 | ||
@@ -329,6 +335,18 @@ struct kvm_kernel_irq_routing_entry { | |||
329 | struct hlist_node link; | 335 | struct hlist_node link; |
330 | }; | 336 | }; |
331 | 337 | ||
338 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | ||
339 | struct kvm_irq_routing_table { | ||
340 | int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; | ||
341 | u32 nr_rt_entries; | ||
342 | /* | ||
343 | * Array indexed by gsi. Each entry contains list of irq chips | ||
344 | * the gsi is connected to. | ||
345 | */ | ||
346 | struct hlist_head map[0]; | ||
347 | }; | ||
348 | #endif | ||
349 | |||
332 | #ifndef KVM_PRIVATE_MEM_SLOTS | 350 | #ifndef KVM_PRIVATE_MEM_SLOTS |
333 | #define KVM_PRIVATE_MEM_SLOTS 0 | 351 | #define KVM_PRIVATE_MEM_SLOTS 0 |
334 | #endif | 352 | #endif |
@@ -455,10 +473,14 @@ void vcpu_put(struct kvm_vcpu *vcpu); | |||
455 | 473 | ||
456 | #ifdef __KVM_HAVE_IOAPIC | 474 | #ifdef __KVM_HAVE_IOAPIC |
457 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); | 475 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); |
476 | void kvm_arch_irq_routing_update(struct kvm *kvm); | ||
458 | #else | 477 | #else |
459 | static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) | 478 | static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) |
460 | { | 479 | { |
461 | } | 480 | } |
481 | static inline void kvm_arch_irq_routing_update(struct kvm *kvm) | ||
482 | { | ||
483 | } | ||
462 | #endif | 484 | #endif |
463 | 485 | ||
464 | #ifdef CONFIG_HAVE_KVM_IRQFD | 486 | #ifdef CONFIG_HAVE_KVM_IRQFD |
@@ -625,6 +647,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, | |||
625 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); | 647 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); |
626 | 648 | ||
627 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); | 649 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
650 | void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); | ||
651 | void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); | ||
628 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 652 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
629 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); | 653 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
630 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); | 654 | void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); |
@@ -803,10 +827,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); | |||
803 | 827 | ||
804 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, | 828 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, |
805 | bool line_status); | 829 | bool line_status); |
806 | int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); | ||
807 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, | 830 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, |
808 | int irq_source_id, int level, bool line_status); | 831 | int irq_source_id, int level, bool line_status); |
832 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, | ||
833 | struct kvm *kvm, int irq_source_id, | ||
834 | int level, bool line_status); | ||
809 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); | 835 | bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); |
836 | void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); | ||
810 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); | 837 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
811 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | 838 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
812 | struct kvm_irq_ack_notifier *kian); | 839 | struct kvm_irq_ack_notifier *kian); |
@@ -1002,6 +1029,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) | |||
1002 | #endif | 1029 | #endif |
1003 | 1030 | ||
1004 | int kvm_setup_default_irq_routing(struct kvm *kvm); | 1031 | int kvm_setup_default_irq_routing(struct kvm *kvm); |
1032 | int kvm_setup_empty_irq_routing(struct kvm *kvm); | ||
1005 | int kvm_set_irq_routing(struct kvm *kvm, | 1033 | int kvm_set_irq_routing(struct kvm *kvm, |
1006 | const struct kvm_irq_routing_entry *entries, | 1034 | const struct kvm_irq_routing_entry *entries, |
1007 | unsigned nr, | 1035 | unsigned nr, |
@@ -1144,5 +1172,16 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) | |||
1144 | { | 1172 | { |
1145 | } | 1173 | } |
1146 | #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ | 1174 | #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ |
1147 | #endif | ||
1148 | 1175 | ||
1176 | #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS | ||
1177 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, | ||
1178 | struct irq_bypass_producer *); | ||
1179 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, | ||
1180 | struct irq_bypass_producer *); | ||
1181 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); | ||
1182 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); | ||
1183 | int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, | ||
1184 | uint32_t guest_irq, bool set); | ||
1185 | #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ | ||
1186 | |||
1187 | #endif | ||
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h new file mode 100644 index 000000000000..0c1de05098c8 --- /dev/null +++ b/include/linux/kvm_irqfd.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * irqfd: Allows an fd to be used to inject an interrupt to the guest | ||
12 | * Credit goes to Avi Kivity for the original idea. | ||
13 | */ | ||
14 | |||
15 | #ifndef __LINUX_KVM_IRQFD_H | ||
16 | #define __LINUX_KVM_IRQFD_H | ||
17 | |||
18 | #include <linux/kvm_host.h> | ||
19 | #include <linux/poll.h> | ||
20 | |||
21 | /* | ||
22 | * Resampling irqfds are a special variety of irqfds used to emulate | ||
23 | * level triggered interrupts. The interrupt is asserted on eventfd | ||
24 | * trigger. On acknowledgment through the irq ack notifier, the | ||
25 | * interrupt is de-asserted and userspace is notified through the | ||
26 | * resamplefd. All resamplers on the same gsi are de-asserted | ||
27 | * together, so we don't need to track the state of each individual | ||
28 | * user. We can also therefore share the same irq source ID. | ||
29 | */ | ||
30 | struct kvm_kernel_irqfd_resampler { | ||
31 | struct kvm *kvm; | ||
32 | /* | ||
33 | * List of resampling struct _irqfd objects sharing this gsi. | ||
34 | * RCU list modified under kvm->irqfds.resampler_lock | ||
35 | */ | ||
36 | struct list_head list; | ||
37 | struct kvm_irq_ack_notifier notifier; | ||
38 | /* | ||
39 | * Entry in list of kvm->irqfd.resampler_list. Use for sharing | ||
40 | * resamplers among irqfds on the same gsi. | ||
41 | * Accessed and modified under kvm->irqfds.resampler_lock | ||
42 | */ | ||
43 | struct list_head link; | ||
44 | }; | ||
45 | |||
46 | struct kvm_kernel_irqfd { | ||
47 | /* Used for MSI fast-path */ | ||
48 | struct kvm *kvm; | ||
49 | wait_queue_t wait; | ||
50 | /* Update side is protected by irqfds.lock */ | ||
51 | struct kvm_kernel_irq_routing_entry irq_entry; | ||
52 | seqcount_t irq_entry_sc; | ||
53 | /* Used for level IRQ fast-path */ | ||
54 | int gsi; | ||
55 | struct work_struct inject; | ||
56 | /* The resampler used by this irqfd (resampler-only) */ | ||
57 | struct kvm_kernel_irqfd_resampler *resampler; | ||
58 | /* Eventfd notified on resample (resampler-only) */ | ||
59 | struct eventfd_ctx *resamplefd; | ||
60 | /* Entry in list of irqfds for a resampler (resampler-only) */ | ||
61 | struct list_head resampler_link; | ||
62 | /* Used for setup/shutdown */ | ||
63 | struct eventfd_ctx *eventfd; | ||
64 | struct list_head list; | ||
65 | poll_table pt; | ||
66 | struct work_struct shutdown; | ||
67 | struct irq_bypass_consumer consumer; | ||
68 | struct irq_bypass_producer *producer; | ||
69 | }; | ||
70 | |||
71 | #endif /* __LINUX_KVM_IRQFD_H */ | ||
diff --git a/include/linux/leds.h b/include/linux/leds.h index b122eeafb5dc..fa359c79c825 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -283,6 +283,13 @@ static inline void led_trigger_register_simple(const char *name, | |||
283 | static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} | 283 | static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} |
284 | static inline void led_trigger_event(struct led_trigger *trigger, | 284 | static inline void led_trigger_event(struct led_trigger *trigger, |
285 | enum led_brightness event) {} | 285 | enum led_brightness event) {} |
286 | static inline void led_trigger_blink(struct led_trigger *trigger, | ||
287 | unsigned long *delay_on, | ||
288 | unsigned long *delay_off) {} | ||
289 | static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, | ||
290 | unsigned long *delay_on, | ||
291 | unsigned long *delay_off, | ||
292 | int invert) {} | ||
286 | static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} | 293 | static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} |
287 | static inline void led_trigger_set(struct led_classdev *led_cdev, | 294 | static inline void led_trigger_set(struct led_classdev *led_cdev, |
288 | struct led_trigger *trigger) {} | 295 | struct led_trigger *trigger) {} |
diff --git a/include/linux/libata.h b/include/linux/libata.h index c9cfbcdb8d14..83577f8fd15b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -254,6 +254,7 @@ enum { | |||
254 | 254 | ||
255 | ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ | 255 | ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ |
256 | ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ | 256 | ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ |
257 | ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ | ||
257 | 258 | ||
258 | /* struct ata_queued_cmd flags */ | 259 | /* struct ata_queued_cmd flags */ |
259 | ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ | 260 | ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h new file mode 100644 index 000000000000..69c9057e1ab8 --- /dev/null +++ b/include/linux/lightnvm.h | |||
@@ -0,0 +1,522 @@ | |||
1 | #ifndef NVM_H | ||
2 | #define NVM_H | ||
3 | |||
4 | enum { | ||
5 | NVM_IO_OK = 0, | ||
6 | NVM_IO_REQUEUE = 1, | ||
7 | NVM_IO_DONE = 2, | ||
8 | NVM_IO_ERR = 3, | ||
9 | |||
10 | NVM_IOTYPE_NONE = 0, | ||
11 | NVM_IOTYPE_GC = 1, | ||
12 | }; | ||
13 | |||
14 | #ifdef CONFIG_NVM | ||
15 | |||
16 | #include <linux/blkdev.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/file.h> | ||
19 | #include <linux/dmapool.h> | ||
20 | |||
21 | enum { | ||
22 | /* HW Responsibilities */ | ||
23 | NVM_RSP_L2P = 1 << 0, | ||
24 | NVM_RSP_ECC = 1 << 1, | ||
25 | |||
26 | /* Physical Adressing Mode */ | ||
27 | NVM_ADDRMODE_LINEAR = 0, | ||
28 | NVM_ADDRMODE_CHANNEL = 1, | ||
29 | |||
30 | /* Plane programming mode for LUN */ | ||
31 | NVM_PLANE_SINGLE = 0, | ||
32 | NVM_PLANE_DOUBLE = 1, | ||
33 | NVM_PLANE_QUAD = 2, | ||
34 | |||
35 | /* Status codes */ | ||
36 | NVM_RSP_SUCCESS = 0x0, | ||
37 | NVM_RSP_NOT_CHANGEABLE = 0x1, | ||
38 | NVM_RSP_ERR_FAILWRITE = 0x40ff, | ||
39 | NVM_RSP_ERR_EMPTYPAGE = 0x42ff, | ||
40 | |||
41 | /* Device opcodes */ | ||
42 | NVM_OP_HBREAD = 0x02, | ||
43 | NVM_OP_HBWRITE = 0x81, | ||
44 | NVM_OP_PWRITE = 0x91, | ||
45 | NVM_OP_PREAD = 0x92, | ||
46 | NVM_OP_ERASE = 0x90, | ||
47 | |||
48 | /* PPA Command Flags */ | ||
49 | NVM_IO_SNGL_ACCESS = 0x0, | ||
50 | NVM_IO_DUAL_ACCESS = 0x1, | ||
51 | NVM_IO_QUAD_ACCESS = 0x2, | ||
52 | |||
53 | NVM_IO_SUSPEND = 0x80, | ||
54 | NVM_IO_SLC_MODE = 0x100, | ||
55 | NVM_IO_SCRAMBLE_DISABLE = 0x200, | ||
56 | }; | ||
57 | |||
58 | struct nvm_id_group { | ||
59 | u8 mtype; | ||
60 | u8 fmtype; | ||
61 | u16 res16; | ||
62 | u8 num_ch; | ||
63 | u8 num_lun; | ||
64 | u8 num_pln; | ||
65 | u16 num_blk; | ||
66 | u16 num_pg; | ||
67 | u16 fpg_sz; | ||
68 | u16 csecs; | ||
69 | u16 sos; | ||
70 | u32 trdt; | ||
71 | u32 trdm; | ||
72 | u32 tprt; | ||
73 | u32 tprm; | ||
74 | u32 tbet; | ||
75 | u32 tbem; | ||
76 | u32 mpos; | ||
77 | u16 cpar; | ||
78 | u8 res[913]; | ||
79 | } __packed; | ||
80 | |||
81 | struct nvm_addr_format { | ||
82 | u8 ch_offset; | ||
83 | u8 ch_len; | ||
84 | u8 lun_offset; | ||
85 | u8 lun_len; | ||
86 | u8 pln_offset; | ||
87 | u8 pln_len; | ||
88 | u8 blk_offset; | ||
89 | u8 blk_len; | ||
90 | u8 pg_offset; | ||
91 | u8 pg_len; | ||
92 | u8 sect_offset; | ||
93 | u8 sect_len; | ||
94 | u8 res[4]; | ||
95 | }; | ||
96 | |||
97 | struct nvm_id { | ||
98 | u8 ver_id; | ||
99 | u8 vmnt; | ||
100 | u8 cgrps; | ||
101 | u8 res[5]; | ||
102 | u32 cap; | ||
103 | u32 dom; | ||
104 | struct nvm_addr_format ppaf; | ||
105 | u8 ppat; | ||
106 | u8 resv[224]; | ||
107 | struct nvm_id_group groups[4]; | ||
108 | } __packed; | ||
109 | |||
110 | struct nvm_target { | ||
111 | struct list_head list; | ||
112 | struct nvm_tgt_type *type; | ||
113 | struct gendisk *disk; | ||
114 | }; | ||
115 | |||
116 | struct nvm_tgt_instance { | ||
117 | struct nvm_tgt_type *tt; | ||
118 | }; | ||
119 | |||
120 | #define ADDR_EMPTY (~0ULL) | ||
121 | |||
122 | #define NVM_VERSION_MAJOR 1 | ||
123 | #define NVM_VERSION_MINOR 0 | ||
124 | #define NVM_VERSION_PATCH 0 | ||
125 | |||
126 | #define NVM_SEC_BITS (8) | ||
127 | #define NVM_PL_BITS (6) | ||
128 | #define NVM_PG_BITS (16) | ||
129 | #define NVM_BLK_BITS (16) | ||
130 | #define NVM_LUN_BITS (10) | ||
131 | #define NVM_CH_BITS (8) | ||
132 | |||
133 | struct ppa_addr { | ||
134 | union { | ||
135 | /* Channel-based PPA format in nand 4x2x2x2x8x10 */ | ||
136 | struct { | ||
137 | u64 ch : 4; | ||
138 | u64 sec : 2; /* 4 sectors per page */ | ||
139 | u64 pl : 2; /* 4 planes per LUN */ | ||
140 | u64 lun : 2; /* 4 LUNs per channel */ | ||
141 | u64 pg : 8; /* 256 pages per block */ | ||
142 | u64 blk : 10;/* 1024 blocks per plane */ | ||
143 | u64 resved : 36; | ||
144 | } chnl; | ||
145 | |||
146 | /* Generic structure for all addresses */ | ||
147 | struct { | ||
148 | u64 sec : NVM_SEC_BITS; | ||
149 | u64 pl : NVM_PL_BITS; | ||
150 | u64 pg : NVM_PG_BITS; | ||
151 | u64 blk : NVM_BLK_BITS; | ||
152 | u64 lun : NVM_LUN_BITS; | ||
153 | u64 ch : NVM_CH_BITS; | ||
154 | } g; | ||
155 | |||
156 | u64 ppa; | ||
157 | }; | ||
158 | } __packed; | ||
159 | |||
160 | struct nvm_rq { | ||
161 | struct nvm_tgt_instance *ins; | ||
162 | struct nvm_dev *dev; | ||
163 | |||
164 | struct bio *bio; | ||
165 | |||
166 | union { | ||
167 | struct ppa_addr ppa_addr; | ||
168 | dma_addr_t dma_ppa_list; | ||
169 | }; | ||
170 | |||
171 | struct ppa_addr *ppa_list; | ||
172 | |||
173 | void *metadata; | ||
174 | dma_addr_t dma_metadata; | ||
175 | |||
176 | uint8_t opcode; | ||
177 | uint16_t nr_pages; | ||
178 | uint16_t flags; | ||
179 | }; | ||
180 | |||
181 | static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) | ||
182 | { | ||
183 | return pdu - sizeof(struct nvm_rq); | ||
184 | } | ||
185 | |||
186 | static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | ||
187 | { | ||
188 | return rqdata + 1; | ||
189 | } | ||
190 | |||
191 | struct nvm_block; | ||
192 | |||
193 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | ||
194 | typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); | ||
195 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); | ||
196 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, | ||
197 | nvm_l2p_update_fn *, void *); | ||
198 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, | ||
199 | nvm_bb_update_fn *, void *); | ||
200 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); | ||
201 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); | ||
202 | typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); | ||
203 | typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); | ||
204 | typedef void (nvm_destroy_dma_pool_fn)(void *); | ||
205 | typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, | ||
206 | dma_addr_t *); | ||
207 | typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); | ||
208 | |||
209 | struct nvm_dev_ops { | ||
210 | nvm_id_fn *identity; | ||
211 | nvm_get_l2p_tbl_fn *get_l2p_tbl; | ||
212 | nvm_op_bb_tbl_fn *get_bb_tbl; | ||
213 | nvm_op_set_bb_fn *set_bb; | ||
214 | |||
215 | nvm_submit_io_fn *submit_io; | ||
216 | nvm_erase_blk_fn *erase_block; | ||
217 | |||
218 | nvm_create_dma_pool_fn *create_dma_pool; | ||
219 | nvm_destroy_dma_pool_fn *destroy_dma_pool; | ||
220 | nvm_dev_dma_alloc_fn *dev_dma_alloc; | ||
221 | nvm_dev_dma_free_fn *dev_dma_free; | ||
222 | |||
223 | uint8_t max_phys_sect; | ||
224 | }; | ||
225 | |||
226 | struct nvm_lun { | ||
227 | int id; | ||
228 | |||
229 | int lun_id; | ||
230 | int chnl_id; | ||
231 | |||
232 | unsigned int nr_free_blocks; /* Number of unused blocks */ | ||
233 | struct nvm_block *blocks; | ||
234 | |||
235 | spinlock_t lock; | ||
236 | }; | ||
237 | |||
238 | struct nvm_block { | ||
239 | struct list_head list; | ||
240 | struct nvm_lun *lun; | ||
241 | unsigned long id; | ||
242 | |||
243 | void *priv; | ||
244 | int type; | ||
245 | }; | ||
246 | |||
247 | struct nvm_dev { | ||
248 | struct nvm_dev_ops *ops; | ||
249 | |||
250 | struct list_head devices; | ||
251 | struct list_head online_targets; | ||
252 | |||
253 | /* Media manager */ | ||
254 | struct nvmm_type *mt; | ||
255 | void *mp; | ||
256 | |||
257 | /* Device information */ | ||
258 | int nr_chnls; | ||
259 | int nr_planes; | ||
260 | int luns_per_chnl; | ||
261 | int sec_per_pg; /* only sectors for a single page */ | ||
262 | int pgs_per_blk; | ||
263 | int blks_per_lun; | ||
264 | int sec_size; | ||
265 | int oob_size; | ||
266 | int addr_mode; | ||
267 | struct nvm_addr_format addr_format; | ||
268 | |||
269 | /* Calculated/Cached values. These do not reflect the actual usable | ||
270 | * blocks at run-time. | ||
271 | */ | ||
272 | int max_rq_size; | ||
273 | int plane_mode; /* drive device in single, double or quad mode */ | ||
274 | |||
275 | int sec_per_pl; /* all sectors across planes */ | ||
276 | int sec_per_blk; | ||
277 | int sec_per_lun; | ||
278 | |||
279 | unsigned long total_pages; | ||
280 | unsigned long total_blocks; | ||
281 | int nr_luns; | ||
282 | unsigned max_pages_per_blk; | ||
283 | |||
284 | void *ppalist_pool; | ||
285 | |||
286 | struct nvm_id identity; | ||
287 | |||
288 | /* Backend device */ | ||
289 | struct request_queue *q; | ||
290 | char name[DISK_NAME_LEN]; | ||
291 | }; | ||
292 | |||
293 | /* fallback conversion */ | ||
294 | static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, | ||
295 | struct ppa_addr r) | ||
296 | { | ||
297 | struct ppa_addr l; | ||
298 | |||
299 | l.ppa = r.g.sec + | ||
300 | r.g.pg * dev->sec_per_pg + | ||
301 | r.g.blk * (dev->pgs_per_blk * | ||
302 | dev->sec_per_pg) + | ||
303 | r.g.lun * (dev->blks_per_lun * | ||
304 | dev->pgs_per_blk * | ||
305 | dev->sec_per_pg) + | ||
306 | r.g.ch * (dev->blks_per_lun * | ||
307 | dev->pgs_per_blk * | ||
308 | dev->luns_per_chnl * | ||
309 | dev->sec_per_pg); | ||
310 | |||
311 | return l; | ||
312 | } | ||
313 | |||
314 | /* fallback conversion */ | ||
315 | static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, | ||
316 | struct ppa_addr r) | ||
317 | { | ||
318 | struct ppa_addr l; | ||
319 | int secs, pgs, blks, luns; | ||
320 | sector_t ppa = r.ppa; | ||
321 | |||
322 | l.ppa = 0; | ||
323 | |||
324 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | ||
325 | l.g.sec = secs; | ||
326 | |||
327 | sector_div(ppa, dev->sec_per_pg); | ||
328 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
329 | l.g.pg = pgs; | ||
330 | |||
331 | sector_div(ppa, dev->pgs_per_blk); | ||
332 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
333 | l.g.blk = blks; | ||
334 | |||
335 | sector_div(ppa, dev->blks_per_lun); | ||
336 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
337 | l.g.lun = luns; | ||
338 | |||
339 | sector_div(ppa, dev->luns_per_chnl); | ||
340 | l.g.ch = ppa; | ||
341 | |||
342 | return l; | ||
343 | } | ||
344 | |||
345 | static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) | ||
346 | { | ||
347 | struct ppa_addr l; | ||
348 | |||
349 | l.ppa = 0; | ||
350 | |||
351 | l.chnl.sec = r.g.sec; | ||
352 | l.chnl.pl = r.g.pl; | ||
353 | l.chnl.pg = r.g.pg; | ||
354 | l.chnl.blk = r.g.blk; | ||
355 | l.chnl.lun = r.g.lun; | ||
356 | l.chnl.ch = r.g.ch; | ||
357 | |||
358 | return l; | ||
359 | } | ||
360 | |||
361 | static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) | ||
362 | { | ||
363 | struct ppa_addr l; | ||
364 | |||
365 | l.ppa = 0; | ||
366 | |||
367 | l.g.sec = r.chnl.sec; | ||
368 | l.g.pl = r.chnl.pl; | ||
369 | l.g.pg = r.chnl.pg; | ||
370 | l.g.blk = r.chnl.blk; | ||
371 | l.g.lun = r.chnl.lun; | ||
372 | l.g.ch = r.chnl.ch; | ||
373 | |||
374 | return l; | ||
375 | } | ||
376 | |||
377 | static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, | ||
378 | struct ppa_addr gppa) | ||
379 | { | ||
380 | switch (dev->addr_mode) { | ||
381 | case NVM_ADDRMODE_LINEAR: | ||
382 | return __linear_to_generic_addr(dev, gppa); | ||
383 | case NVM_ADDRMODE_CHANNEL: | ||
384 | return __chnl_to_generic_addr(gppa); | ||
385 | default: | ||
386 | BUG(); | ||
387 | } | ||
388 | return gppa; | ||
389 | } | ||
390 | |||
391 | static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, | ||
392 | struct ppa_addr gppa) | ||
393 | { | ||
394 | switch (dev->addr_mode) { | ||
395 | case NVM_ADDRMODE_LINEAR: | ||
396 | return __generic_to_linear_addr(dev, gppa); | ||
397 | case NVM_ADDRMODE_CHANNEL: | ||
398 | return __generic_to_chnl_addr(gppa); | ||
399 | default: | ||
400 | BUG(); | ||
401 | } | ||
402 | return gppa; | ||
403 | } | ||
404 | |||
405 | static inline int ppa_empty(struct ppa_addr ppa_addr) | ||
406 | { | ||
407 | return (ppa_addr.ppa == ADDR_EMPTY); | ||
408 | } | ||
409 | |||
410 | static inline void ppa_set_empty(struct ppa_addr *ppa_addr) | ||
411 | { | ||
412 | ppa_addr->ppa = ADDR_EMPTY; | ||
413 | } | ||
414 | |||
415 | static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, | ||
416 | struct nvm_block *blk) | ||
417 | { | ||
418 | struct ppa_addr ppa; | ||
419 | struct nvm_lun *lun = blk->lun; | ||
420 | |||
421 | ppa.ppa = 0; | ||
422 | ppa.g.blk = blk->id % dev->blks_per_lun; | ||
423 | ppa.g.lun = lun->lun_id; | ||
424 | ppa.g.ch = lun->chnl_id; | ||
425 | |||
426 | return ppa; | ||
427 | } | ||
428 | |||
429 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); | ||
430 | typedef sector_t (nvm_tgt_capacity_fn)(void *); | ||
431 | typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int); | ||
432 | typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); | ||
433 | typedef void (nvm_tgt_exit_fn)(void *); | ||
434 | |||
435 | struct nvm_tgt_type { | ||
436 | const char *name; | ||
437 | unsigned int version[3]; | ||
438 | |||
439 | /* target entry points */ | ||
440 | nvm_tgt_make_rq_fn *make_rq; | ||
441 | nvm_tgt_capacity_fn *capacity; | ||
442 | nvm_tgt_end_io_fn *end_io; | ||
443 | |||
444 | /* module-specific init/teardown */ | ||
445 | nvm_tgt_init_fn *init; | ||
446 | nvm_tgt_exit_fn *exit; | ||
447 | |||
448 | /* For internal use */ | ||
449 | struct list_head list; | ||
450 | }; | ||
451 | |||
452 | extern int nvm_register_target(struct nvm_tgt_type *); | ||
453 | extern void nvm_unregister_target(struct nvm_tgt_type *); | ||
454 | |||
455 | extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); | ||
456 | extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); | ||
457 | |||
458 | typedef int (nvmm_register_fn)(struct nvm_dev *); | ||
459 | typedef void (nvmm_unregister_fn)(struct nvm_dev *); | ||
460 | typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, | ||
461 | struct nvm_lun *, unsigned long); | ||
462 | typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
463 | typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
464 | typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
465 | typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); | ||
466 | typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); | ||
467 | typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); | ||
468 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, | ||
469 | unsigned long); | ||
470 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | ||
471 | typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); | ||
472 | |||
473 | struct nvmm_type { | ||
474 | const char *name; | ||
475 | unsigned int version[3]; | ||
476 | |||
477 | nvmm_register_fn *register_mgr; | ||
478 | nvmm_unregister_fn *unregister_mgr; | ||
479 | |||
480 | /* Block administration callbacks */ | ||
481 | nvmm_get_blk_fn *get_blk; | ||
482 | nvmm_put_blk_fn *put_blk; | ||
483 | nvmm_open_blk_fn *open_blk; | ||
484 | nvmm_close_blk_fn *close_blk; | ||
485 | nvmm_flush_blk_fn *flush_blk; | ||
486 | |||
487 | nvmm_submit_io_fn *submit_io; | ||
488 | nvmm_end_io_fn *end_io; | ||
489 | nvmm_erase_blk_fn *erase_blk; | ||
490 | |||
491 | /* Configuration management */ | ||
492 | nvmm_get_lun_fn *get_lun; | ||
493 | |||
494 | /* Statistics */ | ||
495 | nvmm_free_blocks_print_fn *free_blocks_print; | ||
496 | struct list_head list; | ||
497 | }; | ||
498 | |||
499 | extern int nvm_register_mgr(struct nvmm_type *); | ||
500 | extern void nvm_unregister_mgr(struct nvmm_type *); | ||
501 | |||
502 | extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, | ||
503 | unsigned long); | ||
504 | extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); | ||
505 | |||
506 | extern int nvm_register(struct request_queue *, char *, | ||
507 | struct nvm_dev_ops *); | ||
508 | extern void nvm_unregister(char *); | ||
509 | |||
510 | extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); | ||
511 | extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); | ||
512 | #else /* CONFIG_NVM */ | ||
513 | struct nvm_dev_ops; | ||
514 | |||
515 | static inline int nvm_register(struct request_queue *q, char *disk_name, | ||
516 | struct nvm_dev_ops *ops) | ||
517 | { | ||
518 | return -EINVAL; | ||
519 | } | ||
520 | static inline void nvm_unregister(char *disk_name) {} | ||
521 | #endif /* CONFIG_NVM */ | ||
522 | #endif /* LIGHTNVM.H */ | ||
diff --git a/include/linux/list.h b/include/linux/list.h index 3e3e64a61002..993395a2e55c 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head) | |||
87 | static inline void __list_del(struct list_head * prev, struct list_head * next) | 87 | static inline void __list_del(struct list_head * prev, struct list_head * next) |
88 | { | 88 | { |
89 | next->prev = prev; | 89 | next->prev = prev; |
90 | prev->next = next; | 90 | WRITE_ONCE(prev->next, next); |
91 | } | 91 | } |
92 | 92 | ||
93 | /** | 93 | /** |
@@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n) | |||
615 | { | 615 | { |
616 | struct hlist_node *next = n->next; | 616 | struct hlist_node *next = n->next; |
617 | struct hlist_node **pprev = n->pprev; | 617 | struct hlist_node **pprev = n->pprev; |
618 | *pprev = next; | 618 | |
619 | WRITE_ONCE(*pprev, next); | ||
619 | if (next) | 620 | if (next) |
620 | next->pprev = pprev; | 621 | next->pprev = pprev; |
621 | } | 622 | } |
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 2eb88556c5c5..8132214e8efd 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h | |||
@@ -93,9 +93,10 @@ static inline void __hlist_bl_del(struct hlist_bl_node *n) | |||
93 | LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); | 93 | LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); |
94 | 94 | ||
95 | /* pprev may be `first`, so be careful not to lose the lock bit */ | 95 | /* pprev may be `first`, so be careful not to lose the lock bit */ |
96 | *pprev = (struct hlist_bl_node *) | 96 | WRITE_ONCE(*pprev, |
97 | (struct hlist_bl_node *) | ||
97 | ((unsigned long)next | | 98 | ((unsigned long)next | |
98 | ((unsigned long)*pprev & LIST_BL_LOCKMASK)); | 99 | ((unsigned long)*pprev & LIST_BL_LOCKMASK))); |
99 | if (next) | 100 | if (next) |
100 | next->pprev = pprev; | 101 | next->pprev = pprev; |
101 | } | 102 | } |
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index f266661d2666..444d2b1313bd 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h | |||
@@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) | |||
76 | { | 76 | { |
77 | struct hlist_nulls_node *next = n->next; | 77 | struct hlist_nulls_node *next = n->next; |
78 | struct hlist_nulls_node **pprev = n->pprev; | 78 | struct hlist_nulls_node **pprev = n->pprev; |
79 | *pprev = next; | 79 | |
80 | WRITE_ONCE(*pprev, next); | ||
80 | if (!is_a_nulls(next)) | 81 | if (!is_a_nulls(next)) |
81 | next->pprev = pprev; | 82 | next->pprev = pprev; |
82 | } | 83 | } |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index ff82a32871b5..c15373894a42 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -68,6 +68,7 @@ struct nlm_host { | |||
68 | struct nsm_handle *h_nsmhandle; /* NSM status handle */ | 68 | struct nsm_handle *h_nsmhandle; /* NSM status handle */ |
69 | char *h_addrbuf; /* address eyecatcher */ | 69 | char *h_addrbuf; /* address eyecatcher */ |
70 | struct net *net; /* host net */ | 70 | struct net *net; /* host net */ |
71 | char nodename[UNX_MAXNODENAME + 1]; | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | /* | 74 | /* |
@@ -235,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *); | |||
235 | struct nlm_host * nlm_get_host(struct nlm_host *); | 236 | struct nlm_host * nlm_get_host(struct nlm_host *); |
236 | void nlm_shutdown_hosts(void); | 237 | void nlm_shutdown_hosts(void); |
237 | void nlm_shutdown_hosts_net(struct net *net); | 238 | void nlm_shutdown_hosts_net(struct net *net); |
238 | void nlm_host_rebooted(const struct nlm_reboot *); | 239 | void nlm_host_rebooted(const struct net *net, |
240 | const struct nlm_reboot *); | ||
239 | 241 | ||
240 | /* | 242 | /* |
241 | * Host monitoring | 243 | * Host monitoring |
@@ -243,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *); | |||
243 | int nsm_monitor(const struct nlm_host *host); | 245 | int nsm_monitor(const struct nlm_host *host); |
244 | void nsm_unmonitor(const struct nlm_host *host); | 246 | void nsm_unmonitor(const struct nlm_host *host); |
245 | 247 | ||
246 | struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, | 248 | struct nsm_handle *nsm_get_handle(const struct net *net, |
249 | const struct sockaddr *sap, | ||
247 | const size_t salen, | 250 | const size_t salen, |
248 | const char *hostname, | 251 | const char *hostname, |
249 | const size_t hostname_len); | 252 | const size_t hostname_len); |
250 | struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); | 253 | struct nsm_handle *nsm_reboot_lookup(const struct net *net, |
254 | const struct nlm_reboot *info); | ||
251 | void nsm_release(struct nsm_handle *nsm); | 255 | void nsm_release(struct nsm_handle *nsm); |
252 | 256 | ||
253 | /* | 257 | /* |
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index e6982ac3200d..a57f0dfb6db7 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 | 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 |
18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 | 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 |
19 | #define MARVELL_PHY_ID_88E1540 0x01410eb0 | ||
19 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | 20 | #define MARVELL_PHY_ID_88E3016 0x01410e60 |
20 | 21 | ||
21 | /* struct phy_device dev_flags definitions */ | 22 | /* struct phy_device dev_flags definitions */ |
diff --git a/include/linux/math64.h b/include/linux/math64.h index c45c089bfdac..6e8b5b270ffe 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | |||
142 | } | 142 | } |
143 | #endif /* mul_u64_u32_shr */ | 143 | #endif /* mul_u64_u32_shr */ |
144 | 144 | ||
145 | #ifndef mul_u64_u64_shr | ||
146 | static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) | ||
147 | { | ||
148 | return (u64)(((unsigned __int128)a * mul) >> shift); | ||
149 | } | ||
150 | #endif /* mul_u64_u64_shr */ | ||
151 | |||
145 | #else | 152 | #else |
146 | 153 | ||
147 | #ifndef mul_u64_u32_shr | 154 | #ifndef mul_u64_u32_shr |
@@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | |||
161 | } | 168 | } |
162 | #endif /* mul_u64_u32_shr */ | 169 | #endif /* mul_u64_u32_shr */ |
163 | 170 | ||
171 | #ifndef mul_u64_u64_shr | ||
172 | static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) | ||
173 | { | ||
174 | union { | ||
175 | u64 ll; | ||
176 | struct { | ||
177 | #ifdef __BIG_ENDIAN | ||
178 | u32 high, low; | ||
179 | #else | ||
180 | u32 low, high; | ||
181 | #endif | ||
182 | } l; | ||
183 | } rl, rm, rn, rh, a0, b0; | ||
184 | u64 c; | ||
185 | |||
186 | a0.ll = a; | ||
187 | b0.ll = b; | ||
188 | |||
189 | rl.ll = (u64)a0.l.low * b0.l.low; | ||
190 | rm.ll = (u64)a0.l.low * b0.l.high; | ||
191 | rn.ll = (u64)a0.l.high * b0.l.low; | ||
192 | rh.ll = (u64)a0.l.high * b0.l.high; | ||
193 | |||
194 | /* | ||
195 | * Each of these lines computes a 64-bit intermediate result into "c", | ||
196 | * starting at bits 32-95. The low 32-bits go into the result of the | ||
197 | * multiplication, the high 32-bits are carried into the next step. | ||
198 | */ | ||
199 | rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; | ||
200 | rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; | ||
201 | rh.l.high = (c >> 32) + rh.l.high; | ||
202 | |||
203 | /* | ||
204 | * The 128-bit result of the multiplication is in rl.ll and rh.ll, | ||
205 | * shift it right and throw away the high part of the result. | ||
206 | */ | ||
207 | if (shift == 0) | ||
208 | return rl.ll; | ||
209 | if (shift < 64) | ||
210 | return (rl.ll >> shift) | (rh.ll << (64 - shift)); | ||
211 | return rh.ll >> (shift & 63); | ||
212 | } | ||
213 | #endif /* mul_u64_u64_shr */ | ||
214 | |||
164 | #endif | 215 | #endif |
165 | 216 | ||
217 | #ifndef mul_u64_u32_div | ||
218 | static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) | ||
219 | { | ||
220 | union { | ||
221 | u64 ll; | ||
222 | struct { | ||
223 | #ifdef __BIG_ENDIAN | ||
224 | u32 high, low; | ||
225 | #else | ||
226 | u32 low, high; | ||
227 | #endif | ||
228 | } l; | ||
229 | } u, rl, rh; | ||
230 | |||
231 | u.ll = a; | ||
232 | rl.ll = (u64)u.l.low * mul; | ||
233 | rh.ll = (u64)u.l.high * mul + rl.l.high; | ||
234 | |||
235 | /* Bits 32-63 of the result will be in rh.l.low. */ | ||
236 | rl.l.high = do_div(rh.ll, divisor); | ||
237 | |||
238 | /* Bits 0-31 of the result will be in rl.l.low. */ | ||
239 | do_div(rl.ll, divisor); | ||
240 | |||
241 | rl.l.high = rh.l.low; | ||
242 | return rl.ll; | ||
243 | } | ||
244 | #endif /* mul_u64_u32_div */ | ||
245 | |||
166 | #endif /* _LINUX_MATH64_H */ | 246 | #endif /* _LINUX_MATH64_H */ |
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 0962b2ca628a..e746919530f5 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h | |||
@@ -8,8 +8,8 @@ | |||
8 | struct mei_cl_device; | 8 | struct mei_cl_device; |
9 | struct mei_device; | 9 | struct mei_device; |
10 | 10 | ||
11 | typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, | 11 | typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, |
12 | u32 events, void *context); | 12 | u32 events, void *context); |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * struct mei_cl_device - MEI device handle | 15 | * struct mei_cl_device - MEI device handle |
@@ -45,7 +45,7 @@ struct mei_cl_device { | |||
45 | char name[MEI_CL_NAME_SIZE]; | 45 | char name[MEI_CL_NAME_SIZE]; |
46 | 46 | ||
47 | struct work_struct event_work; | 47 | struct work_struct event_work; |
48 | mei_cl_event_cb_t event_cb; | 48 | mei_cldev_event_cb_t event_cb; |
49 | void *event_context; | 49 | void *event_context; |
50 | unsigned long events_mask; | 50 | unsigned long events_mask; |
51 | unsigned long events; | 51 | unsigned long events; |
@@ -62,33 +62,37 @@ struct mei_cl_driver { | |||
62 | 62 | ||
63 | const struct mei_cl_device_id *id_table; | 63 | const struct mei_cl_device_id *id_table; |
64 | 64 | ||
65 | int (*probe)(struct mei_cl_device *dev, | 65 | int (*probe)(struct mei_cl_device *cldev, |
66 | const struct mei_cl_device_id *id); | 66 | const struct mei_cl_device_id *id); |
67 | int (*remove)(struct mei_cl_device *dev); | 67 | int (*remove)(struct mei_cl_device *cldev); |
68 | }; | 68 | }; |
69 | 69 | ||
70 | int __mei_cl_driver_register(struct mei_cl_driver *driver, | 70 | int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, |
71 | struct module *owner); | 71 | struct module *owner); |
72 | #define mei_cl_driver_register(driver) \ | 72 | #define mei_cldev_driver_register(cldrv) \ |
73 | __mei_cl_driver_register(driver, THIS_MODULE) | 73 | __mei_cldev_driver_register(cldrv, THIS_MODULE) |
74 | 74 | ||
75 | void mei_cl_driver_unregister(struct mei_cl_driver *driver); | 75 | void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); |
76 | 76 | ||
77 | ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); | 77 | ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); |
78 | ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); | 78 | ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); |
79 | 79 | ||
80 | int mei_cl_register_event_cb(struct mei_cl_device *device, | 80 | int mei_cldev_register_event_cb(struct mei_cl_device *cldev, |
81 | unsigned long event_mask, | 81 | unsigned long event_mask, |
82 | mei_cl_event_cb_t read_cb, void *context); | 82 | mei_cldev_event_cb_t read_cb, void *context); |
83 | 83 | ||
84 | #define MEI_CL_EVENT_RX 0 | 84 | #define MEI_CL_EVENT_RX 0 |
85 | #define MEI_CL_EVENT_TX 1 | 85 | #define MEI_CL_EVENT_TX 1 |
86 | #define MEI_CL_EVENT_NOTIF 2 | 86 | #define MEI_CL_EVENT_NOTIF 2 |
87 | 87 | ||
88 | void *mei_cl_get_drvdata(const struct mei_cl_device *device); | 88 | const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); |
89 | void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); | 89 | u8 mei_cldev_ver(const struct mei_cl_device *cldev); |
90 | 90 | ||
91 | int mei_cl_enable_device(struct mei_cl_device *device); | 91 | void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); |
92 | int mei_cl_disable_device(struct mei_cl_device *device); | 92 | void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); |
93 | |||
94 | int mei_cldev_enable(struct mei_cl_device *cldev); | ||
95 | int mei_cldev_disable(struct mei_cl_device *cldev); | ||
96 | bool mei_cldev_enabled(struct mei_cl_device *cldev); | ||
93 | 97 | ||
94 | #endif /* _LINUX_MEI_CL_BUS_H */ | 98 | #endif /* _LINUX_MEI_CL_BUS_H */ |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index c518eb589260..24daf8fc4d7c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -89,10 +89,6 @@ int memblock_add_range(struct memblock_type *type, | |||
89 | phys_addr_t base, phys_addr_t size, | 89 | phys_addr_t base, phys_addr_t size, |
90 | int nid, unsigned long flags); | 90 | int nid, unsigned long flags); |
91 | 91 | ||
92 | int memblock_remove_range(struct memblock_type *type, | ||
93 | phys_addr_t base, | ||
94 | phys_addr_t size); | ||
95 | |||
96 | void __next_mem_range(u64 *idx, int nid, ulong flags, | 92 | void __next_mem_range(u64 *idx, int nid, ulong flags, |
97 | struct memblock_type *type_a, | 93 | struct memblock_type *type_a, |
98 | struct memblock_type *type_b, phys_addr_t *out_start, | 94 | struct memblock_type *type_b, phys_addr_t *out_start, |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..cd0e2413c358 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -213,6 +213,9 @@ struct mem_cgroup { | |||
213 | /* OOM-Killer disable */ | 213 | /* OOM-Killer disable */ |
214 | int oom_kill_disable; | 214 | int oom_kill_disable; |
215 | 215 | ||
216 | /* handle for "memory.events" */ | ||
217 | struct cgroup_file events_file; | ||
218 | |||
216 | /* protect arrays of thresholds */ | 219 | /* protect arrays of thresholds */ |
217 | struct mutex thresholds_lock; | 220 | struct mutex thresholds_lock; |
218 | 221 | ||
@@ -242,7 +245,6 @@ struct mem_cgroup { | |||
242 | * percpu counter. | 245 | * percpu counter. |
243 | */ | 246 | */ |
244 | struct mem_cgroup_stat_cpu __percpu *stat; | 247 | struct mem_cgroup_stat_cpu __percpu *stat; |
245 | spinlock_t pcp_counter_lock; | ||
246 | 248 | ||
247 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | 249 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) |
248 | struct cg_proto tcp_mem; | 250 | struct cg_proto tcp_mem; |
@@ -286,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg, | |||
286 | unsigned int nr) | 288 | unsigned int nr) |
287 | { | 289 | { |
288 | this_cpu_add(memcg->stat->events[idx], nr); | 290 | this_cpu_add(memcg->stat->events[idx], nr); |
291 | cgroup_file_notify(&memcg->events_file); | ||
289 | } | 292 | } |
290 | 293 | ||
291 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); | 294 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); |
@@ -298,8 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); | |||
298 | void mem_cgroup_uncharge(struct page *page); | 301 | void mem_cgroup_uncharge(struct page *page); |
299 | void mem_cgroup_uncharge_list(struct list_head *page_list); | 302 | void mem_cgroup_uncharge_list(struct list_head *page_list); |
300 | 303 | ||
301 | void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, | 304 | void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); |
302 | bool lrucare); | ||
303 | 305 | ||
304 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); | 306 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); |
305 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); | 307 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); |
@@ -347,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page); | |||
347 | 349 | ||
348 | static inline bool mem_cgroup_disabled(void) | 350 | static inline bool mem_cgroup_disabled(void) |
349 | { | 351 | { |
350 | if (memory_cgrp_subsys.disabled) | 352 | return !cgroup_subsys_enabled(memory_cgrp_subsys); |
351 | return true; | ||
352 | return false; | ||
353 | } | 353 | } |
354 | 354 | ||
355 | /* | 355 | /* |
@@ -383,7 +383,7 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) | |||
383 | return mz->lru_size[lru]; | 383 | return mz->lru_size[lru]; |
384 | } | 384 | } |
385 | 385 | ||
386 | static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | 386 | static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) |
387 | { | 387 | { |
388 | unsigned long inactive_ratio; | 388 | unsigned long inactive_ratio; |
389 | unsigned long inactive; | 389 | unsigned long inactive; |
@@ -402,24 +402,26 @@ static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | |||
402 | return inactive * inactive_ratio < active; | 402 | return inactive * inactive_ratio < active; |
403 | } | 403 | } |
404 | 404 | ||
405 | void mem_cgroup_handle_over_high(void); | ||
406 | |||
405 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | 407 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, |
406 | struct task_struct *p); | 408 | struct task_struct *p); |
407 | 409 | ||
408 | static inline void mem_cgroup_oom_enable(void) | 410 | static inline void mem_cgroup_oom_enable(void) |
409 | { | 411 | { |
410 | WARN_ON(current->memcg_oom.may_oom); | 412 | WARN_ON(current->memcg_may_oom); |
411 | current->memcg_oom.may_oom = 1; | 413 | current->memcg_may_oom = 1; |
412 | } | 414 | } |
413 | 415 | ||
414 | static inline void mem_cgroup_oom_disable(void) | 416 | static inline void mem_cgroup_oom_disable(void) |
415 | { | 417 | { |
416 | WARN_ON(!current->memcg_oom.may_oom); | 418 | WARN_ON(!current->memcg_may_oom); |
417 | current->memcg_oom.may_oom = 0; | 419 | current->memcg_may_oom = 0; |
418 | } | 420 | } |
419 | 421 | ||
420 | static inline bool task_in_memcg_oom(struct task_struct *p) | 422 | static inline bool task_in_memcg_oom(struct task_struct *p) |
421 | { | 423 | { |
422 | return p->memcg_oom.memcg; | 424 | return p->memcg_in_oom; |
423 | } | 425 | } |
424 | 426 | ||
425 | bool mem_cgroup_oom_synchronize(bool wait); | 427 | bool mem_cgroup_oom_synchronize(bool wait); |
@@ -536,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) | |||
536 | { | 538 | { |
537 | } | 539 | } |
538 | 540 | ||
539 | static inline void mem_cgroup_migrate(struct page *oldpage, | 541 | static inline void mem_cgroup_replace_page(struct page *old, struct page *new) |
540 | struct page *newpage, | ||
541 | bool lrucare) | ||
542 | { | 542 | { |
543 | } | 543 | } |
544 | 544 | ||
@@ -584,10 +584,10 @@ static inline bool mem_cgroup_disabled(void) | |||
584 | return true; | 584 | return true; |
585 | } | 585 | } |
586 | 586 | ||
587 | static inline int | 587 | static inline bool |
588 | mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | 588 | mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) |
589 | { | 589 | { |
590 | return 1; | 590 | return true; |
591 | } | 591 | } |
592 | 592 | ||
593 | static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) | 593 | static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) |
@@ -621,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) | |||
621 | { | 621 | { |
622 | } | 622 | } |
623 | 623 | ||
624 | static inline void mem_cgroup_handle_over_high(void) | ||
625 | { | ||
626 | } | ||
627 | |||
624 | static inline void mem_cgroup_oom_enable(void) | 628 | static inline void mem_cgroup_oom_enable(void) |
625 | { | 629 | { |
626 | } | 630 | } |
@@ -677,8 +681,9 @@ enum { | |||
677 | 681 | ||
678 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); | 682 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); |
679 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); | 683 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); |
680 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, | 684 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, |
681 | unsigned long *pdirty, unsigned long *pwriteback); | 685 | unsigned long *pheadroom, unsigned long *pdirty, |
686 | unsigned long *pwriteback); | ||
682 | 687 | ||
683 | #else /* CONFIG_CGROUP_WRITEBACK */ | 688 | #else /* CONFIG_CGROUP_WRITEBACK */ |
684 | 689 | ||
@@ -688,7 +693,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | |||
688 | } | 693 | } |
689 | 694 | ||
690 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, | 695 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, |
691 | unsigned long *pavail, | 696 | unsigned long *pfilepages, |
697 | unsigned long *pheadroom, | ||
692 | unsigned long *pdirty, | 698 | unsigned long *pdirty, |
693 | unsigned long *pwriteback) | 699 | unsigned long *pwriteback) |
694 | { | 700 | { |
@@ -745,11 +751,10 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) | |||
745 | * conditions, but because they are pretty simple, they are expected to be | 751 | * conditions, but because they are pretty simple, they are expected to be |
746 | * fast. | 752 | * fast. |
747 | */ | 753 | */ |
748 | bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, | 754 | int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, |
749 | int order); | 755 | struct mem_cgroup *memcg); |
750 | void __memcg_kmem_commit_charge(struct page *page, | 756 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); |
751 | struct mem_cgroup *memcg, int order); | 757 | void __memcg_kmem_uncharge(struct page *page, int order); |
752 | void __memcg_kmem_uncharge_pages(struct page *page, int order); | ||
753 | 758 | ||
754 | /* | 759 | /* |
755 | * helper for acessing a memcg's index. It will be used as an index in the | 760 | * helper for acessing a memcg's index. It will be used as an index in the |
@@ -764,77 +769,42 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) | |||
764 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); | 769 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); |
765 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); | 770 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); |
766 | 771 | ||
767 | struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr); | 772 | static inline bool __memcg_kmem_bypass(gfp_t gfp) |
768 | |||
769 | int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, | ||
770 | unsigned long nr_pages); | ||
771 | void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); | ||
772 | |||
773 | /** | ||
774 | * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. | ||
775 | * @gfp: the gfp allocation flags. | ||
776 | * @memcg: a pointer to the memcg this was charged against. | ||
777 | * @order: allocation order. | ||
778 | * | ||
779 | * returns true if the memcg where the current task belongs can hold this | ||
780 | * allocation. | ||
781 | * | ||
782 | * We return true automatically if this allocation is not to be accounted to | ||
783 | * any memcg. | ||
784 | */ | ||
785 | static inline bool | ||
786 | memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | ||
787 | { | 773 | { |
788 | if (!memcg_kmem_enabled()) | 774 | if (!memcg_kmem_enabled()) |
789 | return true; | 775 | return true; |
790 | |||
791 | if (gfp & __GFP_NOACCOUNT) | 776 | if (gfp & __GFP_NOACCOUNT) |
792 | return true; | 777 | return true; |
793 | /* | ||
794 | * __GFP_NOFAIL allocations will move on even if charging is not | ||
795 | * possible. Therefore we don't even try, and have this allocation | ||
796 | * unaccounted. We could in theory charge it forcibly, but we hope | ||
797 | * those allocations are rare, and won't be worth the trouble. | ||
798 | */ | ||
799 | if (gfp & __GFP_NOFAIL) | ||
800 | return true; | ||
801 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) | 778 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) |
802 | return true; | 779 | return true; |
803 | 780 | return false; | |
804 | /* If the test is dying, just let it go. */ | ||
805 | if (unlikely(fatal_signal_pending(current))) | ||
806 | return true; | ||
807 | |||
808 | return __memcg_kmem_newpage_charge(gfp, memcg, order); | ||
809 | } | 781 | } |
810 | 782 | ||
811 | /** | 783 | /** |
812 | * memcg_kmem_uncharge_pages: uncharge pages from memcg | 784 | * memcg_kmem_charge: charge a kmem page |
813 | * @page: pointer to struct page being freed | 785 | * @page: page to charge |
814 | * @order: allocation order. | 786 | * @gfp: reclaim mode |
787 | * @order: allocation order | ||
788 | * | ||
789 | * Returns 0 on success, an error code on failure. | ||
815 | */ | 790 | */ |
816 | static inline void | 791 | static __always_inline int memcg_kmem_charge(struct page *page, |
817 | memcg_kmem_uncharge_pages(struct page *page, int order) | 792 | gfp_t gfp, int order) |
818 | { | 793 | { |
819 | if (memcg_kmem_enabled()) | 794 | if (__memcg_kmem_bypass(gfp)) |
820 | __memcg_kmem_uncharge_pages(page, order); | 795 | return 0; |
796 | return __memcg_kmem_charge(page, gfp, order); | ||
821 | } | 797 | } |
822 | 798 | ||
823 | /** | 799 | /** |
824 | * memcg_kmem_commit_charge: embeds correct memcg in a page | 800 | * memcg_kmem_uncharge: uncharge a kmem page |
825 | * @page: pointer to struct page recently allocated | 801 | * @page: page to uncharge |
826 | * @memcg: the memcg structure we charged against | 802 | * @order: allocation order |
827 | * @order: allocation order. | ||
828 | * | ||
829 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or | ||
830 | * failure of the allocation. if @page is NULL, this function will revert the | ||
831 | * charges. Otherwise, it will commit @page to @memcg. | ||
832 | */ | 803 | */ |
833 | static inline void | 804 | static __always_inline void memcg_kmem_uncharge(struct page *page, int order) |
834 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | ||
835 | { | 805 | { |
836 | if (memcg_kmem_enabled() && memcg) | 806 | if (memcg_kmem_enabled()) |
837 | __memcg_kmem_commit_charge(page, memcg, order); | 807 | __memcg_kmem_uncharge(page, order); |
838 | } | 808 | } |
839 | 809 | ||
840 | /** | 810 | /** |
@@ -847,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | |||
847 | static __always_inline struct kmem_cache * | 817 | static __always_inline struct kmem_cache * |
848 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | 818 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) |
849 | { | 819 | { |
850 | if (!memcg_kmem_enabled()) | 820 | if (__memcg_kmem_bypass(gfp)) |
851 | return cachep; | ||
852 | if (gfp & __GFP_NOACCOUNT) | ||
853 | return cachep; | ||
854 | if (gfp & __GFP_NOFAIL) | ||
855 | return cachep; | 821 | return cachep; |
856 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) | ||
857 | return cachep; | ||
858 | if (unlikely(fatal_signal_pending(current))) | ||
859 | return cachep; | ||
860 | |||
861 | return __memcg_kmem_get_cache(cachep); | 822 | return __memcg_kmem_get_cache(cachep); |
862 | } | 823 | } |
863 | 824 | ||
@@ -866,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | |||
866 | if (memcg_kmem_enabled()) | 827 | if (memcg_kmem_enabled()) |
867 | __memcg_kmem_put_cache(cachep); | 828 | __memcg_kmem_put_cache(cachep); |
868 | } | 829 | } |
869 | |||
870 | static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) | ||
871 | { | ||
872 | if (!memcg_kmem_enabled()) | ||
873 | return NULL; | ||
874 | return __mem_cgroup_from_kmem(ptr); | ||
875 | } | ||
876 | #else | 830 | #else |
877 | #define for_each_memcg_cache_index(_idx) \ | 831 | #define for_each_memcg_cache_index(_idx) \ |
878 | for (; NULL; ) | 832 | for (; NULL; ) |
@@ -887,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) | |||
887 | return false; | 841 | return false; |
888 | } | 842 | } |
889 | 843 | ||
890 | static inline bool | 844 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) |
891 | memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | ||
892 | { | 845 | { |
893 | return true; | 846 | return 0; |
894 | } | 847 | } |
895 | 848 | ||
896 | static inline void memcg_kmem_uncharge_pages(struct page *page, int order) | 849 | static inline void memcg_kmem_uncharge(struct page *page, int order) |
897 | { | ||
898 | } | ||
899 | |||
900 | static inline void | ||
901 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | ||
902 | { | 850 | { |
903 | } | 851 | } |
904 | 852 | ||
@@ -924,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
924 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | 872 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) |
925 | { | 873 | { |
926 | } | 874 | } |
927 | |||
928 | static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) | ||
929 | { | ||
930 | return NULL; | ||
931 | } | ||
932 | #endif /* CONFIG_MEMCG_KMEM */ | 875 | #endif /* CONFIG_MEMCG_KMEM */ |
933 | #endif /* _LINUX_MEMCONTROL_H */ | 876 | #endif /* _LINUX_MEMCONTROL_H */ |
934 | |||
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 8f60e899b33c..2ea574ff9714 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -11,6 +11,7 @@ struct zone; | |||
11 | struct pglist_data; | 11 | struct pglist_data; |
12 | struct mem_section; | 12 | struct mem_section; |
13 | struct memory_block; | 13 | struct memory_block; |
14 | struct resource; | ||
14 | 15 | ||
15 | #ifdef CONFIG_MEMORY_HOTPLUG | 16 | #ifdef CONFIG_MEMORY_HOTPLUG |
16 | 17 | ||
@@ -266,6 +267,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} | |||
266 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | 267 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, |
267 | void *arg, int (*func)(struct memory_block *, void *)); | 268 | void *arg, int (*func)(struct memory_block *, void *)); |
268 | extern int add_memory(int nid, u64 start, u64 size); | 269 | extern int add_memory(int nid, u64 start, u64 size); |
270 | extern int add_memory_resource(int nid, struct resource *resource); | ||
269 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, | 271 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, |
270 | bool for_device); | 272 | bool for_device); |
271 | extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); | 273 | extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); |
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index 8fcad63fab55..d409ceb2231e 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h | |||
@@ -21,6 +21,7 @@ enum { | |||
21 | CHIP_INVALID = 0, | 21 | CHIP_INVALID = 0, |
22 | CHIP_PM800, | 22 | CHIP_PM800, |
23 | CHIP_PM805, | 23 | CHIP_PM805, |
24 | CHIP_PM860, | ||
24 | CHIP_MAX, | 25 | CHIP_MAX, |
25 | }; | 26 | }; |
26 | 27 | ||
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 1dc385850ba2..57b45caaea80 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h | |||
@@ -124,6 +124,9 @@ struct arizona_pdata { | |||
124 | /** Channel to use for headphone detection */ | 124 | /** Channel to use for headphone detection */ |
125 | unsigned int hpdet_channel; | 125 | unsigned int hpdet_channel; |
126 | 126 | ||
127 | /** Use software comparison to determine mic presence */ | ||
128 | bool micd_software_compare; | ||
129 | |||
127 | /** Extra debounce timeout used during initial mic detection (ms) */ | 130 | /** Extra debounce timeout used during initial mic detection (ms) */ |
128 | unsigned int micd_detect_debounce; | 131 | unsigned int micd_detect_debounce; |
129 | 132 | ||
@@ -181,6 +184,9 @@ struct arizona_pdata { | |||
181 | 184 | ||
182 | /** GPIO for primary IRQ (used for edge triggered emulation) */ | 185 | /** GPIO for primary IRQ (used for edge triggered emulation) */ |
183 | int irq_gpio; | 186 | int irq_gpio; |
187 | |||
188 | /** General purpose switch control */ | ||
189 | unsigned int gpsw; | ||
184 | }; | 190 | }; |
185 | 191 | ||
186 | #endif | 192 | #endif |
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index fdd70b3c7418..cd7e78eae006 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h | |||
@@ -242,6 +242,7 @@ | |||
242 | #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 | 242 | #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 |
243 | #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 | 243 | #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 |
244 | #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 | 244 | #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 |
245 | #define ARIZONA_HP_TEST_CTRL_1 0x4A4 | ||
245 | #define ARIZONA_SPK_CTRL_2 0x4B5 | 246 | #define ARIZONA_SPK_CTRL_2 0x4B5 |
246 | #define ARIZONA_SPK_CTRL_3 0x4B6 | 247 | #define ARIZONA_SPK_CTRL_3 0x4B6 |
247 | #define ARIZONA_DAC_COMP_1 0x4DC | 248 | #define ARIZONA_DAC_COMP_1 0x4DC |
@@ -1064,6 +1065,16 @@ | |||
1064 | #define ARIZONA_CLOCK_CONTROL 0xF00 | 1065 | #define ARIZONA_CLOCK_CONTROL 0xF00 |
1065 | #define ARIZONA_ANC_SRC 0xF01 | 1066 | #define ARIZONA_ANC_SRC 0xF01 |
1066 | #define ARIZONA_DSP_STATUS 0xF02 | 1067 | #define ARIZONA_DSP_STATUS 0xF02 |
1068 | #define ARIZONA_ANC_COEFF_START 0xF08 | ||
1069 | #define ARIZONA_ANC_COEFF_END 0xF12 | ||
1070 | #define ARIZONA_FCL_FILTER_CONTROL 0xF15 | ||
1071 | #define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17 | ||
1072 | #define ARIZONA_FCL_COEFF_START 0xF18 | ||
1073 | #define ARIZONA_FCL_COEFF_END 0xF69 | ||
1074 | #define ARIZONA_FCR_FILTER_CONTROL 0xF70 | ||
1075 | #define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72 | ||
1076 | #define ARIZONA_FCR_COEFF_START 0xF73 | ||
1077 | #define ARIZONA_FCR_COEFF_END 0xFC4 | ||
1067 | #define ARIZONA_DSP1_CONTROL_1 0x1100 | 1078 | #define ARIZONA_DSP1_CONTROL_1 0x1100 |
1068 | #define ARIZONA_DSP1_CLOCKING_1 0x1101 | 1079 | #define ARIZONA_DSP1_CLOCKING_1 0x1101 |
1069 | #define ARIZONA_DSP1_STATUS_1 0x1104 | 1080 | #define ARIZONA_DSP1_STATUS_1 0x1104 |
@@ -2359,9 +2370,9 @@ | |||
2359 | #define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ | 2370 | #define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ |
2360 | #define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ | 2371 | #define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ |
2361 | #define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ | 2372 | #define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ |
2362 | #define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */ | 2373 | #define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */ |
2363 | #define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */ | 2374 | #define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */ |
2364 | #define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */ | 2375 | #define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */ |
2365 | 2376 | ||
2366 | /* | 2377 | /* |
2367 | * R667 (0x29B) - Headphone Detect 1 | 2378 | * R667 (0x29B) - Headphone Detect 1 |
@@ -3702,6 +3713,13 @@ | |||
3702 | #define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ | 3713 | #define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ |
3703 | 3714 | ||
3704 | /* | 3715 | /* |
3716 | * R1188 (0x4A4) HP Test Ctrl 1 | ||
3717 | */ | ||
3718 | #define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */ | ||
3719 | #define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */ | ||
3720 | #define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */ | ||
3721 | |||
3722 | /* | ||
3705 | * R1244 (0x4DC) - DAC comp 1 | 3723 | * R1244 (0x4DC) - DAC comp 1 |
3706 | */ | 3724 | */ |
3707 | #define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ | 3725 | #define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ |
@@ -8043,6 +8061,66 @@ | |||
8043 | #define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ | 8061 | #define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ |
8044 | 8062 | ||
8045 | /* | 8063 | /* |
8064 | * R3840 (0xF00) - Clock Control | ||
8065 | */ | ||
8066 | #define ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */ | ||
8067 | #define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */ | ||
8068 | #define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */ | ||
8069 | #define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */ | ||
8070 | #define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */ | ||
8071 | #define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */ | ||
8072 | #define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */ | ||
8073 | #define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */ | ||
8074 | #define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */ | ||
8075 | #define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */ | ||
8076 | #define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */ | ||
8077 | #define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */ | ||
8078 | #define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */ | ||
8079 | #define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */ | ||
8080 | #define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */ | ||
8081 | #define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */ | ||
8082 | #define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */ | ||
8083 | #define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */ | ||
8084 | #define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */ | ||
8085 | #define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */ | ||
8086 | #define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */ | ||
8087 | #define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */ | ||
8088 | #define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */ | ||
8089 | #define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */ | ||
8090 | #define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */ | ||
8091 | #define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */ | ||
8092 | #define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */ | ||
8093 | #define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */ | ||
8094 | #define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */ | ||
8095 | #define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */ | ||
8096 | #define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */ | ||
8097 | #define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */ | ||
8098 | |||
8099 | /* | ||
8100 | * R3841 (0xF01) - ANC SRC | ||
8101 | */ | ||
8102 | #define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [4:6] */ | ||
8103 | #define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [4:6] */ | ||
8104 | #define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [4:6] */ | ||
8105 | #define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [0:2] */ | ||
8106 | #define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [0:2] */ | ||
8107 | #define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [0:2] */ | ||
8108 | |||
8109 | /* | ||
8110 | * R3863 (0xF17) - FCL ADC Reformatter Control | ||
8111 | */ | ||
8112 | #define ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [2:3] */ | ||
8113 | #define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [2:3] */ | ||
8114 | #define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [2:3] */ | ||
8115 | |||
8116 | /* | ||
8117 | * R3954 (0xF72) - FCR ADC Reformatter Control | ||
8118 | */ | ||
8119 | #define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [2:3] */ | ||
8120 | #define ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [2:3] */ | ||
8121 | #define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [2:3] */ | ||
8122 | |||
8123 | /* | ||
8046 | * R4352 (0x1100) - DSP1 Control 1 | 8124 | * R4352 (0x1100) - DSP1 Control 1 |
8047 | */ | 8125 | */ |
8048 | #define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ | 8126 | #define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ |
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index cc8ad1e1a307..b24c771cebd5 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef __LINUX_MFD_AXP20X_H | 11 | #ifndef __LINUX_MFD_AXP20X_H |
12 | #define __LINUX_MFD_AXP20X_H | 12 | #define __LINUX_MFD_AXP20X_H |
13 | 13 | ||
14 | #include <linux/regmap.h> | ||
15 | |||
14 | enum { | 16 | enum { |
15 | AXP152_ID = 0, | 17 | AXP152_ID = 0, |
16 | AXP202_ID, | 18 | AXP202_ID, |
@@ -438,4 +440,26 @@ struct axp288_extcon_pdata { | |||
438 | struct gpio_desc *gpio_mux_cntl; | 440 | struct gpio_desc *gpio_mux_cntl; |
439 | }; | 441 | }; |
440 | 442 | ||
443 | /* generic helper function for reading 9-16 bit wide regs */ | ||
444 | static inline int axp20x_read_variable_width(struct regmap *regmap, | ||
445 | unsigned int reg, unsigned int width) | ||
446 | { | ||
447 | unsigned int reg_val, result; | ||
448 | int err; | ||
449 | |||
450 | err = regmap_read(regmap, reg, ®_val); | ||
451 | if (err) | ||
452 | return err; | ||
453 | |||
454 | result = reg_val << (width - 8); | ||
455 | |||
456 | err = regmap_read(regmap, reg + 1, ®_val); | ||
457 | if (err) | ||
458 | return err; | ||
459 | |||
460 | result |= reg_val; | ||
461 | |||
462 | return result; | ||
463 | } | ||
464 | |||
441 | #endif /* __LINUX_MFD_AXP20X_H */ | 465 | #endif /* __LINUX_MFD_AXP20X_H */ |
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index a76bc100bf97..27dac3ff18b9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h | |||
@@ -18,6 +18,12 @@ | |||
18 | 18 | ||
19 | struct irq_domain; | 19 | struct irq_domain; |
20 | 20 | ||
21 | /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ | ||
22 | struct mfd_cell_acpi_match { | ||
23 | const char *pnpid; | ||
24 | const unsigned long long adr; | ||
25 | }; | ||
26 | |||
21 | /* | 27 | /* |
22 | * This struct describes the MFD part ("cell"). | 28 | * This struct describes the MFD part ("cell"). |
23 | * After registration the copy of this structure will become the platform data | 29 | * After registration the copy of this structure will become the platform data |
@@ -44,8 +50,8 @@ struct mfd_cell { | |||
44 | */ | 50 | */ |
45 | const char *of_compatible; | 51 | const char *of_compatible; |
46 | 52 | ||
47 | /* Matches ACPI PNP id, either _HID or _CID */ | 53 | /* Matches ACPI */ |
48 | const char *acpi_pnpid; | 54 | const struct mfd_cell_acpi_match *acpi_match; |
49 | 55 | ||
50 | /* | 56 | /* |
51 | * These resources can be specified relative to the parent device. | 57 | * These resources can be specified relative to the parent device. |
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index da72671a42fa..494682ce4bf3 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h | |||
@@ -255,5 +255,6 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev); | |||
255 | /* sysfs stuff */ | 255 | /* sysfs stuff */ |
256 | extern struct attribute_group cros_ec_attr_group; | 256 | extern struct attribute_group cros_ec_attr_group; |
257 | extern struct attribute_group cros_ec_lightbar_attr_group; | 257 | extern struct attribute_group cros_ec_lightbar_attr_group; |
258 | extern struct attribute_group cros_ec_vbc_attr_group; | ||
258 | 259 | ||
259 | #endif /* __LINUX_MFD_CROS_EC_H */ | 260 | #endif /* __LINUX_MFD_CROS_EC_H */ |
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index c4dd3a8add21..5010f978725c 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h | |||
@@ -65,6 +65,9 @@ | |||
65 | #define DA9052_GPIO_2_3_REG 22 | 65 | #define DA9052_GPIO_2_3_REG 22 |
66 | #define DA9052_GPIO_4_5_REG 23 | 66 | #define DA9052_GPIO_4_5_REG 23 |
67 | #define DA9052_GPIO_6_7_REG 24 | 67 | #define DA9052_GPIO_6_7_REG 24 |
68 | #define DA9052_GPIO_8_9_REG 25 | ||
69 | #define DA9052_GPIO_10_11_REG 26 | ||
70 | #define DA9052_GPIO_12_13_REG 27 | ||
68 | #define DA9052_GPIO_14_15_REG 28 | 71 | #define DA9052_GPIO_14_15_REG 28 |
69 | 72 | ||
70 | /* POWER SEQUENCER CONTROL REGISTERS */ | 73 | /* POWER SEQUENCER CONTROL REGISTERS */ |
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h index 76e668933a77..1bf50caeb9fa 100644 --- a/include/linux/mfd/da9150/core.h +++ b/include/linux/mfd/da9150/core.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define __DA9150_CORE_H | 15 | #define __DA9150_CORE_H |
16 | 16 | ||
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/i2c.h> | ||
18 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
19 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
20 | 21 | ||
@@ -46,23 +47,39 @@ | |||
46 | #define DA9150_IRQ_GPADC 19 | 47 | #define DA9150_IRQ_GPADC 19 |
47 | #define DA9150_IRQ_WKUP 20 | 48 | #define DA9150_IRQ_WKUP 20 |
48 | 49 | ||
50 | /* I2C sub-device address */ | ||
51 | #define DA9150_QIF_I2C_ADDR_LSB 0x5 | ||
52 | |||
53 | struct da9150_fg_pdata { | ||
54 | u32 update_interval; /* msecs */ | ||
55 | u8 warn_soc_lvl; /* % value */ | ||
56 | u8 crit_soc_lvl; /* % value */ | ||
57 | }; | ||
58 | |||
49 | struct da9150_pdata { | 59 | struct da9150_pdata { |
50 | int irq_base; | 60 | int irq_base; |
61 | struct da9150_fg_pdata *fg_pdata; | ||
51 | }; | 62 | }; |
52 | 63 | ||
53 | struct da9150 { | 64 | struct da9150 { |
54 | struct device *dev; | 65 | struct device *dev; |
55 | struct regmap *regmap; | 66 | struct regmap *regmap; |
67 | struct i2c_client *core_qif; | ||
68 | |||
56 | struct regmap_irq_chip_data *regmap_irq_data; | 69 | struct regmap_irq_chip_data *regmap_irq_data; |
57 | int irq; | 70 | int irq; |
58 | int irq_base; | 71 | int irq_base; |
59 | }; | 72 | }; |
60 | 73 | ||
61 | /* Device I/O */ | 74 | /* Device I/O - Query Interface for FG and standard register access */ |
75 | void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf); | ||
76 | void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf); | ||
77 | |||
62 | u8 da9150_reg_read(struct da9150 *da9150, u16 reg); | 78 | u8 da9150_reg_read(struct da9150 *da9150, u16 reg); |
63 | void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); | 79 | void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); |
64 | void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); | 80 | void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); |
65 | 81 | ||
66 | void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); | 82 | void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); |
67 | void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); | 83 | void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); |
84 | |||
68 | #endif /* __DA9150_CORE_H */ | 85 | #endif /* __DA9150_CORE_H */ |
diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_bxtwc.h new file mode 100644 index 000000000000..1a0ee9d6efe9 --- /dev/null +++ b/include/linux/mfd/intel_bxtwc.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC | ||
3 | * | ||
4 | * Copyright (C) 2015 Intel Corporation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/mfd/intel_soc_pmic.h> | ||
17 | |||
18 | #ifndef __INTEL_BXTWC_H__ | ||
19 | #define __INTEL_BXTWC_H__ | ||
20 | |||
21 | /* BXT WC devices */ | ||
22 | #define BXTWC_DEVICE1_ADDR 0x4E | ||
23 | #define BXTWC_DEVICE2_ADDR 0x4F | ||
24 | #define BXTWC_DEVICE3_ADDR 0x5E | ||
25 | |||
26 | /* device1 Registers */ | ||
27 | #define BXTWC_CHIPID 0x4E00 | ||
28 | #define BXTWC_CHIPVER 0x4E01 | ||
29 | |||
30 | #define BXTWC_SCHGRIRQ0_ADDR 0x5E1A | ||
31 | #define BXTWC_CHGRCTRL0_ADDR 0x5E16 | ||
32 | #define BXTWC_CHGRCTRL1_ADDR 0x5E17 | ||
33 | #define BXTWC_CHGRCTRL2_ADDR 0x5E18 | ||
34 | #define BXTWC_CHGRSTATUS_ADDR 0x5E19 | ||
35 | #define BXTWC_THRMBATZONE_ADDR 0x4F22 | ||
36 | |||
37 | #define BXTWC_USBPATH_ADDR 0x5E19 | ||
38 | #define BXTWC_USBPHYCTRL_ADDR 0x5E07 | ||
39 | #define BXTWC_USBIDCTRL_ADDR 0x5E05 | ||
40 | #define BXTWC_USBIDEN_MASK 0x01 | ||
41 | #define BXTWC_USBIDSTAT_ADDR 0x00FF | ||
42 | #define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 | ||
43 | |||
44 | #define BXTWC_DBGUSBBC1_ADDR 0x5FE0 | ||
45 | #define BXTWC_DBGUSBBC2_ADDR 0x5FE1 | ||
46 | #define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 | ||
47 | |||
48 | #define BXTWC_WAKESRC_ADDR 0x4E22 | ||
49 | #define BXTWC_WAKESRC2_ADDR 0x4EE5 | ||
50 | #define BXTWC_CHRTTADDR_ADDR 0x5E22 | ||
51 | #define BXTWC_CHRTTDATA_ADDR 0x5E23 | ||
52 | |||
53 | #define BXTWC_STHRMIRQ0_ADDR 0x4F19 | ||
54 | #define WC_MTHRMIRQ1_ADDR 0x4E12 | ||
55 | #define WC_STHRMIRQ1_ADDR 0x4F1A | ||
56 | #define WC_STHRMIRQ2_ADDR 0x4F1B | ||
57 | |||
58 | #define BXTWC_THRMZN0H_ADDR 0x4F44 | ||
59 | #define BXTWC_THRMZN0L_ADDR 0x4F45 | ||
60 | #define BXTWC_THRMZN1H_ADDR 0x4F46 | ||
61 | #define BXTWC_THRMZN1L_ADDR 0x4F47 | ||
62 | #define BXTWC_THRMZN2H_ADDR 0x4F48 | ||
63 | #define BXTWC_THRMZN2L_ADDR 0x4F49 | ||
64 | #define BXTWC_THRMZN3H_ADDR 0x4F4A | ||
65 | #define BXTWC_THRMZN3L_ADDR 0x4F4B | ||
66 | #define BXTWC_THRMZN4H_ADDR 0x4F4C | ||
67 | #define BXTWC_THRMZN4L_ADDR 0x4F4D | ||
68 | |||
69 | #endif | ||
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index abcbfcf32d10..cf619dbeace2 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h | |||
@@ -25,6 +25,8 @@ struct intel_soc_pmic { | |||
25 | int irq; | 25 | int irq; |
26 | struct regmap *regmap; | 26 | struct regmap *regmap; |
27 | struct regmap_irq_chip_data *irq_chip_data; | 27 | struct regmap_irq_chip_data *irq_chip_data; |
28 | struct regmap_irq_chip_data *irq_chip_data_level2; | ||
29 | struct device *dev; | ||
28 | }; | 30 | }; |
29 | 31 | ||
30 | #endif /* __INTEL_SOC_PMIC_H__ */ | 32 | #endif /* __INTEL_SOC_PMIC_H__ */ |
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index ff843e7ca23d..7eb7cbac0a9a 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h | |||
@@ -589,6 +589,7 @@ | |||
589 | #define FORCE_ASPM_NO_ASPM 0x00 | 589 | #define FORCE_ASPM_NO_ASPM 0x00 |
590 | #define PM_CLK_FORCE_CTL 0xFE58 | 590 | #define PM_CLK_FORCE_CTL 0xFE58 |
591 | #define FUNC_FORCE_CTL 0xFE59 | 591 | #define FUNC_FORCE_CTL 0xFE59 |
592 | #define FUNC_FORCE_UPME_XMT_DBG 0x02 | ||
592 | #define PERST_GLITCH_WIDTH 0xFE5C | 593 | #define PERST_GLITCH_WIDTH 0xFE5C |
593 | #define CHANGE_LINK_STATE 0xFE5B | 594 | #define CHANGE_LINK_STATE 0xFE5B |
594 | #define RESET_LOAD_REG 0xFE5E | 595 | #define RESET_LOAD_REG 0xFE5E |
@@ -712,6 +713,7 @@ | |||
712 | #define PHY_RCR1 0x02 | 713 | #define PHY_RCR1 0x02 |
713 | #define PHY_RCR1_ADP_TIME_4 0x0400 | 714 | #define PHY_RCR1_ADP_TIME_4 0x0400 |
714 | #define PHY_RCR1_VCO_COARSE 0x001F | 715 | #define PHY_RCR1_VCO_COARSE 0x001F |
716 | #define PHY_RCR1_INIT_27S 0x0A1F | ||
715 | #define PHY_SSCCR2 0x02 | 717 | #define PHY_SSCCR2 0x02 |
716 | #define PHY_SSCCR2_PLL_NCODE 0x0A00 | 718 | #define PHY_SSCCR2_PLL_NCODE 0x0A00 |
717 | #define PHY_SSCCR2_TIME0 0x001C | 719 | #define PHY_SSCCR2_TIME0 0x001C |
@@ -724,6 +726,7 @@ | |||
724 | #define PHY_RCR2_FREQSEL_12 0x0040 | 726 | #define PHY_RCR2_FREQSEL_12 0x0040 |
725 | #define PHY_RCR2_CDR_SC_12P 0x0010 | 727 | #define PHY_RCR2_CDR_SC_12P 0x0010 |
726 | #define PHY_RCR2_CALIB_LATE 0x0002 | 728 | #define PHY_RCR2_CALIB_LATE 0x0002 |
729 | #define PHY_RCR2_INIT_27S 0xC152 | ||
727 | #define PHY_SSCCR3 0x03 | 730 | #define PHY_SSCCR3 0x03 |
728 | #define PHY_SSCCR3_STEP_IN 0x2740 | 731 | #define PHY_SSCCR3_STEP_IN 0x2740 |
729 | #define PHY_SSCCR3_CHECK_DELAY 0x0008 | 732 | #define PHY_SSCCR3_CHECK_DELAY 0x0008 |
@@ -800,12 +803,14 @@ | |||
800 | #define PHY_ANA1A_RXT_BIST 0x0500 | 803 | #define PHY_ANA1A_RXT_BIST 0x0500 |
801 | #define PHY_ANA1A_TXR_BIST 0x0040 | 804 | #define PHY_ANA1A_TXR_BIST 0x0040 |
802 | #define PHY_ANA1A_REV 0x0006 | 805 | #define PHY_ANA1A_REV 0x0006 |
806 | #define PHY_FLD0_INIT_27S 0x2546 | ||
803 | #define PHY_FLD1 0x1B | 807 | #define PHY_FLD1 0x1B |
804 | #define PHY_FLD2 0x1C | 808 | #define PHY_FLD2 0x1C |
805 | #define PHY_FLD3 0x1D | 809 | #define PHY_FLD3 0x1D |
806 | #define PHY_FLD3_TIMER_4 0x0800 | 810 | #define PHY_FLD3_TIMER_4 0x0800 |
807 | #define PHY_FLD3_TIMER_6 0x0020 | 811 | #define PHY_FLD3_TIMER_6 0x0020 |
808 | #define PHY_FLD3_RXDELINK 0x0004 | 812 | #define PHY_FLD3_RXDELINK 0x0004 |
813 | #define PHY_FLD3_INIT_27S 0x0004 | ||
809 | #define PHY_ANA1D 0x1D | 814 | #define PHY_ANA1D 0x1D |
810 | #define PHY_ANA1D_DEBUG_ADDR 0x0004 | 815 | #define PHY_ANA1D_DEBUG_ADDR 0x0004 |
811 | #define _PHY_FLD0 0x1D | 816 | #define _PHY_FLD0 0x1D |
@@ -824,6 +829,7 @@ | |||
824 | #define PHY_FLD4_BER_COUNT 0x00E0 | 829 | #define PHY_FLD4_BER_COUNT 0x00E0 |
825 | #define PHY_FLD4_BER_TIMER 0x000A | 830 | #define PHY_FLD4_BER_TIMER 0x000A |
826 | #define PHY_FLD4_BER_CHK_EN 0x0001 | 831 | #define PHY_FLD4_BER_CHK_EN 0x0001 |
832 | #define PHY_FLD4_INIT_27S 0x5C7F | ||
827 | #define PHY_DIG1E 0x1E | 833 | #define PHY_DIG1E 0x1E |
828 | #define PHY_DIG1E_REV 0x4000 | 834 | #define PHY_DIG1E_REV 0x4000 |
829 | #define PHY_DIG1E_D0_X_D1 0x1000 | 835 | #define PHY_DIG1E_D0_X_D1 0x1000 |
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 75115384f3fc..a06098639399 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h | |||
@@ -132,6 +132,10 @@ struct sec_platform_data { | |||
132 | int buck2_init; | 132 | int buck2_init; |
133 | int buck3_init; | 133 | int buck3_init; |
134 | int buck4_init; | 134 | int buck4_init; |
135 | /* Whether or not manually set PWRHOLD to low during shutdown. */ | ||
136 | bool manual_poweroff; | ||
137 | /* Disable the WRSTBI (buck voltage warm reset) when probing? */ | ||
138 | bool disable_wrstbi; | ||
135 | }; | 139 | }; |
136 | 140 | ||
137 | /** | 141 | /** |
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 7981a9d77d3f..b288965e8101 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h | |||
@@ -179,6 +179,7 @@ enum s2mps11_regulators { | |||
179 | #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) | 179 | #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) |
180 | #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ | 180 | #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ |
181 | 181 | ||
182 | #define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) | ||
182 | 183 | ||
183 | #define S2MPS11_BUCK2_RAMP_SHIFT 6 | 184 | #define S2MPS11_BUCK2_RAMP_SHIFT 6 |
184 | #define S2MPS11_BUCK34_RAMP_SHIFT 4 | 185 | #define S2MPS11_BUCK34_RAMP_SHIFT 4 |
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index b1fd675fa36f..239e977ba45d 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h | |||
@@ -184,5 +184,6 @@ enum s2mps13_regulators { | |||
184 | * Let's assume that default value will be set. | 184 | * Let's assume that default value will be set. |
185 | */ | 185 | */ |
186 | #define S2MPS13_BUCK_RAMP_DELAY 12500 | 186 | #define S2MPS13_BUCK_RAMP_DELAY 12500 |
187 | #define S2MPS13_REG_WRSTBI_MASK BIT(5) | ||
187 | 188 | ||
188 | #endif /* __LINUX_MFD_S2MPS13_H */ | 189 | #endif /* __LINUX_MFD_S2MPS13_H */ |
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h new file mode 100644 index 000000000000..4585d6105d68 --- /dev/null +++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef __LINUX_IMX7_IOMUXC_GPR_H | ||
10 | #define __LINUX_IMX7_IOMUXC_GPR_H | ||
11 | |||
12 | #define IOMUXC_GPR0 0x00 | ||
13 | #define IOMUXC_GPR1 0x04 | ||
14 | #define IOMUXC_GPR2 0x08 | ||
15 | #define IOMUXC_GPR3 0x0c | ||
16 | #define IOMUXC_GPR4 0x10 | ||
17 | #define IOMUXC_GPR5 0x14 | ||
18 | #define IOMUXC_GPR6 0x18 | ||
19 | #define IOMUXC_GPR7 0x1c | ||
20 | #define IOMUXC_GPR8 0x20 | ||
21 | #define IOMUXC_GPR9 0x24 | ||
22 | #define IOMUXC_GPR10 0x28 | ||
23 | #define IOMUXC_GPR11 0x2c | ||
24 | #define IOMUXC_GPR12 0x30 | ||
25 | #define IOMUXC_GPR13 0x34 | ||
26 | #define IOMUXC_GPR14 0x38 | ||
27 | #define IOMUXC_GPR15 0x3c | ||
28 | #define IOMUXC_GPR16 0x40 | ||
29 | #define IOMUXC_GPR17 0x44 | ||
30 | #define IOMUXC_GPR18 0x48 | ||
31 | #define IOMUXC_GPR19 0x4c | ||
32 | #define IOMUXC_GPR20 0x50 | ||
33 | #define IOMUXC_GPR21 0x54 | ||
34 | #define IOMUXC_GPR22 0x58 | ||
35 | |||
36 | /* For imx7d iomux gpr register field define */ | ||
37 | #define IMX7D_GPR1_IRQ_MASK (0x1 << 12) | ||
38 | #define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13) | ||
39 | #define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14) | ||
40 | #define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13) | ||
41 | #define IMX7D_GPR1_ENET1_CLK_DIR_MASK (0x1 << 17) | ||
42 | #define IMX7D_GPR1_ENET2_CLK_DIR_MASK (0x1 << 18) | ||
43 | #define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17) | ||
44 | |||
45 | #define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) | ||
46 | |||
47 | #endif /* __LINUX_IMX7_IOMUXC_GPR_H */ | ||
diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h index 386743dd931c..8bc51180800a 100644 --- a/include/linux/mfd/tps6105x.h +++ b/include/linux/mfd/tps6105x.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define MFD_TPS6105X_H | 10 | #define MFD_TPS6105X_H |
11 | 11 | ||
12 | #include <linux/i2c.h> | 12 | #include <linux/i2c.h> |
13 | #include <linux/regmap.h> | ||
13 | #include <linux/regulator/machine.h> | 14 | #include <linux/regulator/machine.h> |
14 | 15 | ||
15 | /* | 16 | /* |
@@ -82,20 +83,15 @@ struct tps6105x_platform_data { | |||
82 | 83 | ||
83 | /** | 84 | /** |
84 | * struct tps6105x - state holder for the TPS6105x drivers | 85 | * struct tps6105x - state holder for the TPS6105x drivers |
85 | * @mutex: mutex to serialize I2C accesses | ||
86 | * @i2c_client: corresponding I2C client | 86 | * @i2c_client: corresponding I2C client |
87 | * @regulator: regulator device if used in voltage mode | 87 | * @regulator: regulator device if used in voltage mode |
88 | * @regmap: used for i2c communcation on accessing registers | ||
88 | */ | 89 | */ |
89 | struct tps6105x { | 90 | struct tps6105x { |
90 | struct tps6105x_platform_data *pdata; | 91 | struct tps6105x_platform_data *pdata; |
91 | struct mutex lock; | ||
92 | struct i2c_client *client; | 92 | struct i2c_client *client; |
93 | struct regulator_dev *regulator; | 93 | struct regulator_dev *regulator; |
94 | struct regmap *regmap; | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value); | ||
97 | extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf); | ||
98 | extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, | ||
99 | u8 bitmask, u8 bitvalues); | ||
100 | |||
101 | #endif | 97 | #endif |
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index d5b5f76d57ef..27d7c95fd0da 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h | |||
@@ -91,7 +91,8 @@ struct mbus_hw_ops { | |||
91 | 91 | ||
92 | struct mbus_device * | 92 | struct mbus_device * |
93 | mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, | 93 | mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, |
94 | struct mbus_hw_ops *hw_ops, void __iomem *mmio_va); | 94 | struct mbus_hw_ops *hw_ops, int index, |
95 | void __iomem *mmio_va); | ||
95 | void mbus_unregister_device(struct mbus_device *mbdev); | 96 | void mbus_unregister_device(struct mbus_device *mbdev); |
96 | 97 | ||
97 | int mbus_register_driver(struct mbus_driver *drv); | 98 | int mbus_register_driver(struct mbus_driver *drv); |
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 81f6e427ba6b..543037465973 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h | |||
@@ -49,6 +49,7 @@ | |||
49 | #define LOOP_CTRL_MINOR 237 | 49 | #define LOOP_CTRL_MINOR 237 |
50 | #define VHOST_NET_MINOR 238 | 50 | #define VHOST_NET_MINOR 238 |
51 | #define UHID_MINOR 239 | 51 | #define UHID_MINOR 239 |
52 | #define USERIO_MINOR 240 | ||
52 | #define MISC_DYNAMIC_MINOR 255 | 53 | #define MISC_DYNAMIC_MINOR 255 |
53 | 54 | ||
54 | struct device; | 55 | struct device; |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index baad4cb8e9b0..7501626ab529 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -214,6 +214,8 @@ enum { | |||
214 | MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, | 214 | MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, |
215 | MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29, | 215 | MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29, |
216 | MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, | 216 | MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, |
217 | MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, | ||
218 | MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, | ||
217 | }; | 219 | }; |
218 | 220 | ||
219 | enum { | 221 | enum { |
@@ -833,6 +835,7 @@ struct mlx4_dev { | |||
833 | struct mlx4_quotas quotas; | 835 | struct mlx4_quotas quotas; |
834 | struct radix_tree_root qp_table_tree; | 836 | struct radix_tree_root qp_table_tree; |
835 | u8 rev_id; | 837 | u8 rev_id; |
838 | u8 port_random_macs; | ||
836 | char board_id[MLX4_BOARD_ID_LEN]; | 839 | char board_id[MLX4_BOARD_ID_LEN]; |
837 | int numa_node; | 840 | int numa_node; |
838 | int oper_log_mgm_entry_size; | 841 | int oper_log_mgm_entry_size; |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index de45a51b3f04..fe052e234906 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -135,7 +135,10 @@ struct mlx4_rss_context { | |||
135 | 135 | ||
136 | struct mlx4_qp_path { | 136 | struct mlx4_qp_path { |
137 | u8 fl; | 137 | u8 fl; |
138 | u8 vlan_control; | 138 | union { |
139 | u8 vlan_control; | ||
140 | u8 control; | ||
141 | }; | ||
139 | u8 disable_pkey_check; | 142 | u8 disable_pkey_check; |
140 | u8 pkey_index; | 143 | u8 pkey_index; |
141 | u8 counter_index; | 144 | u8 counter_index; |
@@ -156,9 +159,16 @@ struct mlx4_qp_path { | |||
156 | }; | 159 | }; |
157 | 160 | ||
158 | enum { /* fl */ | 161 | enum { /* fl */ |
159 | MLX4_FL_CV = 1 << 6, | 162 | MLX4_FL_CV = 1 << 6, |
160 | MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2 | 163 | MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2, |
164 | MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1, | ||
165 | MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0, | ||
161 | }; | 166 | }; |
167 | |||
168 | enum { /* control */ | ||
169 | MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7, | ||
170 | }; | ||
171 | |||
162 | enum { /* vlan_control */ | 172 | enum { /* vlan_control */ |
163 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, | 173 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, |
164 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ | 174 | MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ |
@@ -254,6 +264,8 @@ enum { | |||
254 | MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, | 264 | MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, |
255 | MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, | 265 | MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, |
256 | MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, | 266 | MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, |
267 | MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32, | ||
268 | MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32, | ||
257 | }; | 269 | }; |
258 | 270 | ||
259 | enum { /* param3 */ | 271 | enum { /* param3 */ |
@@ -436,11 +448,13 @@ enum mlx4_update_qp_attr { | |||
436 | MLX4_UPDATE_QP_VSD = 1 << 1, | 448 | MLX4_UPDATE_QP_VSD = 1 << 1, |
437 | MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, | 449 | MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, |
438 | MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, | 450 | MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, |
439 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1 | 451 | MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4, |
452 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1 | ||
440 | }; | 453 | }; |
441 | 454 | ||
442 | enum mlx4_update_qp_params_flags { | 455 | enum mlx4_update_qp_params_flags { |
443 | MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, | 456 | MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0, |
457 | MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1, | ||
444 | }; | 458 | }; |
445 | 459 | ||
446 | struct mlx4_update_qp_params { | 460 | struct mlx4_update_qp_params { |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..0b473cbfa7ef 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
403 | }; | 403 | }; |
404 | 404 | ||
405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
406 | struct mlx5_inbox_hdr hdr; | ||
407 | u8 rsvd[8]; | ||
408 | }; | ||
409 | |||
410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
411 | struct mlx5_outbox_hdr hdr; | ||
412 | __be32 dump_fill_mkey; | ||
413 | __be32 resd_lkey; | ||
414 | }; | ||
415 | |||
416 | struct mlx5_cmd_layout { | 405 | struct mlx5_cmd_layout { |
417 | u8 type; | 406 | u8 type; |
418 | u8 rsvd0[3]; | 407 | u8 rsvd0[3]; |
@@ -440,7 +429,7 @@ struct health_buffer { | |||
440 | __be32 rsvd2; | 429 | __be32 rsvd2; |
441 | u8 irisc_index; | 430 | u8 irisc_index; |
442 | u8 synd; | 431 | u8 synd; |
443 | __be16 ext_sync; | 432 | __be16 ext_synd; |
444 | }; | 433 | }; |
445 | 434 | ||
446 | struct mlx5_init_seg { | 435 | struct mlx5_init_seg { |
@@ -450,7 +439,8 @@ struct mlx5_init_seg { | |||
450 | __be32 cmdq_addr_h; | 439 | __be32 cmdq_addr_h; |
451 | __be32 cmdq_addr_l_sz; | 440 | __be32 cmdq_addr_l_sz; |
452 | __be32 cmd_dbell; | 441 | __be32 cmd_dbell; |
453 | __be32 rsvd1[121]; | 442 | __be32 rsvd1[120]; |
443 | __be32 initializing; | ||
454 | struct health_buffer health; | 444 | struct health_buffer health; |
455 | __be32 rsvd2[884]; | 445 | __be32 rsvd2[884]; |
456 | __be32 health_counter; | 446 | __be32 health_counter; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..5c857f2a20d7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -391,9 +391,11 @@ struct mlx5_core_health { | |||
391 | struct health_buffer __iomem *health; | 391 | struct health_buffer __iomem *health; |
392 | __be32 __iomem *health_counter; | 392 | __be32 __iomem *health_counter; |
393 | struct timer_list timer; | 393 | struct timer_list timer; |
394 | struct list_head list; | ||
395 | u32 prev; | 394 | u32 prev; |
396 | int miss_counter; | 395 | int miss_counter; |
396 | bool sick; | ||
397 | struct workqueue_struct *wq; | ||
398 | struct work_struct work; | ||
397 | }; | 399 | }; |
398 | 400 | ||
399 | struct mlx5_cq_table { | 401 | struct mlx5_cq_table { |
@@ -485,8 +487,26 @@ struct mlx5_priv { | |||
485 | spinlock_t ctx_lock; | 487 | spinlock_t ctx_lock; |
486 | }; | 488 | }; |
487 | 489 | ||
490 | enum mlx5_device_state { | ||
491 | MLX5_DEVICE_STATE_UP, | ||
492 | MLX5_DEVICE_STATE_INTERNAL_ERROR, | ||
493 | }; | ||
494 | |||
495 | enum mlx5_interface_state { | ||
496 | MLX5_INTERFACE_STATE_DOWN, | ||
497 | MLX5_INTERFACE_STATE_UP, | ||
498 | }; | ||
499 | |||
500 | enum mlx5_pci_status { | ||
501 | MLX5_PCI_STATUS_DISABLED, | ||
502 | MLX5_PCI_STATUS_ENABLED, | ||
503 | }; | ||
504 | |||
488 | struct mlx5_core_dev { | 505 | struct mlx5_core_dev { |
489 | struct pci_dev *pdev; | 506 | struct pci_dev *pdev; |
507 | /* sync pci state */ | ||
508 | struct mutex pci_status_mutex; | ||
509 | enum mlx5_pci_status pci_status; | ||
490 | u8 rev_id; | 510 | u8 rev_id; |
491 | char board_id[MLX5_BOARD_ID_LEN]; | 511 | char board_id[MLX5_BOARD_ID_LEN]; |
492 | struct mlx5_cmd cmd; | 512 | struct mlx5_cmd cmd; |
@@ -495,6 +515,10 @@ struct mlx5_core_dev { | |||
495 | u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; | 515 | u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; |
496 | phys_addr_t iseg_base; | 516 | phys_addr_t iseg_base; |
497 | struct mlx5_init_seg __iomem *iseg; | 517 | struct mlx5_init_seg __iomem *iseg; |
518 | enum mlx5_device_state state; | ||
519 | /* sync interface state */ | ||
520 | struct mutex intf_state_mutex; | ||
521 | enum mlx5_interface_state interface_state; | ||
498 | void (*event) (struct mlx5_core_dev *dev, | 522 | void (*event) (struct mlx5_core_dev *dev, |
499 | enum mlx5_dev_event event, | 523 | enum mlx5_dev_event event, |
500 | unsigned long param); | 524 | unsigned long param); |
@@ -676,8 +700,8 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); | |||
676 | int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); | 700 | int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); |
677 | int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); | 701 | int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); |
678 | void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); | 702 | void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); |
679 | void mlx5_health_cleanup(void); | 703 | void mlx5_health_cleanup(struct mlx5_core_dev *dev); |
680 | void __init mlx5_health_init(void); | 704 | int mlx5_health_init(struct mlx5_core_dev *dev); |
681 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); | 705 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); |
682 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); | 706 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); |
683 | int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, | 707 | int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, |
@@ -731,7 +755,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); | |||
731 | #endif | 755 | #endif |
732 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); | 756 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); |
733 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); | 757 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); |
734 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); | 758 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); |
735 | void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); | 759 | void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); |
736 | int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, | 760 | int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, |
737 | int nent, u64 mask, const char *name, struct mlx5_uar *uar); | 761 | int nent, u64 mask, const char *name, struct mlx5_uar *uar); |
@@ -802,6 +826,11 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); | |||
802 | int mlx5_query_odp_caps(struct mlx5_core_dev *dev, | 826 | int mlx5_query_odp_caps(struct mlx5_core_dev *dev, |
803 | struct mlx5_odp_caps *odp_caps); | 827 | struct mlx5_odp_caps *odp_caps); |
804 | 828 | ||
829 | static inline int fw_initializing(struct mlx5_core_dev *dev) | ||
830 | { | ||
831 | return ioread32be(&dev->iseg->initializing) >> 31; | ||
832 | } | ||
833 | |||
805 | static inline u32 mlx5_mkey_to_idx(u32 mkey) | 834 | static inline u32 mlx5_mkey_to_idx(u32 mkey) |
806 | { | 835 | { |
807 | return mkey >> 8; | 836 | return mkey >> 8; |
@@ -845,7 +874,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
845 | int mlx5_register_interface(struct mlx5_interface *intf); | 874 | int mlx5_register_interface(struct mlx5_interface *intf); |
846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 875 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 876 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
849 | 877 | ||
850 | struct mlx5_profile { | 878 | struct mlx5_profile { |
851 | u64 mask; | 879 | u64 mask; |
@@ -866,4 +894,8 @@ static inline int mlx5_get_gid_table_len(u16 param) | |||
866 | return 8 * (1 << param); | 894 | return 8 * (1 << param); |
867 | } | 895 | } |
868 | 896 | ||
897 | enum { | ||
898 | MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, | ||
899 | }; | ||
900 | |||
869 | #endif /* MLX5_DRIVER_H */ | 901 | #endif /* MLX5_DRIVER_H */ |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dd2097455a2e..1565324eb620 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | |||
453 | u8 lro_cap[0x1]; | 453 | u8 lro_cap[0x1]; |
454 | u8 lro_psh_flag[0x1]; | 454 | u8 lro_psh_flag[0x1]; |
455 | u8 lro_time_stamp[0x1]; | 455 | u8 lro_time_stamp[0x1]; |
456 | u8 reserved_0[0x6]; | 456 | u8 reserved_0[0x3]; |
457 | u8 self_lb_en_modifiable[0x1]; | ||
458 | u8 reserved_1[0x2]; | ||
457 | u8 max_lso_cap[0x5]; | 459 | u8 max_lso_cap[0x5]; |
458 | u8 reserved_1[0x4]; | 460 | u8 reserved_2[0x4]; |
459 | u8 rss_ind_tbl_cap[0x4]; | 461 | u8 rss_ind_tbl_cap[0x4]; |
460 | u8 reserved_2[0x3]; | 462 | u8 reserved_3[0x3]; |
461 | u8 tunnel_lso_const_out_ip_id[0x1]; | 463 | u8 tunnel_lso_const_out_ip_id[0x1]; |
462 | u8 reserved_3[0x2]; | 464 | u8 reserved_4[0x2]; |
463 | u8 tunnel_statless_gre[0x1]; | 465 | u8 tunnel_statless_gre[0x1]; |
464 | u8 tunnel_stateless_vxlan[0x1]; | 466 | u8 tunnel_stateless_vxlan[0x1]; |
465 | 467 | ||
466 | u8 reserved_4[0x20]; | 468 | u8 reserved_5[0x20]; |
467 | 469 | ||
468 | u8 reserved_5[0x10]; | 470 | u8 reserved_6[0x10]; |
469 | u8 lro_min_mss_size[0x10]; | 471 | u8 lro_min_mss_size[0x10]; |
470 | 472 | ||
471 | u8 reserved_6[0x120]; | 473 | u8 reserved_7[0x120]; |
472 | 474 | ||
473 | u8 lro_timer_supported_periods[4][0x20]; | 475 | u8 lro_timer_supported_periods[4][0x20]; |
474 | 476 | ||
475 | u8 reserved_7[0x600]; | 477 | u8 reserved_8[0x600]; |
476 | }; | 478 | }; |
477 | 479 | ||
478 | struct mlx5_ifc_roce_cap_bits { | 480 | struct mlx5_ifc_roce_cap_bits { |
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits { | |||
4051 | }; | 4053 | }; |
4052 | 4054 | ||
4053 | struct mlx5_ifc_modify_tir_bitmask_bits { | 4055 | struct mlx5_ifc_modify_tir_bitmask_bits { |
4054 | u8 reserved[0x20]; | 4056 | u8 reserved_0[0x20]; |
4055 | 4057 | ||
4056 | u8 reserved1[0x1f]; | 4058 | u8 reserved_1[0x1b]; |
4059 | u8 self_lb_en[0x1]; | ||
4060 | u8 reserved_2[0x3]; | ||
4057 | u8 lro[0x1]; | 4061 | u8 lro[0x1]; |
4058 | }; | 4062 | }; |
4059 | 4063 | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..00bad7793788 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -139,6 +139,7 @@ extern unsigned int kobjsize(const void *objp); | |||
139 | 139 | ||
140 | #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ | 140 | #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ |
141 | #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ | 141 | #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ |
142 | #define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ | ||
142 | #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ | 143 | #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ |
143 | #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ | 144 | #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ |
144 | #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ | 145 | #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ |
@@ -202,6 +203,9 @@ extern unsigned int kobjsize(const void *objp); | |||
202 | /* This mask defines which mm->def_flags a process can inherit its parent */ | 203 | /* This mask defines which mm->def_flags a process can inherit its parent */ |
203 | #define VM_INIT_DEF_MASK VM_NOHUGEPAGE | 204 | #define VM_INIT_DEF_MASK VM_NOHUGEPAGE |
204 | 205 | ||
206 | /* This mask is used to clear all the VMA flags used by mlock */ | ||
207 | #define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) | ||
208 | |||
205 | /* | 209 | /* |
206 | * mapping from the currently active vm_flags protection bits (the | 210 | * mapping from the currently active vm_flags protection bits (the |
207 | * low four bits) to a page protection mask.. | 211 | * low four bits) to a page protection mask.. |
@@ -426,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page, | |||
426 | #endif | 430 | #endif |
427 | } | 431 | } |
428 | 432 | ||
429 | static inline struct page *compound_head_by_tail(struct page *tail) | ||
430 | { | ||
431 | struct page *head = tail->first_page; | ||
432 | |||
433 | /* | ||
434 | * page->first_page may be a dangling pointer to an old | ||
435 | * compound page, so recheck that it is still a tail | ||
436 | * page before returning. | ||
437 | */ | ||
438 | smp_rmb(); | ||
439 | if (likely(PageTail(tail))) | ||
440 | return head; | ||
441 | return tail; | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * Since either compound page could be dismantled asynchronously in THP | ||
446 | * or we access asynchronously arbitrary positioned struct page, there | ||
447 | * would be tail flag race. To handle this race, we should call | ||
448 | * smp_rmb() before checking tail flag. compound_head_by_tail() did it. | ||
449 | */ | ||
450 | static inline struct page *compound_head(struct page *page) | ||
451 | { | ||
452 | if (unlikely(PageTail(page))) | ||
453 | return compound_head_by_tail(page); | ||
454 | return page; | ||
455 | } | ||
456 | |||
457 | /* | ||
458 | * If we access compound page synchronously such as access to | ||
459 | * allocated page, there is no need to handle tail flag race, so we can | ||
460 | * check tail flag directly without any synchronization primitive. | ||
461 | */ | ||
462 | static inline struct page *compound_head_fast(struct page *page) | ||
463 | { | ||
464 | if (unlikely(PageTail(page))) | ||
465 | return page->first_page; | ||
466 | return page; | ||
467 | } | ||
468 | |||
469 | /* | 433 | /* |
470 | * The atomic page->_mapcount, starts from -1: so that transitions | 434 | * The atomic page->_mapcount, starts from -1: so that transitions |
471 | * both from it and to it can be tracked, using atomic_inc_and_test | 435 | * both from it and to it can be tracked, using atomic_inc_and_test |
@@ -514,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page) | |||
514 | VM_BUG_ON_PAGE(!PageTail(page), page); | 478 | VM_BUG_ON_PAGE(!PageTail(page), page); |
515 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); | 479 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
516 | VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); | 480 | VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); |
517 | if (compound_tail_refcounted(page->first_page)) | 481 | if (compound_tail_refcounted(compound_head(page))) |
518 | atomic_inc(&page->_mapcount); | 482 | atomic_inc(&page->_mapcount); |
519 | } | 483 | } |
520 | 484 | ||
@@ -537,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x) | |||
537 | { | 501 | { |
538 | struct page *page = virt_to_page(x); | 502 | struct page *page = virt_to_page(x); |
539 | 503 | ||
540 | /* | 504 | return compound_head(page); |
541 | * We don't need to worry about synchronization of tail flag | ||
542 | * when we call virt_to_head_page() since it is only called for | ||
543 | * already allocated page and this page won't be freed until | ||
544 | * this virt_to_head_page() is finished. So use _fast variant. | ||
545 | */ | ||
546 | return compound_head_fast(page); | ||
547 | } | 505 | } |
548 | 506 | ||
549 | /* | 507 | /* |
@@ -564,28 +522,42 @@ int split_free_page(struct page *page); | |||
564 | /* | 522 | /* |
565 | * Compound pages have a destructor function. Provide a | 523 | * Compound pages have a destructor function. Provide a |
566 | * prototype for that function and accessor functions. | 524 | * prototype for that function and accessor functions. |
567 | * These are _only_ valid on the head of a PG_compound page. | 525 | * These are _only_ valid on the head of a compound page. |
568 | */ | 526 | */ |
527 | typedef void compound_page_dtor(struct page *); | ||
528 | |||
529 | /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ | ||
530 | enum compound_dtor_id { | ||
531 | NULL_COMPOUND_DTOR, | ||
532 | COMPOUND_PAGE_DTOR, | ||
533 | #ifdef CONFIG_HUGETLB_PAGE | ||
534 | HUGETLB_PAGE_DTOR, | ||
535 | #endif | ||
536 | NR_COMPOUND_DTORS, | ||
537 | }; | ||
538 | extern compound_page_dtor * const compound_page_dtors[]; | ||
569 | 539 | ||
570 | static inline void set_compound_page_dtor(struct page *page, | 540 | static inline void set_compound_page_dtor(struct page *page, |
571 | compound_page_dtor *dtor) | 541 | enum compound_dtor_id compound_dtor) |
572 | { | 542 | { |
573 | page[1].compound_dtor = dtor; | 543 | VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); |
544 | page[1].compound_dtor = compound_dtor; | ||
574 | } | 545 | } |
575 | 546 | ||
576 | static inline compound_page_dtor *get_compound_page_dtor(struct page *page) | 547 | static inline compound_page_dtor *get_compound_page_dtor(struct page *page) |
577 | { | 548 | { |
578 | return page[1].compound_dtor; | 549 | VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); |
550 | return compound_page_dtors[page[1].compound_dtor]; | ||
579 | } | 551 | } |
580 | 552 | ||
581 | static inline int compound_order(struct page *page) | 553 | static inline unsigned int compound_order(struct page *page) |
582 | { | 554 | { |
583 | if (!PageHead(page)) | 555 | if (!PageHead(page)) |
584 | return 0; | 556 | return 0; |
585 | return page[1].compound_order; | 557 | return page[1].compound_order; |
586 | } | 558 | } |
587 | 559 | ||
588 | static inline void set_compound_order(struct page *page, unsigned long order) | 560 | static inline void set_compound_order(struct page *page, unsigned int order) |
589 | { | 561 | { |
590 | page[1].compound_order = order; | 562 | page[1].compound_order = order; |
591 | } | 563 | } |
@@ -905,6 +877,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
905 | #endif | 877 | #endif |
906 | } | 878 | } |
907 | 879 | ||
880 | #ifdef CONFIG_MEMCG | ||
881 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
882 | { | ||
883 | return page->mem_cgroup; | ||
884 | } | ||
885 | |||
886 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
887 | { | ||
888 | page->mem_cgroup = memcg; | ||
889 | } | ||
890 | #else | ||
891 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
892 | { | ||
893 | return NULL; | ||
894 | } | ||
895 | |||
896 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
897 | { | ||
898 | } | ||
899 | #endif | ||
900 | |||
908 | /* | 901 | /* |
909 | * Some inline functions in vmstat.h depend on page_zone() | 902 | * Some inline functions in vmstat.h depend on page_zone() |
910 | */ | 903 | */ |
@@ -1547,8 +1540,7 @@ static inline bool ptlock_init(struct page *page) | |||
1547 | * with 0. Make sure nobody took it in use in between. | 1540 | * with 0. Make sure nobody took it in use in between. |
1548 | * | 1541 | * |
1549 | * It can happen if arch try to use slab for page table allocation: | 1542 | * It can happen if arch try to use slab for page table allocation: |
1550 | * slab code uses page->slab_cache and page->first_page (for tail | 1543 | * slab code uses page->slab_cache, which share storage with page->ptl. |
1551 | * pages), which share storage with page->ptl. | ||
1552 | */ | 1544 | */ |
1553 | VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); | 1545 | VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); |
1554 | if (!ptlock_alloc(page)) | 1546 | if (!ptlock_alloc(page)) |
@@ -1585,8 +1577,10 @@ static inline void pgtable_init(void) | |||
1585 | 1577 | ||
1586 | static inline bool pgtable_page_ctor(struct page *page) | 1578 | static inline bool pgtable_page_ctor(struct page *page) |
1587 | { | 1579 | { |
1580 | if (!ptlock_init(page)) | ||
1581 | return false; | ||
1588 | inc_zone_page_state(page, NR_PAGETABLE); | 1582 | inc_zone_page_state(page, NR_PAGETABLE); |
1589 | return ptlock_init(page); | 1583 | return true; |
1590 | } | 1584 | } |
1591 | 1585 | ||
1592 | static inline void pgtable_page_dtor(struct page *page) | 1586 | static inline void pgtable_page_dtor(struct page *page) |
@@ -1816,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val); | |||
1816 | extern void si_meminfo_node(struct sysinfo *val, int nid); | 1810 | extern void si_meminfo_node(struct sysinfo *val, int nid); |
1817 | 1811 | ||
1818 | extern __printf(3, 4) | 1812 | extern __printf(3, 4) |
1819 | void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); | 1813 | void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, |
1814 | const char *fmt, ...); | ||
1820 | 1815 | ||
1821 | extern void setup_per_cpu_pageset(void); | 1816 | extern void setup_per_cpu_pageset(void); |
1822 | 1817 | ||
@@ -2015,8 +2010,6 @@ void page_cache_async_readahead(struct address_space *mapping, | |||
2015 | pgoff_t offset, | 2010 | pgoff_t offset, |
2016 | unsigned long size); | 2011 | unsigned long size); |
2017 | 2012 | ||
2018 | unsigned long max_sane_readahead(unsigned long nr); | ||
2019 | |||
2020 | /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ | 2013 | /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ |
2021 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | 2014 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); |
2022 | 2015 | ||
@@ -2116,6 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, | |||
2116 | #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ | 2109 | #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ |
2117 | #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ | 2110 | #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ |
2118 | #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ | 2111 | #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ |
2112 | #define FOLL_MLOCK 0x1000 /* lock present pages */ | ||
2119 | 2113 | ||
2120 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | 2114 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, |
2121 | void *data); | 2115 | void *data); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3d6baa7d4534..f8d1492a114f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -28,8 +28,6 @@ struct mem_cgroup; | |||
28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) | 28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) |
29 | #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) | 29 | #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) |
30 | 30 | ||
31 | typedef void compound_page_dtor(struct page *); | ||
32 | |||
33 | /* | 31 | /* |
34 | * Each physical page in the system has a struct page associated with | 32 | * Each physical page in the system has a struct page associated with |
35 | * it to keep track of whatever it is we are using the page for at the | 33 | * it to keep track of whatever it is we are using the page for at the |
@@ -113,7 +111,13 @@ struct page { | |||
113 | }; | 111 | }; |
114 | }; | 112 | }; |
115 | 113 | ||
116 | /* Third double word block */ | 114 | /* |
115 | * Third double word block | ||
116 | * | ||
117 | * WARNING: bit 0 of the first word encode PageTail(). That means | ||
118 | * the rest users of the storage space MUST NOT use the bit to | ||
119 | * avoid collision and false-positive PageTail(). | ||
120 | */ | ||
117 | union { | 121 | union { |
118 | struct list_head lru; /* Pageout list, eg. active_list | 122 | struct list_head lru; /* Pageout list, eg. active_list |
119 | * protected by zone->lru_lock ! | 123 | * protected by zone->lru_lock ! |
@@ -131,18 +135,37 @@ struct page { | |||
131 | #endif | 135 | #endif |
132 | }; | 136 | }; |
133 | 137 | ||
134 | struct slab *slab_page; /* slab fields */ | ||
135 | struct rcu_head rcu_head; /* Used by SLAB | 138 | struct rcu_head rcu_head; /* Used by SLAB |
136 | * when destroying via RCU | 139 | * when destroying via RCU |
137 | */ | 140 | */ |
138 | /* First tail page of compound page */ | 141 | /* Tail pages of compound page */ |
139 | struct { | 142 | struct { |
140 | compound_page_dtor *compound_dtor; | 143 | unsigned long compound_head; /* If bit zero is set */ |
141 | unsigned long compound_order; | 144 | |
145 | /* First tail page only */ | ||
146 | #ifdef CONFIG_64BIT | ||
147 | /* | ||
148 | * On 64 bit system we have enough space in struct page | ||
149 | * to encode compound_dtor and compound_order with | ||
150 | * unsigned int. It can help compiler generate better or | ||
151 | * smaller code on some archtectures. | ||
152 | */ | ||
153 | unsigned int compound_dtor; | ||
154 | unsigned int compound_order; | ||
155 | #else | ||
156 | unsigned short int compound_dtor; | ||
157 | unsigned short int compound_order; | ||
158 | #endif | ||
142 | }; | 159 | }; |
143 | 160 | ||
144 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS | 161 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS |
145 | pgtable_t pmd_huge_pte; /* protected by page->ptl */ | 162 | struct { |
163 | unsigned long __pad; /* do not overlay pmd_huge_pte | ||
164 | * with compound_head to avoid | ||
165 | * possible bit 0 collision. | ||
166 | */ | ||
167 | pgtable_t pmd_huge_pte; /* protected by page->ptl */ | ||
168 | }; | ||
146 | #endif | 169 | #endif |
147 | }; | 170 | }; |
148 | 171 | ||
@@ -163,7 +186,6 @@ struct page { | |||
163 | #endif | 186 | #endif |
164 | #endif | 187 | #endif |
165 | struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ | 188 | struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ |
166 | struct page *first_page; /* Compound tail pages */ | ||
167 | }; | 189 | }; |
168 | 190 | ||
169 | #ifdef CONFIG_MEMCG | 191 | #ifdef CONFIG_MEMCG |
@@ -486,6 +508,9 @@ struct mm_struct { | |||
486 | /* address of the bounds directory */ | 508 | /* address of the bounds directory */ |
487 | void __user *bd_addr; | 509 | void __user *bd_addr; |
488 | #endif | 510 | #endif |
511 | #ifdef CONFIG_HUGETLB_PAGE | ||
512 | atomic_long_t hugetlb_usage; | ||
513 | #endif | ||
489 | }; | 514 | }; |
490 | 515 | ||
491 | static inline void mm_init_cpumask(struct mm_struct *mm) | 516 | static inline void mm_init_cpumask(struct mm_struct *mm) |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index fdd0779ccdfa..eb0151bac50c 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -269,7 +269,6 @@ struct mmc_card { | |||
269 | /* for byte mode */ | 269 | /* for byte mode */ |
270 | #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ | 270 | #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ |
271 | /* (missing CIA registers) */ | 271 | /* (missing CIA registers) */ |
272 | #define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ | ||
273 | #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ | 272 | #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ |
274 | #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ | 273 | #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ |
275 | #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ | 274 | #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ |
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 258daf914c6d..37967b6da03c 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h | |||
@@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); | |||
152 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, | 152 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, |
153 | struct mmc_command *, int); | 153 | struct mmc_command *, int); |
154 | extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); | 154 | extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); |
155 | extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, | ||
156 | bool, bool); | ||
157 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); | 155 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); |
158 | extern int mmc_send_tuning(struct mmc_host *host); | 156 | extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); |
159 | extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); | 157 | extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); |
160 | 158 | ||
161 | #define MMC_ERASE_ARG 0x00000000 | 159 | #define MMC_ERASE_ARG 0x00000000 |
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 134c57422740..f67b2ec18e6d 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | #include <linux/mmc/core.h> | 18 | #include <linux/mmc/core.h> |
19 | #include <linux/dmaengine.h> | ||
19 | 20 | ||
20 | #define MAX_MCI_SLOTS 2 | 21 | #define MAX_MCI_SLOTS 2 |
21 | 22 | ||
@@ -40,6 +41,17 @@ enum { | |||
40 | 41 | ||
41 | struct mmc_data; | 42 | struct mmc_data; |
42 | 43 | ||
44 | enum { | ||
45 | TRANS_MODE_PIO = 0, | ||
46 | TRANS_MODE_IDMAC, | ||
47 | TRANS_MODE_EDMAC | ||
48 | }; | ||
49 | |||
50 | struct dw_mci_dma_slave { | ||
51 | struct dma_chan *ch; | ||
52 | enum dma_transfer_direction direction; | ||
53 | }; | ||
54 | |||
43 | /** | 55 | /** |
44 | * struct dw_mci - MMC controller state shared between all slots | 56 | * struct dw_mci - MMC controller state shared between all slots |
45 | * @lock: Spinlock protecting the queue and associated data. | 57 | * @lock: Spinlock protecting the queue and associated data. |
@@ -154,7 +166,14 @@ struct dw_mci { | |||
154 | dma_addr_t sg_dma; | 166 | dma_addr_t sg_dma; |
155 | void *sg_cpu; | 167 | void *sg_cpu; |
156 | const struct dw_mci_dma_ops *dma_ops; | 168 | const struct dw_mci_dma_ops *dma_ops; |
169 | /* For idmac */ | ||
157 | unsigned int ring_size; | 170 | unsigned int ring_size; |
171 | |||
172 | /* For edmac */ | ||
173 | struct dw_mci_dma_slave *dms; | ||
174 | /* Registers's physical base address */ | ||
175 | void *phy_regs; | ||
176 | |||
158 | u32 cmd_status; | 177 | u32 cmd_status; |
159 | u32 data_status; | 178 | u32 data_status; |
160 | u32 stop_cmdr; | 179 | u32 stop_cmdr; |
@@ -208,8 +227,8 @@ struct dw_mci { | |||
208 | struct dw_mci_dma_ops { | 227 | struct dw_mci_dma_ops { |
209 | /* DMA Ops */ | 228 | /* DMA Ops */ |
210 | int (*init)(struct dw_mci *host); | 229 | int (*init)(struct dw_mci *host); |
211 | void (*start)(struct dw_mci *host, unsigned int sg_len); | 230 | int (*start)(struct dw_mci *host, unsigned int sg_len); |
212 | void (*complete)(struct dw_mci *host); | 231 | void (*complete)(void *host); |
213 | void (*stop)(struct dw_mci *host); | 232 | void (*stop)(struct dw_mci *host); |
214 | void (*cleanup)(struct dw_mci *host); | 233 | void (*cleanup)(struct dw_mci *host); |
215 | void (*exit)(struct dw_mci *host); | 234 | void (*exit)(struct dw_mci *host); |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 83b81fd865f3..8673ffe3d86e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -292,18 +292,6 @@ struct mmc_host { | |||
292 | 292 | ||
293 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 293 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
294 | 294 | ||
295 | #ifdef CONFIG_MMC_CLKGATE | ||
296 | int clk_requests; /* internal reference counter */ | ||
297 | unsigned int clk_delay; /* number of MCI clk hold cycles */ | ||
298 | bool clk_gated; /* clock gated */ | ||
299 | struct delayed_work clk_gate_work; /* delayed clock gate */ | ||
300 | unsigned int clk_old; /* old clock value cache */ | ||
301 | spinlock_t clk_lock; /* lock for clk fields */ | ||
302 | struct mutex clk_gate_mutex; /* mutex for clock gating */ | ||
303 | struct device_attribute clkgate_delay_attr; | ||
304 | unsigned long clkgate_delay; | ||
305 | #endif | ||
306 | |||
307 | /* host specific block data */ | 295 | /* host specific block data */ |
308 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ | 296 | unsigned int max_seg_size; /* see blk_queue_max_segment_size */ |
309 | unsigned short max_segs; /* see blk_queue_max_segments */ | 297 | unsigned short max_segs; /* see blk_queue_max_segments */ |
@@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply); | |||
423 | int mmc_regulator_set_ocr(struct mmc_host *mmc, | 411 | int mmc_regulator_set_ocr(struct mmc_host *mmc, |
424 | struct regulator *supply, | 412 | struct regulator *supply, |
425 | unsigned short vdd_bit); | 413 | unsigned short vdd_bit); |
414 | int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); | ||
426 | #else | 415 | #else |
427 | static inline int mmc_regulator_get_ocrmask(struct regulator *supply) | 416 | static inline int mmc_regulator_get_ocrmask(struct regulator *supply) |
428 | { | 417 | { |
@@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, | |||
435 | { | 424 | { |
436 | return 0; | 425 | return 0; |
437 | } | 426 | } |
427 | |||
428 | static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, | ||
429 | struct mmc_ios *ios) | ||
430 | { | ||
431 | return -EINVAL; | ||
432 | } | ||
438 | #endif | 433 | #endif |
439 | 434 | ||
440 | int mmc_regulator_get_supply(struct mmc_host *mmc); | 435 | int mmc_regulator_get_supply(struct mmc_host *mmc); |
@@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host) | |||
479 | return host->caps2 & MMC_CAP2_PACKED_WR; | 474 | return host->caps2 & MMC_CAP2_PACKED_WR; |
480 | } | 475 | } |
481 | 476 | ||
482 | #ifdef CONFIG_MMC_CLKGATE | ||
483 | void mmc_host_clk_hold(struct mmc_host *host); | ||
484 | void mmc_host_clk_release(struct mmc_host *host); | ||
485 | unsigned int mmc_host_clk_rate(struct mmc_host *host); | ||
486 | |||
487 | #else | ||
488 | static inline void mmc_host_clk_hold(struct mmc_host *host) | ||
489 | { | ||
490 | } | ||
491 | |||
492 | static inline void mmc_host_clk_release(struct mmc_host *host) | ||
493 | { | ||
494 | } | ||
495 | |||
496 | static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) | ||
497 | { | ||
498 | return host->ios.clock; | ||
499 | } | ||
500 | #endif | ||
501 | |||
502 | static inline int mmc_card_hs(struct mmc_card *card) | 477 | static inline int mmc_card_hs(struct mmc_card *card) |
503 | { | 478 | { |
504 | return card->host->ios.timing == MMC_TIMING_SD_HS || | 479 | return card->host->ios.timing == MMC_TIMING_SD_HS || |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d94347737292..e23a9e704536 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -37,10 +37,10 @@ | |||
37 | 37 | ||
38 | enum { | 38 | enum { |
39 | MIGRATE_UNMOVABLE, | 39 | MIGRATE_UNMOVABLE, |
40 | MIGRATE_RECLAIMABLE, | ||
41 | MIGRATE_MOVABLE, | 40 | MIGRATE_MOVABLE, |
41 | MIGRATE_RECLAIMABLE, | ||
42 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ | 42 | MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
43 | MIGRATE_RESERVE = MIGRATE_PCPTYPES, | 43 | MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, |
44 | #ifdef CONFIG_CMA | 44 | #ifdef CONFIG_CMA |
45 | /* | 45 | /* |
46 | * MIGRATE_CMA migration type is designed to mimic the way | 46 | * MIGRATE_CMA migration type is designed to mimic the way |
@@ -334,13 +334,16 @@ struct zone { | |||
334 | /* zone watermarks, access with *_wmark_pages(zone) macros */ | 334 | /* zone watermarks, access with *_wmark_pages(zone) macros */ |
335 | unsigned long watermark[NR_WMARK]; | 335 | unsigned long watermark[NR_WMARK]; |
336 | 336 | ||
337 | unsigned long nr_reserved_highatomic; | ||
338 | |||
337 | /* | 339 | /* |
338 | * We don't know if the memory that we're going to allocate will be freeable | 340 | * We don't know if the memory that we're going to allocate will be |
339 | * or/and it will be released eventually, so to avoid totally wasting several | 341 | * freeable or/and it will be released eventually, so to avoid totally |
340 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | 342 | * wasting several GB of ram we must reserve some of the lower zone |
341 | * to run OOM on the lower zones despite there's tons of freeable ram | 343 | * memory (otherwise we risk to run OOM on the lower zones despite |
342 | * on the higher zones). This array is recalculated at runtime if the | 344 | * there being tons of freeable ram on the higher zones). This array is |
343 | * sysctl_lowmem_reserve_ratio sysctl changes. | 345 | * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl |
346 | * changes. | ||
344 | */ | 347 | */ |
345 | long lowmem_reserve[MAX_NR_ZONES]; | 348 | long lowmem_reserve[MAX_NR_ZONES]; |
346 | 349 | ||
@@ -429,12 +432,6 @@ struct zone { | |||
429 | 432 | ||
430 | const char *name; | 433 | const char *name; |
431 | 434 | ||
432 | /* | ||
433 | * Number of MIGRATE_RESERVE page block. To maintain for just | ||
434 | * optimization. Protected by zone->lock. | ||
435 | */ | ||
436 | int nr_migrate_reserve_block; | ||
437 | |||
438 | #ifdef CONFIG_MEMORY_ISOLATION | 435 | #ifdef CONFIG_MEMORY_ISOLATION |
439 | /* | 436 | /* |
440 | * Number of isolated pageblock. It is used to solve incorrect | 437 | * Number of isolated pageblock. It is used to solve incorrect |
@@ -589,75 +586,8 @@ static inline bool zone_is_empty(struct zone *zone) | |||
589 | * [1] : No fallback (__GFP_THISNODE) | 586 | * [1] : No fallback (__GFP_THISNODE) |
590 | */ | 587 | */ |
591 | #define MAX_ZONELISTS 2 | 588 | #define MAX_ZONELISTS 2 |
592 | |||
593 | |||
594 | /* | ||
595 | * We cache key information from each zonelist for smaller cache | ||
596 | * footprint when scanning for free pages in get_page_from_freelist(). | ||
597 | * | ||
598 | * 1) The BITMAP fullzones tracks which zones in a zonelist have come | ||
599 | * up short of free memory since the last time (last_fullzone_zap) | ||
600 | * we zero'd fullzones. | ||
601 | * 2) The array z_to_n[] maps each zone in the zonelist to its node | ||
602 | * id, so that we can efficiently evaluate whether that node is | ||
603 | * set in the current tasks mems_allowed. | ||
604 | * | ||
605 | * Both fullzones and z_to_n[] are one-to-one with the zonelist, | ||
606 | * indexed by a zones offset in the zonelist zones[] array. | ||
607 | * | ||
608 | * The get_page_from_freelist() routine does two scans. During the | ||
609 | * first scan, we skip zones whose corresponding bit in 'fullzones' | ||
610 | * is set or whose corresponding node in current->mems_allowed (which | ||
611 | * comes from cpusets) is not set. During the second scan, we bypass | ||
612 | * this zonelist_cache, to ensure we look methodically at each zone. | ||
613 | * | ||
614 | * Once per second, we zero out (zap) fullzones, forcing us to | ||
615 | * reconsider nodes that might have regained more free memory. | ||
616 | * The field last_full_zap is the time we last zapped fullzones. | ||
617 | * | ||
618 | * This mechanism reduces the amount of time we waste repeatedly | ||
619 | * reexaming zones for free memory when they just came up low on | ||
620 | * memory momentarilly ago. | ||
621 | * | ||
622 | * The zonelist_cache struct members logically belong in struct | ||
623 | * zonelist. However, the mempolicy zonelists constructed for | ||
624 | * MPOL_BIND are intentionally variable length (and usually much | ||
625 | * shorter). A general purpose mechanism for handling structs with | ||
626 | * multiple variable length members is more mechanism than we want | ||
627 | * here. We resort to some special case hackery instead. | ||
628 | * | ||
629 | * The MPOL_BIND zonelists don't need this zonelist_cache (in good | ||
630 | * part because they are shorter), so we put the fixed length stuff | ||
631 | * at the front of the zonelist struct, ending in a variable length | ||
632 | * zones[], as is needed by MPOL_BIND. | ||
633 | * | ||
634 | * Then we put the optional zonelist cache on the end of the zonelist | ||
635 | * struct. This optional stuff is found by a 'zlcache_ptr' pointer in | ||
636 | * the fixed length portion at the front of the struct. This pointer | ||
637 | * both enables us to find the zonelist cache, and in the case of | ||
638 | * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) | ||
639 | * to know that the zonelist cache is not there. | ||
640 | * | ||
641 | * The end result is that struct zonelists come in two flavors: | ||
642 | * 1) The full, fixed length version, shown below, and | ||
643 | * 2) The custom zonelists for MPOL_BIND. | ||
644 | * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. | ||
645 | * | ||
646 | * Even though there may be multiple CPU cores on a node modifying | ||
647 | * fullzones or last_full_zap in the same zonelist_cache at the same | ||
648 | * time, we don't lock it. This is just hint data - if it is wrong now | ||
649 | * and then, the allocator will still function, perhaps a bit slower. | ||
650 | */ | ||
651 | |||
652 | |||
653 | struct zonelist_cache { | ||
654 | unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ | ||
655 | DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ | ||
656 | unsigned long last_full_zap; /* when last zap'd (jiffies) */ | ||
657 | }; | ||
658 | #else | 589 | #else |
659 | #define MAX_ZONELISTS 1 | 590 | #define MAX_ZONELISTS 1 |
660 | struct zonelist_cache; | ||
661 | #endif | 591 | #endif |
662 | 592 | ||
663 | /* | 593 | /* |
@@ -675,9 +605,6 @@ struct zoneref { | |||
675 | * allocation, the other zones are fallback zones, in decreasing | 605 | * allocation, the other zones are fallback zones, in decreasing |
676 | * priority. | 606 | * priority. |
677 | * | 607 | * |
678 | * If zlcache_ptr is not NULL, then it is just the address of zlcache, | ||
679 | * as explained above. If zlcache_ptr is NULL, there is no zlcache. | ||
680 | * * | ||
681 | * To speed the reading of the zonelist, the zonerefs contain the zone index | 608 | * To speed the reading of the zonelist, the zonerefs contain the zone index |
682 | * of the entry being read. Helper functions to access information given | 609 | * of the entry being read. Helper functions to access information given |
683 | * a struct zoneref are | 610 | * a struct zoneref are |
@@ -687,11 +614,7 @@ struct zoneref { | |||
687 | * zonelist_node_idx() - Return the index of the node for an entry | 614 | * zonelist_node_idx() - Return the index of the node for an entry |
688 | */ | 615 | */ |
689 | struct zonelist { | 616 | struct zonelist { |
690 | struct zonelist_cache *zlcache_ptr; // NULL or &zlcache | ||
691 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; | 617 | struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; |
692 | #ifdef CONFIG_NUMA | ||
693 | struct zonelist_cache zlcache; // optional ... | ||
694 | #endif | ||
695 | }; | 618 | }; |
696 | 619 | ||
697 | #ifndef CONFIG_DISCONTIGMEM | 620 | #ifndef CONFIG_DISCONTIGMEM |
@@ -817,14 +740,13 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); | |||
817 | bool zone_watermark_ok(struct zone *z, unsigned int order, | 740 | bool zone_watermark_ok(struct zone *z, unsigned int order, |
818 | unsigned long mark, int classzone_idx, int alloc_flags); | 741 | unsigned long mark, int classzone_idx, int alloc_flags); |
819 | bool zone_watermark_ok_safe(struct zone *z, unsigned int order, | 742 | bool zone_watermark_ok_safe(struct zone *z, unsigned int order, |
820 | unsigned long mark, int classzone_idx, int alloc_flags); | 743 | unsigned long mark, int classzone_idx); |
821 | enum memmap_context { | 744 | enum memmap_context { |
822 | MEMMAP_EARLY, | 745 | MEMMAP_EARLY, |
823 | MEMMAP_HOTPLUG, | 746 | MEMMAP_HOTPLUG, |
824 | }; | 747 | }; |
825 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, | 748 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, |
826 | unsigned long size, | 749 | unsigned long size); |
827 | enum memmap_context context); | ||
828 | 750 | ||
829 | extern void lruvec_init(struct lruvec *lruvec); | 751 | extern void lruvec_init(struct lruvec *lruvec); |
830 | 752 | ||
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 688997a24aad..64f36e09a790 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -219,6 +219,14 @@ struct serio_device_id { | |||
219 | __u8 proto; | 219 | __u8 proto; |
220 | }; | 220 | }; |
221 | 221 | ||
222 | struct hda_device_id { | ||
223 | __u32 vendor_id; | ||
224 | __u32 rev_id; | ||
225 | __u8 api_version; | ||
226 | const char *name; | ||
227 | unsigned long driver_data; | ||
228 | }; | ||
229 | |||
222 | /* | 230 | /* |
223 | * Struct used for matching a device | 231 | * Struct used for matching a device |
224 | */ | 232 | */ |
@@ -601,15 +609,13 @@ struct ipack_device_id { | |||
601 | 609 | ||
602 | #define MEI_CL_MODULE_PREFIX "mei:" | 610 | #define MEI_CL_MODULE_PREFIX "mei:" |
603 | #define MEI_CL_NAME_SIZE 32 | 611 | #define MEI_CL_NAME_SIZE 32 |
604 | #define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" | 612 | #define MEI_CL_VERSION_ANY 0xff |
605 | #define MEI_CL_UUID_ARGS(_u) \ | ||
606 | _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \ | ||
607 | _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15] | ||
608 | 613 | ||
609 | /** | 614 | /** |
610 | * struct mei_cl_device_id - MEI client device identifier | 615 | * struct mei_cl_device_id - MEI client device identifier |
611 | * @name: helper name | 616 | * @name: helper name |
612 | * @uuid: client uuid | 617 | * @uuid: client uuid |
618 | * @version: client protocol version | ||
613 | * @driver_info: information used by the driver. | 619 | * @driver_info: information used by the driver. |
614 | * | 620 | * |
615 | * identifies mei client device by uuid and name | 621 | * identifies mei client device by uuid and name |
@@ -617,6 +623,7 @@ struct ipack_device_id { | |||
617 | struct mei_cl_device_id { | 623 | struct mei_cl_device_id { |
618 | char name[MEI_CL_NAME_SIZE]; | 624 | char name[MEI_CL_NAME_SIZE]; |
619 | uuid_le uuid; | 625 | uuid_le uuid; |
626 | __u8 version; | ||
620 | kernel_ulong_t driver_info; | 627 | kernel_ulong_t driver_info; |
621 | }; | 628 | }; |
622 | 629 | ||
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index c12f2147c350..52666d90ca94 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -386,6 +386,7 @@ extern int param_get_ullong(char *buffer, const struct kernel_param *kp); | |||
386 | extern const struct kernel_param_ops param_ops_charp; | 386 | extern const struct kernel_param_ops param_ops_charp; |
387 | extern int param_set_charp(const char *val, const struct kernel_param *kp); | 387 | extern int param_set_charp(const char *val, const struct kernel_param *kp); |
388 | extern int param_get_charp(char *buffer, const struct kernel_param *kp); | 388 | extern int param_get_charp(char *buffer, const struct kernel_param *kp); |
389 | extern void param_free_charp(void *arg); | ||
389 | #define param_check_charp(name, p) __param_check(name, p, char *) | 390 | #define param_check_charp(name, p) __param_check(name, p, char *) |
390 | 391 | ||
391 | /* We used to allow int as well as bool. We're taking that away! */ | 392 | /* We used to allow int as well as bool. We're taking that away! */ |
diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 641b7d6fd096..3a5abe95affd 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h | |||
@@ -31,12 +31,7 @@ | |||
31 | #define G10_MPI_H | 31 | #define G10_MPI_H |
32 | 32 | ||
33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
34 | 34 | #include <linux/scatterlist.h> | |
35 | /* DSI defines */ | ||
36 | |||
37 | #define SHA1_DIGEST_LENGTH 20 | ||
38 | |||
39 | /*end of DSI defines */ | ||
40 | 35 | ||
41 | #define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) | 36 | #define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) |
42 | #define BITS_PER_MPI_LIMB BITS_PER_LONG | 37 | #define BITS_PER_MPI_LIMB BITS_PER_LONG |
@@ -78,6 +73,7 @@ void mpi_swap(MPI a, MPI b); | |||
78 | MPI do_encode_md(const void *sha_buffer, unsigned nbits); | 73 | MPI do_encode_md(const void *sha_buffer, unsigned nbits); |
79 | MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); | 74 | MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); |
80 | MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); | 75 | MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); |
76 | MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); | ||
81 | int mpi_fromstr(MPI val, const char *str); | 77 | int mpi_fromstr(MPI val, const char *str); |
82 | u32 mpi_get_keyid(MPI a, u32 *keyid); | 78 | u32 mpi_get_keyid(MPI a, u32 *keyid); |
83 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); | 79 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); |
@@ -85,6 +81,8 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, | |||
85 | int *sign); | 81 | int *sign); |
86 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); | 82 | void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); |
87 | int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); | 83 | int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); |
84 | int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, | ||
85 | int *sign); | ||
88 | 86 | ||
89 | #define log_mpidump g10_log_mpidump | 87 | #define log_mpidump g10_log_mpidump |
90 | 88 | ||
diff --git a/include/linux/msi.h b/include/linux/msi.h index ad939d0ba816..f71a25e5fd25 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -163,6 +163,8 @@ struct msi_controller { | |||
163 | 163 | ||
164 | int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, | 164 | int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, |
165 | struct msi_desc *desc); | 165 | struct msi_desc *desc); |
166 | int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, | ||
167 | int nvec, int type); | ||
166 | void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); | 168 | void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); |
167 | }; | 169 | }; |
168 | 170 | ||
@@ -174,6 +176,7 @@ struct msi_controller { | |||
174 | struct irq_domain; | 176 | struct irq_domain; |
175 | struct irq_chip; | 177 | struct irq_chip; |
176 | struct device_node; | 178 | struct device_node; |
179 | struct fwnode_handle; | ||
177 | struct msi_domain_info; | 180 | struct msi_domain_info; |
178 | 181 | ||
179 | /** | 182 | /** |
@@ -262,7 +265,7 @@ enum { | |||
262 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, | 265 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, |
263 | bool force); | 266 | bool force); |
264 | 267 | ||
265 | struct irq_domain *msi_create_irq_domain(struct device_node *of_node, | 268 | struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, |
266 | struct msi_domain_info *info, | 269 | struct msi_domain_info *info, |
267 | struct irq_domain *parent); | 270 | struct irq_domain *parent); |
268 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | 271 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, |
@@ -270,7 +273,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | |||
270 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); | 273 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); |
271 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); | 274 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); |
272 | 275 | ||
273 | struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, | 276 | struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, |
274 | struct msi_domain_info *info, | 277 | struct msi_domain_info *info, |
275 | struct irq_domain *parent); | 278 | struct irq_domain *parent); |
276 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, | 279 | int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, |
@@ -280,19 +283,26 @@ void platform_msi_domain_free_irqs(struct device *dev); | |||
280 | 283 | ||
281 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN | 284 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN |
282 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); | 285 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); |
283 | struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, | 286 | struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, |
284 | struct msi_domain_info *info, | 287 | struct msi_domain_info *info, |
285 | struct irq_domain *parent); | 288 | struct irq_domain *parent); |
286 | int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, | 289 | int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, |
287 | int nvec, int type); | 290 | int nvec, int type); |
288 | void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); | 291 | void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); |
289 | struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, | 292 | struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, |
290 | struct msi_domain_info *info, struct irq_domain *parent); | 293 | struct msi_domain_info *info, struct irq_domain *parent); |
291 | 294 | ||
292 | irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, | 295 | irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, |
293 | struct msi_desc *desc); | 296 | struct msi_desc *desc); |
294 | int pci_msi_domain_check_cap(struct irq_domain *domain, | 297 | int pci_msi_domain_check_cap(struct irq_domain *domain, |
295 | struct msi_domain_info *info, struct device *dev); | 298 | struct msi_domain_info *info, struct device *dev); |
299 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); | ||
300 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); | ||
301 | #else | ||
302 | static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | ||
303 | { | ||
304 | return NULL; | ||
305 | } | ||
296 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ | 306 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ |
297 | 307 | ||
298 | #endif /* LINUX_MSI_H */ | 308 | #endif /* LINUX_MSI_H */ |
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h deleted file mode 100644 index fe722c1fb61d..000000000000 --- a/include/linux/msm_mdp.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* include/linux/msm_mdp.h | ||
2 | * | ||
3 | * Copyright (C) 2007 Google Incorporated | ||
4 | * | ||
5 | * This software is licensed under the terms of the GNU General Public | ||
6 | * License version 2, as published by the Free Software Foundation, and | ||
7 | * may be copied, distributed, and modified under those terms. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | #ifndef _MSM_MDP_H_ | ||
15 | #define _MSM_MDP_H_ | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | |||
19 | #define MSMFB_IOCTL_MAGIC 'm' | ||
20 | #define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) | ||
21 | #define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) | ||
22 | |||
23 | enum { | ||
24 | MDP_RGB_565, /* RGB 565 planar */ | ||
25 | MDP_XRGB_8888, /* RGB 888 padded */ | ||
26 | MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ | ||
27 | MDP_ARGB_8888, /* ARGB 888 */ | ||
28 | MDP_RGB_888, /* RGB 888 planar */ | ||
29 | MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ | ||
30 | MDP_YCRYCB_H2V1, /* YCrYCb interleave */ | ||
31 | MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ | ||
32 | MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ | ||
33 | MDP_RGBA_8888, /* ARGB 888 */ | ||
34 | MDP_BGRA_8888, /* ABGR 888 */ | ||
35 | MDP_RGBX_8888, /* RGBX 888 */ | ||
36 | MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ | ||
37 | }; | ||
38 | |||
39 | enum { | ||
40 | PMEM_IMG, | ||
41 | FB_IMG, | ||
42 | }; | ||
43 | |||
44 | /* flag values */ | ||
45 | #define MDP_ROT_NOP 0 | ||
46 | #define MDP_FLIP_LR 0x1 | ||
47 | #define MDP_FLIP_UD 0x2 | ||
48 | #define MDP_ROT_90 0x4 | ||
49 | #define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) | ||
50 | #define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) | ||
51 | #define MDP_DITHER 0x8 | ||
52 | #define MDP_BLUR 0x10 | ||
53 | |||
54 | #define MDP_TRANSP_NOP 0xffffffff | ||
55 | #define MDP_ALPHA_NOP 0xff | ||
56 | |||
57 | struct mdp_rect { | ||
58 | u32 x, y, w, h; | ||
59 | }; | ||
60 | |||
61 | struct mdp_img { | ||
62 | u32 width, height, format, offset; | ||
63 | int memory_id; /* the file descriptor */ | ||
64 | }; | ||
65 | |||
66 | struct mdp_blit_req { | ||
67 | struct mdp_img src; | ||
68 | struct mdp_img dst; | ||
69 | struct mdp_rect src_rect; | ||
70 | struct mdp_rect dst_rect; | ||
71 | u32 alpha, transp_mask, flags; | ||
72 | }; | ||
73 | |||
74 | struct mdp_blit_req_list { | ||
75 | u32 count; | ||
76 | struct mdp_blit_req req[]; | ||
77 | }; | ||
78 | |||
79 | #endif /* _MSM_MDP_H_ */ | ||
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 272f42952f34..5a9d1d4c2487 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -504,16 +504,16 @@ struct nand_ecc_ctrl { | |||
504 | int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 504 | int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
505 | uint8_t *buf, int oob_required, int page); | 505 | uint8_t *buf, int oob_required, int page); |
506 | int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 506 | int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
507 | const uint8_t *buf, int oob_required); | 507 | const uint8_t *buf, int oob_required, int page); |
508 | int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, | 508 | int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, |
509 | uint8_t *buf, int oob_required, int page); | 509 | uint8_t *buf, int oob_required, int page); |
510 | int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, | 510 | int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, |
511 | uint32_t offs, uint32_t len, uint8_t *buf, int page); | 511 | uint32_t offs, uint32_t len, uint8_t *buf, int page); |
512 | int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, | 512 | int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, |
513 | uint32_t offset, uint32_t data_len, | 513 | uint32_t offset, uint32_t data_len, |
514 | const uint8_t *data_buf, int oob_required); | 514 | const uint8_t *data_buf, int oob_required, int page); |
515 | int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, | 515 | int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, |
516 | const uint8_t *buf, int oob_required); | 516 | const uint8_t *buf, int oob_required, int page); |
517 | int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 517 | int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
518 | int page); | 518 | int page); |
519 | int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, | 519 | int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, |
@@ -544,7 +544,7 @@ struct nand_buffers { | |||
544 | * flash device | 544 | * flash device |
545 | * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the | 545 | * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the |
546 | * flash device. | 546 | * flash device. |
547 | * @dn: [BOARDSPECIFIC] device node describing this instance | 547 | * @flash_node: [BOARDSPECIFIC] device node describing this instance |
548 | * @read_byte: [REPLACEABLE] read one byte from the chip | 548 | * @read_byte: [REPLACEABLE] read one byte from the chip |
549 | * @read_word: [REPLACEABLE] read one word from the chip | 549 | * @read_word: [REPLACEABLE] read one word from the chip |
550 | * @write_byte: [REPLACEABLE] write a single byte to the chip on the | 550 | * @write_byte: [REPLACEABLE] write a single byte to the chip on the |
@@ -556,10 +556,6 @@ struct nand_buffers { | |||
556 | * @block_markbad: [REPLACEABLE] mark a block bad | 556 | * @block_markbad: [REPLACEABLE] mark a block bad |
557 | * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling | 557 | * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling |
558 | * ALE/CLE/nCE. Also used to write command and address | 558 | * ALE/CLE/nCE. Also used to write command and address |
559 | * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting | ||
560 | * mtd->oobsize, mtd->writesize and so on. | ||
561 | * @id_data contains the 8 bytes values of NAND_CMD_READID. | ||
562 | * Return with the bus width. | ||
563 | * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing | 559 | * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing |
564 | * device ready/busy line. If set to NULL no access to | 560 | * device ready/busy line. If set to NULL no access to |
565 | * ready/busy is available and the ready/busy information | 561 | * ready/busy is available and the ready/busy information |
@@ -647,7 +643,7 @@ struct nand_chip { | |||
647 | void __iomem *IO_ADDR_R; | 643 | void __iomem *IO_ADDR_R; |
648 | void __iomem *IO_ADDR_W; | 644 | void __iomem *IO_ADDR_W; |
649 | 645 | ||
650 | struct device_node *dn; | 646 | struct device_node *flash_node; |
651 | 647 | ||
652 | uint8_t (*read_byte)(struct mtd_info *mtd); | 648 | uint8_t (*read_byte)(struct mtd_info *mtd); |
653 | u16 (*read_word)(struct mtd_info *mtd); | 649 | u16 (*read_word)(struct mtd_info *mtd); |
@@ -658,8 +654,6 @@ struct nand_chip { | |||
658 | int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); | 654 | int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); |
659 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); | 655 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); |
660 | void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); | 656 | void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); |
661 | int (*init_size)(struct mtd_info *mtd, struct nand_chip *this, | ||
662 | u8 *id_data); | ||
663 | int (*dev_ready)(struct mtd_info *mtd); | 657 | int (*dev_ready)(struct mtd_info *mtd); |
664 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, | 658 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, |
665 | int page_addr); | 659 | int page_addr); |
@@ -1030,4 +1024,9 @@ struct nand_sdr_timings { | |||
1030 | 1024 | ||
1031 | /* get timing characteristics from ONFI timing mode. */ | 1025 | /* get timing characteristics from ONFI timing mode. */ |
1032 | const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); | 1026 | const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); |
1027 | |||
1028 | int nand_check_erased_ecc_chunk(void *data, int datalen, | ||
1029 | void *ecc, int ecclen, | ||
1030 | void *extraoob, int extraooblen, | ||
1031 | int threshold); | ||
1033 | #endif /* __LINUX_MTD_NAND_H */ | 1032 | #endif /* __LINUX_MTD_NAND_H */ |
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index e5409524bb0a..c8723b62c4cd 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
@@ -10,6 +10,23 @@ | |||
10 | #ifndef __LINUX_MTD_SPI_NOR_H | 10 | #ifndef __LINUX_MTD_SPI_NOR_H |
11 | #define __LINUX_MTD_SPI_NOR_H | 11 | #define __LINUX_MTD_SPI_NOR_H |
12 | 12 | ||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/mtd/cfi.h> | ||
15 | |||
16 | /* | ||
17 | * Manufacturer IDs | ||
18 | * | ||
19 | * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. | ||
20 | * Sometimes these are the same as CFI IDs, but sometimes they aren't. | ||
21 | */ | ||
22 | #define SNOR_MFR_ATMEL CFI_MFR_ATMEL | ||
23 | #define SNOR_MFR_INTEL CFI_MFR_INTEL | ||
24 | #define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ | ||
25 | #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX | ||
26 | #define SNOR_MFR_SPANSION CFI_MFR_AMD | ||
27 | #define SNOR_MFR_SST CFI_MFR_SST | ||
28 | #define SNOR_MFR_WINBOND 0xef | ||
29 | |||
13 | /* | 30 | /* |
14 | * Note on opcode nomenclature: some opcodes have a format like | 31 | * Note on opcode nomenclature: some opcodes have a format like |
15 | * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number | 32 | * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number |
@@ -61,24 +78,24 @@ | |||
61 | #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ | 78 | #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ |
62 | 79 | ||
63 | /* Status Register bits. */ | 80 | /* Status Register bits. */ |
64 | #define SR_WIP 1 /* Write in progress */ | 81 | #define SR_WIP BIT(0) /* Write in progress */ |
65 | #define SR_WEL 2 /* Write enable latch */ | 82 | #define SR_WEL BIT(1) /* Write enable latch */ |
66 | /* meaning of other SR_* bits may differ between vendors */ | 83 | /* meaning of other SR_* bits may differ between vendors */ |
67 | #define SR_BP0 4 /* Block protect 0 */ | 84 | #define SR_BP0 BIT(2) /* Block protect 0 */ |
68 | #define SR_BP1 8 /* Block protect 1 */ | 85 | #define SR_BP1 BIT(3) /* Block protect 1 */ |
69 | #define SR_BP2 0x10 /* Block protect 2 */ | 86 | #define SR_BP2 BIT(4) /* Block protect 2 */ |
70 | #define SR_SRWD 0x80 /* SR write protect */ | 87 | #define SR_SRWD BIT(7) /* SR write protect */ |
71 | 88 | ||
72 | #define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ | 89 | #define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ |
73 | 90 | ||
74 | /* Enhanced Volatile Configuration Register bits */ | 91 | /* Enhanced Volatile Configuration Register bits */ |
75 | #define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */ | 92 | #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ |
76 | 93 | ||
77 | /* Flag Status Register bits */ | 94 | /* Flag Status Register bits */ |
78 | #define FSR_READY 0x80 | 95 | #define FSR_READY BIT(7) |
79 | 96 | ||
80 | /* Configuration Register bits. */ | 97 | /* Configuration Register bits. */ |
81 | #define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */ | 98 | #define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ |
82 | 99 | ||
83 | enum read_mode { | 100 | enum read_mode { |
84 | SPI_NOR_NORMAL = 0, | 101 | SPI_NOR_NORMAL = 0, |
@@ -87,33 +104,6 @@ enum read_mode { | |||
87 | SPI_NOR_QUAD, | 104 | SPI_NOR_QUAD, |
88 | }; | 105 | }; |
89 | 106 | ||
90 | /** | ||
91 | * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer | ||
92 | * @wren: command for "Write Enable", or 0x00 for not required | ||
93 | * @cmd: command for operation | ||
94 | * @cmd_pins: number of pins to send @cmd (1, 2, 4) | ||
95 | * @addr: address for operation | ||
96 | * @addr_pins: number of pins to send @addr (1, 2, 4) | ||
97 | * @addr_width: number of address bytes | ||
98 | * (3,4, or 0 for address not required) | ||
99 | * @mode: mode data | ||
100 | * @mode_pins: number of pins to send @mode (1, 2, 4) | ||
101 | * @mode_cycles: number of mode cycles (0 for mode not required) | ||
102 | * @dummy_cycles: number of dummy cycles (0 for dummy not required) | ||
103 | */ | ||
104 | struct spi_nor_xfer_cfg { | ||
105 | u8 wren; | ||
106 | u8 cmd; | ||
107 | u8 cmd_pins; | ||
108 | u32 addr; | ||
109 | u8 addr_pins; | ||
110 | u8 addr_width; | ||
111 | u8 mode; | ||
112 | u8 mode_pins; | ||
113 | u8 mode_cycles; | ||
114 | u8 dummy_cycles; | ||
115 | }; | ||
116 | |||
117 | #define SPI_NOR_MAX_CMD_SIZE 8 | 107 | #define SPI_NOR_MAX_CMD_SIZE 8 |
118 | enum spi_nor_ops { | 108 | enum spi_nor_ops { |
119 | SPI_NOR_OPS_READ = 0, | 109 | SPI_NOR_OPS_READ = 0, |
@@ -127,11 +117,14 @@ enum spi_nor_option_flags { | |||
127 | SNOR_F_USE_FSR = BIT(0), | 117 | SNOR_F_USE_FSR = BIT(0), |
128 | }; | 118 | }; |
129 | 119 | ||
120 | struct mtd_info; | ||
121 | |||
130 | /** | 122 | /** |
131 | * struct spi_nor - Structure for defining a the SPI NOR layer | 123 | * struct spi_nor - Structure for defining a the SPI NOR layer |
132 | * @mtd: point to a mtd_info structure | 124 | * @mtd: point to a mtd_info structure |
133 | * @lock: the lock for the read/write/erase/lock/unlock operations | 125 | * @lock: the lock for the read/write/erase/lock/unlock operations |
134 | * @dev: point to a spi device, or a spi nor controller device. | 126 | * @dev: point to a spi device, or a spi nor controller device. |
127 | * @flash_node: point to a device node describing this flash instance. | ||
135 | * @page_size: the page size of the SPI NOR | 128 | * @page_size: the page size of the SPI NOR |
136 | * @addr_width: number of address bytes | 129 | * @addr_width: number of address bytes |
137 | * @erase_opcode: the opcode for erasing a sector | 130 | * @erase_opcode: the opcode for erasing a sector |
@@ -141,28 +134,28 @@ enum spi_nor_option_flags { | |||
141 | * @flash_read: the mode of the read | 134 | * @flash_read: the mode of the read |
142 | * @sst_write_second: used by the SST write operation | 135 | * @sst_write_second: used by the SST write operation |
143 | * @flags: flag options for the current SPI-NOR (SNOR_F_*) | 136 | * @flags: flag options for the current SPI-NOR (SNOR_F_*) |
144 | * @cfg: used by the read_xfer/write_xfer | ||
145 | * @cmd_buf: used by the write_reg | 137 | * @cmd_buf: used by the write_reg |
146 | * @prepare: [OPTIONAL] do some preparations for the | 138 | * @prepare: [OPTIONAL] do some preparations for the |
147 | * read/write/erase/lock/unlock operations | 139 | * read/write/erase/lock/unlock operations |
148 | * @unprepare: [OPTIONAL] do some post work after the | 140 | * @unprepare: [OPTIONAL] do some post work after the |
149 | * read/write/erase/lock/unlock operations | 141 | * read/write/erase/lock/unlock operations |
150 | * @read_xfer: [OPTIONAL] the read fundamental primitive | ||
151 | * @write_xfer: [OPTIONAL] the writefundamental primitive | ||
152 | * @read_reg: [DRIVER-SPECIFIC] read out the register | 142 | * @read_reg: [DRIVER-SPECIFIC] read out the register |
153 | * @write_reg: [DRIVER-SPECIFIC] write data to the register | 143 | * @write_reg: [DRIVER-SPECIFIC] write data to the register |
154 | * @read: [DRIVER-SPECIFIC] read data from the SPI NOR | 144 | * @read: [DRIVER-SPECIFIC] read data from the SPI NOR |
155 | * @write: [DRIVER-SPECIFIC] write data to the SPI NOR | 145 | * @write: [DRIVER-SPECIFIC] write data to the SPI NOR |
156 | * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR | 146 | * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR |
157 | * at the offset @offs | 147 | * at the offset @offs |
158 | * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR | 148 | * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR |
159 | * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR | 149 | * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR |
150 | * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is | ||
151 | * completely locked | ||
160 | * @priv: the private data | 152 | * @priv: the private data |
161 | */ | 153 | */ |
162 | struct spi_nor { | 154 | struct spi_nor { |
163 | struct mtd_info *mtd; | 155 | struct mtd_info mtd; |
164 | struct mutex lock; | 156 | struct mutex lock; |
165 | struct device *dev; | 157 | struct device *dev; |
158 | struct device_node *flash_node; | ||
166 | u32 page_size; | 159 | u32 page_size; |
167 | u8 addr_width; | 160 | u8 addr_width; |
168 | u8 erase_opcode; | 161 | u8 erase_opcode; |
@@ -172,18 +165,12 @@ struct spi_nor { | |||
172 | enum read_mode flash_read; | 165 | enum read_mode flash_read; |
173 | bool sst_write_second; | 166 | bool sst_write_second; |
174 | u32 flags; | 167 | u32 flags; |
175 | struct spi_nor_xfer_cfg cfg; | ||
176 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; | 168 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; |
177 | 169 | ||
178 | int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); | 170 | int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); |
179 | void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); | 171 | void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); |
180 | int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, | ||
181 | u8 *buf, size_t len); | ||
182 | int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, | ||
183 | u8 *buf, size_t len); | ||
184 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); | 172 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); |
185 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, | 173 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); |
186 | int write_enable); | ||
187 | 174 | ||
188 | int (*read)(struct spi_nor *nor, loff_t from, | 175 | int (*read)(struct spi_nor *nor, loff_t from, |
189 | size_t len, size_t *retlen, u_char *read_buf); | 176 | size_t len, size_t *retlen, u_char *read_buf); |
@@ -193,6 +180,7 @@ struct spi_nor { | |||
193 | 180 | ||
194 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 181 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
195 | int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 182 | int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
183 | int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); | ||
196 | 184 | ||
197 | void *priv; | 185 | void *priv; |
198 | }; | 186 | }; |
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h index 5d0b2a1dee69..90a803aa42e8 100644 --- a/include/linux/n_r3964.h +++ b/include/linux/n_r3964.h | |||
@@ -152,9 +152,6 @@ struct r3964_info { | |||
152 | unsigned char *rx_buf; /* ring buffer */ | 152 | unsigned char *rx_buf; /* ring buffer */ |
153 | unsigned char *tx_buf; | 153 | unsigned char *tx_buf; |
154 | 154 | ||
155 | wait_queue_head_t read_wait; | ||
156 | //struct wait_queue *read_wait; | ||
157 | |||
158 | struct r3964_block_header *rx_first; | 155 | struct r3964_block_header *rx_first; |
159 | struct r3964_block_header *rx_last; | 156 | struct r3964_block_header *rx_last; |
160 | struct r3964_block_header *tx_first; | 157 | struct r3964_block_header *tx_first; |
@@ -164,8 +161,9 @@ struct r3964_info { | |||
164 | unsigned char last_rx; | 161 | unsigned char last_rx; |
165 | unsigned char bcc; | 162 | unsigned char bcc; |
166 | unsigned int blocks_in_rx_queue; | 163 | unsigned int blocks_in_rx_queue; |
167 | 164 | ||
168 | 165 | struct mutex read_lock; /* serialize r3964_read */ | |
166 | |||
169 | struct r3964_client_info *firstClient; | 167 | struct r3964_client_info *firstClient; |
170 | unsigned int state; | 168 | unsigned int state; |
171 | unsigned int flags; | 169 | unsigned int flags; |
diff --git a/include/linux/net.h b/include/linux/net.h index 049d4b03c4c4..70ac5e28e6b7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -24,7 +24,8 @@ | |||
24 | #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ | 24 | #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ |
25 | #include <linux/kmemcheck.h> | 25 | #include <linux/kmemcheck.h> |
26 | #include <linux/rcupdate.h> | 26 | #include <linux/rcupdate.h> |
27 | #include <linux/jump_label.h> | 27 | #include <linux/once.h> |
28 | |||
28 | #include <uapi/linux/net.h> | 29 | #include <uapi/linux/net.h> |
29 | 30 | ||
30 | struct poll_table_struct; | 31 | struct poll_table_struct; |
@@ -250,22 +251,8 @@ do { \ | |||
250 | } while (0) | 251 | } while (0) |
251 | #endif | 252 | #endif |
252 | 253 | ||
253 | bool __net_get_random_once(void *buf, int nbytes, bool *done, | 254 | #define net_get_random_once(buf, nbytes) \ |
254 | struct static_key *done_key); | 255 | get_random_once((buf), (nbytes)) |
255 | |||
256 | #define net_get_random_once(buf, nbytes) \ | ||
257 | ({ \ | ||
258 | bool ___ret = false; \ | ||
259 | static bool ___done = false; \ | ||
260 | static struct static_key ___once_key = \ | ||
261 | STATIC_KEY_INIT_TRUE; \ | ||
262 | if (static_key_true(&___once_key)) \ | ||
263 | ___ret = __net_get_random_once(buf, \ | ||
264 | nbytes, \ | ||
265 | &___done, \ | ||
266 | &___once_key); \ | ||
267 | ___ret; \ | ||
268 | }) | ||
269 | 256 | ||
270 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, | 257 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, |
271 | size_t num, size_t len); | 258 | size_t num, size_t len); |
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9672781c593d..f0d87347df19 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -125,6 +125,9 @@ enum { | |||
125 | #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) | 125 | #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) |
126 | #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) | 126 | #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) |
127 | 127 | ||
128 | #define for_each_netdev_feature(mask_addr, bit) \ | ||
129 | for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) | ||
130 | |||
128 | /* Features valid for ethtool to change */ | 131 | /* Features valid for ethtool to change */ |
129 | /* = all defined minus driver/device-class-related */ | 132 | /* = all defined minus driver/device-class-related */ |
130 | #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ | 133 | #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ |
@@ -167,6 +170,12 @@ enum { | |||
167 | */ | 170 | */ |
168 | #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) | 171 | #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) |
169 | 172 | ||
173 | /* | ||
174 | * If upper/master device has these features disabled, they must be disabled | ||
175 | * on all lower/slave devices as well. | ||
176 | */ | ||
177 | #define NETIF_F_UPPER_DISABLES NETIF_F_LRO | ||
178 | |||
170 | /* changeable features with no special hardware requirements */ | 179 | /* changeable features with no special hardware requirements */ |
171 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | 180 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) |
172 | 181 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2d15e3831440..67bfac1abfc1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -718,8 +718,8 @@ struct xps_map { | |||
718 | u16 queues[0]; | 718 | u16 queues[0]; |
719 | }; | 719 | }; |
720 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) | 720 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
721 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ | 721 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
722 | / sizeof(u16)) | 722 | - sizeof(struct xps_map)) / sizeof(u16)) |
723 | 723 | ||
724 | /* | 724 | /* |
725 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | 725 | * This structure holds all XPS maps for device. Maps are indexed by CPU. |
@@ -881,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
881 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, | 881 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
882 | * int max_tx_rate); | 882 | * int max_tx_rate); |
883 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); | 883 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
884 | * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); | ||
884 | * int (*ndo_get_vf_config)(struct net_device *dev, | 885 | * int (*ndo_get_vf_config)(struct net_device *dev, |
885 | * int vf, struct ifla_vf_info *ivf); | 886 | * int vf, struct ifla_vf_info *ivf); |
886 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); | 887 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
@@ -1054,6 +1055,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
1054 | * This function is used to pass protocol port error state information | 1055 | * This function is used to pass protocol port error state information |
1055 | * to the switch driver. The switch driver can react to the proto_down | 1056 | * to the switch driver. The switch driver can react to the proto_down |
1056 | * by doing a phys down on the associated switch port. | 1057 | * by doing a phys down on the associated switch port. |
1058 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); | ||
1059 | * This function is used to get egress tunnel information for given skb. | ||
1060 | * This is useful for retrieving outer tunnel header parameters while | ||
1061 | * sampling packet. | ||
1057 | * | 1062 | * |
1058 | */ | 1063 | */ |
1059 | struct net_device_ops { | 1064 | struct net_device_ops { |
@@ -1109,6 +1114,8 @@ struct net_device_ops { | |||
1109 | int max_tx_rate); | 1114 | int max_tx_rate); |
1110 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, | 1115 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1111 | int vf, bool setting); | 1116 | int vf, bool setting); |
1117 | int (*ndo_set_vf_trust)(struct net_device *dev, | ||
1118 | int vf, bool setting); | ||
1112 | int (*ndo_get_vf_config)(struct net_device *dev, | 1119 | int (*ndo_get_vf_config)(struct net_device *dev, |
1113 | int vf, | 1120 | int vf, |
1114 | struct ifla_vf_info *ivf); | 1121 | struct ifla_vf_info *ivf); |
@@ -1227,6 +1234,8 @@ struct net_device_ops { | |||
1227 | int (*ndo_get_iflink)(const struct net_device *dev); | 1234 | int (*ndo_get_iflink)(const struct net_device *dev); |
1228 | int (*ndo_change_proto_down)(struct net_device *dev, | 1235 | int (*ndo_change_proto_down)(struct net_device *dev, |
1229 | bool proto_down); | 1236 | bool proto_down); |
1237 | int (*ndo_fill_metadata_dst)(struct net_device *dev, | ||
1238 | struct sk_buff *skb); | ||
1230 | }; | 1239 | }; |
1231 | 1240 | ||
1232 | /** | 1241 | /** |
@@ -1258,9 +1267,10 @@ struct net_device_ops { | |||
1258 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address | 1267 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address |
1259 | * change when it's running | 1268 | * change when it's running |
1260 | * @IFF_MACVLAN: Macvlan device | 1269 | * @IFF_MACVLAN: Macvlan device |
1261 | * @IFF_VRF_MASTER: device is a VRF master | 1270 | * @IFF_L3MDEV_MASTER: device is an L3 master device |
1262 | * @IFF_NO_QUEUE: device can run without qdisc attached | 1271 | * @IFF_NO_QUEUE: device can run without qdisc attached |
1263 | * @IFF_OPENVSWITCH: device is a Open vSwitch master | 1272 | * @IFF_OPENVSWITCH: device is a Open vSwitch master |
1273 | * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device | ||
1264 | */ | 1274 | */ |
1265 | enum netdev_priv_flags { | 1275 | enum netdev_priv_flags { |
1266 | IFF_802_1Q_VLAN = 1<<0, | 1276 | IFF_802_1Q_VLAN = 1<<0, |
@@ -1283,9 +1293,10 @@ enum netdev_priv_flags { | |||
1283 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, | 1293 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, |
1284 | IFF_IPVLAN_MASTER = 1<<18, | 1294 | IFF_IPVLAN_MASTER = 1<<18, |
1285 | IFF_IPVLAN_SLAVE = 1<<19, | 1295 | IFF_IPVLAN_SLAVE = 1<<19, |
1286 | IFF_VRF_MASTER = 1<<20, | 1296 | IFF_L3MDEV_MASTER = 1<<20, |
1287 | IFF_NO_QUEUE = 1<<21, | 1297 | IFF_NO_QUEUE = 1<<21, |
1288 | IFF_OPENVSWITCH = 1<<22, | 1298 | IFF_OPENVSWITCH = 1<<22, |
1299 | IFF_L3MDEV_SLAVE = 1<<23, | ||
1289 | }; | 1300 | }; |
1290 | 1301 | ||
1291 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1302 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1308,9 +1319,10 @@ enum netdev_priv_flags { | |||
1308 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM | 1319 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
1309 | #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER | 1320 | #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER |
1310 | #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE | 1321 | #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE |
1311 | #define IFF_VRF_MASTER IFF_VRF_MASTER | 1322 | #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER |
1312 | #define IFF_NO_QUEUE IFF_NO_QUEUE | 1323 | #define IFF_NO_QUEUE IFF_NO_QUEUE |
1313 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH | 1324 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH |
1325 | #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE | ||
1314 | 1326 | ||
1315 | /** | 1327 | /** |
1316 | * struct net_device - The DEVICE structure. | 1328 | * struct net_device - The DEVICE structure. |
@@ -1427,7 +1439,6 @@ enum netdev_priv_flags { | |||
1427 | * @dn_ptr: DECnet specific data | 1439 | * @dn_ptr: DECnet specific data |
1428 | * @ip6_ptr: IPv6 specific data | 1440 | * @ip6_ptr: IPv6 specific data |
1429 | * @ax25_ptr: AX.25 specific data | 1441 | * @ax25_ptr: AX.25 specific data |
1430 | * @vrf_ptr: VRF specific data | ||
1431 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering | 1442 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering |
1432 | * | 1443 | * |
1433 | * @last_rx: Time of last Rx | 1444 | * @last_rx: Time of last Rx |
@@ -1587,6 +1598,9 @@ struct net_device { | |||
1587 | #ifdef CONFIG_NET_SWITCHDEV | 1598 | #ifdef CONFIG_NET_SWITCHDEV |
1588 | const struct switchdev_ops *switchdev_ops; | 1599 | const struct switchdev_ops *switchdev_ops; |
1589 | #endif | 1600 | #endif |
1601 | #ifdef CONFIG_NET_L3_MASTER_DEV | ||
1602 | const struct l3mdev_ops *l3mdev_ops; | ||
1603 | #endif | ||
1590 | 1604 | ||
1591 | const struct header_ops *header_ops; | 1605 | const struct header_ops *header_ops; |
1592 | 1606 | ||
@@ -1646,7 +1660,6 @@ struct net_device { | |||
1646 | struct dn_dev __rcu *dn_ptr; | 1660 | struct dn_dev __rcu *dn_ptr; |
1647 | struct inet6_dev __rcu *ip6_ptr; | 1661 | struct inet6_dev __rcu *ip6_ptr; |
1648 | void *ax25_ptr; | 1662 | void *ax25_ptr; |
1649 | struct net_vrf_dev __rcu *vrf_ptr; | ||
1650 | struct wireless_dev *ieee80211_ptr; | 1663 | struct wireless_dev *ieee80211_ptr; |
1651 | struct wpan_dev *ieee802154_ptr; | 1664 | struct wpan_dev *ieee802154_ptr; |
1652 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) | 1665 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) |
@@ -2055,20 +2068,23 @@ struct pcpu_sw_netstats { | |||
2055 | struct u64_stats_sync syncp; | 2068 | struct u64_stats_sync syncp; |
2056 | }; | 2069 | }; |
2057 | 2070 | ||
2058 | #define netdev_alloc_pcpu_stats(type) \ | 2071 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
2059 | ({ \ | 2072 | ({ \ |
2060 | typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ | 2073 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ |
2061 | if (pcpu_stats) { \ | 2074 | if (pcpu_stats) { \ |
2062 | int __cpu; \ | 2075 | int __cpu; \ |
2063 | for_each_possible_cpu(__cpu) { \ | 2076 | for_each_possible_cpu(__cpu) { \ |
2064 | typeof(type) *stat; \ | 2077 | typeof(type) *stat; \ |
2065 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | 2078 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
2066 | u64_stats_init(&stat->syncp); \ | 2079 | u64_stats_init(&stat->syncp); \ |
2067 | } \ | 2080 | } \ |
2068 | } \ | 2081 | } \ |
2069 | pcpu_stats; \ | 2082 | pcpu_stats; \ |
2070 | }) | 2083 | }) |
2071 | 2084 | ||
2085 | #define netdev_alloc_pcpu_stats(type) \ | ||
2086 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL); | ||
2087 | |||
2072 | #include <linux/notifier.h> | 2088 | #include <linux/notifier.h> |
2073 | 2089 | ||
2074 | /* netdevice notifier chain. Please remember to update the rtnetlink | 2090 | /* netdevice notifier chain. Please remember to update the rtnetlink |
@@ -2103,6 +2119,7 @@ struct pcpu_sw_netstats { | |||
2103 | #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ | 2119 | #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ |
2104 | #define NETDEV_CHANGEINFODATA 0x0018 | 2120 | #define NETDEV_CHANGEINFODATA 0x0018 |
2105 | #define NETDEV_BONDING_INFO 0x0019 | 2121 | #define NETDEV_BONDING_INFO 0x0019 |
2122 | #define NETDEV_PRECHANGEUPPER 0x001A | ||
2106 | 2123 | ||
2107 | int register_netdevice_notifier(struct notifier_block *nb); | 2124 | int register_netdevice_notifier(struct notifier_block *nb); |
2108 | int unregister_netdevice_notifier(struct notifier_block *nb); | 2125 | int unregister_netdevice_notifier(struct notifier_block *nb); |
@@ -2203,6 +2220,7 @@ void dev_add_offload(struct packet_offload *po); | |||
2203 | void dev_remove_offload(struct packet_offload *po); | 2220 | void dev_remove_offload(struct packet_offload *po); |
2204 | 2221 | ||
2205 | int dev_get_iflink(const struct net_device *dev); | 2222 | int dev_get_iflink(const struct net_device *dev); |
2223 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); | ||
2206 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, | 2224 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
2207 | unsigned short mask); | 2225 | unsigned short mask); |
2208 | struct net_device *dev_get_by_name(struct net *net, const char *name); | 2226 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
@@ -2213,12 +2231,8 @@ int dev_open(struct net_device *dev); | |||
2213 | int dev_close(struct net_device *dev); | 2231 | int dev_close(struct net_device *dev); |
2214 | int dev_close_many(struct list_head *head, bool unlink); | 2232 | int dev_close_many(struct list_head *head, bool unlink); |
2215 | void dev_disable_lro(struct net_device *dev); | 2233 | void dev_disable_lro(struct net_device *dev); |
2216 | int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb); | 2234 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
2217 | int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb); | 2235 | int dev_queue_xmit(struct sk_buff *skb); |
2218 | static inline int dev_queue_xmit(struct sk_buff *skb) | ||
2219 | { | ||
2220 | return dev_queue_xmit_sk(skb->sk, skb); | ||
2221 | } | ||
2222 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); | 2236 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); |
2223 | int register_netdevice(struct net_device *dev); | 2237 | int register_netdevice(struct net_device *dev); |
2224 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | 2238 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
@@ -2990,11 +3004,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) | |||
2990 | 3004 | ||
2991 | int netif_rx(struct sk_buff *skb); | 3005 | int netif_rx(struct sk_buff *skb); |
2992 | int netif_rx_ni(struct sk_buff *skb); | 3006 | int netif_rx_ni(struct sk_buff *skb); |
2993 | int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb); | 3007 | int netif_receive_skb(struct sk_buff *skb); |
2994 | static inline int netif_receive_skb(struct sk_buff *skb) | ||
2995 | { | ||
2996 | return netif_receive_skb_sk(skb->sk, skb); | ||
2997 | } | ||
2998 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); | 3008 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
2999 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | 3009 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); |
3000 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | 3010 | struct sk_buff *napi_get_frags(struct napi_struct *napi); |
@@ -3832,9 +3842,14 @@ static inline bool netif_supports_nofcs(struct net_device *dev) | |||
3832 | return dev->priv_flags & IFF_SUPP_NOFCS; | 3842 | return dev->priv_flags & IFF_SUPP_NOFCS; |
3833 | } | 3843 | } |
3834 | 3844 | ||
3835 | static inline bool netif_is_vrf(const struct net_device *dev) | 3845 | static inline bool netif_is_l3_master(const struct net_device *dev) |
3846 | { | ||
3847 | return dev->priv_flags & IFF_L3MDEV_MASTER; | ||
3848 | } | ||
3849 | |||
3850 | static inline bool netif_is_l3_slave(const struct net_device *dev) | ||
3836 | { | 3851 | { |
3837 | return dev->priv_flags & IFF_VRF_MASTER; | 3852 | return dev->priv_flags & IFF_L3MDEV_SLAVE; |
3838 | } | 3853 | } |
3839 | 3854 | ||
3840 | static inline bool netif_is_bridge_master(const struct net_device *dev) | 3855 | static inline bool netif_is_bridge_master(const struct net_device *dev) |
@@ -3842,30 +3857,14 @@ static inline bool netif_is_bridge_master(const struct net_device *dev) | |||
3842 | return dev->priv_flags & IFF_EBRIDGE; | 3857 | return dev->priv_flags & IFF_EBRIDGE; |
3843 | } | 3858 | } |
3844 | 3859 | ||
3845 | static inline bool netif_is_ovs_master(const struct net_device *dev) | 3860 | static inline bool netif_is_bridge_port(const struct net_device *dev) |
3846 | { | 3861 | { |
3847 | return dev->priv_flags & IFF_OPENVSWITCH; | 3862 | return dev->priv_flags & IFF_BRIDGE_PORT; |
3848 | } | 3863 | } |
3849 | 3864 | ||
3850 | static inline bool netif_index_is_vrf(struct net *net, int ifindex) | 3865 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
3851 | { | 3866 | { |
3852 | bool rc = false; | 3867 | return dev->priv_flags & IFF_OPENVSWITCH; |
3853 | |||
3854 | #if IS_ENABLED(CONFIG_NET_VRF) | ||
3855 | struct net_device *dev; | ||
3856 | |||
3857 | if (ifindex == 0) | ||
3858 | return false; | ||
3859 | |||
3860 | rcu_read_lock(); | ||
3861 | |||
3862 | dev = dev_get_by_index_rcu(net, ifindex); | ||
3863 | if (dev) | ||
3864 | rc = netif_is_vrf(dev); | ||
3865 | |||
3866 | rcu_read_unlock(); | ||
3867 | #endif | ||
3868 | return rc; | ||
3869 | } | 3868 | } |
3870 | 3869 | ||
3871 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ | 3870 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 36a652531791..0ad556726181 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -54,8 +54,9 @@ struct nf_hook_state { | |||
54 | struct net_device *in; | 54 | struct net_device *in; |
55 | struct net_device *out; | 55 | struct net_device *out; |
56 | struct sock *sk; | 56 | struct sock *sk; |
57 | struct net *net; | ||
57 | struct list_head *hook_list; | 58 | struct list_head *hook_list; |
58 | int (*okfn)(struct sock *, struct sk_buff *); | 59 | int (*okfn)(struct net *, struct sock *, struct sk_buff *); |
59 | }; | 60 | }; |
60 | 61 | ||
61 | static inline void nf_hook_state_init(struct nf_hook_state *p, | 62 | static inline void nf_hook_state_init(struct nf_hook_state *p, |
@@ -65,7 +66,8 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, | |||
65 | struct net_device *indev, | 66 | struct net_device *indev, |
66 | struct net_device *outdev, | 67 | struct net_device *outdev, |
67 | struct sock *sk, | 68 | struct sock *sk, |
68 | int (*okfn)(struct sock *, struct sk_buff *)) | 69 | struct net *net, |
70 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | ||
69 | { | 71 | { |
70 | p->hook = hook; | 72 | p->hook = hook; |
71 | p->thresh = thresh; | 73 | p->thresh = thresh; |
@@ -73,11 +75,12 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, | |||
73 | p->in = indev; | 75 | p->in = indev; |
74 | p->out = outdev; | 76 | p->out = outdev; |
75 | p->sk = sk; | 77 | p->sk = sk; |
78 | p->net = net; | ||
76 | p->hook_list = hook_list; | 79 | p->hook_list = hook_list; |
77 | p->okfn = okfn; | 80 | p->okfn = okfn; |
78 | } | 81 | } |
79 | 82 | ||
80 | typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, | 83 | typedef unsigned int nf_hookfn(void *priv, |
81 | struct sk_buff *skb, | 84 | struct sk_buff *skb, |
82 | const struct nf_hook_state *state); | 85 | const struct nf_hook_state *state); |
83 | 86 | ||
@@ -87,7 +90,6 @@ struct nf_hook_ops { | |||
87 | /* User fills in from here down. */ | 90 | /* User fills in from here down. */ |
88 | nf_hookfn *hook; | 91 | nf_hookfn *hook; |
89 | struct net_device *dev; | 92 | struct net_device *dev; |
90 | struct module *owner; | ||
91 | void *priv; | 93 | void *priv; |
92 | u_int8_t pf; | 94 | u_int8_t pf; |
93 | unsigned int hooknum; | 95 | unsigned int hooknum; |
@@ -167,32 +169,32 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); | |||
167 | * value indicates the packet has been consumed by the hook. | 169 | * value indicates the packet has been consumed by the hook. |
168 | */ | 170 | */ |
169 | static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | 171 | static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, |
172 | struct net *net, | ||
170 | struct sock *sk, | 173 | struct sock *sk, |
171 | struct sk_buff *skb, | 174 | struct sk_buff *skb, |
172 | struct net_device *indev, | 175 | struct net_device *indev, |
173 | struct net_device *outdev, | 176 | struct net_device *outdev, |
174 | int (*okfn)(struct sock *, struct sk_buff *), | 177 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), |
175 | int thresh) | 178 | int thresh) |
176 | { | 179 | { |
177 | struct net *net = dev_net(indev ? indev : outdev); | ||
178 | struct list_head *hook_list = &net->nf.hooks[pf][hook]; | 180 | struct list_head *hook_list = &net->nf.hooks[pf][hook]; |
179 | 181 | ||
180 | if (nf_hook_list_active(hook_list, pf, hook)) { | 182 | if (nf_hook_list_active(hook_list, pf, hook)) { |
181 | struct nf_hook_state state; | 183 | struct nf_hook_state state; |
182 | 184 | ||
183 | nf_hook_state_init(&state, hook_list, hook, thresh, | 185 | nf_hook_state_init(&state, hook_list, hook, thresh, |
184 | pf, indev, outdev, sk, okfn); | 186 | pf, indev, outdev, sk, net, okfn); |
185 | return nf_hook_slow(skb, &state); | 187 | return nf_hook_slow(skb, &state); |
186 | } | 188 | } |
187 | return 1; | 189 | return 1; |
188 | } | 190 | } |
189 | 191 | ||
190 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, | 192 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, |
191 | struct sk_buff *skb, struct net_device *indev, | 193 | struct sock *sk, struct sk_buff *skb, |
192 | struct net_device *outdev, | 194 | struct net_device *indev, struct net_device *outdev, |
193 | int (*okfn)(struct sock *, struct sk_buff *)) | 195 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
194 | { | 196 | { |
195 | return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); | 197 | return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); |
196 | } | 198 | } |
197 | 199 | ||
198 | /* Activate hook; either okfn or kfree_skb called, unless a hook | 200 | /* Activate hook; either okfn or kfree_skb called, unless a hook |
@@ -213,36 +215,38 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, | |||
213 | */ | 215 | */ |
214 | 216 | ||
215 | static inline int | 217 | static inline int |
216 | NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, | 218 | NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, |
217 | struct sk_buff *skb, struct net_device *in, | 219 | struct sk_buff *skb, struct net_device *in, |
218 | struct net_device *out, | 220 | struct net_device *out, |
219 | int (*okfn)(struct sock *, struct sk_buff *), int thresh) | 221 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), |
222 | int thresh) | ||
220 | { | 223 | { |
221 | int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); | 224 | int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); |
222 | if (ret == 1) | 225 | if (ret == 1) |
223 | ret = okfn(sk, skb); | 226 | ret = okfn(net, sk, skb); |
224 | return ret; | 227 | return ret; |
225 | } | 228 | } |
226 | 229 | ||
227 | static inline int | 230 | static inline int |
228 | NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, | 231 | NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, |
229 | struct sk_buff *skb, struct net_device *in, struct net_device *out, | 232 | struct sk_buff *skb, struct net_device *in, struct net_device *out, |
230 | int (*okfn)(struct sock *, struct sk_buff *), bool cond) | 233 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), |
234 | bool cond) | ||
231 | { | 235 | { |
232 | int ret; | 236 | int ret; |
233 | 237 | ||
234 | if (!cond || | 238 | if (!cond || |
235 | ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) | 239 | ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) |
236 | ret = okfn(sk, skb); | 240 | ret = okfn(net, sk, skb); |
237 | return ret; | 241 | return ret; |
238 | } | 242 | } |
239 | 243 | ||
240 | static inline int | 244 | static inline int |
241 | NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, | 245 | NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, |
242 | struct net_device *in, struct net_device *out, | 246 | struct net_device *in, struct net_device *out, |
243 | int (*okfn)(struct sock *, struct sk_buff *)) | 247 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
244 | { | 248 | { |
245 | return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); | 249 | return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); |
246 | } | 250 | } |
247 | 251 | ||
248 | /* Call setsockopt() */ | 252 | /* Call setsockopt() */ |
@@ -278,7 +282,7 @@ struct nf_afinfo { | |||
278 | struct flowi *fl, bool strict); | 282 | struct flowi *fl, bool strict); |
279 | void (*saveroute)(const struct sk_buff *skb, | 283 | void (*saveroute)(const struct sk_buff *skb, |
280 | struct nf_queue_entry *entry); | 284 | struct nf_queue_entry *entry); |
281 | int (*reroute)(struct sk_buff *skb, | 285 | int (*reroute)(struct net *net, struct sk_buff *skb, |
282 | const struct nf_queue_entry *entry); | 286 | const struct nf_queue_entry *entry); |
283 | int route_key_size; | 287 | int route_key_size; |
284 | }; | 288 | }; |
@@ -342,21 +346,27 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) | |||
342 | } | 346 | } |
343 | 347 | ||
344 | #else /* !CONFIG_NETFILTER */ | 348 | #else /* !CONFIG_NETFILTER */ |
345 | #define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) | 349 | static inline int |
346 | #define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) | 350 | NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, |
347 | static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | 351 | struct sk_buff *skb, struct net_device *in, struct net_device *out, |
348 | struct sock *sk, | 352 | int (*okfn)(struct net *, struct sock *, struct sk_buff *), |
349 | struct sk_buff *skb, | 353 | bool cond) |
350 | struct net_device *indev, | 354 | { |
351 | struct net_device *outdev, | 355 | return okfn(net, sk, skb); |
352 | int (*okfn)(struct sock *sk, struct sk_buff *), int thresh) | 356 | } |
357 | |||
358 | static inline int | ||
359 | NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, | ||
360 | struct sk_buff *skb, struct net_device *in, struct net_device *out, | ||
361 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | ||
353 | { | 362 | { |
354 | return okfn(sk, skb); | 363 | return okfn(net, sk, skb); |
355 | } | 364 | } |
356 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, | 365 | |
357 | struct sk_buff *skb, struct net_device *indev, | 366 | static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, |
358 | struct net_device *outdev, | 367 | struct sock *sk, struct sk_buff *skb, |
359 | int (*okfn)(struct sock *, struct sk_buff *)) | 368 | struct net_device *indev, struct net_device *outdev, |
369 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | ||
360 | { | 370 | { |
361 | return 1; | 371 | return 1; |
362 | } | 372 | } |
@@ -373,24 +383,28 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) | |||
373 | extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; | 383 | extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; |
374 | void nf_ct_attach(struct sk_buff *, const struct sk_buff *); | 384 | void nf_ct_attach(struct sk_buff *, const struct sk_buff *); |
375 | extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; | 385 | extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; |
386 | #else | ||
387 | static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} | ||
388 | #endif | ||
376 | 389 | ||
377 | struct nf_conn; | 390 | struct nf_conn; |
378 | enum ip_conntrack_info; | 391 | enum ip_conntrack_info; |
379 | struct nlattr; | 392 | struct nlattr; |
380 | 393 | ||
381 | struct nfq_ct_hook { | 394 | struct nfnl_ct_hook { |
395 | struct nf_conn *(*get_ct)(const struct sk_buff *skb, | ||
396 | enum ip_conntrack_info *ctinfo); | ||
382 | size_t (*build_size)(const struct nf_conn *ct); | 397 | size_t (*build_size)(const struct nf_conn *ct); |
383 | int (*build)(struct sk_buff *skb, struct nf_conn *ct); | 398 | int (*build)(struct sk_buff *skb, struct nf_conn *ct, |
399 | enum ip_conntrack_info ctinfo, | ||
400 | u_int16_t ct_attr, u_int16_t ct_info_attr); | ||
384 | int (*parse)(const struct nlattr *attr, struct nf_conn *ct); | 401 | int (*parse)(const struct nlattr *attr, struct nf_conn *ct); |
385 | int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, | 402 | int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, |
386 | u32 portid, u32 report); | 403 | u32 portid, u32 report); |
387 | void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, | 404 | void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, |
388 | enum ip_conntrack_info ctinfo, s32 off); | 405 | enum ip_conntrack_info ctinfo, s32 off); |
389 | }; | 406 | }; |
390 | extern struct nfq_ct_hook __rcu *nfq_ct_hook; | 407 | extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; |
391 | #else | ||
392 | static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} | ||
393 | #endif | ||
394 | 408 | ||
395 | /** | 409 | /** |
396 | * nf_skb_duplicated - TEE target has sent a packet | 410 | * nf_skb_duplicated - TEE target has sent a packet |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 48bb01edcf30..0e1f433cc4b7 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members); | |||
421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); | 421 | extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); |
422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); | 422 | extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); |
423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], | 423 | extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], |
424 | size_t len); | 424 | size_t len, size_t align); |
425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], | 425 | extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], |
426 | struct ip_set_ext *ext); | 426 | struct ip_set_ext *ext); |
427 | 427 | ||
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index e955d4730625..249d1bb01e03 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
@@ -45,11 +45,11 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, | |||
45 | void nfnl_lock(__u8 subsys_id); | 45 | void nfnl_lock(__u8 subsys_id); |
46 | void nfnl_unlock(__u8 subsys_id); | 46 | void nfnl_unlock(__u8 subsys_id); |
47 | #ifdef CONFIG_PROVE_LOCKING | 47 | #ifdef CONFIG_PROVE_LOCKING |
48 | int lockdep_nfnl_is_held(__u8 subsys_id); | 48 | bool lockdep_nfnl_is_held(__u8 subsys_id); |
49 | #else | 49 | #else |
50 | static inline int lockdep_nfnl_is_held(__u8 subsys_id) | 50 | static inline bool lockdep_nfnl_is_held(__u8 subsys_id) |
51 | { | 51 | { |
52 | return 1; | 52 | return true; |
53 | } | 53 | } |
54 | #endif /* CONFIG_PROVE_LOCKING */ | 54 | #endif /* CONFIG_PROVE_LOCKING */ |
55 | 55 | ||
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b006b719183f..c5577410c25d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -13,6 +13,7 @@ | |||
13 | * @target: the target extension | 13 | * @target: the target extension |
14 | * @matchinfo: per-match data | 14 | * @matchinfo: per-match data |
15 | * @targetinfo: per-target data | 15 | * @targetinfo: per-target data |
16 | * @net network namespace through which the action was invoked | ||
16 | * @in: input netdevice | 17 | * @in: input netdevice |
17 | * @out: output netdevice | 18 | * @out: output netdevice |
18 | * @fragoff: packet is a fragment, this is the data offset | 19 | * @fragoff: packet is a fragment, this is the data offset |
@@ -24,7 +25,6 @@ | |||
24 | * Fields written to by extensions: | 25 | * Fields written to by extensions: |
25 | * | 26 | * |
26 | * @hotdrop: drop packet if we had inspection problems | 27 | * @hotdrop: drop packet if we had inspection problems |
27 | * Network namespace obtainable using dev_net(in/out) | ||
28 | */ | 28 | */ |
29 | struct xt_action_param { | 29 | struct xt_action_param { |
30 | union { | 30 | union { |
@@ -34,6 +34,7 @@ struct xt_action_param { | |||
34 | union { | 34 | union { |
35 | const void *matchinfo, *targinfo; | 35 | const void *matchinfo, *targinfo; |
36 | }; | 36 | }; |
37 | struct net *net; | ||
37 | const struct net_device *in, *out; | 38 | const struct net_device *in, *out; |
38 | int fragoff; | 39 | int fragoff; |
39 | unsigned int thoff; | 40 | unsigned int thoff; |
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index c22a7fb8d0df..6f074db2f23d 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h | |||
@@ -53,7 +53,6 @@ extern struct xt_table *arpt_register_table(struct net *net, | |||
53 | const struct arpt_replace *repl); | 53 | const struct arpt_replace *repl); |
54 | extern void arpt_unregister_table(struct xt_table *table); | 54 | extern void arpt_unregister_table(struct xt_table *table); |
55 | extern unsigned int arpt_do_table(struct sk_buff *skb, | 55 | extern unsigned int arpt_do_table(struct sk_buff *skb, |
56 | unsigned int hook, | ||
57 | const struct nf_hook_state *state, | 56 | const struct nf_hook_state *state, |
58 | struct xt_table *table); | 57 | struct xt_table *table); |
59 | 58 | ||
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 2437b8a5d7a9..2ed40c402b5e 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h | |||
@@ -17,7 +17,7 @@ enum nf_br_hook_priorities { | |||
17 | 17 | ||
18 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 18 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
19 | 19 | ||
20 | int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); | 20 | int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); |
21 | 21 | ||
22 | static inline void br_drop_fake_rtable(struct sk_buff *skb) | 22 | static inline void br_drop_fake_rtable(struct sk_buff *skb) |
23 | { | 23 | { |
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 8ca6d6464ea3..2ea517c7c6b9 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h | |||
@@ -111,9 +111,9 @@ struct ebt_table { | |||
111 | extern struct ebt_table *ebt_register_table(struct net *net, | 111 | extern struct ebt_table *ebt_register_table(struct net *net, |
112 | const struct ebt_table *table); | 112 | const struct ebt_table *table); |
113 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table); | 113 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table); |
114 | extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, | 114 | extern unsigned int ebt_do_table(struct sk_buff *skb, |
115 | const struct net_device *in, const struct net_device *out, | 115 | const struct nf_hook_state *state, |
116 | struct ebt_table *table); | 116 | struct ebt_table *table); |
117 | 117 | ||
118 | /* Used in the kernel match() functions */ | 118 | /* Used in the kernel match() functions */ |
119 | #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) | 119 | #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) |
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index cb0727fe2b3d..5fcd375ef175 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h | |||
@@ -5,10 +5,13 @@ | |||
5 | #include <linux/netdevice.h> | 5 | #include <linux/netdevice.h> |
6 | 6 | ||
7 | #ifdef CONFIG_NETFILTER_INGRESS | 7 | #ifdef CONFIG_NETFILTER_INGRESS |
8 | static inline int nf_hook_ingress_active(struct sk_buff *skb) | 8 | static inline bool nf_hook_ingress_active(const struct sk_buff *skb) |
9 | { | 9 | { |
10 | return nf_hook_list_active(&skb->dev->nf_hooks_ingress, | 10 | #ifdef HAVE_JUMP_LABEL |
11 | NFPROTO_NETDEV, NF_NETDEV_INGRESS); | 11 | if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) |
12 | return false; | ||
13 | #endif | ||
14 | return !list_empty(&skb->dev->nf_hooks_ingress); | ||
12 | } | 15 | } |
13 | 16 | ||
14 | static inline int nf_hook_ingress(struct sk_buff *skb) | 17 | static inline int nf_hook_ingress(struct sk_buff *skb) |
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb) | |||
16 | struct nf_hook_state state; | 19 | struct nf_hook_state state; |
17 | 20 | ||
18 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, | 21 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, |
19 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, | 22 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, |
20 | skb->dev, NULL, NULL); | 23 | skb->dev, NULL, NULL, dev_net(skb->dev), NULL); |
21 | return nf_hook_slow(skb, &state); | 24 | return nf_hook_slow(skb, &state); |
22 | } | 25 | } |
23 | 26 | ||
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 6e4591bb54d4..98c03b2462b5 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #include <uapi/linux/netfilter_ipv4.h> | 7 | #include <uapi/linux/netfilter_ipv4.h> |
8 | 8 | ||
9 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); | 9 | int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); |
10 | __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | 10 | __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, |
11 | unsigned int dataoff, u_int8_t protocol); | 11 | unsigned int dataoff, u_int8_t protocol); |
12 | #endif /*__LINUX_IP_NETFILTER_H*/ | 12 | #endif /*__LINUX_IP_NETFILTER_H*/ |
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 4073510da485..aa598f942c01 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h | |||
@@ -64,7 +64,6 @@ struct ipt_error { | |||
64 | 64 | ||
65 | extern void *ipt_alloc_initial_table(const struct xt_table *); | 65 | extern void *ipt_alloc_initial_table(const struct xt_table *); |
66 | extern unsigned int ipt_do_table(struct sk_buff *skb, | 66 | extern unsigned int ipt_do_table(struct sk_buff *skb, |
67 | unsigned int hook, | ||
68 | const struct nf_hook_state *state, | 67 | const struct nf_hook_state *state, |
69 | struct xt_table *table); | 68 | struct xt_table *table); |
70 | 69 | ||
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 771574677e83..47c6b04c28c0 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h | |||
@@ -17,12 +17,12 @@ struct nf_ipv6_ops { | |||
17 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, | 17 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, |
18 | const struct net_device *dev, int strict); | 18 | const struct net_device *dev, int strict); |
19 | void (*route_input)(struct sk_buff *skb); | 19 | void (*route_input)(struct sk_buff *skb); |
20 | int (*fragment)(struct sock *sk, struct sk_buff *skb, | 20 | int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, |
21 | int (*output)(struct sock *, struct sk_buff *)); | 21 | int (*output)(struct net *, struct sock *, struct sk_buff *)); |
22 | }; | 22 | }; |
23 | 23 | ||
24 | #ifdef CONFIG_NETFILTER | 24 | #ifdef CONFIG_NETFILTER |
25 | int ip6_route_me_harder(struct sk_buff *skb); | 25 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb); |
26 | __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, | 26 | __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, |
27 | unsigned int dataoff, u_int8_t protocol); | 27 | unsigned int dataoff, u_int8_t protocol); |
28 | 28 | ||
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index b40d2b635778..0f76e5c674f9 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h | |||
@@ -30,7 +30,6 @@ extern struct xt_table *ip6t_register_table(struct net *net, | |||
30 | const struct ip6t_replace *repl); | 30 | const struct ip6t_replace *repl); |
31 | extern void ip6t_unregister_table(struct net *net, struct xt_table *table); | 31 | extern void ip6t_unregister_table(struct net *net, struct xt_table *table); |
32 | extern unsigned int ip6t_do_table(struct sk_buff *skb, | 32 | extern unsigned int ip6t_do_table(struct sk_buff *skb, |
33 | unsigned int hook, | ||
34 | const struct nf_hook_state *state, | 33 | const struct nf_hook_state *state, |
35 | struct xt_table *table); | 34 | struct xt_table *table); |
36 | 35 | ||
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 00121f298269..e7e78537aea2 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -130,6 +130,7 @@ enum nfs_opnum4 { | |||
130 | OP_READ_PLUS = 68, | 130 | OP_READ_PLUS = 68, |
131 | OP_SEEK = 69, | 131 | OP_SEEK = 69, |
132 | OP_WRITE_SAME = 70, | 132 | OP_WRITE_SAME = 70, |
133 | OP_CLONE = 71, | ||
133 | 134 | ||
134 | OP_ILLEGAL = 10044, | 135 | OP_ILLEGAL = 10044, |
135 | }; | 136 | }; |
@@ -421,6 +422,7 @@ enum lock_type4 { | |||
421 | #define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) | 422 | #define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) |
422 | #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) | 423 | #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) |
423 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) | 424 | #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) |
425 | #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) | ||
424 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) | 426 | #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) |
425 | 427 | ||
426 | /* MDS threshold bitmap bits */ | 428 | /* MDS threshold bitmap bits */ |
@@ -501,6 +503,7 @@ enum { | |||
501 | NFSPROC4_CLNT_ALLOCATE, | 503 | NFSPROC4_CLNT_ALLOCATE, |
502 | NFSPROC4_CLNT_DEALLOCATE, | 504 | NFSPROC4_CLNT_DEALLOCATE, |
503 | NFSPROC4_CLNT_LAYOUTSTATS, | 505 | NFSPROC4_CLNT_LAYOUTSTATS, |
506 | NFSPROC4_CLNT_CLONE, | ||
504 | }; | 507 | }; |
505 | 508 | ||
506 | /* nfs41 types */ | 509 | /* nfs41 types */ |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 570a7df2775b..2469ab0bb3a1 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -147,6 +147,7 @@ struct nfs_server { | |||
147 | unsigned int acdirmax; | 147 | unsigned int acdirmax; |
148 | unsigned int namelen; | 148 | unsigned int namelen; |
149 | unsigned int options; /* extra options enabled by mount */ | 149 | unsigned int options; /* extra options enabled by mount */ |
150 | unsigned int clone_blksize; /* granularity of a CLONE operation */ | ||
150 | #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ | 151 | #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ |
151 | #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ | 152 | #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ |
152 | 153 | ||
@@ -243,5 +244,6 @@ struct nfs_server { | |||
243 | #define NFS_CAP_ALLOCATE (1U << 20) | 244 | #define NFS_CAP_ALLOCATE (1U << 20) |
244 | #define NFS_CAP_DEALLOCATE (1U << 21) | 245 | #define NFS_CAP_DEALLOCATE (1U << 21) |
245 | #define NFS_CAP_LAYOUTSTATS (1U << 22) | 246 | #define NFS_CAP_LAYOUTSTATS (1U << 22) |
247 | #define NFS_CAP_CLONE (1U << 23) | ||
246 | 248 | ||
247 | #endif | 249 | #endif |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 52faf7e96c65..570d630f98ae 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -141,6 +141,7 @@ struct nfs_fsinfo { | |||
141 | __u32 lease_time; /* in seconds */ | 141 | __u32 lease_time; /* in seconds */ |
142 | __u32 layouttype; /* supported pnfs layout driver */ | 142 | __u32 layouttype; /* supported pnfs layout driver */ |
143 | __u32 blksize; /* preferred pnfs io block size */ | 143 | __u32 blksize; /* preferred pnfs io block size */ |
144 | __u32 clone_blksize; /* granularity of a CLONE operation */ | ||
144 | }; | 145 | }; |
145 | 146 | ||
146 | struct nfs_fsstat { | 147 | struct nfs_fsstat { |
@@ -359,6 +360,25 @@ struct nfs42_layoutstat_data { | |||
359 | struct nfs42_layoutstat_res res; | 360 | struct nfs42_layoutstat_res res; |
360 | }; | 361 | }; |
361 | 362 | ||
363 | struct nfs42_clone_args { | ||
364 | struct nfs4_sequence_args seq_args; | ||
365 | struct nfs_fh *src_fh; | ||
366 | struct nfs_fh *dst_fh; | ||
367 | nfs4_stateid src_stateid; | ||
368 | nfs4_stateid dst_stateid; | ||
369 | __u64 src_offset; | ||
370 | __u64 dst_offset; | ||
371 | __u64 count; | ||
372 | const u32 *dst_bitmask; | ||
373 | }; | ||
374 | |||
375 | struct nfs42_clone_res { | ||
376 | struct nfs4_sequence_res seq_res; | ||
377 | unsigned int rpc_status; | ||
378 | struct nfs_fattr *dst_fattr; | ||
379 | const struct nfs_server *server; | ||
380 | }; | ||
381 | |||
362 | struct stateowner_id { | 382 | struct stateowner_id { |
363 | __u64 create_time; | 383 | __u64 create_time; |
364 | __u32 uniquifier; | 384 | __u32 uniquifier; |
@@ -528,7 +548,7 @@ struct nfs4_delegreturnargs { | |||
528 | struct nfs4_delegreturnres { | 548 | struct nfs4_delegreturnres { |
529 | struct nfs4_sequence_res seq_res; | 549 | struct nfs4_sequence_res seq_res; |
530 | struct nfs_fattr * fattr; | 550 | struct nfs_fattr * fattr; |
531 | const struct nfs_server *server; | 551 | struct nfs_server *server; |
532 | }; | 552 | }; |
533 | 553 | ||
534 | /* | 554 | /* |
@@ -601,7 +621,7 @@ struct nfs_removeargs { | |||
601 | 621 | ||
602 | struct nfs_removeres { | 622 | struct nfs_removeres { |
603 | struct nfs4_sequence_res seq_res; | 623 | struct nfs4_sequence_res seq_res; |
604 | const struct nfs_server *server; | 624 | struct nfs_server *server; |
605 | struct nfs_fattr *dir_attr; | 625 | struct nfs_fattr *dir_attr; |
606 | struct nfs4_change_info cinfo; | 626 | struct nfs4_change_info cinfo; |
607 | }; | 627 | }; |
@@ -619,7 +639,7 @@ struct nfs_renameargs { | |||
619 | 639 | ||
620 | struct nfs_renameres { | 640 | struct nfs_renameres { |
621 | struct nfs4_sequence_res seq_res; | 641 | struct nfs4_sequence_res seq_res; |
622 | const struct nfs_server *server; | 642 | struct nfs_server *server; |
623 | struct nfs4_change_info old_cinfo; | 643 | struct nfs4_change_info old_cinfo; |
624 | struct nfs_fattr *old_fattr; | 644 | struct nfs_fattr *old_fattr; |
625 | struct nfs4_change_info new_cinfo; | 645 | struct nfs4_change_info new_cinfo; |
@@ -685,7 +705,6 @@ struct nfs_setaclargs { | |||
685 | struct nfs4_sequence_args seq_args; | 705 | struct nfs4_sequence_args seq_args; |
686 | struct nfs_fh * fh; | 706 | struct nfs_fh * fh; |
687 | size_t acl_len; | 707 | size_t acl_len; |
688 | unsigned int acl_pgbase; | ||
689 | struct page ** acl_pages; | 708 | struct page ** acl_pages; |
690 | }; | 709 | }; |
691 | 710 | ||
@@ -697,7 +716,6 @@ struct nfs_getaclargs { | |||
697 | struct nfs4_sequence_args seq_args; | 716 | struct nfs4_sequence_args seq_args; |
698 | struct nfs_fh * fh; | 717 | struct nfs_fh * fh; |
699 | size_t acl_len; | 718 | size_t acl_len; |
700 | unsigned int acl_pgbase; | ||
701 | struct page ** acl_pages; | 719 | struct page ** acl_pages; |
702 | }; | 720 | }; |
703 | 721 | ||
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 78488e099ce7..7ec5b86735f3 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
@@ -73,6 +73,7 @@ extern int watchdog_user_enabled; | |||
73 | extern int watchdog_thresh; | 73 | extern int watchdog_thresh; |
74 | extern unsigned long *watchdog_cpumask_bits; | 74 | extern unsigned long *watchdog_cpumask_bits; |
75 | extern int sysctl_softlockup_all_cpu_backtrace; | 75 | extern int sysctl_softlockup_all_cpu_backtrace; |
76 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
76 | struct ctl_table; | 77 | struct ctl_table; |
77 | extern int proc_watchdog(struct ctl_table *, int , | 78 | extern int proc_watchdog(struct ctl_table *, int , |
78 | void __user *, size_t *, loff_t *); | 79 | void __user *, size_t *, loff_t *); |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b5812c395351..3af5f454c04a 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -15,10 +15,7 @@ | |||
15 | #ifndef _LINUX_NVME_H | 15 | #ifndef _LINUX_NVME_H |
16 | #define _LINUX_NVME_H | 16 | #define _LINUX_NVME_H |
17 | 17 | ||
18 | #include <uapi/linux/nvme.h> | 18 | #include <linux/types.h> |
19 | #include <linux/pci.h> | ||
20 | #include <linux/kref.h> | ||
21 | #include <linux/blk-mq.h> | ||
22 | 19 | ||
23 | struct nvme_bar { | 20 | struct nvme_bar { |
24 | __u64 cap; /* Controller Capabilities */ | 21 | __u64 cap; /* Controller Capabilities */ |
@@ -76,115 +73,528 @@ enum { | |||
76 | NVME_CSTS_SHST_MASK = 3 << 2, | 73 | NVME_CSTS_SHST_MASK = 3 << 2, |
77 | }; | 74 | }; |
78 | 75 | ||
79 | extern unsigned char nvme_io_timeout; | 76 | struct nvme_id_power_state { |
80 | #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) | 77 | __le16 max_power; /* centiwatts */ |
78 | __u8 rsvd2; | ||
79 | __u8 flags; | ||
80 | __le32 entry_lat; /* microseconds */ | ||
81 | __le32 exit_lat; /* microseconds */ | ||
82 | __u8 read_tput; | ||
83 | __u8 read_lat; | ||
84 | __u8 write_tput; | ||
85 | __u8 write_lat; | ||
86 | __le16 idle_power; | ||
87 | __u8 idle_scale; | ||
88 | __u8 rsvd19; | ||
89 | __le16 active_power; | ||
90 | __u8 active_work_scale; | ||
91 | __u8 rsvd23[9]; | ||
92 | }; | ||
81 | 93 | ||
82 | /* | 94 | enum { |
83 | * Represents an NVM Express device. Each nvme_dev is a PCI function. | 95 | NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, |
84 | */ | 96 | NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, |
85 | struct nvme_dev { | ||
86 | struct list_head node; | ||
87 | struct nvme_queue **queues; | ||
88 | struct request_queue *admin_q; | ||
89 | struct blk_mq_tag_set tagset; | ||
90 | struct blk_mq_tag_set admin_tagset; | ||
91 | u32 __iomem *dbs; | ||
92 | struct device *dev; | ||
93 | struct dma_pool *prp_page_pool; | ||
94 | struct dma_pool *prp_small_pool; | ||
95 | int instance; | ||
96 | unsigned queue_count; | ||
97 | unsigned online_queues; | ||
98 | unsigned max_qid; | ||
99 | int q_depth; | ||
100 | u32 db_stride; | ||
101 | u32 ctrl_config; | ||
102 | struct msix_entry *entry; | ||
103 | struct nvme_bar __iomem *bar; | ||
104 | struct list_head namespaces; | ||
105 | struct kref kref; | ||
106 | struct device *device; | ||
107 | work_func_t reset_workfn; | ||
108 | struct work_struct reset_work; | ||
109 | struct work_struct probe_work; | ||
110 | struct work_struct scan_work; | ||
111 | char name[12]; | ||
112 | char serial[20]; | ||
113 | char model[40]; | ||
114 | char firmware_rev[8]; | ||
115 | bool subsystem; | ||
116 | u32 max_hw_sectors; | ||
117 | u32 stripe_size; | ||
118 | u32 page_size; | ||
119 | void __iomem *cmb; | ||
120 | dma_addr_t cmb_dma_addr; | ||
121 | u64 cmb_size; | ||
122 | u32 cmbsz; | ||
123 | u16 oncs; | ||
124 | u16 abort_limit; | ||
125 | u8 event_limit; | ||
126 | u8 vwc; | ||
127 | }; | 97 | }; |
128 | 98 | ||
129 | /* | 99 | struct nvme_id_ctrl { |
130 | * An NVM Express namespace is equivalent to a SCSI LUN | 100 | __le16 vid; |
131 | */ | 101 | __le16 ssvid; |
132 | struct nvme_ns { | 102 | char sn[20]; |
133 | struct list_head list; | 103 | char mn[40]; |
104 | char fr[8]; | ||
105 | __u8 rab; | ||
106 | __u8 ieee[3]; | ||
107 | __u8 mic; | ||
108 | __u8 mdts; | ||
109 | __le16 cntlid; | ||
110 | __le32 ver; | ||
111 | __u8 rsvd84[172]; | ||
112 | __le16 oacs; | ||
113 | __u8 acl; | ||
114 | __u8 aerl; | ||
115 | __u8 frmw; | ||
116 | __u8 lpa; | ||
117 | __u8 elpe; | ||
118 | __u8 npss; | ||
119 | __u8 avscc; | ||
120 | __u8 apsta; | ||
121 | __le16 wctemp; | ||
122 | __le16 cctemp; | ||
123 | __u8 rsvd270[242]; | ||
124 | __u8 sqes; | ||
125 | __u8 cqes; | ||
126 | __u8 rsvd514[2]; | ||
127 | __le32 nn; | ||
128 | __le16 oncs; | ||
129 | __le16 fuses; | ||
130 | __u8 fna; | ||
131 | __u8 vwc; | ||
132 | __le16 awun; | ||
133 | __le16 awupf; | ||
134 | __u8 nvscc; | ||
135 | __u8 rsvd531; | ||
136 | __le16 acwu; | ||
137 | __u8 rsvd534[2]; | ||
138 | __le32 sgls; | ||
139 | __u8 rsvd540[1508]; | ||
140 | struct nvme_id_power_state psd[32]; | ||
141 | __u8 vs[1024]; | ||
142 | }; | ||
134 | 143 | ||
135 | struct nvme_dev *dev; | 144 | enum { |
136 | struct request_queue *queue; | 145 | NVME_CTRL_ONCS_COMPARE = 1 << 0, |
137 | struct gendisk *disk; | 146 | NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, |
147 | NVME_CTRL_ONCS_DSM = 1 << 2, | ||
148 | NVME_CTRL_VWC_PRESENT = 1 << 0, | ||
149 | }; | ||
138 | 150 | ||
139 | unsigned ns_id; | 151 | struct nvme_lbaf { |
140 | int lba_shift; | 152 | __le16 ms; |
141 | u16 ms; | 153 | __u8 ds; |
142 | bool ext; | 154 | __u8 rp; |
143 | u8 pi_type; | ||
144 | u64 mode_select_num_blocks; | ||
145 | u32 mode_select_block_len; | ||
146 | }; | 155 | }; |
147 | 156 | ||
148 | /* | 157 | struct nvme_id_ns { |
149 | * The nvme_iod describes the data in an I/O, including the list of PRP | 158 | __le64 nsze; |
150 | * entries. You can't see it in this data structure because C doesn't let | 159 | __le64 ncap; |
151 | * me express that. Use nvme_alloc_iod to ensure there's enough space | 160 | __le64 nuse; |
152 | * allocated to store the PRP list. | 161 | __u8 nsfeat; |
153 | */ | 162 | __u8 nlbaf; |
154 | struct nvme_iod { | 163 | __u8 flbas; |
155 | unsigned long private; /* For the use of the submitter of the I/O */ | 164 | __u8 mc; |
156 | int npages; /* In the PRP list. 0 means small pool in use */ | 165 | __u8 dpc; |
157 | int offset; /* Of PRP list */ | 166 | __u8 dps; |
158 | int nents; /* Used in scatterlist */ | 167 | __u8 nmic; |
159 | int length; /* Of data, in bytes */ | 168 | __u8 rescap; |
160 | dma_addr_t first_dma; | 169 | __u8 fpi; |
161 | struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */ | 170 | __u8 rsvd33; |
162 | struct scatterlist sg[0]; | 171 | __le16 nawun; |
163 | }; | 172 | __le16 nawupf; |
164 | 173 | __le16 nacwu; | |
165 | static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | 174 | __le16 nabsn; |
166 | { | 175 | __le16 nabo; |
167 | return (sector >> (ns->lba_shift - 9)); | 176 | __le16 nabspf; |
168 | } | 177 | __u16 rsvd46; |
169 | 178 | __le64 nvmcap[2]; | |
170 | int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, | 179 | __u8 rsvd64[40]; |
171 | void *buf, unsigned bufflen); | 180 | __u8 nguid[16]; |
172 | int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, | 181 | __u8 eui64[8]; |
173 | void *buffer, void __user *ubuffer, unsigned bufflen, | 182 | struct nvme_lbaf lbaf[16]; |
174 | u32 *result, unsigned timeout); | 183 | __u8 rsvd192[192]; |
175 | int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); | 184 | __u8 vs[3712]; |
176 | int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, | 185 | }; |
177 | struct nvme_id_ns **id); | 186 | |
178 | int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); | 187 | enum { |
179 | int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, | 188 | NVME_NS_FEAT_THIN = 1 << 0, |
180 | dma_addr_t dma_addr, u32 *result); | 189 | NVME_NS_FLBAS_LBA_MASK = 0xf, |
181 | int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, | 190 | NVME_NS_FLBAS_META_EXT = 0x10, |
182 | dma_addr_t dma_addr, u32 *result); | 191 | NVME_LBAF_RP_BEST = 0, |
183 | 192 | NVME_LBAF_RP_BETTER = 1, | |
184 | struct sg_io_hdr; | 193 | NVME_LBAF_RP_GOOD = 2, |
185 | 194 | NVME_LBAF_RP_DEGRADED = 3, | |
186 | int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); | 195 | NVME_NS_DPC_PI_LAST = 1 << 4, |
187 | int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); | 196 | NVME_NS_DPC_PI_FIRST = 1 << 3, |
188 | int nvme_sg_get_version_num(int __user *ip); | 197 | NVME_NS_DPC_PI_TYPE3 = 1 << 2, |
198 | NVME_NS_DPC_PI_TYPE2 = 1 << 1, | ||
199 | NVME_NS_DPC_PI_TYPE1 = 1 << 0, | ||
200 | NVME_NS_DPS_PI_FIRST = 1 << 3, | ||
201 | NVME_NS_DPS_PI_MASK = 0x7, | ||
202 | NVME_NS_DPS_PI_TYPE1 = 1, | ||
203 | NVME_NS_DPS_PI_TYPE2 = 2, | ||
204 | NVME_NS_DPS_PI_TYPE3 = 3, | ||
205 | }; | ||
206 | |||
207 | struct nvme_smart_log { | ||
208 | __u8 critical_warning; | ||
209 | __u8 temperature[2]; | ||
210 | __u8 avail_spare; | ||
211 | __u8 spare_thresh; | ||
212 | __u8 percent_used; | ||
213 | __u8 rsvd6[26]; | ||
214 | __u8 data_units_read[16]; | ||
215 | __u8 data_units_written[16]; | ||
216 | __u8 host_reads[16]; | ||
217 | __u8 host_writes[16]; | ||
218 | __u8 ctrl_busy_time[16]; | ||
219 | __u8 power_cycles[16]; | ||
220 | __u8 power_on_hours[16]; | ||
221 | __u8 unsafe_shutdowns[16]; | ||
222 | __u8 media_errors[16]; | ||
223 | __u8 num_err_log_entries[16]; | ||
224 | __le32 warning_temp_time; | ||
225 | __le32 critical_comp_time; | ||
226 | __le16 temp_sensor[8]; | ||
227 | __u8 rsvd216[296]; | ||
228 | }; | ||
229 | |||
230 | enum { | ||
231 | NVME_SMART_CRIT_SPARE = 1 << 0, | ||
232 | NVME_SMART_CRIT_TEMPERATURE = 1 << 1, | ||
233 | NVME_SMART_CRIT_RELIABILITY = 1 << 2, | ||
234 | NVME_SMART_CRIT_MEDIA = 1 << 3, | ||
235 | NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, | ||
236 | }; | ||
237 | |||
238 | enum { | ||
239 | NVME_AER_NOTICE_NS_CHANGED = 0x0002, | ||
240 | }; | ||
241 | |||
242 | struct nvme_lba_range_type { | ||
243 | __u8 type; | ||
244 | __u8 attributes; | ||
245 | __u8 rsvd2[14]; | ||
246 | __u64 slba; | ||
247 | __u64 nlb; | ||
248 | __u8 guid[16]; | ||
249 | __u8 rsvd48[16]; | ||
250 | }; | ||
251 | |||
252 | enum { | ||
253 | NVME_LBART_TYPE_FS = 0x01, | ||
254 | NVME_LBART_TYPE_RAID = 0x02, | ||
255 | NVME_LBART_TYPE_CACHE = 0x03, | ||
256 | NVME_LBART_TYPE_SWAP = 0x04, | ||
257 | |||
258 | NVME_LBART_ATTRIB_TEMP = 1 << 0, | ||
259 | NVME_LBART_ATTRIB_HIDE = 1 << 1, | ||
260 | }; | ||
261 | |||
262 | struct nvme_reservation_status { | ||
263 | __le32 gen; | ||
264 | __u8 rtype; | ||
265 | __u8 regctl[2]; | ||
266 | __u8 resv5[2]; | ||
267 | __u8 ptpls; | ||
268 | __u8 resv10[13]; | ||
269 | struct { | ||
270 | __le16 cntlid; | ||
271 | __u8 rcsts; | ||
272 | __u8 resv3[5]; | ||
273 | __le64 hostid; | ||
274 | __le64 rkey; | ||
275 | } regctl_ds[]; | ||
276 | }; | ||
277 | |||
278 | /* I/O commands */ | ||
279 | |||
280 | enum nvme_opcode { | ||
281 | nvme_cmd_flush = 0x00, | ||
282 | nvme_cmd_write = 0x01, | ||
283 | nvme_cmd_read = 0x02, | ||
284 | nvme_cmd_write_uncor = 0x04, | ||
285 | nvme_cmd_compare = 0x05, | ||
286 | nvme_cmd_write_zeroes = 0x08, | ||
287 | nvme_cmd_dsm = 0x09, | ||
288 | nvme_cmd_resv_register = 0x0d, | ||
289 | nvme_cmd_resv_report = 0x0e, | ||
290 | nvme_cmd_resv_acquire = 0x11, | ||
291 | nvme_cmd_resv_release = 0x15, | ||
292 | }; | ||
293 | |||
294 | struct nvme_common_command { | ||
295 | __u8 opcode; | ||
296 | __u8 flags; | ||
297 | __u16 command_id; | ||
298 | __le32 nsid; | ||
299 | __le32 cdw2[2]; | ||
300 | __le64 metadata; | ||
301 | __le64 prp1; | ||
302 | __le64 prp2; | ||
303 | __le32 cdw10[6]; | ||
304 | }; | ||
305 | |||
306 | struct nvme_rw_command { | ||
307 | __u8 opcode; | ||
308 | __u8 flags; | ||
309 | __u16 command_id; | ||
310 | __le32 nsid; | ||
311 | __u64 rsvd2; | ||
312 | __le64 metadata; | ||
313 | __le64 prp1; | ||
314 | __le64 prp2; | ||
315 | __le64 slba; | ||
316 | __le16 length; | ||
317 | __le16 control; | ||
318 | __le32 dsmgmt; | ||
319 | __le32 reftag; | ||
320 | __le16 apptag; | ||
321 | __le16 appmask; | ||
322 | }; | ||
323 | |||
324 | enum { | ||
325 | NVME_RW_LR = 1 << 15, | ||
326 | NVME_RW_FUA = 1 << 14, | ||
327 | NVME_RW_DSM_FREQ_UNSPEC = 0, | ||
328 | NVME_RW_DSM_FREQ_TYPICAL = 1, | ||
329 | NVME_RW_DSM_FREQ_RARE = 2, | ||
330 | NVME_RW_DSM_FREQ_READS = 3, | ||
331 | NVME_RW_DSM_FREQ_WRITES = 4, | ||
332 | NVME_RW_DSM_FREQ_RW = 5, | ||
333 | NVME_RW_DSM_FREQ_ONCE = 6, | ||
334 | NVME_RW_DSM_FREQ_PREFETCH = 7, | ||
335 | NVME_RW_DSM_FREQ_TEMP = 8, | ||
336 | NVME_RW_DSM_LATENCY_NONE = 0 << 4, | ||
337 | NVME_RW_DSM_LATENCY_IDLE = 1 << 4, | ||
338 | NVME_RW_DSM_LATENCY_NORM = 2 << 4, | ||
339 | NVME_RW_DSM_LATENCY_LOW = 3 << 4, | ||
340 | NVME_RW_DSM_SEQ_REQ = 1 << 6, | ||
341 | NVME_RW_DSM_COMPRESSED = 1 << 7, | ||
342 | NVME_RW_PRINFO_PRCHK_REF = 1 << 10, | ||
343 | NVME_RW_PRINFO_PRCHK_APP = 1 << 11, | ||
344 | NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, | ||
345 | NVME_RW_PRINFO_PRACT = 1 << 13, | ||
346 | }; | ||
347 | |||
348 | struct nvme_dsm_cmd { | ||
349 | __u8 opcode; | ||
350 | __u8 flags; | ||
351 | __u16 command_id; | ||
352 | __le32 nsid; | ||
353 | __u64 rsvd2[2]; | ||
354 | __le64 prp1; | ||
355 | __le64 prp2; | ||
356 | __le32 nr; | ||
357 | __le32 attributes; | ||
358 | __u32 rsvd12[4]; | ||
359 | }; | ||
360 | |||
361 | enum { | ||
362 | NVME_DSMGMT_IDR = 1 << 0, | ||
363 | NVME_DSMGMT_IDW = 1 << 1, | ||
364 | NVME_DSMGMT_AD = 1 << 2, | ||
365 | }; | ||
366 | |||
367 | struct nvme_dsm_range { | ||
368 | __le32 cattr; | ||
369 | __le32 nlb; | ||
370 | __le64 slba; | ||
371 | }; | ||
372 | |||
373 | /* Admin commands */ | ||
374 | |||
375 | enum nvme_admin_opcode { | ||
376 | nvme_admin_delete_sq = 0x00, | ||
377 | nvme_admin_create_sq = 0x01, | ||
378 | nvme_admin_get_log_page = 0x02, | ||
379 | nvme_admin_delete_cq = 0x04, | ||
380 | nvme_admin_create_cq = 0x05, | ||
381 | nvme_admin_identify = 0x06, | ||
382 | nvme_admin_abort_cmd = 0x08, | ||
383 | nvme_admin_set_features = 0x09, | ||
384 | nvme_admin_get_features = 0x0a, | ||
385 | nvme_admin_async_event = 0x0c, | ||
386 | nvme_admin_activate_fw = 0x10, | ||
387 | nvme_admin_download_fw = 0x11, | ||
388 | nvme_admin_format_nvm = 0x80, | ||
389 | nvme_admin_security_send = 0x81, | ||
390 | nvme_admin_security_recv = 0x82, | ||
391 | }; | ||
392 | |||
393 | enum { | ||
394 | NVME_QUEUE_PHYS_CONTIG = (1 << 0), | ||
395 | NVME_CQ_IRQ_ENABLED = (1 << 1), | ||
396 | NVME_SQ_PRIO_URGENT = (0 << 1), | ||
397 | NVME_SQ_PRIO_HIGH = (1 << 1), | ||
398 | NVME_SQ_PRIO_MEDIUM = (2 << 1), | ||
399 | NVME_SQ_PRIO_LOW = (3 << 1), | ||
400 | NVME_FEAT_ARBITRATION = 0x01, | ||
401 | NVME_FEAT_POWER_MGMT = 0x02, | ||
402 | NVME_FEAT_LBA_RANGE = 0x03, | ||
403 | NVME_FEAT_TEMP_THRESH = 0x04, | ||
404 | NVME_FEAT_ERR_RECOVERY = 0x05, | ||
405 | NVME_FEAT_VOLATILE_WC = 0x06, | ||
406 | NVME_FEAT_NUM_QUEUES = 0x07, | ||
407 | NVME_FEAT_IRQ_COALESCE = 0x08, | ||
408 | NVME_FEAT_IRQ_CONFIG = 0x09, | ||
409 | NVME_FEAT_WRITE_ATOMIC = 0x0a, | ||
410 | NVME_FEAT_ASYNC_EVENT = 0x0b, | ||
411 | NVME_FEAT_AUTO_PST = 0x0c, | ||
412 | NVME_FEAT_SW_PROGRESS = 0x80, | ||
413 | NVME_FEAT_HOST_ID = 0x81, | ||
414 | NVME_FEAT_RESV_MASK = 0x82, | ||
415 | NVME_FEAT_RESV_PERSIST = 0x83, | ||
416 | NVME_LOG_ERROR = 0x01, | ||
417 | NVME_LOG_SMART = 0x02, | ||
418 | NVME_LOG_FW_SLOT = 0x03, | ||
419 | NVME_LOG_RESERVATION = 0x80, | ||
420 | NVME_FWACT_REPL = (0 << 3), | ||
421 | NVME_FWACT_REPL_ACTV = (1 << 3), | ||
422 | NVME_FWACT_ACTV = (2 << 3), | ||
423 | }; | ||
424 | |||
425 | struct nvme_identify { | ||
426 | __u8 opcode; | ||
427 | __u8 flags; | ||
428 | __u16 command_id; | ||
429 | __le32 nsid; | ||
430 | __u64 rsvd2[2]; | ||
431 | __le64 prp1; | ||
432 | __le64 prp2; | ||
433 | __le32 cns; | ||
434 | __u32 rsvd11[5]; | ||
435 | }; | ||
436 | |||
437 | struct nvme_features { | ||
438 | __u8 opcode; | ||
439 | __u8 flags; | ||
440 | __u16 command_id; | ||
441 | __le32 nsid; | ||
442 | __u64 rsvd2[2]; | ||
443 | __le64 prp1; | ||
444 | __le64 prp2; | ||
445 | __le32 fid; | ||
446 | __le32 dword11; | ||
447 | __u32 rsvd12[4]; | ||
448 | }; | ||
449 | |||
450 | struct nvme_create_cq { | ||
451 | __u8 opcode; | ||
452 | __u8 flags; | ||
453 | __u16 command_id; | ||
454 | __u32 rsvd1[5]; | ||
455 | __le64 prp1; | ||
456 | __u64 rsvd8; | ||
457 | __le16 cqid; | ||
458 | __le16 qsize; | ||
459 | __le16 cq_flags; | ||
460 | __le16 irq_vector; | ||
461 | __u32 rsvd12[4]; | ||
462 | }; | ||
463 | |||
464 | struct nvme_create_sq { | ||
465 | __u8 opcode; | ||
466 | __u8 flags; | ||
467 | __u16 command_id; | ||
468 | __u32 rsvd1[5]; | ||
469 | __le64 prp1; | ||
470 | __u64 rsvd8; | ||
471 | __le16 sqid; | ||
472 | __le16 qsize; | ||
473 | __le16 sq_flags; | ||
474 | __le16 cqid; | ||
475 | __u32 rsvd12[4]; | ||
476 | }; | ||
477 | |||
478 | struct nvme_delete_queue { | ||
479 | __u8 opcode; | ||
480 | __u8 flags; | ||
481 | __u16 command_id; | ||
482 | __u32 rsvd1[9]; | ||
483 | __le16 qid; | ||
484 | __u16 rsvd10; | ||
485 | __u32 rsvd11[5]; | ||
486 | }; | ||
487 | |||
488 | struct nvme_abort_cmd { | ||
489 | __u8 opcode; | ||
490 | __u8 flags; | ||
491 | __u16 command_id; | ||
492 | __u32 rsvd1[9]; | ||
493 | __le16 sqid; | ||
494 | __u16 cid; | ||
495 | __u32 rsvd11[5]; | ||
496 | }; | ||
497 | |||
498 | struct nvme_download_firmware { | ||
499 | __u8 opcode; | ||
500 | __u8 flags; | ||
501 | __u16 command_id; | ||
502 | __u32 rsvd1[5]; | ||
503 | __le64 prp1; | ||
504 | __le64 prp2; | ||
505 | __le32 numd; | ||
506 | __le32 offset; | ||
507 | __u32 rsvd12[4]; | ||
508 | }; | ||
509 | |||
510 | struct nvme_format_cmd { | ||
511 | __u8 opcode; | ||
512 | __u8 flags; | ||
513 | __u16 command_id; | ||
514 | __le32 nsid; | ||
515 | __u64 rsvd2[4]; | ||
516 | __le32 cdw10; | ||
517 | __u32 rsvd11[5]; | ||
518 | }; | ||
519 | |||
520 | struct nvme_command { | ||
521 | union { | ||
522 | struct nvme_common_command common; | ||
523 | struct nvme_rw_command rw; | ||
524 | struct nvme_identify identify; | ||
525 | struct nvme_features features; | ||
526 | struct nvme_create_cq create_cq; | ||
527 | struct nvme_create_sq create_sq; | ||
528 | struct nvme_delete_queue delete_queue; | ||
529 | struct nvme_download_firmware dlfw; | ||
530 | struct nvme_format_cmd format; | ||
531 | struct nvme_dsm_cmd dsm; | ||
532 | struct nvme_abort_cmd abort; | ||
533 | }; | ||
534 | }; | ||
535 | |||
536 | enum { | ||
537 | NVME_SC_SUCCESS = 0x0, | ||
538 | NVME_SC_INVALID_OPCODE = 0x1, | ||
539 | NVME_SC_INVALID_FIELD = 0x2, | ||
540 | NVME_SC_CMDID_CONFLICT = 0x3, | ||
541 | NVME_SC_DATA_XFER_ERROR = 0x4, | ||
542 | NVME_SC_POWER_LOSS = 0x5, | ||
543 | NVME_SC_INTERNAL = 0x6, | ||
544 | NVME_SC_ABORT_REQ = 0x7, | ||
545 | NVME_SC_ABORT_QUEUE = 0x8, | ||
546 | NVME_SC_FUSED_FAIL = 0x9, | ||
547 | NVME_SC_FUSED_MISSING = 0xa, | ||
548 | NVME_SC_INVALID_NS = 0xb, | ||
549 | NVME_SC_CMD_SEQ_ERROR = 0xc, | ||
550 | NVME_SC_SGL_INVALID_LAST = 0xd, | ||
551 | NVME_SC_SGL_INVALID_COUNT = 0xe, | ||
552 | NVME_SC_SGL_INVALID_DATA = 0xf, | ||
553 | NVME_SC_SGL_INVALID_METADATA = 0x10, | ||
554 | NVME_SC_SGL_INVALID_TYPE = 0x11, | ||
555 | NVME_SC_LBA_RANGE = 0x80, | ||
556 | NVME_SC_CAP_EXCEEDED = 0x81, | ||
557 | NVME_SC_NS_NOT_READY = 0x82, | ||
558 | NVME_SC_RESERVATION_CONFLICT = 0x83, | ||
559 | NVME_SC_CQ_INVALID = 0x100, | ||
560 | NVME_SC_QID_INVALID = 0x101, | ||
561 | NVME_SC_QUEUE_SIZE = 0x102, | ||
562 | NVME_SC_ABORT_LIMIT = 0x103, | ||
563 | NVME_SC_ABORT_MISSING = 0x104, | ||
564 | NVME_SC_ASYNC_LIMIT = 0x105, | ||
565 | NVME_SC_FIRMWARE_SLOT = 0x106, | ||
566 | NVME_SC_FIRMWARE_IMAGE = 0x107, | ||
567 | NVME_SC_INVALID_VECTOR = 0x108, | ||
568 | NVME_SC_INVALID_LOG_PAGE = 0x109, | ||
569 | NVME_SC_INVALID_FORMAT = 0x10a, | ||
570 | NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, | ||
571 | NVME_SC_INVALID_QUEUE = 0x10c, | ||
572 | NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, | ||
573 | NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, | ||
574 | NVME_SC_FEATURE_NOT_PER_NS = 0x10f, | ||
575 | NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, | ||
576 | NVME_SC_BAD_ATTRIBUTES = 0x180, | ||
577 | NVME_SC_INVALID_PI = 0x181, | ||
578 | NVME_SC_READ_ONLY = 0x182, | ||
579 | NVME_SC_WRITE_FAULT = 0x280, | ||
580 | NVME_SC_READ_ERROR = 0x281, | ||
581 | NVME_SC_GUARD_CHECK = 0x282, | ||
582 | NVME_SC_APPTAG_CHECK = 0x283, | ||
583 | NVME_SC_REFTAG_CHECK = 0x284, | ||
584 | NVME_SC_COMPARE_FAILED = 0x285, | ||
585 | NVME_SC_ACCESS_DENIED = 0x286, | ||
586 | NVME_SC_DNR = 0x4000, | ||
587 | }; | ||
588 | |||
589 | struct nvme_completion { | ||
590 | __le32 result; /* Used by admin commands to return data */ | ||
591 | __u32 rsvd; | ||
592 | __le16 sq_head; /* how much of this queue may be reclaimed */ | ||
593 | __le16 sq_id; /* submission queue that generated this entry */ | ||
594 | __u16 command_id; /* of the command which completed */ | ||
595 | __le16 status; /* did the command fail, and if so, why? */ | ||
596 | }; | ||
597 | |||
598 | #define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) | ||
189 | 599 | ||
190 | #endif /* _LINUX_NVME_H */ | 600 | #endif /* _LINUX_NVME_H */ |
diff --git a/include/linux/of.h b/include/linux/of.h index 2194b8ca41f9..dd10626a615f 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -126,6 +126,8 @@ extern raw_spinlock_t devtree_lock; | |||
126 | #define OF_POPULATED 3 /* device already created for the node */ | 126 | #define OF_POPULATED 3 /* device already created for the node */ |
127 | #define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ | 127 | #define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ |
128 | 128 | ||
129 | #define OF_BAD_ADDR ((u64)-1) | ||
130 | |||
129 | #ifdef CONFIG_OF | 131 | #ifdef CONFIG_OF |
130 | void of_core_init(void); | 132 | void of_core_init(void); |
131 | 133 | ||
@@ -229,8 +231,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
229 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | 231 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) |
230 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | 232 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) |
231 | 233 | ||
232 | #define OF_BAD_ADDR ((u64)-1) | ||
233 | |||
234 | static inline const char *of_node_full_name(const struct device_node *np) | 234 | static inline const char *of_node_full_name(const struct device_node *np) |
235 | { | 235 | { |
236 | return np ? np->full_name : "<no-node>"; | 236 | return np ? np->full_name : "<no-node>"; |
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index d88e81be6368..507daad0bc8d 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h | |||
@@ -57,6 +57,13 @@ extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, | |||
57 | u64 *paddr, u64 *size); | 57 | u64 *paddr, u64 *size); |
58 | extern bool of_dma_is_coherent(struct device_node *np); | 58 | extern bool of_dma_is_coherent(struct device_node *np); |
59 | #else /* CONFIG_OF_ADDRESS */ | 59 | #else /* CONFIG_OF_ADDRESS */ |
60 | |||
61 | static inline u64 of_translate_address(struct device_node *np, | ||
62 | const __be32 *addr) | ||
63 | { | ||
64 | return OF_BAD_ADDR; | ||
65 | } | ||
66 | |||
60 | static inline struct device_node *of_find_matching_node_by_address( | 67 | static inline struct device_node *of_find_matching_node_by_address( |
61 | struct device_node *from, | 68 | struct device_node *from, |
62 | const struct of_device_id *matches, | 69 | const struct of_device_id *matches, |
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 98ba7525929e..b90d8ec57c1f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
@@ -34,7 +34,7 @@ struct of_dma_filter_info { | |||
34 | dma_filter_fn filter_fn; | 34 | dma_filter_fn filter_fn; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | #ifdef CONFIG_OF | 37 | #ifdef CONFIG_DMA_OF |
38 | extern int of_dma_controller_register(struct device_node *np, | 38 | extern int of_dma_controller_register(struct device_node *np, |
39 | struct dma_chan *(*of_dma_xlate) | 39 | struct dma_chan *(*of_dma_xlate) |
40 | (struct of_phandle_args *, struct of_dma *), | 40 | (struct of_phandle_args *, struct of_dma *), |
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np, | |||
80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
81 | const char *name) | 81 | const char *name) |
82 | { | 82 | { |
83 | return NULL; | 83 | return ERR_PTR(-ENODEV); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f3191828f037..87d6d1632dd4 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h | |||
@@ -29,6 +29,7 @@ struct device_node; | |||
29 | */ | 29 | */ |
30 | enum of_gpio_flags { | 30 | enum of_gpio_flags { |
31 | OF_GPIO_ACTIVE_LOW = 0x1, | 31 | OF_GPIO_ACTIVE_LOW = 0x1, |
32 | OF_GPIO_SINGLE_ENDED = 0x2, | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | #ifdef CONFIG_OF_GPIO | 35 | #ifdef CONFIG_OF_GPIO |
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 4bcbd586a672..039f2eec49ce 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h | |||
@@ -46,6 +46,12 @@ extern int of_irq_get(struct device_node *dev, int index); | |||
46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); | 46 | extern int of_irq_get_byname(struct device_node *dev, const char *name); |
47 | extern int of_irq_to_resource_table(struct device_node *dev, | 47 | extern int of_irq_to_resource_table(struct device_node *dev, |
48 | struct resource *res, int nr_irqs); | 48 | struct resource *res, int nr_irqs); |
49 | extern struct irq_domain *of_msi_get_domain(struct device *dev, | ||
50 | struct device_node *np, | ||
51 | enum irq_domain_bus_token token); | ||
52 | extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, | ||
53 | u32 rid); | ||
54 | extern void of_msi_configure(struct device *dev, struct device_node *np); | ||
49 | #else | 55 | #else |
50 | static inline int of_irq_count(struct device_node *dev) | 56 | static inline int of_irq_count(struct device_node *dev) |
51 | { | 57 | { |
@@ -64,28 +70,42 @@ static inline int of_irq_to_resource_table(struct device_node *dev, | |||
64 | { | 70 | { |
65 | return 0; | 71 | return 0; |
66 | } | 72 | } |
73 | static inline struct irq_domain *of_msi_get_domain(struct device *dev, | ||
74 | struct device_node *np, | ||
75 | enum irq_domain_bus_token token) | ||
76 | { | ||
77 | return NULL; | ||
78 | } | ||
79 | static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, | ||
80 | u32 rid) | ||
81 | { | ||
82 | return NULL; | ||
83 | } | ||
84 | static inline void of_msi_configure(struct device *dev, struct device_node *np) | ||
85 | { | ||
86 | } | ||
67 | #endif | 87 | #endif |
68 | 88 | ||
69 | #if defined(CONFIG_OF) | 89 | #if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) |
70 | /* | 90 | /* |
71 | * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC | 91 | * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC |
72 | * implements it differently. However, the prototype is the same for all, | 92 | * implements it differently. However, the prototype is the same for all, |
73 | * so declare it here regardless of the CONFIG_OF_IRQ setting. | 93 | * so declare it here regardless of the CONFIG_OF_IRQ setting. |
74 | */ | 94 | */ |
75 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); | 95 | extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); |
76 | extern struct device_node *of_irq_find_parent(struct device_node *child); | 96 | u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); |
77 | extern void of_msi_configure(struct device *dev, struct device_node *np); | ||
78 | 97 | ||
79 | #else /* !CONFIG_OF */ | 98 | #else /* !CONFIG_OF && !CONFIG_SPARC */ |
80 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, | 99 | static inline unsigned int irq_of_parse_and_map(struct device_node *dev, |
81 | int index) | 100 | int index) |
82 | { | 101 | { |
83 | return 0; | 102 | return 0; |
84 | } | 103 | } |
85 | 104 | ||
86 | static inline void *of_irq_find_parent(struct device_node *child) | 105 | static inline u32 of_msi_map_rid(struct device *dev, |
106 | struct device_node *msi_np, u32 rid_in) | ||
87 | { | 107 | { |
88 | return NULL; | 108 | return rid_in; |
89 | } | 109 | } |
90 | #endif /* !CONFIG_OF */ | 110 | #endif /* !CONFIG_OF */ |
91 | 111 | ||
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 29fd3fe1c035..2c51ee78b1c0 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -16,7 +16,7 @@ int of_pci_get_devfn(struct device_node *np); | |||
16 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); | 16 | int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); |
17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); | 17 | int of_pci_parse_bus_range(struct device_node *node, struct resource *res); |
18 | int of_get_pci_domain_nr(struct device_node *node); | 18 | int of_get_pci_domain_nr(struct device_node *node); |
19 | void of_pci_dma_configure(struct pci_dev *pci_dev); | 19 | void of_pci_check_probe_only(void); |
20 | #else | 20 | #else |
21 | static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) | 21 | static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) |
22 | { | 22 | { |
@@ -52,7 +52,7 @@ of_get_pci_domain_nr(struct device_node *node) | |||
52 | return -1; | 52 | return -1; |
53 | } | 53 | } |
54 | 54 | ||
55 | static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } | 55 | static inline void of_pci_check_probe_only(void) { } |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #if defined(CONFIG_OF_ADDRESS) | 58 | #if defined(CONFIG_OF_ADDRESS) |
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index e5a70132a240..88fa8af2b937 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | 19 | ||
20 | #define INT_DMA_LCD 25 | 20 | #define INT_DMA_LCD (NR_IRQS_LEGACY + 25) |
21 | 21 | ||
22 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) | 22 | #define OMAP1_DMA_TOUT_IRQ (1 << 0) |
23 | #define OMAP_DMA_DROP_IRQ (1 << 1) | 23 | #define OMAP_DMA_DROP_IRQ (1 << 1) |
diff --git a/include/linux/once.h b/include/linux/once.h new file mode 100644 index 000000000000..285f12cb40e6 --- /dev/null +++ b/include/linux/once.h | |||
@@ -0,0 +1,57 @@ | |||
1 | #ifndef _LINUX_ONCE_H | ||
2 | #define _LINUX_ONCE_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/jump_label.h> | ||
6 | |||
7 | bool __do_once_start(bool *done, unsigned long *flags); | ||
8 | void __do_once_done(bool *done, struct static_key *once_key, | ||
9 | unsigned long *flags); | ||
10 | |||
11 | /* Call a function exactly once. The idea of DO_ONCE() is to perform | ||
12 | * a function call such as initialization of random seeds, etc, only | ||
13 | * once, where DO_ONCE() can live in the fast-path. After @func has | ||
14 | * been called with the passed arguments, the static key will patch | ||
15 | * out the condition into a nop. DO_ONCE() guarantees type safety of | ||
16 | * arguments! | ||
17 | * | ||
18 | * Not that the following is not equivalent ... | ||
19 | * | ||
20 | * DO_ONCE(func, arg); | ||
21 | * DO_ONCE(func, arg); | ||
22 | * | ||
23 | * ... to this version: | ||
24 | * | ||
25 | * void foo(void) | ||
26 | * { | ||
27 | * DO_ONCE(func, arg); | ||
28 | * } | ||
29 | * | ||
30 | * foo(); | ||
31 | * foo(); | ||
32 | * | ||
33 | * In case the one-time invocation could be triggered from multiple | ||
34 | * places, then a common helper function must be defined, so that only | ||
35 | * a single static key will be placed there! | ||
36 | */ | ||
37 | #define DO_ONCE(func, ...) \ | ||
38 | ({ \ | ||
39 | bool ___ret = false; \ | ||
40 | static bool ___done = false; \ | ||
41 | static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \ | ||
42 | if (static_key_true(&___once_key)) { \ | ||
43 | unsigned long ___flags; \ | ||
44 | ___ret = __do_once_start(&___done, &___flags); \ | ||
45 | if (unlikely(___ret)) { \ | ||
46 | func(__VA_ARGS__); \ | ||
47 | __do_once_done(&___done, &___once_key, \ | ||
48 | &___flags); \ | ||
49 | } \ | ||
50 | } \ | ||
51 | ___ret; \ | ||
52 | }) | ||
53 | |||
54 | #define get_random_once(buf, nbytes) \ | ||
55 | DO_ONCE(get_random_bytes, (buf), (nbytes)) | ||
56 | |||
57 | #endif /* _LINUX_ONCE_H */ | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 416509e26d6d..bb53c7b86315 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -86,12 +86,7 @@ enum pageflags { | |||
86 | PG_private, /* If pagecache, has fs-private data */ | 86 | PG_private, /* If pagecache, has fs-private data */ |
87 | PG_private_2, /* If pagecache, has fs aux data */ | 87 | PG_private_2, /* If pagecache, has fs aux data */ |
88 | PG_writeback, /* Page is under writeback */ | 88 | PG_writeback, /* Page is under writeback */ |
89 | #ifdef CONFIG_PAGEFLAGS_EXTENDED | ||
90 | PG_head, /* A head page */ | 89 | PG_head, /* A head page */ |
91 | PG_tail, /* A tail page */ | ||
92 | #else | ||
93 | PG_compound, /* A compound page */ | ||
94 | #endif | ||
95 | PG_swapcache, /* Swap page: swp_entry_t in private */ | 90 | PG_swapcache, /* Swap page: swp_entry_t in private */ |
96 | PG_mappedtodisk, /* Has blocks allocated on-disk */ | 91 | PG_mappedtodisk, /* Has blocks allocated on-disk */ |
97 | PG_reclaim, /* To be reclaimed asap */ | 92 | PG_reclaim, /* To be reclaimed asap */ |
@@ -256,7 +251,7 @@ PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) | |||
256 | * Must use a macro here due to header dependency issues. page_zone() is not | 251 | * Must use a macro here due to header dependency issues. page_zone() is not |
257 | * available at this point. | 252 | * available at this point. |
258 | */ | 253 | */ |
259 | #define PageHighMem(__p) is_highmem(page_zone(__p)) | 254 | #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) |
260 | #else | 255 | #else |
261 | PAGEFLAG_FALSE(HighMem) | 256 | PAGEFLAG_FALSE(HighMem) |
262 | #endif | 257 | #endif |
@@ -398,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page) | |||
398 | test_set_page_writeback_keepwrite(page); | 393 | test_set_page_writeback_keepwrite(page); |
399 | } | 394 | } |
400 | 395 | ||
401 | #ifdef CONFIG_PAGEFLAGS_EXTENDED | ||
402 | /* | ||
403 | * System with lots of page flags available. This allows separate | ||
404 | * flags for PageHead() and PageTail() checks of compound pages so that bit | ||
405 | * tests can be used in performance sensitive paths. PageCompound is | ||
406 | * generally not used in hot code paths except arch/powerpc/mm/init_64.c | ||
407 | * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages | ||
408 | * and avoid handling those in real mode. | ||
409 | */ | ||
410 | __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) | 396 | __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) |
411 | __PAGEFLAG(Tail, tail) | ||
412 | 397 | ||
413 | static inline int PageCompound(struct page *page) | 398 | static inline int PageTail(struct page *page) |
414 | { | ||
415 | return page->flags & ((1L << PG_head) | (1L << PG_tail)); | ||
416 | |||
417 | } | ||
418 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
419 | static inline void ClearPageCompound(struct page *page) | ||
420 | { | 399 | { |
421 | BUG_ON(!PageHead(page)); | 400 | return READ_ONCE(page->compound_head) & 1; |
422 | ClearPageHead(page); | ||
423 | } | 401 | } |
424 | #endif | ||
425 | |||
426 | #define PG_head_mask ((1L << PG_head)) | ||
427 | 402 | ||
428 | #else | 403 | static inline void set_compound_head(struct page *page, struct page *head) |
429 | /* | ||
430 | * Reduce page flag use as much as possible by overlapping | ||
431 | * compound page flags with the flags used for page cache pages. Possible | ||
432 | * because PageCompound is always set for compound pages and not for | ||
433 | * pages on the LRU and/or pagecache. | ||
434 | */ | ||
435 | TESTPAGEFLAG(Compound, compound) | ||
436 | __SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) | ||
437 | |||
438 | /* | ||
439 | * PG_reclaim is used in combination with PG_compound to mark the | ||
440 | * head and tail of a compound page. This saves one page flag | ||
441 | * but makes it impossible to use compound pages for the page cache. | ||
442 | * The PG_reclaim bit would have to be used for reclaim or readahead | ||
443 | * if compound pages enter the page cache. | ||
444 | * | ||
445 | * PG_compound & PG_reclaim => Tail page | ||
446 | * PG_compound & ~PG_reclaim => Head page | ||
447 | */ | ||
448 | #define PG_head_mask ((1L << PG_compound)) | ||
449 | #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) | ||
450 | |||
451 | static inline int PageHead(struct page *page) | ||
452 | { | 404 | { |
453 | return ((page->flags & PG_head_tail_mask) == PG_head_mask); | 405 | WRITE_ONCE(page->compound_head, (unsigned long)head + 1); |
454 | } | 406 | } |
455 | 407 | ||
456 | static inline int PageTail(struct page *page) | 408 | static inline void clear_compound_head(struct page *page) |
457 | { | 409 | { |
458 | return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); | 410 | WRITE_ONCE(page->compound_head, 0); |
459 | } | 411 | } |
460 | 412 | ||
461 | static inline void __SetPageTail(struct page *page) | 413 | static inline struct page *compound_head(struct page *page) |
462 | { | 414 | { |
463 | page->flags |= PG_head_tail_mask; | 415 | unsigned long head = READ_ONCE(page->compound_head); |
416 | |||
417 | if (unlikely(head & 1)) | ||
418 | return (struct page *) (head - 1); | ||
419 | return page; | ||
464 | } | 420 | } |
465 | 421 | ||
466 | static inline void __ClearPageTail(struct page *page) | 422 | static inline int PageCompound(struct page *page) |
467 | { | 423 | { |
468 | page->flags &= ~PG_head_tail_mask; | 424 | return PageHead(page) || PageTail(page); |
469 | } | ||
470 | 425 | ||
426 | } | ||
471 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 427 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
472 | static inline void ClearPageCompound(struct page *page) | 428 | static inline void ClearPageCompound(struct page *page) |
473 | { | 429 | { |
474 | BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); | 430 | BUG_ON(!PageHead(page)); |
475 | clear_bit(PG_compound, &page->flags); | 431 | ClearPageHead(page); |
476 | } | 432 | } |
477 | #endif | 433 | #endif |
478 | 434 | ||
479 | #endif /* !PAGEFLAGS_EXTENDED */ | 435 | #define PG_head_mask ((1L << PG_head)) |
480 | 436 | ||
481 | #ifdef CONFIG_HUGETLB_PAGE | 437 | #ifdef CONFIG_HUGETLB_PAGE |
482 | int PageHuge(struct page *page); | 438 | int PageHuge(struct page *page); |
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 17fa4f8de3a6..7e62920a3a94 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h | |||
@@ -36,9 +36,9 @@ static inline unsigned long page_counter_read(struct page_counter *counter) | |||
36 | 36 | ||
37 | void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); | 37 | void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); |
38 | void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); | 38 | void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); |
39 | int page_counter_try_charge(struct page_counter *counter, | 39 | bool page_counter_try_charge(struct page_counter *counter, |
40 | unsigned long nr_pages, | 40 | unsigned long nr_pages, |
41 | struct page_counter **fail); | 41 | struct page_counter **fail); |
42 | void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); | 42 | void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); |
43 | int page_counter_limit(struct page_counter *counter, unsigned long limit); | 43 | int page_counter_limit(struct page_counter *counter, unsigned long limit); |
44 | int page_counter_memparse(const char *buf, const char *max, | 44 | int page_counter_memparse(const char *buf, const char *max, |
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 2baeee12f48e..e942558b3585 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h | |||
@@ -44,7 +44,7 @@ enum pageblock_bits { | |||
44 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE | 44 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
45 | 45 | ||
46 | /* Huge page sizes are variable */ | 46 | /* Huge page sizes are variable */ |
47 | extern int pageblock_order; | 47 | extern unsigned int pageblock_order; |
48 | 48 | ||
49 | #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ | 49 | #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ |
50 | 50 | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a6c78e00ea96..26eabf5ec718 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping) | |||
69 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; | 69 | return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; |
70 | } | 70 | } |
71 | 71 | ||
72 | /* Restricts the given gfp_mask to what the mapping allows. */ | ||
73 | static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, | ||
74 | gfp_t gfp_mask) | ||
75 | { | ||
76 | return mapping_gfp_mask(mapping) & gfp_mask; | ||
77 | } | ||
78 | |||
72 | /* | 79 | /* |
73 | * This is non-atomic. Only to be used before the mapping is activated. | 80 | * This is non-atomic. Only to be used before the mapping is activated. |
74 | * Probably needs a barrier... | 81 | * Probably needs a barrier... |
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index a965efa52152..89ab0572dbc6 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h | |||
@@ -52,6 +52,30 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) | |||
52 | return ACPI_HANDLE(dev); | 52 | return ACPI_HANDLE(dev); |
53 | } | 53 | } |
54 | 54 | ||
55 | struct acpi_pci_root; | ||
56 | struct acpi_pci_root_ops; | ||
57 | |||
58 | struct acpi_pci_root_info { | ||
59 | struct acpi_pci_root *root; | ||
60 | struct acpi_device *bridge; | ||
61 | struct acpi_pci_root_ops *ops; | ||
62 | struct list_head resources; | ||
63 | char name[16]; | ||
64 | }; | ||
65 | |||
66 | struct acpi_pci_root_ops { | ||
67 | struct pci_ops *pci_ops; | ||
68 | int (*init_info)(struct acpi_pci_root_info *info); | ||
69 | void (*release_info)(struct acpi_pci_root_info *info); | ||
70 | int (*prepare_resources)(struct acpi_pci_root_info *info); | ||
71 | }; | ||
72 | |||
73 | extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info); | ||
74 | extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, | ||
75 | struct acpi_pci_root_ops *ops, | ||
76 | struct acpi_pci_root_info *info, | ||
77 | void *sd); | ||
78 | |||
55 | void acpi_pci_add_bus(struct pci_bus *bus); | 79 | void acpi_pci_add_bus(struct pci_bus *bus); |
56 | void acpi_pci_remove_bus(struct pci_bus *bus); | 80 | void acpi_pci_remove_bus(struct pci_bus *bus); |
57 | 81 | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index e90eb22de628..e828e7b4afec 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -820,6 +820,7 @@ void pci_bus_add_device(struct pci_dev *dev); | |||
820 | void pci_read_bridge_bases(struct pci_bus *child); | 820 | void pci_read_bridge_bases(struct pci_bus *child); |
821 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, | 821 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, |
822 | struct resource *res); | 822 | struct resource *res); |
823 | struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); | ||
823 | u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); | 824 | u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); |
824 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); | 825 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); |
825 | u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); | 826 | u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); |
@@ -1192,6 +1193,17 @@ void pci_unregister_driver(struct pci_driver *dev); | |||
1192 | module_driver(__pci_driver, pci_register_driver, \ | 1193 | module_driver(__pci_driver, pci_register_driver, \ |
1193 | pci_unregister_driver) | 1194 | pci_unregister_driver) |
1194 | 1195 | ||
1196 | /** | ||
1197 | * builtin_pci_driver() - Helper macro for registering a PCI driver | ||
1198 | * @__pci_driver: pci_driver struct | ||
1199 | * | ||
1200 | * Helper macro for PCI drivers which do not do anything special in their | ||
1201 | * init code. This eliminates a lot of boilerplate. Each driver may only | ||
1202 | * use this macro once, and calling it replaces device_initcall(...) | ||
1203 | */ | ||
1204 | #define builtin_pci_driver(__pci_driver) \ | ||
1205 | builtin_driver(__pci_driver, pci_register_driver) | ||
1206 | |||
1195 | struct pci_driver *pci_dev_driver(const struct pci_dev *dev); | 1207 | struct pci_driver *pci_dev_driver(const struct pci_dev *dev); |
1196 | int pci_add_dynid(struct pci_driver *drv, | 1208 | int pci_add_dynid(struct pci_driver *drv, |
1197 | unsigned int vendor, unsigned int device, | 1209 | unsigned int vendor, unsigned int device, |
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 834c4e52cb2d..c2fa3ecb0dce 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h | |||
@@ -5,11 +5,12 @@ | |||
5 | #include <linux/rwsem.h> | 5 | #include <linux/rwsem.h> |
6 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
7 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
8 | #include <linux/rcu_sync.h> | ||
8 | #include <linux/lockdep.h> | 9 | #include <linux/lockdep.h> |
9 | 10 | ||
10 | struct percpu_rw_semaphore { | 11 | struct percpu_rw_semaphore { |
12 | struct rcu_sync rss; | ||
11 | unsigned int __percpu *fast_read_ctr; | 13 | unsigned int __percpu *fast_read_ctr; |
12 | atomic_t write_ctr; | ||
13 | struct rw_semaphore rw_sem; | 14 | struct rw_semaphore rw_sem; |
14 | atomic_t slow_read_ctr; | 15 | atomic_t slow_read_ctr; |
15 | wait_queue_head_t write_waitq; | 16 | wait_queue_head_t write_waitq; |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 092a0e8a479a..d841d33bcdc9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -140,33 +140,67 @@ struct hw_perf_event { | |||
140 | }; | 140 | }; |
141 | #endif | 141 | #endif |
142 | }; | 142 | }; |
143 | /* | ||
144 | * If the event is a per task event, this will point to the task in | ||
145 | * question. See the comment in perf_event_alloc(). | ||
146 | */ | ||
143 | struct task_struct *target; | 147 | struct task_struct *target; |
148 | |||
149 | /* | ||
150 | * hw_perf_event::state flags; used to track the PERF_EF_* state. | ||
151 | */ | ||
152 | #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ | ||
153 | #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ | ||
154 | #define PERF_HES_ARCH 0x04 | ||
155 | |||
144 | int state; | 156 | int state; |
157 | |||
158 | /* | ||
159 | * The last observed hardware counter value, updated with a | ||
160 | * local64_cmpxchg() such that pmu::read() can be called nested. | ||
161 | */ | ||
145 | local64_t prev_count; | 162 | local64_t prev_count; |
163 | |||
164 | /* | ||
165 | * The period to start the next sample with. | ||
166 | */ | ||
146 | u64 sample_period; | 167 | u64 sample_period; |
168 | |||
169 | /* | ||
170 | * The period we started this sample with. | ||
171 | */ | ||
147 | u64 last_period; | 172 | u64 last_period; |
173 | |||
174 | /* | ||
175 | * However much is left of the current period; note that this is | ||
176 | * a full 64bit value and allows for generation of periods longer | ||
177 | * than hardware might allow. | ||
178 | */ | ||
148 | local64_t period_left; | 179 | local64_t period_left; |
180 | |||
181 | /* | ||
182 | * State for throttling the event, see __perf_event_overflow() and | ||
183 | * perf_adjust_freq_unthr_context(). | ||
184 | */ | ||
149 | u64 interrupts_seq; | 185 | u64 interrupts_seq; |
150 | u64 interrupts; | 186 | u64 interrupts; |
151 | 187 | ||
188 | /* | ||
189 | * State for freq target events, see __perf_event_overflow() and | ||
190 | * perf_adjust_freq_unthr_context(). | ||
191 | */ | ||
152 | u64 freq_time_stamp; | 192 | u64 freq_time_stamp; |
153 | u64 freq_count_stamp; | 193 | u64 freq_count_stamp; |
154 | #endif | 194 | #endif |
155 | }; | 195 | }; |
156 | 196 | ||
157 | /* | ||
158 | * hw_perf_event::state flags | ||
159 | */ | ||
160 | #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ | ||
161 | #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ | ||
162 | #define PERF_HES_ARCH 0x04 | ||
163 | |||
164 | struct perf_event; | 197 | struct perf_event; |
165 | 198 | ||
166 | /* | 199 | /* |
167 | * Common implementation detail of pmu::{start,commit,cancel}_txn | 200 | * Common implementation detail of pmu::{start,commit,cancel}_txn |
168 | */ | 201 | */ |
169 | #define PERF_EVENT_TXN 0x1 | 202 | #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ |
203 | #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ | ||
170 | 204 | ||
171 | /** | 205 | /** |
172 | * pmu::capabilities flags | 206 | * pmu::capabilities flags |
@@ -210,7 +244,19 @@ struct pmu { | |||
210 | 244 | ||
211 | /* | 245 | /* |
212 | * Try and initialize the event for this PMU. | 246 | * Try and initialize the event for this PMU. |
213 | * Should return -ENOENT when the @event doesn't match this PMU. | 247 | * |
248 | * Returns: | ||
249 | * -ENOENT -- @event is not for this PMU | ||
250 | * | ||
251 | * -ENODEV -- @event is for this PMU but PMU not present | ||
252 | * -EBUSY -- @event is for this PMU but PMU temporarily unavailable | ||
253 | * -EINVAL -- @event is for this PMU but @event is not valid | ||
254 | * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported | ||
255 | * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges | ||
256 | * | ||
257 | * 0 -- @event is for this PMU and valid | ||
258 | * | ||
259 | * Other error return values are allowed. | ||
214 | */ | 260 | */ |
215 | int (*event_init) (struct perf_event *event); | 261 | int (*event_init) (struct perf_event *event); |
216 | 262 | ||
@@ -221,27 +267,61 @@ struct pmu { | |||
221 | void (*event_mapped) (struct perf_event *event); /*optional*/ | 267 | void (*event_mapped) (struct perf_event *event); /*optional*/ |
222 | void (*event_unmapped) (struct perf_event *event); /*optional*/ | 268 | void (*event_unmapped) (struct perf_event *event); /*optional*/ |
223 | 269 | ||
270 | /* | ||
271 | * Flags for ->add()/->del()/ ->start()/->stop(). There are | ||
272 | * matching hw_perf_event::state flags. | ||
273 | */ | ||
224 | #define PERF_EF_START 0x01 /* start the counter when adding */ | 274 | #define PERF_EF_START 0x01 /* start the counter when adding */ |
225 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ | 275 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ |
226 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ | 276 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ |
227 | 277 | ||
228 | /* | 278 | /* |
229 | * Adds/Removes a counter to/from the PMU, can be done inside | 279 | * Adds/Removes a counter to/from the PMU, can be done inside a |
230 | * a transaction, see the ->*_txn() methods. | 280 | * transaction, see the ->*_txn() methods. |
281 | * | ||
282 | * The add/del callbacks will reserve all hardware resources required | ||
283 | * to service the event, this includes any counter constraint | ||
284 | * scheduling etc. | ||
285 | * | ||
286 | * Called with IRQs disabled and the PMU disabled on the CPU the event | ||
287 | * is on. | ||
288 | * | ||
289 | * ->add() called without PERF_EF_START should result in the same state | ||
290 | * as ->add() followed by ->stop(). | ||
291 | * | ||
292 | * ->del() must always PERF_EF_UPDATE stop an event. If it calls | ||
293 | * ->stop() that must deal with already being stopped without | ||
294 | * PERF_EF_UPDATE. | ||
231 | */ | 295 | */ |
232 | int (*add) (struct perf_event *event, int flags); | 296 | int (*add) (struct perf_event *event, int flags); |
233 | void (*del) (struct perf_event *event, int flags); | 297 | void (*del) (struct perf_event *event, int flags); |
234 | 298 | ||
235 | /* | 299 | /* |
236 | * Starts/Stops a counter present on the PMU. The PMI handler | 300 | * Starts/Stops a counter present on the PMU. |
237 | * should stop the counter when perf_event_overflow() returns | 301 | * |
238 | * !0. ->start() will be used to continue. | 302 | * The PMI handler should stop the counter when perf_event_overflow() |
303 | * returns !0. ->start() will be used to continue. | ||
304 | * | ||
305 | * Also used to change the sample period. | ||
306 | * | ||
307 | * Called with IRQs disabled and the PMU disabled on the CPU the event | ||
308 | * is on -- will be called from NMI context with the PMU generates | ||
309 | * NMIs. | ||
310 | * | ||
311 | * ->stop() with PERF_EF_UPDATE will read the counter and update | ||
312 | * period/count values like ->read() would. | ||
313 | * | ||
314 | * ->start() with PERF_EF_RELOAD will reprogram the the counter | ||
315 | * value, must be preceded by a ->stop() with PERF_EF_UPDATE. | ||
239 | */ | 316 | */ |
240 | void (*start) (struct perf_event *event, int flags); | 317 | void (*start) (struct perf_event *event, int flags); |
241 | void (*stop) (struct perf_event *event, int flags); | 318 | void (*stop) (struct perf_event *event, int flags); |
242 | 319 | ||
243 | /* | 320 | /* |
244 | * Updates the counter value of the event. | 321 | * Updates the counter value of the event. |
322 | * | ||
323 | * For sampling capable PMUs this will also update the software period | ||
324 | * hw_perf_event::period_left field. | ||
245 | */ | 325 | */ |
246 | void (*read) (struct perf_event *event); | 326 | void (*read) (struct perf_event *event); |
247 | 327 | ||
@@ -252,20 +332,26 @@ struct pmu { | |||
252 | * | 332 | * |
253 | * Start the transaction, after this ->add() doesn't need to | 333 | * Start the transaction, after this ->add() doesn't need to |
254 | * do schedulability tests. | 334 | * do schedulability tests. |
335 | * | ||
336 | * Optional. | ||
255 | */ | 337 | */ |
256 | void (*start_txn) (struct pmu *pmu); /* optional */ | 338 | void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); |
257 | /* | 339 | /* |
258 | * If ->start_txn() disabled the ->add() schedulability test | 340 | * If ->start_txn() disabled the ->add() schedulability test |
259 | * then ->commit_txn() is required to perform one. On success | 341 | * then ->commit_txn() is required to perform one. On success |
260 | * the transaction is closed. On error the transaction is kept | 342 | * the transaction is closed. On error the transaction is kept |
261 | * open until ->cancel_txn() is called. | 343 | * open until ->cancel_txn() is called. |
344 | * | ||
345 | * Optional. | ||
262 | */ | 346 | */ |
263 | int (*commit_txn) (struct pmu *pmu); /* optional */ | 347 | int (*commit_txn) (struct pmu *pmu); |
264 | /* | 348 | /* |
265 | * Will cancel the transaction, assumes ->del() is called | 349 | * Will cancel the transaction, assumes ->del() is called |
266 | * for each successful ->add() during the transaction. | 350 | * for each successful ->add() during the transaction. |
351 | * | ||
352 | * Optional. | ||
267 | */ | 353 | */ |
268 | void (*cancel_txn) (struct pmu *pmu); /* optional */ | 354 | void (*cancel_txn) (struct pmu *pmu); |
269 | 355 | ||
270 | /* | 356 | /* |
271 | * Will return the value for perf_event_mmap_page::index for this event, | 357 | * Will return the value for perf_event_mmap_page::index for this event, |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 4a4e3a092337..05fde31b6dc6 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -213,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) | |||
213 | void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); | 213 | void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); |
214 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); | 214 | struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); |
215 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); | 215 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); |
216 | int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); | ||
216 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); | 217 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); |
218 | int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); | ||
217 | 219 | ||
218 | 220 | ||
219 | #define PHY_INTERRUPT_DISABLED 0x0 | 221 | #define PHY_INTERRUPT_DISABLED 0x0 |
@@ -798,6 +800,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); | |||
798 | int phy_start_interrupts(struct phy_device *phydev); | 800 | int phy_start_interrupts(struct phy_device *phydev); |
799 | void phy_print_status(struct phy_device *phydev); | 801 | void phy_print_status(struct phy_device *phydev); |
800 | void phy_device_free(struct phy_device *phydev); | 802 | void phy_device_free(struct phy_device *phydev); |
803 | int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); | ||
801 | 804 | ||
802 | int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, | 805 | int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, |
803 | int (*run)(struct phy_device *)); | 806 | int (*run)(struct phy_device *)); |
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index 281cb91ddcf5..05082e407c4a 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h | |||
@@ -24,10 +24,14 @@ | |||
24 | * struct dev_pin_info - pin state container for devices | 24 | * struct dev_pin_info - pin state container for devices |
25 | * @p: pinctrl handle for the containing device | 25 | * @p: pinctrl handle for the containing device |
26 | * @default_state: the default state for the handle, if found | 26 | * @default_state: the default state for the handle, if found |
27 | * @init_state: the state at probe time, if found | ||
28 | * @sleep_state: the state at suspend time, if found | ||
29 | * @idle_state: the state at idle (runtime suspend) time, if found | ||
27 | */ | 30 | */ |
28 | struct dev_pin_info { | 31 | struct dev_pin_info { |
29 | struct pinctrl *p; | 32 | struct pinctrl *p; |
30 | struct pinctrl_state *default_state; | 33 | struct pinctrl_state *default_state; |
34 | struct pinctrl_state *init_state; | ||
31 | #ifdef CONFIG_PM | 35 | #ifdef CONFIG_PM |
32 | struct pinctrl_state *sleep_state; | 36 | struct pinctrl_state *sleep_state; |
33 | struct pinctrl_state *idle_state; | 37 | struct pinctrl_state *idle_state; |
@@ -35,6 +39,7 @@ struct dev_pin_info { | |||
35 | }; | 39 | }; |
36 | 40 | ||
37 | extern int pinctrl_bind_pins(struct device *dev); | 41 | extern int pinctrl_bind_pins(struct device *dev); |
42 | extern int pinctrl_init_done(struct device *dev); | ||
38 | 43 | ||
39 | #else | 44 | #else |
40 | 45 | ||
@@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev) | |||
45 | return 0; | 50 | return 0; |
46 | } | 51 | } |
47 | 52 | ||
53 | static inline int pinctrl_init_done(struct device *dev) | ||
54 | { | ||
55 | return 0; | ||
56 | } | ||
57 | |||
48 | #endif /* CONFIG_PINCTRL */ | 58 | #endif /* CONFIG_PINCTRL */ |
49 | #endif /* PINCTRL_DEVINFO_H */ | 59 | #endif /* PINCTRL_DEVINFO_H */ |
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index fe65962b264f..d921afd5f109 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h | |||
@@ -20,6 +20,11 @@ | |||
20 | 20 | ||
21 | /** | 21 | /** |
22 | * enum pin_config_param - possible pin configuration parameters | 22 | * enum pin_config_param - possible pin configuration parameters |
23 | * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it | ||
24 | * weakly drives the last value on a tristate bus, also known as a "bus | ||
25 | * holder", "bus keeper" or "repeater". This allows another device on the | ||
26 | * bus to change the value by driving the bus high or low and switching to | ||
27 | * tristate. The argument is ignored. | ||
23 | * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a | 28 | * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a |
24 | * transition from say pull-up to pull-down implies that you disable | 29 | * transition from say pull-up to pull-down implies that you disable |
25 | * pull-up in the process, this setting disables all biasing. | 30 | * pull-up in the process, this setting disables all biasing. |
@@ -29,14 +34,6 @@ | |||
29 | * if for example some other pin is going to drive the signal connected | 34 | * if for example some other pin is going to drive the signal connected |
30 | * to it for a while. Pins used for input are usually always high | 35 | * to it for a while. Pins used for input are usually always high |
31 | * impedance. | 36 | * impedance. |
32 | * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it | ||
33 | * weakly drives the last value on a tristate bus, also known as a "bus | ||
34 | * holder", "bus keeper" or "repeater". This allows another device on the | ||
35 | * bus to change the value by driving the bus high or low and switching to | ||
36 | * tristate. The argument is ignored. | ||
37 | * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high | ||
38 | * impedance to VDD). If the argument is != 0 pull-up is enabled, | ||
39 | * if it is 0, pull-up is total, i.e. the pin is connected to VDD. | ||
40 | * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high | 37 | * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high |
41 | * impedance to GROUND). If the argument is != 0 pull-down is enabled, | 38 | * impedance to GROUND). If the argument is != 0 pull-down is enabled, |
42 | * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. | 39 | * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. |
@@ -48,10 +45,9 @@ | |||
48 | * If the argument is != 0 pull up/down is enabled, if it is 0, the | 45 | * If the argument is != 0 pull up/down is enabled, if it is 0, the |
49 | * configuration is ignored. The proper way to disable it is to use | 46 | * configuration is ignored. The proper way to disable it is to use |
50 | * @PIN_CONFIG_BIAS_DISABLE. | 47 | * @PIN_CONFIG_BIAS_DISABLE. |
51 | * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and | 48 | * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high |
52 | * low, this is the most typical case and is typically achieved with two | 49 | * impedance to VDD). If the argument is != 0 pull-up is enabled, |
53 | * active transistors on the output. Setting this config will enable | 50 | * if it is 0, pull-up is total, i.e. the pin is connected to VDD. |
54 | * push-pull mode, the argument is ignored. | ||
55 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open | 51 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open |
56 | * collector) which means it is usually wired with other output ports | 52 | * collector) which means it is usually wired with other output ports |
57 | * which are then pulled up with an external resistor. Setting this | 53 | * which are then pulled up with an external resistor. Setting this |
@@ -59,28 +55,26 @@ | |||
59 | * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source | 55 | * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source |
60 | * (open emitter). Setting this config will enable open source mode, the | 56 | * (open emitter). Setting this config will enable open source mode, the |
61 | * argument is ignored. | 57 | * argument is ignored. |
58 | * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and | ||
59 | * low, this is the most typical case and is typically achieved with two | ||
60 | * active transistors on the output. Setting this config will enable | ||
61 | * push-pull mode, the argument is ignored. | ||
62 | * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current | 62 | * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current |
63 | * passed as argument. The argument is in mA. | 63 | * passed as argument. The argument is in mA. |
64 | * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, | ||
65 | * which means it will wait for signals to settle when reading inputs. The | ||
66 | * argument gives the debounce time in usecs. Setting the | ||
67 | * argument to zero turns debouncing off. | ||
64 | * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not | 68 | * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not |
65 | * affect the pin's ability to drive output. 1 enables input, 0 disables | 69 | * affect the pin's ability to drive output. 1 enables input, 0 disables |
66 | * input. | 70 | * input. |
67 | * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. | ||
68 | * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, | ||
69 | * schmitt-trigger mode is disabled. | ||
70 | * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in | 71 | * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in |
71 | * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, | 72 | * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, |
72 | * the threshold value is given on a custom format as argument when | 73 | * the threshold value is given on a custom format as argument when |
73 | * setting pins to this mode. | 74 | * setting pins to this mode. |
74 | * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, | 75 | * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. |
75 | * which means it will wait for signals to settle when reading inputs. The | 76 | * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, |
76 | * argument gives the debounce time in usecs. Setting the | 77 | * schmitt-trigger mode is disabled. |
77 | * argument to zero turns debouncing off. | ||
78 | * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power | ||
79 | * supplies, the argument to this parameter (on a custom format) tells | ||
80 | * the driver which alternative power source to use. | ||
81 | * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to | ||
82 | * this parameter (on a custom format) tells the driver which alternative | ||
83 | * slew rate to use. | ||
84 | * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power | 78 | * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power |
85 | * operation, if several modes of operation are supported these can be | 79 | * operation, if several modes of operation are supported these can be |
86 | * passed in the argument on a custom form, else just use argument 1 | 80 | * passed in the argument on a custom form, else just use argument 1 |
@@ -89,29 +83,35 @@ | |||
89 | * 1 to indicate high level, argument 0 to indicate low level. (Please | 83 | * 1 to indicate high level, argument 0 to indicate low level. (Please |
90 | * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a | 84 | * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a |
91 | * discussion around this parameter.) | 85 | * discussion around this parameter.) |
86 | * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power | ||
87 | * supplies, the argument to this parameter (on a custom format) tells | ||
88 | * the driver which alternative power source to use. | ||
89 | * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to | ||
90 | * this parameter (on a custom format) tells the driver which alternative | ||
91 | * slew rate to use. | ||
92 | * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if | 92 | * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if |
93 | * you need to pass in custom configurations to the pin controller, use | 93 | * you need to pass in custom configurations to the pin controller, use |
94 | * PIN_CONFIG_END+1 as the base offset. | 94 | * PIN_CONFIG_END+1 as the base offset. |
95 | */ | 95 | */ |
96 | enum pin_config_param { | 96 | enum pin_config_param { |
97 | PIN_CONFIG_BIAS_BUS_HOLD, | ||
97 | PIN_CONFIG_BIAS_DISABLE, | 98 | PIN_CONFIG_BIAS_DISABLE, |
98 | PIN_CONFIG_BIAS_HIGH_IMPEDANCE, | 99 | PIN_CONFIG_BIAS_HIGH_IMPEDANCE, |
99 | PIN_CONFIG_BIAS_BUS_HOLD, | ||
100 | PIN_CONFIG_BIAS_PULL_UP, | ||
101 | PIN_CONFIG_BIAS_PULL_DOWN, | 100 | PIN_CONFIG_BIAS_PULL_DOWN, |
102 | PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, | 101 | PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, |
103 | PIN_CONFIG_DRIVE_PUSH_PULL, | 102 | PIN_CONFIG_BIAS_PULL_UP, |
104 | PIN_CONFIG_DRIVE_OPEN_DRAIN, | 103 | PIN_CONFIG_DRIVE_OPEN_DRAIN, |
105 | PIN_CONFIG_DRIVE_OPEN_SOURCE, | 104 | PIN_CONFIG_DRIVE_OPEN_SOURCE, |
105 | PIN_CONFIG_DRIVE_PUSH_PULL, | ||
106 | PIN_CONFIG_DRIVE_STRENGTH, | 106 | PIN_CONFIG_DRIVE_STRENGTH, |
107 | PIN_CONFIG_INPUT_DEBOUNCE, | ||
107 | PIN_CONFIG_INPUT_ENABLE, | 108 | PIN_CONFIG_INPUT_ENABLE, |
108 | PIN_CONFIG_INPUT_SCHMITT_ENABLE, | ||
109 | PIN_CONFIG_INPUT_SCHMITT, | 109 | PIN_CONFIG_INPUT_SCHMITT, |
110 | PIN_CONFIG_INPUT_DEBOUNCE, | 110 | PIN_CONFIG_INPUT_SCHMITT_ENABLE, |
111 | PIN_CONFIG_POWER_SOURCE, | ||
112 | PIN_CONFIG_SLEW_RATE, | ||
113 | PIN_CONFIG_LOW_POWER_MODE, | 111 | PIN_CONFIG_LOW_POWER_MODE, |
114 | PIN_CONFIG_OUTPUT, | 112 | PIN_CONFIG_OUTPUT, |
113 | PIN_CONFIG_POWER_SOURCE, | ||
114 | PIN_CONFIG_SLEW_RATE, | ||
115 | PIN_CONFIG_END = 0x7FFF, | 115 | PIN_CONFIG_END = 0x7FFF, |
116 | }; | 116 | }; |
117 | 117 | ||
diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index b5919f8e6d1a..23073519339f 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h | |||
@@ -9,6 +9,13 @@ | |||
9 | * hogs to configure muxing and pins at boot, and also as a state | 9 | * hogs to configure muxing and pins at boot, and also as a state |
10 | * to go into when returning from sleep and idle in | 10 | * to go into when returning from sleep and idle in |
11 | * .pm_runtime_resume() or ordinary .resume() for example. | 11 | * .pm_runtime_resume() or ordinary .resume() for example. |
12 | * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default" | ||
13 | * before the driver's probe() function is called. There are some | ||
14 | * drivers where that is not appropriate becausing doing so would | ||
15 | * glitch the pins. In those cases you can add an "init" pinctrl | ||
16 | * which is the state of the pins before drive probe. After probe | ||
17 | * if the pins are still in "init" state they'll be moved to | ||
18 | * "default". | ||
12 | * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into | 19 | * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into |
13 | * when the pins are idle. This is a state where the system is relaxed | 20 | * when the pins are idle. This is a state where the system is relaxed |
14 | * but not fully sleeping - some power may be on but clocks gated for | 21 | * but not fully sleeping - some power may be on but clocks gated for |
@@ -20,5 +27,6 @@ | |||
20 | * ordinary .suspend() function. | 27 | * ordinary .suspend() function. |
21 | */ | 28 | */ |
22 | #define PINCTRL_STATE_DEFAULT "default" | 29 | #define PINCTRL_STATE_DEFAULT "default" |
30 | #define PINCTRL_STATE_INIT "init" | ||
23 | #define PINCTRL_STATE_IDLE "idle" | 31 | #define PINCTRL_STATE_IDLE "idle" |
24 | #define PINCTRL_STATE_SLEEP "sleep" | 32 | #define PINCTRL_STATE_SLEEP "sleep" |
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 527a85c61924..3c8825b67298 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h | |||
@@ -9,30 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/mtd/nand.h> | 10 | #include <linux/mtd/nand.h> |
11 | #include <linux/mtd/partitions.h> | 11 | #include <linux/mtd/partitions.h> |
12 | #include <linux/device.h> | ||
13 | #include <linux/i2c.h> | ||
14 | #include <linux/leds.h> | ||
15 | #include <linux/spi/spi.h> | ||
16 | #include <linux/usb/atmel_usba_udc.h> | ||
17 | #include <linux/atmel-mci.h> | ||
18 | #include <sound/atmel-ac97c.h> | ||
19 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
20 | #include <linux/platform_data/macb.h> | ||
21 | |||
22 | /* | ||
23 | * at91: 6 USARTs and one DBGU port (SAM9260) | ||
24 | * avr32: 4 | ||
25 | */ | ||
26 | #define ATMEL_MAX_UART 7 | ||
27 | |||
28 | /* USB Device */ | ||
29 | struct at91_udc_data { | ||
30 | int vbus_pin; /* high == host powering us */ | ||
31 | u8 vbus_active_low; /* vbus polarity */ | ||
32 | u8 vbus_polled; /* Use polling, not interrupt */ | ||
33 | int pullup_pin; /* active == D+ pulled up */ | ||
34 | u8 pullup_active_low; /* true == pullup_pin is active low */ | ||
35 | }; | ||
36 | 13 | ||
37 | /* Compact Flash */ | 14 | /* Compact Flash */ |
38 | struct at91_cf_data { | 15 | struct at91_cf_data { |
@@ -74,11 +51,6 @@ struct atmel_uart_data { | |||
74 | struct serial_rs485 rs485; /* rs485 settings */ | 51 | struct serial_rs485 rs485; /* rs485 settings */ |
75 | }; | 52 | }; |
76 | 53 | ||
77 | /* CAN */ | ||
78 | struct at91_can_data { | ||
79 | void (*transceiver_switch)(int on); | ||
80 | }; | ||
81 | |||
82 | /* FIXME: this needs a better location, but gets stuff building again */ | 54 | /* FIXME: this needs a better location, but gets stuff building again */ |
83 | extern int at91_suspend_entering_slow_clock(void); | 55 | extern int at91_suspend_entering_slow_clock(void); |
84 | 56 | ||
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 87ac14c584f2..03b6095d3b18 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
@@ -37,6 +37,7 @@ struct dw_dma_slave { | |||
37 | * @nr_channels: Number of channels supported by hardware (max 8) | 37 | * @nr_channels: Number of channels supported by hardware (max 8) |
38 | * @is_private: The device channels should be marked as private and not for | 38 | * @is_private: The device channels should be marked as private and not for |
39 | * by the general purpose DMA channel allocator. | 39 | * by the general purpose DMA channel allocator. |
40 | * @is_memcpy: The device channels do support memory-to-memory transfers. | ||
40 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 41 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
41 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 42 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
42 | * @block_size: Maximum block size supported by the controller | 43 | * @block_size: Maximum block size supported by the controller |
@@ -47,6 +48,7 @@ struct dw_dma_slave { | |||
47 | struct dw_dma_platform_data { | 48 | struct dw_dma_platform_data { |
48 | unsigned int nr_channels; | 49 | unsigned int nr_channels; |
49 | bool is_private; | 50 | bool is_private; |
51 | bool is_memcpy; | ||
50 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 52 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
51 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 53 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
52 | unsigned char chan_allocation_order; | 54 | unsigned char chan_allocation_order; |
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h index 8a1f6a4920b2..3453fa655502 100644 --- a/include/linux/platform_data/dma-hsu.h +++ b/include/linux/platform_data/dma-hsu.h | |||
@@ -18,8 +18,4 @@ struct hsu_dma_slave { | |||
18 | int chan_id; | 18 | int chan_id; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct hsu_dma_platform_data { | ||
22 | unsigned short nr_channels; | ||
23 | }; | ||
24 | |||
25 | #endif /* _PLATFORM_DATA_DMA_HSU_H */ | 21 | #endif /* _PLATFORM_DATA_DMA_HSU_H */ |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index bdb2710e2aab..e2878baeb90e 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -41,51 +41,6 @@ | |||
41 | #ifndef EDMA_H_ | 41 | #ifndef EDMA_H_ |
42 | #define EDMA_H_ | 42 | #define EDMA_H_ |
43 | 43 | ||
44 | /* PaRAM slots are laid out like this */ | ||
45 | struct edmacc_param { | ||
46 | u32 opt; | ||
47 | u32 src; | ||
48 | u32 a_b_cnt; | ||
49 | u32 dst; | ||
50 | u32 src_dst_bidx; | ||
51 | u32 link_bcntrld; | ||
52 | u32 src_dst_cidx; | ||
53 | u32 ccnt; | ||
54 | } __packed; | ||
55 | |||
56 | /* fields in edmacc_param.opt */ | ||
57 | #define SAM BIT(0) | ||
58 | #define DAM BIT(1) | ||
59 | #define SYNCDIM BIT(2) | ||
60 | #define STATIC BIT(3) | ||
61 | #define EDMA_FWID (0x07 << 8) | ||
62 | #define TCCMODE BIT(11) | ||
63 | #define EDMA_TCC(t) ((t) << 12) | ||
64 | #define TCINTEN BIT(20) | ||
65 | #define ITCINTEN BIT(21) | ||
66 | #define TCCHEN BIT(22) | ||
67 | #define ITCCHEN BIT(23) | ||
68 | |||
69 | /*ch_status paramater of callback function possible values*/ | ||
70 | #define EDMA_DMA_COMPLETE 1 | ||
71 | #define EDMA_DMA_CC_ERROR 2 | ||
72 | #define EDMA_DMA_TC1_ERROR 3 | ||
73 | #define EDMA_DMA_TC2_ERROR 4 | ||
74 | |||
75 | enum address_mode { | ||
76 | INCR = 0, | ||
77 | FIFO = 1 | ||
78 | }; | ||
79 | |||
80 | enum fifo_width { | ||
81 | W8BIT = 0, | ||
82 | W16BIT = 1, | ||
83 | W32BIT = 2, | ||
84 | W64BIT = 3, | ||
85 | W128BIT = 4, | ||
86 | W256BIT = 5 | ||
87 | }; | ||
88 | |||
89 | enum dma_event_q { | 44 | enum dma_event_q { |
90 | EVENTQ_0 = 0, | 45 | EVENTQ_0 = 0, |
91 | EVENTQ_1 = 1, | 46 | EVENTQ_1 = 1, |
@@ -94,64 +49,10 @@ enum dma_event_q { | |||
94 | EVENTQ_DEFAULT = -1 | 49 | EVENTQ_DEFAULT = -1 |
95 | }; | 50 | }; |
96 | 51 | ||
97 | enum sync_dimension { | ||
98 | ASYNC = 0, | ||
99 | ABSYNC = 1 | ||
100 | }; | ||
101 | |||
102 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) | 52 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) |
103 | #define EDMA_CTLR(i) ((i) >> 16) | 53 | #define EDMA_CTLR(i) ((i) >> 16) |
104 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) | 54 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) |
105 | 55 | ||
106 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
107 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
108 | #define EDMA_CONT_PARAMS_ANY 1001 | ||
109 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 | ||
110 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 | ||
111 | |||
112 | #define EDMA_MAX_CC 2 | ||
113 | |||
114 | /* alloc/free DMA channels and their dedicated parameter RAM slots */ | ||
115 | int edma_alloc_channel(int channel, | ||
116 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
117 | void *data, enum dma_event_q); | ||
118 | void edma_free_channel(unsigned channel); | ||
119 | |||
120 | /* alloc/free parameter RAM slots */ | ||
121 | int edma_alloc_slot(unsigned ctlr, int slot); | ||
122 | void edma_free_slot(unsigned slot); | ||
123 | |||
124 | /* alloc/free a set of contiguous parameter RAM slots */ | ||
125 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count); | ||
126 | int edma_free_cont_slots(unsigned slot, int count); | ||
127 | |||
128 | /* calls that operate on part of a parameter RAM slot */ | ||
129 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
130 | enum address_mode mode, enum fifo_width); | ||
131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
132 | enum address_mode mode, enum fifo_width); | ||
133 | dma_addr_t edma_get_position(unsigned slot, bool dst); | ||
134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | ||
135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | ||
136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | ||
137 | u16 bcnt_rld, enum sync_dimension sync_mode); | ||
138 | void edma_link(unsigned from, unsigned to); | ||
139 | void edma_unlink(unsigned from); | ||
140 | |||
141 | /* calls that operate on an entire parameter RAM slot */ | ||
142 | void edma_write_slot(unsigned slot, const struct edmacc_param *params); | ||
143 | void edma_read_slot(unsigned slot, struct edmacc_param *params); | ||
144 | |||
145 | /* channel control operations */ | ||
146 | int edma_start(unsigned channel); | ||
147 | void edma_stop(unsigned channel); | ||
148 | void edma_clean_channel(unsigned channel); | ||
149 | void edma_clear_event(unsigned channel); | ||
150 | void edma_pause(unsigned channel); | ||
151 | void edma_resume(unsigned channel); | ||
152 | |||
153 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); | ||
154 | |||
155 | struct edma_rsv_info { | 56 | struct edma_rsv_info { |
156 | 57 | ||
157 | const s16 (*rsv_chans)[2]; | 58 | const s16 (*rsv_chans)[2]; |
@@ -170,10 +71,11 @@ struct edma_soc_info { | |||
170 | /* Resource reservation for other cores */ | 71 | /* Resource reservation for other cores */ |
171 | struct edma_rsv_info *rsv; | 72 | struct edma_rsv_info *rsv; |
172 | 73 | ||
74 | /* List of channels allocated for memcpy, terminated with -1 */ | ||
75 | s16 *memcpy_channels; | ||
76 | |||
173 | s8 (*queue_priority_mapping)[2]; | 77 | s8 (*queue_priority_mapping)[2]; |
174 | const s16 (*xbar_chans)[2]; | 78 | const s16 (*xbar_chans)[2]; |
175 | }; | 79 | }; |
176 | 80 | ||
177 | int edma_trigger_channel(unsigned); | ||
178 | |||
179 | #endif | 81 | #endif |
diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h index d2be19a51acd..3c85a735c380 100644 --- a/include/linux/platform_data/leds-kirkwood-netxbig.h +++ b/include/linux/platform_data/leds-kirkwood-netxbig.h | |||
@@ -40,6 +40,7 @@ struct netxbig_led { | |||
40 | int mode_addr; | 40 | int mode_addr; |
41 | int *mode_val; | 41 | int *mode_val; |
42 | int bright_addr; | 42 | int bright_addr; |
43 | int bright_max; | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | struct netxbig_led_platform_data { | 46 | struct netxbig_led_platform_data { |
diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 11f00cdabe3d..11f00cdabe3d 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h | |||
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index ac4ea2e641c7..394d15597dc7 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h | |||
@@ -4,30 +4,6 @@ | |||
4 | #include <linux/mtd/mtd.h> | 4 | #include <linux/mtd/mtd.h> |
5 | #include <linux/mtd/partitions.h> | 5 | #include <linux/mtd/partitions.h> |
6 | 6 | ||
7 | struct pxa3xx_nand_timing { | ||
8 | unsigned int tCH; /* Enable signal hold time */ | ||
9 | unsigned int tCS; /* Enable signal setup time */ | ||
10 | unsigned int tWH; /* ND_nWE high duration */ | ||
11 | unsigned int tWP; /* ND_nWE pulse time */ | ||
12 | unsigned int tRH; /* ND_nRE high duration */ | ||
13 | unsigned int tRP; /* ND_nRE pulse width */ | ||
14 | unsigned int tR; /* ND_nWE high to ND_nRE low for read */ | ||
15 | unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ | ||
16 | unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ | ||
17 | }; | ||
18 | |||
19 | struct pxa3xx_nand_flash { | ||
20 | char *name; | ||
21 | uint32_t chip_id; | ||
22 | unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */ | ||
23 | unsigned int page_size; /* Page size in bytes (PAGE_SZ) */ | ||
24 | unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */ | ||
25 | unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */ | ||
26 | unsigned int num_blocks; /* Number of physical blocks in Flash */ | ||
27 | |||
28 | struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ | ||
29 | }; | ||
30 | |||
31 | /* | 7 | /* |
32 | * Current pxa3xx_nand controller has two chip select which | 8 | * Current pxa3xx_nand controller has two chip select which |
33 | * both be workable. | 9 | * both be workable. |
@@ -63,9 +39,6 @@ struct pxa3xx_nand_platform_data { | |||
63 | 39 | ||
64 | const struct mtd_partition *parts[NUM_CHIP_SELECT]; | 40 | const struct mtd_partition *parts[NUM_CHIP_SELECT]; |
65 | unsigned int nr_parts[NUM_CHIP_SELECT]; | 41 | unsigned int nr_parts[NUM_CHIP_SELECT]; |
66 | |||
67 | const struct pxa3xx_nand_flash * flash; | ||
68 | size_t num_flash; | ||
69 | }; | 42 | }; |
70 | 43 | ||
71 | extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); | 44 | extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); |
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h index ac91707dabcb..a6f9d633f5be 100644 --- a/include/linux/platform_data/nfcmrvl.h +++ b/include/linux/platform_data/nfcmrvl.h | |||
@@ -35,6 +35,14 @@ struct nfcmrvl_platform_data { | |||
35 | unsigned int flow_control; | 35 | unsigned int flow_control; |
36 | /* Tell if firmware supports break control for power management */ | 36 | /* Tell if firmware supports break control for power management */ |
37 | unsigned int break_control; | 37 | unsigned int break_control; |
38 | |||
39 | |||
40 | /* | ||
41 | * I2C specific | ||
42 | */ | ||
43 | |||
44 | unsigned int irq; | ||
45 | unsigned int irq_polarity; | ||
38 | }; | 46 | }; |
39 | 47 | ||
40 | #endif /* _NFCMRVL_PTF_H_ */ | 48 | #endif /* _NFCMRVL_PTF_H_ */ |
diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h index 3f1cbf95ec3b..3982586ba6df 100644 --- a/include/linux/platform_data/s3c-hsotg.h +++ b/include/linux/platform_data/s3c-hsotg.h | |||
@@ -17,19 +17,19 @@ | |||
17 | 17 | ||
18 | struct platform_device; | 18 | struct platform_device; |
19 | 19 | ||
20 | enum s3c_hsotg_dmamode { | 20 | enum dwc2_hsotg_dmamode { |
21 | S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */ | 21 | S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */ |
22 | S3C_HSOTG_DMA_ONLY, /* always use DMA */ | 22 | S3C_HSOTG_DMA_ONLY, /* always use DMA */ |
23 | S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ | 23 | S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ |
24 | }; | 24 | }; |
25 | 25 | ||
26 | /** | 26 | /** |
27 | * struct s3c_hsotg_plat - platform data for high-speed otg/udc | 27 | * struct dwc2_hsotg_plat - platform data for high-speed otg/udc |
28 | * @dma: Whether to use DMA or not. | 28 | * @dma: Whether to use DMA or not. |
29 | * @is_osc: The clock source is an oscillator, not a crystal | 29 | * @is_osc: The clock source is an oscillator, not a crystal |
30 | */ | 30 | */ |
31 | struct s3c_hsotg_plat { | 31 | struct dwc2_hsotg_plat { |
32 | enum s3c_hsotg_dmamode dma; | 32 | enum dwc2_hsotg_dmamode dma; |
33 | unsigned int is_osc:1; | 33 | unsigned int is_osc:1; |
34 | int phy_type; | 34 | int phy_type; |
35 | 35 | ||
@@ -37,6 +37,6 @@ struct s3c_hsotg_plat { | |||
37 | int (*phy_exit)(struct platform_device *pdev, int type); | 37 | int (*phy_exit)(struct platform_device *pdev, int type); |
38 | }; | 38 | }; |
39 | 39 | ||
40 | extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd); | 40 | extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd); |
41 | 41 | ||
42 | #endif /* __LINUX_USB_S3C_HSOTG_H */ | 42 | #endif /* __LINUX_USB_S3C_HSOTG_H */ |
diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h index d9d400a297bd..f6494b347c06 100644 --- a/include/linux/platform_data/st-nci.h +++ b/include/linux/platform_data/st-nci.h | |||
@@ -24,6 +24,8 @@ | |||
24 | struct st_nci_nfc_platform_data { | 24 | struct st_nci_nfc_platform_data { |
25 | unsigned int gpio_reset; | 25 | unsigned int gpio_reset; |
26 | unsigned int irq_polarity; | 26 | unsigned int irq_polarity; |
27 | bool is_ese_present; | ||
28 | bool is_uicc_present; | ||
27 | }; | 29 | }; |
28 | 30 | ||
29 | #endif /* _ST_NCI_H_ */ | 31 | #endif /* _ST_NCI_H_ */ |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index bba08f44cc97..dc777be5f2e1 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -270,6 +270,14 @@ extern struct platform_device *__platform_create_bundle( | |||
270 | struct resource *res, unsigned int n_res, | 270 | struct resource *res, unsigned int n_res, |
271 | const void *data, size_t size, struct module *module); | 271 | const void *data, size_t size, struct module *module); |
272 | 272 | ||
273 | int __platform_register_drivers(struct platform_driver * const *drivers, | ||
274 | unsigned int count, struct module *owner); | ||
275 | void platform_unregister_drivers(struct platform_driver * const *drivers, | ||
276 | unsigned int count); | ||
277 | |||
278 | #define platform_register_drivers(drivers, count) \ | ||
279 | __platform_register_drivers(drivers, count, THIS_MODULE) | ||
280 | |||
273 | /* early platform driver interface */ | 281 | /* early platform driver interface */ |
274 | struct early_platform_driver { | 282 | struct early_platform_driver { |
275 | const char *class_str; | 283 | const char *class_str; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 35d599e7250d..528be6787796 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -732,6 +732,7 @@ extern int pm_generic_poweroff_noirq(struct device *dev); | |||
732 | extern int pm_generic_poweroff_late(struct device *dev); | 732 | extern int pm_generic_poweroff_late(struct device *dev); |
733 | extern int pm_generic_poweroff(struct device *dev); | 733 | extern int pm_generic_poweroff(struct device *dev); |
734 | extern void pm_generic_complete(struct device *dev); | 734 | extern void pm_generic_complete(struct device *dev); |
735 | extern void pm_complete_with_resume_check(struct device *dev); | ||
735 | 736 | ||
736 | #else /* !CONFIG_PM_SLEEP */ | 737 | #else /* !CONFIG_PM_SLEEP */ |
737 | 738 | ||
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b1cf7e797892..ba4ced38efae 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/err.h> | 15 | #include <linux/err.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
18 | #include <linux/cpuidle.h> | ||
19 | 18 | ||
20 | /* Defines used for the flags field in the struct generic_pm_domain */ | 19 | /* Defines used for the flags field in the struct generic_pm_domain */ |
21 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ | 20 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ |
@@ -38,11 +37,6 @@ struct gpd_dev_ops { | |||
38 | bool (*active_wakeup)(struct device *dev); | 37 | bool (*active_wakeup)(struct device *dev); |
39 | }; | 38 | }; |
40 | 39 | ||
41 | struct gpd_cpuidle_data { | ||
42 | unsigned int saved_exit_latency; | ||
43 | struct cpuidle_state *idle_state; | ||
44 | }; | ||
45 | |||
46 | struct generic_pm_domain { | 40 | struct generic_pm_domain { |
47 | struct dev_pm_domain domain; /* PM domain operations */ | 41 | struct dev_pm_domain domain; /* PM domain operations */ |
48 | struct list_head gpd_list_node; /* Node in the global PM domains list */ | 42 | struct list_head gpd_list_node; /* Node in the global PM domains list */ |
@@ -53,7 +47,6 @@ struct generic_pm_domain { | |||
53 | struct dev_power_governor *gov; | 47 | struct dev_power_governor *gov; |
54 | struct work_struct power_off_work; | 48 | struct work_struct power_off_work; |
55 | const char *name; | 49 | const char *name; |
56 | unsigned int in_progress; /* Number of devices being suspended now */ | ||
57 | atomic_t sd_count; /* Number of subdomains with power "on" */ | 50 | atomic_t sd_count; /* Number of subdomains with power "on" */ |
58 | enum gpd_status status; /* Current state of the domain */ | 51 | enum gpd_status status; /* Current state of the domain */ |
59 | unsigned int device_count; /* Number of devices */ | 52 | unsigned int device_count; /* Number of devices */ |
@@ -68,7 +61,6 @@ struct generic_pm_domain { | |||
68 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ | 61 | s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ |
69 | bool max_off_time_changed; | 62 | bool max_off_time_changed; |
70 | bool cached_power_down_ok; | 63 | bool cached_power_down_ok; |
71 | struct gpd_cpuidle_data *cpuidle_data; | ||
72 | int (*attach_dev)(struct generic_pm_domain *domain, | 64 | int (*attach_dev)(struct generic_pm_domain *domain, |
73 | struct device *dev); | 65 | struct device *dev); |
74 | void (*detach_dev)(struct generic_pm_domain *domain, | 66 | void (*detach_dev)(struct generic_pm_domain *domain, |
@@ -89,10 +81,8 @@ struct gpd_link { | |||
89 | }; | 81 | }; |
90 | 82 | ||
91 | struct gpd_timing_data { | 83 | struct gpd_timing_data { |
92 | s64 stop_latency_ns; | 84 | s64 suspend_latency_ns; |
93 | s64 start_latency_ns; | 85 | s64 resume_latency_ns; |
94 | s64 save_state_latency_ns; | ||
95 | s64 restore_state_latency_ns; | ||
96 | s64 effective_constraint_ns; | 86 | s64 effective_constraint_ns; |
97 | bool constraint_changed; | 87 | bool constraint_changed; |
98 | bool cached_stop_ok; | 88 | bool cached_stop_ok; |
@@ -125,29 +115,15 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
125 | struct device *dev, | 115 | struct device *dev, |
126 | struct gpd_timing_data *td); | 116 | struct gpd_timing_data *td); |
127 | 117 | ||
128 | extern int __pm_genpd_name_add_device(const char *domain_name, | ||
129 | struct device *dev, | ||
130 | struct gpd_timing_data *td); | ||
131 | |||
132 | extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 118 | extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
133 | struct device *dev); | 119 | struct device *dev); |
134 | extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 120 | extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
135 | struct generic_pm_domain *new_subdomain); | 121 | struct generic_pm_domain *new_subdomain); |
136 | extern int pm_genpd_add_subdomain_names(const char *master_name, | ||
137 | const char *subdomain_name); | ||
138 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 122 | extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
139 | struct generic_pm_domain *target); | 123 | struct generic_pm_domain *target); |
140 | extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); | ||
141 | extern int pm_genpd_name_attach_cpuidle(const char *name, int state); | ||
142 | extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); | ||
143 | extern int pm_genpd_name_detach_cpuidle(const char *name); | ||
144 | extern void pm_genpd_init(struct generic_pm_domain *genpd, | 124 | extern void pm_genpd_init(struct generic_pm_domain *genpd, |
145 | struct dev_power_governor *gov, bool is_off); | 125 | struct dev_power_governor *gov, bool is_off); |
146 | 126 | ||
147 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); | ||
148 | extern int pm_genpd_name_poweron(const char *domain_name); | ||
149 | extern void pm_genpd_poweroff_unused(void); | ||
150 | |||
151 | extern struct dev_power_governor simple_qos_governor; | 127 | extern struct dev_power_governor simple_qos_governor; |
152 | extern struct dev_power_governor pm_domain_always_on_gov; | 128 | extern struct dev_power_governor pm_domain_always_on_gov; |
153 | #else | 129 | #else |
@@ -166,12 +142,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
166 | { | 142 | { |
167 | return -ENOSYS; | 143 | return -ENOSYS; |
168 | } | 144 | } |
169 | static inline int __pm_genpd_name_add_device(const char *domain_name, | ||
170 | struct device *dev, | ||
171 | struct gpd_timing_data *td) | ||
172 | { | ||
173 | return -ENOSYS; | ||
174 | } | ||
175 | static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 145 | static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
176 | struct device *dev) | 146 | struct device *dev) |
177 | { | 147 | { |
@@ -182,45 +152,15 @@ static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
182 | { | 152 | { |
183 | return -ENOSYS; | 153 | return -ENOSYS; |
184 | } | 154 | } |
185 | static inline int pm_genpd_add_subdomain_names(const char *master_name, | ||
186 | const char *subdomain_name) | ||
187 | { | ||
188 | return -ENOSYS; | ||
189 | } | ||
190 | static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 155 | static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
191 | struct generic_pm_domain *target) | 156 | struct generic_pm_domain *target) |
192 | { | 157 | { |
193 | return -ENOSYS; | 158 | return -ENOSYS; |
194 | } | 159 | } |
195 | static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) | ||
196 | { | ||
197 | return -ENOSYS; | ||
198 | } | ||
199 | static inline int pm_genpd_name_attach_cpuidle(const char *name, int state) | ||
200 | { | ||
201 | return -ENOSYS; | ||
202 | } | ||
203 | static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) | ||
204 | { | ||
205 | return -ENOSYS; | ||
206 | } | ||
207 | static inline int pm_genpd_name_detach_cpuidle(const char *name) | ||
208 | { | ||
209 | return -ENOSYS; | ||
210 | } | ||
211 | static inline void pm_genpd_init(struct generic_pm_domain *genpd, | 160 | static inline void pm_genpd_init(struct generic_pm_domain *genpd, |
212 | struct dev_power_governor *gov, bool is_off) | 161 | struct dev_power_governor *gov, bool is_off) |
213 | { | 162 | { |
214 | } | 163 | } |
215 | static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) | ||
216 | { | ||
217 | return -ENOSYS; | ||
218 | } | ||
219 | static inline int pm_genpd_name_poweron(const char *domain_name) | ||
220 | { | ||
221 | return -ENOSYS; | ||
222 | } | ||
223 | static inline void pm_genpd_poweroff_unused(void) {} | ||
224 | #endif | 164 | #endif |
225 | 165 | ||
226 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, | 166 | static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, |
@@ -229,12 +169,6 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, | |||
229 | return __pm_genpd_add_device(genpd, dev, NULL); | 169 | return __pm_genpd_add_device(genpd, dev, NULL); |
230 | } | 170 | } |
231 | 171 | ||
232 | static inline int pm_genpd_name_add_device(const char *domain_name, | ||
233 | struct device *dev) | ||
234 | { | ||
235 | return __pm_genpd_name_add_device(domain_name, dev, NULL); | ||
236 | } | ||
237 | |||
238 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP | 172 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP |
239 | extern void pm_genpd_syscore_poweroff(struct device *dev); | 173 | extern void pm_genpd_syscore_poweroff(struct device *dev); |
240 | extern void pm_genpd_syscore_poweron(struct device *dev); | 174 | extern void pm_genpd_syscore_poweron(struct device *dev); |
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index e817722ee3f0..9a2e50337af9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
@@ -132,37 +132,37 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( | |||
132 | #endif /* CONFIG_PM_OPP */ | 132 | #endif /* CONFIG_PM_OPP */ |
133 | 133 | ||
134 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) | 134 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) |
135 | int of_init_opp_table(struct device *dev); | 135 | int dev_pm_opp_of_add_table(struct device *dev); |
136 | void of_free_opp_table(struct device *dev); | 136 | void dev_pm_opp_of_remove_table(struct device *dev); |
137 | int of_cpumask_init_opp_table(cpumask_var_t cpumask); | 137 | int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); |
138 | void of_cpumask_free_opp_table(cpumask_var_t cpumask); | 138 | void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); |
139 | int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); | 139 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); |
140 | int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); | 140 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); |
141 | #else | 141 | #else |
142 | static inline int of_init_opp_table(struct device *dev) | 142 | static inline int dev_pm_opp_of_add_table(struct device *dev) |
143 | { | 143 | { |
144 | return -EINVAL; | 144 | return -EINVAL; |
145 | } | 145 | } |
146 | 146 | ||
147 | static inline void of_free_opp_table(struct device *dev) | 147 | static inline void dev_pm_opp_of_remove_table(struct device *dev) |
148 | { | 148 | { |
149 | } | 149 | } |
150 | 150 | ||
151 | static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask) | 151 | static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) |
152 | { | 152 | { |
153 | return -ENOSYS; | 153 | return -ENOSYS; |
154 | } | 154 | } |
155 | 155 | ||
156 | static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask) | 156 | static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) |
157 | { | 157 | { |
158 | } | 158 | } |
159 | 159 | ||
160 | static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) | 160 | static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) |
161 | { | 161 | { |
162 | return -ENOSYS; | 162 | return -ENOSYS; |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) | 165 | static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) |
166 | { | 166 | { |
167 | return -ENOSYS; | 167 | return -ENOSYS; |
168 | } | 168 | } |
diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 85f810b33917..acfea8ce4a07 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h | |||
@@ -65,11 +65,6 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t si | |||
65 | memcpy(dst, (void __force const *) src, size); | 65 | memcpy(dst, (void __force const *) src, size); |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline void memunmap_pmem(struct device *dev, void __pmem *addr) | ||
69 | { | ||
70 | devm_memunmap(dev, (void __force *) addr); | ||
71 | } | ||
72 | |||
73 | static inline bool arch_has_pmem_api(void) | 68 | static inline bool arch_has_pmem_api(void) |
74 | { | 69 | { |
75 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); | 70 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); |
@@ -93,7 +88,7 @@ static inline bool arch_has_wmb_pmem(void) | |||
93 | * These defaults seek to offer decent performance and minimize the | 88 | * These defaults seek to offer decent performance and minimize the |
94 | * window between i/o completion and writes being durable on media. | 89 | * window between i/o completion and writes being durable on media. |
95 | * However, it is undefined / architecture specific whether | 90 | * However, it is undefined / architecture specific whether |
96 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for | 91 | * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for |
97 | * making data durable relative to i/o completion. | 92 | * making data durable relative to i/o completion. |
98 | */ | 93 | */ |
99 | static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, | 94 | static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, |
@@ -117,25 +112,6 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size) | |||
117 | } | 112 | } |
118 | 113 | ||
119 | /** | 114 | /** |
120 | * memremap_pmem - map physical persistent memory for pmem api | ||
121 | * @offset: physical address of persistent memory | ||
122 | * @size: size of the mapping | ||
123 | * | ||
124 | * Establish a mapping of the architecture specific memory type expected | ||
125 | * by memcpy_to_pmem() and wmb_pmem(). For example, it may be | ||
126 | * the case that an uncacheable or writethrough mapping is sufficient, | ||
127 | * or a writeback mapping provided memcpy_to_pmem() and | ||
128 | * wmb_pmem() arrange for the data to be written through the | ||
129 | * cache to persistent media. | ||
130 | */ | ||
131 | static inline void __pmem *memremap_pmem(struct device *dev, | ||
132 | resource_size_t offset, unsigned long size) | ||
133 | { | ||
134 | return (void __pmem *) devm_memremap(dev, offset, size, | ||
135 | ARCH_MEMREMAP_PMEM); | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * memcpy_to_pmem - copy data to persistent memory | 115 | * memcpy_to_pmem - copy data to persistent memory |
140 | * @dst: destination buffer for the copy | 116 | * @dst: destination buffer for the copy |
141 | * @src: source buffer for the copy | 117 | * @src: source buffer for the copy |
diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h deleted file mode 100644 index a857f719bf40..000000000000 --- a/include/linux/power/bq27x00_battery.h +++ /dev/null | |||
@@ -1,19 +0,0 @@ | |||
1 | #ifndef __LINUX_BQ27X00_BATTERY_H__ | ||
2 | #define __LINUX_BQ27X00_BATTERY_H__ | ||
3 | |||
4 | /** | ||
5 | * struct bq27000_plaform_data - Platform data for bq27000 devices | ||
6 | * @name: Name of the battery. If NULL the driver will fallback to "bq27000". | ||
7 | * @read: HDQ read callback. | ||
8 | * This function should provide access to the HDQ bus the battery is | ||
9 | * connected to. | ||
10 | * The first parameter is a pointer to the battery device, the second the | ||
11 | * register to be read. The return value should either be the content of | ||
12 | * the passed register or an error value. | ||
13 | */ | ||
14 | struct bq27000_platform_data { | ||
15 | const char *name; | ||
16 | int (*read)(struct device *dev, unsigned int); | ||
17 | }; | ||
18 | |||
19 | #endif | ||
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h new file mode 100644 index 000000000000..45f6a7b5b3cb --- /dev/null +++ b/include/linux/power/bq27xxx_battery.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef __LINUX_BQ27X00_BATTERY_H__ | ||
2 | #define __LINUX_BQ27X00_BATTERY_H__ | ||
3 | |||
4 | /** | ||
5 | * struct bq27xxx_plaform_data - Platform data for bq27xxx devices | ||
6 | * @name: Name of the battery. | ||
7 | * @chip: Chip class number of this device. | ||
8 | * @read: HDQ read callback. | ||
9 | * This function should provide access to the HDQ bus the battery is | ||
10 | * connected to. | ||
11 | * The first parameter is a pointer to the battery device, the second the | ||
12 | * register to be read. The return value should either be the content of | ||
13 | * the passed register or an error value. | ||
14 | */ | ||
15 | enum bq27xxx_chip { | ||
16 | BQ27000 = 1, /* bq27000, bq27200 */ | ||
17 | BQ27010, /* bq27010, bq27210 */ | ||
18 | BQ27500, /* bq27500, bq27510, bq27520 */ | ||
19 | BQ27530, /* bq27530, bq27531 */ | ||
20 | BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ | ||
21 | BQ27545, /* bq27545 */ | ||
22 | BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ | ||
23 | }; | ||
24 | |||
25 | struct bq27xxx_platform_data { | ||
26 | const char *name; | ||
27 | enum bq27xxx_chip chip; | ||
28 | int (*read)(struct device *dev, unsigned int); | ||
29 | }; | ||
30 | |||
31 | #endif | ||
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index eadf28cb2fc9..c4fa907c8f14 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h | |||
@@ -65,7 +65,7 @@ struct charger_cable { | |||
65 | const char *extcon_name; | 65 | const char *extcon_name; |
66 | const char *name; | 66 | const char *name; |
67 | 67 | ||
68 | /* The charger-manager use Exton framework*/ | 68 | /* The charger-manager use Extcon framework */ |
69 | struct extcon_specific_cable_nb extcon_dev; | 69 | struct extcon_specific_cable_nb extcon_dev; |
70 | struct work_struct wq; | 70 | struct work_struct wq; |
71 | struct notifier_block nb; | 71 | struct notifier_block nb; |
@@ -94,7 +94,7 @@ struct charger_cable { | |||
94 | * the charger will be maintained with disabled state. | 94 | * the charger will be maintained with disabled state. |
95 | * @cables: | 95 | * @cables: |
96 | * the array of charger cables to enable/disable charger | 96 | * the array of charger cables to enable/disable charger |
97 | * and set current limit according to constratint data of | 97 | * and set current limit according to constraint data of |
98 | * struct charger_cable if only charger cable included | 98 | * struct charger_cable if only charger cable included |
99 | * in the array of charger cables is attached/detached. | 99 | * in the array of charger cables is attached/detached. |
100 | * @num_cables: the number of charger cables. | 100 | * @num_cables: the number of charger cables. |
@@ -148,7 +148,7 @@ struct charger_regulator { | |||
148 | * @polling_interval_ms: interval in millisecond at which | 148 | * @polling_interval_ms: interval in millisecond at which |
149 | * charger manager will monitor battery health | 149 | * charger manager will monitor battery health |
150 | * @battery_present: | 150 | * @battery_present: |
151 | * Specify where information for existance of battery can be obtained | 151 | * Specify where information for existence of battery can be obtained |
152 | * @psy_charger_stat: the names of power-supply for chargers | 152 | * @psy_charger_stat: the names of power-supply for chargers |
153 | * @num_charger_regulator: the number of entries in charger_regulators | 153 | * @num_charger_regulator: the number of entries in charger_regulators |
154 | * @charger_regulators: array of charger regulators | 154 | * @charger_regulators: array of charger regulators |
@@ -156,7 +156,7 @@ struct charger_regulator { | |||
156 | * @thermal_zone : the name of thermal zone for battery | 156 | * @thermal_zone : the name of thermal zone for battery |
157 | * @temp_min : Minimum battery temperature for charging. | 157 | * @temp_min : Minimum battery temperature for charging. |
158 | * @temp_max : Maximum battery temperature for charging. | 158 | * @temp_max : Maximum battery temperature for charging. |
159 | * @temp_diff : Temperature diffential to restart charging. | 159 | * @temp_diff : Temperature difference to restart charging. |
160 | * @measure_battery_temp: | 160 | * @measure_battery_temp: |
161 | * true: measure battery temperature | 161 | * true: measure battery temperature |
162 | * false: measure ambient temperature | 162 | * false: measure ambient temperature |
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 1d2cd21242e8..54bf1484d41f 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h | |||
@@ -48,9 +48,9 @@ struct pps_source_info { | |||
48 | 48 | ||
49 | struct pps_event_time { | 49 | struct pps_event_time { |
50 | #ifdef CONFIG_NTP_PPS | 50 | #ifdef CONFIG_NTP_PPS |
51 | struct timespec ts_raw; | 51 | struct timespec64 ts_raw; |
52 | #endif /* CONFIG_NTP_PPS */ | 52 | #endif /* CONFIG_NTP_PPS */ |
53 | struct timespec ts_real; | 53 | struct timespec64 ts_real; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* The main struct */ | 56 | /* The main struct */ |
@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps, | |||
105 | struct pps_device *pps_lookup_dev(void const *cookie); | 105 | struct pps_device *pps_lookup_dev(void const *cookie); |
106 | 106 | ||
107 | static inline void timespec_to_pps_ktime(struct pps_ktime *kt, | 107 | static inline void timespec_to_pps_ktime(struct pps_ktime *kt, |
108 | struct timespec ts) | 108 | struct timespec64 ts) |
109 | { | 109 | { |
110 | kt->sec = ts.tv_sec; | 110 | kt->sec = ts.tv_sec; |
111 | kt->nsec = ts.tv_nsec; | 111 | kt->nsec = ts.tv_nsec; |
@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, | |||
115 | 115 | ||
116 | static inline void pps_get_ts(struct pps_event_time *ts) | 116 | static inline void pps_get_ts(struct pps_event_time *ts) |
117 | { | 117 | { |
118 | getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); | 118 | ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); |
119 | } | 119 | } |
120 | 120 | ||
121 | #else /* CONFIG_NTP_PPS */ | 121 | #else /* CONFIG_NTP_PPS */ |
122 | 122 | ||
123 | static inline void pps_get_ts(struct pps_event_time *ts) | 123 | static inline void pps_get_ts(struct pps_event_time *ts) |
124 | { | 124 | { |
125 | getnstimeofday(&ts->ts_real); | 125 | ktime_get_real_ts64(&ts->ts_real); |
126 | } | 126 | } |
127 | 127 | ||
128 | #endif /* CONFIG_NTP_PPS */ | 128 | #endif /* CONFIG_NTP_PPS */ |
129 | 129 | ||
130 | /* Subtract known time delay from PPS event time(s) */ | 130 | /* Subtract known time delay from PPS event time(s) */ |
131 | static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) | 131 | static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) |
132 | { | 132 | { |
133 | ts->ts_real = timespec_sub(ts->ts_real, delta); | 133 | ts->ts_real = timespec64_sub(ts->ts_real, delta); |
134 | #ifdef CONFIG_NTP_PPS | 134 | #ifdef CONFIG_NTP_PPS |
135 | ts->ts_raw = timespec_sub(ts->ts_raw, delta); | 135 | ts->ts_raw = timespec64_sub(ts->ts_raw, delta); |
136 | #endif | 136 | #endif |
137 | } | 137 | } |
138 | 138 | ||
diff --git a/include/linux/pr.h b/include/linux/pr.h new file mode 100644 index 000000000000..65c01c10b335 --- /dev/null +++ b/include/linux/pr.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef LINUX_PR_H | ||
2 | #define LINUX_PR_H | ||
3 | |||
4 | #include <uapi/linux/pr.h> | ||
5 | |||
6 | struct pr_ops { | ||
7 | int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, | ||
8 | u32 flags); | ||
9 | int (*pr_reserve)(struct block_device *bdev, u64 key, | ||
10 | enum pr_type type, u32 flags); | ||
11 | int (*pr_release)(struct block_device *bdev, u64 key, | ||
12 | enum pr_type type); | ||
13 | int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, | ||
14 | enum pr_type type, bool abort); | ||
15 | int (*pr_clear)(struct block_device *bdev, u64 key); | ||
16 | }; | ||
17 | |||
18 | #endif /* LINUX_PR_H */ | ||
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bea8dd8ff5e0..75e4e30677f1 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
@@ -26,7 +26,6 @@ | |||
26 | * SOFTIRQ_MASK: 0x0000ff00 | 26 | * SOFTIRQ_MASK: 0x0000ff00 |
27 | * HARDIRQ_MASK: 0x000f0000 | 27 | * HARDIRQ_MASK: 0x000f0000 |
28 | * NMI_MASK: 0x00100000 | 28 | * NMI_MASK: 0x00100000 |
29 | * PREEMPT_ACTIVE: 0x00200000 | ||
30 | * PREEMPT_NEED_RESCHED: 0x80000000 | 29 | * PREEMPT_NEED_RESCHED: 0x80000000 |
31 | */ | 30 | */ |
32 | #define PREEMPT_BITS 8 | 31 | #define PREEMPT_BITS 8 |
@@ -53,10 +52,6 @@ | |||
53 | 52 | ||
54 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | 53 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) |
55 | 54 | ||
56 | #define PREEMPT_ACTIVE_BITS 1 | ||
57 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) | ||
58 | #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) | ||
59 | |||
60 | /* We use the MSB mostly because its available */ | 55 | /* We use the MSB mostly because its available */ |
61 | #define PREEMPT_NEED_RESCHED 0x80000000 | 56 | #define PREEMPT_NEED_RESCHED 0x80000000 |
62 | 57 | ||
@@ -126,8 +121,7 @@ | |||
126 | * Check whether we were atomic before we did preempt_disable(): | 121 | * Check whether we were atomic before we did preempt_disable(): |
127 | * (used by the scheduler) | 122 | * (used by the scheduler) |
128 | */ | 123 | */ |
129 | #define in_atomic_preempt_off() \ | 124 | #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) |
130 | ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET) | ||
131 | 125 | ||
132 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) | 126 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
133 | extern void preempt_count_add(int val); | 127 | extern void preempt_count_add(int val); |
@@ -146,18 +140,6 @@ extern void preempt_count_sub(int val); | |||
146 | #define preempt_count_inc() preempt_count_add(1) | 140 | #define preempt_count_inc() preempt_count_add(1) |
147 | #define preempt_count_dec() preempt_count_sub(1) | 141 | #define preempt_count_dec() preempt_count_sub(1) |
148 | 142 | ||
149 | #define preempt_active_enter() \ | ||
150 | do { \ | ||
151 | preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ | ||
152 | barrier(); \ | ||
153 | } while (0) | ||
154 | |||
155 | #define preempt_active_exit() \ | ||
156 | do { \ | ||
157 | barrier(); \ | ||
158 | preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ | ||
159 | } while (0) | ||
160 | |||
161 | #ifdef CONFIG_PREEMPT_COUNT | 143 | #ifdef CONFIG_PREEMPT_COUNT |
162 | 144 | ||
163 | #define preempt_disable() \ | 145 | #define preempt_disable() \ |
diff --git a/include/linux/property.h b/include/linux/property.h index a59c6ee566c2..0a3705a7c9f2 100644 --- a/include/linux/property.h +++ b/include/linux/property.h | |||
@@ -27,6 +27,12 @@ enum dev_prop_type { | |||
27 | DEV_PROP_MAX, | 27 | DEV_PROP_MAX, |
28 | }; | 28 | }; |
29 | 29 | ||
30 | enum dev_dma_attr { | ||
31 | DEV_DMA_NOT_SUPPORTED, | ||
32 | DEV_DMA_NON_COHERENT, | ||
33 | DEV_DMA_COHERENT, | ||
34 | }; | ||
35 | |||
30 | bool device_property_present(struct device *dev, const char *propname); | 36 | bool device_property_present(struct device *dev, const char *propname); |
31 | int device_property_read_u8_array(struct device *dev, const char *propname, | 37 | int device_property_read_u8_array(struct device *dev, const char *propname, |
32 | u8 *val, size_t nval); | 38 | u8 *val, size_t nval); |
@@ -40,6 +46,8 @@ int device_property_read_string_array(struct device *dev, const char *propname, | |||
40 | const char **val, size_t nval); | 46 | const char **val, size_t nval); |
41 | int device_property_read_string(struct device *dev, const char *propname, | 47 | int device_property_read_string(struct device *dev, const char *propname, |
42 | const char **val); | 48 | const char **val); |
49 | int device_property_match_string(struct device *dev, | ||
50 | const char *propname, const char *string); | ||
43 | 51 | ||
44 | bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); | 52 | bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); |
45 | int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, | 53 | int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, |
@@ -59,6 +67,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, | |||
59 | size_t nval); | 67 | size_t nval); |
60 | int fwnode_property_read_string(struct fwnode_handle *fwnode, | 68 | int fwnode_property_read_string(struct fwnode_handle *fwnode, |
61 | const char *propname, const char **val); | 69 | const char *propname, const char **val); |
70 | int fwnode_property_match_string(struct fwnode_handle *fwnode, | ||
71 | const char *propname, const char *string); | ||
62 | 72 | ||
63 | struct fwnode_handle *device_get_next_child_node(struct device *dev, | 73 | struct fwnode_handle *device_get_next_child_node(struct device *dev, |
64 | struct fwnode_handle *child); | 74 | struct fwnode_handle *child); |
@@ -164,7 +174,9 @@ struct property_set { | |||
164 | 174 | ||
165 | void device_add_property_set(struct device *dev, struct property_set *pset); | 175 | void device_add_property_set(struct device *dev, struct property_set *pset); |
166 | 176 | ||
167 | bool device_dma_is_coherent(struct device *dev); | 177 | bool device_dma_supported(struct device *dev); |
178 | |||
179 | enum dev_dma_attr device_get_dma_attr(struct device *dev); | ||
168 | 180 | ||
169 | int device_get_phy_mode(struct device *dev); | 181 | int device_get_phy_mode(struct device *dev); |
170 | 182 | ||
diff --git a/include/linux/psci.h b/include/linux/psci.h index a682fcc91c33..12c4865457ad 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h | |||
@@ -21,6 +21,8 @@ | |||
21 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | 21 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 |
22 | 22 | ||
23 | bool psci_tos_resident_on(int cpu); | 23 | bool psci_tos_resident_on(int cpu); |
24 | bool psci_power_state_loses_context(u32 state); | ||
25 | bool psci_power_state_is_valid(u32 state); | ||
24 | 26 | ||
25 | struct psci_operations { | 27 | struct psci_operations { |
26 | int (*cpu_suspend)(u32 state, unsigned long entry_point); | 28 | int (*cpu_suspend)(u32 state, unsigned long entry_point); |
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 8e7a25b068b0..831479f8df8f 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h | |||
@@ -75,20 +75,8 @@ struct pstore_info { | |||
75 | 75 | ||
76 | #define PSTORE_FLAGS_FRAGILE 1 | 76 | #define PSTORE_FLAGS_FRAGILE 1 |
77 | 77 | ||
78 | #ifdef CONFIG_PSTORE | ||
79 | extern int pstore_register(struct pstore_info *); | 78 | extern int pstore_register(struct pstore_info *); |
79 | extern void pstore_unregister(struct pstore_info *); | ||
80 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); | 80 | extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); |
81 | #else | ||
82 | static inline int | ||
83 | pstore_register(struct pstore_info *psi) | ||
84 | { | ||
85 | return -ENODEV; | ||
86 | } | ||
87 | static inline bool | ||
88 | pstore_cannot_block_path(enum kmsg_dump_reason reason) | ||
89 | { | ||
90 | return false; | ||
91 | } | ||
92 | #endif | ||
93 | 81 | ||
94 | #endif /*_LINUX_PSTORE_H*/ | 82 | #endif /*_LINUX_PSTORE_H*/ |
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 159c987b1853..a079656b614c 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h | |||
@@ -32,9 +32,9 @@ | |||
32 | #define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ | 32 | #define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ |
33 | #define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ | 33 | #define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ |
34 | #define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ | 34 | #define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ |
35 | #define PTP_CLASS_L2 0x30 /* event in a L2 packet */ | 35 | #define PTP_CLASS_L2 0x40 /* event in a L2 packet */ |
36 | #define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */ | 36 | #define PTP_CLASS_PMASK 0x70 /* mask for the packet type field */ |
37 | #define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */ | 37 | #define PTP_CLASS_VLAN 0x80 /* event in a VLAN tagged packet */ |
38 | 38 | ||
39 | #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) | 39 | #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) |
40 | #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ | 40 | #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ |
@@ -42,6 +42,7 @@ | |||
42 | #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) | 42 | #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) |
43 | #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) | 43 | #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) |
44 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) | 44 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) |
45 | #define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) | ||
45 | 46 | ||
46 | #define PTP_EV_PORT 319 | 47 | #define PTP_EV_PORT 319 |
47 | #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ | 48 | #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index d681f6875aef..cfc3ed46cad2 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __LINUX_PWM_H | 2 | #define __LINUX_PWM_H |
3 | 3 | ||
4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
5 | #include <linux/mutex.h> | ||
5 | #include <linux/of.h> | 6 | #include <linux/of.h> |
6 | 7 | ||
7 | struct pwm_device; | 8 | struct pwm_device; |
@@ -87,6 +88,7 @@ enum { | |||
87 | * @pwm: global index of the PWM device | 88 | * @pwm: global index of the PWM device |
88 | * @chip: PWM chip providing this PWM device | 89 | * @chip: PWM chip providing this PWM device |
89 | * @chip_data: chip-private data associated with the PWM device | 90 | * @chip_data: chip-private data associated with the PWM device |
91 | * @lock: used to serialize accesses to the PWM device where necessary | ||
90 | * @period: period of the PWM signal (in nanoseconds) | 92 | * @period: period of the PWM signal (in nanoseconds) |
91 | * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) | 93 | * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) |
92 | * @polarity: polarity of the PWM signal | 94 | * @polarity: polarity of the PWM signal |
@@ -98,6 +100,7 @@ struct pwm_device { | |||
98 | unsigned int pwm; | 100 | unsigned int pwm; |
99 | struct pwm_chip *chip; | 101 | struct pwm_chip *chip; |
100 | void *chip_data; | 102 | void *chip_data; |
103 | struct mutex lock; | ||
101 | 104 | ||
102 | unsigned int period; | 105 | unsigned int period; |
103 | unsigned int duty_cycle; | 106 | unsigned int duty_cycle; |
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 92273776bce6..c2f2574ff61c 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
@@ -198,6 +198,7 @@ enum pxa_ssp_type { | |||
198 | LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ | 198 | LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ |
199 | LPSS_BYT_SSP, | 199 | LPSS_BYT_SSP, |
200 | LPSS_SPT_SSP, | 200 | LPSS_SPT_SSP, |
201 | LPSS_BXT_SSP, | ||
201 | }; | 202 | }; |
202 | 203 | ||
203 | struct ssp_device { | 204 | struct ssp_device { |
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 6e7d5ec65838..9e12000914b3 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
@@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req { | |||
23 | u32 val; | 23 | u32 val; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | extern bool qcom_scm_is_available(void); | ||
27 | |||
26 | extern bool qcom_scm_hdcp_available(void); | 28 | extern bool qcom_scm_hdcp_available(void); |
27 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, | 29 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, |
28 | u32 *resp); | 30 | u32 *resp); |
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h new file mode 100644 index 000000000000..6a4347639c03 --- /dev/null +++ b/include/linux/qed/common_hsi.h | |||
@@ -0,0 +1,607 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015 QLogic Corporation | ||
3 | * | ||
4 | * This software is available under the terms of the GNU General Public License | ||
5 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
6 | * this source tree. | ||
7 | */ | ||
8 | |||
9 | #ifndef __COMMON_HSI__ | ||
10 | #define __COMMON_HSI__ | ||
11 | |||
12 | #define FW_MAJOR_VERSION 8 | ||
13 | #define FW_MINOR_VERSION 4 | ||
14 | #define FW_REVISION_VERSION 2 | ||
15 | #define FW_ENGINEERING_VERSION 0 | ||
16 | |||
17 | /***********************/ | ||
18 | /* COMMON HW CONSTANTS */ | ||
19 | /***********************/ | ||
20 | |||
21 | /* PCI functions */ | ||
22 | #define MAX_NUM_PORTS_K2 (4) | ||
23 | #define MAX_NUM_PORTS_BB (2) | ||
24 | #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) | ||
25 | |||
26 | #define MAX_NUM_PFS_K2 (16) | ||
27 | #define MAX_NUM_PFS_BB (8) | ||
28 | #define MAX_NUM_PFS (MAX_NUM_PFS_K2) | ||
29 | #define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ | ||
30 | |||
31 | #define MAX_NUM_VFS_K2 (192) | ||
32 | #define MAX_NUM_VFS_BB (120) | ||
33 | #define MAX_NUM_VFS (MAX_NUM_VFS_K2) | ||
34 | |||
35 | #define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) | ||
36 | #define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) | ||
37 | |||
38 | #define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) | ||
39 | #define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) | ||
40 | |||
41 | #define MAX_NUM_VPORTS_K2 (208) | ||
42 | #define MAX_NUM_VPORTS_BB (160) | ||
43 | #define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) | ||
44 | |||
45 | #define MAX_NUM_L2_QUEUES_K2 (320) | ||
46 | #define MAX_NUM_L2_QUEUES_BB (256) | ||
47 | #define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) | ||
48 | |||
49 | /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ | ||
50 | #define NUM_PHYS_TCS_4PORT_K2 (4) | ||
51 | #define NUM_OF_PHYS_TCS (8) | ||
52 | |||
53 | #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) | ||
54 | #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) | ||
55 | |||
56 | #define LB_TC (NUM_OF_PHYS_TCS) | ||
57 | |||
58 | /* Num of possible traffic priority values */ | ||
59 | #define NUM_OF_PRIO (8) | ||
60 | |||
61 | #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) | ||
62 | #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) | ||
63 | #define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) | ||
64 | #define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) | ||
65 | |||
66 | /* CIDs */ | ||
67 | #define NUM_OF_CONNECTION_TYPES (8) | ||
68 | #define NUM_OF_LCIDS (320) | ||
69 | #define NUM_OF_LTIDS (320) | ||
70 | |||
71 | /*****************/ | ||
72 | /* CDU CONSTANTS */ | ||
73 | /*****************/ | ||
74 | |||
75 | #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) | ||
76 | #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) | ||
77 | |||
78 | /*****************/ | ||
79 | /* DQ CONSTANTS */ | ||
80 | /*****************/ | ||
81 | |||
82 | /* DEMS */ | ||
83 | #define DQ_DEMS_LEGACY 0 | ||
84 | |||
85 | /* XCM agg val selection */ | ||
86 | #define DQ_XCM_AGG_VAL_SEL_WORD2 0 | ||
87 | #define DQ_XCM_AGG_VAL_SEL_WORD3 1 | ||
88 | #define DQ_XCM_AGG_VAL_SEL_WORD4 2 | ||
89 | #define DQ_XCM_AGG_VAL_SEL_WORD5 3 | ||
90 | #define DQ_XCM_AGG_VAL_SEL_REG3 4 | ||
91 | #define DQ_XCM_AGG_VAL_SEL_REG4 5 | ||
92 | #define DQ_XCM_AGG_VAL_SEL_REG5 6 | ||
93 | #define DQ_XCM_AGG_VAL_SEL_REG6 7 | ||
94 | |||
95 | /* XCM agg val selection */ | ||
96 | #define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ | ||
97 | DQ_XCM_AGG_VAL_SEL_WORD2 | ||
98 | #define DQ_XCM_ETH_TX_BD_CONS_CMD \ | ||
99 | DQ_XCM_AGG_VAL_SEL_WORD3 | ||
100 | #define DQ_XCM_CORE_TX_BD_CONS_CMD \ | ||
101 | DQ_XCM_AGG_VAL_SEL_WORD3 | ||
102 | #define DQ_XCM_ETH_TX_BD_PROD_CMD \ | ||
103 | DQ_XCM_AGG_VAL_SEL_WORD4 | ||
104 | #define DQ_XCM_CORE_TX_BD_PROD_CMD \ | ||
105 | DQ_XCM_AGG_VAL_SEL_WORD4 | ||
106 | #define DQ_XCM_CORE_SPQ_PROD_CMD \ | ||
107 | DQ_XCM_AGG_VAL_SEL_WORD4 | ||
108 | #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 | ||
109 | |||
110 | /* XCM agg counter flag selection */ | ||
111 | #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 | ||
112 | #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 | ||
113 | #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 | ||
114 | #define DQ_XCM_AGG_FLG_SHIFT_CF13 3 | ||
115 | #define DQ_XCM_AGG_FLG_SHIFT_CF18 4 | ||
116 | #define DQ_XCM_AGG_FLG_SHIFT_CF19 5 | ||
117 | #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 | ||
118 | #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 | ||
119 | |||
120 | /* XCM agg counter flag selection */ | ||
121 | #define DQ_XCM_ETH_DQ_CF_CMD (1 << \ | ||
122 | DQ_XCM_AGG_FLG_SHIFT_CF18) | ||
123 | #define DQ_XCM_CORE_DQ_CF_CMD (1 << \ | ||
124 | DQ_XCM_AGG_FLG_SHIFT_CF18) | ||
125 | #define DQ_XCM_ETH_TERMINATE_CMD (1 << \ | ||
126 | DQ_XCM_AGG_FLG_SHIFT_CF19) | ||
127 | #define DQ_XCM_CORE_TERMINATE_CMD (1 << \ | ||
128 | DQ_XCM_AGG_FLG_SHIFT_CF19) | ||
129 | #define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ | ||
130 | DQ_XCM_AGG_FLG_SHIFT_CF22) | ||
131 | #define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ | ||
132 | DQ_XCM_AGG_FLG_SHIFT_CF22) | ||
133 | #define DQ_XCM_ETH_TPH_EN_CMD (1 << \ | ||
134 | DQ_XCM_AGG_FLG_SHIFT_CF23) | ||
135 | |||
136 | /*****************/ | ||
137 | /* QM CONSTANTS */ | ||
138 | /*****************/ | ||
139 | |||
140 | /* number of TX queues in the QM */ | ||
141 | #define MAX_QM_TX_QUEUES_K2 512 | ||
142 | #define MAX_QM_TX_QUEUES_BB 448 | ||
143 | #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 | ||
144 | |||
145 | /* number of Other queues in the QM */ | ||
146 | #define MAX_QM_OTHER_QUEUES_BB 64 | ||
147 | #define MAX_QM_OTHER_QUEUES_K2 128 | ||
148 | #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 | ||
149 | |||
150 | /* number of queues in a PF queue group */ | ||
151 | #define QM_PF_QUEUE_GROUP_SIZE 8 | ||
152 | |||
153 | /* base number of Tx PQs in the CM PQ representation. | ||
154 | * should be used when storing PQ IDs in CM PQ registers and context | ||
155 | */ | ||
156 | #define CM_TX_PQ_BASE 0x200 | ||
157 | |||
158 | /* QM registers data */ | ||
159 | #define QM_LINE_CRD_REG_WIDTH 16 | ||
160 | #define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) | ||
161 | #define QM_BYTE_CRD_REG_WIDTH 24 | ||
162 | #define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) | ||
163 | #define QM_WFQ_CRD_REG_WIDTH 32 | ||
164 | #define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) | ||
165 | #define QM_RL_CRD_REG_WIDTH 32 | ||
166 | #define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) | ||
167 | |||
168 | /*****************/ | ||
169 | /* CAU CONSTANTS */ | ||
170 | /*****************/ | ||
171 | |||
172 | #define CAU_FSM_ETH_RX 0 | ||
173 | #define CAU_FSM_ETH_TX 1 | ||
174 | |||
175 | /* Number of Protocol Indices per Status Block */ | ||
176 | #define PIS_PER_SB 12 | ||
177 | |||
178 | #define CAU_HC_STOPPED_STATE 3 | ||
179 | #define CAU_HC_DISABLE_STATE 4 | ||
180 | #define CAU_HC_ENABLE_STATE 0 | ||
181 | |||
182 | /*****************/ | ||
183 | /* IGU CONSTANTS */ | ||
184 | /*****************/ | ||
185 | |||
186 | #define MAX_SB_PER_PATH_K2 (368) | ||
187 | #define MAX_SB_PER_PATH_BB (288) | ||
188 | #define MAX_TOT_SB_PER_PATH \ | ||
189 | MAX_SB_PER_PATH_K2 | ||
190 | |||
191 | #define MAX_SB_PER_PF_MIMD 129 | ||
192 | #define MAX_SB_PER_PF_SIMD 64 | ||
193 | #define MAX_SB_PER_VF 64 | ||
194 | |||
195 | /* Memory addresses on the BAR for the IGU Sub Block */ | ||
196 | #define IGU_MEM_BASE 0x0000 | ||
197 | |||
198 | #define IGU_MEM_MSIX_BASE 0x0000 | ||
199 | #define IGU_MEM_MSIX_UPPER 0x0101 | ||
200 | #define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff | ||
201 | |||
202 | #define IGU_MEM_PBA_MSIX_BASE 0x0200 | ||
203 | #define IGU_MEM_PBA_MSIX_UPPER 0x0202 | ||
204 | #define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff | ||
205 | |||
206 | #define IGU_CMD_INT_ACK_BASE 0x0400 | ||
207 | #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ | ||
208 | MAX_TOT_SB_PER_PATH - \ | ||
209 | 1) | ||
210 | #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff | ||
211 | |||
212 | #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 | ||
213 | #define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 | ||
214 | #define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 | ||
215 | |||
216 | #define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 | ||
217 | #define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 | ||
218 | #define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 | ||
219 | #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 | ||
220 | |||
221 | #define IGU_CMD_PROD_UPD_BASE 0x0600 | ||
222 | #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ | ||
223 | MAX_TOT_SB_PER_PATH - \ | ||
224 | 1) | ||
225 | #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff | ||
226 | |||
227 | /*****************/ | ||
228 | /* PXP CONSTANTS */ | ||
229 | /*****************/ | ||
230 | |||
231 | /* PTT and GTT */ | ||
232 | #define PXP_NUM_PF_WINDOWS 12 | ||
233 | #define PXP_PER_PF_ENTRY_SIZE 8 | ||
234 | #define PXP_NUM_GLOBAL_WINDOWS 243 | ||
235 | #define PXP_GLOBAL_ENTRY_SIZE 4 | ||
236 | #define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 | ||
237 | #define PXP_PF_WINDOW_ADMIN_START 0 | ||
238 | #define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 | ||
239 | #define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ | ||
240 | PXP_PF_WINDOW_ADMIN_LENGTH - 1) | ||
/* PXP admin window: per-PF PTT entry region */
#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
					   PXP_PER_PF_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
					PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
/* PXP admin window: global PTT entry region, starts after the per-PF one */
#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
					   PXP_GLOBAL_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
	(PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
	 PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
/* Fixed register offsets inside the PF admin window */
#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc

/* External BAR: per-PF windows, one 0x1000-byte window each */
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
	(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
	 PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
	(PXP_EXTERNAL_BAR_PF_WINDOW_START + \
	 PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)

/* External BAR: global windows, placed immediately after the PF windows */
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
	(PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)

/* Minimum ILT page size is 2^12 = 4KB */
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024

/* ILT Records - per-chip totals; MAX_NUM_ILT_RECORDS covers both chips */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)

/******************/
/* PBF CONSTANTS */
/******************/

/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328

/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440

/*****************/
/* PRS CONSTANTS */
/*****************/
299 | |||
/* Async data KCQ CQE - payload carried by an asynchronous event */
struct async_data {
	__le32 cid;		/* connection ID the event refers to */
	__le16 itid;		/* task ID - NOTE(review): inferred from name */
	u8 error_code;		/* FW-reported error code */
	u8 fw_debug_param;	/* opaque FW debug value */
};

/* 64-bit value split into two little-endian 32-bit halves */
struct regpair {
	__le32 lo;		/* low 32 bits */
	__le32 hi;		/* high 32 bits */
};

/* Event Data Union - the 8-byte payload of an event ring entry */
union event_ring_data {
	u8 bytes[8];			/* raw byte view */
	struct async_data async_info;	/* async-event view */
};

/* Event Ring Entry */
struct event_ring_entry {
	u8 protocol_id;		/* originating protocol (enum protocol_type) */
	u8 opcode;		/* protocol-specific event opcode */
	__le16 reserved0;
	__le16 echo;		/* value echoed back to the driver */
	u8 fw_return_code;	/* FW completion status */
	u8 flags;
/* Bit 0: set for asynchronous events (vs. ramrod completions) */
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
	union event_ring_data data;
};

/* Multi function mode */
enum mf_mode {
	SF,		/* single function */
	MF_OVLAN,	/* multi-function, outer-VLAN based */
	MF_NPAR,	/* multi-function, NIC partitioning */
	MAX_MF_MODE
};

/* Per-protocol connection types; ordinal values are part of the FW ABI,
 * so entries (including the RESERVED placeholders) must not be reordered.
 */
enum protocol_type {
	PROTOCOLID_RESERVED1,
	PROTOCOLID_RESERVED2,
	PROTOCOLID_RESERVED3,
	PROTOCOLID_CORE,
	PROTOCOLID_ETH,
	PROTOCOLID_RESERVED4,
	PROTOCOLID_RESERVED5,
	PROTOCOLID_PREROCE,
	PROTOCOLID_COMMON,
	PROTOCOLID_RESERVED6,
	MAX_PROTOCOL_TYPE
};
356 | |||
/* CAU PI entry; all fields are packed into 'prod' and accessed through the
 * MASK/SHIFT pairs below.
 */
struct cau_pi_entry {
	u32 prod;
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF	/* producer value */
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F	/* coalescing timeset */
#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1		/* FSM selection bit */
#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};

/* CAU status block entry; two packed words, fields via MASK/SHIFT pairs */
struct cau_sb_entry {
	u32 data;
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF	/* SB producer value */
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
#define CAU_SB_ENTRY_STATE0_MASK 0xF		/* state of FSM 0 */
#define CAU_SB_ENTRY_STATE0_SHIFT 24
#define CAU_SB_ENTRY_STATE1_MASK 0xF		/* state of FSM 1 */
#define CAU_SB_ENTRY_STATE1_SHIFT 28
	u32 params;
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F	/* timeset for FSM 0 */
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F	/* timeset for FSM 1 */
#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3	/* timer resolution, FSM 0 */
#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3	/* timer resolution, FSM 1 */
#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF	/* owning VF number */
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
#define CAU_SB_ENTRY_VF_VALID_MASK 0x1		/* VF number is valid */
#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF		/* owning PF number */
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
#define CAU_SB_ENTRY_TPH_MASK 0x1		/* TPH enable bit */
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
397 | |||
/* core doorbell data - written by the driver when ringing a CORE doorbell */
struct core_db_data {
	u8 params;
#define CORE_DB_DATA_DEST_MASK 0x3		/* target block (enum db_dest) */
#define CORE_DB_DATA_DEST_SHIFT 0
#define CORE_DB_DATA_AGG_CMD_MASK 0x3		/* agg command (enum db_agg_cmd_sel) */
#define CORE_DB_DATA_AGG_CMD_SHIFT 2
#define CORE_DB_DATA_BYPASS_EN_MASK 0x1		/* doorbell bypass enable */
#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
#define CORE_DB_DATA_RESERVED_MASK 0x1
#define CORE_DB_DATA_RESERVED_SHIFT 5
#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3	/* aggregative value select */
#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
	u8 agg_flags;		/* aggregative flags */
	__le16 spq_prod;	/* slow-path queue producer value */
};

/* Enum of doorbell aggregative command selection */
enum db_agg_cmd_sel {
	DB_AGG_CMD_NOP,		/* don't update the aggregative value */
	DB_AGG_CMD_SET,		/* set the aggregative value */
	DB_AGG_CMD_ADD,		/* add to the aggregative value */
	DB_AGG_CMD_MAX,		/* keep the max of old and new value */
	MAX_DB_AGG_CMD_SEL
};

/* Enum of doorbell destination (which storm block receives the doorbell) */
enum db_dest {
	DB_DEST_XCM,
	DB_DEST_UCM,
	DB_DEST_TCM,
	DB_NUM_DESTINATIONS,
	MAX_DB_DEST
};
432 | |||
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
	__le32 addr;
#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
#define DB_LEGACY_ADDR_DEMS_MASK 0x7		/* doorbell execution mode select */
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF	/* internal connection ID */
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};

/* Igu interrupt command */
enum igu_int_cmd {
	IGU_INT_ENABLE = 0,	/* enable interrupt generation */
	IGU_INT_DISABLE = 1,	/* disable interrupt generation */
	IGU_INT_NOP = 2,	/* leave interrupt state unchanged */
	IGU_INT_NOP2 = 3,	/* alias NOP; explicit values are FW ABI */
	MAX_IGU_INT_CMD
};

/* IGU producer or consumer update command */
struct igu_prod_cons_update {
	u32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF	/* new SB index value */
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1	/* apply the SB index */
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3	/* enum igu_int_cmd */
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1	/* enum igu_seg_access */
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1	/* producer vs consumer */
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
	u32 reserved1;
};

/* Igu segments access for default status block only */
enum igu_seg_access {
	IGU_SEG_ACCESS_REG = 0,		/* regular segment */
	IGU_SEG_ACCESS_ATTN = 1,	/* attention segment */
	MAX_IGU_SEG_ACCESS
};
479 | |||
/* Rx packet parsing results and error indications reported by FW in a CQE.
 * All fields are packed into 'flags' via the MASK/SHIFT pairs below.
 */
struct parsing_and_err_flags {
	__le16 flags;
/* L3 type of the packet (bits 0-1) */
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
/* L4 protocol of the packet (bits 2-3) */
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
/* Set when FW actually computed an L4 checksum for this packet */
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
/* Error bits: header/checksum problems on the outer packet */
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
/* Tunnel-related indications and errors (bits 11-15) */
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
511 | |||
/* Concrete Function ID: fully identifies a function (PF/VF, port, path). */
struct pxp_concrete_fid {
	__le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF		/* physical function ID */
#define PXP_CONCRETE_FID_PFID_SHIFT 0
#define PXP_CONCRETE_FID_PORT_MASK 0x3		/* port number */
#define PXP_CONCRETE_FID_PORT_SHIFT 4
#define PXP_CONCRETE_FID_PATH_MASK 0x1		/* path (engine) ID */
#define PXP_CONCRETE_FID_PATH_SHIFT 6
#define PXP_CONCRETE_FID_VFVALID_MASK 0x1	/* VFID field is valid */
#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_CONCRETE_FID_VFID_MASK 0xFF		/* virtual function ID */
#define PXP_CONCRETE_FID_VFID_SHIFT 8
};

/* Concrete FID layout as used inside a pretend command (no port/path). */
struct pxp_pretend_concrete_fid {
	__le16 fid;
#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};

/* A pretend FID is either concrete (structured) or opaque (raw 16 bits);
 * which one applies is selected by PXP_PRETEND_CMD_IS_CONCRETE.
 */
union pxp_pretend_fid {
	struct pxp_pretend_concrete_fid concrete_fid;
	__le16 opaque_fid;
};

/* Pxp Pretend Command Register: makes subsequent accesses behave as if
 * issued by another function/port/path.
 */
struct pxp_pretend_cmd {
	union pxp_pretend_fid fid;	/* identity to pretend to be */
	__le16 control;
#define PXP_PRETEND_CMD_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PATH_SHIFT 0
#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
#define PXP_PRETEND_CMD_PORT_MASK 0x3
#define PXP_PRETEND_CMD_PORT_SHIFT 2
#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
/* Enable bits: which parts of the identity are actually pretended */
#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
/* Selects the concrete vs. opaque interpretation of 'fid' */
#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};

/* PTT Record in PXP Admin Window: a mapped GRC offset plus the pretend
 * context used when accessing through this PTT.
 */
struct pxp_ptt_entry {
	__le32 offset;
#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF	/* GRC address window base */
#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
	struct pxp_pretend_cmd pretend;
};
577 | |||
/* RSS hash type - identifies which header fields fed the RSS hash;
 * explicit values are part of the FW ABI.
 */
enum rss_hash_type {
	RSS_HASH_TYPE_DEFAULT = 0,
	RSS_HASH_TYPE_IPV4 = 1,
	RSS_HASH_TYPE_TCP_IPV4 = 2,
	RSS_HASH_TYPE_IPV6 = 3,
	RSS_HASH_TYPE_TCP_IPV6 = 4,
	RSS_HASH_TYPE_UDP_IPV4 = 5,
	RSS_HASH_TYPE_UDP_IPV6 = 6,
	MAX_RSS_HASH_TYPE
};

/* status block structure - written by FW, polled by the driver */
struct status_block {
	__le16 pi_array[PIS_PER_SB];	/* protocol index values */
	__le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF		/* status block number */
#define STATUS_BLOCK_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
	__le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF	/* SB producer index */
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
606 | |||
607 | #endif /* __COMMON_HSI__ */ | ||
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h new file mode 100644 index 000000000000..320b3373ac1d --- /dev/null +++ b/include/linux/qed/eth_common.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015 QLogic Corporation | ||
3 | * | ||
4 | * This software is available under the terms of the GNU General Public License | ||
5 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
6 | * this source tree. | ||
7 | */ | ||
8 | |||
9 | #ifndef __ETH_COMMON__ | ||
10 | #define __ETH_COMMON__ | ||
11 | |||
/********************/
/* ETH FW CONSTANTS */
/********************/
#define ETH_CACHE_LINE_SIZE 64

#define ETH_MAX_RAMROD_PER_CON 8
/* All BD/SGE/CQE ring pages are one 4KB page */
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
#define ETH_RX_NUM_NEXT_PAGE_SGES 2

/* Tx BD-count requirements per packet type */
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510

#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS

/* Number of PBL entries carried inline in a regular Rx CQE */
#define ETH_REG_CQE_PBL_SIZE 3

/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512

/* approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)

/* ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_KEY_SIZE_REGS 10
/* Number of RSS engines per chip variant */
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127

/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_SGL_SIZE 3
#define ETH_TPA_CQE_CONT_SGL_SIZE 6
#define ETH_TPA_CQE_END_SGL_SIZE 4

/* Queue Zone sizes - per-storm per-queue area sizes in bytes */
#define TSTORM_QZONE_SIZE 0
#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone)
#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone)
#define XSTORM_QZONE_SIZE 0
#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone)
#define PSTORM_QZONE_SIZE 0
68 | |||
/* Interrupt coalescing TimeSet */
struct coalescing_timeset {
	u8 timeset;	/* coalescing time value */
	u8 valid;	/* non-zero when 'timeset' should be applied */
};

/* Per-packet offload flags carried in the first Tx BD */
struct eth_tx_1st_bd_flags {
	u8 bitfields;
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1		/* request IP csum */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1		/* request L4 csum */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1	/* insert 'vlan' tag */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1		/* packet uses LSO */
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1		/* marks first BD */
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1	/* tunnel IP csum */
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1	/* tunnel L4 csum */
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};

/* The parsing information data for the first tx bd of a given packet. */
struct eth_tx_data_1st_bd {
	__le16 vlan;				/* VLAN tag to insert */
	u8 nbds;				/* total BDs in this packet */
	struct eth_tx_1st_bd_flags bd_flags;
	__le16 fw_use_only;
};

/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
	__le16 tunn_ip_size;
	__le16 bitfields;
/* L4 header start offset, in 16-bit words */
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
	__le16 bitfields2;
/* Inner L2 header size of a tunneled packet, in 16-bit words */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13
/* enum eth_l4_pseudo_checksum_mode */
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14
#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1
#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15
};
133 | |||
/* Regular ETH Rx FP CQE - completion for a single received packet. */
struct eth_fast_path_rx_reg_cqe {
	u8 type;		/* enum eth_rx_cqe_type */
	u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7	/* enum rss_hash_type */
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF		/* traffic class */
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
	__le16 pkt_len;		/* total packet length */
	struct parsing_and_err_flags pars_flags;
	__le16 vlan_tag;
	__le32 rss_hash;
	__le16 len_on_bd;	/* bytes placed on the first BD */
	u8 placement_offset;	/* data offset within the BD buffer */
	u8 reserved;
	__le16 pbl[ETH_REG_CQE_PBL_SIZE];
	u8 reserved1[10];
};

/* The L4 pseudo checksum mode for Ethernet */
enum eth_l4_pseudo_checksum_mode {
	ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,	/* pseudo-hdr uses real length */
	ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,		/* pseudo-hdr uses zero length */
	MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};

/* Rx buffer descriptor: just the DMA address of the buffer */
struct eth_rx_bd {
	struct regpair addr;
};

/* regular ETH Rx SP CQE - slow-path (ramrod) completion on the Rx ring */
struct eth_slow_path_rx_cqe {
	u8 type;		/* enum eth_rx_cqe_type */
	u8 ramrod_cmd_id;	/* completed ramrod command */
	u8 error_flag;
	u8 reserved[27];
	__le16 echo;		/* echoed back from the ramrod */
};

/* union for all ETH Rx CQE types; 'type' in either view discriminates */
union eth_rx_cqe {
	struct eth_fast_path_rx_reg_cqe fast_path_regular;
	struct eth_slow_path_rx_cqe slow_path;
};

/* ETH Rx CQE type */
enum eth_rx_cqe_type {
	ETH_RX_CQE_TYPE_UNUSED,
	ETH_RX_CQE_TYPE_REGULAR,
	ETH_RX_CQE_TYPE_SLOW_PATH,
	MAX_ETH_RX_CQE_TYPE
};

/* ETH Rx producers data - written to the Mstorm queue zone */
struct eth_rx_prod_data {
	__le16 bd_prod;		/* Rx BD ring producer */
	__le16 sge_prod;	/* Rx SGE ring producer */
	__le16 cqe_prod;	/* Rx CQE ring producer */
	__le16 reserved;
};

/* The first tx bd of a given packet */
struct eth_tx_1st_bd {
	struct regpair addr;		/* buffer DMA address */
	__le16 nbytes;			/* bytes described by this BD */
	struct eth_tx_data_1st_bd data;
};

/* The second tx bd of a given packet */
struct eth_tx_2nd_bd {
	struct regpair addr;		/* buffer DMA address */
	__le16 nbytes;			/* bytes described by this BD */
	struct eth_tx_data_2nd_bd data;
};
210 | |||
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
	__le16 lso_mss;		/* MSS for LSO packets */
	u8 bitfields;
/* TCP header length in dwords */
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
/* Number of BDs holding the (LSO) header */
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
	/* NOTE(review): field name is misspelled ("resereved") but is part of
	 * the established interface, so it is kept as-is.
	 */
	u8 resereved0[3];
};

/* The third tx bd of a given packet */
struct eth_tx_3rd_bd {
	struct regpair addr;		/* buffer DMA address */
	__le16 nbytes;			/* bytes described by this BD */
	struct eth_tx_data_3rd_bd data;
};

/* The common non-special TX BD ring element */
struct eth_tx_bd {
	struct regpair addr;		/* buffer DMA address */
	__le16 nbytes;			/* bytes described by this BD */
	__le16 reserved0;
	__le32 reserved1;
};

/* Any Tx BD ring element; all variants share the addr/nbytes prefix */
union eth_tx_bd_types {
	struct eth_tx_1st_bd first_bd;
	struct eth_tx_2nd_bd second_bd;
	struct eth_tx_3rd_bd third_bd;
	struct eth_tx_bd reg_bd;
};

/* Mstorm Queue Zone - driver writes Rx producers here */
struct mstorm_eth_queue_zone {
	struct eth_rx_prod_data rx_producers;
	__le32 reserved[2];
};

/* Ustorm Queue Zone - per-queue interrupt coalescing configuration */
struct ustorm_eth_queue_zone {
	struct coalescing_timeset int_coalescing_timeset;
	__le16 reserved[3];
};

/* Ystorm Queue Zone - per-queue interrupt coalescing configuration */
struct ystorm_eth_queue_zone {
	struct coalescing_timeset int_coalescing_timeset;
	__le16 reserved[3];
};

/* ETH doorbell data - written when ringing an ETH Tx doorbell;
 * layout mirrors struct core_db_data but carries the Tx BD producer.
 */
struct eth_db_data {
	u8 params;
#define ETH_DB_DATA_DEST_MASK 0x3		/* target block (enum db_dest) */
#define ETH_DB_DATA_DEST_SHIFT 0
#define ETH_DB_DATA_AGG_CMD_MASK 0x3		/* enum db_agg_cmd_sel */
#define ETH_DB_DATA_AGG_CMD_SHIFT 2
#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
#define ETH_DB_DATA_RESERVED_MASK 0x1
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
	u8 agg_flags;
	__le16 bd_prod;		/* Tx BD ring producer value */
};
278 | |||
279 | #endif /* __ETH_COMMON__ */ | ||
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h new file mode 100644 index 000000000000..b920c3605c46 --- /dev/null +++ b/include/linux/qed/qed_chain.h | |||
@@ -0,0 +1,539 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015 QLogic Corporation | ||
3 | * | ||
4 | * This software is available under the terms of the GNU General Public License | ||
5 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
6 | * this source tree. | ||
7 | */ | ||
8 | |||
9 | #ifndef _QED_CHAIN_H | ||
10 | #define _QED_CHAIN_H | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <asm/byteorder.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/qed/common_hsi.h> | ||
18 | |||
/* dma_addr_t manip */
/* Split a 64-bit DMA address into little-endian 32-bit halves */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))

/* Recombine hi/lo 32-bit halves into one 64-bit value of the given type */
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
/* regpair variants. NOTE(review): HILO_DMA_REGPAIR does not le32_to_cpu()
 * its halves while HILO_64_REGPAIR does - confirm callers pass CPU-order
 * values to the DMA variant.
 */
#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
28 | |||
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page, so a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

/* Link element stored at the end of each page in NEXT_PTR mode */
struct qed_chain_next {
	struct regpair next_phys;	/* DMA address of the next page */
	void *next_virt;		/* virtual address of the next page */
};

/* Bookkeeping for PBL-mode chains (page list kept on the side) */
struct qed_chain_pbl {
	dma_addr_t p_phys_table;	/* DMA address of the page table */
	void *p_virt_table;		/* virtual address of the page table */
	u16 prod_page_idx;		/* page currently being produced into */
	u16 cons_page_idx;		/* page currently being consumed from */
};

struct qed_chain {
	void *p_virt_addr;		/* virtual base of the chain memory */
	dma_addr_t p_phys_addr;		/* DMA base of the chain memory */
	void *p_prod_elem;		/* next element to be produced */
	void *p_cons_elem;		/* next element to be consumed */
	u16 page_cnt;
	enum qed_chain_mode mode;
	enum qed_chain_use_mode intended_use; /* used to produce/consume */
	u16 capacity; /*< number of _usable_ elements */
	u16 size; /* number of elements */
	u16 prod_idx;
	u16 cons_idx;
	u16 elem_per_page;
	u16 elem_per_page_mask;		/* elem_per_page - 1 (power of two) */
	u16 elem_unusable;		/* elements lost per page to next ptr */
	u16 usable_per_page;
	u16 elem_size;			/* size of one element, in bytes */
	u16 next_page_mask;
	struct qed_chain_pbl pbl;	/* only meaningful in PBL mode */
};
78 | |||
/* Chain page geometry */
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000)
/* Total elements that fit in one page, usable or not */
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))

/* Elements sacrificed per page to hold the embedded next-page pointer;
 * zero unless the chain is in NEXT_PTR mode. The 'mode' argument is
 * parenthesized so that expression arguments expand safely.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?	 \
	 (1 + ((sizeof(struct qed_chain_next) - 1) / \
	       (elem_size))) : 0)

/* Elements per page actually available to the driver */
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) - \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Number of pages needed to hold elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
94 | |||
95 | /* Accessors */ | ||
96 | static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) | ||
97 | { | ||
98 | return p_chain->prod_idx; | ||
99 | } | ||
100 | |||
101 | static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) | ||
102 | { | ||
103 | return p_chain->cons_idx; | ||
104 | } | ||
105 | |||
/* Return the number of elements still free to be produced into the chain. */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	/* prod - cons in modulo-2^16 arithmetic: the 0x10000 bias keeps the
	 * u32 subtraction non-negative; we don't need to truncate explicitly
	 * upon assignment, as the u32 -> u16 store truncates for us.
	 */
	used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
		(u32)p_chain->cons_idx;
	/* In next-ptr mode the indices also step over the link elements;
	 * discount one element per page spanned (NOTE(review): assumes one
	 * unusable element per page - confirm against elem_unusable).
	 */
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (used / p_chain->elem_per_page);

	return p_chain->capacity - used;
}
118 | |||
119 | static inline u8 qed_chain_is_full(struct qed_chain *p_chain) | ||
120 | { | ||
121 | return qed_chain_get_elem_left(p_chain) == p_chain->capacity; | ||
122 | } | ||
123 | |||
124 | static inline u8 qed_chain_is_empty(struct qed_chain *p_chain) | ||
125 | { | ||
126 | return qed_chain_get_elem_left(p_chain) == 0; | ||
127 | } | ||
128 | |||
129 | static inline u16 qed_chain_get_elem_per_page( | ||
130 | struct qed_chain *p_chain) | ||
131 | { | ||
132 | return p_chain->elem_per_page; | ||
133 | } | ||
134 | |||
135 | static inline u16 qed_chain_get_usable_per_page( | ||
136 | struct qed_chain *p_chain) | ||
137 | { | ||
138 | return p_chain->usable_per_page; | ||
139 | } | ||
140 | |||
141 | static inline u16 qed_chain_get_unusable_per_page( | ||
142 | struct qed_chain *p_chain) | ||
143 | { | ||
144 | return p_chain->elem_unusable; | ||
145 | } | ||
146 | |||
147 | static inline u16 qed_chain_get_size(struct qed_chain *p_chain) | ||
148 | { | ||
149 | return p_chain->size; | ||
150 | } | ||
151 | |||
152 | static inline dma_addr_t | ||
153 | qed_chain_get_pbl_phys(struct qed_chain *p_chain) | ||
154 | { | ||
155 | return p_chain->pbl.p_phys_table; | ||
156 | } | ||
157 | |||
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem	in/out: element pointer to move to the next page
 * @param idx_to_inc	in/out: index to bump past unusable elements
 * @param page_to_inc	in/out: PBL page index (PBL mode only)
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem,
		       u16 *idx_to_inc,
		       u16 *page_to_inc)

{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
	{
		/* Current element is the embedded link; follow it and skip
		 * the unusable link elements in the index count.
		 */
		struct qed_chain_next *p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		*idx_to_inc += p_chain->elem_unusable;
		break;
	}
	case QED_CHAIN_MODE_SINGLE:
		/* Single page simply wraps back to its start */
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* It is assumed pages are sequential, next element needs
		 * to change only when passing going back to first from last.
		 */
		if (++(*page_to_inc) == p_chain->page_cnt) {
			*page_to_inc = 0;
			*p_next_elem = p_chain->p_virt_addr;
		}
	}
}
197 | |||
/* True when 'idx' sits on the first unusable element of its page.
 * 'idx' is a field name (prod_idx/cons_idx) token-pasted into (p)->idx.
 */
#define is_unusable_idx(p, idx) \
	(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* True when 'idx' + 1 would land on the first unusable element of its page */
#define is_unusable_next_idx(p, idx) \
	((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* If 'idx' has reached the unusable tail of a page, skip over it */
#define test_ans_skip(p, idx) \
	do { \
		if (is_unusable_idx(p, idx)) { \
			(p)->idx += (p)->elem_unusable; \
		} \
	} while (0)
210 | |||
/**
 * @brief qed_chain_return_multi_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 * @param num	number of elements to return
 */
static inline void
qed_chain_return_multi_produced(struct qed_chain *p_chain,
				u16 num)
{
	p_chain->cons_idx += num;
	/* Hop over the page's unusable tail if we landed on it */
	test_ans_skip(p_chain, cons_idx);
}
227 | |||
/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	/* Returning a single element is the multi-return with num == 1 */
	qed_chain_return_multi_produced(p_chain, 1);
}
241 | |||
242 | /** | ||
243 | * @brief qed_chain_produce - | ||
244 | * | ||
245 | * A chain in which the driver "Produces" elements should use this to get | ||
246 | * a pointer to the next element which can be "Produced". It's driver | ||
247 | * responsibility to validate that the chain has room for new element. | ||
248 | * | ||
249 | * @param p_chain | ||
250 | * | ||
251 | * @return void*, a pointer to next element | ||
252 | */ | ||
253 | static inline void *qed_chain_produce(struct qed_chain *p_chain) | ||
254 | { | ||
255 | void *ret = NULL; | ||
256 | |||
257 | if ((p_chain->prod_idx & p_chain->elem_per_page_mask) == | ||
258 | p_chain->next_page_mask) { | ||
259 | qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, | ||
260 | &p_chain->prod_idx, | ||
261 | &p_chain->pbl.prod_page_idx); | ||
262 | } | ||
263 | |||
264 | ret = p_chain->p_prod_elem; | ||
265 | p_chain->prod_idx++; | ||
266 | p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + | ||
267 | p_chain->elem_size); | ||
268 | |||
269 | return ret; | ||
270 | } | ||
271 | |||
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return u16, chain capacity - the number of usable elements
 *	   (usable_per_page * page_cnt, see qed_chain_init())
 */
static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}
286 | |||
287 | /** | ||
288 | * @brief qed_chain_recycle_consumed - | ||
289 | * | ||
290 | * Returns an element which was previously consumed; | ||
291 | * Increments producers so they could be written to FW. | ||
292 | * | ||
293 | * @param p_chain | ||
294 | */ | ||
295 | static inline void | ||
296 | qed_chain_recycle_consumed(struct qed_chain *p_chain) | ||
297 | { | ||
298 | test_ans_skip(p_chain, prod_idx); | ||
299 | p_chain->prod_idx++; | ||
300 | } | ||
301 | |||
302 | /** | ||
303 | * @brief qed_chain_consume - | ||
304 | * | ||
305 | * A Chain in which the driver utilizes data written by a different source | ||
306 | * (i.e., FW) should use this to access passed buffers. | ||
307 | * | ||
308 | * @param p_chain | ||
309 | * | ||
310 | * @return void*, a pointer to the next buffer written | ||
311 | */ | ||
312 | static inline void *qed_chain_consume(struct qed_chain *p_chain) | ||
313 | { | ||
314 | void *ret = NULL; | ||
315 | |||
316 | if ((p_chain->cons_idx & p_chain->elem_per_page_mask) == | ||
317 | p_chain->next_page_mask) { | ||
318 | qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, | ||
319 | &p_chain->cons_idx, | ||
320 | &p_chain->pbl.cons_page_idx); | ||
321 | } | ||
322 | |||
323 | ret = p_chain->p_cons_elem; | ||
324 | p_chain->cons_idx++; | ||
325 | p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + | ||
326 | p_chain->elem_size); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
330 | |||
331 | /** | ||
332 | * @brief qed_chain_reset - Resets the chain to its start state | ||
333 | * | ||
334 | * @param p_chain pointer to a previously allocted chain | ||
335 | */ | ||
336 | static inline void qed_chain_reset(struct qed_chain *p_chain) | ||
337 | { | ||
338 | int i; | ||
339 | |||
340 | p_chain->prod_idx = 0; | ||
341 | p_chain->cons_idx = 0; | ||
342 | p_chain->p_cons_elem = p_chain->p_virt_addr; | ||
343 | p_chain->p_prod_elem = p_chain->p_virt_addr; | ||
344 | |||
345 | if (p_chain->mode == QED_CHAIN_MODE_PBL) { | ||
346 | p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1; | ||
347 | p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1; | ||
348 | } | ||
349 | |||
350 | switch (p_chain->intended_use) { | ||
351 | case QED_CHAIN_USE_TO_CONSUME_PRODUCE: | ||
352 | case QED_CHAIN_USE_TO_PRODUCE: | ||
353 | /* Do nothing */ | ||
354 | break; | ||
355 | |||
356 | case QED_CHAIN_USE_TO_CONSUME: | ||
357 | /* produce empty elements */ | ||
358 | for (i = 0; i < p_chain->capacity; i++) | ||
359 | qed_chain_recycle_consumed(p_chain); | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | |||
/**
 * @brief qed_chain_init - Initializes a basic chain struct
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use	whether the driver produces and/or consumes elements
 * @param mode		chain layout (next-ptr / single page / PBL)
 */
static inline void qed_chain_init(struct qed_chain *p_chain,
				  void *p_virt_addr,
				  dma_addr_t p_phys_addr,
				  u16 page_cnt,
				  u8 elem_size,
				  enum qed_chain_use_mode intended_use,
				  enum qed_chain_mode mode)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
	p_chain->elem_size = elem_size;
	p_chain->page_cnt = page_cnt;
	p_chain->mode = mode;

	p_chain->intended_use = intended_use;
	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page =
		USABLE_ELEMS_PER_PAGE(elem_size, mode);
	/* capacity counts only usable elements; size counts them all */
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;

	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);

	/* In-page index at which a page transition must occur */
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	if (mode == QED_CHAIN_MODE_NEXT_PTR) {
		struct qed_chain_next *p_next;
		u16 i;

		/* Each page's trailing qed_chain_next element (placed right
		 * after the usable elements) links to the following page,
		 * both physically and virtually.
		 */
		for (i = 0; i < page_cnt - 1; i++) {
			/* Increment mem_phy to the next page. */
			p_phys_addr += QED_CHAIN_PAGE_SIZE;

			/* Initialize the physical address of the next page. */
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   elem_size *
							   p_chain->
							   usable_per_page);

			p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
			p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);

			/* Initialize the virtual address of the next page. */
			p_next->next_virt = (void *)((u8 *)p_virt_addr +
						     QED_CHAIN_PAGE_SIZE);

			/* Move to the next page. */
			p_virt_addr = p_next->next_virt;
		}

		/* Last page's next should point to beginning of the chain */
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
						   elem_size *
						   p_chain->usable_per_page);

		p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
		p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
		p_next->next_virt = p_chain->p_virt_addr;
	}
	qed_chain_reset(p_chain);
}
439 | |||
440 | /** | ||
441 | * @brief qed_chain_pbl_init - Initalizes a basic pbl chain | ||
442 | * struct | ||
443 | * @param p_chain | ||
444 | * @param p_virt_addr virtual address of allocated buffer's beginning | ||
445 | * @param p_phys_addr physical address of allocated buffer's beginning | ||
446 | * @param page_cnt number of pages in the allocated buffer | ||
447 | * @param elem_size size of each element in the chain | ||
448 | * @param use_mode | ||
449 | * @param p_phys_pbl pointer to a pre-allocated side table | ||
450 | * which will hold physical page addresses. | ||
451 | * @param p_virt_pbl pointer to a pre allocated side table | ||
452 | * which will hold virtual page addresses. | ||
453 | */ | ||
454 | static inline void | ||
455 | qed_chain_pbl_init(struct qed_chain *p_chain, | ||
456 | void *p_virt_addr, | ||
457 | dma_addr_t p_phys_addr, | ||
458 | u16 page_cnt, | ||
459 | u8 elem_size, | ||
460 | enum qed_chain_use_mode use_mode, | ||
461 | dma_addr_t p_phys_pbl, | ||
462 | dma_addr_t *p_virt_pbl) | ||
463 | { | ||
464 | dma_addr_t *p_pbl_dma = p_virt_pbl; | ||
465 | int i; | ||
466 | |||
467 | qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt, | ||
468 | elem_size, use_mode, QED_CHAIN_MODE_PBL); | ||
469 | |||
470 | p_chain->pbl.p_phys_table = p_phys_pbl; | ||
471 | p_chain->pbl.p_virt_table = p_virt_pbl; | ||
472 | |||
473 | /* Fill the PBL with physical addresses*/ | ||
474 | for (i = 0; i < page_cnt; i++) { | ||
475 | *p_pbl_dma = p_phys_addr; | ||
476 | p_phys_addr += QED_CHAIN_PAGE_SIZE; | ||
477 | p_pbl_dma++; | ||
478 | } | ||
479 | } | ||
480 | |||
/**
 * @brief qed_chain_set_prod - sets the prod to the given
 *        value
 *
 * @param p_chain
 * @param prod_idx	new producer index
 * @param p_prod_elem	element pointer matching @prod_idx
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u16 prod_idx,
				      void *p_prod_elem)
{
	p_chain->prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}
495 | |||
496 | /** | ||
497 | * @brief qed_chain_get_elem - | ||
498 | * | ||
499 | * get a pointer to an element represented by absolute idx | ||
500 | * | ||
501 | * @param p_chain | ||
502 | * @assumption p_chain->size is a power of 2 | ||
503 | * | ||
504 | * @return void*, a pointer to next element | ||
505 | */ | ||
506 | static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain, | ||
507 | u16 idx) | ||
508 | { | ||
509 | void *ret = NULL; | ||
510 | |||
511 | if (idx >= p_chain->size) | ||
512 | return NULL; | ||
513 | |||
514 | ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx; | ||
515 | |||
516 | return ret; | ||
517 | } | ||
518 | |||
/**
 * @brief qed_chain_sge_inc_cons_prod
 *
 * for sge chains, producer isn't increased serially, the ring
 * is expected to be full at all times. Once elements are
 * consumed, they are immediately produced.
 *
 * @param p_chain
 * @param cnt	number of elements consumed and immediately re-produced
 */
static inline void
qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
			    u16 cnt)
{
	p_chain->prod_idx += cnt;
	p_chain->cons_idx += cnt;
}
538 | |||
539 | #endif | ||
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h new file mode 100644 index 000000000000..81ab178e31c1 --- /dev/null +++ b/include/linux/qed/qed_eth_if.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * Copyright (c) 2015 QLogic Corporation | ||
3 | * | ||
4 | * This software is available under the terms of the GNU General Public License | ||
5 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
6 | * this source tree. | ||
7 | */ | ||
8 | |||
9 | #ifndef _QED_ETH_IF_H | ||
10 | #define _QED_ETH_IF_H | ||
11 | |||
12 | #include <linux/list.h> | ||
13 | #include <linux/if_link.h> | ||
14 | #include <linux/qed/eth_common.h> | ||
15 | #include <linux/qed/qed_if.h> | ||
16 | |||
/* L2-specific device information, layered on top of the common qed info */
struct qed_dev_eth_info {
	struct qed_dev_info common;

	u8	num_queues;
	u8	num_tc;		/* number of traffic classes */

	u8	port_mac[ETH_ALEN];	/* MAC address of the port */
	u8	num_vlan_filters;	/* available VLAN filter entries */
};
26 | |||
/* RSS configuration applied on a vport update.
 * NOTE(review): the sizes (128 indirection entries, 10 * u32 = 40-byte key)
 * look like HW limits - confirm against the qed core before reusing.
 */
struct qed_update_vport_rss_params {
	u16	rss_ind_table[128];
	u32	rss_key[10];
};

/* Parameters for updating an existing vport; each update_*_flg gates
 * whether the corresponding value is applied.
 */
struct qed_update_vport_params {
	u8	vport_id;
	u8	update_vport_active_flg;
	u8	vport_active_flg;
	u8	update_rss_flg;
	struct qed_update_vport_rss_params rss_params;
};
39 | |||
/* Parameters for stopping an Rx queue */
struct qed_stop_rxq_params {
	u8	rss_id;
	u8	rx_queue_id;
	u8	vport_id;
	/* presumably: complete only via the event queue - confirm usage */
	bool	eq_completion_only;
};

/* Parameters for stopping a Tx queue */
struct qed_stop_txq_params {
	u8	rss_id;
	u8	tx_queue_id;
};
51 | |||
/* Rx-mode (promiscuity) settings */
enum qed_filter_rx_mode_type {
	QED_FILTER_RX_MODE_TYPE_REGULAR,
	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,	/* multicast-promiscuous */
	QED_FILTER_RX_MODE_TYPE_PROMISC,
};

/* Action to apply to a unicast/multicast filter entry */
enum qed_filter_xcast_params_type {
	QED_FILTER_XCAST_TYPE_ADD,
	QED_FILTER_XCAST_TYPE_DEL,
	QED_FILTER_XCAST_TYPE_REPLACE,
};

/* Unicast filter entry; the *_valid flags mark which keys are meaningful */
struct qed_filter_ucast_params {
	enum qed_filter_xcast_params_type type;
	u8 vlan_valid;
	u16 vlan;
	u8 mac_valid;
	unsigned char mac[ETH_ALEN];
};

/* Multicast filter request: a batch of up to 64 MAC addresses */
struct qed_filter_mcast_params {
	enum qed_filter_xcast_params_type type;
	u8 num;		/* number of valid entries in mac[] */
	unsigned char mac[64][ETH_ALEN];
};

/* Per-type payload of a filter_config() request */
union qed_filter_type_params {
	enum qed_filter_rx_mode_type accept_flags;
	struct qed_filter_ucast_params ucast;
	struct qed_filter_mcast_params mcast;
};

enum qed_filter_type {
	QED_FILTER_TYPE_UCAST,
	QED_FILTER_TYPE_MCAST,
	QED_FILTER_TYPE_RX_MODE,
	QED_MAX_FILTER_TYPES,	/* must remain last */
};

/* Discriminated filter request: 'type' selects the union member */
struct qed_filter_params {
	enum qed_filter_type type;
	union qed_filter_type_params filter;
};
95 | |||
/* Parameters common to starting Rx and Tx queues */
struct qed_queue_start_common_params {
	u8	rss_id;
	u8	queue_id;
	u8	vport_id;
	u16	sb;		/* status block - presumably its id; confirm */
	u16	sb_idx;		/* index within the status block */
};

/* Callbacks the L2 protocol driver registers with qed */
struct qed_eth_cb_ops {
	struct qed_common_cb_ops common;
};
107 | |||
/* Operations qed exports to the L2 protocol driver. All callbacks take the
 * qed device handle as their first argument.
 */
struct qed_eth_ops {
	const struct qed_common_ops *common;

	/* Fill @info with the L2 capabilities of @cdev */
	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_eth_info *info);

	/* Register protocol-driver callbacks plus an opaque @cookie */
	void (*register_ops)(struct qed_dev *cdev,
			     struct qed_eth_cb_ops *ops,
			     void *cookie);

	int (*vport_start)(struct qed_dev *cdev,
			   u8 vport_id, u16 mtu,
			   u8 drop_ttl0_flg,
			   u8 inner_vlan_removal_en_flg);

	int (*vport_stop)(struct qed_dev *cdev,
			  u8 vport_id);

	int (*vport_update)(struct qed_dev *cdev,
			    struct qed_update_vport_params *params);

	/* Start an Rx queue; on success *pp_prod receives a mapped
	 * location (presumably for producer updates - confirm).
	 */
	int (*q_rx_start)(struct qed_dev *cdev,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod);

	int (*q_rx_stop)(struct qed_dev *cdev,
			 struct qed_stop_rxq_params *params);

	/* Start a Tx queue; on success *pp_doorbell receives the queue's
	 * mapped doorbell location.
	 */
	int (*q_tx_start)(struct qed_dev *cdev,
			  struct qed_queue_start_common_params *params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell);

	int (*q_tx_stop)(struct qed_dev *cdev,
			 struct qed_stop_txq_params *params);

	/* Apply a unicast/multicast/rx-mode filter configuration */
	int (*filter_config)(struct qed_dev *cdev,
			     struct qed_filter_params *params);

	int (*fastpath_stop)(struct qed_dev *cdev);

	/* Hand a slowpath Rx CQE back to qed for completion */
	int (*eth_cqe_completion)(struct qed_dev *cdev,
				  u8 rss_id,
				  struct eth_slow_path_rx_cqe *cqe);

	void (*get_vport_stats)(struct qed_dev *cdev,
				struct qed_eth_stats *stats);
};
161 | |||
/* Acquire/release the L2 ops table; 'version' is presumably checked against
 * qed's protocol version (see qed_get_protocol_version()) - confirm.
 */
const struct qed_eth_ops *qed_get_eth_ops(u32 version);
void qed_put_eth_ops(void);
164 | |||
165 | #endif | ||
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h new file mode 100644 index 000000000000..dc9a1353f971 --- /dev/null +++ b/include/linux/qed/qed_if.h | |||
@@ -0,0 +1,498 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * | ||
3 | * Copyright (c) 2015 QLogic Corporation | ||
4 | * | ||
5 | * This software is available under the terms of the GNU General Public License | ||
6 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
7 | * this source tree. | ||
8 | */ | ||
9 | |||
10 | #ifndef _QED_IF_H | ||
11 | #define _QED_IF_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/netdevice.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/skbuff.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <asm/byteorder.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/qed/common_hsi.h> | ||
26 | #include <linux/qed/qed_chain.h> | ||
27 | |||
/* Raw 32-bit register accessors over an ioremapped address.
 * Fix: parenthesize 'val' - previously '(u32)val' bound the cast only to
 * the first token of a compound argument expression.
 */
#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0xFF
34 | |||
/* forward */
struct qed_dev;

struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;	/* number of connections to provision - confirm units */
};

/* Per-protocol PF resource parameters */
struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
};
49 | |||
/* Interrupt delivery modes */
enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

/* Driver-side view of a HW status block */
struct qed_sb_info {
	struct status_block *sb_virt;	/* virtual address of the SB */
	dma_addr_t sb_phys;		/* DMA address of the SB */
	u32 sb_ack; /* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;	/* mapped IGU address used by qed_sb_ack() */
	u8 flags;
#define QED_SB_INFO_INIT        0x1
#define QED_SB_INFO_SETUP       0x2

	struct qed_dev *cdev;
};
69 | |||
/* Device-wide information shared by all protocol drivers */
struct qed_dev_info {
	unsigned long	pci_mem_start;
	unsigned long	pci_mem_end;
	unsigned int	pci_irq;
	u8		num_hwfns;	/* HW functions on this device */

	u8		hw_mac[ETH_ALEN];
	bool		is_mf;		/* multi-function mode */

	/* FW version */
	u16		fw_major;
	u16		fw_minor;
	u16		fw_rev;
	u16		fw_eng;

	/* MFW version */
	u32		mfw_rev;

	u32		flash_size;
	u8		mf_mode;
};

/* Status-block client types */
enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
};

/* Protocols a qed device can be probed for */
enum qed_protocol {
	QED_PROTOCOL_ETH,
};
99 | |||
/* Requested link settings; override_flags marks which fields override the
 * default configuration.
 */
struct qed_link_params {
	bool	link_up;

#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
	u32	override_flags;
	bool	autoneg;
	u32	adv_speeds;
	u32	forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
	u32	pause_config;
};

/* Current link state reported to the protocol driver */
struct qed_link_output {
	bool	link_up;

	u32	supported_caps;		/* In SUPPORTED defs */
	u32	advertised_caps;	/* In ADVERTISED defs */
	u32	lp_caps;		/* In ADVERTISED defs */
	u32	speed;			/* In Mb/s */
	u8	duplex;			/* In DUPLEX defs */
	u8	port;			/* In PORT defs */
	bool	autoneg;
	u32	pause_config;
};
129 | |||
#define QED_DRV_VER_STR_SIZE 12
/* Parameters for bringing up the slowpath */
struct qed_slowpath_params {
	u32	int_mode;	/* presumably an enum qed_int_mode value */
	u8	drv_major;
	u8	drv_minor;
	u8	drv_rev;
	u8	drv_eng;
	u8	name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

/* Fastpath interrupt resources handed to the protocol driver */
struct qed_int_info {
	struct msix_entry	*msix;
	u8			msix_cnt;

	/* This should be updated by the protocol driver */
	u8			used_cnt;
};

/* Callbacks qed invokes on the protocol driver */
struct qed_common_cb_ops {
	void	(*link_update)(void			*dev,
			       struct qed_link_output	*link);
};
154 | |||
/* Operations qed exports to every protocol driver */
struct qed_common_ops {
	/* Probe @dev for @protocol; returns the qed device handle */
	struct qed_dev*	(*probe)(struct pci_dev *dev,
				 enum qed_protocol protocol,
				 u32 dp_module, u8 dp_level);

	void		(*remove)(struct qed_dev *cdev);

	int		(*set_power_state)(struct qed_dev *cdev,
					   pci_power_t state);

	/* Set the device name and driver version string (used in prints) */
	void		(*set_id)(struct qed_dev *cdev,
				  char name[],
				  char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * PF params required for the call before slowpath_start is
	 * documented within the qed_pf_params structure definition.
	 */
	void		(*update_pf_params)(struct qed_dev *cdev,
					    struct qed_pf_params *params);
	int		(*slowpath_start)(struct qed_dev *cdev,
					  struct qed_slowpath_params *params);

	int		(*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * upon success, returns number of interrupts allocated for fastpath.
	 */
	int		(*set_fp_int)(struct qed_dev *cdev,
				      u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int		(*get_fp_int)(struct qed_dev *cdev,
				      struct qed_int_info *info);

	/* Initialize/release a status block for the protocol driver */
	u32		(*sb_init)(struct qed_dev *cdev,
				   struct qed_sb_info *sb_info,
				   void *sb_virt_addr,
				   dma_addr_t sb_phy_addr,
				   u16 sb_id,
				   enum qed_sb_type type);

	u32		(*sb_release)(struct qed_dev *cdev,
				      struct qed_sb_info *sb_info,
				      u16 sb_id);

	/* Install/remove a per-index fastpath handler - presumably used
	 * when multiplexing over a single interrupt; confirm with qed core.
	 */
	void		(*simd_handler_config)(struct qed_dev *cdev,
					       void *token,
					       int index,
					       void (*handler)(void *));

	void		(*simd_handler_clean)(struct qed_dev *cdev,
					      int index);
/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int		(*set_link)(struct qed_dev *cdev,
				    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void		(*get_link)(struct qed_dev *cdev,
				    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int		(*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void		(*update_msglvl)(struct qed_dev *cdev,
					 u32 dp_module,
					 u8 dp_level);

	/* Allocate/free a qed chain (see qed_chain.h) */
	int		(*chain_alloc)(struct qed_dev *cdev,
				       enum qed_chain_use_mode intended_use,
				       enum qed_chain_mode mode,
				       u16 num_elems,
				       size_t elem_size,
				       struct qed_chain *p_chain);

	void		(*chain_free)(struct qed_dev *cdev,
				      struct qed_chain *p_chain);
};
256 | |||
/**
 * @brief qed_get_protocol_version
 *
 * @param protocol
 *
 * @return version supported by qed for given protocol driver
 *	   (presumably compared by the protocol driver against the version
 *	   it was built for - confirm)
 */
u32 qed_get_protocol_version(enum qed_protocol protocol);
265 | |||
/* Generic bitfield helpers: each field FOO is described by an unshifted
 * FOO_MASK plus FOO_SHIFT. All macro arguments are parenthesized so that
 * compound expressions (e.g. 'a | b') expand correctly - the previous
 * versions applied '&'/casts to only the first token of the argument.
 */
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	(((_value) & (_name ## _MASK)) << (_name ## _SHIFT))

#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT)); \
		(value) |= (((u64)(flag)) << (name ## _SHIFT)); \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
280 | |||
/* Debug print definitions. All prepend "[function:line(device-name)]"; the
 * leveled variants are gated on the device's runtime dp_level (and, for
 * DP_VERBOSE, its dp_module bitmask).
 * Fixes: removed the stray trailing '\' after DP_ERR which spliced the
 * following line into the macro definition; parenthesized 'module' in
 * DP_VERBOSE.
 */
#define DP_ERR(cdev, fmt, ...)						\
	pr_err("[%s:%d(%s)]" fmt,					\
	       __func__, __LINE__,					\
	       DP_NAME(cdev) ? DP_NAME(cdev) : "",			\
	       ## __VA_ARGS__)

#define DP_NOTICE(cdev, fmt, ...)					     \
	do {								     \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) {	     \
			pr_notice("[%s:%d(%s)]" fmt,			     \
				  __func__, __LINE__,			     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	     \
				  ## __VA_ARGS__);			     \
		}							     \
	} while (0)

#define DP_INFO(cdev, fmt, ...)						     \
	do {								     \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {	     \
			pr_notice("[%s:%d(%s)]" fmt,			     \
				  __func__, __LINE__,			     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	     \
				  ## __VA_ARGS__);			     \
		}							     \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)				     \
	do {								     \
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	     \
			     ((cdev)->dp_module & (module)))) {		     \
			pr_notice("[%s:%d(%s)]" fmt,			     \
				  __func__, __LINE__,			     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	     \
				  ## __VA_ARGS__);			     \
		}							     \
	} while (0)
319 | |||
/* Runtime verbosity: a print is emitted when the device's dp_level is
 * numerically <= the macro's level (see DP_NOTICE/DP_INFO/DP_VERBOSE).
 */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)

/* Module bit-flags matched against the device's dp_module in DP_VERBOSE() */
enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_ROCE	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* NOTE(review): 0x8000000 is already taken by QED_MSG_DEBUG;
	 * new entries need fresh bits.
	 */
};
345 | |||
/* Aggregated L2 statistics: per-vport counters followed by per-port MAC
 * counters. All counters are u64.
 */
struct qed_eth_stats {
	u64	no_buff_discards;
	u64	packet_too_big_discard;
	u64	ttl0_discard;
	u64	rx_ucast_bytes;
	u64	rx_mcast_bytes;
	u64	rx_bcast_bytes;
	u64	rx_ucast_pkts;
	u64	rx_mcast_pkts;
	u64	rx_bcast_pkts;
	u64	mftag_filter_discards;
	u64	mac_filter_discards;
	u64	tx_ucast_bytes;
	u64	tx_mcast_bytes;
	u64	tx_bcast_bytes;
	u64	tx_ucast_pkts;
	u64	tx_mcast_pkts;
	u64	tx_bcast_pkts;
	u64	tx_err_drop_pkts;
	u64	tpa_coalesced_pkts;
	u64	tpa_coalesced_events;
	u64	tpa_aborts_num;
	u64	tpa_not_coalesced_pkts;
	u64	tpa_coalesced_bytes;

	/* port */
	u64	rx_64_byte_packets;
	u64	rx_127_byte_packets;
	u64	rx_255_byte_packets;
	u64	rx_511_byte_packets;
	u64	rx_1023_byte_packets;
	u64	rx_1518_byte_packets;
	u64	rx_1522_byte_packets;
	u64	rx_2047_byte_packets;
	u64	rx_4095_byte_packets;
	u64	rx_9216_byte_packets;
	u64	rx_16383_byte_packets;
	u64	rx_crc_errors;
	u64	rx_mac_crtl_frames;
	u64	rx_pause_frames;
	u64	rx_pfc_frames;
	u64	rx_align_errors;
	u64	rx_carrier_errors;
	u64	rx_oversize_packets;
	u64	rx_jabbers;
	u64	rx_undersize_packets;
	u64	rx_fragments;
	u64	tx_64_byte_packets;
	u64	tx_65_to_127_byte_packets;
	u64	tx_128_to_255_byte_packets;
	u64	tx_256_to_511_byte_packets;
	u64	tx_512_to_1023_byte_packets;
	u64	tx_1024_to_1518_byte_packets;
	u64	tx_1519_to_2047_byte_packets;
	u64	tx_2048_to_4095_byte_packets;
	u64	tx_4096_to_9216_byte_packets;
	u64	tx_9217_to_16383_byte_packets;
	u64	tx_pause_frames;
	u64	tx_pfc_frames;
	u64	tx_lpi_entry_count;
	u64	tx_total_collisions;
	u64	brb_truncates;
	u64	brb_discards;
	u64	rx_mac_bytes;
	u64	rx_mac_uc_packets;
	u64	rx_mac_mc_packets;
	u64	rx_mac_bc_packets;
	u64	rx_mac_frames_ok;
	u64	tx_mac_bytes;
	u64	tx_mac_uc_packets;
	u64	tx_mac_mc_packets;
	u64	tx_mac_bc_packets;
	u64	tx_mac_ctrl_frames;
};
420 | |||
/* Bit set in qed_sb_update_sb_idx()'s return value when the status-block
 * index has changed.
 */
#define QED_SB_IDX              0x0002

/* Protocol-index slots within a status block: Rx uses slot 0, Tx uses one
 * slot per traffic class after it. Fix: parenthesize the 'tc' macro
 * argument so compound expressions expand correctly.
 */
#define RX_PI                   0
#define TX_PI(tc)               (RX_PI + 1 + (tc))
425 | |||
426 | static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) | ||
427 | { | ||
428 | u32 prod = 0; | ||
429 | u16 rc = 0; | ||
430 | |||
431 | prod = le32_to_cpu(sb_info->sb_virt->prod_index) & | ||
432 | STATUS_BLOCK_PROD_INDEX_MASK; | ||
433 | if (sb_info->sb_ack != prod) { | ||
434 | sb_info->sb_ack = prod; | ||
435 | rc |= QED_SB_IDX; | ||
436 | } | ||
437 | |||
438 | /* Let SB update */ | ||
439 | mmiowb(); | ||
440 | return rc; | ||
441 | } | ||
442 | |||
/**
 *
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info	- This is the structure allocated and
 *	   initialized per status block. Assumption is
 *	   that it was initialized using qed_sb_init
 * @param int_cmd	- Enable/Disable/Nop
 * @param upd_flg	- whether igu consumer should be
 *	   updated.
 *
 * @return inline void
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	/* Pack SB index, update flag, interrupt command and segment into
	 * one register-sized word.
	 */
	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}
478 | |||
479 | static inline void __internal_ram_wr(void *p_hwfn, | ||
480 | void __iomem *addr, | ||
481 | int size, | ||
482 | u32 *data) | ||
483 | |||
484 | { | ||
485 | unsigned int i; | ||
486 | |||
487 | for (i = 0; i < size / sizeof(*data); i++) | ||
488 | DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); | ||
489 | } | ||
490 | |||
/* Convenience wrapper for __internal_ram_wr() when no hwfn context is
 * needed.
 */
static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}
497 | |||
498 | #endif | ||
diff --git a/include/linux/random.h b/include/linux/random.h index e651874df2c9..a75840c1aa71 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -7,6 +7,8 @@ | |||
7 | #define _LINUX_RANDOM_H | 7 | #define _LINUX_RANDOM_H |
8 | 8 | ||
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | #include <linux/once.h> | ||
11 | |||
10 | #include <uapi/linux/random.h> | 12 | #include <uapi/linux/random.h> |
11 | 13 | ||
12 | struct random_ready_callback { | 14 | struct random_ready_callback { |
@@ -45,6 +47,10 @@ struct rnd_state { | |||
45 | 47 | ||
46 | u32 prandom_u32_state(struct rnd_state *state); | 48 | u32 prandom_u32_state(struct rnd_state *state); |
47 | void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); | 49 | void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); |
50 | void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); | ||
51 | |||
52 | #define prandom_init_once(pcpu_state) \ | ||
53 | DO_ONCE(prandom_seed_full_state, (pcpu_state)) | ||
48 | 54 | ||
49 | /** | 55 | /** |
50 | * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) | 56 | * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) |
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 830c4992088d..a5aa7ae671f4 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
@@ -101,13 +101,21 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent | |||
101 | }) | 101 | }) |
102 | 102 | ||
103 | /** | 103 | /** |
104 | * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of | 104 | * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of |
105 | * given type safe against removal of rb_node entry | 105 | * given type allowing the backing memory of @pos to be invalidated |
106 | * | 106 | * |
107 | * @pos: the 'type *' to use as a loop cursor. | 107 | * @pos: the 'type *' to use as a loop cursor. |
108 | * @n: another 'type *' to use as temporary storage | 108 | * @n: another 'type *' to use as temporary storage |
109 | * @root: 'rb_root *' of the rbtree. | 109 | * @root: 'rb_root *' of the rbtree. |
110 | * @field: the name of the rb_node field within 'type'. | 110 | * @field: the name of the rb_node field within 'type'. |
111 | * | ||
112 | * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as | ||
113 | * list_for_each_entry_safe() and allows the iteration to continue independent | ||
114 | * of changes to @pos by the body of the loop. | ||
115 | * | ||
116 | * Note, however, that it cannot handle other modifications that re-order the | ||
117 | * rbtree it is iterating over. This includes calling rb_erase() on @pos, as | ||
118 | * rb_erase() may rebalance the tree, causing us to miss some nodes. | ||
111 | */ | 119 | */ |
112 | #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ | 120 | #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ |
113 | for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ | 121 | for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ |
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h new file mode 100644 index 000000000000..a63a33e6196e --- /dev/null +++ b/include/linux/rcu_sync.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * RCU-based infrastructure for lightweight reader-writer locking | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, you can access it online at | ||
16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
17 | * | ||
18 | * Copyright (c) 2015, Red Hat, Inc. | ||
19 | * | ||
20 | * Author: Oleg Nesterov <oleg@redhat.com> | ||
21 | */ | ||
22 | |||
23 | #ifndef _LINUX_RCU_SYNC_H_ | ||
24 | #define _LINUX_RCU_SYNC_H_ | ||
25 | |||
26 | #include <linux/wait.h> | ||
27 | #include <linux/rcupdate.h> | ||
28 | |||
29 | enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; | ||
30 | |||
31 | /* Structure to mediate between updaters and fastpath-using readers. */ | ||
32 | struct rcu_sync { | ||
33 | int gp_state; | ||
34 | int gp_count; | ||
35 | wait_queue_head_t gp_wait; | ||
36 | |||
37 | int cb_state; | ||
38 | struct rcu_head cb_head; | ||
39 | |||
40 | enum rcu_sync_type gp_type; | ||
41 | }; | ||
42 | |||
43 | extern void rcu_sync_lockdep_assert(struct rcu_sync *); | ||
44 | |||
45 | /** | ||
46 | * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? | ||
47 | * @rsp: Pointer to rcu_sync structure to use for synchronization | ||
48 | * | ||
49 | * Returns true if readers are permitted to use their fastpaths. | ||
50 | * Must be invoked within an RCU read-side critical section whose | ||
51 | * flavor matches that of the rcu_sync struture. | ||
52 | */ | ||
53 | static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) | ||
54 | { | ||
55 | #ifdef CONFIG_PROVE_RCU | ||
56 | rcu_sync_lockdep_assert(rsp); | ||
57 | #endif | ||
58 | return !rsp->gp_state; /* GP_IDLE */ | ||
59 | } | ||
60 | |||
61 | extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); | ||
62 | extern void rcu_sync_enter(struct rcu_sync *); | ||
63 | extern void rcu_sync_exit(struct rcu_sync *); | ||
64 | extern void rcu_sync_dtor(struct rcu_sync *); | ||
65 | |||
66 | #define __RCU_SYNC_INITIALIZER(name, type) { \ | ||
67 | .gp_state = 0, \ | ||
68 | .gp_count = 0, \ | ||
69 | .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ | ||
70 | .cb_state = 0, \ | ||
71 | .gp_type = type, \ | ||
72 | } | ||
73 | |||
74 | #define __DEFINE_RCU_SYNC(name, type) \ | ||
75 | struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) | ||
76 | |||
77 | #define DEFINE_RCU_SYNC(name) \ | ||
78 | __DEFINE_RCU_SYNC(name, RCU_SYNC) | ||
79 | |||
80 | #define DEFINE_RCU_SCHED_SYNC(name) \ | ||
81 | __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) | ||
82 | |||
83 | #define DEFINE_RCU_BH_SYNC(name) \ | ||
84 | __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) | ||
85 | |||
86 | #endif /* _LINUX_RCU_SYNC_H_ */ | ||
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 17c6b1f84a77..5ed540986019 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
248 | */ | 248 | */ |
249 | #define list_entry_rcu(ptr, type, member) \ | 249 | #define list_entry_rcu(ptr, type, member) \ |
250 | ({ \ | 250 | container_of(lockless_dereference(ptr), type, member) |
251 | typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ | ||
252 | container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ | ||
253 | }) | ||
254 | 251 | ||
255 | /** | 252 | /** |
256 | * Where are list_empty_rcu() and list_first_entry_rcu()? | 253 | * Where are list_empty_rcu() and list_first_entry_rcu()? |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..a0189ba67fde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, | |||
160 | * more than one CPU). | 160 | * more than one CPU). |
161 | */ | 161 | */ |
162 | void call_rcu(struct rcu_head *head, | 162 | void call_rcu(struct rcu_head *head, |
163 | void (*func)(struct rcu_head *head)); | 163 | rcu_callback_t func); |
164 | 164 | ||
165 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | 165 | #else /* #ifdef CONFIG_PREEMPT_RCU */ |
166 | 166 | ||
@@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head, | |||
191 | * memory ordering guarantees. | 191 | * memory ordering guarantees. |
192 | */ | 192 | */ |
193 | void call_rcu_bh(struct rcu_head *head, | 193 | void call_rcu_bh(struct rcu_head *head, |
194 | void (*func)(struct rcu_head *head)); | 194 | rcu_callback_t func); |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. | 197 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. |
@@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head, | |||
213 | * memory ordering guarantees. | 213 | * memory ordering guarantees. |
214 | */ | 214 | */ |
215 | void call_rcu_sched(struct rcu_head *head, | 215 | void call_rcu_sched(struct rcu_head *head, |
216 | void (*func)(struct rcu_head *rcu)); | 216 | rcu_callback_t func); |
217 | 217 | ||
218 | void synchronize_sched(void); | 218 | void synchronize_sched(void); |
219 | 219 | ||
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
230 | struct rcu_synchronize *rs_array); | 230 | struct rcu_synchronize *rs_array); |
231 | 231 | ||
232 | #define _wait_rcu_gp(checktiny, ...) \ | 232 | #define _wait_rcu_gp(checktiny, ...) \ |
233 | do { \ | 233 | do { \ |
234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ | 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ |
235 | const int __n = ARRAY_SIZE(__crcu_array); \ | 235 | struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ |
236 | struct rcu_synchronize __rs_array[__n]; \ | 236 | __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ |
237 | \ | 237 | __crcu_array, __rs_array); \ |
238 | __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ | ||
239 | } while (0) | 238 | } while (0) |
240 | 239 | ||
241 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 240 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
@@ -275,7 +274,7 @@ do { \ | |||
275 | * See the description of call_rcu() for more detailed information on | 274 | * See the description of call_rcu() for more detailed information on |
276 | * memory ordering guarantees. | 275 | * memory ordering guarantees. |
277 | */ | 276 | */ |
278 | void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); | 277 | void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); |
279 | void synchronize_rcu_tasks(void); | 278 | void synchronize_rcu_tasks(void); |
280 | void rcu_barrier_tasks(void); | 279 | void rcu_barrier_tasks(void); |
281 | 280 | ||
@@ -298,12 +297,14 @@ void synchronize_rcu(void); | |||
298 | 297 | ||
299 | static inline void __rcu_read_lock(void) | 298 | static inline void __rcu_read_lock(void) |
300 | { | 299 | { |
301 | preempt_disable(); | 300 | if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) |
301 | preempt_disable(); | ||
302 | } | 302 | } |
303 | 303 | ||
304 | static inline void __rcu_read_unlock(void) | 304 | static inline void __rcu_read_unlock(void) |
305 | { | 305 | { |
306 | preempt_enable(); | 306 | if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) |
307 | preempt_enable(); | ||
307 | } | 308 | } |
308 | 309 | ||
309 | static inline void synchronize_rcu(void) | 310 | static inline void synchronize_rcu(void) |
@@ -536,29 +537,9 @@ static inline int rcu_read_lock_sched_held(void) | |||
536 | 537 | ||
537 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 538 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
538 | 539 | ||
539 | /* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */ | ||
540 | static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void) | ||
541 | { | ||
542 | } | ||
543 | |||
544 | #ifdef CONFIG_PROVE_RCU | 540 | #ifdef CONFIG_PROVE_RCU |
545 | 541 | ||
546 | /** | 542 | /** |
547 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | ||
548 | * @c: condition to check | ||
549 | * @s: informative message | ||
550 | */ | ||
551 | #define rcu_lockdep_assert(c, s) \ | ||
552 | do { \ | ||
553 | static bool __section(.data.unlikely) __warned; \ | ||
554 | deprecate_rcu_lockdep_assert(); \ | ||
555 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
556 | __warned = true; \ | ||
557 | lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ | ||
558 | } \ | ||
559 | } while (0) | ||
560 | |||
561 | /** | ||
562 | * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met | 543 | * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met |
563 | * @c: condition to check | 544 | * @c: condition to check |
564 | * @s: informative message | 545 | * @s: informative message |
@@ -595,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void) | |||
595 | 576 | ||
596 | #else /* #ifdef CONFIG_PROVE_RCU */ | 577 | #else /* #ifdef CONFIG_PROVE_RCU */ |
597 | 578 | ||
598 | #define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert() | ||
599 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0) | 579 | #define RCU_LOCKDEP_WARN(c, s) do { } while (0) |
600 | #define rcu_sleep_check() do { } while (0) | 580 | #define rcu_sleep_check() do { } while (0) |
601 | 581 | ||
@@ -812,6 +792,28 @@ static inline void rcu_preempt_sleep_check(void) | |||
812 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) | 792 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
813 | 793 | ||
814 | /** | 794 | /** |
795 | * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism | ||
796 | * @p: The pointer to hand off | ||
797 | * | ||
798 | * This is simply an identity function, but it documents where a pointer | ||
799 | * is handed off from RCU to some other synchronization mechanism, for | ||
800 | * example, reference counting or locking. In C11, it would map to | ||
801 | * kill_dependency(). It could be used as follows: | ||
802 | * | ||
803 | * rcu_read_lock(); | ||
804 | * p = rcu_dereference(gp); | ||
805 | * long_lived = is_long_lived(p); | ||
806 | * if (long_lived) { | ||
807 | * if (!atomic_inc_not_zero(p->refcnt)) | ||
808 | * long_lived = false; | ||
809 | * else | ||
810 | * p = rcu_pointer_handoff(p); | ||
811 | * } | ||
812 | * rcu_read_unlock(); | ||
813 | */ | ||
814 | #define rcu_pointer_handoff(p) (p) | ||
815 | |||
816 | /** | ||
815 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section | 817 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
816 | * | 818 | * |
817 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 819 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
@@ -1066,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
1066 | #define __kfree_rcu(head, offset) \ | 1068 | #define __kfree_rcu(head, offset) \ |
1067 | do { \ | 1069 | do { \ |
1068 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ | 1070 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ |
1069 | kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ | 1071 | kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ |
1070 | } while (0) | 1072 | } while (0) |
1071 | 1073 | ||
1072 | /** | 1074 | /** |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index ff968b7af3a4..4c1aaf9cce7b 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -83,7 +83,7 @@ static inline void synchronize_sched_expedited(void) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void kfree_call_rcu(struct rcu_head *head, | 85 | static inline void kfree_call_rcu(struct rcu_head *head, |
86 | void (*func)(struct rcu_head *rcu)) | 86 | rcu_callback_t func) |
87 | { | 87 | { |
88 | call_rcu(head, func); | 88 | call_rcu(head, func); |
89 | } | 89 | } |
@@ -216,6 +216,7 @@ static inline bool rcu_is_watching(void) | |||
216 | 216 | ||
217 | static inline void rcu_all_qs(void) | 217 | static inline void rcu_all_qs(void) |
218 | { | 218 | { |
219 | barrier(); /* Avoid RCU read-side critical sections leaking across. */ | ||
219 | } | 220 | } |
220 | 221 | ||
221 | #endif /* __LINUX_RCUTINY_H */ | 222 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5abec82f325e..60d15a080d7c 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -48,7 +48,7 @@ void synchronize_rcu_bh(void); | |||
48 | void synchronize_sched_expedited(void); | 48 | void synchronize_sched_expedited(void); |
49 | void synchronize_rcu_expedited(void); | 49 | void synchronize_rcu_expedited(void); |
50 | 50 | ||
51 | void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 51 | void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period | 54 | * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8fc0bfd8edc4..d68bb402120e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
@@ -296,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg, | |||
296 | unsigned int *val); | 296 | unsigned int *val); |
297 | typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, | 297 | typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, |
298 | unsigned int val); | 298 | unsigned int val); |
299 | typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg, | ||
300 | unsigned int mask, unsigned int val); | ||
299 | typedef struct regmap_async *(*regmap_hw_async_alloc)(void); | 301 | typedef struct regmap_async *(*regmap_hw_async_alloc)(void); |
300 | typedef void (*regmap_hw_free_context)(void *context); | 302 | typedef void (*regmap_hw_free_context)(void *context); |
301 | 303 | ||
@@ -335,6 +337,7 @@ struct regmap_bus { | |||
335 | regmap_hw_gather_write gather_write; | 337 | regmap_hw_gather_write gather_write; |
336 | regmap_hw_async_write async_write; | 338 | regmap_hw_async_write async_write; |
337 | regmap_hw_reg_write reg_write; | 339 | regmap_hw_reg_write reg_write; |
340 | regmap_hw_reg_update_bits reg_update_bits; | ||
338 | regmap_hw_read read; | 341 | regmap_hw_read read; |
339 | regmap_hw_reg_read reg_read; | 342 | regmap_hw_reg_read reg_read; |
340 | regmap_hw_free_context free_context; | 343 | regmap_hw_free_context free_context; |
@@ -791,6 +794,9 @@ struct regmap_irq { | |||
791 | unsigned int mask; | 794 | unsigned int mask; |
792 | }; | 795 | }; |
793 | 796 | ||
797 | #define REGMAP_IRQ_REG(_irq, _off, _mask) \ | ||
798 | [_irq] = { .reg_offset = (_off), .mask = (_mask) } | ||
799 | |||
794 | /** | 800 | /** |
795 | * Description of a generic regmap irq_chip. This is not intended to | 801 | * Description of a generic regmap irq_chip. This is not intended to |
796 | * handle every possible interrupt controller, but it should handle a | 802 | * handle every possible interrupt controller, but it should handle a |
@@ -800,6 +806,8 @@ struct regmap_irq { | |||
800 | * | 806 | * |
801 | * @status_base: Base status register address. | 807 | * @status_base: Base status register address. |
802 | * @mask_base: Base mask register address. | 808 | * @mask_base: Base mask register address. |
809 | * @unmask_base: Base unmask register address. for chips who have | ||
810 | * separate mask and unmask registers | ||
803 | * @ack_base: Base ack address. If zero then the chip is clear on read. | 811 | * @ack_base: Base ack address. If zero then the chip is clear on read. |
804 | * Using zero value is possible with @use_ack bit. | 812 | * Using zero value is possible with @use_ack bit. |
805 | * @wake_base: Base address for wake enables. If zero unsupported. | 813 | * @wake_base: Base address for wake enables. If zero unsupported. |
@@ -807,6 +815,7 @@ struct regmap_irq { | |||
807 | * @init_ack_masked: Ack all masked interrupts once during initalization. | 815 | * @init_ack_masked: Ack all masked interrupts once during initalization. |
808 | * @mask_invert: Inverted mask register: cleared bits are masked out. | 816 | * @mask_invert: Inverted mask register: cleared bits are masked out. |
809 | * @use_ack: Use @ack register even if it is zero. | 817 | * @use_ack: Use @ack register even if it is zero. |
818 | * @ack_invert: Inverted ack register: cleared bits for ack. | ||
810 | * @wake_invert: Inverted wake register: cleared bits are wake enabled. | 819 | * @wake_invert: Inverted wake register: cleared bits are wake enabled. |
811 | * @runtime_pm: Hold a runtime PM lock on the device when accessing it. | 820 | * @runtime_pm: Hold a runtime PM lock on the device when accessing it. |
812 | * | 821 | * |
@@ -820,12 +829,14 @@ struct regmap_irq_chip { | |||
820 | 829 | ||
821 | unsigned int status_base; | 830 | unsigned int status_base; |
822 | unsigned int mask_base; | 831 | unsigned int mask_base; |
832 | unsigned int unmask_base; | ||
823 | unsigned int ack_base; | 833 | unsigned int ack_base; |
824 | unsigned int wake_base; | 834 | unsigned int wake_base; |
825 | unsigned int irq_reg_stride; | 835 | unsigned int irq_reg_stride; |
826 | bool init_ack_masked:1; | 836 | bool init_ack_masked:1; |
827 | bool mask_invert:1; | 837 | bool mask_invert:1; |
828 | bool use_ack:1; | 838 | bool use_ack:1; |
839 | bool ack_invert:1; | ||
829 | bool wake_invert:1; | 840 | bool wake_invert:1; |
830 | bool runtime_pm:1; | 841 | bool runtime_pm:1; |
831 | 842 | ||
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 45932228cbf5..9c2903e58adb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -245,6 +245,7 @@ enum regulator_type { | |||
245 | * @linear_min_sel: Minimal selector for starting linear mapping | 245 | * @linear_min_sel: Minimal selector for starting linear mapping |
246 | * @fixed_uV: Fixed voltage of rails. | 246 | * @fixed_uV: Fixed voltage of rails. |
247 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) | 247 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) |
248 | * @min_dropout_uV: The minimum dropout voltage this regulator can handle | ||
248 | * @linear_ranges: A constant table of possible voltage ranges. | 249 | * @linear_ranges: A constant table of possible voltage ranges. |
249 | * @n_linear_ranges: Number of entries in the @linear_ranges table. | 250 | * @n_linear_ranges: Number of entries in the @linear_ranges table. |
250 | * @volt_table: Voltage mapping table (if table based mapping) | 251 | * @volt_table: Voltage mapping table (if table based mapping) |
@@ -292,6 +293,7 @@ struct regulator_desc { | |||
292 | unsigned int linear_min_sel; | 293 | unsigned int linear_min_sel; |
293 | int fixed_uV; | 294 | int fixed_uV; |
294 | unsigned int ramp_delay; | 295 | unsigned int ramp_delay; |
296 | int min_dropout_uV; | ||
295 | 297 | ||
296 | const struct regulator_linear_range *linear_ranges; | 298 | const struct regulator_linear_range *linear_ranges; |
297 | int n_linear_ranges; | 299 | int n_linear_ranges; |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e2c13cd863bd..4acc552e9279 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
154 | } | 154 | } |
155 | #endif | 155 | #endif |
156 | 156 | ||
157 | int ring_buffer_empty(struct ring_buffer *buffer); | 157 | bool ring_buffer_empty(struct ring_buffer *buffer); |
158 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); | 158 | bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); |
159 | 159 | ||
160 | void ring_buffer_record_disable(struct ring_buffer *buffer); | 160 | void ring_buffer_record_disable(struct ring_buffer *buffer); |
161 | void ring_buffer_record_enable(struct ring_buffer *buffer); | 161 | void ring_buffer_record_enable(struct ring_buffer *buffer); |
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h index 3f594dce5716..fe3dc64e5aeb 100644 --- a/include/linux/rotary_encoder.h +++ b/include/linux/rotary_encoder.h | |||
@@ -8,9 +8,10 @@ struct rotary_encoder_platform_data { | |||
8 | unsigned int gpio_b; | 8 | unsigned int gpio_b; |
9 | unsigned int inverted_a; | 9 | unsigned int inverted_a; |
10 | unsigned int inverted_b; | 10 | unsigned int inverted_b; |
11 | unsigned int steps_per_period; | ||
11 | bool relative_axis; | 12 | bool relative_axis; |
12 | bool rollover; | 13 | bool rollover; |
13 | bool half_period; | 14 | bool wakeup_source; |
14 | }; | 15 | }; |
15 | 16 | ||
16 | #endif /* __ROTARY_ENCODER_H__ */ | 17 | #endif /* __ROTARY_ENCODER_H__ */ |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 39adaa9529eb..4be5048b1fbe 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -33,11 +33,11 @@ extern wait_queue_head_t netdev_unregistering_wq; | |||
33 | extern struct mutex net_mutex; | 33 | extern struct mutex net_mutex; |
34 | 34 | ||
35 | #ifdef CONFIG_PROVE_LOCKING | 35 | #ifdef CONFIG_PROVE_LOCKING |
36 | extern int lockdep_rtnl_is_held(void); | 36 | extern bool lockdep_rtnl_is_held(void); |
37 | #else | 37 | #else |
38 | static inline int lockdep_rtnl_is_held(void) | 38 | static inline bool lockdep_rtnl_is_held(void) |
39 | { | 39 | { |
40 | return 1; | 40 | return true; |
41 | } | 41 | } |
42 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 42 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
43 | 43 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index b7b9501b41af..edad7a43edea 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -384,6 +384,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, | |||
384 | void __user *buffer, | 384 | void __user *buffer, |
385 | size_t *lenp, loff_t *ppos); | 385 | size_t *lenp, loff_t *ppos); |
386 | extern unsigned int softlockup_panic; | 386 | extern unsigned int softlockup_panic; |
387 | extern unsigned int hardlockup_panic; | ||
387 | void lockup_detector_init(void); | 388 | void lockup_detector_init(void); |
388 | #else | 389 | #else |
389 | static inline void touch_softlockup_watchdog(void) | 390 | static inline void touch_softlockup_watchdog(void) |
@@ -483,9 +484,11 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
483 | #define MMF_DUMP_ELF_HEADERS 6 | 484 | #define MMF_DUMP_ELF_HEADERS 6 |
484 | #define MMF_DUMP_HUGETLB_PRIVATE 7 | 485 | #define MMF_DUMP_HUGETLB_PRIVATE 7 |
485 | #define MMF_DUMP_HUGETLB_SHARED 8 | 486 | #define MMF_DUMP_HUGETLB_SHARED 8 |
487 | #define MMF_DUMP_DAX_PRIVATE 9 | ||
488 | #define MMF_DUMP_DAX_SHARED 10 | ||
486 | 489 | ||
487 | #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS | 490 | #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS |
488 | #define MMF_DUMP_FILTER_BITS 7 | 491 | #define MMF_DUMP_FILTER_BITS 9 |
489 | #define MMF_DUMP_FILTER_MASK \ | 492 | #define MMF_DUMP_FILTER_MASK \ |
490 | (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) | 493 | (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) |
491 | #define MMF_DUMP_FILTER_DEFAULT \ | 494 | #define MMF_DUMP_FILTER_DEFAULT \ |
@@ -599,33 +602,42 @@ struct task_cputime_atomic { | |||
599 | .sum_exec_runtime = ATOMIC64_INIT(0), \ | 602 | .sum_exec_runtime = ATOMIC64_INIT(0), \ |
600 | } | 603 | } |
601 | 604 | ||
602 | #ifdef CONFIG_PREEMPT_COUNT | 605 | #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) |
603 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) | ||
604 | #else | ||
605 | #define PREEMPT_DISABLED PREEMPT_ENABLED | ||
606 | #endif | ||
607 | 606 | ||
608 | /* | 607 | /* |
609 | * Disable preemption until the scheduler is running. | 608 | * Disable preemption until the scheduler is running -- use an unconditional |
610 | * Reset by start_kernel()->sched_init()->init_idle(). | 609 | * value so that it also works on !PREEMPT_COUNT kernels. |
611 | * | 610 | * |
612 | * We include PREEMPT_ACTIVE to avoid cond_resched() from working | 611 | * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). |
613 | * before the scheduler is active -- see should_resched(). | ||
614 | */ | 612 | */ |
615 | #define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE) | 613 | #define INIT_PREEMPT_COUNT PREEMPT_OFFSET |
614 | |||
615 | /* | ||
616 | * Initial preempt_count value; reflects the preempt_count schedule invariant | ||
617 | * which states that during context switches: | ||
618 | * | ||
619 | * preempt_count() == 2*PREEMPT_DISABLE_OFFSET | ||
620 | * | ||
621 | * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. | ||
622 | * Note: See finish_task_switch(). | ||
623 | */ | ||
624 | #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) | ||
616 | 625 | ||
617 | /** | 626 | /** |
618 | * struct thread_group_cputimer - thread group interval timer counts | 627 | * struct thread_group_cputimer - thread group interval timer counts |
619 | * @cputime_atomic: atomic thread group interval timers. | 628 | * @cputime_atomic: atomic thread group interval timers. |
620 | * @running: non-zero when there are timers running and | 629 | * @running: true when there are timers running and |
621 | * @cputime receives updates. | 630 | * @cputime_atomic receives updates. |
631 | * @checking_timer: true when a thread in the group is in the | ||
632 | * process of checking for thread group timers. | ||
622 | * | 633 | * |
623 | * This structure contains the version of task_cputime, above, that is | 634 | * This structure contains the version of task_cputime, above, that is |
624 | * used for thread group CPU timer calculations. | 635 | * used for thread group CPU timer calculations. |
625 | */ | 636 | */ |
626 | struct thread_group_cputimer { | 637 | struct thread_group_cputimer { |
627 | struct task_cputime_atomic cputime_atomic; | 638 | struct task_cputime_atomic cputime_atomic; |
628 | int running; | 639 | bool running; |
640 | bool checking_timer; | ||
629 | }; | 641 | }; |
630 | 642 | ||
631 | #include <linux/rwsem.h> | 643 | #include <linux/rwsem.h> |
@@ -762,18 +774,6 @@ struct signal_struct { | |||
762 | unsigned audit_tty_log_passwd; | 774 | unsigned audit_tty_log_passwd; |
763 | struct tty_audit_buf *tty_audit_buf; | 775 | struct tty_audit_buf *tty_audit_buf; |
764 | #endif | 776 | #endif |
765 | #ifdef CONFIG_CGROUPS | ||
766 | /* | ||
767 | * group_rwsem prevents new tasks from entering the threadgroup and | ||
768 | * member tasks from exiting,a more specifically, setting of | ||
769 | * PF_EXITING. fork and exit paths are protected with this rwsem | ||
770 | * using threadgroup_change_begin/end(). Users which require | ||
771 | * threadgroup to remain stable should use threadgroup_[un]lock() | ||
772 | * which also takes care of exec path. Currently, cgroup is the | ||
773 | * only user. | ||
774 | */ | ||
775 | struct rw_semaphore group_rwsem; | ||
776 | #endif | ||
777 | 777 | ||
778 | oom_flags_t oom_flags; | 778 | oom_flags_t oom_flags; |
779 | short oom_score_adj; /* OOM kill score adjustment */ | 779 | short oom_score_adj; /* OOM kill score adjustment */ |
@@ -840,7 +840,7 @@ struct user_struct { | |||
840 | struct hlist_node uidhash_node; | 840 | struct hlist_node uidhash_node; |
841 | kuid_t uid; | 841 | kuid_t uid; |
842 | 842 | ||
843 | #ifdef CONFIG_PERF_EVENTS | 843 | #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) |
844 | atomic_long_t locked_vm; | 844 | atomic_long_t locked_vm; |
845 | #endif | 845 | #endif |
846 | }; | 846 | }; |
@@ -1139,8 +1139,6 @@ struct sched_domain_topology_level { | |||
1139 | #endif | 1139 | #endif |
1140 | }; | 1140 | }; |
1141 | 1141 | ||
1142 | extern struct sched_domain_topology_level *sched_domain_topology; | ||
1143 | |||
1144 | extern void set_sched_topology(struct sched_domain_topology_level *tl); | 1142 | extern void set_sched_topology(struct sched_domain_topology_level *tl); |
1145 | extern void wake_up_if_idle(int cpu); | 1143 | extern void wake_up_if_idle(int cpu); |
1146 | 1144 | ||
@@ -1189,10 +1187,10 @@ struct load_weight { | |||
1189 | 1187 | ||
1190 | /* | 1188 | /* |
1191 | * The load_avg/util_avg accumulates an infinite geometric series. | 1189 | * The load_avg/util_avg accumulates an infinite geometric series. |
1192 | * 1) load_avg factors the amount of time that a sched_entity is | 1190 | * 1) load_avg factors frequency scaling into the amount of time that a |
1193 | * runnable on a rq into its weight. For cfs_rq, it is the aggregated | 1191 | * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the |
1194 | * such weights of all runnable and blocked sched_entities. | 1192 | * aggregated such weights of all runnable and blocked sched_entities. |
1195 | * 2) util_avg factors frequency scaling into the amount of time | 1193 | * 2) util_avg factors frequency and cpu scaling into the amount of time |
1196 | * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. | 1194 | * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. |
1197 | * For cfs_rq, it is the aggregated such times of all runnable and | 1195 | * For cfs_rq, it is the aggregated such times of all runnable and |
1198 | * blocked sched_entities. | 1196 | * blocked sched_entities. |
@@ -1342,10 +1340,12 @@ struct sched_dl_entity { | |||
1342 | 1340 | ||
1343 | union rcu_special { | 1341 | union rcu_special { |
1344 | struct { | 1342 | struct { |
1345 | bool blocked; | 1343 | u8 blocked; |
1346 | bool need_qs; | 1344 | u8 need_qs; |
1347 | } b; | 1345 | u8 exp_need_qs; |
1348 | short s; | 1346 | u8 pad; /* Otherwise the compiler can store garbage here. */ |
1347 | } b; /* Bits. */ | ||
1348 | u32 s; /* Set of bits. */ | ||
1349 | }; | 1349 | }; |
1350 | struct rcu_node; | 1350 | struct rcu_node; |
1351 | 1351 | ||
@@ -1463,7 +1463,9 @@ struct task_struct { | |||
1463 | unsigned sched_reset_on_fork:1; | 1463 | unsigned sched_reset_on_fork:1; |
1464 | unsigned sched_contributes_to_load:1; | 1464 | unsigned sched_contributes_to_load:1; |
1465 | unsigned sched_migrated:1; | 1465 | unsigned sched_migrated:1; |
1466 | 1466 | #ifdef CONFIG_MEMCG | |
1467 | unsigned memcg_may_oom:1; | ||
1468 | #endif | ||
1467 | #ifdef CONFIG_MEMCG_KMEM | 1469 | #ifdef CONFIG_MEMCG_KMEM |
1468 | unsigned memcg_kmem_skip_account:1; | 1470 | unsigned memcg_kmem_skip_account:1; |
1469 | #endif | 1471 | #endif |
@@ -1570,9 +1572,7 @@ struct task_struct { | |||
1570 | 1572 | ||
1571 | unsigned long sas_ss_sp; | 1573 | unsigned long sas_ss_sp; |
1572 | size_t sas_ss_size; | 1574 | size_t sas_ss_size; |
1573 | int (*notifier)(void *priv); | 1575 | |
1574 | void *notifier_data; | ||
1575 | sigset_t *notifier_mask; | ||
1576 | struct callback_head *task_works; | 1576 | struct callback_head *task_works; |
1577 | 1577 | ||
1578 | struct audit_context *audit_context; | 1578 | struct audit_context *audit_context; |
@@ -1794,12 +1794,12 @@ struct task_struct { | |||
1794 | unsigned long trace_recursion; | 1794 | unsigned long trace_recursion; |
1795 | #endif /* CONFIG_TRACING */ | 1795 | #endif /* CONFIG_TRACING */ |
1796 | #ifdef CONFIG_MEMCG | 1796 | #ifdef CONFIG_MEMCG |
1797 | struct memcg_oom_info { | 1797 | struct mem_cgroup *memcg_in_oom; |
1798 | struct mem_cgroup *memcg; | 1798 | gfp_t memcg_oom_gfp_mask; |
1799 | gfp_t gfp_mask; | 1799 | int memcg_oom_order; |
1800 | int order; | 1800 | |
1801 | unsigned int may_oom:1; | 1801 | /* number of pages to reclaim on returning to userland */ |
1802 | } memcg_oom; | 1802 | unsigned int memcg_nr_pages_over_high; |
1803 | #endif | 1803 | #endif |
1804 | #ifdef CONFIG_UPROBES | 1804 | #ifdef CONFIG_UPROBES |
1805 | struct uprobe_task *utask; | 1805 | struct uprobe_task *utask; |
@@ -2464,21 +2464,29 @@ extern void ignore_signals(struct task_struct *); | |||
2464 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 2464 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
2465 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 2465 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
2466 | 2466 | ||
2467 | static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | 2467 | static inline int kernel_dequeue_signal(siginfo_t *info) |
2468 | { | 2468 | { |
2469 | unsigned long flags; | 2469 | struct task_struct *tsk = current; |
2470 | siginfo_t __info; | ||
2470 | int ret; | 2471 | int ret; |
2471 | 2472 | ||
2472 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | 2473 | spin_lock_irq(&tsk->sighand->siglock); |
2473 | ret = dequeue_signal(tsk, mask, info); | 2474 | ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); |
2474 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | 2475 | spin_unlock_irq(&tsk->sighand->siglock); |
2475 | 2476 | ||
2476 | return ret; | 2477 | return ret; |
2477 | } | 2478 | } |
2478 | 2479 | ||
2479 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, | 2480 | static inline void kernel_signal_stop(void) |
2480 | sigset_t *mask); | 2481 | { |
2481 | extern void unblock_all_signals(void); | 2482 | spin_lock_irq(¤t->sighand->siglock); |
2483 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) | ||
2484 | __set_current_state(TASK_STOPPED); | ||
2485 | spin_unlock_irq(¤t->sighand->siglock); | ||
2486 | |||
2487 | schedule(); | ||
2488 | } | ||
2489 | |||
2482 | extern void release_task(struct task_struct * p); | 2490 | extern void release_task(struct task_struct * p); |
2483 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); | 2491 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
2484 | extern int force_sigsegv(int, struct task_struct *); | 2492 | extern int force_sigsegv(int, struct task_struct *); |
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9d303b8847df..9089a2ae913d 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h | |||
@@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p) | |||
21 | return dl_prio(p->prio); | 21 | return dl_prio(p->prio); |
22 | } | 22 | } |
23 | 23 | ||
24 | static inline bool dl_time_before(u64 a, u64 b) | ||
25 | { | ||
26 | return (s64)(a - b) < 0; | ||
27 | } | ||
28 | |||
24 | #endif /* _SCHED_DEADLINE_H */ | 29 | #endif /* _SCHED_DEADLINE_H */ |
diff --git a/include/linux/scif.h b/include/linux/scif.h index 44f4f3898bbe..49a35d6edc94 100644 --- a/include/linux/scif.h +++ b/include/linux/scif.h | |||
@@ -55,6 +55,7 @@ | |||
55 | 55 | ||
56 | #include <linux/types.h> | 56 | #include <linux/types.h> |
57 | #include <linux/poll.h> | 57 | #include <linux/poll.h> |
58 | #include <linux/device.h> | ||
58 | #include <linux/scif_ioctl.h> | 59 | #include <linux/scif_ioctl.h> |
59 | 60 | ||
60 | #define SCIF_ACCEPT_SYNC 1 | 61 | #define SCIF_ACCEPT_SYNC 1 |
@@ -92,6 +93,70 @@ enum { | |||
92 | #define SCIF_PORT_RSVD 1088 | 93 | #define SCIF_PORT_RSVD 1088 |
93 | 94 | ||
94 | typedef struct scif_endpt *scif_epd_t; | 95 | typedef struct scif_endpt *scif_epd_t; |
96 | typedef struct scif_pinned_pages *scif_pinned_pages_t; | ||
97 | |||
98 | /** | ||
99 | * struct scif_range - SCIF registered range used in kernel mode | ||
100 | * @cookie: cookie used internally by SCIF | ||
101 | * @nr_pages: number of pages of PAGE_SIZE | ||
102 | * @prot_flags: R/W protection | ||
103 | * @phys_addr: Array of bus addresses | ||
104 | * @va: Array of kernel virtual addresses backed by the pages in the phys_addr | ||
105 | * array. The va is populated only when called on the host for a remote | ||
106 | * SCIF connection on MIC. This is required to support the use case of DMA | ||
107 | * between MIC and another device which is not a SCIF node e.g., an IB or | ||
108 | * ethernet NIC. | ||
109 | */ | ||
110 | struct scif_range { | ||
111 | void *cookie; | ||
112 | int nr_pages; | ||
113 | int prot_flags; | ||
114 | dma_addr_t *phys_addr; | ||
115 | void __iomem **va; | ||
116 | }; | ||
117 | |||
118 | /** | ||
119 | * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll | ||
120 | * @epd: SCIF endpoint | ||
121 | * @events: requested events | ||
122 | * @revents: returned events | ||
123 | */ | ||
124 | struct scif_pollepd { | ||
125 | scif_epd_t epd; | ||
126 | short events; | ||
127 | short revents; | ||
128 | }; | ||
129 | |||
130 | /** | ||
131 | * scif_peer_dev - representation of a peer SCIF device | ||
132 | * | ||
133 | * Peer devices show up as PCIe devices for the mgmt node but not the cards. | ||
134 | * The mgmt node discovers all the cards on the PCIe bus and informs the other | ||
135 | * cards about their peers. Upon notification of a peer a node adds a peer | ||
136 | * device to the peer bus to maintain symmetry in the way devices are | ||
137 | * discovered across all nodes in the SCIF network. | ||
138 | * | ||
139 | * @dev: underlying device | ||
140 | * @dnode - The destination node which this device will communicate with. | ||
141 | */ | ||
142 | struct scif_peer_dev { | ||
143 | struct device dev; | ||
144 | u8 dnode; | ||
145 | }; | ||
146 | |||
147 | /** | ||
148 | * scif_client - representation of a SCIF client | ||
149 | * @name: client name | ||
150 | * @probe - client method called when a peer device is registered | ||
151 | * @remove - client method called when a peer device is unregistered | ||
152 | * @si - subsys_interface used internally for implementing SCIF clients | ||
153 | */ | ||
154 | struct scif_client { | ||
155 | const char *name; | ||
156 | void (*probe)(struct scif_peer_dev *spdev); | ||
157 | void (*remove)(struct scif_peer_dev *spdev); | ||
158 | struct subsys_interface si; | ||
159 | }; | ||
95 | 160 | ||
96 | #define SCIF_OPEN_FAILED ((scif_epd_t)-1) | 161 | #define SCIF_OPEN_FAILED ((scif_epd_t)-1) |
97 | #define SCIF_REGISTER_FAILED ((off_t)-1) | 162 | #define SCIF_REGISTER_FAILED ((off_t)-1) |
@@ -345,7 +410,6 @@ int scif_close(scif_epd_t epd); | |||
345 | * Errors: | 410 | * Errors: |
346 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | 411 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor |
347 | * ECONNRESET - Connection reset by peer | 412 | * ECONNRESET - Connection reset by peer |
348 | * EFAULT - An invalid address was specified for a parameter | ||
349 | * EINVAL - flags is invalid, or len is negative | 413 | * EINVAL - flags is invalid, or len is negative |
350 | * ENODEV - The remote node is lost or existed, but is not currently in the | 414 | * ENODEV - The remote node is lost or existed, but is not currently in the |
351 | * network since it may have crashed | 415 | * network since it may have crashed |
@@ -398,7 +462,6 @@ int scif_send(scif_epd_t epd, void *msg, int len, int flags); | |||
398 | * EAGAIN - The destination node is returning from a low power state | 462 | * EAGAIN - The destination node is returning from a low power state |
399 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | 463 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor |
400 | * ECONNRESET - Connection reset by peer | 464 | * ECONNRESET - Connection reset by peer |
401 | * EFAULT - An invalid address was specified for a parameter | ||
402 | * EINVAL - flags is invalid, or len is negative | 465 | * EINVAL - flags is invalid, or len is negative |
403 | * ENODEV - The remote node is lost or existed, but is not currently in the | 466 | * ENODEV - The remote node is lost or existed, but is not currently in the |
404 | * network since it may have crashed | 467 | * network since it may have crashed |
@@ -461,9 +524,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); | |||
461 | * SCIF_PROT_READ - allow read operations from the window | 524 | * SCIF_PROT_READ - allow read operations from the window |
462 | * SCIF_PROT_WRITE - allow write operations to the window | 525 | * SCIF_PROT_WRITE - allow write operations to the window |
463 | * | 526 | * |
464 | * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a | ||
465 | * fixed offset. | ||
466 | * | ||
467 | * Return: | 527 | * Return: |
468 | * Upon successful completion, scif_register() returns the offset at which the | 528 | * Upon successful completion, scif_register() returns the offset at which the |
469 | * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that | 529 | * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that |
@@ -476,7 +536,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); | |||
476 | * EAGAIN - The mapping could not be performed due to lack of resources | 536 | * EAGAIN - The mapping could not be performed due to lack of resources |
477 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | 537 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor |
478 | * ECONNRESET - Connection reset by peer | 538 | * ECONNRESET - Connection reset by peer |
479 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
480 | * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is | 539 | * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is |
481 | * set in flags, and offset is not a multiple of the page size, or addr is not a | 540 | * set in flags, and offset is not a multiple of the page size, or addr is not a |
482 | * multiple of the page size, or len is not a multiple of the page size, or is | 541 | * multiple of the page size, or len is not a multiple of the page size, or is |
@@ -759,7 +818,6 @@ int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t | |||
759 | * EACCESS - Attempt to write to a read-only range | 818 | * EACCESS - Attempt to write to a read-only range |
760 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | 819 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor |
761 | * ECONNRESET - Connection reset by peer | 820 | * ECONNRESET - Connection reset by peer |
762 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
763 | * EINVAL - rma_flags is invalid | 821 | * EINVAL - rma_flags is invalid |
764 | * ENODEV - The remote node is lost or existed, but is not currently in the | 822 | * ENODEV - The remote node is lost or existed, but is not currently in the |
765 | * network since it may have crashed | 823 | * network since it may have crashed |
@@ -840,7 +898,6 @@ int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, | |||
840 | * EACCESS - Attempt to write to a read-only range | 898 | * EACCESS - Attempt to write to a read-only range |
841 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | 899 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor |
842 | * ECONNRESET - Connection reset by peer | 900 | * ECONNRESET - Connection reset by peer |
843 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
844 | * EINVAL - rma_flags is invalid | 901 | * EINVAL - rma_flags is invalid |
845 | * ENODEV - The remote node is lost or existed, but is not currently in the | 902 | * ENODEV - The remote node is lost or existed, but is not currently in the |
846 | * network since it may have crashed | 903 | * network since it may have crashed |
@@ -984,10 +1041,299 @@ int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, | |||
984 | * online nodes in the SCIF network including 'self'; otherwise in user mode | 1041 | * online nodes in the SCIF network including 'self'; otherwise in user mode |
985 | * -1 is returned and errno is set to indicate the error; in kernel mode no | 1042 | * -1 is returned and errno is set to indicate the error; in kernel mode no |
986 | * errors are returned. | 1043 | * errors are returned. |
1044 | */ | ||
1045 | int scif_get_node_ids(u16 *nodes, int len, u16 *self); | ||
1046 | |||
1047 | /** | ||
1048 | * scif_pin_pages() - Pin a set of pages | ||
1049 | * @addr: Virtual address of range to pin | ||
1050 | * @len: Length of range to pin | ||
1051 | * @prot_flags: Page protection flags | ||
1052 | * @map_flags: Page classification flags | ||
1053 | * @pinned_pages: Handle to pinned pages | ||
1054 | * | ||
1055 | * scif_pin_pages() pins (locks in physical memory) the physical pages which | ||
1056 | * back the range of virtual address pages starting at addr and continuing for | ||
1057 | * len bytes. addr and len are constrained to be multiples of the page size. A | ||
1058 | * successful scif_pin_pages() call returns a handle to pinned_pages which may | ||
1059 | * be used in subsequent calls to scif_register_pinned_pages(). | ||
1060 | * | ||
1061 | * The pages will remain pinned as long as there is a reference against the | ||
1062 | * scif_pinned_pages_t value returned by scif_pin_pages() and until | ||
1063 | * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A | ||
1064 | * reference is added to a scif_pinned_pages_t value each time a window is | ||
1065 | * created by calling scif_register_pinned_pages() and passing the | ||
1066 | * scif_pinned_pages_t value. A reference is removed from a | ||
1067 | * scif_pinned_pages_t value each time such a window is deleted. | ||
1068 | * | ||
1069 | * Subsequent operations which change the memory pages to which virtual | ||
1070 | * addresses are mapped (such as mmap(), munmap()) have no effect on the | ||
1071 | * scif_pinned_pages_t value or windows created against it. | ||
1072 | * | ||
1073 | * If the process will fork(), it is recommended that the registered | ||
1074 | * virtual address range be marked with MADV_DONTFORK. Doing so will prevent | ||
1075 | * problems due to copy-on-write semantics. | ||
1076 | * | ||
1077 | * The prot_flags argument is formed by OR'ing together one or more of the | ||
1078 | * following values. | ||
1079 | * SCIF_PROT_READ - allow read operations against the pages | ||
1080 | * SCIF_PROT_WRITE - allow write operations against the pages | ||
1081 | * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a | ||
1082 | * kernel space address. By default, addr is interpreted as a user space | ||
1083 | * address. | ||
1084 | * | ||
1085 | * Return: | ||
1086 | * Upon successful completion, scif_pin_pages() returns 0; otherwise the | ||
1087 | * negative of one of the following errors is returned. | ||
987 | * | 1088 | * |
988 | * Errors: | 1089 | * Errors: |
989 | * EFAULT - Bad address | 1090 | * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative |
1091 | * ENOMEM - Not enough space | ||
990 | */ | 1092 | */ |
991 | int scif_get_node_ids(u16 *nodes, int len, u16 *self); | 1093 | int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags, |
1094 | scif_pinned_pages_t *pinned_pages); | ||
1095 | |||
1096 | /** | ||
1097 | * scif_unpin_pages() - Unpin a set of pages | ||
1098 | * @pinned_pages: Handle to pinned pages to be unpinned | ||
1099 | * | ||
1100 | * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new | ||
1101 | * windows against pinned_pages. The physical pages represented by pinned_pages | ||
1102 | * will remain pinned until all windows previously registered against | ||
1103 | * pinned_pages are deleted (the window is scif_unregister()'d and all | ||
1104 | * references to the window are removed (see scif_unregister()). | ||
1105 | * | ||
1106 | * pinned_pages must have been obtain from a previous call to scif_pin_pages(). | ||
1107 | * After calling scif_unpin_pages(), it is an error to pass pinned_pages to | ||
1108 | * scif_register_pinned_pages(). | ||
1109 | * | ||
1110 | * Return: | ||
1111 | * Upon successful completion, scif_unpin_pages() returns 0; otherwise the | ||
1112 | * negative of one of the following errors is returned. | ||
1113 | * | ||
1114 | * Errors: | ||
1115 | * EINVAL - pinned_pages is not valid | ||
1116 | */ | ||
1117 | int scif_unpin_pages(scif_pinned_pages_t pinned_pages); | ||
1118 | |||
1119 | /** | ||
1120 | * scif_register_pinned_pages() - Mark a memory region for remote access. | ||
1121 | * @epd: endpoint descriptor | ||
1122 | * @pinned_pages: Handle to pinned pages | ||
1123 | * @offset: Registered address space offset | ||
1124 | * @map_flags: Flags which control where pages are mapped | ||
1125 | * | ||
1126 | * The scif_register_pinned_pages() function opens a window, a range of whole | ||
1127 | * pages of the registered address space of the endpoint epd, starting at | ||
1128 | * offset po. The value of po, further described below, is a function of the | ||
1129 | * parameters offset and pinned_pages, and the value of map_flags. Each page of | ||
1130 | * the window represents a corresponding physical memory page of the range | ||
1131 | * represented by pinned_pages; the length of the window is the same as the | ||
1132 | * length of range represented by pinned_pages. A successful | ||
1133 | * scif_register_pinned_pages() call returns po as the return value. | ||
1134 | * | ||
1135 | * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset | ||
1136 | * exactly, and offset is constrained to be a multiple of the page size. The | ||
1137 | * mapping established by scif_register_pinned_pages() will not replace any | ||
1138 | * existing registration; an error is returned if any page of the new window | ||
1139 | * would intersect an existing window. | ||
1140 | * | ||
1141 | * When SCIF_MAP_FIXED is not set, the implementation uses offset in an | ||
1142 | * implementation-defined manner to arrive at po. The po so chosen will be an | ||
1143 | * area of the registered address space that the implementation deems suitable | ||
1144 | * for a mapping of the required size. An offset value of 0 is interpreted as | ||
1145 | * granting the implementation complete freedom in selecting po, subject to | ||
1146 | * constraints described below. A non-zero value of offset is taken to be a | ||
1147 | * suggestion of an offset near which the mapping should be placed. When the | ||
1148 | * implementation selects a value for po, it does not replace any extant | ||
1149 | * window. In all cases, po will be a multiple of the page size. | ||
1150 | * | ||
1151 | * The physical pages which are so represented by a window are available for | ||
1152 | * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), | ||
1153 | * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the | ||
1154 | * physical pages represented by the window will not be reused by the memory | ||
1155 | * subsystem for any other purpose. Note that the same physical page may be | ||
1156 | * represented by multiple windows. | ||
1157 | * | ||
1158 | * Windows created by scif_register_pinned_pages() are unregistered by | ||
1159 | * scif_unregister(). | ||
1160 | * | ||
1161 | * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a | ||
1162 | * fixed offset. | ||
1163 | * | ||
1164 | * Return: | ||
1165 | * Upon successful completion, scif_register_pinned_pages() returns the offset | ||
1166 | * at which the mapping was placed (po); otherwise the negative of one of the | ||
1167 | * following errors is returned. | ||
1168 | * | ||
1169 | * Errors: | ||
1170 | * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window | ||
1171 | * would intersect an existing window | ||
1172 | * EAGAIN - The mapping could not be performed due to lack of resources | ||
1173 | * ECONNRESET - Connection reset by peer | ||
1174 | * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and | ||
1175 | * offset is not a multiple of the page size, or offset is negative | ||
1176 | * ENODEV - The remote node is lost or existed, but is not currently in the | ||
1177 | * network since it may have crashed | ||
1178 | * ENOMEM - Not enough space | ||
1179 | * ENOTCONN - The endpoint is not connected | ||
1180 | */ | ||
1181 | off_t scif_register_pinned_pages(scif_epd_t epd, | ||
1182 | scif_pinned_pages_t pinned_pages, | ||
1183 | off_t offset, int map_flags); | ||
1184 | |||
1185 | /** | ||
1186 | * scif_get_pages() - Add references to remote registered pages | ||
1187 | * @epd: endpoint descriptor | ||
1188 | * @offset: remote registered offset | ||
1189 | * @len: length of range of pages | ||
1190 | * @pages: returned scif_range structure | ||
1191 | * | ||
1192 | * scif_get_pages() returns the addresses of the physical pages represented by | ||
1193 | * those pages of the registered address space of the peer of epd, starting at | ||
1194 | * offset and continuing for len bytes. offset and len are constrained to be | ||
1195 | * multiples of the page size. | ||
1196 | * | ||
1197 | * All of the pages in the specified range [offset, offset + len - 1] must be | ||
1198 | * within a single window of the registered address space of the peer of epd. | ||
1199 | * | ||
1200 | * The addresses are returned as a virtually contiguous array pointed to by the | ||
1201 | * phys_addr component of the scif_range structure whose address is returned in | ||
1202 | * pages. The nr_pages component of scif_range is the length of the array. The | ||
1203 | * prot_flags component of scif_range holds the protection flag value passed | ||
1204 | * when the pages were registered. | ||
1205 | * | ||
1206 | * Each physical page whose address is returned by scif_get_pages() remains | ||
1207 | * available and will not be released for reuse until the scif_range structure | ||
1208 | * is returned in a call to scif_put_pages(). The scif_range structure returned | ||
1209 | * by scif_get_pages() must be unmodified. | ||
1210 | * | ||
1211 | * It is an error to call scif_close() on an endpoint on which a scif_range | ||
1212 | * structure of that endpoint has not been returned to scif_put_pages(). | ||
1213 | * | ||
1214 | * Return: | ||
1215 | * Upon successful completion, scif_get_pages() returns 0; otherwise the | ||
1216 | * negative of one of the following errors is returned. | ||
1217 | * Errors: | ||
1218 | * ECONNRESET - Connection reset by peer. | ||
1219 | * EINVAL - offset is not a multiple of the page size, or offset is negative, or | ||
1220 | * len is not a multiple of the page size | ||
1221 | * ENODEV - The remote node is lost or existed, but is not currently in the | ||
1222 | * network since it may have crashed | ||
1223 | * ENOTCONN - The endpoint is not connected | ||
1224 | * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid | ||
1225 | * for the registered address space of the peer epd | ||
1226 | */ | ||
1227 | int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, | ||
1228 | struct scif_range **pages); | ||
1229 | |||
1230 | /** | ||
1231 | * scif_put_pages() - Remove references from remote registered pages | ||
1232 | * @pages: pages to be returned | ||
1233 | * | ||
1234 | * scif_put_pages() releases a scif_range structure previously obtained by | ||
1235 | * calling scif_get_pages(). The physical pages represented by pages may | ||
1236 | * be reused when the window which represented those pages is unregistered. | ||
1237 | * Therefore, those pages must not be accessed after calling scif_put_pages(). | ||
1238 | * | ||
1239 | * Return: | ||
1240 | * Upon successful completion, scif_put_pages() returns 0; otherwise the | ||
1241 | * negative of one of the following errors is returned. | ||
1242 | * Errors: | ||
1243 | * EINVAL - pages does not point to a valid scif_range structure, or | ||
1244 | * the scif_range structure pointed to by pages was already returned | ||
1245 | * ENODEV - The remote node is lost or existed, but is not currently in the | ||
1246 | * network since it may have crashed | ||
1247 | * ENOTCONN - The endpoint is not connected | ||
1248 | */ | ||
1249 | int scif_put_pages(struct scif_range *pages); | ||
1250 | |||
1251 | /** | ||
1252 | * scif_poll() - Wait for some event on an endpoint | ||
1253 | * @epds: Array of endpoint descriptors | ||
1254 | * @nepds: Length of epds | ||
1255 | * @timeout: Upper limit on time for which scif_poll() will block | ||
1256 | * | ||
1257 | * scif_poll() waits for one of a set of endpoints to become ready to perform | ||
1258 | * an I/O operation. | ||
1259 | * | ||
1260 | * The epds argument specifies the endpoint descriptors to be examined and the | ||
1261 | * events of interest for each endpoint descriptor. epds is a pointer to an | ||
1262 | * array with one member for each open endpoint descriptor of interest. | ||
1263 | * | ||
1264 | * The number of items in the epds array is specified in nepds. The epd field | ||
1265 | * of scif_pollepd is an endpoint descriptor of an open endpoint. The field | ||
1266 | * events is a bitmask specifying the events which the application is | ||
1267 | * interested in. The field revents is an output parameter, filled by the | ||
1268 | * kernel with the events that actually occurred. The bits returned in revents | ||
1269 | * can include any of those specified in events, or one of the values POLLERR, | ||
1270 | * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events | ||
1271 | * field, and will be set in the revents field whenever the corresponding | ||
1272 | * condition is true.) | ||
1273 | * | ||
1274 | * If none of the events requested (and no error) has occurred for any of the | ||
1275 | * endpoint descriptors, then scif_poll() blocks until one of the events occurs. | ||
1276 | * | ||
1277 | * The timeout argument specifies an upper limit on the time for which | ||
1278 | * scif_poll() will block, in milliseconds. Specifying a negative value in | ||
1279 | * timeout means an infinite timeout. | ||
1280 | * | ||
1281 | * The following bits may be set in events and returned in revents. | ||
1282 | * POLLIN - Data may be received without blocking. For a connected | ||
1283 | * endpoint, this means that scif_recv() may be called without blocking. For a | ||
1284 | * listening endpoint, this means that scif_accept() may be called without | ||
1285 | * blocking. | ||
1286 | * POLLOUT - Data may be sent without blocking. For a connected endpoint, this | ||
1287 | * means that scif_send() may be called without blocking. POLLOUT may also be | ||
1288 | * used to block waiting for a non-blocking connect to complete. This bit value | ||
1289 | * has no meaning for a listening endpoint and is ignored if specified. | ||
1290 | * | ||
1291 | * The following bits are only returned in revents, and are ignored if set in | ||
1292 | * events. | ||
1293 | * POLLERR - An error occurred on the endpoint | ||
1294 | * POLLHUP - The connection to the peer endpoint was disconnected | ||
1295 | * POLLNVAL - The specified endpoint descriptor is invalid. | ||
1296 | * | ||
1297 | * Return: | ||
1298 | * Upon successful completion, scif_poll() returns a non-negative value. A | ||
1299 | * positive value indicates the total number of endpoint descriptors that have | ||
1300 | * been selected (that is, endpoint descriptors for which the revents member is | ||
1301 | * non-zero). A value of 0 indicates that the call timed out and no endpoint | ||
1302 | * descriptors have been selected. Otherwise in user mode -1 is returned and | ||
1303 | * errno is set to indicate the error; in kernel mode the negative of one of | ||
1304 | * the following errors is returned. | ||
1305 | * | ||
1306 | * Errors: | ||
1307 | * EINTR - A signal occurred before any requested event | ||
1308 | * EINVAL - The nepds argument is greater than {OPEN_MAX} | ||
1309 | * ENOMEM - There was no space to allocate file descriptor tables | ||
1310 | */ | ||
1311 | int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); | ||
1312 | |||
1313 | /** | ||
1314 | * scif_client_register() - Register a SCIF client | ||
1315 | * @client: client to be registered | ||
1316 | * | ||
1317 | * scif_client_register() registers a SCIF client. The probe() method | ||
1318 | * of the client is called when SCIF peer devices come online and the | ||
1319 | * remove() method is called when the peer devices disappear. | ||
1320 | * | ||
1321 | * Return: | ||
1322 | * Upon successful completion, scif_client_register() returns a non-negative | ||
1323 | * value. Otherwise the return value is the same as subsys_interface_register() | ||
1324 | * in the kernel. | ||
1325 | */ | ||
1326 | int scif_client_register(struct scif_client *client); | ||
1327 | |||
1328 | /** | ||
1329 | * scif_client_unregister() - Unregister a SCIF client | ||
1330 | * @client: client to be unregistered | ||
1331 | * | ||
1332 | * scif_client_unregister() unregisters a SCIF client. | ||
1333 | * | ||
1334 | * Return: | ||
1335 | * None | ||
1336 | */ | ||
1337 | void scif_client_unregister(struct scif_client *client); | ||
992 | 1338 | ||
993 | #endif /* __SCIF_H__ */ | 1339 | #endif /* __SCIF_H__ */ |
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h new file mode 100644 index 000000000000..80af3cd35ae4 --- /dev/null +++ b/include/linux/scpi_protocol.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * SCPI Message Protocol driver header | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | struct scpi_opp { | ||
21 | u32 freq; | ||
22 | u32 m_volt; | ||
23 | } __packed; | ||
24 | |||
25 | struct scpi_dvfs_info { | ||
26 | unsigned int count; | ||
27 | unsigned int latency; /* in nanoseconds */ | ||
28 | struct scpi_opp *opps; | ||
29 | }; | ||
30 | |||
31 | enum scpi_sensor_class { | ||
32 | TEMPERATURE, | ||
33 | VOLTAGE, | ||
34 | CURRENT, | ||
35 | POWER, | ||
36 | }; | ||
37 | |||
38 | struct scpi_sensor_info { | ||
39 | u16 sensor_id; | ||
40 | u8 class; | ||
41 | u8 trigger_type; | ||
42 | char name[20]; | ||
43 | } __packed; | ||
44 | |||
45 | /** | ||
46 | * struct scpi_ops - represents the various operations provided | ||
47 | * by SCP through SCPI message protocol | ||
48 | * @get_version: returns the major and minor revision on the SCPI | ||
49 | * message protocol | ||
50 | * @clk_get_range: gets clock range limit(min - max in Hz) | ||
51 | * @clk_get_val: gets clock value(in Hz) | ||
52 | * @clk_set_val: sets the clock value, setting to 0 will disable the | ||
53 | * clock (if supported) | ||
54 | * @dvfs_get_idx: gets the Operating Point of the given power domain. | ||
55 | * OPP is an index to the list return by @dvfs_get_info | ||
56 | * @dvfs_set_idx: sets the Operating Point of the given power domain. | ||
57 | * OPP is an index to the list return by @dvfs_get_info | ||
58 | * @dvfs_get_info: returns the DVFS capabilities of the given power | ||
59 | * domain. It includes the OPP list and the latency information | ||
60 | */ | ||
61 | struct scpi_ops { | ||
62 | u32 (*get_version)(void); | ||
63 | int (*clk_get_range)(u16, unsigned long *, unsigned long *); | ||
64 | unsigned long (*clk_get_val)(u16); | ||
65 | int (*clk_set_val)(u16, unsigned long); | ||
66 | int (*dvfs_get_idx)(u8); | ||
67 | int (*dvfs_set_idx)(u8, u8); | ||
68 | struct scpi_dvfs_info *(*dvfs_get_info)(u8); | ||
69 | int (*sensor_get_capability)(u16 *sensors); | ||
70 | int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); | ||
71 | int (*sensor_get_value)(u16, u32 *); | ||
72 | }; | ||
73 | |||
74 | #if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) | ||
75 | struct scpi_ops *get_scpi_ops(void); | ||
76 | #else | ||
77 | static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } | ||
78 | #endif | ||
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index f4265039a94c..2296e6b2f690 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
@@ -95,4 +95,15 @@ static inline void get_seccomp_filter(struct task_struct *tsk) | |||
95 | return; | 95 | return; |
96 | } | 96 | } |
97 | #endif /* CONFIG_SECCOMP_FILTER */ | 97 | #endif /* CONFIG_SECCOMP_FILTER */ |
98 | |||
99 | #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) | ||
100 | extern long seccomp_get_filter(struct task_struct *task, | ||
101 | unsigned long filter_off, void __user *data); | ||
102 | #else | ||
103 | static inline long seccomp_get_filter(struct task_struct *task, | ||
104 | unsigned long n, void __user *data) | ||
105 | { | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ | ||
98 | #endif /* _LINUX_SECCOMP_H */ | 109 | #endif /* _LINUX_SECCOMP_H */ |
diff --git a/include/linux/signal.h b/include/linux/signal.h index ab1e0392b5ac..92557bbce7e7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); | |||
239 | extern void set_current_blocked(sigset_t *); | 239 | extern void set_current_blocked(sigset_t *); |
240 | extern void __set_current_blocked(const sigset_t *); | 240 | extern void __set_current_blocked(const sigset_t *); |
241 | extern int show_unhandled_signals; | 241 | extern int show_unhandled_signals; |
242 | extern int sigsuspend(sigset_t *); | ||
243 | 242 | ||
244 | struct sigaction { | 243 | struct sigaction { |
245 | #ifndef __ARCH_HAS_IRIX_SIGACTION | 244 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2b0a30a6e31c..4355129fff91 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -463,6 +463,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
463 | return delta_us; | 463 | return delta_us; |
464 | } | 464 | } |
465 | 465 | ||
466 | static inline bool skb_mstamp_after(const struct skb_mstamp *t1, | ||
467 | const struct skb_mstamp *t0) | ||
468 | { | ||
469 | s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; | ||
470 | |||
471 | if (!diff) | ||
472 | diff = t1->stamp_us - t0->stamp_us; | ||
473 | return diff > 0; | ||
474 | } | ||
466 | 475 | ||
467 | /** | 476 | /** |
468 | * struct sk_buff - socket buffer | 477 | * struct sk_buff - socket buffer |
@@ -1215,7 +1224,7 @@ static inline int skb_cloned(const struct sk_buff *skb) | |||
1215 | 1224 | ||
1216 | static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) | 1225 | static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) |
1217 | { | 1226 | { |
1218 | might_sleep_if(pri & __GFP_WAIT); | 1227 | might_sleep_if(gfpflags_allow_blocking(pri)); |
1219 | 1228 | ||
1220 | if (skb_cloned(skb)) | 1229 | if (skb_cloned(skb)) |
1221 | return pskb_expand_head(skb, 0, 0, pri); | 1230 | return pskb_expand_head(skb, 0, 0, pri); |
@@ -1299,7 +1308,7 @@ static inline int skb_shared(const struct sk_buff *skb) | |||
1299 | */ | 1308 | */ |
1300 | static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) | 1309 | static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) |
1301 | { | 1310 | { |
1302 | might_sleep_if(pri & __GFP_WAIT); | 1311 | might_sleep_if(gfpflags_allow_blocking(pri)); |
1303 | if (skb_shared(skb)) { | 1312 | if (skb_shared(skb)) { |
1304 | struct sk_buff *nskb = skb_clone(skb, pri); | 1313 | struct sk_buff *nskb = skb_clone(skb, pri); |
1305 | 1314 | ||
@@ -1335,7 +1344,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) | |||
1335 | static inline struct sk_buff *skb_unshare(struct sk_buff *skb, | 1344 | static inline struct sk_buff *skb_unshare(struct sk_buff *skb, |
1336 | gfp_t pri) | 1345 | gfp_t pri) |
1337 | { | 1346 | { |
1338 | might_sleep_if(pri & __GFP_WAIT); | 1347 | might_sleep_if(gfpflags_allow_blocking(pri)); |
1339 | if (skb_cloned(skb)) { | 1348 | if (skb_cloned(skb)) { |
1340 | struct sk_buff *nskb = skb_copy(skb, pri); | 1349 | struct sk_buff *nskb = skb_copy(skb, pri); |
1341 | 1350 | ||
@@ -2708,7 +2717,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, | |||
2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 2717 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | 2718 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); |
2710 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | 2719 | else if (skb->ip_summed == CHECKSUM_PARTIAL && |
2711 | skb_checksum_start_offset(skb) <= len) | 2720 | skb_checksum_start_offset(skb) < 0) |
2712 | skb->ip_summed = CHECKSUM_NONE; | 2721 | skb->ip_summed = CHECKSUM_NONE; |
2713 | } | 2722 | } |
2714 | 2723 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 7e37d448ed91..2037a861e367 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -111,7 +111,7 @@ struct mem_cgroup; | |||
111 | * struct kmem_cache related prototypes | 111 | * struct kmem_cache related prototypes |
112 | */ | 112 | */ |
113 | void __init kmem_cache_init(void); | 113 | void __init kmem_cache_init(void); |
114 | int slab_is_available(void); | 114 | bool slab_is_available(void); |
115 | 115 | ||
116 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | 116 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, |
117 | unsigned long, | 117 | unsigned long, |
@@ -158,6 +158,24 @@ size_t ksize(const void *); | |||
158 | #endif | 158 | #endif |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
162 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
163 | * aligned buffers. | ||
164 | */ | ||
165 | #ifndef ARCH_SLAB_MINALIGN | ||
166 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
167 | #endif | ||
168 | |||
169 | /* | ||
170 | * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned | ||
171 | * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN | ||
172 | * aligned pointers. | ||
173 | */ | ||
174 | #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) | ||
175 | #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) | ||
176 | #define __assume_page_alignment __assume_aligned(PAGE_SIZE) | ||
177 | |||
178 | /* | ||
161 | * Kmalloc array related definitions | 179 | * Kmalloc array related definitions |
162 | */ | 180 | */ |
163 | 181 | ||
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size) | |||
286 | } | 304 | } |
287 | #endif /* !CONFIG_SLOB */ | 305 | #endif /* !CONFIG_SLOB */ |
288 | 306 | ||
289 | void *__kmalloc(size_t size, gfp_t flags); | 307 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; |
290 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); | 308 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; |
291 | void kmem_cache_free(struct kmem_cache *, void *); | 309 | void kmem_cache_free(struct kmem_cache *, void *); |
292 | 310 | ||
293 | /* | 311 | /* |
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *); | |||
298 | * Note that interrupts must be enabled when calling these functions. | 316 | * Note that interrupts must be enabled when calling these functions. |
299 | */ | 317 | */ |
300 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 318 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
301 | bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 319 | int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
302 | 320 | ||
303 | #ifdef CONFIG_NUMA | 321 | #ifdef CONFIG_NUMA |
304 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 322 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; |
305 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 323 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; |
306 | #else | 324 | #else |
307 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | 325 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) |
308 | { | 326 | { |
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | |||
316 | #endif | 334 | #endif |
317 | 335 | ||
318 | #ifdef CONFIG_TRACING | 336 | #ifdef CONFIG_TRACING |
319 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | 337 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; |
320 | 338 | ||
321 | #ifdef CONFIG_NUMA | 339 | #ifdef CONFIG_NUMA |
322 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | 340 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
323 | gfp_t gfpflags, | 341 | gfp_t gfpflags, |
324 | int node, size_t size); | 342 | int node, size_t size) __assume_slab_alignment; |
325 | #else | 343 | #else |
326 | static __always_inline void * | 344 | static __always_inline void * |
327 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | 345 | kmem_cache_alloc_node_trace(struct kmem_cache *s, |
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
354 | } | 372 | } |
355 | #endif /* CONFIG_TRACING */ | 373 | #endif /* CONFIG_TRACING */ |
356 | 374 | ||
357 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); | 375 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
358 | 376 | ||
359 | #ifdef CONFIG_TRACING | 377 | #ifdef CONFIG_TRACING |
360 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | 378 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
361 | #else | 379 | #else |
362 | static __always_inline void * | 380 | static __always_inline void * |
363 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | 381 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
482 | return __kmalloc_node(size, flags, node); | 500 | return __kmalloc_node(size, flags, node); |
483 | } | 501 | } |
484 | 502 | ||
485 | /* | ||
486 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
487 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
488 | * aligned buffers. | ||
489 | */ | ||
490 | #ifndef ARCH_SLAB_MINALIGN | ||
491 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
492 | #endif | ||
493 | |||
494 | struct memcg_cache_array { | 503 | struct memcg_cache_array { |
495 | struct rcu_head rcu; | 504 | struct rcu_head rcu; |
496 | struct kmem_cache *entries[0]; | 505 | struct kmem_cache *entries[0]; |
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index e6109a6cd8f6..12910cf19869 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h | |||
@@ -24,9 +24,6 @@ struct smpboot_thread_data; | |||
24 | * parked (cpu offline) | 24 | * parked (cpu offline) |
25 | * @unpark: Optional unpark function, called when the thread is | 25 | * @unpark: Optional unpark function, called when the thread is |
26 | * unparked (cpu online) | 26 | * unparked (cpu online) |
27 | * @pre_unpark: Optional unpark function, called before the thread is | ||
28 | * unparked (cpu online). This is not guaranteed to be | ||
29 | * called on the target cpu of the thread. Careful! | ||
30 | * @cpumask: Internal state. To update which threads are unparked, | 27 | * @cpumask: Internal state. To update which threads are unparked, |
31 | * call smpboot_update_cpumask_percpu_thread(). | 28 | * call smpboot_update_cpumask_percpu_thread(). |
32 | * @selfparking: Thread is not parked by the park function. | 29 | * @selfparking: Thread is not parked by the park function. |
@@ -42,7 +39,6 @@ struct smp_hotplug_thread { | |||
42 | void (*cleanup)(unsigned int cpu, bool online); | 39 | void (*cleanup)(unsigned int cpu, bool online); |
43 | void (*park)(unsigned int cpu); | 40 | void (*park)(unsigned int cpu); |
44 | void (*unpark)(unsigned int cpu); | 41 | void (*unpark)(unsigned int cpu); |
45 | void (*pre_unpark)(unsigned int cpu); | ||
46 | cpumask_var_t cpumask; | 42 | cpumask_var_t cpumask; |
47 | bool selfparking; | 43 | bool selfparking; |
48 | const char *thread_comm; | 44 | const char *thread_comm; |
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h new file mode 100644 index 000000000000..337ce414e898 --- /dev/null +++ b/include/linux/soc/brcmstb/brcmstb.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __BRCMSTB_SOC_H | ||
2 | #define __BRCMSTB_SOC_H | ||
3 | |||
4 | /* | ||
5 | * Bus Interface Unit control register setup, must happen early during boot, | ||
6 | * before SMP is brought up, called by machine entry point. | ||
7 | */ | ||
8 | void brcmstb_biuctrl_init(void); | ||
9 | |||
10 | #endif /* __BRCMSTB_SOC_H */ | ||
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d7e50aa6a4ac..d0cb6d189a0a 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h | |||
@@ -9,6 +9,14 @@ struct qcom_smd_channel; | |||
9 | struct qcom_smd_lookup; | 9 | struct qcom_smd_lookup; |
10 | 10 | ||
11 | /** | 11 | /** |
12 | * struct qcom_smd_id - struct used for matching a smd device | ||
13 | * @name: name of the channel | ||
14 | */ | ||
15 | struct qcom_smd_id { | ||
16 | char name[20]; | ||
17 | }; | ||
18 | |||
19 | /** | ||
12 | * struct qcom_smd_device - smd device struct | 20 | * struct qcom_smd_device - smd device struct |
13 | * @dev: the device struct | 21 | * @dev: the device struct |
14 | * @channel: handle to the smd channel for this device | 22 | * @channel: handle to the smd channel for this device |
@@ -21,6 +29,7 @@ struct qcom_smd_device { | |||
21 | /** | 29 | /** |
22 | * struct qcom_smd_driver - smd driver struct | 30 | * struct qcom_smd_driver - smd driver struct |
23 | * @driver: underlying device driver | 31 | * @driver: underlying device driver |
32 | * @smd_match_table: static channel match table | ||
24 | * @probe: invoked when the smd channel is found | 33 | * @probe: invoked when the smd channel is found |
25 | * @remove: invoked when the smd channel is closed | 34 | * @remove: invoked when the smd channel is closed |
26 | * @callback: invoked when an inbound message is received on the channel, | 35 | * @callback: invoked when an inbound message is received on the channel, |
@@ -29,6 +38,8 @@ struct qcom_smd_device { | |||
29 | */ | 38 | */ |
30 | struct qcom_smd_driver { | 39 | struct qcom_smd_driver { |
31 | struct device_driver driver; | 40 | struct device_driver driver; |
41 | const struct qcom_smd_id *smd_match_table; | ||
42 | |||
32 | int (*probe)(struct qcom_smd_device *dev); | 43 | int (*probe)(struct qcom_smd_device *dev); |
33 | void (*remove)(struct qcom_smd_device *dev); | 44 | void (*remove)(struct qcom_smd_device *dev); |
34 | int (*callback)(struct qcom_smd_device *, const void *, size_t); | 45 | int (*callback)(struct qcom_smd_device *, const void *, size_t); |
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index bc9630d3aced..785e196ee2ca 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #define QCOM_SMEM_HOST_ANY -1 | 4 | #define QCOM_SMEM_HOST_ANY -1 |
5 | 5 | ||
6 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size); | 6 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size); |
7 | int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); | 7 | void *qcom_smem_get(unsigned host, unsigned item, size_t *size); |
8 | 8 | ||
9 | int qcom_smem_get_free_space(unsigned host); | 9 | int qcom_smem_get_free_space(unsigned host); |
10 | 10 | ||
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 6d36dacec4ba..9ec4c147abbc 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h | |||
@@ -23,7 +23,6 @@ struct dma_chan; | |||
23 | 23 | ||
24 | /* device.platform_data for SSP controller devices */ | 24 | /* device.platform_data for SSP controller devices */ |
25 | struct pxa2xx_spi_master { | 25 | struct pxa2xx_spi_master { |
26 | u32 clock_enable; | ||
27 | u16 num_chipselect; | 26 | u16 num_chipselect; |
28 | u8 enable_dma; | 27 | u8 enable_dma; |
29 | 28 | ||
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6b00f18f5e6b..cce80e6dc7d1 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -51,6 +51,8 @@ extern struct bus_type spi_bus_type; | |||
51 | * @bytes_tx: number of bytes sent to device | 51 | * @bytes_tx: number of bytes sent to device |
52 | * @bytes_rx: number of bytes received from device | 52 | * @bytes_rx: number of bytes received from device |
53 | * | 53 | * |
54 | * @transfer_bytes_histo: | ||
55 | * transfer bytes histogramm | ||
54 | */ | 56 | */ |
55 | struct spi_statistics { | 57 | struct spi_statistics { |
56 | spinlock_t lock; /* lock for the whole structure */ | 58 | spinlock_t lock; /* lock for the whole structure */ |
@@ -68,6 +70,8 @@ struct spi_statistics { | |||
68 | unsigned long long bytes_rx; | 70 | unsigned long long bytes_rx; |
69 | unsigned long long bytes_tx; | 71 | unsigned long long bytes_tx; |
70 | 72 | ||
73 | #define SPI_STATISTICS_HISTO_SIZE 17 | ||
74 | unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; | ||
71 | }; | 75 | }; |
72 | 76 | ||
73 | void spi_statistics_add_transfer_stats(struct spi_statistics *stats, | 77 | void spi_statistics_add_transfer_stats(struct spi_statistics *stats, |
@@ -250,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv) | |||
250 | return drv ? container_of(drv, struct spi_driver, driver) : NULL; | 254 | return drv ? container_of(drv, struct spi_driver, driver) : NULL; |
251 | } | 255 | } |
252 | 256 | ||
253 | extern int spi_register_driver(struct spi_driver *sdrv); | 257 | extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); |
254 | 258 | ||
255 | /** | 259 | /** |
256 | * spi_unregister_driver - reverse effect of spi_register_driver | 260 | * spi_unregister_driver - reverse effect of spi_register_driver |
@@ -263,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
263 | driver_unregister(&sdrv->driver); | 267 | driver_unregister(&sdrv->driver); |
264 | } | 268 | } |
265 | 269 | ||
270 | /* use a define to avoid include chaining to get THIS_MODULE */ | ||
271 | #define spi_register_driver(driver) \ | ||
272 | __spi_register_driver(THIS_MODULE, driver) | ||
273 | |||
266 | /** | 274 | /** |
267 | * module_spi_driver() - Helper macro for registering a SPI driver | 275 | * module_spi_driver() - Helper macro for registering a SPI driver |
268 | * @__spi_driver: spi_driver struct | 276 | * @__spi_driver: spi_driver struct |
@@ -843,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master); | |||
843 | * @len: data buffer size | 851 | * @len: data buffer size |
844 | * Context: can sleep | 852 | * Context: can sleep |
845 | * | 853 | * |
846 | * This writes the buffer and returns zero or a negative error code. | 854 | * This function writes the buffer @buf. |
847 | * Callable only from contexts that can sleep. | 855 | * Callable only from contexts that can sleep. |
856 | * | ||
857 | * Return: zero on success, else a negative error code. | ||
848 | */ | 858 | */ |
849 | static inline int | 859 | static inline int |
850 | spi_write(struct spi_device *spi, const void *buf, size_t len) | 860 | spi_write(struct spi_device *spi, const void *buf, size_t len) |
@@ -867,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len) | |||
867 | * @len: data buffer size | 877 | * @len: data buffer size |
868 | * Context: can sleep | 878 | * Context: can sleep |
869 | * | 879 | * |
870 | * This reads the buffer and returns zero or a negative error code. | 880 | * This function reads the buffer @buf. |
871 | * Callable only from contexts that can sleep. | 881 | * Callable only from contexts that can sleep. |
882 | * | ||
883 | * Return: zero on success, else a negative error code. | ||
872 | */ | 884 | */ |
873 | static inline int | 885 | static inline int |
874 | spi_read(struct spi_device *spi, void *buf, size_t len) | 886 | spi_read(struct spi_device *spi, void *buf, size_t len) |
@@ -895,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) | |||
895 | * | 907 | * |
896 | * For more specific semantics see spi_sync(). | 908 | * For more specific semantics see spi_sync(). |
897 | * | 909 | * |
898 | * It returns zero on success, else a negative error code. | 910 | * Return: Return: zero on success, else a negative error code. |
899 | */ | 911 | */ |
900 | static inline int | 912 | static inline int |
901 | spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, | 913 | spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, |
@@ -919,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi, | |||
919 | * @cmd: command to be written before data is read back | 931 | * @cmd: command to be written before data is read back |
920 | * Context: can sleep | 932 | * Context: can sleep |
921 | * | 933 | * |
922 | * This returns the (unsigned) eight bit number returned by the | 934 | * Callable only from contexts that can sleep. |
923 | * device, or else a negative error code. Callable only from | 935 | * |
924 | * contexts that can sleep. | 936 | * Return: the (unsigned) eight bit number returned by the |
937 | * device, or else a negative error code. | ||
925 | */ | 938 | */ |
926 | static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) | 939 | static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) |
927 | { | 940 | { |
@@ -940,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) | |||
940 | * @cmd: command to be written before data is read back | 953 | * @cmd: command to be written before data is read back |
941 | * Context: can sleep | 954 | * Context: can sleep |
942 | * | 955 | * |
943 | * This returns the (unsigned) sixteen bit number returned by the | ||
944 | * device, or else a negative error code. Callable only from | ||
945 | * contexts that can sleep. | ||
946 | * | ||
947 | * The number is returned in wire-order, which is at least sometimes | 956 | * The number is returned in wire-order, which is at least sometimes |
948 | * big-endian. | 957 | * big-endian. |
958 | * | ||
959 | * Callable only from contexts that can sleep. | ||
960 | * | ||
961 | * Return: the (unsigned) sixteen bit number returned by the | ||
962 | * device, or else a negative error code. | ||
949 | */ | 963 | */ |
950 | static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) | 964 | static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) |
951 | { | 965 | { |
@@ -964,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) | |||
964 | * @cmd: command to be written before data is read back | 978 | * @cmd: command to be written before data is read back |
965 | * Context: can sleep | 979 | * Context: can sleep |
966 | * | 980 | * |
967 | * This returns the (unsigned) sixteen bit number returned by the device in cpu | ||
968 | * endianness, or else a negative error code. Callable only from contexts that | ||
969 | * can sleep. | ||
970 | * | ||
971 | * This function is similar to spi_w8r16, with the exception that it will | 981 | * This function is similar to spi_w8r16, with the exception that it will |
972 | * convert the read 16 bit data word from big-endian to native endianness. | 982 | * convert the read 16 bit data word from big-endian to native endianness. |
973 | * | 983 | * |
984 | * Callable only from contexts that can sleep. | ||
985 | * | ||
986 | * Return: the (unsigned) sixteen bit number returned by the device in cpu | ||
987 | * endianness, or else a negative error code. | ||
974 | */ | 988 | */ |
975 | static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) | 989 | static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) |
976 | 990 | ||
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 85578d4be034..154788ed218c 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/workqueue.h> | 4 | #include <linux/workqueue.h> |
5 | 5 | ||
6 | struct spi_bitbang { | 6 | struct spi_bitbang { |
7 | spinlock_t lock; | 7 | struct mutex lock; |
8 | u8 busy; | 8 | u8 busy; |
9 | u8 use_dma; | 9 | u8 use_dma; |
10 | u8 flags; /* extra spi->mode support */ | 10 | u8 flags; /* extra spi->mode support */ |
diff --git a/include/linux/spmi.h b/include/linux/spmi.h index f84212cd3b7d..1396a255d2a2 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h | |||
@@ -153,7 +153,9 @@ static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) | |||
153 | return container_of(d, struct spmi_driver, driver); | 153 | return container_of(d, struct spmi_driver, driver); |
154 | } | 154 | } |
155 | 155 | ||
156 | int spmi_driver_register(struct spmi_driver *sdrv); | 156 | #define spmi_driver_register(sdrv) \ |
157 | __spmi_driver_register(sdrv, THIS_MODULE) | ||
158 | int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner); | ||
157 | 159 | ||
158 | /** | 160 | /** |
159 | * spmi_driver_unregister() - unregister an SPMI client driver | 161 | * spmi_driver_unregister() - unregister an SPMI client driver |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index bdeb4567b71e..f5f80c5643ac 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
215 | */ | 215 | */ |
216 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 216 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
217 | { | 217 | { |
218 | int retval = __srcu_read_lock(sp); | 218 | int retval; |
219 | 219 | ||
220 | preempt_disable(); | ||
221 | retval = __srcu_read_lock(sp); | ||
222 | preempt_enable(); | ||
220 | rcu_lock_acquire(&(sp)->dep_map); | 223 | rcu_lock_acquire(&(sp)->dep_map); |
221 | return retval; | 224 | return retval; |
222 | } | 225 | } |
diff --git a/include/linux/stm.h b/include/linux/stm.h new file mode 100644 index 000000000000..9d0083d364e6 --- /dev/null +++ b/include/linux/stm.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * System Trace Module (STM) infrastructure apis | ||
3 | * Copyright (C) 2014 Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _STM_H_ | ||
16 | #define _STM_H_ | ||
17 | |||
18 | #include <linux/device.h> | ||
19 | |||
20 | /** | ||
21 | * enum stp_packet_type - STP packets that an STM driver sends | ||
22 | */ | ||
23 | enum stp_packet_type { | ||
24 | STP_PACKET_DATA = 0, | ||
25 | STP_PACKET_FLAG, | ||
26 | STP_PACKET_USER, | ||
27 | STP_PACKET_MERR, | ||
28 | STP_PACKET_GERR, | ||
29 | STP_PACKET_TRIG, | ||
30 | STP_PACKET_XSYNC, | ||
31 | }; | ||
32 | |||
33 | /** | ||
34 | * enum stp_packet_flags - STP packet modifiers | ||
35 | */ | ||
36 | enum stp_packet_flags { | ||
37 | STP_PACKET_MARKED = 0x1, | ||
38 | STP_PACKET_TIMESTAMPED = 0x2, | ||
39 | }; | ||
40 | |||
41 | struct stp_policy; | ||
42 | |||
43 | struct stm_device; | ||
44 | |||
45 | /** | ||
46 | * struct stm_data - STM device description and callbacks | ||
47 | * @name: device name | ||
48 | * @stm: internal structure, only used by stm class code | ||
49 | * @sw_start: first STP master available to software | ||
50 | * @sw_end: last STP master available to software | ||
51 | * @sw_nchannels: number of STP channels per master | ||
52 | * @sw_mmiosz: size of one channel's IO space, for mmap, optional | ||
53 | * @packet: callback that sends an STP packet | ||
54 | * @mmio_addr: mmap callback, optional | ||
55 | * @link: called when a new stm_source gets linked to us, optional | ||
56 | * @unlink: likewise for unlinking, again optional | ||
57 | * @set_options: set device-specific options on a channel | ||
58 | * | ||
59 | * Fill out this structure before calling stm_register_device() to create | ||
60 | * an STM device and stm_unregister_device() to destroy it. It will also be | ||
61 | * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options() | ||
62 | * callbacks. | ||
63 | * | ||
64 | * Normally, an STM device will have a range of masters available to software | ||
65 | * and the rest being statically assigned to various hardware trace sources. | ||
66 | * The former is defined by the the range [@sw_start..@sw_end] of the device | ||
67 | * description. That is, the lowest master that can be allocated to software | ||
68 | * writers is @sw_start and data from this writer will appear is @sw_start | ||
69 | * master in the STP stream. | ||
70 | */ | ||
71 | struct stm_data { | ||
72 | const char *name; | ||
73 | struct stm_device *stm; | ||
74 | unsigned int sw_start; | ||
75 | unsigned int sw_end; | ||
76 | unsigned int sw_nchannels; | ||
77 | unsigned int sw_mmiosz; | ||
78 | ssize_t (*packet)(struct stm_data *, unsigned int, | ||
79 | unsigned int, unsigned int, | ||
80 | unsigned int, unsigned int, | ||
81 | const unsigned char *); | ||
82 | phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int, | ||
83 | unsigned int, unsigned int); | ||
84 | int (*link)(struct stm_data *, unsigned int, | ||
85 | unsigned int); | ||
86 | void (*unlink)(struct stm_data *, unsigned int, | ||
87 | unsigned int); | ||
88 | long (*set_options)(struct stm_data *, unsigned int, | ||
89 | unsigned int, unsigned int, | ||
90 | unsigned long); | ||
91 | }; | ||
92 | |||
93 | int stm_register_device(struct device *parent, struct stm_data *stm_data, | ||
94 | struct module *owner); | ||
95 | void stm_unregister_device(struct stm_data *stm_data); | ||
96 | |||
97 | struct stm_source_device; | ||
98 | |||
99 | /** | ||
100 | * struct stm_source_data - STM source device description and callbacks | ||
101 | * @name: device name, will be used for policy lookup | ||
102 | * @src: internal structure, only used by stm class code | ||
103 | * @nr_chans: number of channels to allocate | ||
104 | * @link: called when this source gets linked to an STM device | ||
105 | * @unlink: called when this source is about to get unlinked from its STM | ||
106 | * | ||
107 | * Fill in this structure before calling stm_source_register_device() to | ||
108 | * register a source device. Also pass it to unregister and write calls. | ||
109 | */ | ||
110 | struct stm_source_data { | ||
111 | const char *name; | ||
112 | struct stm_source_device *src; | ||
113 | unsigned int percpu; | ||
114 | unsigned int nr_chans; | ||
115 | int (*link)(struct stm_source_data *data); | ||
116 | void (*unlink)(struct stm_source_data *data); | ||
117 | }; | ||
118 | |||
119 | int stm_source_register_device(struct device *parent, | ||
120 | struct stm_source_data *data); | ||
121 | void stm_source_unregister_device(struct stm_source_data *data); | ||
122 | |||
123 | int stm_source_write(struct stm_source_data *data, unsigned int chan, | ||
124 | const char *buf, size_t count); | ||
125 | |||
126 | #endif /* _STM_H_ */ | ||
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 414d924318ce..0adedca24c5b 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h | |||
@@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, | |||
33 | struct cpu_stop_work *work_buf); | 33 | struct cpu_stop_work *work_buf); |
34 | int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); | 34 | int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); |
35 | int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); | 35 | int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); |
36 | void stop_machine_park(int cpu); | ||
37 | void stop_machine_unpark(int cpu); | ||
36 | 38 | ||
37 | #else /* CONFIG_SMP */ | 39 | #else /* CONFIG_SMP */ |
38 | 40 | ||
diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); | |||
25 | #ifndef __HAVE_ARCH_STRLCPY | 25 | #ifndef __HAVE_ARCH_STRLCPY |
26 | size_t strlcpy(char *, const char *, size_t); | 26 | size_t strlcpy(char *, const char *, size_t); |
27 | #endif | 27 | #endif |
28 | #ifndef __HAVE_ARCH_STRSCPY | ||
29 | ssize_t __must_check strscpy(char *, const char *, size_t); | ||
30 | #endif | ||
28 | #ifndef __HAVE_ARCH_STRCAT | 31 | #ifndef __HAVE_ARCH_STRCAT |
29 | extern char * strcat(char *, const char *); | 32 | extern char * strcat(char *, const char *); |
30 | #endif | 33 | #endif |
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 8df43c9f11dc..4397a4824c81 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h | |||
@@ -38,6 +38,11 @@ void xprt_free_bc_request(struct rpc_rqst *req); | |||
38 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); | 38 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); |
39 | void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); | 39 | void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); |
40 | 40 | ||
41 | /* Socket backchannel transport methods */ | ||
42 | int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); | ||
43 | void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); | ||
44 | void xprt_free_bc_rqst(struct rpc_rqst *req); | ||
45 | |||
41 | /* | 46 | /* |
42 | * Determine if a shared backchannel is in use | 47 | * Determine if a shared backchannel is in use |
43 | */ | 48 | */ |
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 03d3b4c92d9f..ed03c9f7f908 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h | |||
@@ -48,8 +48,10 @@ | |||
48 | struct cache_head { | 48 | struct cache_head { |
49 | struct hlist_node cache_list; | 49 | struct hlist_node cache_list; |
50 | time_t expiry_time; /* After time time, don't use the data */ | 50 | time_t expiry_time; /* After time time, don't use the data */ |
51 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall | 51 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall was |
52 | * was sent, else this is when update was received | 52 | * sent, else this is when update was |
53 | * received, though it is alway set to | ||
54 | * be *after* ->flush_time. | ||
53 | */ | 55 | */ |
54 | struct kref ref; | 56 | struct kref ref; |
55 | unsigned long flags; | 57 | unsigned long flags; |
@@ -105,8 +107,12 @@ struct cache_detail { | |||
105 | /* fields below this comment are for internal use | 107 | /* fields below this comment are for internal use |
106 | * and should not be touched by cache owners | 108 | * and should not be touched by cache owners |
107 | */ | 109 | */ |
108 | time_t flush_time; /* flush all cache items with last_refresh | 110 | time_t flush_time; /* flush all cache items with |
109 | * earlier than this */ | 111 | * last_refresh at or earlier |
112 | * than this. last_refresh | ||
113 | * is never set at or earlier | ||
114 | * than this. | ||
115 | */ | ||
110 | struct list_head others; | 116 | struct list_head others; |
111 | time_t nextcheck; | 117 | time_t nextcheck; |
112 | int entries; | 118 | int entries; |
@@ -203,7 +209,7 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) | |||
203 | static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) | 209 | static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) |
204 | { | 210 | { |
205 | return (h->expiry_time < seconds_since_boot()) || | 211 | return (h->expiry_time < seconds_since_boot()) || |
206 | (detail->flush_time > h->last_refresh); | 212 | (detail->flush_time >= h->last_refresh); |
207 | } | 213 | } |
208 | 214 | ||
209 | extern int cache_check(struct cache_detail *detail, | 215 | extern int cache_check(struct cache_detail *detail, |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ccc961f33e9..f869807a0d0e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
@@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge { | |||
105 | }; | 105 | }; |
106 | struct svc_rdma_fastreg_mr { | 106 | struct svc_rdma_fastreg_mr { |
107 | struct ib_mr *mr; | 107 | struct ib_mr *mr; |
108 | void *kva; | 108 | struct scatterlist *sg; |
109 | struct ib_fast_reg_page_list *page_list; | 109 | int sg_nents; |
110 | int page_list_len; | ||
111 | unsigned long access_flags; | 110 | unsigned long access_flags; |
112 | unsigned long map_len; | ||
113 | enum dma_data_direction direction; | 111 | enum dma_data_direction direction; |
114 | struct list_head frmr_list; | 112 | struct list_head frmr_list; |
115 | }; | 113 | }; |
@@ -228,9 +226,13 @@ extern void svc_rdma_put_frmr(struct svcxprt_rdma *, | |||
228 | struct svc_rdma_fastreg_mr *); | 226 | struct svc_rdma_fastreg_mr *); |
229 | extern void svc_sq_reap(struct svcxprt_rdma *); | 227 | extern void svc_sq_reap(struct svcxprt_rdma *); |
230 | extern void svc_rq_reap(struct svcxprt_rdma *); | 228 | extern void svc_rq_reap(struct svcxprt_rdma *); |
231 | extern struct svc_xprt_class svc_rdma_class; | ||
232 | extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); | 229 | extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); |
233 | 230 | ||
231 | extern struct svc_xprt_class svc_rdma_class; | ||
232 | #ifdef CONFIG_SUNRPC_BACKCHANNEL | ||
233 | extern struct svc_xprt_class svc_rdma_bc_class; | ||
234 | #endif | ||
235 | |||
234 | /* svc_rdma.c */ | 236 | /* svc_rdma.c */ |
235 | extern int svc_rdma_init(void); | 237 | extern int svc_rdma_init(void); |
236 | extern void svc_rdma_cleanup(void); | 238 | extern void svc_rdma_cleanup(void); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 0fb9acbb4780..69ef5b3ab038 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -54,6 +54,8 @@ enum rpc_display_format_t { | |||
54 | struct rpc_task; | 54 | struct rpc_task; |
55 | struct rpc_xprt; | 55 | struct rpc_xprt; |
56 | struct seq_file; | 56 | struct seq_file; |
57 | struct svc_serv; | ||
58 | struct net; | ||
57 | 59 | ||
58 | /* | 60 | /* |
59 | * This describes a complete RPC request | 61 | * This describes a complete RPC request |
@@ -136,6 +138,12 @@ struct rpc_xprt_ops { | |||
136 | int (*enable_swap)(struct rpc_xprt *xprt); | 138 | int (*enable_swap)(struct rpc_xprt *xprt); |
137 | void (*disable_swap)(struct rpc_xprt *xprt); | 139 | void (*disable_swap)(struct rpc_xprt *xprt); |
138 | void (*inject_disconnect)(struct rpc_xprt *xprt); | 140 | void (*inject_disconnect)(struct rpc_xprt *xprt); |
141 | int (*bc_setup)(struct rpc_xprt *xprt, | ||
142 | unsigned int min_reqs); | ||
143 | int (*bc_up)(struct svc_serv *serv, struct net *net); | ||
144 | void (*bc_free_rqst)(struct rpc_rqst *rqst); | ||
145 | void (*bc_destroy)(struct rpc_xprt *xprt, | ||
146 | unsigned int max_reqs); | ||
139 | }; | 147 | }; |
140 | 148 | ||
141 | /* | 149 | /* |
@@ -153,6 +161,7 @@ enum xprt_transports { | |||
153 | XPRT_TRANSPORT_TCP = IPPROTO_TCP, | 161 | XPRT_TRANSPORT_TCP = IPPROTO_TCP, |
154 | XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, | 162 | XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, |
155 | XPRT_TRANSPORT_RDMA = 256, | 163 | XPRT_TRANSPORT_RDMA = 256, |
164 | XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, | ||
156 | XPRT_TRANSPORT_LOCAL = 257, | 165 | XPRT_TRANSPORT_LOCAL = 257, |
157 | }; | 166 | }; |
158 | 167 | ||
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 357e44c1a46b..0ece4ba06f06 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
@@ -44,6 +44,8 @@ struct sock_xprt { | |||
44 | */ | 44 | */ |
45 | unsigned long sock_state; | 45 | unsigned long sock_state; |
46 | struct delayed_work connect_worker; | 46 | struct delayed_work connect_worker; |
47 | struct work_struct recv_worker; | ||
48 | struct mutex recv_mutex; | ||
47 | struct sockaddr_storage srcaddr; | 49 | struct sockaddr_storage srcaddr; |
48 | unsigned short srcport; | 50 | unsigned short srcport; |
49 | 51 | ||
diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h new file mode 100644 index 000000000000..7e75bb0346d0 --- /dev/null +++ b/include/linux/sunxi-rsb.h | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Allwinner Reduced Serial Bus Driver | ||
3 | * | ||
4 | * Copyright (c) 2015 Chen-Yu Tsai | ||
5 | * | ||
6 | * Author: Chen-Yu Tsai <wens@csie.org> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | #ifndef _SUNXI_RSB_H | ||
13 | #define _SUNXI_RSB_H | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/regmap.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | struct sunxi_rsb; | ||
20 | |||
21 | /** | ||
22 | * struct sunxi_rsb_device - Basic representation of an RSB device | ||
23 | * @dev: Driver model representation of the device. | ||
24 | * @ctrl: RSB controller managing the bus hosting this device. | ||
25 | * @rtaddr: This device's runtime address | ||
26 | * @hwaddr: This device's hardware address | ||
27 | */ | ||
28 | struct sunxi_rsb_device { | ||
29 | struct device dev; | ||
30 | struct sunxi_rsb *rsb; | ||
31 | int irq; | ||
32 | u8 rtaddr; | ||
33 | u16 hwaddr; | ||
34 | }; | ||
35 | |||
36 | static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d) | ||
37 | { | ||
38 | return container_of(d, struct sunxi_rsb_device, dev); | ||
39 | } | ||
40 | |||
41 | static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev) | ||
42 | { | ||
43 | return dev_get_drvdata(&rdev->dev); | ||
44 | } | ||
45 | |||
46 | static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, | ||
47 | void *data) | ||
48 | { | ||
49 | dev_set_drvdata(&rdev->dev, data); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * struct sunxi_rsb_driver - RSB slave device driver | ||
54 | * @driver: RSB device drivers should initialize name and owner field of | ||
55 | * this structure. | ||
56 | * @probe: binds this driver to a RSB device. | ||
57 | * @remove: unbinds this driver from the RSB device. | ||
58 | */ | ||
59 | struct sunxi_rsb_driver { | ||
60 | struct device_driver driver; | ||
61 | int (*probe)(struct sunxi_rsb_device *rdev); | ||
62 | int (*remove)(struct sunxi_rsb_device *rdev); | ||
63 | }; | ||
64 | |||
65 | static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) | ||
66 | { | ||
67 | return container_of(d, struct sunxi_rsb_driver, driver); | ||
68 | } | ||
69 | |||
70 | int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv); | ||
71 | |||
72 | /** | ||
73 | * sunxi_rsb_driver_unregister() - unregister an RSB client driver | ||
74 | * @rdrv: the driver to unregister | ||
75 | */ | ||
76 | static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv) | ||
77 | { | ||
78 | if (rdrv) | ||
79 | driver_unregister(&rdrv->driver); | ||
80 | } | ||
81 | |||
82 | #define module_sunxi_rsb_driver(__sunxi_rsb_driver) \ | ||
83 | module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \ | ||
84 | sunxi_rsb_driver_unregister) | ||
85 | |||
86 | struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev, | ||
87 | const struct regmap_config *config, | ||
88 | struct lock_class_key *lock_key, | ||
89 | const char *lock_name); | ||
90 | |||
91 | /** | ||
92 | * devm_regmap_init_sunxi_rsb(): Initialise managed register map | ||
93 | * | ||
94 | * @rdev: Device that will be interacted with | ||
95 | * @config: Configuration for register map | ||
96 | * | ||
97 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
98 | * to a struct regmap. The regmap will be automatically freed by the | ||
99 | * device management code. | ||
100 | */ | ||
101 | #define devm_regmap_init_sunxi_rsb(rdev, config) \ | ||
102 | __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \ | ||
103 | rdev, config) | ||
104 | |||
105 | #endif /* _SUNXI_RSB_H */ | ||
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5efe743ce1e8..8b6ec7ef0854 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -202,6 +202,36 @@ struct platform_freeze_ops { | |||
202 | extern void suspend_set_ops(const struct platform_suspend_ops *ops); | 202 | extern void suspend_set_ops(const struct platform_suspend_ops *ops); |
203 | extern int suspend_valid_only_mem(suspend_state_t state); | 203 | extern int suspend_valid_only_mem(suspend_state_t state); |
204 | 204 | ||
205 | extern unsigned int pm_suspend_global_flags; | ||
206 | |||
207 | #define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) | ||
208 | #define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) | ||
209 | |||
210 | static inline void pm_suspend_clear_flags(void) | ||
211 | { | ||
212 | pm_suspend_global_flags = 0; | ||
213 | } | ||
214 | |||
215 | static inline void pm_set_suspend_via_firmware(void) | ||
216 | { | ||
217 | pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND; | ||
218 | } | ||
219 | |||
220 | static inline void pm_set_resume_via_firmware(void) | ||
221 | { | ||
222 | pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; | ||
223 | } | ||
224 | |||
225 | static inline bool pm_suspend_via_firmware(void) | ||
226 | { | ||
227 | return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND); | ||
228 | } | ||
229 | |||
230 | static inline bool pm_resume_via_firmware(void) | ||
231 | { | ||
232 | return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); | ||
233 | } | ||
234 | |||
205 | /* Suspend-to-idle state machnine. */ | 235 | /* Suspend-to-idle state machnine. */ |
206 | enum freeze_state { | 236 | enum freeze_state { |
207 | FREEZE_STATE_NONE, /* Not suspended/suspending. */ | 237 | FREEZE_STATE_NONE, /* Not suspended/suspending. */ |
@@ -241,6 +271,12 @@ extern int pm_suspend(suspend_state_t state); | |||
241 | #else /* !CONFIG_SUSPEND */ | 271 | #else /* !CONFIG_SUSPEND */ |
242 | #define suspend_valid_only_mem NULL | 272 | #define suspend_valid_only_mem NULL |
243 | 273 | ||
274 | static inline void pm_suspend_clear_flags(void) {} | ||
275 | static inline void pm_set_suspend_via_firmware(void) {} | ||
276 | static inline void pm_set_resume_via_firmware(void) {} | ||
277 | static inline bool pm_suspend_via_firmware(void) { return false; } | ||
278 | static inline bool pm_resume_via_firmware(void) { return false; } | ||
279 | |||
244 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} | 280 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} |
245 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } | 281 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } |
246 | static inline bool idle_should_freeze(void) { return false; } | 282 | static inline bool idle_should_freeze(void) { return false; } |
@@ -387,10 +423,12 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
387 | 423 | ||
388 | /* drivers/base/power/wakeup.c */ | 424 | /* drivers/base/power/wakeup.c */ |
389 | extern bool events_check_enabled; | 425 | extern bool events_check_enabled; |
426 | extern unsigned int pm_wakeup_irq; | ||
390 | 427 | ||
391 | extern bool pm_wakeup_pending(void); | 428 | extern bool pm_wakeup_pending(void); |
392 | extern void pm_system_wakeup(void); | 429 | extern void pm_system_wakeup(void); |
393 | extern void pm_wakeup_clear(void); | 430 | extern void pm_wakeup_clear(void); |
431 | extern void pm_system_irq_wakeup(unsigned int irq_number); | ||
394 | extern bool pm_get_wakeup_count(unsigned int *count, bool block); | 432 | extern bool pm_get_wakeup_count(unsigned int *count, bool block); |
395 | extern bool pm_save_wakeup_count(unsigned int count); | 433 | extern bool pm_save_wakeup_count(unsigned int count); |
396 | extern void pm_wakep_autosleep_enabled(bool set); | 434 | extern void pm_wakep_autosleep_enabled(bool set); |
@@ -440,6 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) | |||
440 | static inline bool pm_wakeup_pending(void) { return false; } | 478 | static inline bool pm_wakeup_pending(void) { return false; } |
441 | static inline void pm_system_wakeup(void) {} | 479 | static inline void pm_system_wakeup(void) {} |
442 | static inline void pm_wakeup_clear(void) {} | 480 | static inline void pm_wakeup_clear(void) {} |
481 | static inline void pm_system_irq_wakeup(unsigned int irq_number) {} | ||
443 | 482 | ||
444 | static inline void lock_system_sleep(void) {} | 483 | static inline void lock_system_sleep(void) {} |
445 | static inline void unlock_system_sleep(void) {} | 484 | static inline void unlock_system_sleep(void) {} |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a460e2ef2843..a156b82dd14c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -887,4 +887,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, | |||
887 | 887 | ||
888 | asmlinkage long sys_membarrier(int cmd, int flags); | 888 | asmlinkage long sys_membarrier(int cmd, int flags); |
889 | 889 | ||
890 | asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); | ||
891 | |||
890 | #endif | 892 | #endif |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9f65758311a4..c6f0f0d0e17e 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -64,10 +64,18 @@ do { \ | |||
64 | * a new subdirectory with this name. | 64 | * a new subdirectory with this name. |
65 | * @is_visible: Optional: Function to return permissions associated with an | 65 | * @is_visible: Optional: Function to return permissions associated with an |
66 | * attribute of the group. Will be called repeatedly for each | 66 | * attribute of the group. Will be called repeatedly for each |
67 | * attribute in the group. Only read/write permissions as well as | 67 | * non-binary attribute in the group. Only read/write |
68 | * SYSFS_PREALLOC are accepted. Must return 0 if an attribute is | 68 | * permissions as well as SYSFS_PREALLOC are accepted. Must |
69 | * not visible. The returned value will replace static permissions | 69 | * return 0 if an attribute is not visible. The returned value |
70 | * defined in struct attribute or struct bin_attribute. | 70 | * will replace static permissions defined in struct attribute. |
71 | * @is_bin_visible: | ||
72 | * Optional: Function to return permissions associated with a | ||
73 | * binary attribute of the group. Will be called repeatedly | ||
74 | * for each binary attribute in the group. Only read/write | ||
75 | * permissions as well as SYSFS_PREALLOC are accepted. Must | ||
76 | * return 0 if a binary attribute is not visible. The returned | ||
77 | * value will replace static permissions defined in | ||
78 | * struct bin_attribute. | ||
71 | * @attrs: Pointer to NULL terminated list of attributes. | 79 | * @attrs: Pointer to NULL terminated list of attributes. |
72 | * @bin_attrs: Pointer to NULL terminated list of binary attributes. | 80 | * @bin_attrs: Pointer to NULL terminated list of binary attributes. |
73 | * Either attrs or bin_attrs or both must be provided. | 81 | * Either attrs or bin_attrs or both must be provided. |
@@ -76,6 +84,8 @@ struct attribute_group { | |||
76 | const char *name; | 84 | const char *name; |
77 | umode_t (*is_visible)(struct kobject *, | 85 | umode_t (*is_visible)(struct kobject *, |
78 | struct attribute *, int); | 86 | struct attribute *, int); |
87 | umode_t (*is_bin_visible)(struct kobject *, | ||
88 | struct bin_attribute *, int); | ||
79 | struct attribute **attrs; | 89 | struct attribute **attrs; |
80 | struct bin_attribute **bin_attrs; | 90 | struct bin_attribute **bin_attrs; |
81 | }; | 91 | }; |
@@ -268,6 +278,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, | |||
268 | struct kobject *target, const char *link_name); | 278 | struct kobject *target, const char *link_name); |
269 | void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, | 279 | void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, |
270 | const char *link_name); | 280 | const char *link_name); |
281 | int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, | ||
282 | struct kobject *target_kobj, | ||
283 | const char *target_name); | ||
271 | 284 | ||
272 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); | 285 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); |
273 | 286 | ||
@@ -451,6 +464,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, | |||
451 | { | 464 | { |
452 | } | 465 | } |
453 | 466 | ||
467 | static inline int __compat_only_sysfs_link_entry_to_kobj( | ||
468 | struct kobject *kobj, | ||
469 | struct kobject *target_kobj, | ||
470 | const char *target_name) | ||
471 | { | ||
472 | return 0; | ||
473 | } | ||
474 | |||
454 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, | 475 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, |
455 | const char *attr) | 476 | const char *attr) |
456 | { | 477 | { |
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 6a8b9942632d..dd8de82cf5b5 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h | |||
@@ -14,9 +14,9 @@ struct t10_pi_tuple { | |||
14 | }; | 14 | }; |
15 | 15 | ||
16 | 16 | ||
17 | extern struct blk_integrity t10_pi_type1_crc; | 17 | extern struct blk_integrity_profile t10_pi_type1_crc; |
18 | extern struct blk_integrity t10_pi_type1_ip; | 18 | extern struct blk_integrity_profile t10_pi_type1_ip; |
19 | extern struct blk_integrity t10_pi_type3_crc; | 19 | extern struct blk_integrity_profile t10_pi_type3_crc; |
20 | extern struct blk_integrity t10_pi_type3_ip; | 20 | extern struct blk_integrity_profile t10_pi_type3_ip; |
21 | 21 | ||
22 | #endif | 22 | #endif |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 48c3696e8645..b386361ba3e8 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -112,10 +112,11 @@ struct tcp_request_sock_ops; | |||
112 | struct tcp_request_sock { | 112 | struct tcp_request_sock { |
113 | struct inet_request_sock req; | 113 | struct inet_request_sock req; |
114 | const struct tcp_request_sock_ops *af_specific; | 114 | const struct tcp_request_sock_ops *af_specific; |
115 | struct skb_mstamp snt_synack; /* first SYNACK sent time */ | ||
115 | bool tfo_listener; | 116 | bool tfo_listener; |
117 | u32 txhash; | ||
116 | u32 rcv_isn; | 118 | u32 rcv_isn; |
117 | u32 snt_isn; | 119 | u32 snt_isn; |
118 | u32 snt_synack; /* synack sent time */ | ||
119 | u32 last_oow_ack_time; /* last SYNACK */ | 120 | u32 last_oow_ack_time; /* last SYNACK */ |
120 | u32 rcv_nxt; /* the ack # by SYNACK. For | 121 | u32 rcv_nxt; /* the ack # by SYNACK. For |
121 | * FastOpen it's the seq# | 122 | * FastOpen it's the seq# |
@@ -193,6 +194,12 @@ struct tcp_sock { | |||
193 | u32 window_clamp; /* Maximal window to advertise */ | 194 | u32 window_clamp; /* Maximal window to advertise */ |
194 | u32 rcv_ssthresh; /* Current window clamp */ | 195 | u32 rcv_ssthresh; /* Current window clamp */ |
195 | 196 | ||
197 | /* Information of the most recently (s)acked skb */ | ||
198 | struct tcp_rack { | ||
199 | struct skb_mstamp mstamp; /* (Re)sent time of the skb */ | ||
200 | u8 advanced; /* mstamp advanced since last lost marking */ | ||
201 | u8 reord; /* reordering detected */ | ||
202 | } rack; | ||
196 | u16 advmss; /* Advertised MSS */ | 203 | u16 advmss; /* Advertised MSS */ |
197 | u8 unused; | 204 | u8 unused; |
198 | u8 nonagle : 4,/* Disable Nagle algorithm? */ | 205 | u8 nonagle : 4,/* Disable Nagle algorithm? */ |
@@ -216,6 +223,9 @@ struct tcp_sock { | |||
216 | u32 mdev_max_us; /* maximal mdev for the last rtt period */ | 223 | u32 mdev_max_us; /* maximal mdev for the last rtt period */ |
217 | u32 rttvar_us; /* smoothed mdev_max */ | 224 | u32 rttvar_us; /* smoothed mdev_max */ |
218 | u32 rtt_seq; /* sequence number to update rttvar */ | 225 | u32 rtt_seq; /* sequence number to update rttvar */ |
226 | struct rtt_meas { | ||
227 | u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */ | ||
228 | } rtt_min[3]; | ||
219 | 229 | ||
220 | u32 packets_out; /* Packets which are "in flight" */ | 230 | u32 packets_out; /* Packets which are "in flight" */ |
221 | u32 retrans_out; /* Retransmitted packets out */ | 231 | u32 retrans_out; /* Retransmitted packets out */ |
@@ -279,8 +289,6 @@ struct tcp_sock { | |||
279 | int lost_cnt_hint; | 289 | int lost_cnt_hint; |
280 | u32 retransmit_high; /* L-bits may be on up to this seqno */ | 290 | u32 retransmit_high; /* L-bits may be on up to this seqno */ |
281 | 291 | ||
282 | u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ | ||
283 | |||
284 | u32 prior_ssthresh; /* ssthresh saved at recovery start */ | 292 | u32 prior_ssthresh; /* ssthresh saved at recovery start */ |
285 | u32 high_seq; /* snd_nxt at onset of congestion */ | 293 | u32 high_seq; /* snd_nxt at onset of congestion */ |
286 | 294 | ||
@@ -355,8 +363,8 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) | |||
355 | 363 | ||
356 | struct tcp_timewait_sock { | 364 | struct tcp_timewait_sock { |
357 | struct inet_timewait_sock tw_sk; | 365 | struct inet_timewait_sock tw_sk; |
358 | u32 tw_rcv_nxt; | 366 | #define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt |
359 | u32 tw_snd_nxt; | 367 | #define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt |
360 | u32 tw_rcv_wnd; | 368 | u32 tw_rcv_wnd; |
361 | u32 tw_ts_offset; | 369 | u32 tw_ts_offset; |
362 | u32 tw_ts_recent; | 370 | u32 tw_ts_recent; |
@@ -381,25 +389,19 @@ static inline bool tcp_passive_fastopen(const struct sock *sk) | |||
381 | tcp_sk(sk)->fastopen_rsk != NULL); | 389 | tcp_sk(sk)->fastopen_rsk != NULL); |
382 | } | 390 | } |
383 | 391 | ||
384 | extern void tcp_sock_destruct(struct sock *sk); | 392 | static inline void fastopen_queue_tune(struct sock *sk, int backlog) |
393 | { | ||
394 | struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; | ||
395 | int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn); | ||
396 | |||
397 | queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); | ||
398 | } | ||
385 | 399 | ||
386 | static inline int fastopen_init_queue(struct sock *sk, int backlog) | 400 | static inline void tcp_move_syn(struct tcp_sock *tp, |
401 | struct request_sock *req) | ||
387 | { | 402 | { |
388 | struct request_sock_queue *queue = | 403 | tp->saved_syn = req->saved_syn; |
389 | &inet_csk(sk)->icsk_accept_queue; | 404 | req->saved_syn = NULL; |
390 | |||
391 | if (queue->fastopenq == NULL) { | ||
392 | queue->fastopenq = kzalloc( | ||
393 | sizeof(struct fastopen_queue), | ||
394 | sk->sk_allocation); | ||
395 | if (queue->fastopenq == NULL) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | sk->sk_destruct = tcp_sock_destruct; | ||
399 | spin_lock_init(&queue->fastopenq->lock); | ||
400 | } | ||
401 | queue->fastopenq->max_qlen = backlog; | ||
402 | return 0; | ||
403 | } | 405 | } |
404 | 406 | ||
405 | static inline void tcp_saved_syn_free(struct tcp_sock *tp) | 407 | static inline void tcp_saved_syn_free(struct tcp_sock *tp) |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 157d366e761b..4014a59828fc 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -44,9 +44,11 @@ | |||
44 | #define THERMAL_WEIGHT_DEFAULT 0 | 44 | #define THERMAL_WEIGHT_DEFAULT 0 |
45 | 45 | ||
46 | /* Unit conversion macros */ | 46 | /* Unit conversion macros */ |
47 | #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ | 47 | #define DECI_KELVIN_TO_CELSIUS(t) ({ \ |
48 | ((long)t-2732+5)/10 : ((long)t-2732-5)/10) | 48 | long _t = (t); \ |
49 | #define CELSIUS_TO_KELVIN(t) ((t)*10+2732) | 49 | ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \ |
50 | }) | ||
51 | #define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732) | ||
50 | #define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100) | 52 | #define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100) |
51 | #define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732) | 53 | #define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732) |
52 | #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) | 54 | #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) |
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index d4217eff489f..0a0d56834c8e 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h | |||
@@ -158,6 +158,7 @@ struct st_data_s { | |||
158 | unsigned long ll_state; | 158 | unsigned long ll_state; |
159 | void *kim_data; | 159 | void *kim_data; |
160 | struct tty_struct *tty; | 160 | struct tty_struct *tty; |
161 | struct work_struct work_write_wakeup; | ||
161 | }; | 162 | }; |
162 | 163 | ||
163 | /* | 164 | /* |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ba0ae09cbb21..ec89d846324c 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
@@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); | |||
263 | /* | 263 | /* |
264 | * PPS accessor | 264 | * PPS accessor |
265 | */ | 265 | */ |
266 | extern void getnstime_raw_and_real(struct timespec *ts_raw, | 266 | extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, |
267 | struct timespec *ts_real); | 267 | struct timespec64 *ts_real); |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * Persistent clock related interfaces | 270 | * Persistent clock related interfaces |
diff --git a/include/linux/timex.h b/include/linux/timex.h index 9d3f1a5b6178..39c25dbebfe8 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ | |||
152 | #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) | 152 | #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) |
153 | 153 | ||
154 | extern int do_adjtimex(struct timex *); | 154 | extern int do_adjtimex(struct timex *); |
155 | extern void hardpps(const struct timespec *, const struct timespec *); | 155 | extern void hardpps(const struct timespec64 *, const struct timespec64 *); |
156 | 156 | ||
157 | int read_current_timer(unsigned long *timer_val); | 157 | int read_current_timer(unsigned long *timer_val); |
158 | void ntp_notify_cmos_timer(void); | 158 | void ntp_notify_cmos_timer(void); |
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 8350c538b486..706e63eea080 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h | |||
@@ -30,6 +30,8 @@ | |||
30 | #define TPM_ANY_NUM 0xFFFF | 30 | #define TPM_ANY_NUM 0xFFFF |
31 | 31 | ||
32 | struct tpm_chip; | 32 | struct tpm_chip; |
33 | struct trusted_key_payload; | ||
34 | struct trusted_key_options; | ||
33 | 35 | ||
34 | struct tpm_class_ops { | 36 | struct tpm_class_ops { |
35 | const u8 req_complete_mask; | 37 | const u8 req_complete_mask; |
@@ -46,11 +48,22 @@ struct tpm_class_ops { | |||
46 | 48 | ||
47 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) | 49 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) |
48 | 50 | ||
51 | extern int tpm_is_tpm2(u32 chip_num); | ||
49 | extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); | 52 | extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); |
50 | extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); | 53 | extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); |
51 | extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); | 54 | extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); |
52 | extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); | 55 | extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); |
56 | extern int tpm_seal_trusted(u32 chip_num, | ||
57 | struct trusted_key_payload *payload, | ||
58 | struct trusted_key_options *options); | ||
59 | extern int tpm_unseal_trusted(u32 chip_num, | ||
60 | struct trusted_key_payload *payload, | ||
61 | struct trusted_key_options *options); | ||
53 | #else | 62 | #else |
63 | static inline int tpm_is_tpm2(u32 chip_num) | ||
64 | { | ||
65 | return -ENODEV; | ||
66 | } | ||
54 | static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { | 67 | static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { |
55 | return -ENODEV; | 68 | return -ENODEV; |
56 | } | 69 | } |
@@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { | |||
63 | static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { | 76 | static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { |
64 | return -ENODEV; | 77 | return -ENODEV; |
65 | } | 78 | } |
79 | |||
80 | static inline int tpm_seal_trusted(u32 chip_num, | ||
81 | struct trusted_key_payload *payload, | ||
82 | struct trusted_key_options *options) | ||
83 | { | ||
84 | return -ENODEV; | ||
85 | } | ||
86 | static inline int tpm_unseal_trusted(u32 chip_num, | ||
87 | struct trusted_key_payload *payload, | ||
88 | struct trusted_key_options *options) | ||
89 | { | ||
90 | return -ENODEV; | ||
91 | } | ||
66 | #endif | 92 | #endif |
67 | #endif | 93 | #endif |
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ed27917cabc9..429fdfc3baf5 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h | |||
@@ -168,13 +168,12 @@ struct ring_buffer_event * | |||
168 | trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, | 168 | trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, |
169 | int type, unsigned long len, | 169 | int type, unsigned long len, |
170 | unsigned long flags, int pc); | 170 | unsigned long flags, int pc); |
171 | void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, | 171 | void trace_buffer_unlock_commit(struct trace_array *tr, |
172 | struct ring_buffer_event *event, | 172 | struct ring_buffer *buffer, |
173 | unsigned long flags, int pc); | ||
174 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | ||
175 | struct ring_buffer_event *event, | 173 | struct ring_buffer_event *event, |
176 | unsigned long flags, int pc); | 174 | unsigned long flags, int pc); |
177 | void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, | 175 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
176 | struct ring_buffer *buffer, | ||
178 | struct ring_buffer_event *event, | 177 | struct ring_buffer_event *event, |
179 | unsigned long flags, int pc, | 178 | unsigned long flags, int pc, |
180 | struct pt_regs *regs); | 179 | struct pt_regs *regs); |
@@ -329,6 +328,7 @@ enum { | |||
329 | EVENT_FILE_FL_SOFT_DISABLED_BIT, | 328 | EVENT_FILE_FL_SOFT_DISABLED_BIT, |
330 | EVENT_FILE_FL_TRIGGER_MODE_BIT, | 329 | EVENT_FILE_FL_TRIGGER_MODE_BIT, |
331 | EVENT_FILE_FL_TRIGGER_COND_BIT, | 330 | EVENT_FILE_FL_TRIGGER_COND_BIT, |
331 | EVENT_FILE_FL_PID_FILTER_BIT, | ||
332 | }; | 332 | }; |
333 | 333 | ||
334 | /* | 334 | /* |
@@ -342,6 +342,7 @@ enum { | |||
342 | * tracepoint may be enabled) | 342 | * tracepoint may be enabled) |
343 | * TRIGGER_MODE - When set, invoke the triggers associated with the event | 343 | * TRIGGER_MODE - When set, invoke the triggers associated with the event |
344 | * TRIGGER_COND - When set, one or more triggers has an associated filter | 344 | * TRIGGER_COND - When set, one or more triggers has an associated filter |
345 | * PID_FILTER - When set, the event is filtered based on pid | ||
345 | */ | 346 | */ |
346 | enum { | 347 | enum { |
347 | EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), | 348 | EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), |
@@ -352,6 +353,7 @@ enum { | |||
352 | EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), | 353 | EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), |
353 | EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), | 354 | EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), |
354 | EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), | 355 | EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), |
356 | EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), | ||
355 | }; | 357 | }; |
356 | 358 | ||
357 | struct trace_event_file { | 359 | struct trace_event_file { |
@@ -430,6 +432,8 @@ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file | |||
430 | extern void event_triggers_post_call(struct trace_event_file *file, | 432 | extern void event_triggers_post_call(struct trace_event_file *file, |
431 | enum event_trigger_type tt); | 433 | enum event_trigger_type tt); |
432 | 434 | ||
435 | bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); | ||
436 | |||
433 | /** | 437 | /** |
434 | * trace_trigger_soft_disabled - do triggers and test if soft disabled | 438 | * trace_trigger_soft_disabled - do triggers and test if soft disabled |
435 | * @file: The file pointer of the event to test | 439 | * @file: The file pointer of the event to test |
@@ -449,6 +453,8 @@ trace_trigger_soft_disabled(struct trace_event_file *file) | |||
449 | event_triggers_call(file, NULL); | 453 | event_triggers_call(file, NULL); |
450 | if (eflags & EVENT_FILE_FL_SOFT_DISABLED) | 454 | if (eflags & EVENT_FILE_FL_SOFT_DISABLED) |
451 | return true; | 455 | return true; |
456 | if (eflags & EVENT_FILE_FL_PID_FILTER) | ||
457 | return trace_event_ignore_this_pid(file); | ||
452 | } | 458 | } |
453 | return false; | 459 | return false; |
454 | } | 460 | } |
@@ -508,7 +514,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, | |||
508 | enum event_trigger_type tt = ETT_NONE; | 514 | enum event_trigger_type tt = ETT_NONE; |
509 | 515 | ||
510 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) | 516 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) |
511 | trace_buffer_unlock_commit(buffer, event, irq_flags, pc); | 517 | trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); |
512 | 518 | ||
513 | if (tt) | 519 | if (tt) |
514 | event_triggers_post_call(file, tt); | 520 | event_triggers_post_call(file, tt); |
@@ -540,7 +546,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file, | |||
540 | enum event_trigger_type tt = ETT_NONE; | 546 | enum event_trigger_type tt = ETT_NONE; |
541 | 547 | ||
542 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) | 548 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) |
543 | trace_buffer_unlock_commit_regs(buffer, event, | 549 | trace_buffer_unlock_commit_regs(file->tr, buffer, event, |
544 | irq_flags, pc, regs); | 550 | irq_flags, pc, regs); |
545 | 551 | ||
546 | if (tt) | 552 | if (tt) |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 84d497297c5f..26c152122a42 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/ptrace.h> | 50 | #include <linux/ptrace.h> |
51 | #include <linux/security.h> | 51 | #include <linux/security.h> |
52 | #include <linux/task_work.h> | 52 | #include <linux/task_work.h> |
53 | #include <linux/memcontrol.h> | ||
53 | struct linux_binprm; | 54 | struct linux_binprm; |
54 | 55 | ||
55 | /* | 56 | /* |
@@ -188,6 +189,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) | |||
188 | smp_mb__after_atomic(); | 189 | smp_mb__after_atomic(); |
189 | if (unlikely(current->task_works)) | 190 | if (unlikely(current->task_works)) |
190 | task_work_run(); | 191 | task_work_run(); |
192 | |||
193 | mem_cgroup_handle_over_high(); | ||
191 | } | 194 | } |
192 | 195 | ||
193 | #endif /* <linux/tracehook.h> */ | 196 | #endif /* <linux/tracehook.h> */ |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a5f7f3ecafa3..696a339c592c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -26,6 +26,7 @@ struct notifier_block; | |||
26 | struct tracepoint_func { | 26 | struct tracepoint_func { |
27 | void *func; | 27 | void *func; |
28 | void *data; | 28 | void *data; |
29 | int prio; | ||
29 | }; | 30 | }; |
30 | 31 | ||
31 | struct tracepoint { | 32 | struct tracepoint { |
@@ -42,9 +43,14 @@ struct trace_enum_map { | |||
42 | unsigned long enum_value; | 43 | unsigned long enum_value; |
43 | }; | 44 | }; |
44 | 45 | ||
46 | #define TRACEPOINT_DEFAULT_PRIO 10 | ||
47 | |||
45 | extern int | 48 | extern int |
46 | tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); | 49 | tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); |
47 | extern int | 50 | extern int |
51 | tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, | ||
52 | int prio); | ||
53 | extern int | ||
48 | tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); | 54 | tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); |
49 | extern void | 55 | extern void |
50 | for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), | 56 | for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), |
@@ -111,7 +117,18 @@ extern void syscall_unregfunc(void); | |||
111 | #define TP_ARGS(args...) args | 117 | #define TP_ARGS(args...) args |
112 | #define TP_CONDITION(args...) args | 118 | #define TP_CONDITION(args...) args |
113 | 119 | ||
114 | #ifdef CONFIG_TRACEPOINTS | 120 | /* |
121 | * Individual subsystem my have a separate configuration to | ||
122 | * enable their tracepoints. By default, this file will create | ||
123 | * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem | ||
124 | * wants to be able to disable its tracepoints from being created | ||
125 | * it can define NOTRACE before including the tracepoint headers. | ||
126 | */ | ||
127 | #if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) | ||
128 | #define TRACEPOINTS_ENABLED | ||
129 | #endif | ||
130 | |||
131 | #ifdef TRACEPOINTS_ENABLED | ||
115 | 132 | ||
116 | /* | 133 | /* |
117 | * it_func[0] is never NULL because there is at least one element in the array | 134 | * it_func[0] is never NULL because there is at least one element in the array |
@@ -167,10 +184,11 @@ extern void syscall_unregfunc(void); | |||
167 | * structure. Force alignment to the same alignment as the section start. | 184 | * structure. Force alignment to the same alignment as the section start. |
168 | * | 185 | * |
169 | * When lockdep is enabled, we make sure to always do the RCU portions of | 186 | * When lockdep is enabled, we make sure to always do the RCU portions of |
170 | * the tracepoint code, regardless of whether tracing is on or we match the | 187 | * the tracepoint code, regardless of whether tracing is on. However, |
171 | * condition. This lets us find RCU issues triggered with tracepoints even | 188 | * don't check if the condition is false, due to interaction with idle |
172 | * when this tracepoint is off. This code has no purpose other than poking | 189 | * instrumentation. This lets us find RCU issues triggered with tracepoints |
173 | * RCU a bit. | 190 | * even when this tracepoint is off. This code has no purpose other than |
191 | * poking RCU a bit. | ||
174 | */ | 192 | */ |
175 | #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ | 193 | #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ |
176 | extern struct tracepoint __tracepoint_##name; \ | 194 | extern struct tracepoint __tracepoint_##name; \ |
@@ -196,6 +214,13 @@ extern void syscall_unregfunc(void); | |||
196 | (void *)probe, data); \ | 214 | (void *)probe, data); \ |
197 | } \ | 215 | } \ |
198 | static inline int \ | 216 | static inline int \ |
217 | register_trace_prio_##name(void (*probe)(data_proto), void *data,\ | ||
218 | int prio) \ | ||
219 | { \ | ||
220 | return tracepoint_probe_register_prio(&__tracepoint_##name, \ | ||
221 | (void *)probe, data, prio); \ | ||
222 | } \ | ||
223 | static inline int \ | ||
199 | unregister_trace_##name(void (*probe)(data_proto), void *data) \ | 224 | unregister_trace_##name(void (*probe)(data_proto), void *data) \ |
200 | { \ | 225 | { \ |
201 | return tracepoint_probe_unregister(&__tracepoint_##name,\ | 226 | return tracepoint_probe_unregister(&__tracepoint_##name,\ |
@@ -234,7 +259,7 @@ extern void syscall_unregfunc(void); | |||
234 | #define EXPORT_TRACEPOINT_SYMBOL(name) \ | 259 | #define EXPORT_TRACEPOINT_SYMBOL(name) \ |
235 | EXPORT_SYMBOL(__tracepoint_##name) | 260 | EXPORT_SYMBOL(__tracepoint_##name) |
236 | 261 | ||
237 | #else /* !CONFIG_TRACEPOINTS */ | 262 | #else /* !TRACEPOINTS_ENABLED */ |
238 | #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ | 263 | #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ |
239 | static inline void trace_##name(proto) \ | 264 | static inline void trace_##name(proto) \ |
240 | { } \ | 265 | { } \ |
@@ -266,7 +291,7 @@ extern void syscall_unregfunc(void); | |||
266 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) | 291 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) |
267 | #define EXPORT_TRACEPOINT_SYMBOL(name) | 292 | #define EXPORT_TRACEPOINT_SYMBOL(name) |
268 | 293 | ||
269 | #endif /* CONFIG_TRACEPOINTS */ | 294 | #endif /* TRACEPOINTS_ENABLED */ |
270 | 295 | ||
271 | #ifdef CONFIG_TRACING | 296 | #ifdef CONFIG_TRACING |
272 | /** | 297 | /** |
diff --git a/include/linux/tty.h b/include/linux/tty.h index d072ded41678..5e31f1b99037 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -227,7 +227,6 @@ struct tty_port { | |||
227 | int blocked_open; /* Waiting to open */ | 227 | int blocked_open; /* Waiting to open */ |
228 | int count; /* Usage count */ | 228 | int count; /* Usage count */ |
229 | wait_queue_head_t open_wait; /* Open waiters */ | 229 | wait_queue_head_t open_wait; /* Open waiters */ |
230 | wait_queue_head_t close_wait; /* Close waiters */ | ||
231 | wait_queue_head_t delta_msr_wait; /* Modem status change */ | 230 | wait_queue_head_t delta_msr_wait; /* Modem status change */ |
232 | unsigned long flags; /* TTY flags ASY_*/ | 231 | unsigned long flags; /* TTY flags ASY_*/ |
233 | unsigned char console:1, /* port is a console */ | 232 | unsigned char console:1, /* port is a console */ |
@@ -424,6 +423,7 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, | |||
424 | const char *routine); | 423 | const char *routine); |
425 | extern const char *tty_name(const struct tty_struct *tty); | 424 | extern const char *tty_name(const struct tty_struct *tty); |
426 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); | 425 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); |
426 | extern int __tty_check_change(struct tty_struct *tty, int sig); | ||
427 | extern int tty_check_change(struct tty_struct *tty); | 427 | extern int tty_check_change(struct tty_struct *tty); |
428 | extern void __stop_tty(struct tty_struct *tty); | 428 | extern void __stop_tty(struct tty_struct *tty); |
429 | extern void stop_tty(struct tty_struct *tty); | 429 | extern void stop_tty(struct tty_struct *tty); |
@@ -467,6 +467,8 @@ extern void tty_buffer_free_all(struct tty_port *port); | |||
467 | extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); | 467 | extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); |
468 | extern void tty_buffer_init(struct tty_port *port); | 468 | extern void tty_buffer_init(struct tty_port *port); |
469 | extern void tty_buffer_set_lock_subclass(struct tty_port *port); | 469 | extern void tty_buffer_set_lock_subclass(struct tty_port *port); |
470 | extern bool tty_buffer_restart_work(struct tty_port *port); | ||
471 | extern bool tty_buffer_cancel_work(struct tty_port *port); | ||
470 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); | 472 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); |
471 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); | 473 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); |
472 | extern void tty_termios_encode_baud_rate(struct ktermios *termios, | 474 | extern void tty_termios_encode_baud_rate(struct ktermios *termios, |
@@ -605,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); | |||
605 | 607 | ||
606 | /* tty_audit.c */ | 608 | /* tty_audit.c */ |
607 | #ifdef CONFIG_AUDIT | 609 | #ifdef CONFIG_AUDIT |
608 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 610 | extern void tty_audit_add_data(struct tty_struct *tty, const void *data, |
609 | size_t size, unsigned icanon); | 611 | size_t size, unsigned icanon); |
610 | extern void tty_audit_exit(void); | 612 | extern void tty_audit_exit(void); |
611 | extern void tty_audit_fork(struct signal_struct *sig); | 613 | extern void tty_audit_fork(struct signal_struct *sig); |
@@ -613,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); | |||
613 | extern void tty_audit_push(struct tty_struct *tty); | 615 | extern void tty_audit_push(struct tty_struct *tty); |
614 | extern int tty_audit_push_current(void); | 616 | extern int tty_audit_push_current(void); |
615 | #else | 617 | #else |
616 | static inline void tty_audit_add_data(struct tty_struct *tty, | 618 | static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, |
617 | unsigned char *data, size_t size, unsigned icanon) | 619 | size_t size, unsigned icanon) |
618 | { | 620 | { |
619 | } | 621 | } |
620 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) | 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) |
@@ -656,50 +658,6 @@ extern void __lockfunc tty_unlock(struct tty_struct *tty); | |||
656 | extern void __lockfunc tty_lock_slave(struct tty_struct *tty); | 658 | extern void __lockfunc tty_lock_slave(struct tty_struct *tty); |
657 | extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); | 659 | extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); |
658 | extern void tty_set_lock_subclass(struct tty_struct *tty); | 660 | extern void tty_set_lock_subclass(struct tty_struct *tty); |
659 | /* | ||
660 | * this shall be called only from where BTM is held (like close) | ||
661 | * | ||
662 | * We need this to ensure nobody waits for us to finish while we are waiting. | ||
663 | * Without this we were encountering system stalls. | ||
664 | * | ||
665 | * This should be indeed removed with BTM removal later. | ||
666 | * | ||
667 | * Locking: BTM required. Nobody is allowed to hold port->mutex. | ||
668 | */ | ||
669 | static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, | ||
670 | long timeout) | ||
671 | { | ||
672 | tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ | ||
673 | tty_wait_until_sent(tty, timeout); | ||
674 | tty_lock(tty); | ||
675 | } | ||
676 | |||
677 | /* | ||
678 | * wait_event_interruptible_tty -- wait for a condition with the tty lock held | ||
679 | * | ||
680 | * The condition we are waiting for might take a long time to | ||
681 | * become true, or might depend on another thread taking the | ||
682 | * BTM. In either case, we need to drop the BTM to guarantee | ||
683 | * forward progress. This is a leftover from the conversion | ||
684 | * from the BKL and should eventually get removed as the BTM | ||
685 | * falls out of use. | ||
686 | * | ||
687 | * Do not use in new code. | ||
688 | */ | ||
689 | #define wait_event_interruptible_tty(tty, wq, condition) \ | ||
690 | ({ \ | ||
691 | int __ret = 0; \ | ||
692 | if (!(condition)) \ | ||
693 | __ret = __wait_event_interruptible_tty(tty, wq, \ | ||
694 | condition); \ | ||
695 | __ret; \ | ||
696 | }) | ||
697 | |||
698 | #define __wait_event_interruptible_tty(tty, wq, condition) \ | ||
699 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ | ||
700 | tty_unlock(tty); \ | ||
701 | schedule(); \ | ||
702 | tty_lock(tty)) | ||
703 | 661 | ||
704 | #ifdef CONFIG_PROC_FS | 662 | #ifdef CONFIG_PROC_FS |
705 | extern void proc_tty_register_driver(struct tty_driver *); | 663 | extern void proc_tty_register_driver(struct tty_driver *); |
diff --git a/include/linux/types.h b/include/linux/types.h index c314989d9158..70d8500bddf1 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -205,11 +205,25 @@ struct ustat { | |||
205 | * struct callback_head - callback structure for use with RCU and task_work | 205 | * struct callback_head - callback structure for use with RCU and task_work |
206 | * @next: next update requests in a list | 206 | * @next: next update requests in a list |
207 | * @func: actual update function to call after the grace period. | 207 | * @func: actual update function to call after the grace period. |
208 | * | ||
209 | * The struct is aligned to size of pointer. On most architectures it happens | ||
210 | * naturally due ABI requirements, but some architectures (like CRIS) have | ||
211 | * weird ABI and we need to ask it explicitly. | ||
212 | * | ||
213 | * The alignment is required to guarantee that bits 0 and 1 of @next will be | ||
214 | * clear under normal conditions -- as long as we use call_rcu(), | ||
215 | * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. | ||
216 | * | ||
217 | * This guarantee is important for few reasons: | ||
218 | * - future call_rcu_lazy() will make use of lower bits in the pointer; | ||
219 | * - the structure shares storage spacer in struct page with @compound_head, | ||
220 | * which encode PageTail() in bit 0. The guarantee is needed to avoid | ||
221 | * false-positive PageTail(). | ||
208 | */ | 222 | */ |
209 | struct callback_head { | 223 | struct callback_head { |
210 | struct callback_head *next; | 224 | struct callback_head *next; |
211 | void (*func)(struct callback_head *head); | 225 | void (*func)(struct callback_head *head); |
212 | }; | 226 | } __attribute__((aligned(sizeof(void *)))); |
213 | #define rcu_head callback_head | 227 | #define rcu_head callback_head |
214 | 228 | ||
215 | typedef void (*rcu_callback_t)(struct rcu_head *head); | 229 | typedef void (*rcu_callback_t)(struct rcu_head *head); |
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d6f2c2c5b043..558129af828a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -75,36 +75,6 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
75 | 75 | ||
76 | #endif /* ARCH_HAS_NOCACHE_UACCESS */ | 76 | #endif /* ARCH_HAS_NOCACHE_UACCESS */ |
77 | 77 | ||
78 | /** | ||
79 | * probe_kernel_address(): safely attempt to read from a location | ||
80 | * @addr: address to read from - its type is type typeof(retval)* | ||
81 | * @retval: read into this variable | ||
82 | * | ||
83 | * Safely read from address @addr into variable @revtal. If a kernel fault | ||
84 | * happens, handle that and return -EFAULT. | ||
85 | * We ensure that the __get_user() is executed in atomic context so that | ||
86 | * do_page_fault() doesn't attempt to take mmap_sem. This makes | ||
87 | * probe_kernel_address() suitable for use within regions where the caller | ||
88 | * already holds mmap_sem, or other locks which nest inside mmap_sem. | ||
89 | * This must be a macro because __get_user() needs to know the types of the | ||
90 | * args. | ||
91 | * | ||
92 | * We don't include enough header files to be able to do the set_fs(). We | ||
93 | * require that the probe_kernel_address() caller will do that. | ||
94 | */ | ||
95 | #define probe_kernel_address(addr, retval) \ | ||
96 | ({ \ | ||
97 | long ret; \ | ||
98 | mm_segment_t old_fs = get_fs(); \ | ||
99 | \ | ||
100 | set_fs(KERNEL_DS); \ | ||
101 | pagefault_disable(); \ | ||
102 | ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ | ||
103 | pagefault_enable(); \ | ||
104 | set_fs(old_fs); \ | ||
105 | ret; \ | ||
106 | }) | ||
107 | |||
108 | /* | 78 | /* |
109 | * probe_kernel_read(): safely attempt to read from a location | 79 | * probe_kernel_read(): safely attempt to read from a location |
110 | * @dst: pointer to the buffer that shall take the data | 80 | * @dst: pointer to the buffer that shall take the data |
@@ -131,4 +101,14 @@ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size | |||
131 | 101 | ||
132 | extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); | 102 | extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); |
133 | 103 | ||
104 | /** | ||
105 | * probe_kernel_address(): safely attempt to read from a location | ||
106 | * @addr: address to read from | ||
107 | * @retval: read into this variable | ||
108 | * | ||
109 | * Returns 0 on success, or -EFAULT. | ||
110 | */ | ||
111 | #define probe_kernel_address(addr, retval) \ | ||
112 | probe_kernel_read(&retval, addr, sizeof(retval)) | ||
113 | |||
134 | #endif /* __LINUX_UACCESS_H__ */ | 114 | #endif /* __LINUX_UACCESS_H__ */ |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 447fe29b55b4..b9a28074210f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -122,6 +122,8 @@ enum usb_interface_condition { | |||
122 | * has been deferred. | 122 | * has been deferred. |
123 | * @needs_binding: flag set when the driver should be re-probed or unbound | 123 | * @needs_binding: flag set when the driver should be re-probed or unbound |
124 | * following a reset or suspend operation it doesn't support. | 124 | * following a reset or suspend operation it doesn't support. |
125 | * @authorized: This allows to (de)authorize individual interfaces instead | ||
126 | * a whole device in contrast to the device authorization. | ||
125 | * @dev: driver model's view of this device | 127 | * @dev: driver model's view of this device |
126 | * @usb_dev: if an interface is bound to the USB major, this will point | 128 | * @usb_dev: if an interface is bound to the USB major, this will point |
127 | * to the sysfs representation for that device. | 129 | * to the sysfs representation for that device. |
@@ -178,6 +180,7 @@ struct usb_interface { | |||
178 | unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ | 180 | unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ |
179 | unsigned needs_binding:1; /* needs delayed unbind/rebind */ | 181 | unsigned needs_binding:1; /* needs delayed unbind/rebind */ |
180 | unsigned resetting_device:1; /* true: bandwidth alloc after reset */ | 182 | unsigned resetting_device:1; /* true: bandwidth alloc after reset */ |
183 | unsigned authorized:1; /* used for interface authorization */ | ||
181 | 184 | ||
182 | struct device dev; /* interface specific device info */ | 185 | struct device dev; /* interface specific device info */ |
183 | struct device *usb_dev; | 186 | struct device *usb_dev; |
@@ -325,6 +328,7 @@ struct usb_host_bos { | |||
325 | /* wireless cap descriptor is handled by wusb */ | 328 | /* wireless cap descriptor is handled by wusb */ |
326 | struct usb_ext_cap_descriptor *ext_cap; | 329 | struct usb_ext_cap_descriptor *ext_cap; |
327 | struct usb_ss_cap_descriptor *ss_cap; | 330 | struct usb_ss_cap_descriptor *ss_cap; |
331 | struct usb_ssp_cap_descriptor *ssp_cap; | ||
328 | struct usb_ss_container_id_descriptor *ss_id; | 332 | struct usb_ss_container_id_descriptor *ss_id; |
329 | }; | 333 | }; |
330 | 334 | ||
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h new file mode 100644 index 000000000000..b5706f94ee9e --- /dev/null +++ b/include/linux/usb/cdc.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * USB CDC common helpers | ||
3 | * | ||
4 | * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef __LINUX_USB_CDC_H | ||
11 | #define __LINUX_USB_CDC_H | ||
12 | |||
13 | #include <uapi/linux/usb/cdc.h> | ||
14 | |||
15 | /* | ||
16 | * inofficial magic numbers | ||
17 | */ | ||
18 | |||
19 | #define CDC_PHONET_MAGIC_NUMBER 0xAB | ||
20 | |||
21 | /* | ||
22 | * parsing CDC headers | ||
23 | */ | ||
24 | |||
25 | struct usb_cdc_parsed_header { | ||
26 | struct usb_cdc_union_desc *usb_cdc_union_desc; | ||
27 | struct usb_cdc_header_desc *usb_cdc_header_desc; | ||
28 | |||
29 | struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; | ||
30 | struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; | ||
31 | struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; | ||
32 | struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; | ||
33 | struct usb_cdc_ether_desc *usb_cdc_ether_desc; | ||
34 | struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; | ||
35 | struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; | ||
36 | struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; | ||
37 | struct usb_cdc_obex_desc *usb_cdc_obex_desc; | ||
38 | struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; | ||
39 | struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; | ||
40 | struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; | ||
41 | |||
42 | bool phonet_magic_present; | ||
43 | }; | ||
44 | |||
45 | struct usb_interface; | ||
46 | int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, | ||
47 | struct usb_interface *intf, | ||
48 | u8 *buffer, | ||
49 | int buflen); | ||
50 | |||
51 | #endif /* __LINUX_USB_CDC_H */ | ||
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 27603bcbb9b9..6cc96bb12ddc 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h | |||
@@ -32,9 +32,9 @@ | |||
32 | #ifndef __LINUX_USB_CH9_H | 32 | #ifndef __LINUX_USB_CH9_H |
33 | #define __LINUX_USB_CH9_H | 33 | #define __LINUX_USB_CH9_H |
34 | 34 | ||
35 | #include <linux/device.h> | ||
35 | #include <uapi/linux/usb/ch9.h> | 36 | #include <uapi/linux/usb/ch9.h> |
36 | 37 | ||
37 | |||
38 | /** | 38 | /** |
39 | * usb_speed_string() - Returns human readable-name of the speed. | 39 | * usb_speed_string() - Returns human readable-name of the speed. |
40 | * @speed: The speed to return human-readable name for. If it's not | 40 | * @speed: The speed to return human-readable name for. If it's not |
@@ -43,6 +43,15 @@ | |||
43 | */ | 43 | */ |
44 | extern const char *usb_speed_string(enum usb_device_speed speed); | 44 | extern const char *usb_speed_string(enum usb_device_speed speed); |
45 | 45 | ||
46 | /** | ||
47 | * usb_get_maximum_speed - Get maximum requested speed for a given USB | ||
48 | * controller. | ||
49 | * @dev: Pointer to the given USB controller device | ||
50 | * | ||
51 | * The function gets the maximum speed string from property "maximum-speed", | ||
52 | * and returns the corresponding enum usb_device_speed. | ||
53 | */ | ||
54 | extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); | ||
46 | 55 | ||
47 | /** | 56 | /** |
48 | * usb_state_string - Returns human readable name for the state. | 57 | * usb_state_string - Returns human readable name for the state. |
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index a41833cd184c..5dd75fa47dd8 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h | |||
@@ -5,9 +5,28 @@ | |||
5 | #ifndef __LINUX_USB_CHIPIDEA_H | 5 | #ifndef __LINUX_USB_CHIPIDEA_H |
6 | #define __LINUX_USB_CHIPIDEA_H | 6 | #define __LINUX_USB_CHIPIDEA_H |
7 | 7 | ||
8 | #include <linux/extcon.h> | ||
8 | #include <linux/usb/otg.h> | 9 | #include <linux/usb/otg.h> |
9 | 10 | ||
10 | struct ci_hdrc; | 11 | struct ci_hdrc; |
12 | |||
13 | /** | ||
14 | * struct ci_hdrc_cable - structure for external connector cable state tracking | ||
15 | * @state: current state of the line | ||
16 | * @changed: set to true when extcon event happen | ||
17 | * @edev: device which generate events | ||
18 | * @ci: driver state of the chipidea device | ||
19 | * @nb: hold event notification callback | ||
20 | * @conn: used for notification registration | ||
21 | */ | ||
22 | struct ci_hdrc_cable { | ||
23 | bool state; | ||
24 | bool changed; | ||
25 | struct extcon_dev *edev; | ||
26 | struct ci_hdrc *ci; | ||
27 | struct notifier_block nb; | ||
28 | }; | ||
29 | |||
11 | struct ci_hdrc_platform_data { | 30 | struct ci_hdrc_platform_data { |
12 | const char *name; | 31 | const char *name; |
13 | /* offset of the capability registers */ | 32 | /* offset of the capability registers */ |
@@ -48,6 +67,11 @@ struct ci_hdrc_platform_data { | |||
48 | u32 ahb_burst_config; | 67 | u32 ahb_burst_config; |
49 | u32 tx_burst_size; | 68 | u32 tx_burst_size; |
50 | u32 rx_burst_size; | 69 | u32 rx_burst_size; |
70 | |||
71 | /* VBUS and ID signal state tracking, using extcon framework */ | ||
72 | struct ci_hdrc_cable vbus_extcon; | ||
73 | struct ci_hdrc_cable id_extcon; | ||
74 | u32 phy_clkgate_delay_us; | ||
51 | }; | 75 | }; |
52 | 76 | ||
53 | /* Default offset of capability registers */ | 77 | /* Default offset of capability registers */ |
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index c14a69b36d27..3d583a10b926 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
@@ -215,6 +215,7 @@ struct usb_ep { | |||
215 | struct list_head ep_list; | 215 | struct list_head ep_list; |
216 | struct usb_ep_caps caps; | 216 | struct usb_ep_caps caps; |
217 | bool claimed; | 217 | bool claimed; |
218 | bool enabled; | ||
218 | unsigned maxpacket:16; | 219 | unsigned maxpacket:16; |
219 | unsigned maxpacket_limit:16; | 220 | unsigned maxpacket_limit:16; |
220 | unsigned max_streams:16; | 221 | unsigned max_streams:16; |
@@ -264,7 +265,18 @@ static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, | |||
264 | */ | 265 | */ |
265 | static inline int usb_ep_enable(struct usb_ep *ep) | 266 | static inline int usb_ep_enable(struct usb_ep *ep) |
266 | { | 267 | { |
267 | return ep->ops->enable(ep, ep->desc); | 268 | int ret; |
269 | |||
270 | if (ep->enabled) | ||
271 | return 0; | ||
272 | |||
273 | ret = ep->ops->enable(ep, ep->desc); | ||
274 | if (ret) | ||
275 | return ret; | ||
276 | |||
277 | ep->enabled = true; | ||
278 | |||
279 | return 0; | ||
268 | } | 280 | } |
269 | 281 | ||
270 | /** | 282 | /** |
@@ -281,7 +293,18 @@ static inline int usb_ep_enable(struct usb_ep *ep) | |||
281 | */ | 293 | */ |
282 | static inline int usb_ep_disable(struct usb_ep *ep) | 294 | static inline int usb_ep_disable(struct usb_ep *ep) |
283 | { | 295 | { |
284 | return ep->ops->disable(ep); | 296 | int ret; |
297 | |||
298 | if (!ep->enabled) | ||
299 | return 0; | ||
300 | |||
301 | ret = ep->ops->disable(ep); | ||
302 | if (ret) | ||
303 | return ret; | ||
304 | |||
305 | ep->enabled = false; | ||
306 | |||
307 | return 0; | ||
285 | } | 308 | } |
286 | 309 | ||
287 | /** | 310 | /** |
@@ -1233,6 +1256,8 @@ extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *, | |||
1233 | struct usb_endpoint_descriptor *, | 1256 | struct usb_endpoint_descriptor *, |
1234 | struct usb_ss_ep_comp_descriptor *); | 1257 | struct usb_ss_ep_comp_descriptor *); |
1235 | 1258 | ||
1259 | extern void usb_ep_autoconfig_release(struct usb_ep *); | ||
1260 | |||
1236 | extern void usb_ep_autoconfig_reset(struct usb_gadget *); | 1261 | extern void usb_ep_autoconfig_reset(struct usb_gadget *); |
1237 | 1262 | ||
1238 | #endif /* __LINUX_USB_GADGET_H */ | 1263 | #endif /* __LINUX_USB_GADGET_H */ |
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h index d74c0ae989d5..c36e95730de1 100644 --- a/include/linux/usb/gadget_configfs.h +++ b/include/linux/usb/gadget_configfs.h | |||
@@ -7,9 +7,10 @@ int check_user_usb_string(const char *name, | |||
7 | struct usb_gadget_strings *stringtab_dev); | 7 | struct usb_gadget_strings *stringtab_dev); |
8 | 8 | ||
9 | #define GS_STRINGS_W(__struct, __name) \ | 9 | #define GS_STRINGS_W(__struct, __name) \ |
10 | static ssize_t __struct##_##__name##_store(struct __struct *gs, \ | 10 | static ssize_t __struct##_##__name##_store(struct config_item *item, \ |
11 | const char *page, size_t len) \ | 11 | const char *page, size_t len) \ |
12 | { \ | 12 | { \ |
13 | struct __struct *gs = to_##__struct(item); \ | ||
13 | int ret; \ | 14 | int ret; \ |
14 | \ | 15 | \ |
15 | ret = usb_string_copy(page, &gs->__name); \ | 16 | ret = usb_string_copy(page, &gs->__name); \ |
@@ -19,30 +20,20 @@ int check_user_usb_string(const char *name, | |||
19 | } | 20 | } |
20 | 21 | ||
21 | #define GS_STRINGS_R(__struct, __name) \ | 22 | #define GS_STRINGS_R(__struct, __name) \ |
22 | static ssize_t __struct##_##__name##_show(struct __struct *gs, \ | 23 | static ssize_t __struct##_##__name##_show(struct config_item *item, char *page) \ |
23 | char *page) \ | ||
24 | { \ | 24 | { \ |
25 | struct __struct *gs = to_##__struct(item); \ | ||
25 | return sprintf(page, "%s\n", gs->__name ?: ""); \ | 26 | return sprintf(page, "%s\n", gs->__name ?: ""); \ |
26 | } | 27 | } |
27 | 28 | ||
28 | #define GS_STRING_ITEM_ATTR(struct_name, name) \ | ||
29 | static struct struct_name##_attribute struct_name##_##name = \ | ||
30 | __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ | ||
31 | struct_name##_##name##_show, \ | ||
32 | struct_name##_##name##_store) | ||
33 | |||
34 | #define GS_STRINGS_RW(struct_name, _name) \ | 29 | #define GS_STRINGS_RW(struct_name, _name) \ |
35 | GS_STRINGS_R(struct_name, _name) \ | 30 | GS_STRINGS_R(struct_name, _name) \ |
36 | GS_STRINGS_W(struct_name, _name) \ | 31 | GS_STRINGS_W(struct_name, _name) \ |
37 | GS_STRING_ITEM_ATTR(struct_name, _name) | 32 | CONFIGFS_ATTR(struct_name##_, _name) |
38 | 33 | ||
39 | #define USB_CONFIG_STRING_RW_OPS(struct_in) \ | 34 | #define USB_CONFIG_STRING_RW_OPS(struct_in) \ |
40 | CONFIGFS_ATTR_OPS(struct_in); \ | ||
41 | \ | ||
42 | static struct configfs_item_operations struct_in##_langid_item_ops = { \ | 35 | static struct configfs_item_operations struct_in##_langid_item_ops = { \ |
43 | .release = struct_in##_attr_release, \ | 36 | .release = struct_in##_attr_release, \ |
44 | .show_attribute = struct_in##_attr_show, \ | ||
45 | .store_attribute = struct_in##_attr_store, \ | ||
46 | }; \ | 37 | }; \ |
47 | \ | 38 | \ |
48 | static struct config_item_type struct_in##_langid_type = { \ | 39 | static struct config_item_type struct_in##_langid_type = { \ |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index d2784c10bfe2..f89c24bd53a4 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -58,12 +58,6 @@ | |||
58 | * | 58 | * |
59 | * Since "struct usb_bus" is so thin, you can't share much code in it. | 59 | * Since "struct usb_bus" is so thin, you can't share much code in it. |
60 | * This framework is a layer over that, and should be more sharable. | 60 | * This framework is a layer over that, and should be more sharable. |
61 | * | ||
62 | * @authorized_default: Specifies if new devices are authorized to | ||
63 | * connect by default or they require explicit | ||
64 | * user space authorization; this bit is settable | ||
65 | * through /sys/class/usb_host/X/authorized_default. | ||
66 | * For the rest is RO, so we don't lock to r/w it. | ||
67 | */ | 61 | */ |
68 | 62 | ||
69 | /*-------------------------------------------------------------------------*/ | 63 | /*-------------------------------------------------------------------------*/ |
@@ -120,6 +114,8 @@ struct usb_hcd { | |||
120 | #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ | 114 | #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ |
121 | #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ | 115 | #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ |
122 | #define HCD_FLAG_DEAD 6 /* controller has died? */ | 116 | #define HCD_FLAG_DEAD 6 /* controller has died? */ |
117 | #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ | ||
118 | #define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ | ||
123 | 119 | ||
124 | /* The flags can be tested using these macros; they are likely to | 120 | /* The flags can be tested using these macros; they are likely to |
125 | * be slightly faster than test_bit(). | 121 | * be slightly faster than test_bit(). |
@@ -131,6 +127,22 @@ struct usb_hcd { | |||
131 | #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) | 127 | #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) |
132 | #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) | 128 | #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) |
133 | 129 | ||
130 | /* | ||
131 | * Specifies if interfaces are authorized by default | ||
132 | * or they require explicit user space authorization; this bit is | ||
133 | * settable through /sys/class/usb_host/X/interface_authorized_default | ||
134 | */ | ||
135 | #define HCD_INTF_AUTHORIZED(hcd) \ | ||
136 | ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) | ||
137 | |||
138 | /* | ||
139 | * Specifies if devices are authorized by default | ||
140 | * or they require explicit user space authorization; this bit is | ||
141 | * settable through /sys/class/usb_host/X/authorized_default | ||
142 | */ | ||
143 | #define HCD_DEV_AUTHORIZED(hcd) \ | ||
144 | ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) | ||
145 | |||
134 | /* Flags that get set only during HCD registration or removal. */ | 146 | /* Flags that get set only during HCD registration or removal. */ |
135 | unsigned rh_registered:1;/* is root hub registered? */ | 147 | unsigned rh_registered:1;/* is root hub registered? */ |
136 | unsigned rh_pollable:1; /* may we poll the root hub? */ | 148 | unsigned rh_pollable:1; /* may we poll the root hub? */ |
@@ -141,7 +153,6 @@ struct usb_hcd { | |||
141 | * support the new root-hub polling mechanism. */ | 153 | * support the new root-hub polling mechanism. */ |
142 | unsigned uses_new_polling:1; | 154 | unsigned uses_new_polling:1; |
143 | unsigned wireless:1; /* Wireless USB HCD */ | 155 | unsigned wireless:1; /* Wireless USB HCD */ |
144 | unsigned authorized_default:1; | ||
145 | unsigned has_tt:1; /* Integrated TT in root hub */ | 156 | unsigned has_tt:1; /* Integrated TT in root hub */ |
146 | unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ | 157 | unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ |
147 | unsigned can_do_streams:1; /* HC supports streams */ | 158 | unsigned can_do_streams:1; /* HC supports streams */ |
@@ -239,6 +250,7 @@ struct hc_driver { | |||
239 | #define HCD_USB2 0x0020 /* USB 2.0 */ | 250 | #define HCD_USB2 0x0020 /* USB 2.0 */ |
240 | #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ | 251 | #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ |
241 | #define HCD_USB3 0x0040 /* USB 3.0 */ | 252 | #define HCD_USB3 0x0040 /* USB 3.0 */ |
253 | #define HCD_USB31 0x0050 /* USB 3.1 */ | ||
242 | #define HCD_MASK 0x0070 | 254 | #define HCD_MASK 0x0070 |
243 | #define HCD_BH 0x0100 /* URB complete in BH context */ | 255 | #define HCD_BH 0x0100 /* URB complete in BH context */ |
244 | 256 | ||
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index a4ee1b582183..fa6dc132bd1b 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h | |||
@@ -95,7 +95,7 @@ struct musb_hdrc_config { | |||
95 | /* musb CLKIN in Blackfin in MHZ */ | 95 | /* musb CLKIN in Blackfin in MHZ */ |
96 | unsigned char clkin; | 96 | unsigned char clkin; |
97 | #endif | 97 | #endif |
98 | 98 | u32 maximum_speed; | |
99 | }; | 99 | }; |
100 | 100 | ||
101 | struct musb_hdrc_platform_data { | 101 | struct musb_hdrc_platform_data { |
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 8c5a818ec244..c3fe9e48ce27 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h | |||
@@ -12,22 +12,10 @@ | |||
12 | #include <linux/usb/phy.h> | 12 | #include <linux/usb/phy.h> |
13 | 13 | ||
14 | #if IS_ENABLED(CONFIG_OF) | 14 | #if IS_ENABLED(CONFIG_OF) |
15 | enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); | ||
16 | enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); | ||
17 | bool of_usb_host_tpl_support(struct device_node *np); | 15 | bool of_usb_host_tpl_support(struct device_node *np); |
18 | int of_usb_update_otg_caps(struct device_node *np, | 16 | int of_usb_update_otg_caps(struct device_node *np, |
19 | struct usb_otg_caps *otg_caps); | 17 | struct usb_otg_caps *otg_caps); |
20 | #else | 18 | #else |
21 | static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) | ||
22 | { | ||
23 | return USB_DR_MODE_UNKNOWN; | ||
24 | } | ||
25 | |||
26 | static inline enum usb_device_speed | ||
27 | of_usb_get_maximum_speed(struct device_node *np) | ||
28 | { | ||
29 | return USB_SPEED_UNKNOWN; | ||
30 | } | ||
31 | static inline bool of_usb_host_tpl_support(struct device_node *np) | 19 | static inline bool of_usb_host_tpl_support(struct device_node *np) |
32 | { | 20 | { |
33 | return false; | 21 | return false; |
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index bd1dcf816100..67929df86df5 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
@@ -119,4 +119,13 @@ enum usb_dr_mode { | |||
119 | USB_DR_MODE_OTG, | 119 | USB_DR_MODE_OTG, |
120 | }; | 120 | }; |
121 | 121 | ||
122 | /** | ||
123 | * usb_get_dr_mode - Get dual role mode for given device | ||
124 | * @dev: Pointer to the given device | ||
125 | * | ||
126 | * The function gets phy interface string from property 'dr_mode', | ||
127 | * and returns the correspondig enum usb_dr_mode | ||
128 | */ | ||
129 | extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); | ||
130 | |||
122 | #endif /* __LINUX_USB_OTG_H */ | 131 | #endif /* __LINUX_USB_OTG_H */ |
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e39f251cf861..31a8068c42a5 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h | |||
@@ -63,7 +63,7 @@ enum usb_otg_state { | |||
63 | struct usb_phy; | 63 | struct usb_phy; |
64 | struct usb_otg; | 64 | struct usb_otg; |
65 | 65 | ||
66 | /* for transceivers connected thru an ULPI interface, the user must | 66 | /* for phys connected thru an ULPI interface, the user must |
67 | * provide access ops | 67 | * provide access ops |
68 | */ | 68 | */ |
69 | struct usb_phy_io_ops { | 69 | struct usb_phy_io_ops { |
@@ -92,10 +92,10 @@ struct usb_phy { | |||
92 | u16 port_status; | 92 | u16 port_status; |
93 | u16 port_change; | 93 | u16 port_change; |
94 | 94 | ||
95 | /* to support controllers that have multiple transceivers */ | 95 | /* to support controllers that have multiple phys */ |
96 | struct list_head head; | 96 | struct list_head head; |
97 | 97 | ||
98 | /* initialize/shutdown the OTG controller */ | 98 | /* initialize/shutdown the phy */ |
99 | int (*init)(struct usb_phy *x); | 99 | int (*init)(struct usb_phy *x); |
100 | void (*shutdown)(struct usb_phy *x); | 100 | void (*shutdown)(struct usb_phy *x); |
101 | 101 | ||
@@ -106,7 +106,7 @@ struct usb_phy { | |||
106 | int (*set_power)(struct usb_phy *x, | 106 | int (*set_power)(struct usb_phy *x, |
107 | unsigned mA); | 107 | unsigned mA); |
108 | 108 | ||
109 | /* Set transceiver into suspend mode */ | 109 | /* Set phy into suspend mode */ |
110 | int (*set_suspend)(struct usb_phy *x, | 110 | int (*set_suspend)(struct usb_phy *x, |
111 | int suspend); | 111 | int suspend); |
112 | 112 | ||
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
@@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { | |||
157 | */ | 157 | */ |
158 | int pio_dma_border; /* default is 64byte */ | 158 | int pio_dma_border; /* default is 64byte */ |
159 | 159 | ||
160 | u32 type; | 160 | uintptr_t type; |
161 | u32 enable_gpio; | 161 | u32 enable_gpio; |
162 | 162 | ||
163 | /* | 163 | /* |
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index ddb440975382..610a86a892b8 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h | |||
@@ -44,6 +44,9 @@ struct vfio_device_ops { | |||
44 | void (*request)(void *device_data, unsigned int count); | 44 | void (*request)(void *device_data, unsigned int count); |
45 | }; | 45 | }; |
46 | 46 | ||
47 | extern struct iommu_group *vfio_iommu_group_get(struct device *dev); | ||
48 | extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); | ||
49 | |||
47 | extern int vfio_add_group_dev(struct device *dev, | 50 | extern int vfio_add_group_dev(struct device *dev, |
48 | const struct vfio_device_ops *ops, | 51 | const struct vfio_device_ops *ops, |
49 | void *device_data); | 52 | void *device_data); |
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 376499197717..69e1d4a1f1b3 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h | |||
@@ -39,10 +39,6 @@ struct pci_dev; | |||
39 | * enum vga_switcheroo_state - client power state | 39 | * enum vga_switcheroo_state - client power state |
40 | * @VGA_SWITCHEROO_OFF: off | 40 | * @VGA_SWITCHEROO_OFF: off |
41 | * @VGA_SWITCHEROO_ON: on | 41 | * @VGA_SWITCHEROO_ON: on |
42 | * @VGA_SWITCHEROO_INIT: client has registered with vga_switcheroo but | ||
43 | * vga_switcheroo is not enabled, i.e. no second client or no handler | ||
44 | * has registered. Only used in vga_switcheroo_get_client_state() which | ||
45 | * in turn is only called from hda_intel.c | ||
46 | * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo. | 42 | * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo. |
47 | * Only used in vga_switcheroo_get_client_state() which in turn is only | 43 | * Only used in vga_switcheroo_get_client_state() which in turn is only |
48 | * called from hda_intel.c | 44 | * called from hda_intel.c |
@@ -53,12 +49,14 @@ enum vga_switcheroo_state { | |||
53 | VGA_SWITCHEROO_OFF, | 49 | VGA_SWITCHEROO_OFF, |
54 | VGA_SWITCHEROO_ON, | 50 | VGA_SWITCHEROO_ON, |
55 | /* below are referred only from vga_switcheroo_get_client_state() */ | 51 | /* below are referred only from vga_switcheroo_get_client_state() */ |
56 | VGA_SWITCHEROO_INIT, | ||
57 | VGA_SWITCHEROO_NOT_FOUND, | 52 | VGA_SWITCHEROO_NOT_FOUND, |
58 | }; | 53 | }; |
59 | 54 | ||
60 | /** | 55 | /** |
61 | * enum vga_switcheroo_client_id - client identifier | 56 | * enum vga_switcheroo_client_id - client identifier |
57 | * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients. | ||
58 | * Determining the id requires the handler, so GPUs are given their | ||
59 | * true id in a delayed fashion in vga_switcheroo_enable() | ||
62 | * @VGA_SWITCHEROO_IGD: integrated graphics device | 60 | * @VGA_SWITCHEROO_IGD: integrated graphics device |
63 | * @VGA_SWITCHEROO_DIS: discrete graphics device | 61 | * @VGA_SWITCHEROO_DIS: discrete graphics device |
64 | * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported | 62 | * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported |
@@ -66,6 +64,7 @@ enum vga_switcheroo_state { | |||
66 | * Client identifier. Audio clients use the same identifier & 0x100. | 64 | * Client identifier. Audio clients use the same identifier & 0x100. |
67 | */ | 65 | */ |
68 | enum vga_switcheroo_client_id { | 66 | enum vga_switcheroo_client_id { |
67 | VGA_SWITCHEROO_UNKNOWN_ID = -1, | ||
69 | VGA_SWITCHEROO_IGD, | 68 | VGA_SWITCHEROO_IGD, |
70 | VGA_SWITCHEROO_DIS, | 69 | VGA_SWITCHEROO_DIS, |
71 | VGA_SWITCHEROO_MAX_CLIENTS, | 70 | VGA_SWITCHEROO_MAX_CLIENTS, |
@@ -96,7 +95,7 @@ struct vga_switcheroo_handler { | |||
96 | int (*switchto)(enum vga_switcheroo_client_id id); | 95 | int (*switchto)(enum vga_switcheroo_client_id id); |
97 | int (*power_state)(enum vga_switcheroo_client_id id, | 96 | int (*power_state)(enum vga_switcheroo_client_id id, |
98 | enum vga_switcheroo_state state); | 97 | enum vga_switcheroo_state state); |
99 | int (*get_client_id)(struct pci_dev *pdev); | 98 | enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); |
100 | }; | 99 | }; |
101 | 100 | ||
102 | /** | 101 | /** |
@@ -128,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev, | |||
128 | bool driver_power_control); | 127 | bool driver_power_control); |
129 | int vga_switcheroo_register_audio_client(struct pci_dev *pdev, | 128 | int vga_switcheroo_register_audio_client(struct pci_dev *pdev, |
130 | const struct vga_switcheroo_client_ops *ops, | 129 | const struct vga_switcheroo_client_ops *ops, |
131 | int id); | 130 | enum vga_switcheroo_client_id id); |
132 | 131 | ||
133 | void vga_switcheroo_client_fb_set(struct pci_dev *dev, | 132 | void vga_switcheroo_client_fb_set(struct pci_dev *dev, |
134 | struct fb_info *info); | 133 | struct fb_info *info); |
135 | 134 | ||
136 | int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler); | 135 | int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler); |
137 | void vga_switcheroo_unregister_handler(void); | 136 | void vga_switcheroo_unregister_handler(void); |
138 | 137 | ||
139 | int vga_switcheroo_process_delayed_switch(void); | 138 | int vga_switcheroo_process_delayed_switch(void); |
140 | 139 | ||
141 | int vga_switcheroo_get_client_state(struct pci_dev *dev); | 140 | enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); |
142 | 141 | ||
143 | void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); | 142 | void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); |
144 | 143 | ||
@@ -151,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} | |||
151 | static inline int vga_switcheroo_register_client(struct pci_dev *dev, | 150 | static inline int vga_switcheroo_register_client(struct pci_dev *dev, |
152 | const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } | 151 | const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } |
153 | static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} | 152 | static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} |
154 | static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } | 153 | static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; } |
155 | static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, | 154 | static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, |
156 | const struct vga_switcheroo_client_ops *ops, | 155 | const struct vga_switcheroo_client_ops *ops, |
157 | int id) { return 0; } | 156 | enum vga_switcheroo_client_id id) { return 0; } |
158 | static inline void vga_switcheroo_unregister_handler(void) {} | 157 | static inline void vga_switcheroo_unregister_handler(void) {} |
159 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } | 158 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } |
160 | static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } | 159 | static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } |
161 | 160 | ||
162 | static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} | 161 | static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} |
163 | 162 | ||
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 9246d32dc973..e623d392db0c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
@@ -14,12 +14,12 @@ | |||
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #ifdef CONFIG_HIGHMEM | 16 | #ifdef CONFIG_HIGHMEM |
17 | #define HIGHMEM_ZONE(xx) , xx##_HIGH | 17 | #define HIGHMEM_ZONE(xx) xx##_HIGH, |
18 | #else | 18 | #else |
19 | #define HIGHMEM_ZONE(xx) | 19 | #define HIGHMEM_ZONE(xx) |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE | 22 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE |
23 | 23 | ||
24 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | 24 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, |
25 | FOR_ALL_ZONES(PGALLOC), | 25 | FOR_ALL_ZONES(PGALLOC), |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0ec598381f97..3bff87a25a42 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) | |||
182 | # endif | 182 | # endif |
183 | #endif | 183 | #endif |
184 | 184 | ||
185 | struct vmalloc_info { | ||
186 | unsigned long used; | ||
187 | unsigned long largest_chunk; | ||
188 | }; | ||
189 | |||
190 | #ifdef CONFIG_MMU | 185 | #ifdef CONFIG_MMU |
191 | #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) | 186 | #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) |
192 | extern void get_vmalloc_info(struct vmalloc_info *vmi); | ||
193 | #else | 187 | #else |
194 | |||
195 | #define VMALLOC_TOTAL 0UL | 188 | #define VMALLOC_TOTAL 0UL |
196 | #define get_vmalloc_info(vmi) \ | ||
197 | do { \ | ||
198 | (vmi)->used = 0; \ | ||
199 | (vmi)->largest_chunk = 0; \ | ||
200 | } while (0) | ||
201 | #endif | 189 | #endif |
202 | 190 | ||
203 | #endif /* _LINUX_VMALLOC_H */ | 191 | #endif /* _LINUX_VMALLOC_H */ |
diff --git a/include/linux/vme.h b/include/linux/vme.h index c0131358f351..71e4a6dec5ac 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h | |||
@@ -81,6 +81,9 @@ struct vme_resource { | |||
81 | 81 | ||
82 | extern struct bus_type vme_bus_type; | 82 | extern struct bus_type vme_bus_type; |
83 | 83 | ||
84 | /* Number of VME interrupt vectors */ | ||
85 | #define VME_NUM_STATUSID 256 | ||
86 | |||
84 | /* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ | 87 | /* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ |
85 | #define VME_MAX_BRIDGES (sizeof(unsigned int)*8) | 88 | #define VME_MAX_BRIDGES (sizeof(unsigned int)*8) |
86 | #define VME_MAX_SLOTS 32 | 89 | #define VME_MAX_SLOTS 32 |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 82e7db7f7100..5dbc8b0ee567 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -161,30 +161,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, | |||
161 | } | 161 | } |
162 | 162 | ||
163 | #ifdef CONFIG_NUMA | 163 | #ifdef CONFIG_NUMA |
164 | /* | ||
165 | * Determine the per node value of a stat item. This function | ||
166 | * is called frequently in a NUMA machine, so try to be as | ||
167 | * frugal as possible. | ||
168 | */ | ||
169 | static inline unsigned long node_page_state(int node, | ||
170 | enum zone_stat_item item) | ||
171 | { | ||
172 | struct zone *zones = NODE_DATA(node)->node_zones; | ||
173 | |||
174 | return | ||
175 | #ifdef CONFIG_ZONE_DMA | ||
176 | zone_page_state(&zones[ZONE_DMA], item) + | ||
177 | #endif | ||
178 | #ifdef CONFIG_ZONE_DMA32 | ||
179 | zone_page_state(&zones[ZONE_DMA32], item) + | ||
180 | #endif | ||
181 | #ifdef CONFIG_HIGHMEM | ||
182 | zone_page_state(&zones[ZONE_HIGHMEM], item) + | ||
183 | #endif | ||
184 | zone_page_state(&zones[ZONE_NORMAL], item) + | ||
185 | zone_page_state(&zones[ZONE_MOVABLE], item); | ||
186 | } | ||
187 | 164 | ||
165 | extern unsigned long node_page_state(int node, enum zone_stat_item item); | ||
188 | extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); | 166 | extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); |
189 | 167 | ||
190 | #else | 168 | #else |
@@ -269,7 +247,6 @@ static inline void __dec_zone_page_state(struct page *page, | |||
269 | 247 | ||
270 | #define set_pgdat_percpu_threshold(pgdat, callback) { } | 248 | #define set_pgdat_percpu_threshold(pgdat, callback) { } |
271 | 249 | ||
272 | static inline void refresh_cpu_vm_stats(int cpu) { } | ||
273 | static inline void refresh_zone_stat_thresholds(void) { } | 250 | static inline void refresh_zone_stat_thresholds(void) { } |
274 | static inline void cpu_vm_stats_fold(int cpu) { } | 251 | static inline void cpu_vm_stats_fold(int cpu) { } |
275 | 252 | ||
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index d74a0e907b9e..027b1f43f12d 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h | |||
@@ -24,8 +24,8 @@ struct watchdog_device; | |||
24 | * @stop: The routine for stopping the watchdog device. | 24 | * @stop: The routine for stopping the watchdog device. |
25 | * @ping: The routine that sends a keepalive ping to the watchdog device. | 25 | * @ping: The routine that sends a keepalive ping to the watchdog device. |
26 | * @status: The routine that shows the status of the watchdog device. | 26 | * @status: The routine that shows the status of the watchdog device. |
27 | * @set_timeout:The routine for setting the watchdog devices timeout value. | 27 | * @set_timeout:The routine for setting the watchdog devices timeout value (in seconds). |
28 | * @get_timeleft:The routine that get's the time that's left before a reset. | 28 | * @get_timeleft:The routine that gets the time left before a reset (in seconds). |
29 | * @ref: The ref operation for dyn. allocated watchdog_device structs | 29 | * @ref: The ref operation for dyn. allocated watchdog_device structs |
30 | * @unref: The unref operation for dyn. allocated watchdog_device structs | 30 | * @unref: The unref operation for dyn. allocated watchdog_device structs |
31 | * @ioctl: The routines that handles extra ioctl calls. | 31 | * @ioctl: The routines that handles extra ioctl calls. |
@@ -33,7 +33,7 @@ struct watchdog_device; | |||
33 | * The watchdog_ops structure contains a list of low-level operations | 33 | * The watchdog_ops structure contains a list of low-level operations |
34 | * that control a watchdog device. It also contains the module that owns | 34 | * that control a watchdog device. It also contains the module that owns |
35 | * these operations. The start and stop function are mandatory, all other | 35 | * these operations. The start and stop function are mandatory, all other |
36 | * functions are optonal. | 36 | * functions are optional. |
37 | */ | 37 | */ |
38 | struct watchdog_ops { | 38 | struct watchdog_ops { |
39 | struct module *owner; | 39 | struct module *owner; |
@@ -59,9 +59,9 @@ struct watchdog_ops { | |||
59 | * @info: Pointer to a watchdog_info structure. | 59 | * @info: Pointer to a watchdog_info structure. |
60 | * @ops: Pointer to the list of watchdog operations. | 60 | * @ops: Pointer to the list of watchdog operations. |
61 | * @bootstatus: Status of the watchdog device at boot. | 61 | * @bootstatus: Status of the watchdog device at boot. |
62 | * @timeout: The watchdog devices timeout value. | 62 | * @timeout: The watchdog devices timeout value (in seconds). |
63 | * @min_timeout:The watchdog devices minimum timeout value. | 63 | * @min_timeout:The watchdog devices minimum timeout value (in seconds). |
64 | * @max_timeout:The watchdog devices maximum timeout value. | 64 | * @max_timeout:The watchdog devices maximum timeout value (in seconds). |
65 | * @driver-data:Pointer to the drivers private data. | 65 | * @driver-data:Pointer to the drivers private data. |
66 | * @lock: Lock for watchdog core internal use only. | 66 | * @lock: Lock for watchdog core internal use only. |
67 | * @status: Field that contains the devices internal status bits. | 67 | * @status: Field that contains the devices internal status bits. |
@@ -119,8 +119,15 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway | |||
119 | /* Use the following function to check if a timeout value is invalid */ | 119 | /* Use the following function to check if a timeout value is invalid */ |
120 | static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) | 120 | static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) |
121 | { | 121 | { |
122 | return ((wdd->max_timeout != 0) && | 122 | /* |
123 | (t < wdd->min_timeout || t > wdd->max_timeout)); | 123 | * The timeout is invalid if |
124 | * - the requested value is smaller than the configured minimum timeout, | ||
125 | * or | ||
126 | * - a maximum timeout is configured, and the requested value is larger | ||
127 | * than the maximum timeout. | ||
128 | */ | ||
129 | return t < wdd->min_timeout || | ||
130 | (wdd->max_timeout && t > wdd->max_timeout); | ||
124 | } | 131 | } |
125 | 132 | ||
126 | /* Use the following functions to manipulate watchdog driver specific data */ | 133 | /* Use the following functions to manipulate watchdog driver specific data */ |
diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 91b0a68d38dc..89474b9d260c 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h | |||
@@ -21,15 +21,19 @@ struct dentry; | |||
21 | 21 | ||
22 | struct xattr_handler { | 22 | struct xattr_handler { |
23 | const char *prefix; | 23 | const char *prefix; |
24 | int flags; /* fs private flags passed back to the handlers */ | 24 | int flags; /* fs private flags */ |
25 | size_t (*list)(struct dentry *dentry, char *list, size_t list_size, | 25 | size_t (*list)(const struct xattr_handler *, struct dentry *dentry, |
26 | const char *name, size_t name_len, int handler_flags); | 26 | char *list, size_t list_size, const char *name, |
27 | int (*get)(struct dentry *dentry, const char *name, void *buffer, | 27 | size_t name_len); |
28 | size_t size, int handler_flags); | 28 | int (*get)(const struct xattr_handler *, struct dentry *dentry, |
29 | int (*set)(struct dentry *dentry, const char *name, const void *buffer, | 29 | const char *name, void *buffer, size_t size); |
30 | size_t size, int flags, int handler_flags); | 30 | int (*set)(const struct xattr_handler *, struct dentry *dentry, |
31 | const char *name, const void *buffer, size_t size, | ||
32 | int flags); | ||
31 | }; | 33 | }; |
32 | 34 | ||
35 | const char *xattr_full_name(const struct xattr_handler *, const char *); | ||
36 | |||
33 | struct xattr { | 37 | struct xattr { |
34 | const char *name; | 38 | const char *name; |
35 | void *value; | 39 | void *value; |
diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 42f8ec992452..2e97b7707dff 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h | |||
@@ -38,10 +38,10 @@ enum zpool_mapmode { | |||
38 | 38 | ||
39 | bool zpool_has_pool(char *type); | 39 | bool zpool_has_pool(char *type); |
40 | 40 | ||
41 | struct zpool *zpool_create_pool(char *type, char *name, | 41 | struct zpool *zpool_create_pool(const char *type, const char *name, |
42 | gfp_t gfp, const struct zpool_ops *ops); | 42 | gfp_t gfp, const struct zpool_ops *ops); |
43 | 43 | ||
44 | char *zpool_get_type(struct zpool *pool); | 44 | const char *zpool_get_type(struct zpool *pool); |
45 | 45 | ||
46 | void zpool_destroy_pool(struct zpool *pool); | 46 | void zpool_destroy_pool(struct zpool *pool); |
47 | 47 | ||
@@ -83,7 +83,9 @@ struct zpool_driver { | |||
83 | atomic_t refcount; | 83 | atomic_t refcount; |
84 | struct list_head list; | 84 | struct list_head list; |
85 | 85 | ||
86 | void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops, | 86 | void *(*create)(const char *name, |
87 | gfp_t gfp, | ||
88 | const struct zpool_ops *ops, | ||
87 | struct zpool *zpool); | 89 | struct zpool *zpool); |
88 | void (*destroy)(void *pool); | 90 | void (*destroy)(void *pool); |
89 | 91 | ||
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 6398dfae53f1..34eb16098a33 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h | |||
@@ -41,7 +41,7 @@ struct zs_pool_stats { | |||
41 | 41 | ||
42 | struct zs_pool; | 42 | struct zs_pool; |
43 | 43 | ||
44 | struct zs_pool *zs_create_pool(char *name, gfp_t flags); | 44 | struct zs_pool *zs_create_pool(const char *name, gfp_t flags); |
45 | void zs_destroy_pool(struct zs_pool *pool); | 45 | void zs_destroy_pool(struct zs_pool *pool); |
46 | 46 | ||
47 | unsigned long zs_malloc(struct zs_pool *pool, size_t size); | 47 | unsigned long zs_malloc(struct zs_pool *pool, size_t size); |
diff --git a/include/linux/zutil.h b/include/linux/zutil.h index 6adfa9a6ffe9..663689521759 100644 --- a/include/linux/zutil.h +++ b/include/linux/zutil.h | |||
@@ -68,10 +68,10 @@ typedef uLong (*check_func) (uLong check, const Byte *buf, | |||
68 | An Adler-32 checksum is almost as reliable as a CRC32 but can be computed | 68 | An Adler-32 checksum is almost as reliable as a CRC32 but can be computed |
69 | much faster. Usage example: | 69 | much faster. Usage example: |
70 | 70 | ||
71 | uLong adler = adler32(0L, NULL, 0); | 71 | uLong adler = zlib_adler32(0L, NULL, 0); |
72 | 72 | ||
73 | while (read_buffer(buffer, length) != EOF) { | 73 | while (read_buffer(buffer, length) != EOF) { |
74 | adler = adler32(adler, buffer, length); | 74 | adler = zlib_adler32(adler, buffer, length); |
75 | } | 75 | } |
76 | if (adler != original_adler) error(); | 76 | if (adler != original_adler) error(); |
77 | */ | 77 | */ |