Diffstat (limited to 'include/linux')
260 files changed, 7004 insertions, 1865 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 407a12f663eb..6bff83b1f298 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/ioport.h> /* for struct resource */ | 29 | #include <linux/ioport.h> /* for struct resource */ |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/property.h> | ||
31 | 32 | ||
32 | #ifndef _LINUX | 33 | #ifndef _LINUX |
33 | #define _LINUX | 34 | #define _LINUX |
@@ -123,6 +124,10 @@ int acpi_numa_init (void); | |||
123 | 124 | ||
124 | int acpi_table_init (void); | 125 | int acpi_table_init (void); |
125 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); | 126 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); |
127 | int __init acpi_parse_entries(char *id, unsigned long table_size, | ||
128 | acpi_tbl_entry_handler handler, | ||
129 | struct acpi_table_header *table_header, | ||
130 | int entry_id, unsigned int max_entries); | ||
126 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | 131 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, |
127 | int entry_id, | 132 | int entry_id, |
128 | acpi_tbl_entry_handler handler, | 133 | acpi_tbl_entry_handler handler, |
@@ -423,14 +428,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | |||
423 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | 428 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
424 | const struct device *dev); | 429 | const struct device *dev); |
425 | 430 | ||
426 | static inline bool acpi_driver_match_device(struct device *dev, | 431 | extern bool acpi_driver_match_device(struct device *dev, |
427 | const struct device_driver *drv) | 432 | const struct device_driver *drv); |
428 | { | ||
429 | return !!acpi_match_device(drv->acpi_match_table, dev); | ||
430 | } | ||
431 | |||
432 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); | 433 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); |
433 | int acpi_device_modalias(struct device *, char *, int); | 434 | int acpi_device_modalias(struct device *, char *, int); |
435 | void acpi_walk_dep_device_list(acpi_handle handle); | ||
434 | 436 | ||
435 | struct platform_device *acpi_create_platform_device(struct acpi_device *); | 437 | struct platform_device *acpi_create_platform_device(struct acpi_device *); |
436 | #define ACPI_PTR(_ptr) (_ptr) | 438 | #define ACPI_PTR(_ptr) (_ptr) |
@@ -443,6 +445,23 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); | |||
443 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) | 445 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
444 | #define ACPI_HANDLE(dev) (NULL) | 446 | #define ACPI_HANDLE(dev) (NULL) |
445 | 447 | ||
448 | struct fwnode_handle; | ||
449 | |||
450 | static inline bool is_acpi_node(struct fwnode_handle *fwnode) | ||
451 | { | ||
452 | return false; | ||
453 | } | ||
454 | |||
455 | static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) | ||
456 | { | ||
457 | return NULL; | ||
458 | } | ||
459 | |||
460 | static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) | ||
461 | { | ||
462 | return NULL; | ||
463 | } | ||
464 | |||
446 | static inline const char *acpi_dev_name(struct acpi_device *adev) | 465 | static inline const char *acpi_dev_name(struct acpi_device *adev) |
447 | { | 466 | { |
448 | return NULL; | 467 | return NULL; |
@@ -553,16 +572,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, | |||
553 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) | 572 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
554 | #endif | 573 | #endif |
555 | 574 | ||
556 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) | 575 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) |
557 | int acpi_dev_runtime_suspend(struct device *dev); | 576 | int acpi_dev_runtime_suspend(struct device *dev); |
558 | int acpi_dev_runtime_resume(struct device *dev); | 577 | int acpi_dev_runtime_resume(struct device *dev); |
559 | int acpi_subsys_runtime_suspend(struct device *dev); | 578 | int acpi_subsys_runtime_suspend(struct device *dev); |
560 | int acpi_subsys_runtime_resume(struct device *dev); | 579 | int acpi_subsys_runtime_resume(struct device *dev); |
580 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | ||
581 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
561 | #else | 582 | #else |
562 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } | 583 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } |
563 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } | 584 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } |
564 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } | 585 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } |
565 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } | 586 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } |
587 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
588 | { | ||
589 | return NULL; | ||
590 | } | ||
591 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
592 | { | ||
593 | return -ENODEV; | ||
594 | } | ||
566 | #endif | 595 | #endif |
567 | 596 | ||
568 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) | 597 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) |
@@ -585,20 +614,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } | |||
585 | static inline int acpi_subsys_freeze(struct device *dev) { return 0; } | 614 | static inline int acpi_subsys_freeze(struct device *dev) { return 0; } |
586 | #endif | 615 | #endif |
587 | 616 | ||
588 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | ||
589 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | ||
590 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
591 | #else | ||
592 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
593 | { | ||
594 | return NULL; | ||
595 | } | ||
596 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
597 | { | ||
598 | return -ENODEV; | ||
599 | } | ||
600 | #endif | ||
601 | |||
602 | #ifdef CONFIG_ACPI | 617 | #ifdef CONFIG_ACPI |
603 | __printf(3, 4) | 618 | __printf(3, 4) |
604 | void acpi_handle_printk(const char *level, acpi_handle handle, | 619 | void acpi_handle_printk(const char *level, acpi_handle handle, |
@@ -659,4 +674,114 @@ do { \ | |||
659 | #endif | 674 | #endif |
660 | #endif | 675 | #endif |
661 | 676 | ||
677 | struct acpi_gpio_params { | ||
678 | unsigned int crs_entry_index; | ||
679 | unsigned int line_index; | ||
680 | bool active_low; | ||
681 | }; | ||
682 | |||
683 | struct acpi_gpio_mapping { | ||
684 | const char *name; | ||
685 | const struct acpi_gpio_params *data; | ||
686 | unsigned int size; | ||
687 | }; | ||
688 | |||
689 | #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) | ||
690 | int acpi_dev_add_driver_gpios(struct acpi_device *adev, | ||
691 | const struct acpi_gpio_mapping *gpios); | ||
692 | |||
693 | static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) | ||
694 | { | ||
695 | if (adev) | ||
696 | adev->driver_gpios = NULL; | ||
697 | } | ||
698 | #else | ||
699 | static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, | ||
700 | const struct acpi_gpio_mapping *gpios) | ||
701 | { | ||
702 | return -ENXIO; | ||
703 | } | ||
704 | static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} | ||
705 | #endif | ||
706 | |||
707 | /* Device properties */ | ||
708 | |||
709 | #define MAX_ACPI_REFERENCE_ARGS 8 | ||
710 | struct acpi_reference_args { | ||
711 | struct acpi_device *adev; | ||
712 | size_t nargs; | ||
713 | u64 args[MAX_ACPI_REFERENCE_ARGS]; | ||
714 | }; | ||
715 | |||
716 | #ifdef CONFIG_ACPI | ||
717 | int acpi_dev_get_property(struct acpi_device *adev, const char *name, | ||
718 | acpi_object_type type, const union acpi_object **obj); | ||
719 | int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, | ||
720 | acpi_object_type type, | ||
721 | const union acpi_object **obj); | ||
722 | int acpi_dev_get_property_reference(struct acpi_device *adev, | ||
723 | const char *name, size_t index, | ||
724 | struct acpi_reference_args *args); | ||
725 | |||
726 | int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, | ||
727 | void **valptr); | ||
728 | int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, | ||
729 | enum dev_prop_type proptype, void *val); | ||
730 | int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, | ||
731 | enum dev_prop_type proptype, void *val, size_t nval); | ||
732 | |||
733 | struct acpi_device *acpi_get_next_child(struct device *dev, | ||
734 | struct acpi_device *child); | ||
735 | #else | ||
736 | static inline int acpi_dev_get_property(struct acpi_device *adev, | ||
737 | const char *name, acpi_object_type type, | ||
738 | const union acpi_object **obj) | ||
739 | { | ||
740 | return -ENXIO; | ||
741 | } | ||
742 | static inline int acpi_dev_get_property_array(struct acpi_device *adev, | ||
743 | const char *name, | ||
744 | acpi_object_type type, | ||
745 | const union acpi_object **obj) | ||
746 | { | ||
747 | return -ENXIO; | ||
748 | } | ||
749 | static inline int acpi_dev_get_property_reference(struct acpi_device *adev, | ||
750 | const char *name, const char *cells_name, | ||
751 | size_t index, struct acpi_reference_args *args) | ||
752 | { | ||
753 | return -ENXIO; | ||
754 | } | ||
755 | |||
756 | static inline int acpi_dev_prop_get(struct acpi_device *adev, | ||
757 | const char *propname, | ||
758 | void **valptr) | ||
759 | { | ||
760 | return -ENXIO; | ||
761 | } | ||
762 | |||
763 | static inline int acpi_dev_prop_read_single(struct acpi_device *adev, | ||
764 | const char *propname, | ||
765 | enum dev_prop_type proptype, | ||
766 | void *val) | ||
767 | { | ||
768 | return -ENXIO; | ||
769 | } | ||
770 | |||
771 | static inline int acpi_dev_prop_read(struct acpi_device *adev, | ||
772 | const char *propname, | ||
773 | enum dev_prop_type proptype, | ||
774 | void *val, size_t nval) | ||
775 | { | ||
776 | return -ENXIO; | ||
777 | } | ||
778 | |||
779 | static inline struct acpi_device *acpi_get_next_child(struct device *dev, | ||
780 | struct acpi_device *child) | ||
781 | { | ||
782 | return NULL; | ||
783 | } | ||
784 | |||
785 | #endif | ||
786 | |||
662 | #endif /*_LINUX_ACPI_H*/ | 787 | #endif /*_LINUX_ACPI_H*/ |
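The acpi_gpio_params/acpi_gpio_mapping structures and acpi_dev_add_driver_gpios() added above let a driver attach named GPIO mappings to its ACPI companion so gpiod lookups by connection ID can resolve against _CRS entries. A minimal sketch of the intended usage; the connection IDs, indices and probe/remove wrappers are invented for illustration:

    /* Sketch only: names and indices below are hypothetical. */
    #include <linux/acpi.h>

    static const struct acpi_gpio_params reset_gpio    = { 0, 0, false };
    static const struct acpi_gpio_params shutdown_gpio = { 1, 0, false };

    static const struct acpi_gpio_mapping example_acpi_gpios[] = {
        { "reset-gpios",    &reset_gpio,    1 },
        { "shutdown-gpios", &shutdown_gpio, 1 },
        { }
    };

    static int example_probe(struct device *dev)
    {
        struct acpi_device *adev = ACPI_COMPANION(dev);

        /* After this, gpiod_get(dev, "reset", ...) can find the line. */
        return acpi_dev_add_driver_gpios(adev, example_acpi_gpios);
    }

    static void example_remove(struct device *dev)
    {
        acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
    }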
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c324f5700d1a..2afc618b15ce 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #define AMBA_NR_IRQS 9 | 24 | #define AMBA_NR_IRQS 9 |
25 | #define AMBA_CID 0xb105f00d | 25 | #define AMBA_CID 0xb105f00d |
26 | #define CORESIGHT_CID 0xb105900d | ||
26 | 27 | ||
27 | struct clk; | 28 | struct clk; |
28 | 29 | ||
@@ -97,6 +98,16 @@ void amba_release_regions(struct amba_device *); | |||
97 | #define amba_pclk_disable(d) \ | 98 | #define amba_pclk_disable(d) \ |
98 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) | 99 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) |
99 | 100 | ||
101 | static inline int amba_pclk_prepare(struct amba_device *dev) | ||
102 | { | ||
103 | return clk_prepare(dev->pclk); | ||
104 | } | ||
105 | |||
106 | static inline void amba_pclk_unprepare(struct amba_device *dev) | ||
107 | { | ||
108 | clk_unprepare(dev->pclk); | ||
109 | } | ||
110 | |||
100 | /* Some drivers don't use the struct amba_device */ | 111 | /* Some drivers don't use the struct amba_device */ |
101 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) | 112 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) |
102 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) | 113 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) |
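The new amba_pclk_prepare()/amba_pclk_unprepare() helpers wrap clk_prepare()/clk_unprepare() on the bus clock. A hedged sketch of a probe path using them; the driver function name is a placeholder:

    /* Sketch only: example_amba_probe is hypothetical. */
    static int example_amba_probe(struct amba_device *adev,
                                  const struct amba_id *id)
    {
        int ret = amba_pclk_prepare(adev);

        if (ret)
            return ret;
        /* ... register access; call amba_pclk_unprepare(adev) on failure ... */
        return 0;
    }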
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,8 +31,11 @@ struct ath9k_platform_data { | |||
31 | u32 gpio_mask; | 31 | u32 gpio_mask; |
32 | u32 gpio_val; | 32 | u32 gpio_val; |
33 | 33 | ||
34 | bool endian_check; | ||
34 | bool is_clk_25mhz; | 35 | bool is_clk_25mhz; |
35 | bool tx_gain_buffalo; | 36 | bool tx_gain_buffalo; |
37 | bool disable_2ghz; | ||
38 | bool disable_5ghz; | ||
36 | 39 | ||
37 | int (*get_mac_revision)(void); | 40 | int (*get_mac_revision)(void); |
38 | int (*external_reset)(void); | 41 | int (*external_reset)(void); |
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 91b77f8d495d..9177947bf032 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -11,6 +11,7 @@ | |||
11 | * @detect_pin: GPIO pin wired to the card detect switch | 11 | * @detect_pin: GPIO pin wired to the card detect switch |
12 | * @wp_pin: GPIO pin wired to the write protect sensor | 12 | * @wp_pin: GPIO pin wired to the write protect sensor |
13 | * @detect_is_active_high: The state of the detect pin when it is active | 13 | * @detect_is_active_high: The state of the detect pin when it is active |
14 | * @non_removable: The slot is not removable, only detect once | ||
14 | * | 15 | * |
15 | * If a given slot is not present on the board, @bus_width should be | 16 | * If a given slot is not present on the board, @bus_width should be |
16 | * set to 0. The other fields are ignored in this case. | 17 | * set to 0. The other fields are ignored in this case. |
@@ -26,6 +27,7 @@ struct mci_slot_pdata { | |||
26 | int detect_pin; | 27 | int detect_pin; |
27 | int wp_pin; | 28 | int wp_pin; |
28 | bool detect_is_active_high; | 29 | bool detect_is_active_high; |
30 | bool non_removable; | ||
29 | }; | 31 | }; |
30 | 32 | ||
31 | /** | 33 | /** |
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e58fe7df8b9c..0c04917c2f12 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name); | |||
130 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ | 130 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ |
131 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, | 131 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, |
132 | unsigned int flags); | 132 | unsigned int flags); |
133 | extern void __audit_file(const struct file *); | ||
133 | extern void __audit_inode_child(const struct inode *parent, | 134 | extern void __audit_inode_child(const struct inode *parent, |
134 | const struct dentry *dentry, | 135 | const struct dentry *dentry, |
135 | const unsigned char type); | 136 | const unsigned char type); |
@@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name, | |||
183 | __audit_inode(name, dentry, flags); | 184 | __audit_inode(name, dentry, flags); |
184 | } | 185 | } |
185 | } | 186 | } |
187 | static inline void audit_file(struct file *file) | ||
188 | { | ||
189 | if (unlikely(!audit_dummy_context())) | ||
190 | __audit_file(file); | ||
191 | } | ||
186 | static inline void audit_inode_parent_hidden(struct filename *name, | 192 | static inline void audit_inode_parent_hidden(struct filename *name, |
187 | const struct dentry *dentry) | 193 | const struct dentry *dentry) |
188 | { | 194 | { |
@@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name, | |||
357 | const struct dentry *dentry, | 363 | const struct dentry *dentry, |
358 | unsigned int parent) | 364 | unsigned int parent) |
359 | { } | 365 | { } |
366 | static inline void audit_file(struct file *file) | ||
367 | { | ||
368 | } | ||
360 | static inline void audit_inode_parent_hidden(struct filename *name, | 369 | static inline void audit_inode_parent_hidden(struct filename *name, |
361 | const struct dentry *dentry) | 370 | const struct dentry *dentry) |
362 | { } | 371 | { } |
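audit_file() gives callers a one-liner for recording an already-open struct file; per the inline wrapper above it compiles down to the dummy-context check when auditing is not active. A trivial, hypothetical sketch:

    /* Sketch only: example_fd_op is an invented syscall helper. */
    static long example_fd_op(struct file *file)
    {
        audit_file(file);   /* no-op unless this syscall is being audited */
        /* ... do the work on file ... */
        return 0;
    }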
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 729f48e6b20b..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); | |||
447 | #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ | 447 | #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ |
448 | extern u32 bcma_core_dma_translation(struct bcma_device *core); | 448 | extern u32 bcma_core_dma_translation(struct bcma_device *core); |
449 | 449 | ||
450 | extern unsigned int bcma_core_irq(struct bcma_device *core, int num); | ||
451 | |||
450 | #endif /* LINUX_BCMA_H_ */ | 452 | #endif /* LINUX_BCMA_H_ */ |
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -43,12 +43,12 @@ struct bcma_drv_mips { | |||
43 | extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); | 43 | extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); |
44 | extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); | 44 | extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); |
45 | 45 | ||
46 | extern unsigned int bcma_core_irq(struct bcma_device *core); | 46 | extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); |
47 | #else | 47 | #else |
48 | static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } | 48 | static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } |
49 | static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } | 49 | static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } |
50 | 50 | ||
51 | static inline unsigned int bcma_core_irq(struct bcma_device *core) | 51 | static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) |
52 | { | 52 | { |
53 | return 0; | 53 | return 0; |
54 | } | 54 | } |
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -53,6 +53,10 @@ struct linux_binprm { | |||
53 | #define BINPRM_FLAGS_EXECFD_BIT 1 | 53 | #define BINPRM_FLAGS_EXECFD_BIT 1 |
54 | #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) | 54 | #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) |
55 | 55 | ||
56 | /* filename of the binary will be inaccessible after exec */ | ||
57 | #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 | ||
58 | #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) | ||
59 | |||
56 | /* Function parameter for binfmt->coredump */ | 60 | /* Function parameter for binfmt->coredump */ |
57 | struct coredump_params { | 61 | struct coredump_params { |
58 | const siginfo_t *siginfo; | 62 | const siginfo_t *siginfo; |
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7347f486ceca..efead0b532c4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -443,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, | |||
443 | extern void bio_set_pages_dirty(struct bio *bio); | 443 | extern void bio_set_pages_dirty(struct bio *bio); |
444 | extern void bio_check_pages_dirty(struct bio *bio); | 444 | extern void bio_check_pages_dirty(struct bio *bio); |
445 | 445 | ||
446 | void generic_start_io_acct(int rw, unsigned long sectors, | ||
447 | struct hd_struct *part); | ||
448 | void generic_end_io_acct(int rw, struct hd_struct *part, | ||
449 | unsigned long start_time); | ||
450 | |||
446 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 451 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
447 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" | 452 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" |
448 | #endif | 453 | #endif |
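generic_start_io_acct()/generic_end_io_acct() let bio-based drivers feed disk statistics without going through a request queue. A sketch under the assumption that the driver has a struct gendisk whose part0 should be charged:

    /* Sketch only: example_handle_bio is hypothetical. */
    #include <linux/bio.h>
    #include <linux/genhd.h>
    #include <linux/jiffies.h>

    static void example_handle_bio(struct gendisk *disk, struct bio *bio)
    {
        int rw = bio_data_dir(bio);
        unsigned long start = jiffies;

        generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
        /* ... process the bio ... */
        generic_end_io_acct(rw, &disk->part0, start);
    }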
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..202e4034fe26 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@ | |||
45 | * bitmap_set(dst, pos, nbits) Set specified bit area | 45 | * bitmap_set(dst, pos, nbits) Set specified bit area |
46 | * bitmap_clear(dst, pos, nbits) Clear specified bit area | 46 | * bitmap_clear(dst, pos, nbits) Clear specified bit area |
47 | * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area | 47 | * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area |
48 | * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above | ||
48 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n | 49 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
49 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n | 50 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
50 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) | 51 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
@@ -60,6 +61,7 @@ | |||
60 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region | 61 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
61 | * bitmap_release_region(bitmap, pos, order) Free specified bit region | 62 | * bitmap_release_region(bitmap, pos, order) Free specified bit region |
62 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region | 63 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
64 | * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex | ||
63 | */ | 65 | */ |
64 | 66 | ||
65 | /* | 67 | /* |
@@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); | |||
114 | 116 | ||
115 | extern void bitmap_set(unsigned long *map, unsigned int start, int len); | 117 | extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
116 | extern void bitmap_clear(unsigned long *map, unsigned int start, int len); | 118 | extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
117 | extern unsigned long bitmap_find_next_zero_area(unsigned long *map, | 119 | |
118 | unsigned long size, | 120 | extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
119 | unsigned long start, | 121 | unsigned long size, |
120 | unsigned int nr, | 122 | unsigned long start, |
121 | unsigned long align_mask); | 123 | unsigned int nr, |
124 | unsigned long align_mask, | ||
125 | unsigned long align_offset); | ||
126 | |||
127 | /** | ||
128 | * bitmap_find_next_zero_area - find a contiguous aligned zero area | ||
129 | * @map: The address to base the search on | ||
130 | * @size: The bitmap size in bits | ||
131 | * @start: The bitnumber to start searching at | ||
132 | * @nr: The number of zeroed bits we're looking for | ||
133 | * @align_mask: Alignment mask for zero area | ||
134 | * | ||
135 | * The @align_mask should be one less than a power of 2; the effect is that | ||
136 | * the bit offset of all zero areas this function finds is multiples of that | ||
137 | * power of 2. A @align_mask of 0 means no alignment is required. | ||
138 | */ | ||
139 | static inline unsigned long | ||
140 | bitmap_find_next_zero_area(unsigned long *map, | ||
141 | unsigned long size, | ||
142 | unsigned long start, | ||
143 | unsigned int nr, | ||
144 | unsigned long align_mask) | ||
145 | { | ||
146 | return bitmap_find_next_zero_area_off(map, size, start, nr, | ||
147 | align_mask, 0); | ||
148 | } | ||
122 | 149 | ||
123 | extern int bitmap_scnprintf(char *buf, unsigned int len, | 150 | extern int bitmap_scnprintf(char *buf, unsigned int len, |
124 | const unsigned long *src, int nbits); | 151 | const unsigned long *src, int nbits); |
@@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o | |||
145 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); | 172 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
146 | extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); | 173 | extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
147 | extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); | 174 | extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
175 | extern int bitmap_print_to_pagebuf(bool list, char *buf, | ||
176 | const unsigned long *maskp, int nmaskbits); | ||
148 | 177 | ||
149 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) | 178 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) |
150 | #define BITMAP_LAST_WORD_MASK(nbits) \ | 179 | #define BITMAP_LAST_WORD_MASK(nbits) \ |
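bitmap_find_next_zero_area() is now a thin wrapper around bitmap_find_next_zero_area_off(), whose extra align_offset argument shifts the base against which the alignment mask is applied. An illustrative sketch with arbitrary values:

    /* Sketch only: example_alloc and its parameters are illustrative. */
    #include <linux/bitmap.h>

    static unsigned long example_alloc(unsigned long *map, unsigned long size)
    {
        unsigned long pos;

        /* 4 zero bits, positioned so that (pos + 2) is 8-bit aligned */
        pos = bitmap_find_next_zero_area_off(map, size, 0, 4, 7, 2);
        if (pos < size)
            bitmap_set(map, pos, 4);
        return pos;     /* a result >= size means no suitable area */
    }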
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c9be1589415a..8aded9ab2e4e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -79,7 +79,13 @@ struct blk_mq_tag_set { | |||
79 | struct list_head tag_list; | 79 | struct list_head tag_list; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); | 82 | struct blk_mq_queue_data { |
83 | struct request *rq; | ||
84 | struct list_head *list; | ||
85 | bool last; | ||
86 | }; | ||
87 | |||
88 | typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); | ||
83 | typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); | 89 | typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); |
84 | typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); | 90 | typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); |
85 | typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); | 91 | typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); |
@@ -140,6 +146,7 @@ enum { | |||
140 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
141 | BLK_MQ_F_SG_MERGE = 1 << 2, | 147 | BLK_MQ_F_SG_MERGE = 1 << 2, |
142 | BLK_MQ_F_SYSFS_UP = 1 << 3, | 148 | BLK_MQ_F_SYSFS_UP = 1 << 3, |
149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | ||
143 | 150 | ||
144 | BLK_MQ_S_STOPPED = 0, | 151 | BLK_MQ_S_STOPPED = 0, |
145 | BLK_MQ_S_TAG_ACTIVE = 1, | 152 | BLK_MQ_S_TAG_ACTIVE = 1, |
@@ -162,11 +169,29 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); | |||
162 | void blk_mq_insert_request(struct request *, bool, bool, bool); | 169 | void blk_mq_insert_request(struct request *, bool, bool, bool); |
163 | void blk_mq_run_queues(struct request_queue *q, bool async); | 170 | void blk_mq_run_queues(struct request_queue *q, bool async); |
164 | void blk_mq_free_request(struct request *rq); | 171 | void blk_mq_free_request(struct request *rq); |
172 | void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); | ||
165 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | 173 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); |
166 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | 174 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, |
167 | gfp_t gfp, bool reserved); | 175 | gfp_t gfp, bool reserved); |
168 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); | 176 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); |
169 | 177 | ||
178 | enum { | ||
179 | BLK_MQ_UNIQUE_TAG_BITS = 16, | ||
180 | BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, | ||
181 | }; | ||
182 | |||
183 | u32 blk_mq_unique_tag(struct request *rq); | ||
184 | |||
185 | static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) | ||
186 | { | ||
187 | return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; | ||
188 | } | ||
189 | |||
190 | static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | ||
191 | { | ||
192 | return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; | ||
193 | } | ||
194 | |||
170 | struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); | 195 | struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); |
171 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); | 196 | struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); |
172 | 197 | ||
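->queue_rq() now receives a const struct blk_mq_queue_data carrying the request and the "last" hint, and blk_mq_unique_tag() packs the hardware-queue index and tag into one u32. A hedged sketch of a driver adapting to the new prototype; BLK_MQ_RQ_QUEUE_OK and blk_mq_start_request() are assumed from the surrounding blk-mq API of this era:

    /* Sketch only: example_queue_rq is hypothetical. */
    #include <linux/blk-mq.h>
    #include <linux/printk.h>

    static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
    {
        struct request *rq = bd->rq;
        u32 unique = blk_mq_unique_tag(rq);
        u16 hwq = blk_mq_unique_tag_to_hwq(unique);
        u16 tag = blk_mq_unique_tag_to_tag(unique);

        blk_mq_start_request(rq);
        /* bd->last hints that the hardware may be kicked now */
        pr_debug("queueing rq on hwq %u, tag %u, last=%d\n",
                 hwq, tag, bd->last);
        /* ... program the hardware here ... */
        return BLK_MQ_RQ_QUEUE_OK;  /* assumed success code */
    }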
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aac0f9ea952a..92f4b4b288dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -398,7 +398,7 @@ struct request_queue { | |||
398 | */ | 398 | */ |
399 | struct kobject mq_kobj; | 399 | struct kobject mq_kobj; |
400 | 400 | ||
401 | #ifdef CONFIG_PM_RUNTIME | 401 | #ifdef CONFIG_PM |
402 | struct device *dev; | 402 | struct device *dev; |
403 | int rpm_status; | 403 | int rpm_status; |
404 | unsigned int nr_pending; | 404 | unsigned int nr_pending; |
@@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *); | |||
1057 | /* | 1057 | /* |
1058 | * block layer runtime pm functions | 1058 | * block layer runtime pm functions |
1059 | */ | 1059 | */ |
1060 | #ifdef CONFIG_PM_RUNTIME | 1060 | #ifdef CONFIG_PM |
1061 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); | 1061 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); |
1062 | extern int blk_pre_runtime_suspend(struct request_queue *q); | 1062 | extern int blk_pre_runtime_suspend(struct request_queue *q); |
1063 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); | 1063 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); |
@@ -1136,7 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) | |||
1136 | /* | 1136 | /* |
1137 | * tag stuff | 1137 | * tag stuff |
1138 | */ | 1138 | */ |
1139 | #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) | ||
1140 | extern int blk_queue_start_tag(struct request_queue *, struct request *); | 1139 | extern int blk_queue_start_tag(struct request_queue *, struct request *); |
1141 | extern struct request *blk_queue_find_tag(struct request_queue *, int); | 1140 | extern struct request *blk_queue_find_tag(struct request_queue *, int); |
1142 | extern void blk_queue_end_tag(struct request_queue *, struct request *); | 1141 | extern void blk_queue_end_tag(struct request_queue *, struct request *); |
@@ -1185,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | |||
1185 | enum blk_default_limits { | 1184 | enum blk_default_limits { |
1186 | BLK_MAX_SEGMENTS = 128, | 1185 | BLK_MAX_SEGMENTS = 128, |
1187 | BLK_SAFE_MAX_SECTORS = 255, | 1186 | BLK_SAFE_MAX_SECTORS = 255, |
1188 | BLK_DEF_MAX_SECTORS = 1024, | ||
1189 | BLK_MAX_SEGMENT_SIZE = 65536, | 1187 | BLK_MAX_SEGMENT_SIZE = 65536, |
1190 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, | 1188 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
1191 | }; | 1189 | }; |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3cf91754a957..bbfceb756452 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -22,7 +22,7 @@ struct bpf_map_ops { | |||
22 | 22 | ||
23 | /* funcs callable from userspace and from eBPF programs */ | 23 | /* funcs callable from userspace and from eBPF programs */ |
24 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | 24 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); |
25 | int (*map_update_elem)(struct bpf_map *map, void *key, void *value); | 25 | int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); |
26 | int (*map_delete_elem)(struct bpf_map *map, void *key); | 26 | int (*map_delete_elem)(struct bpf_map *map, void *key); |
27 | }; | 27 | }; |
28 | 28 | ||
@@ -128,9 +128,18 @@ struct bpf_prog_aux { | |||
128 | struct work_struct work; | 128 | struct work_struct work; |
129 | }; | 129 | }; |
130 | 130 | ||
131 | #ifdef CONFIG_BPF_SYSCALL | ||
131 | void bpf_prog_put(struct bpf_prog *prog); | 132 | void bpf_prog_put(struct bpf_prog *prog); |
133 | #else | ||
134 | static inline void bpf_prog_put(struct bpf_prog *prog) {} | ||
135 | #endif | ||
132 | struct bpf_prog *bpf_prog_get(u32 ufd); | 136 | struct bpf_prog *bpf_prog_get(u32 ufd); |
133 | /* verify correctness of eBPF program */ | 137 | /* verify correctness of eBPF program */ |
134 | int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); | 138 | int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); |
135 | 139 | ||
140 | /* verifier prototypes for helper functions called from eBPF programs */ | ||
141 | extern struct bpf_func_proto bpf_map_lookup_elem_proto; | ||
142 | extern struct bpf_func_proto bpf_map_update_elem_proto; | ||
143 | extern struct bpf_func_proto bpf_map_delete_elem_proto; | ||
144 | |||
136 | #endif /* _LINUX_BPF_H */ | 145 | #endif /* _LINUX_BPF_H */ |
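The ->map_update_elem() callback gains a u64 flags argument. A sketch of a map implementation honouring it, assuming the BPF_ANY/BPF_NOEXIST/BPF_EXIST uapi values introduced alongside this change:

    /* Sketch only: example_map_update_elem is hypothetical, and the
     * BPF_* flag names are assumed from the matching uapi update. */
    #include <linux/bpf.h>
    #include <linux/errno.h>

    static int example_map_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 flags)
    {
        if (flags > BPF_EXIST)      /* reject unknown flags */
            return -EINVAL;
        /* ... create or replace the element according to flags ... */
        return 0;
    }

    static const struct bpf_map_ops example_map_ops = {
        .map_update_elem = example_map_update_elem,
    };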
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000000..3daf5ed392c9
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,100 @@ | |||
1 | #ifndef _LINUX_CACHEINFO_H | ||
2 | #define _LINUX_CACHEINFO_H | ||
3 | |||
4 | #include <linux/bitops.h> | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/smp.h> | ||
7 | |||
8 | struct device_node; | ||
9 | struct attribute; | ||
10 | |||
11 | enum cache_type { | ||
12 | CACHE_TYPE_NOCACHE = 0, | ||
13 | CACHE_TYPE_INST = BIT(0), | ||
14 | CACHE_TYPE_DATA = BIT(1), | ||
15 | CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, | ||
16 | CACHE_TYPE_UNIFIED = BIT(2), | ||
17 | }; | ||
18 | |||
19 | /** | ||
20 | * struct cacheinfo - represent a cache leaf node | ||
21 | * @type: type of the cache - data, inst or unified | ||
22 | * @level: represents the hierarcy in the multi-level cache | ||
23 | * @coherency_line_size: size of each cache line usually representing | ||
24 | * the minimum amount of data that gets transferred from memory | ||
25 | * @number_of_sets: total number of sets, a set is a collection of cache | ||
26 | * lines sharing the same index | ||
27 | * @ways_of_associativity: number of ways in which a particular memory | ||
28 | * block can be placed in the cache | ||
29 | * @physical_line_partition: number of physical cache lines sharing the | ||
30 | * same cachetag | ||
31 | * @size: Total size of the cache | ||
32 | * @shared_cpu_map: logical cpumask representing all the cpus sharing | ||
33 | * this cache node | ||
34 | * @attributes: bitfield representing various cache attributes | ||
35 | * @of_node: if devicetree is used, this represents either the cpu node in | ||
36 | * case there's no explicit cache node or the cache node itself in the | ||
37 | * device tree | ||
38 | * @disable_sysfs: indicates whether this node is visible to the user via | ||
39 | * sysfs or not | ||
40 | * @priv: pointer to any private data structure specific to particular | ||
41 | * cache design | ||
42 | * | ||
43 | * While @of_node, @disable_sysfs and @priv are used for internal book | ||
44 | * keeping, the remaining members form the core properties of the cache | ||
45 | */ | ||
46 | struct cacheinfo { | ||
47 | enum cache_type type; | ||
48 | unsigned int level; | ||
49 | unsigned int coherency_line_size; | ||
50 | unsigned int number_of_sets; | ||
51 | unsigned int ways_of_associativity; | ||
52 | unsigned int physical_line_partition; | ||
53 | unsigned int size; | ||
54 | cpumask_t shared_cpu_map; | ||
55 | unsigned int attributes; | ||
56 | #define CACHE_WRITE_THROUGH BIT(0) | ||
57 | #define CACHE_WRITE_BACK BIT(1) | ||
58 | #define CACHE_WRITE_POLICY_MASK \ | ||
59 | (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) | ||
60 | #define CACHE_READ_ALLOCATE BIT(2) | ||
61 | #define CACHE_WRITE_ALLOCATE BIT(3) | ||
62 | #define CACHE_ALLOCATE_POLICY_MASK \ | ||
63 | (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) | ||
64 | |||
65 | struct device_node *of_node; | ||
66 | bool disable_sysfs; | ||
67 | void *priv; | ||
68 | }; | ||
69 | |||
70 | struct cpu_cacheinfo { | ||
71 | struct cacheinfo *info_list; | ||
72 | unsigned int num_levels; | ||
73 | unsigned int num_leaves; | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * Helpers to make sure "func" is executed on the cpu whose cache | ||
78 | * attributes are being detected | ||
79 | */ | ||
80 | #define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ | ||
81 | static inline void _##func(void *ret) \ | ||
82 | { \ | ||
83 | int cpu = smp_processor_id(); \ | ||
84 | *(int *)ret = __##func(cpu); \ | ||
85 | } \ | ||
86 | \ | ||
87 | int func(unsigned int cpu) \ | ||
88 | { \ | ||
89 | int ret; \ | ||
90 | smp_call_function_single(cpu, _##func, &ret, true); \ | ||
91 | return ret; \ | ||
92 | } | ||
93 | |||
94 | struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); | ||
95 | int init_cache_level(unsigned int cpu); | ||
96 | int populate_cache_leaves(unsigned int cpu); | ||
97 | |||
98 | const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); | ||
99 | |||
100 | #endif /* _LINUX_CACHEINFO_H */ | ||
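The new <linux/cacheinfo.h> exposes per-CPU cache topology through get_cpu_cacheinfo() once an architecture has implemented init_cache_level() and populate_cache_leaves(). A small sketch that walks the leaves of one CPU; the pr_info() is purely for illustration:

    /* Sketch only: example_dump_caches is hypothetical. */
    #include <linux/cacheinfo.h>
    #include <linux/printk.h>

    static void example_dump_caches(unsigned int cpu)
    {
        struct cpu_cacheinfo *cci = get_cpu_cacheinfo(cpu);
        unsigned int i;

        for (i = 0; i < cci->num_leaves; i++) {
            struct cacheinfo *leaf = &cci->info_list[i];

            pr_info("cpu%u L%u: type %d, %u bytes\n",
                    cpu, leaf->level, leaf->type, leaf->size);
        }
    }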
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index b37ea95bc348..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -127,6 +127,9 @@ void unregister_candev(struct net_device *dev); | |||
127 | int can_restart_now(struct net_device *dev); | 127 | int can_restart_now(struct net_device *dev); |
128 | void can_bus_off(struct net_device *dev); | 128 | void can_bus_off(struct net_device *dev); |
129 | 129 | ||
130 | void can_change_state(struct net_device *dev, struct can_frame *cf, | ||
131 | enum can_state tx_state, enum can_state rx_state); | ||
132 | |||
130 | void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, | 133 | void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, |
131 | unsigned int idx); | 134 | unsigned int idx); |
132 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); | 135 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); |
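can_change_state() centralises the state-transition bookkeeping that CAN drivers used to open-code. A sketch of an error-interrupt path feeding it; the 96/128 thresholds follow the usual CAN error-counter rules, the CAN_STATE_* values come from the uapi netlink header, and the driver specifics are invented:

    /* Sketch only: example_handle_state_irq is hypothetical. */
    #include <linux/can/dev.h>

    static void example_handle_state_irq(struct net_device *dev,
                                         struct can_frame *cf,
                                         u8 txerr, u8 rxerr)
    {
        enum can_state tx_state, rx_state;

        tx_state = txerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
                   txerr >= 96  ? CAN_STATE_ERROR_WARNING :
                                  CAN_STATE_ERROR_ACTIVE;
        rx_state = rxerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
                   rxerr >= 96  ? CAN_STATE_ERROR_WARNING :
                                  CAN_STATE_ERROR_ACTIVE;

        can_change_state(dev, cf, tx_state, rx_state);
    }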
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1d5196889048..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) | |||
113 | } | 113 | } |
114 | 114 | ||
115 | /** | 115 | /** |
116 | * css_get_many - obtain references on the specified css | ||
117 | * @css: target css | ||
118 | * @n: number of references to get | ||
119 | * | ||
120 | * The caller must already have a reference. | ||
121 | */ | ||
122 | static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) | ||
123 | { | ||
124 | if (!(css->flags & CSS_NO_REF)) | ||
125 | percpu_ref_get_many(&css->refcnt, n); | ||
126 | } | ||
127 | |||
128 | /** | ||
116 | * css_tryget - try to obtain a reference on the specified css | 129 | * css_tryget - try to obtain a reference on the specified css |
117 | * @css: target css | 130 | * @css: target css |
118 | * | 131 | * |
@@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) | |||
159 | percpu_ref_put(&css->refcnt); | 172 | percpu_ref_put(&css->refcnt); |
160 | } | 173 | } |
161 | 174 | ||
175 | /** | ||
176 | * css_put_many - put css references | ||
177 | * @css: target css | ||
178 | * @n: number of references to put | ||
179 | * | ||
180 | * Put references obtained via css_get() and css_tryget_online(). | ||
181 | */ | ||
182 | static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) | ||
183 | { | ||
184 | if (!(css->flags & CSS_NO_REF)) | ||
185 | percpu_ref_put_many(&css->refcnt, n); | ||
186 | } | ||
187 | |||
162 | /* bits in struct cgroup flags field */ | 188 | /* bits in struct cgroup flags field */ |
163 | enum { | 189 | enum { |
164 | /* Control Group requires release notifications to userspace */ | 190 | /* Control Group requires release notifications to userspace */ |
@@ -367,8 +393,8 @@ struct css_set { | |||
367 | * struct cftype: handler definitions for cgroup control files | 393 | * struct cftype: handler definitions for cgroup control files |
368 | * | 394 | * |
369 | * When reading/writing to a file: | 395 | * When reading/writing to a file: |
370 | * - the cgroup to use is file->f_dentry->d_parent->d_fsdata | 396 | * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata |
371 | * - the 'cftype' of the file is file->f_dentry->d_fsdata | 397 | * - the 'cftype' of the file is file->f_path.dentry->d_fsdata |
372 | */ | 398 | */ |
373 | 399 | ||
374 | /* cftype->flags */ | 400 | /* cftype->flags */ |
@@ -612,8 +638,10 @@ struct cgroup_subsys { | |||
612 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); | 638 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); |
613 | int (*css_online)(struct cgroup_subsys_state *css); | 639 | int (*css_online)(struct cgroup_subsys_state *css); |
614 | void (*css_offline)(struct cgroup_subsys_state *css); | 640 | void (*css_offline)(struct cgroup_subsys_state *css); |
641 | void (*css_released)(struct cgroup_subsys_state *css); | ||
615 | void (*css_free)(struct cgroup_subsys_state *css); | 642 | void (*css_free)(struct cgroup_subsys_state *css); |
616 | void (*css_reset)(struct cgroup_subsys_state *css); | 643 | void (*css_reset)(struct cgroup_subsys_state *css); |
644 | void (*css_e_css_changed)(struct cgroup_subsys_state *css); | ||
617 | 645 | ||
618 | int (*can_attach)(struct cgroup_subsys_state *css, | 646 | int (*can_attach)(struct cgroup_subsys_state *css, |
619 | struct cgroup_taskset *tset); | 647 | struct cgroup_taskset *tset); |
@@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it); | |||
908 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); | 936 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
909 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); | 937 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); |
910 | 938 | ||
939 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, | ||
940 | struct cgroup_subsys *ss); | ||
911 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | 941 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, |
912 | struct cgroup_subsys *ss); | 942 | struct cgroup_subsys *ss); |
913 | 943 | ||
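css_get_many()/css_put_many() batch reference counting for callers that charge many items against one css at a time. A sketch of paired usage; the charge/uncharge wrappers are hypothetical:

    /* Sketch only: example_charge/example_uncharge are hypothetical. */
    #include <linux/cgroup.h>

    static void example_charge(struct cgroup_subsys_state *css,
                               unsigned int nr_items)
    {
        css_get_many(css, nr_items);
        /* ... account nr_items against css ... */
    }

    static void example_uncharge(struct cgroup_subsys_state *css,
                                 unsigned int nr_items)
    {
        /* ... release the accounting ... */
        css_put_many(css, nr_items);
    }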
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index be21af149f11..2839c639f092 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -352,7 +352,6 @@ struct clk_divider { | |||
352 | #define CLK_DIVIDER_READ_ONLY BIT(5) | 352 | #define CLK_DIVIDER_READ_ONLY BIT(5) |
353 | 353 | ||
354 | extern const struct clk_ops clk_divider_ops; | 354 | extern const struct clk_ops clk_divider_ops; |
355 | extern const struct clk_ops clk_divider_ro_ops; | ||
356 | struct clk *clk_register_divider(struct device *dev, const char *name, | 355 | struct clk *clk_register_divider(struct device *dev, const char *name, |
357 | const char *parent_name, unsigned long flags, | 356 | const char *parent_name, unsigned long flags, |
358 | void __iomem *reg, u8 shift, u8 width, | 357 | void __iomem *reg, u8 shift, u8 width, |
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index f75acbf70e96..74e5341463c9 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops; | |||
254 | void omap2_init_clk_hw_omap_clocks(struct clk *clk); | 254 | void omap2_init_clk_hw_omap_clocks(struct clk *clk); |
255 | int omap3_noncore_dpll_enable(struct clk_hw *hw); | 255 | int omap3_noncore_dpll_enable(struct clk_hw *hw); |
256 | void omap3_noncore_dpll_disable(struct clk_hw *hw); | 256 | void omap3_noncore_dpll_disable(struct clk_hw *hw); |
257 | int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index); | ||
257 | int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, | 258 | int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, |
258 | unsigned long parent_rate); | 259 | unsigned long parent_rate); |
260 | int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, | ||
261 | unsigned long rate, | ||
262 | unsigned long parent_rate, | ||
263 | u8 index); | ||
264 | long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, | ||
265 | unsigned long rate, | ||
266 | unsigned long *best_parent_rate, | ||
267 | struct clk **best_parent_clk); | ||
259 | unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, | 268 | unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, |
260 | unsigned long parent_rate); | 269 | unsigned long parent_rate); |
261 | long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, | 270 | long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, |
262 | unsigned long target_rate, | 271 | unsigned long target_rate, |
263 | unsigned long *parent_rate); | 272 | unsigned long *parent_rate); |
273 | long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, | ||
274 | unsigned long rate, | ||
275 | unsigned long *best_parent_rate, | ||
276 | struct clk **best_parent_clk); | ||
264 | u8 omap2_init_dpll_parent(struct clk_hw *hw); | 277 | u8 omap2_init_dpll_parent(struct clk_hw *hw); |
265 | unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); | 278 | unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); |
266 | long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, | 279 | long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, |
@@ -278,6 +291,8 @@ int omap2_clk_disable_autoidle_all(void); | |||
278 | void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); | 291 | void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); |
279 | int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, | 292 | int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, |
280 | unsigned long parent_rate); | 293 | unsigned long parent_rate); |
294 | int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, | ||
295 | unsigned long parent_rate, u8 index); | ||
281 | int omap2_dflt_clk_enable(struct clk_hw *hw); | 296 | int omap2_dflt_clk_enable(struct clk_hw *hw); |
282 | void omap2_dflt_clk_disable(struct clk_hw *hw); | 297 | void omap2_dflt_clk_disable(struct clk_hw *hw); |
283 | int omap2_dflt_clk_is_enabled(struct clk_hw *hw); | 298 | int omap2_dflt_clk_is_enabled(struct clk_hw *hw); |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 60bdf8dc02a3..3238ffa33f68 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); | |||
33 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, | 33 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, |
34 | int order, gfp_t gfp_mask, nodemask_t *mask, | 34 | int order, gfp_t gfp_mask, nodemask_t *mask, |
35 | enum migrate_mode mode, int *contended, | 35 | enum migrate_mode mode, int *contended, |
36 | struct zone **candidate_zone); | 36 | int alloc_flags, int classzone_idx); |
37 | extern void compact_pgdat(pg_data_t *pgdat, int order); | 37 | extern void compact_pgdat(pg_data_t *pgdat, int order); |
38 | extern void reset_isolation_suitable(pg_data_t *pgdat); | 38 | extern void reset_isolation_suitable(pg_data_t *pgdat); |
39 | extern unsigned long compaction_suitable(struct zone *zone, int order); | 39 | extern unsigned long compaction_suitable(struct zone *zone, int order, |
40 | int alloc_flags, int classzone_idx); | ||
40 | 41 | ||
41 | /* Do not skip compaction more than 64 times */ | 42 | /* Do not skip compaction more than 64 times */ |
42 | #define COMPACT_MAX_DEFER_SHIFT 6 | 43 | #define COMPACT_MAX_DEFER_SHIFT 6 |
@@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) | |||
103 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, | 104 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, |
104 | int order, gfp_t gfp_mask, nodemask_t *nodemask, | 105 | int order, gfp_t gfp_mask, nodemask_t *nodemask, |
105 | enum migrate_mode mode, int *contended, | 106 | enum migrate_mode mode, int *contended, |
106 | struct zone **candidate_zone) | 107 | int alloc_flags, int classzone_idx) |
107 | { | 108 | { |
108 | return COMPACT_CONTINUE; | 109 | return COMPACT_CONTINUE; |
109 | } | 110 | } |
@@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) | |||
116 | { | 117 | { |
117 | } | 118 | } |
118 | 119 | ||
119 | static inline unsigned long compaction_suitable(struct zone *zone, int order) | 120 | static inline unsigned long compaction_suitable(struct zone *zone, int order, |
121 | int alloc_flags, int classzone_idx) | ||
120 | { | 122 | { |
121 | return COMPACT_SKIPPED; | 123 | return COMPACT_SKIPPED; |
122 | } | 124 | } |
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e6494261eaff..7450ca2ac1fc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); | |||
357 | 357 | ||
358 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, | 358 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, |
359 | const compat_uptr_t __user *envp); | 359 | const compat_uptr_t __user *envp); |
360 | asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, | ||
361 | const compat_uptr_t __user *argv, | ||
362 | const compat_uptr_t __user *envp, int flags); | ||
360 | 363 | ||
361 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, | 364 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, |
362 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, | 365 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, |
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000000..5d3c54311f7a
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,263 @@ | |||
1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #ifndef _LINUX_CORESIGHT_H | ||
14 | #define _LINUX_CORESIGHT_H | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | |||
18 | /* Peripheral id registers (0xFD0-0xFEC) */ | ||
19 | #define CORESIGHT_PERIPHIDR4 0xfd0 | ||
20 | #define CORESIGHT_PERIPHIDR5 0xfd4 | ||
21 | #define CORESIGHT_PERIPHIDR6 0xfd8 | ||
22 | #define CORESIGHT_PERIPHIDR7 0xfdC | ||
23 | #define CORESIGHT_PERIPHIDR0 0xfe0 | ||
24 | #define CORESIGHT_PERIPHIDR1 0xfe4 | ||
25 | #define CORESIGHT_PERIPHIDR2 0xfe8 | ||
26 | #define CORESIGHT_PERIPHIDR3 0xfeC | ||
27 | /* Component id registers (0xFF0-0xFFC) */ | ||
28 | #define CORESIGHT_COMPIDR0 0xff0 | ||
29 | #define CORESIGHT_COMPIDR1 0xff4 | ||
30 | #define CORESIGHT_COMPIDR2 0xff8 | ||
31 | #define CORESIGHT_COMPIDR3 0xffC | ||
32 | |||
33 | #define ETM_ARCH_V3_3 0x23 | ||
34 | #define ETM_ARCH_V3_5 0x25 | ||
35 | #define PFT_ARCH_V1_0 0x30 | ||
36 | #define PFT_ARCH_V1_1 0x31 | ||
37 | |||
38 | #define CORESIGHT_UNLOCK 0xc5acce55 | ||
39 | |||
40 | extern struct bus_type coresight_bustype; | ||
41 | |||
42 | enum coresight_dev_type { | ||
43 | CORESIGHT_DEV_TYPE_NONE, | ||
44 | CORESIGHT_DEV_TYPE_SINK, | ||
45 | CORESIGHT_DEV_TYPE_LINK, | ||
46 | CORESIGHT_DEV_TYPE_LINKSINK, | ||
47 | CORESIGHT_DEV_TYPE_SOURCE, | ||
48 | }; | ||
49 | |||
50 | enum coresight_dev_subtype_sink { | ||
51 | CORESIGHT_DEV_SUBTYPE_SINK_NONE, | ||
52 | CORESIGHT_DEV_SUBTYPE_SINK_PORT, | ||
53 | CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, | ||
54 | }; | ||
55 | |||
56 | enum coresight_dev_subtype_link { | ||
57 | CORESIGHT_DEV_SUBTYPE_LINK_NONE, | ||
58 | CORESIGHT_DEV_SUBTYPE_LINK_MERG, | ||
59 | CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, | ||
60 | CORESIGHT_DEV_SUBTYPE_LINK_FIFO, | ||
61 | }; | ||
62 | |||
63 | enum coresight_dev_subtype_source { | ||
64 | CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, | ||
65 | CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, | ||
66 | CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, | ||
67 | CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, | ||
68 | }; | ||
69 | |||
70 | /** | ||
71 | * struct coresight_dev_subtype - further characterisation of a type | ||
72 | * @sink_subtype: type of sink this component is, as defined | ||
73 | by @coresight_dev_subtype_sink. | ||
74 | * @link_subtype: type of link this component is, as defined | ||
75 | by @coresight_dev_subtype_link. | ||
76 | * @source_subtype: type of source this component is, as defined | ||
77 | by @coresight_dev_subtype_source. | ||
78 | */ | ||
79 | struct coresight_dev_subtype { | ||
80 | enum coresight_dev_subtype_sink sink_subtype; | ||
81 | enum coresight_dev_subtype_link link_subtype; | ||
82 | enum coresight_dev_subtype_source source_subtype; | ||
83 | }; | ||
84 | |||
85 | /** | ||
86 | * struct coresight_platform_data - data harvested from the DT specification | ||
87 | * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. | ||
88 | * @name: name of the component as shown under sysfs. | ||
89 | * @nr_inport: number of input ports for this component. | ||
90 | * @outports: list of remote endpoint port number. | ||
91 | * @child_names:name of all child components connected to this device. | ||
92 | * @child_ports:child component port number the current component is | ||
93 | connected to. | ||
94 | * @nr_outport: number of output ports for this component. | ||
95 | * @clk: The clock this component is associated to. | ||
96 | */ | ||
97 | struct coresight_platform_data { | ||
98 | int cpu; | ||
99 | const char *name; | ||
100 | int nr_inport; | ||
101 | int *outports; | ||
102 | const char **child_names; | ||
103 | int *child_ports; | ||
104 | int nr_outport; | ||
105 | struct clk *clk; | ||
106 | }; | ||
107 | |||
108 | /** | ||
109 | * struct coresight_desc - description of a component required from drivers | ||
110 | * @type: as defined by @coresight_dev_type. | ||
111 | * @subtype: as defined by @coresight_dev_subtype. | ||
112 | * @ops: generic operations for this component, as defined | ||
113 | by @coresight_ops. | ||
114 | * @pdata: platform data collected from DT. | ||
115 | * @dev: The device entity associated to this component. | ||
116 | * @groups: operations specific to this component. These will end up | ||
117 | in the component's sysfs sub-directory. | ||
118 | */ | ||
119 | struct coresight_desc { | ||
120 | enum coresight_dev_type type; | ||
121 | struct coresight_dev_subtype subtype; | ||
122 | const struct coresight_ops *ops; | ||
123 | struct coresight_platform_data *pdata; | ||
124 | struct device *dev; | ||
125 | const struct attribute_group **groups; | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * struct coresight_connection - representation of a single connection | ||
130 | * @outport: a connection's output port number. | ||
131 | * @chid_name: remote component's name. | ||
132 | * @child_port: remote component's port number @output is connected to. | ||
133 | * @child_dev: a @coresight_device representation of the component | ||
134 | connected to @outport. | ||
135 | */ | ||
136 | struct coresight_connection { | ||
137 | int outport; | ||
138 | const char *child_name; | ||
139 | int child_port; | ||
140 | struct coresight_device *child_dev; | ||
141 | }; | ||
142 | |||
143 | /** | ||
144 | * struct coresight_device - representation of a device as used by the framework | ||
145 | * @conns: array of coresight_connections associated to this component. | ||
146 | * @nr_inport: number of input ports associated to this component. | ||
147 | * @nr_outport: number of output ports associated to this component. | ||
148 | * @type: as defined by @coresight_dev_type. | ||
149 | * @subtype: as defined by @coresight_dev_subtype. | ||
150 | * @ops: generic operations for this component, as defined | ||
151 | by @coresight_ops. | ||
152 | * @dev: The device entity associated to this component. | ||
153 | * @refcnt: keep track of what is in use. | ||
154 | * @path_link: link of current component into the path being enabled. | ||
155 | * @orphan: true if the component has connections that haven't been linked. | ||
156 | * @enable: 'true' if component is currently part of an active path. | ||
157 | * @activated: 'true' only if a _sink_ has been activated. A sink can be | ||
158 | activated but not yet enabled. Enabling for a _sink_ | ||
159 | happens when a source has been selected for that sink. | ||
160 | */ | ||
161 | struct coresight_device { | ||
162 | struct coresight_connection *conns; | ||
163 | int nr_inport; | ||
164 | int nr_outport; | ||
165 | enum coresight_dev_type type; | ||
166 | struct coresight_dev_subtype subtype; | ||
167 | const struct coresight_ops *ops; | ||
168 | struct device dev; | ||
169 | atomic_t *refcnt; | ||
170 | struct list_head path_link; | ||
171 | bool orphan; | ||
172 | bool enable; /* true only if configured as part of a path */ | ||
173 | bool activated; /* true only if a sink is part of a path */ | ||
174 | }; | ||
175 | |||
176 | #define to_coresight_device(d) container_of(d, struct coresight_device, dev) | ||
177 | |||
178 | #define source_ops(csdev) csdev->ops->source_ops | ||
179 | #define sink_ops(csdev) csdev->ops->sink_ops | ||
180 | #define link_ops(csdev) csdev->ops->link_ops | ||
181 | |||
182 | #define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \ | ||
183 | __mode, __get, __set, __fmt) \ | ||
184 | DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \ | ||
185 | static const struct coresight_ops_entry __name ## _entry = { \ | ||
186 | .name = __entry_name, \ | ||
187 | .mode = __mode, \ | ||
188 | .ops = &__name ## _ops \ | ||
189 | } | ||
190 | |||
191 | /** | ||
192 | * struct coresight_ops_sink - basic operations for a sink | ||
193 | * Operations available for sinks | ||
194 | * @enable: enables the sink. | ||
195 | * @disable: disables the sink. | ||
196 | */ | ||
197 | struct coresight_ops_sink { | ||
198 | int (*enable)(struct coresight_device *csdev); | ||
199 | void (*disable)(struct coresight_device *csdev); | ||
200 | }; | ||
201 | |||
202 | /** | ||
203 | * struct coresight_ops_link - basic operations for a link | ||
204 | * Operations available for links. | ||
205 | * @enable: enables flow between iport and oport. | ||
206 | * @disable: disables flow between iport and oport. | ||
207 | */ | ||
208 | struct coresight_ops_link { | ||
209 | int (*enable)(struct coresight_device *csdev, int iport, int oport); | ||
210 | void (*disable)(struct coresight_device *csdev, int iport, int oport); | ||
211 | }; | ||
212 | |||
213 | /** | ||
214 | * struct coresight_ops_source - basic operations for a source | ||
215 | * Operations available for sources. | ||
216 | * @trace_id: returns the value of the component's trace ID as known | ||
217 | to the HW. | ||
218 | * @enable: enables tracing from a source. | ||
219 | * @disable: disables tracing for a source. | ||
220 | */ | ||
221 | struct coresight_ops_source { | ||
222 | int (*trace_id)(struct coresight_device *csdev); | ||
223 | int (*enable)(struct coresight_device *csdev); | ||
224 | void (*disable)(struct coresight_device *csdev); | ||
225 | }; | ||
226 | |||
227 | struct coresight_ops { | ||
228 | const struct coresight_ops_sink *sink_ops; | ||
229 | const struct coresight_ops_link *link_ops; | ||
230 | const struct coresight_ops_source *source_ops; | ||
231 | }; | ||
232 | |||
233 | #ifdef CONFIG_CORESIGHT | ||
234 | extern struct coresight_device * | ||
235 | coresight_register(struct coresight_desc *desc); | ||
236 | extern void coresight_unregister(struct coresight_device *csdev); | ||
237 | extern int coresight_enable(struct coresight_device *csdev); | ||
238 | extern void coresight_disable(struct coresight_device *csdev); | ||
239 | extern int coresight_is_bit_set(u32 val, int position, int value); | ||
240 | extern int coresight_timeout(void __iomem *addr, u32 offset, | ||
241 | int position, int value); | ||
242 | #ifdef CONFIG_OF | ||
243 | extern struct coresight_platform_data *of_get_coresight_platform_data( | ||
244 | struct device *dev, struct device_node *node); | ||
245 | #endif | ||
246 | #else | ||
247 | static inline struct coresight_device * | ||
248 | coresight_register(struct coresight_desc *desc) { return NULL; } | ||
249 | static inline void coresight_unregister(struct coresight_device *csdev) {} | ||
250 | static inline int | ||
251 | coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } | ||
252 | static inline void coresight_disable(struct coresight_device *csdev) {} | ||
253 | static inline int coresight_is_bit_set(u32 val, int position, int value) | ||
254 | { return 0; } | ||
255 | static inline int coresight_timeout(void __iomem *addr, u32 offset, | ||
256 | int position, int value) { return 1; } | ||
257 | #ifdef CONFIG_OF | ||
258 | static inline struct coresight_platform_data *of_get_coresight_platform_data( | ||
259 | struct device *dev, struct device_node *node) { return NULL; } | ||
260 | #endif | ||
261 | #endif | ||
262 | |||
263 | #endif | ||
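Editor's note: the coresight.h API above is used by component drivers through coresight_register()/coresight_unregister(). A minimal, hedged sketch of a sink driver's registration follows; the "example" names and the sink type/subtype enumerators (defined earlier in this header, not shown in this hunk) are assumptions, and the ops bodies are placeholders rather than a real implementation.

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical sink callbacks; a real driver programs its trace buffer here. */
static int example_sink_enable(struct coresight_device *csdev)
{
	/* start collecting trace data into the hypothetical sink */
	return 0;
}

static void example_sink_disable(struct coresight_device *csdev)
{
	/* stop the hypothetical sink */
}

static const struct coresight_ops_sink example_sink_ops = {
	.enable		= example_sink_enable,
	.disable	= example_sink_disable,
};

static const struct coresight_ops example_cs_ops = {
	.sink_ops	= &example_sink_ops,
};

static struct coresight_device *example_register(struct device *dev,
						 struct coresight_platform_data *pdata)
{
	struct coresight_desc desc = {
		.type		= CORESIGHT_DEV_TYPE_SINK,
		.subtype	= { .sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER },
		.ops		= &example_cs_ops,
		.pdata		= pdata,
		.dev		= dev,
	};

	/* Per the stubs above, this returns NULL when CONFIG_CORESIGHT is not set. */
	return coresight_register(&desc);
}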
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b2d9a43012b2..4260e8594bd7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | struct device; | 20 | struct device; |
21 | struct device_node; | 21 | struct device_node; |
22 | struct attribute_group; | ||
22 | 23 | ||
23 | struct cpu { | 24 | struct cpu { |
24 | int node_id; /* The node which contains the CPU */ | 25 | int node_id; /* The node which contains the CPU */ |
@@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); | |||
39 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); | 40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); |
40 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); | 41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
41 | 42 | ||
43 | extern struct device *cpu_device_create(struct device *parent, void *drvdata, | ||
44 | const struct attribute_group **groups, | ||
45 | const char *fmt, ...); | ||
42 | #ifdef CONFIG_HOTPLUG_CPU | 46 | #ifdef CONFIG_HOTPLUG_CPU |
43 | extern void unregister_cpu(struct cpu *cpu); | 47 | extern void unregister_cpu(struct cpu *cpu); |
44 | extern ssize_t arch_cpu_probe(const char *, size_t); | 48 | extern ssize_t arch_cpu_probe(const char *, size_t); |
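Editor's note: cpu_device_create() added above lets a subsystem hang an auxiliary device, with its own sysfs attribute groups, off an existing CPU device. A hedged sketch of a caller is shown below; the "example" attribute, group, and device name are illustrative only.

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical read-only attribute exposed by the new per-CPU child device. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static const struct attribute_group example_group = {
	.attrs = example_attrs,
};
static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

static struct device *example_add_cpu_child(unsigned int cpu, void *drvdata)
{
	struct device *parent = get_cpu_device(cpu);

	if (!parent)
		return ERR_PTR(-ENODEV);

	/* Creates "exampleN" under the CPU device with example_groups attached. */
	return cpu_device_create(parent, drvdata, example_groups, "example%u", cpu);
}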
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 503b085b7832..4d078cebafd2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name) | |||
217 | 217 | ||
218 | 218 | ||
219 | struct cpufreq_driver { | 219 | struct cpufreq_driver { |
220 | char name[CPUFREQ_NAME_LEN]; | 220 | char name[CPUFREQ_NAME_LEN]; |
221 | u8 flags; | 221 | u8 flags; |
222 | void *driver_data; | 222 | void *driver_data; |
223 | 223 | ||
224 | /* needed by all drivers */ | 224 | /* needed by all drivers */ |
225 | int (*init) (struct cpufreq_policy *policy); | 225 | int (*init)(struct cpufreq_policy *policy); |
226 | int (*verify) (struct cpufreq_policy *policy); | 226 | int (*verify)(struct cpufreq_policy *policy); |
227 | 227 | ||
228 | /* define one out of two */ | 228 | /* define one out of two */ |
229 | int (*setpolicy) (struct cpufreq_policy *policy); | 229 | int (*setpolicy)(struct cpufreq_policy *policy); |
230 | 230 | ||
231 | /* | 231 | /* |
232 | * On failure, should always restore frequency to policy->restore_freq | 232 | * On failure, should always restore frequency to policy->restore_freq |
233 | * (i.e. old freq). | 233 | * (i.e. old freq). |
234 | */ | 234 | */ |
235 | int (*target) (struct cpufreq_policy *policy, /* Deprecated */ | 235 | int (*target)(struct cpufreq_policy *policy, |
236 | unsigned int target_freq, | 236 | unsigned int target_freq, |
237 | unsigned int relation); | 237 | unsigned int relation); /* Deprecated */ |
238 | int (*target_index) (struct cpufreq_policy *policy, | 238 | int (*target_index)(struct cpufreq_policy *policy, |
239 | unsigned int index); | 239 | unsigned int index); |
240 | /* | 240 | /* |
241 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION | 241 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION |
242 | * unset. | 242 | * unset. |
@@ -252,27 +252,31 @@ struct cpufreq_driver { | |||
252 | * wish to switch to intermediate frequency for some target frequency. | 252 | * wish to switch to intermediate frequency for some target frequency. |
253 | * In that case core will directly call ->target_index(). | 253 | * In that case core will directly call ->target_index(). |
254 | */ | 254 | */ |
255 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, | 255 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, |
256 | unsigned int index); | 256 | unsigned int index); |
257 | int (*target_intermediate)(struct cpufreq_policy *policy, | 257 | int (*target_intermediate)(struct cpufreq_policy *policy, |
258 | unsigned int index); | 258 | unsigned int index); |
259 | 259 | ||
260 | /* should be defined, if possible */ | 260 | /* should be defined, if possible */ |
261 | unsigned int (*get) (unsigned int cpu); | 261 | unsigned int (*get)(unsigned int cpu); |
262 | 262 | ||
263 | /* optional */ | 263 | /* optional */ |
264 | int (*bios_limit) (int cpu, unsigned int *limit); | 264 | int (*bios_limit)(int cpu, unsigned int *limit); |
265 | |||
266 | int (*exit)(struct cpufreq_policy *policy); | ||
267 | void (*stop_cpu)(struct cpufreq_policy *policy); | ||
268 | int (*suspend)(struct cpufreq_policy *policy); | ||
269 | int (*resume)(struct cpufreq_policy *policy); | ||
270 | |||
271 | /* Will be called after the driver is fully initialized */ | ||
272 | void (*ready)(struct cpufreq_policy *policy); | ||
265 | 273 | ||
266 | int (*exit) (struct cpufreq_policy *policy); | 274 | struct freq_attr **attr; |
267 | void (*stop_cpu) (struct cpufreq_policy *policy); | ||
268 | int (*suspend) (struct cpufreq_policy *policy); | ||
269 | int (*resume) (struct cpufreq_policy *policy); | ||
270 | struct freq_attr **attr; | ||
271 | 275 | ||
272 | /* platform specific boost support code */ | 276 | /* platform specific boost support code */ |
273 | bool boost_supported; | 277 | bool boost_supported; |
274 | bool boost_enabled; | 278 | bool boost_enabled; |
275 | int (*set_boost) (int state); | 279 | int (*set_boost)(int state); |
276 | }; | 280 | }; |
277 | 281 | ||
278 | /* flags */ | 282 | /* flags */ |
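Editor's note: besides the whitespace cleanup, the cpufreq_driver hunk above adds a ->ready() callback invoked after a policy is fully initialized. A hedged sketch of a driver structure using it follows; all "example" callbacks are placeholders that a real driver replaces with hardware programming.

#include <linux/cpufreq.h>

static int example_init(struct cpufreq_policy *policy)
{
	/* fill in policy->cpuinfo and the frequency table for this CPU */
	return 0;
}

static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);	/* clamp to cpuinfo min/max */
	return 0;
}

static int example_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* switch to the frequency at @index in the driver's table */
	return 0;
}

static unsigned int example_get(unsigned int cpu)
{
	return 0;	/* would read the current frequency from hardware */
}

static void example_ready(struct cpufreq_policy *policy)
{
	/* runs once the policy is fully set up, e.g. to register a cooling device */
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.init		= example_init,
	.verify		= example_verify,
	.target_index	= example_target_index,
	.get		= example_get,
	.ready		= example_ready,
	.attr		= cpufreq_generic_attr,
};

static int __init example_cpufreq_init(void)
{
	return cpufreq_register_driver(&example_driver);
}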
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 25e0df6155a4..a07e087f54b2 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -53,7 +53,7 @@ struct cpuidle_state { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* Idle State Flags */ | 55 | /* Idle State Flags */ |
56 | #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ | 56 | #define CPUIDLE_FLAG_TIME_INVALID (0x01) /* residency time may not be measurable */ |
57 | #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ | 57 | #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ |
58 | #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ | 58 | #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ |
59 | 59 | ||
@@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); | |||
90 | * cpuidle_get_last_residency - retrieves the last state's residency time | 90 | * cpuidle_get_last_residency - retrieves the last state's residency time |
91 | * @dev: the target CPU | 91 | * @dev: the target CPU |
92 | * | 92 | * |
93 | * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set | 93 | * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set |
94 | */ | 94 | */ |
95 | static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | 95 | static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) |
96 | { | 96 | { |
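Editor's note: with the flag inverted from TIME_VALID to TIME_INVALID above, the polarity of state tables and residency checks changes: most states now set no time flag, and only states whose residency cannot be trusted set CPUIDLE_FLAG_TIME_INVALID. A hedged sketch with a made-up state table:

#include <linux/cpuidle.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* enter the hypothetical low-power state, then report which state ran */
	return index;
}

/* Hypothetical idle states: only the unmeasurable one carries a time flag now. */
static struct cpuidle_state example_states[] = {
	{
		.name			= "C1",
		.desc			= "example clock-gated state",
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= 0,	/* residency is measurable */
		.enter			= example_enter,
	},
	{
		.name			= "C2-fw",
		.desc			= "example firmware-managed state",
		.exit_latency		= 50,
		.target_residency	= 100,
		.flags			= CPUIDLE_FLAG_TIME_INVALID,
		.enter			= example_enter,
	},
};

static int example_last_residency(struct cpuidle_device *dev, int index)
{
	/* cpuidle_get_last_residency() is meaningless when TIME_INVALID is set */
	if (example_states[index].flags & CPUIDLE_FLAG_TIME_INVALID)
		return -1;
	return cpuidle_get_last_residency(dev);
}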
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0a9a6da21e74..b950e9d6008b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -803,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) | |||
803 | } | 803 | } |
804 | #endif /* NR_CPUS > BITS_PER_LONG */ | 804 | #endif /* NR_CPUS > BITS_PER_LONG */ |
805 | 805 | ||
806 | /** | ||
807 | * cpumap_print_to_pagebuf - copies the cpumask into the buffer either | ||
808 | * as a comma-separated list of cpus or as hex values of the cpumask | ||
809 | * @list: indicates whether the cpumask must be printed as a list | ||
810 | * @mask: the cpumask to copy | ||
811 | * @buf: the buffer to copy into | ||
812 | * | ||
813 | * Returns the length of the (null-terminated) @buf string, zero if | ||
814 | * nothing is copied. | ||
815 | */ | ||
816 | static inline ssize_t | ||
817 | cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) | ||
818 | { | ||
819 | return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), | ||
820 | nr_cpumask_bits); | ||
821 | } | ||
822 | |||
806 | /* | 823 | /* |
807 | * | 824 | * |
808 | * From here down, all obsolete. Use cpumask_ variants! | 825 | * From here down, all obsolete. Use cpumask_ variants! |
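Editor's note: the new cpumap_print_to_pagebuf() wrapper above targets sysfs show routines that expose a cpumask either as a list ("0-3,8") or as a hex bitmap. A hedged sketch of both uses, with a hypothetical driver-owned mask:

#include <linux/cpumask.h>
#include <linux/device.h>

/* Hypothetical mask owned by the driver. */
static struct cpumask example_mask;

/* "cpulist" attribute: human-readable list form, e.g. "0-3,8". */
static ssize_t cpulist_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &example_mask);
}

/* "cpumap" attribute: hex bitmap form, e.g. "0000010f". */
static ssize_t cpumap_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return cpumap_print_to_pagebuf(false, buf, &example_mask);
}

static DEVICE_ATTR_RO(cpulist);
static DEVICE_ATTR_RO(cpumap);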
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 2f073db7392e..1b357997cac5 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | |||
48 | void cpuset_init_current_mems_allowed(void); | 48 | void cpuset_init_current_mems_allowed(void); |
49 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); | 49 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); |
50 | 50 | ||
51 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); | 51 | extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); |
52 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | ||
53 | 52 | ||
54 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 53 | static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) |
55 | { | 54 | { |
56 | return nr_cpusets() <= 1 || | 55 | return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); |
57 | __cpuset_node_allowed_softwall(node, gfp_mask); | ||
58 | } | 56 | } |
59 | 57 | ||
60 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | 58 | static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
61 | { | 59 | { |
62 | return nr_cpusets() <= 1 || | 60 | return cpuset_node_allowed(zone_to_nid(z), gfp_mask); |
63 | __cpuset_node_allowed_hardwall(node, gfp_mask); | ||
64 | } | ||
65 | |||
66 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
67 | { | ||
68 | return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | ||
69 | } | ||
70 | |||
71 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | ||
72 | { | ||
73 | return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | ||
74 | } | 61 | } |
75 | 62 | ||
76 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | 63 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
@@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |||
179 | return 1; | 166 | return 1; |
180 | } | 167 | } |
181 | 168 | ||
182 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 169 | static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) |
183 | { | ||
184 | return 1; | ||
185 | } | ||
186 | |||
187 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | ||
188 | { | ||
189 | return 1; | ||
190 | } | ||
191 | |||
192 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
193 | { | 170 | { |
194 | return 1; | 171 | return 1; |
195 | } | 172 | } |
196 | 173 | ||
197 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | 174 | static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
198 | { | 175 | { |
199 | return 1; | 176 | return 1; |
200 | } | 177 | } |
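Editor's note: the cpuset.h hunk above collapses the softwall/hardwall pair into a single cpuset_node_allowed()/cpuset_zone_allowed() predicate. A hedged sketch of what an allocator-side call site looks like after the change; the surrounding helper is illustrative, not taken from this patch, and the gfp-based hardwall selection (e.g. __GFP_HARDWALL) is an assumption about how the consolidated helper is driven.

#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Illustrative check before taking pages from a zone. */
static bool example_zone_usable(struct zone *zone, gfp_t gfp_mask)
{
	/*
	 * Old code picked between cpuset_zone_allowed_softwall() and
	 * cpuset_zone_allowed_hardwall(); with this change there is a
	 * single predicate, and stricter (hardwall) behaviour is
	 * requested through gfp_mask instead of a separate function.
	 */
	return cpuset_zone_allowed(zone, gfp_mask);
}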
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d45e949699ea..9c8776d0ada8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -26,6 +26,19 @@ | |||
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Autoloaded crypto modules should only use a prefixed name to avoid allowing | ||
30 | * arbitrary modules to be loaded. Loading from userspace may still need the | ||
31 | * unprefixed names, so this macro retains those aliases as well. | ||
32 | * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 | ||
33 | * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro | ||
34 | * expands twice on the same line. Instead, use a separate base name for the | ||
35 | * alias. | ||
36 | */ | ||
37 | #define MODULE_ALIAS_CRYPTO(name) \ | ||
38 | __MODULE_INFO(alias, alias_userspace, name); \ | ||
39 | __MODULE_INFO(alias, alias_crypto, "crypto-" name) | ||
40 | |||
41 | /* | ||
29 | * Algorithm masks and types. | 42 | * Algorithm masks and types. |
30 | */ | 43 | */ |
31 | #define CRYPTO_ALG_TYPE_MASK 0x0000000f | 44 | #define CRYPTO_ALG_TYPE_MASK 0x0000000f |
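Editor's note: MODULE_ALIAS_CRYPTO above emits both the traditional userspace alias and the "crypto-" prefixed alias used by the kernel's restricted internal autoloading. A hedged sketch of how an algorithm module would use it; "foocipher" is a hypothetical algorithm name.

#include <linux/crypto.h>
#include <linux/module.h>

/* ... registration code for a hypothetical "foocipher" algorithm ... */

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("foocipher block cipher (illustrative)");
/* Emits both the "foocipher" and "crypto-foocipher" aliases for request_module(). */
MODULE_ALIAS_CRYPTO("foocipher");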
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request; | |||
127 | 140 | ||
128 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); | 141 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); |
129 | 142 | ||
143 | /** | ||
144 | * DOC: Block Cipher Context Data Structures | ||
145 | * | ||
146 | * These data structures define the operating context for each block cipher | ||
147 | * type. | ||
148 | */ | ||
149 | |||
130 | struct crypto_async_request { | 150 | struct crypto_async_request { |
131 | struct list_head list; | 151 | struct list_head list; |
132 | crypto_completion_t complete; | 152 | crypto_completion_t complete; |
@@ -194,9 +214,63 @@ struct hash_desc { | |||
194 | u32 flags; | 214 | u32 flags; |
195 | }; | 215 | }; |
196 | 216 | ||
197 | /* | 217 | /** |
198 | * Algorithms: modular crypto algorithm implementations, managed | 218 | * DOC: Block Cipher Algorithm Definitions |
199 | * via crypto_register_alg() and crypto_unregister_alg(). | 219 | * |
220 | * These data structures define modular crypto algorithm implementations, | ||
221 | * managed via crypto_register_alg() and crypto_unregister_alg(). | ||
222 | */ | ||
223 | |||
224 | /** | ||
225 | * struct ablkcipher_alg - asynchronous block cipher definition | ||
226 | * @min_keysize: Minimum key size supported by the transformation. This is the | ||
227 | * smallest key length supported by this transformation algorithm. | ||
228 | * This must be set to one of the pre-defined values as this is | ||
229 | * not hardware specific. Possible values for this field can be | ||
230 | * found via git grep "_MIN_KEY_SIZE" include/crypto/ | ||
231 | * @max_keysize: Maximum key size supported by the transformation. This is the | ||
232 | * largest key length supported by this transformation algorithm. | ||
233 | * This must be set to one of the pre-defined values as this is | ||
234 | * not hardware specific. Possible values for this field can be | ||
235 | * found via git grep "_MAX_KEY_SIZE" include/crypto/ | ||
236 | * @setkey: Set key for the transformation. This function is used to either | ||
237 | * program a supplied key into the hardware or store the key in the | ||
238 | * transformation context for programming it later. Note that this | ||
239 | * function does modify the transformation context. This function can | ||
240 | * be called multiple times during the existence of the transformation | ||
241 | * object, so one must make sure the key is properly reprogrammed into | ||
242 | * the hardware. This function is also responsible for checking the key | ||
243 | * length for validity. In case a software fallback was put in place in | ||
244 | * the @cra_init call, this function might need to use the fallback if | ||
245 | * the algorithm doesn't support all of the key sizes. | ||
246 | * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt | ||
247 | * the supplied scatterlist containing the blocks of data. The crypto | ||
248 | * API consumer is responsible for aligning the entries of the | ||
249 | * scatterlist properly and making sure the chunks are correctly | ||
250 | * sized. In case a software fallback was put in place in the | ||
251 | * @cra_init call, this function might need to use the fallback if | ||
252 | * the algorithm doesn't support all of the key sizes. In case the | ||
253 | * key was stored in transformation context, the key might need to be | ||
254 | * re-programmed into the hardware in this function. This function | ||
255 | * shall not modify the transformation context, as this function may | ||
256 | * be called in parallel with the same transformation object. | ||
257 | * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to @encrypt | ||
258 | * and the conditions are exactly the same. | ||
259 | * @givencrypt: Update the IV for encryption. With this function, a cipher | ||
260 | * implementation may provide the function on how to update the IV | ||
261 | * for encryption. | ||
262 | * @givdecrypt: Update the IV for decryption. This is the reverse of | ||
263 | * @givencrypt . | ||
264 | * @geniv: The transformation implementation may use an "IV generator" provided | ||
265 | * by the kernel crypto API. Several use cases have a predefined | ||
266 | * approach how IVs are to be updated. For such use cases, the kernel | ||
267 | * crypto API provides ready-to-use implementations that can be | ||
268 | * referenced with this variable. | ||
269 | * @ivsize: IV size applicable for transformation. The consumer must provide an | ||
270 | * IV of exactly that size to perform the encrypt or decrypt operation. | ||
271 | * | ||
272 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
273 | * mandatory and must be filled. | ||
200 | */ | 274 | */ |
201 | struct ablkcipher_alg { | 275 | struct ablkcipher_alg { |
202 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, | 276 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, |
@@ -213,6 +287,32 @@ struct ablkcipher_alg { | |||
213 | unsigned int ivsize; | 287 | unsigned int ivsize; |
214 | }; | 288 | }; |
215 | 289 | ||
290 | /** | ||
291 | * struct aead_alg - AEAD cipher definition | ||
292 | * @maxauthsize: Set the maximum authentication tag size supported by the | ||
293 | * transformation. A transformation may support smaller tag sizes. | ||
294 | * As the authentication tag is a message digest to ensure the | ||
295 | * integrity of the encrypted data, a consumer typically wants the | ||
296 | * largest authentication tag possible as defined by this | ||
297 | * variable. | ||
298 | * @setauthsize: Set authentication size for the AEAD transformation. This | ||
299 | * function is used to specify the consumer requested size of the | ||
300 | * authentication tag to be either generated by the transformation | ||
301 | * during encryption or the size of the authentication tag to be | ||
302 | * supplied during the decryption operation. This function is also | ||
303 | * responsible for checking the authentication tag size for | ||
304 | * validity. | ||
305 | * @setkey: see struct ablkcipher_alg | ||
306 | * @encrypt: see struct ablkcipher_alg | ||
307 | * @decrypt: see struct ablkcipher_alg | ||
308 | * @givencrypt: see struct ablkcipher_alg | ||
309 | * @givdecrypt: see struct ablkcipher_alg | ||
310 | * @geniv: see struct ablkcipher_alg | ||
311 | * @ivsize: see struct ablkcipher_alg | ||
312 | * | ||
313 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
314 | * mandatory and must be filled. | ||
315 | */ | ||
216 | struct aead_alg { | 316 | struct aead_alg { |
217 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | 317 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, |
218 | unsigned int keylen); | 318 | unsigned int keylen); |
@@ -228,6 +328,18 @@ struct aead_alg { | |||
228 | unsigned int maxauthsize; | 328 | unsigned int maxauthsize; |
229 | }; | 329 | }; |
230 | 330 | ||
331 | /** | ||
332 | * struct blkcipher_alg - synchronous block cipher definition | ||
333 | * @min_keysize: see struct ablkcipher_alg | ||
334 | * @max_keysize: see struct ablkcipher_alg | ||
335 | * @setkey: see struct ablkcipher_alg | ||
336 | * @encrypt: see struct ablkcipher_alg | ||
337 | * @decrypt: see struct ablkcipher_alg | ||
338 | * @geniv: see struct ablkcipher_alg | ||
339 | * @ivsize: see struct ablkcipher_alg | ||
340 | * | ||
341 | * All fields except @geniv and @ivsize are mandatory and must be filled. | ||
342 | */ | ||
231 | struct blkcipher_alg { | 343 | struct blkcipher_alg { |
232 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | 344 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, |
233 | unsigned int keylen); | 345 | unsigned int keylen); |
@@ -245,6 +357,53 @@ struct blkcipher_alg { | |||
245 | unsigned int ivsize; | 357 | unsigned int ivsize; |
246 | }; | 358 | }; |
247 | 359 | ||
360 | /** | ||
361 | * struct cipher_alg - single-block symmetric ciphers definition | ||
362 | * @cia_min_keysize: Minimum key size supported by the transformation. This is | ||
363 | * the smallest key length supported by this transformation | ||
364 | * algorithm. This must be set to one of the pre-defined | ||
365 | * values as this is not hardware specific. Possible values | ||
366 | * for this field can be found via git grep "_MIN_KEY_SIZE" | ||
367 | * include/crypto/ | ||
368 | * @cia_max_keysize: Maximum key size supported by the transformation. This is | ||
369 | * the largest key length supported by this transformation | ||
370 | * algorithm. This must be set to one of the pre-defined values | ||
371 | * as this is not hardware specific. Possible values for this | ||
372 | * field can be found via git grep "_MAX_KEY_SIZE" | ||
373 | * include/crypto/ | ||
374 | * @cia_setkey: Set key for the transformation. This function is used to either | ||
375 | * program a supplied key into the hardware or store the key in the | ||
376 | * transformation context for programming it later. Note that this | ||
377 | * function does modify the transformation context. This function | ||
378 | * can be called multiple times during the existence of the | ||
379 | * transformation object, so one must make sure the key is properly | ||
380 | * reprogrammed into the hardware. This function is also | ||
381 | * responsible for checking the key length for validity. | ||
382 | * @cia_encrypt: Encrypt a single block. This function is used to encrypt a | ||
383 | * single block of data, which must be @cra_blocksize big. This | ||
384 | * always operates on a full @cra_blocksize and it is not possible | ||
385 | * to encrypt a block of smaller size. The supplied buffers must | ||
386 | * therefore also be at least of @cra_blocksize size. Both the | ||
387 | * input and output buffers are always aligned to @cra_alignmask. | ||
388 | * In case either of the input or output buffer supplied by user | ||
389 | * of the crypto API is not aligned to @cra_alignmask, the crypto | ||
390 | * API will re-align the buffers. The re-alignment means that a | ||
391 | * new buffer will be allocated, the data will be copied into the | ||
392 | * new buffer, then the processing will happen on the new buffer, | ||
393 | * then the data will be copied back into the original buffer and | ||
394 | * finally the new buffer will be freed. In case a software | ||
395 | * fallback was put in place in the @cra_init call, this function | ||
396 | * might need to use the fallback if the algorithm doesn't support | ||
397 | * all of the key sizes. In case the key was stored in | ||
398 | * transformation context, the key might need to be re-programmed | ||
399 | * into the hardware in this function. This function shall not | ||
400 | * modify the transformation context, as this function may be | ||
401 | * called in parallel with the same transformation object. | ||
402 | * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to | ||
403 | * @cia_encrypt, and the conditions are exactly the same. | ||
404 | * | ||
405 | * All fields are mandatory and must be filled. | ||
406 | */ | ||
248 | struct cipher_alg { | 407 | struct cipher_alg { |
249 | unsigned int cia_min_keysize; | 408 | unsigned int cia_min_keysize; |
250 | unsigned int cia_max_keysize; | 409 | unsigned int cia_max_keysize; |
@@ -261,6 +420,25 @@ struct compress_alg { | |||
261 | unsigned int slen, u8 *dst, unsigned int *dlen); | 420 | unsigned int slen, u8 *dst, unsigned int *dlen); |
262 | }; | 421 | }; |
263 | 422 | ||
423 | /** | ||
424 | * struct rng_alg - random number generator definition | ||
425 | * @rng_make_random: The function defined by this variable obtains a random | ||
426 | * number. The random number generator transform must generate | ||
427 | * the random number out of the context provided with this | ||
428 | * call. | ||
429 | * @rng_reset: Reset of the random number generator by clearing the entire state. | ||
430 | * With the invocation of this function call, the random number | ||
431 | * generator shall completely reinitialize its state. If the random | ||
432 | * number generator requires a seed for setting up a new state, | ||
433 | * the seed must be provided by the consumer while invoking this | ||
434 | * function. The required size of the seed is defined with | ||
435 | * @seedsize . | ||
436 | * @seedsize: The seed size required for a random number generator | ||
437 | * initialization defined with this variable. Some random number | ||
438 | * generators like the SP800-90A DRBG do not require a seed as the | ||
439 | * seeding is implemented internally without the need of support by | ||
440 | * the consumer. In this case, the seed size is set to zero. | ||
441 | */ | ||
264 | struct rng_alg { | 442 | struct rng_alg { |
265 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, | 443 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, |
266 | unsigned int dlen); | 444 | unsigned int dlen); |
@@ -277,6 +455,81 @@ struct rng_alg { | |||
277 | #define cra_compress cra_u.compress | 455 | #define cra_compress cra_u.compress |
278 | #define cra_rng cra_u.rng | 456 | #define cra_rng cra_u.rng |
279 | 457 | ||
458 | /** | ||
459 | * struct crypto_alg - definition of a cryptographic cipher algorithm | ||
460 | * @cra_flags: Flags describing this transformation. See include/linux/crypto.h | ||
461 | * CRYPTO_ALG_* flags for the flags which go in here. Those are | ||
462 | * used for fine-tuning the description of the transformation | ||
463 | * algorithm. | ||
464 | * @cra_blocksize: Minimum block size of this transformation. The size in bytes | ||
465 | * of the smallest possible unit which can be transformed with | ||
466 | * this algorithm. The users must respect this value. | ||
467 | * In case of HASH transformation, it is possible for a smaller | ||
468 | * block than @cra_blocksize to be passed to the crypto API for | ||
469 | * transformation, in case of any other transformation type, an | ||
470 | * error will be returned upon any attempt to transform smaller | ||
471 | * than @cra_blocksize chunks. | ||
472 | * @cra_ctxsize: Size of the operational context of the transformation. This | ||
473 | * value informs the kernel crypto API about the memory size | ||
474 | * needed to be allocated for the transformation context. | ||
475 | * @cra_alignmask: Alignment mask for the input and output data buffer. The data | ||
476 | * buffer containing the input data for the algorithm must be | ||
477 | * aligned to this alignment mask. The data buffer for the | ||
478 | * output data must be aligned to this alignment mask. Note that | ||
479 | * the Crypto API will do the re-alignment in software, but | ||
480 | * only under special conditions and there is a performance hit. | ||
481 | * The re-alignment happens at these occasions for different | ||
482 | * @cra_u types: cipher -- For both input data and output data | ||
483 | * buffer; ahash -- For output hash destination buf; shash -- | ||
484 | * For output hash destination buf. | ||
485 | * This is needed on hardware which is flawed by design and | ||
486 | * cannot pick data from arbitrary addresses. | ||
487 | * @cra_priority: Priority of this transformation implementation. In case | ||
488 | * multiple transformations with same @cra_name are available to | ||
489 | * the Crypto API, the kernel will use the one with highest | ||
490 | * @cra_priority. | ||
491 | * @cra_name: Generic name (usable by multiple implementations) of the | ||
492 | * transformation algorithm. This is the name of the transformation | ||
493 | * itself. This field is used by the kernel when looking up the | ||
494 | * providers of a particular transformation. | ||
495 | * @cra_driver_name: Unique name of the transformation provider. This is the | ||
496 | * name of the provider of the transformation. This can be any | ||
497 | * arbitrary value, but in the usual case, this contains the | ||
498 | * name of the chip or provider and the name of the | ||
499 | * transformation algorithm. | ||
500 | * @cra_type: Type of the cryptographic transformation. This is a pointer to | ||
501 | * struct crypto_type, which implements callbacks common for all | ||
502 | * transformation types. There are multiple options: | ||
503 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, | ||
504 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. | ||
505 | * This field might be empty. In that case, there are no common | ||
506 | * callbacks. This is the case for: cipher, compress, shash. | ||
507 | * @cra_u: Callbacks implementing the transformation. This is a union of | ||
508 | * multiple structures. Depending on the type of transformation selected | ||
509 | * by @cra_type and @cra_flags above, the associated structure must be | ||
510 | * filled with callbacks. This field might be empty. This is the case | ||
511 | * for ahash, shash. | ||
512 | * @cra_init: Initialize the cryptographic transformation object. This function | ||
513 | * is used to initialize the cryptographic transformation object. | ||
514 | * This function is called only once at the instantiation time, right | ||
515 | * after the transformation context was allocated. In case the | ||
516 | * cryptographic hardware has some special requirements which need to | ||
517 | * be handled by software, this function shall check for the precise | ||
518 | * requirement of the transformation and put any software fallbacks | ||
519 | * in place. | ||
520 | * @cra_exit: Deinitialize the cryptographic transformation object. This is a | ||
521 | * counterpart to @cra_init, used to remove various changes set in | ||
522 | * @cra_init. | ||
523 | * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE | ||
524 | * @cra_list: internally used | ||
525 | * @cra_users: internally used | ||
526 | * @cra_refcnt: internally used | ||
527 | * @cra_destroy: internally used | ||
528 | * | ||
529 | * The struct crypto_alg describes a generic Crypto API algorithm and is common | ||
530 | * for all of the transformations. Any variable not documented here shall not | ||
531 | * be used by a cipher implementation as it is internal to the Crypto API. | ||
532 | */ | ||
280 | struct crypto_alg { | 533 | struct crypto_alg { |
281 | struct list_head cra_list; | 534 | struct list_head cra_list; |
282 | struct list_head cra_users; | 535 | struct list_head cra_users; |
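Editor's note: the struct crypto_alg kerneldoc above describes how an implementation advertises itself. Below is a hedged sketch of a single-block cipher registration filling only the mandatory fields named in that documentation; the "foocipher" names, sizes, and callbacks are placeholders, and the identity transform stands in for real cipher math.

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/string.h>

#define FOO_BLOCK_SIZE	16
#define FOO_KEY_SIZE	16

struct foo_ctx { u8 key[FOO_KEY_SIZE]; };

static int foo_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct foo_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != FOO_KEY_SIZE)
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	return 0;
}

static void foo_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	/* placeholder: a real cipher transforms one FOO_BLOCK_SIZE block here */
	memcpy(dst, src, FOO_BLOCK_SIZE);
}

static void foo_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	memcpy(dst, src, FOO_BLOCK_SIZE);
}

static struct crypto_alg foo_alg = {
	.cra_name		= "foocipher",
	.cra_driver_name	= "foocipher-generic",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= FOO_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct foo_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u			= { .cipher = {
		.cia_min_keysize	= FOO_KEY_SIZE,
		.cia_max_keysize	= FOO_KEY_SIZE,
		.cia_setkey		= foo_setkey,
		.cia_encrypt		= foo_encrypt,
		.cia_decrypt		= foo_decrypt } }
};

static int __init foo_init(void)
{
	return crypto_register_alg(&foo_alg);
}

static void __exit foo_exit(void)
{
	crypto_unregister_alg(&foo_alg);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("foocipher");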
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask) | |||
581 | return mask; | 834 | return mask; |
582 | } | 835 | } |
583 | 836 | ||
837 | /** | ||
838 | * DOC: Asynchronous Block Cipher API | ||
839 | * | ||
840 | * Asynchronous block cipher API is used with the ciphers of type | ||
841 | * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). | ||
842 | * | ||
843 | * Asynchronous cipher operations imply that the function invocation for a | ||
844 | * cipher request returns immediately before the completion of the operation. | ||
845 | * The cipher request is scheduled as a separate kernel thread and therefore | ||
846 | * load-balanced on the different CPUs via the process scheduler. To allow | ||
847 | * the kernel crypto API to inform the caller about the completion of a cipher | ||
848 | * request, the caller must provide a callback function. That function is | ||
849 | * invoked with the cipher handle when the request completes. | ||
850 | * | ||
851 | * To support the asynchronous operation, additional information than just the | ||
852 | * cipher handle must be supplied to the kernel crypto API. That additional | ||
853 | * information is given by filling in the ablkcipher_request data structure. | ||
854 | * | ||
855 | * For the asynchronous block cipher API, the state is maintained with the tfm | ||
856 | * cipher handle. A single tfm can be used across multiple calls and in | ||
857 | * parallel. For asynchronous block cipher calls, context data supplied and | ||
858 | * only used by the caller can be referenced in the request data structure in | ||
859 | * addition to the IV used for the cipher request. The maintenance of such | ||
860 | * state information would be important for a crypto driver implementer to | ||
861 | * have, because when calling the callback function upon completion of the | ||
862 | * cipher operation, that callback function may need some information about | ||
863 | * which operation just finished if multiple were invoked in parallel. This | ||
864 | * state information is unused by the kernel crypto API. | ||
865 | */ | ||
866 | |||
867 | /** | ||
868 | * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle | ||
869 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
870 | * ablkcipher cipher | ||
871 | * @type: specifies the type of the cipher | ||
872 | * @mask: specifies the mask for the cipher | ||
873 | * | ||
874 | * Allocate a cipher handle for an ablkcipher. The returned struct | ||
875 | * crypto_ablkcipher is the cipher handle that is required for any subsequent | ||
876 | * API invocation for that ablkcipher. | ||
877 | * | ||
878 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
879 | * of an error, PTR_ERR() returns the error code. | ||
880 | */ | ||
584 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, | 881 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, |
585 | u32 type, u32 mask); | 882 | u32 type, u32 mask); |
586 | 883 | ||
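Editor's note: a hedged sketch of obtaining and releasing an ablkcipher handle as documented above; "cbc(aes)" is only a familiar example algorithm name, and the wrapper functions are illustrative.

#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_ablkcipher *example_get_tfm(void)
{
	struct crypto_ablkcipher *tfm;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;	/* PTR_ERR(tfm) holds the error code */

	return tfm;
}

static void example_put_tfm(struct crypto_ablkcipher *tfm)
{
	crypto_free_ablkcipher(tfm);	/* zeroizes and frees the handle */
}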
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm( | |||
590 | return &tfm->base; | 887 | return &tfm->base; |
591 | } | 888 | } |
592 | 889 | ||
890 | /** | ||
891 | * crypto_free_ablkcipher() - zeroize and free cipher handle | ||
892 | * @tfm: cipher handle to be freed | ||
893 | */ | ||
593 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) | 894 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) |
594 | { | 895 | { |
595 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); | 896 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); |
596 | } | 897 | } |
597 | 898 | ||
899 | /** | ||
900 | * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. | ||
901 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
902 | * ablkcipher | ||
903 | * @type: specifies the type of the cipher | ||
904 | * @mask: specifies the mask for the cipher | ||
905 | * | ||
906 | * Return: true when the ablkcipher is known to the kernel crypto API; false | ||
907 | * otherwise | ||
908 | */ | ||
598 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, | 909 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, |
599 | u32 mask) | 910 | u32 mask) |
600 | { | 911 | { |
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( | |||
608 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; | 919 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; |
609 | } | 920 | } |
610 | 921 | ||
922 | /** | ||
923 | * crypto_ablkcipher_ivsize() - obtain IV size | ||
924 | * @tfm: cipher handle | ||
925 | * | ||
926 | * The size of the IV for the ablkcipher referenced by the cipher handle is | ||
927 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
928 | * | ||
929 | * Return: IV size in bytes | ||
930 | */ | ||
611 | static inline unsigned int crypto_ablkcipher_ivsize( | 931 | static inline unsigned int crypto_ablkcipher_ivsize( |
612 | struct crypto_ablkcipher *tfm) | 932 | struct crypto_ablkcipher *tfm) |
613 | { | 933 | { |
614 | return crypto_ablkcipher_crt(tfm)->ivsize; | 934 | return crypto_ablkcipher_crt(tfm)->ivsize; |
615 | } | 935 | } |
616 | 936 | ||
937 | /** | ||
938 | * crypto_ablkcipher_blocksize() - obtain block size of cipher | ||
939 | * @tfm: cipher handle | ||
940 | * | ||
941 | * The block size for the ablkcipher referenced with the cipher handle is | ||
942 | * returned. The caller may use that information to allocate appropriate | ||
943 | * memory for the data returned by the encryption or decryption operation | ||
944 | * | ||
945 | * Return: block size of cipher | ||
946 | */ | ||
617 | static inline unsigned int crypto_ablkcipher_blocksize( | 947 | static inline unsigned int crypto_ablkcipher_blocksize( |
618 | struct crypto_ablkcipher *tfm) | 948 | struct crypto_ablkcipher *tfm) |
619 | { | 949 | { |
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, | |||
643 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); | 973 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); |
644 | } | 974 | } |
645 | 975 | ||
976 | /** | ||
977 | * crypto_ablkcipher_setkey() - set key for cipher | ||
978 | * @tfm: cipher handle | ||
979 | * @key: buffer holding the key | ||
980 | * @keylen: length of the key in bytes | ||
981 | * | ||
982 | * The caller provided key is set for the ablkcipher referenced by the cipher | ||
983 | * handle. | ||
984 | * | ||
985 | * Note, the key length determines the cipher type. Many block ciphers implement | ||
986 | * different cipher modes depending on the key size, such as AES-128 vs AES-192 | ||
987 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
988 | * is performed. | ||
989 | * | ||
990 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
991 | */ | ||
646 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | 992 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
647 | const u8 *key, unsigned int keylen) | 993 | const u8 *key, unsigned int keylen) |
648 | { | 994 | { |
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |||
651 | return crt->setkey(crt->base, key, keylen); | 997 | return crt->setkey(crt->base, key, keylen); |
652 | } | 998 | } |
653 | 999 | ||
1000 | /** | ||
1001 | * crypto_ablkcipher_reqtfm() - obtain cipher handle from request | ||
1002 | * @req: ablkcipher_request out of which the cipher handle is to be obtained | ||
1003 | * | ||
1004 | * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request | ||
1005 | * data structure. | ||
1006 | * | ||
1007 | * Return: crypto_ablkcipher handle | ||
1008 | */ | ||
654 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( | 1009 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( |
655 | struct ablkcipher_request *req) | 1010 | struct ablkcipher_request *req) |
656 | { | 1011 | { |
657 | return __crypto_ablkcipher_cast(req->base.tfm); | 1012 | return __crypto_ablkcipher_cast(req->base.tfm); |
658 | } | 1013 | } |
659 | 1014 | ||
1015 | /** | ||
1016 | * crypto_ablkcipher_encrypt() - encrypt plaintext | ||
1017 | * @req: reference to the ablkcipher_request handle that holds all information | ||
1018 | * needed to perform the cipher operation | ||
1019 | * | ||
1020 | * Encrypt plaintext data using the ablkcipher_request handle. That data | ||
1021 | * structure and how it is filled with data is discussed with the | ||
1022 | * ablkcipher_request_* functions. | ||
1023 | * | ||
1024 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1025 | */ | ||
660 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | 1026 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) |
661 | { | 1027 | { |
662 | struct ablkcipher_tfm *crt = | 1028 | struct ablkcipher_tfm *crt = |
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
664 | return crt->encrypt(req); | 1030 | return crt->encrypt(req); |
665 | } | 1031 | } |
666 | 1032 | ||
1033 | /** | ||
1034 | * crypto_ablkcipher_decrypt() - decrypt ciphertext | ||
1035 | * @req: reference to the ablkcipher_request handle that holds all information | ||
1036 | * needed to perform the cipher operation | ||
1037 | * | ||
1038 | * Decrypt ciphertext data using the ablkcipher_request handle. That data | ||
1039 | * structure and how it is filled with data is discussed with the | ||
1040 | * ablkcipher_request_* functions. | ||
1041 | * | ||
1042 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1043 | */ | ||
667 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | 1044 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) |
668 | { | 1045 | { |
669 | struct ablkcipher_tfm *crt = | 1046 | struct ablkcipher_tfm *crt = |
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
671 | return crt->decrypt(req); | 1048 | return crt->decrypt(req); |
672 | } | 1049 | } |
673 | 1050 | ||
1051 | /** | ||
1052 | * DOC: Asynchronous Cipher Request Handle | ||
1053 | * | ||
1054 | * The ablkcipher_request data structure contains all pointers to data | ||
1055 | * required for the asynchronous cipher operation. This includes the cipher | ||
1056 | * handle (which can be used by multiple ablkcipher_request instances), pointer | ||
1057 | * to plaintext and ciphertext, asynchronous callback function, etc. It acts | ||
1058 | * as a handle to the ablkcipher_request_* API calls in a similar way as | ||
1059 | * ablkcipher handle to the crypto_ablkcipher_* API calls. | ||
1060 | */ | ||
1061 | |||
1062 | /** | ||
1063 | * crypto_ablkcipher_reqsize() - obtain size of the request data structure | ||
1064 | * @tfm: cipher handle | ||
1065 | * | ||
1066 | * Return: number of bytes | ||
1067 | */ | ||
674 | static inline unsigned int crypto_ablkcipher_reqsize( | 1068 | static inline unsigned int crypto_ablkcipher_reqsize( |
675 | struct crypto_ablkcipher *tfm) | 1069 | struct crypto_ablkcipher *tfm) |
676 | { | 1070 | { |
677 | return crypto_ablkcipher_crt(tfm)->reqsize; | 1071 | return crypto_ablkcipher_crt(tfm)->reqsize; |
678 | } | 1072 | } |
679 | 1073 | ||
1074 | /** | ||
1075 | * ablkcipher_request_set_tfm() - update cipher handle reference in request | ||
1076 | * @req: request handle to be modified | ||
1077 | * @tfm: cipher handle that shall be added to the request handle | ||
1078 | * | ||
1079 | * Allow the caller to replace the existing ablkcipher handle in the request | ||
1080 | * data structure with a different one. | ||
1081 | */ | ||
680 | static inline void ablkcipher_request_set_tfm( | 1082 | static inline void ablkcipher_request_set_tfm( |
681 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) | 1083 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) |
682 | { | 1084 | { |
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( | |||
689 | return container_of(req, struct ablkcipher_request, base); | 1091 | return container_of(req, struct ablkcipher_request, base); |
690 | } | 1092 | } |
691 | 1093 | ||
1094 | /** | ||
1095 | * ablkcipher_request_alloc() - allocate request data structure | ||
1096 | * @tfm: cipher handle to be registered with the request | ||
1097 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
1098 | * | ||
1099 | * Allocate the request data structure that must be used with the ablkcipher | ||
1100 | * encrypt and decrypt API calls. During the allocation, the provided ablkcipher | ||
1101 | * handle is registered in the request data structure. | ||
1102 | * | ||
1103 | * Return: allocated request handle in case of success; IS_ERR() is true in case | ||
1104 | * of an error, PTR_ERR() returns the error code. | ||
1105 | */ | ||
692 | static inline struct ablkcipher_request *ablkcipher_request_alloc( | 1106 | static inline struct ablkcipher_request *ablkcipher_request_alloc( |
693 | struct crypto_ablkcipher *tfm, gfp_t gfp) | 1107 | struct crypto_ablkcipher *tfm, gfp_t gfp) |
694 | { | 1108 | { |
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( | |||
703 | return req; | 1117 | return req; |
704 | } | 1118 | } |
705 | 1119 | ||
1120 | /** | ||
1121 | * ablkcipher_request_free() - zeroize and free request data structure | ||
1122 | * @req: request data structure cipher handle to be freed | ||
1123 | */ | ||
706 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) | 1124 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) |
707 | { | 1125 | { |
708 | kzfree(req); | 1126 | kzfree(req); |
709 | } | 1127 | } |
710 | 1128 | ||
1129 | /** | ||
1130 | * ablkcipher_request_set_callback() - set asynchronous callback function | ||
1131 | * @req: request handle | ||
1132 | * @flags: specify zero or an ORing of the flags | ||
1133 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
1134 | * increase the wait queue beyond the initial maximum size; | ||
1135 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
1136 | * @compl: callback function pointer to be registered with the request handle | ||
1137 | * @data: The data pointer refers to memory that is not used by the kernel | ||
1138 | * crypto API, but provided to the callback function for it to use. Here, | ||
1139 | * the caller can provide a reference to memory the callback function can | ||
1140 | * operate on. As the callback function is invoked asynchronously to the | ||
1141 | * related functionality, it may need to access data structures of the | ||
1142 | * related functionality which can be referenced using this pointer. The | ||
1143 | * callback function can access the memory via the "data" field in the | ||
1144 | * crypto_async_request data structure provided to the callback function. | ||
1145 | * | ||
1146 | * This function allows setting the callback function that is triggered once the | ||
1147 | * cipher operation completes. | ||
1148 | * | ||
1149 | * The callback function is registered with the ablkcipher_request handle and | ||
1150 | * must comply with the following template: | ||
1151 | * | ||
1152 | * void callback_function(struct crypto_async_request *req, int error) | ||
1153 | */ | ||
711 | static inline void ablkcipher_request_set_callback( | 1154 | static inline void ablkcipher_request_set_callback( |
712 | struct ablkcipher_request *req, | 1155 | struct ablkcipher_request *req, |
713 | u32 flags, crypto_completion_t compl, void *data) | 1156 | u32 flags, crypto_completion_t compl, void *data) |
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback( | |||
717 | req->base.flags = flags; | 1160 | req->base.flags = flags; |
718 | } | 1161 | } |
719 | 1162 | ||
1163 | /** | ||
1164 | * ablkcipher_request_set_crypt() - set data buffers | ||
1165 | * @req: request handle | ||
1166 | * @src: source scatter / gather list | ||
1167 | * @dst: destination scatter / gather list | ||
1168 | * @nbytes: number of bytes to process from @src | ||
1169 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
1170 | * by crypto_ablkcipher_ivsize | ||
1171 | * | ||
1172 | * This function allows setting of the source data and destination data | ||
1173 | * scatter / gather lists. | ||
1174 | * | ||
1175 | * For encryption, the source is treated as the plaintext and the | ||
1176 | * destination is the ciphertext. For a decryption operation, the use is | ||
1177 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
1178 | */ | ||
720 | static inline void ablkcipher_request_set_crypt( | 1179 | static inline void ablkcipher_request_set_crypt( |
721 | struct ablkcipher_request *req, | 1180 | struct ablkcipher_request *req, |
722 | struct scatterlist *src, struct scatterlist *dst, | 1181 | struct scatterlist *src, struct scatterlist *dst, |
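Editor's note: the request helpers documented above combine as follows. A hedged sketch of one encryption pass over a single scatterlist entry, waited on with a completion; the wrapper names and in-place buffer handling are illustrative, not prescribed by this header.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>

struct example_result {
	struct completion done;
	int err;
};

static void example_cb(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request has started; final call follows */
	res->err = err;
	complete(&res->done);
}

static int example_encrypt(struct crypto_ablkcipher *tfm, void *buf,
			   unsigned int len, u8 *iv)
{
	struct ablkcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	int ret;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.done);
	sg_init_one(&sg, buf, len);	/* in-place: source and destination alias */

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP, example_cb, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.done);
		ret = res.err;
	}

	ablkcipher_request_free(req);
	return ret;
}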
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt( | |||
728 | req->info = iv; | 1187 | req->info = iv; |
729 | } | 1188 | } |
730 | 1189 | ||
1190 | /** | ||
1191 | * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API | ||
1192 | * | ||
1193 | * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD | ||
1194 | * (listed as type "aead" in /proc/crypto) | ||
1195 | * | ||
1196 | * The most prominent examples for this type of encryption are GCM and CCM. | ||
1197 | * However, the kernel supports other types of AEAD ciphers which are defined | ||
1198 | * with the following cipher string: | ||
1199 | * | ||
1200 | * authenc(keyed message digest, block cipher) | ||
1201 | * | ||
1202 | * For example: authenc(hmac(sha256), cbc(aes)) | ||
1203 | * | ||
1204 | * The example code provided for the asynchronous block cipher operation | ||
1205 | * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged for | ||
1206 | * the *aead* counterparts discussed in the following. In addition, for the AEAD | ||
1207 | * operation, the aead_request_set_assoc function must be used to set the | ||
1208 | * pointer to the associated data memory location before performing the | ||
1209 | * encryption or decryption operation. In case of an encryption, the associated | ||
1210 | * data memory is filled during the encryption operation. For decryption, the | ||
1211 | * associated data memory must contain data that is used to verify the integrity | ||
1212 | * of the decrypted data. Another deviation from the asynchronous block cipher | ||
1213 | * operation is that the caller should explicitly check for -EBADMSG of the | ||
1214 | * crypto_aead_decrypt. That error indicates an authentication error, i.e. | ||
1215 | * a breach in the integrity of the message. In essence, that -EBADMSG error | ||
1216 | * code is the key bonus an AEAD cipher has over "standard" block chaining | ||
1217 | * modes. | ||
1218 | */ | ||
1219 | |||
731 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) | 1220 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) |
732 | { | 1221 | { |
733 | return (struct crypto_aead *)tfm; | 1222 | return (struct crypto_aead *)tfm; |
734 | } | 1223 | } |
735 | 1224 | ||
1225 | /** | ||
1226 | * crypto_alloc_aead() - allocate AEAD cipher handle | ||
1227 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1228 | * AEAD cipher | ||
1229 | * @type: specifies the type of the cipher | ||
1230 | * @mask: specifies the mask for the cipher | ||
1231 | * | ||
1232 | * Allocate a cipher handle for an AEAD. The returned struct | ||
1233 | * crypto_aead is the cipher handle that is required for any subsequent | ||
1234 | * API invocation for that AEAD. | ||
1235 | * | ||
1236 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1237 | * of an error, PTR_ERR() returns the error code. | ||
1238 | */ | ||
736 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); | 1239 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); |
737 | 1240 | ||
738 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | 1241 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) |
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | |||
740 | return &tfm->base; | 1243 | return &tfm->base; |
741 | } | 1244 | } |
742 | 1245 | ||
1246 | /** | ||
1247 | * crypto_free_aead() - zeroize and free aead handle | ||
1248 | * @tfm: cipher handle to be freed | ||
1249 | */ | ||
743 | static inline void crypto_free_aead(struct crypto_aead *tfm) | 1250 | static inline void crypto_free_aead(struct crypto_aead *tfm) |
744 | { | 1251 | { |
745 | crypto_free_tfm(crypto_aead_tfm(tfm)); | 1252 | crypto_free_tfm(crypto_aead_tfm(tfm)); |
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) | |||
750 | return &crypto_aead_tfm(tfm)->crt_aead; | 1257 | return &crypto_aead_tfm(tfm)->crt_aead; |
751 | } | 1258 | } |
752 | 1259 | ||
1260 | /** | ||
1261 | * crypto_aead_ivsize() - obtain IV size | ||
1262 | * @tfm: cipher handle | ||
1263 | * | ||
1264 | * The size of the IV for the aead referenced by the cipher handle is | ||
1265 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
1266 | * | ||
1267 | * Return: IV size in bytes | ||
1268 | */ | ||
753 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) | 1269 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) |
754 | { | 1270 | { |
755 | return crypto_aead_crt(tfm)->ivsize; | 1271 | return crypto_aead_crt(tfm)->ivsize; |
756 | } | 1272 | } |
757 | 1273 | ||
1274 | /** | ||
1275 | * crypto_aead_authsize() - obtain maximum authentication data size | ||
1276 | * @tfm: cipher handle | ||
1277 | * | ||
1278 | * The maximum size of the authentication data for the AEAD cipher referenced | ||
1279 | * by the AEAD cipher handle is returned. The authentication data size may be | ||
1280 | * zero if the cipher implements a hard-coded maximum. | ||
1281 | * | ||
1282 | * The authentication data may also be known as "tag value". | ||
1283 | * | ||
1284 | * Return: authentication data size / tag size in bytes | ||
1285 | */ | ||
758 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) | 1286 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) |
759 | { | 1287 | { |
760 | return crypto_aead_crt(tfm)->authsize; | 1288 | return crypto_aead_crt(tfm)->authsize; |
761 | } | 1289 | } |
762 | 1290 | ||
1291 | /** | ||
1292 | * crypto_aead_blocksize() - obtain block size of cipher | ||
1293 | * @tfm: cipher handle | ||
1294 | * | ||
1295 | * The block size for the AEAD referenced with the cipher handle is returned. | ||
1296 | * The caller may use that information to allocate appropriate memory for the | ||
1297 | * data returned by the encryption or decryption operation. | ||
1298 | * | ||
1299 | * Return: block size of cipher | ||
1300 | */ | ||
763 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) | 1301 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) |
764 | { | 1302 | { |
765 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); | 1303 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); |
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) | |||
785 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); | 1323 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); |
786 | } | 1324 | } |
787 | 1325 | ||
1326 | /** | ||
1327 | * crypto_aead_setkey() - set key for cipher | ||
1328 | * @tfm: cipher handle | ||
1329 | * @key: buffer holding the key | ||
1330 | * @keylen: length of the key in bytes | ||
1331 | * | ||
1332 | * The caller provided key is set for the AEAD referenced by the cipher | ||
1333 | * handle. | ||
1334 | * | ||
1335 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
1336 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
1337 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
1338 | * is performed. | ||
1339 | * | ||
1340 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1341 | */ | ||
788 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | 1342 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
789 | unsigned int keylen) | 1343 | unsigned int keylen) |
790 | { | 1344 | { |
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
793 | return crt->setkey(crt->base, key, keylen); | 1347 | return crt->setkey(crt->base, key, keylen); |
794 | } | 1348 | } |
795 | 1349 | ||
1350 | /** | ||
1351 | * crypto_aead_setauthsize() - set authentication data size | ||
1352 | * @tfm: cipher handle | ||
1353 | * @authsize: size of the authentication data / tag in bytes | ||
1354 | * | ||
1355 | * Set the authentication data size / tag size. AEAD ciphers produce an | ||
1356 | * authentication tag (or MAC) in addition to the ciphertext. | ||
1357 | * | ||
1358 | * Return: 0 if setting the authentication size was successful; < 0 if an error occurred | ||
1359 | */ | ||
796 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); | 1360 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); |
797 | 1361 | ||
798 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | 1362 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) |
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
800 | return __crypto_aead_cast(req->base.tfm); | 1364 | return __crypto_aead_cast(req->base.tfm); |
801 | } | 1365 | } |
802 | 1366 | ||
1367 | /** | ||
1368 | * crypto_aead_encrypt() - encrypt plaintext | ||
1369 | * @req: reference to the aead_request handle that holds all information | ||
1370 | * needed to perform the cipher operation | ||
1371 | * | ||
1372 | * Encrypt plaintext data using the aead_request handle. That data structure | ||
1373 | * and how it is filled with data is discussed with the aead_request_* | ||
1374 | * functions. | ||
1375 | * | ||
1376 | * IMPORTANT NOTE The encryption operation creates the authentication data / | ||
1377 | * tag. That data is concatenated with the created ciphertext. | ||
1378 | * The ciphertext memory size is therefore the given number of | ||
1379 | * block cipher blocks + the size defined by the | ||
1380 | * crypto_aead_setauthsize invocation. The caller must ensure | ||
1381 | * that sufficient memory is available for the ciphertext and | ||
1382 | * the authentication tag. | ||
1383 | * | ||
1384 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1385 | */ | ||
803 | static inline int crypto_aead_encrypt(struct aead_request *req) | 1386 | static inline int crypto_aead_encrypt(struct aead_request *req) |
804 | { | 1387 | { |
805 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); | 1388 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); |
806 | } | 1389 | } |
807 | 1390 | ||
1391 | /** | ||
1392 | * crypto_aead_decrypt() - decrypt ciphertext | ||
1393 | * @req: reference to the aead_request handle that holds all information | ||
1394 | * needed to perform the cipher operation | ||
1395 | * | ||
1396 | * Decrypt ciphertext data using the aead_request handle. That data structure | ||
1397 | * and how it is filled with data is discussed with the aead_request_* | ||
1398 | * functions. | ||
1399 | * | ||
1400 | * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the | ||
1401 | * authentication data / tag. That authentication data / tag | ||
1402 | * must have the size defined by the crypto_aead_setauthsize | ||
1403 | * invocation. | ||
1404 | * | ||
1405 | * | ||
1406 | * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD | ||
1407 | * cipher operation performs the authentication of the data during the | ||
1408 | * decryption operation. Therefore, the function returns this error if | ||
1409 | * the authentication of the ciphertext was unsuccessful (i.e. the | ||
1410 | * integrity of the ciphertext or the associated data was violated); | ||
1411 | * < 0 if an error occurred. | ||
1412 | */ | ||
808 | static inline int crypto_aead_decrypt(struct aead_request *req) | 1413 | static inline int crypto_aead_decrypt(struct aead_request *req) |
809 | { | 1414 | { |
810 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); | 1415 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); |
811 | } | 1416 | } |
812 | 1417 | ||
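Continuing the encryption sketch given earlier in this section (an illustrative fragment, not patch code; req, sg, iv and ret are the variables from that sketch, ctlen is assumed to be the ciphertext length without the tag, and the authentication_failed label is hypothetical): for decryption the source scatterlist and the cryptlen argument must cover the ciphertext plus the 16-byte tag, and -EBADMSG is the integrity failure described above.

        /* src/dst scatterlist and cryptlen now cover ciphertext + 16 byte tag */
        aead_request_set_crypt(req, &sg, &sg, ctlen + 16, iv);

        ret = crypto_aead_decrypt(req);         /* may complete asynchronously */
        if (ret == -EBADMSG)
                /* integrity of the ciphertext or associated data was violated */
                goto authentication_failed;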
1418 | /** | ||
1419 | * DOC: Asynchronous AEAD Request Handle | ||
1420 | * | ||
1421 | * The aead_request data structure contains all pointers to data required for | ||
1422 | * the AEAD cipher operation. This includes the cipher handle (which can be | ||
1423 | * used by multiple aead_request instances), pointer to plaintext and | ||
1424 | * ciphertext, asynchronous callback function, etc. It acts as a handle to the | ||
1425 | * aead_request_* API calls in a similar way as the AEAD cipher handle to the | ||
1426 | * crypto_aead_* API calls. | ||
1427 | */ | ||
1428 | |||
1429 | /** | ||
1430 | * crypto_aead_reqsize() - obtain size of the request data structure | ||
1431 | * @tfm: cipher handle | ||
1432 | * | ||
1433 | * Return: number of bytes | ||
1434 | */ | ||
813 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | 1435 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) |
814 | { | 1436 | { |
815 | return crypto_aead_crt(tfm)->reqsize; | 1437 | return crypto_aead_crt(tfm)->reqsize; |
816 | } | 1438 | } |
817 | 1439 | ||
1440 | /** | ||
1441 | * aead_request_set_tfm() - update cipher handle reference in request | ||
1442 | * @req: request handle to be modified | ||
1443 | * @tfm: cipher handle that shall be added to the request handle | ||
1444 | * | ||
1445 | * Allow the caller to replace the existing aead handle in the request | ||
1446 | * data structure with a different one. | ||
1447 | */ | ||
818 | static inline void aead_request_set_tfm(struct aead_request *req, | 1448 | static inline void aead_request_set_tfm(struct aead_request *req, |
819 | struct crypto_aead *tfm) | 1449 | struct crypto_aead *tfm) |
820 | { | 1450 | { |
821 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); | 1451 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); |
822 | } | 1452 | } |
823 | 1453 | ||
1454 | /** | ||
1455 | * aead_request_alloc() - allocate request data structure | ||
1456 | * @tfm: cipher handle to be registered with the request | ||
1457 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
1458 | * | ||
1459 | * Allocate the request data structure that must be used with the AEAD | ||
1460 | * encrypt and decrypt API calls. During the allocation, the provided aead | ||
1461 | * handle is registered in the request data structure. | ||
1462 | * | ||
1463 | * Return: allocated request handle in case of success, or NULL if the | ||
1464 | * allocation fails (out of memory). | ||
1465 | */ | ||
824 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | 1466 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, |
825 | gfp_t gfp) | 1467 | gfp_t gfp) |
826 | { | 1468 | { |
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | |||
834 | return req; | 1476 | return req; |
835 | } | 1477 | } |
836 | 1478 | ||
1479 | /** | ||
1480 | * aead_request_free() - zeroize and free request data structure | ||
1481 | * @req: request data structure cipher handle to be freed | ||
1482 | */ | ||
837 | static inline void aead_request_free(struct aead_request *req) | 1483 | static inline void aead_request_free(struct aead_request *req) |
838 | { | 1484 | { |
839 | kzfree(req); | 1485 | kzfree(req); |
840 | } | 1486 | } |
841 | 1487 | ||
1488 | /** | ||
1489 | * aead_request_set_callback() - set asynchronous callback function | ||
1490 | * @req: request handle | ||
1491 | * @flags: specify zero or an ORing of the flags | ||
1492 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may backlog and | ||
1493 | * increase the wait queue beyond the initial maximum size; | ||
1494 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
1495 | * @compl: callback function pointer to be registered with the request handle | ||
1496 | * @data: The data pointer refers to memory that is not used by the kernel | ||
1497 | * crypto API, but provided to the callback function for it to use. Here, | ||
1498 | * the caller can provide a reference to memory the callback function can | ||
1499 | * operate on. As the callback function is invoked asynchronously to the | ||
1500 | * related functionality, it may need to access data structures of the | ||
1501 | * related functionality which can be referenced using this pointer. The | ||
1502 | * callback function can access the memory via the "data" field in the | ||
1503 | * crypto_async_request data structure provided to the callback function. | ||
1504 | * | ||
1505 | * This call sets the callback function that is triggered once the cipher | ||
1506 | * operation completes. | ||
1507 | * | ||
1508 | * The callback function is registered with the aead_request handle and | ||
1509 | * must comply with the following template: | ||
1510 | * | ||
1511 | * void callback_function(struct crypto_async_request *req, int error) | ||
1512 | */ | ||
842 | static inline void aead_request_set_callback(struct aead_request *req, | 1513 | static inline void aead_request_set_callback(struct aead_request *req, |
843 | u32 flags, | 1514 | u32 flags, |
844 | crypto_completion_t compl, | 1515 | crypto_completion_t compl, |
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req, | |||
849 | req->base.flags = flags; | 1520 | req->base.flags = flags; |
850 | } | 1521 | } |
851 | 1522 | ||
1523 | /** | ||
1524 | * aead_request_set_crypt() - set data buffers | ||
1525 | * @req: request handle | ||
1526 | * @src: source scatter / gather list | ||
1527 | * @dst: destination scatter / gather list | ||
1528 | * @cryptlen: number of bytes to process from @src | ||
1529 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
1530 | * by crypto_aead_ivsize() | ||
1531 | * | ||
1532 | * Setting the source data and destination data scatter / gather lists. | ||
1533 | * | ||
1534 | * For encryption, the source is treated as the plaintext and the | ||
1535 | * destination is the ciphertext. For a decryption operation, the use is | ||
1536 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
1537 | * | ||
1538 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, | ||
1539 | * the caller must concatenate the ciphertext followed by the | ||
1540 | * authentication tag and provide the entire data stream to the | ||
1541 | * decryption operation (i.e. the data length used for the | ||
1542 | * initialization of the scatterlist and the data length for the | ||
1543 | * decryption operation is identical). For encryption, however, | ||
1544 | * the authentication tag is created while encrypting the data. | ||
1545 | * The destination buffer must hold sufficient space for the | ||
1546 | * ciphertext and the authentication tag while the encryption | ||
1547 | * invocation must only point to the plaintext data size. The | ||
1548 | * following code snippet illustrates the memory usage | ||
1549 | * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL); | ||
1550 | * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); | ||
1551 | * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); | ||
1552 | */ | ||
852 | static inline void aead_request_set_crypt(struct aead_request *req, | 1553 | static inline void aead_request_set_crypt(struct aead_request *req, |
853 | struct scatterlist *src, | 1554 | struct scatterlist *src, |
854 | struct scatterlist *dst, | 1555 | struct scatterlist *dst, |
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req, | |||
860 | req->iv = iv; | 1561 | req->iv = iv; |
861 | } | 1562 | } |
862 | 1563 | ||
1564 | /** | ||
1565 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
1566 | * @req: request handle | ||
1567 | * @assoc: associated data scatter / gather list | ||
1568 | * @assoclen: number of bytes to process from @assoc | ||
1569 | * | ||
1570 | * For encryption, the memory is filled with the associated data. For | ||
1571 | * decryption, the memory must point to the associated data. | ||
1572 | */ | ||
863 | static inline void aead_request_set_assoc(struct aead_request *req, | 1573 | static inline void aead_request_set_assoc(struct aead_request *req, |
864 | struct scatterlist *assoc, | 1574 | struct scatterlist *assoc, |
865 | unsigned int assoclen) | 1575 | unsigned int assoclen) |
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req, | |||
868 | req->assoclen = assoclen; | 1578 | req->assoclen = assoclen; |
869 | } | 1579 | } |
870 | 1580 | ||
1581 | /** | ||
1582 | * DOC: Synchronous Block Cipher API | ||
1583 | * | ||
1584 | * The synchronous block cipher API is used with the ciphers of type | ||
1585 | * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) | ||
1586 | * | ||
1587 | * Synchronous calls have a context in the tfm. But since a single tfm can be | ||
1588 | * used in multiple calls and in parallel, this state should not be changeable | ||
1589 | * (unless a lock is used). This applies, for example, to the symmetric key. | ||
1590 | * However, the IV is changeable, so there is an iv field in the blkcipher_tfm | ||
1591 | * structure for the synchronous blkcipher API. So it is the only state info | ||
1592 | * that can be kept for synchronous calls without using a big lock across a tfm. | ||
1593 | * | ||
1594 | * The block cipher API allows the use of a complete cipher, i.e. a cipher | ||
1595 | * consisting of a template (a block chaining mode) and a single block cipher | ||
1596 | * primitive (e.g. AES). | ||
1597 | * | ||
1598 | * The plaintext data buffer and the ciphertext data buffer are pointed to | ||
1599 | * by using scatter/gather lists. The cipher operation is performed | ||
1600 | * on all segments of the provided scatter/gather lists. | ||
1601 | * | ||
1602 | * The kernel crypto API supports a cipher operation "in-place" which means that | ||
1603 | * the caller may provide the same scatter/gather list for the plaintext and | ||
1604 | * cipher text. After the completion of the cipher operation, the plaintext | ||
1605 | * data is replaced with the ciphertext data in case of an encryption and vice | ||
1606 | * versa for a decryption. The caller must ensure that the scatter/gather lists | ||
1607 | * for the output data point to sufficiently large buffers, i.e. multiples of | ||
1608 | * the block size of the cipher. | ||
1609 | */ | ||
1610 | |||
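As an illustration of the synchronous block cipher calls documented in this section (a sketch, not part of the patch; the helper name and parameters are invented): a one-shot, in-place CBC encryption. cbc(aes) is assumed to be available as a synchronous implementation, len must be a multiple of the AES block size, and the IV is set through the tfm as described for crypto_blkcipher_set_iv() further below.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Encrypt len bytes in place with cbc(aes) using the synchronous API. */
static int example_blkcipher_encrypt(u8 *buf, unsigned int len,
                                     const u8 *key, const u8 *iv)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int ret;

        /* mask out CRYPTO_ALG_ASYNC to get a synchronous implementation */
        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_blkcipher_setkey(tfm, key, 16);    /* AES-128 */
        if (ret)
                goto out;

        desc.tfm = tfm;
        desc.flags = 0;
        crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

        sg_init_one(&sg, buf, len);
        ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return ret;
}

crypto_blkcipher_decrypt() is used the same way, with the roles of source and destination data reversed.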
871 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( | 1611 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( |
872 | struct crypto_tfm *tfm) | 1612 | struct crypto_tfm *tfm) |
873 | { | 1613 | { |
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( | |||
881 | return __crypto_blkcipher_cast(tfm); | 1621 | return __crypto_blkcipher_cast(tfm); |
882 | } | 1622 | } |
883 | 1623 | ||
1624 | /** | ||
1625 | * crypto_alloc_blkcipher() - allocate synchronous block cipher handle | ||
1626 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1627 | * blkcipher cipher | ||
1628 | * @type: specifies the type of the cipher | ||
1629 | * @mask: specifies the mask for the cipher | ||
1630 | * | ||
1631 | * Allocate a cipher handle for a block cipher. The returned struct | ||
1632 | * crypto_blkcipher is the cipher handle that is required for any subsequent | ||
1633 | * API invocation for that block cipher. | ||
1634 | * | ||
1635 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1636 | * of an error, PTR_ERR() returns the error code. | ||
1637 | */ | ||
884 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( | 1638 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( |
885 | const char *alg_name, u32 type, u32 mask) | 1639 | const char *alg_name, u32 type, u32 mask) |
886 | { | 1640 | { |
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm( | |||
897 | return &tfm->base; | 1651 | return &tfm->base; |
898 | } | 1652 | } |
899 | 1653 | ||
1654 | /** | ||
1655 | * crypto_free_blkcipher() - zeroize and free the block cipher handle | ||
1656 | * @tfm: cipher handle to be freed | ||
1657 | */ | ||
900 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) | 1658 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) |
901 | { | 1659 | { |
902 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); | 1660 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); |
903 | } | 1661 | } |
904 | 1662 | ||
1663 | /** | ||
1664 | * crypto_has_blkcipher() - Search for the availability of a block cipher | ||
1665 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1666 | * block cipher | ||
1667 | * @type: specifies the type of the cipher | ||
1668 | * @mask: specifies the mask for the cipher | ||
1669 | * | ||
1670 | * Return: true when the block cipher is known to the kernel crypto API; false | ||
1671 | * otherwise | ||
1672 | */ | ||
905 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | 1673 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) |
906 | { | 1674 | { |
907 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1675 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | |||
911 | return crypto_has_alg(alg_name, type, mask); | 1679 | return crypto_has_alg(alg_name, type, mask); |
912 | } | 1680 | } |
913 | 1681 | ||
1682 | /** | ||
1683 | * crypto_blkcipher_name() - return the name / cra_name from the cipher handle | ||
1684 | * @tfm: cipher handle | ||
1685 | * | ||
1686 | * Return: The character string holding the name of the cipher | ||
1687 | */ | ||
914 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) | 1688 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) |
915 | { | 1689 | { |
916 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); | 1690 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); |
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg( | |||
928 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; | 1702 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; |
929 | } | 1703 | } |
930 | 1704 | ||
1705 | /** | ||
1706 | * crypto_blkcipher_ivsize() - obtain IV size | ||
1707 | * @tfm: cipher handle | ||
1708 | * | ||
1709 | * The size of the IV for the block cipher referenced by the cipher handle is | ||
1710 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
1711 | * | ||
1712 | * Return: IV size in bytes | ||
1713 | */ | ||
931 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) | 1714 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) |
932 | { | 1715 | { |
933 | return crypto_blkcipher_alg(tfm)->ivsize; | 1716 | return crypto_blkcipher_alg(tfm)->ivsize; |
934 | } | 1717 | } |
935 | 1718 | ||
1719 | /** | ||
1720 | * crypto_blkcipher_blocksize() - obtain block size of cipher | ||
1721 | * @tfm: cipher handle | ||
1722 | * | ||
1723 | * The block size for the block cipher referenced with the cipher handle is | ||
1724 | * returned. The caller may use that information to allocate appropriate | ||
1725 | * memory for the data returned by the encryption or decryption operation. | ||
1726 | * | ||
1727 | * Return: block size of cipher | ||
1728 | */ | ||
936 | static inline unsigned int crypto_blkcipher_blocksize( | 1729 | static inline unsigned int crypto_blkcipher_blocksize( |
937 | struct crypto_blkcipher *tfm) | 1730 | struct crypto_blkcipher *tfm) |
938 | { | 1731 | { |
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, | |||
962 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); | 1755 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); |
963 | } | 1756 | } |
964 | 1757 | ||
1758 | /** | ||
1759 | * crypto_blkcipher_setkey() - set key for cipher | ||
1760 | * @tfm: cipher handle | ||
1761 | * @key: buffer holding the key | ||
1762 | * @keylen: length of the key in bytes | ||
1763 | * | ||
1764 | * The caller provided key is set for the block cipher referenced by the cipher | ||
1765 | * handle. | ||
1766 | * | ||
1767 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
1768 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
1769 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
1770 | * is performed. | ||
1771 | * | ||
1772 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
1773 | */ | ||
965 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | 1774 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, |
966 | const u8 *key, unsigned int keylen) | 1775 | const u8 *key, unsigned int keylen) |
967 | { | 1776 | { |
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | |||
969 | key, keylen); | 1778 | key, keylen); |
970 | } | 1779 | } |
971 | 1780 | ||
1781 | /** | ||
1782 | * crypto_blkcipher_encrypt() - encrypt plaintext | ||
1783 | * @desc: reference to the block cipher handle with meta data | ||
1784 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1785 | * ciphertext | ||
1786 | * @src: scatter/gather list that holds the plaintext | ||
1787 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
1788 | * | ||
1789 | * Encrypt plaintext data using the IV set by the caller with a preceding | ||
1790 | * call of crypto_blkcipher_set_iv. | ||
1791 | * | ||
1792 | * The blkcipher_desc data structure must be filled by the caller and can | ||
1793 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
1794 | * with the block cipher handle; desc.flags is filled with either | ||
1795 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
1796 | * | ||
1797 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1798 | */ | ||
972 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | 1799 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, |
973 | struct scatterlist *dst, | 1800 | struct scatterlist *dst, |
974 | struct scatterlist *src, | 1801 | struct scatterlist *src, |
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | |||
978 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1805 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
979 | } | 1806 | } |
980 | 1807 | ||
1808 | /** | ||
1809 | * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV | ||
1810 | * @desc: reference to the block cipher handle with meta data | ||
1811 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1812 | * ciphertext | ||
1813 | * @src: scatter/gather list that holds the plaintext | ||
1814 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
1815 | * | ||
1816 | * Encrypt plaintext data with the use of an IV that is solely used for this | ||
1817 | * cipher operation. Any previously set IV is not used. | ||
1818 | * | ||
1819 | * The blkcipher_desc data structure must be filled by the caller and can | ||
1820 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
1821 | * with the block cipher handle; desc.info is filled with the IV to be used for | ||
1822 | * the current operation; desc.flags is filled with either | ||
1823 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
1824 | * | ||
1825 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1826 | */ | ||
981 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | 1827 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, |
982 | struct scatterlist *dst, | 1828 | struct scatterlist *dst, |
983 | struct scatterlist *src, | 1829 | struct scatterlist *src, |
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | |||
986 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1832 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
987 | } | 1833 | } |
988 | 1834 | ||
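A small variation on the cbc(aes) sketch given earlier (an illustrative fragment, not patch code; desc, tfm, sg, len and ret are the variables from that sketch, and iv is assumed to be a writable u8 buffer here): when the IV should only apply to a single operation, it is handed in through desc.info instead of crypto_blkcipher_set_iv(), as described above.

        desc.tfm = tfm;
        desc.info = iv;         /* IV applies to this call only; the tfm IV is untouched */
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);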
1835 | /** | ||
1836 | * crypto_blkcipher_decrypt() - decrypt ciphertext | ||
1837 | * @desc: reference to the block cipher handle with meta data | ||
1838 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1839 | * plaintext | ||
1840 | * @src: scatter/gather list that holds the ciphertext | ||
1841 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
1842 | * | ||
1843 | * Decrypt ciphertext data using the IV set by the caller with a preceding | ||
1844 | * call of crypto_blkcipher_set_iv. | ||
1845 | * | ||
1846 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
1847 | * for the crypto_blkcipher_encrypt call above. | ||
1848 | * | ||
1849 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1850 | * | ||
1851 | */ | ||
989 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | 1852 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, |
990 | struct scatterlist *dst, | 1853 | struct scatterlist *dst, |
991 | struct scatterlist *src, | 1854 | struct scatterlist *src, |
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | |||
995 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1858 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
996 | } | 1859 | } |
997 | 1860 | ||
1861 | /** | ||
1862 | * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV | ||
1863 | * @desc: reference to the block cipher handle with meta data | ||
1864 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
1865 | * plaintext | ||
1866 | * @src: scatter/gather list that holds the ciphertext | ||
1867 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
1868 | * | ||
1869 | * Decrypt ciphertext data with the use of an IV that is solely used for this | ||
1870 | * cipher operation. Any previously set IV is not used. | ||
1871 | * | ||
1872 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
1873 | * for the crypto_blkcipher_encrypt_iv call above. | ||
1874 | * | ||
1875 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
1876 | */ | ||
998 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | 1877 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, |
999 | struct scatterlist *dst, | 1878 | struct scatterlist *dst, |
1000 | struct scatterlist *src, | 1879 | struct scatterlist *src, |
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | |||
1003 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1882 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
1004 | } | 1883 | } |
1005 | 1884 | ||
1885 | /** | ||
1886 | * crypto_blkcipher_set_iv() - set IV for cipher | ||
1887 | * @tfm: cipher handle | ||
1888 | * @src: buffer holding the IV | ||
1889 | * @len: length of the IV in bytes | ||
1890 | * | ||
1891 | * The caller provided IV is set for the block cipher referenced by the cipher | ||
1892 | * handle. | ||
1893 | */ | ||
1006 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, | 1894 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, |
1007 | const u8 *src, unsigned int len) | 1895 | const u8 *src, unsigned int len) |
1008 | { | 1896 | { |
1009 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); | 1897 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); |
1010 | } | 1898 | } |
1011 | 1899 | ||
1900 | /** | ||
1901 | * crypto_blkcipher_get_iv() - obtain IV from cipher | ||
1902 | * @tfm: cipher handle | ||
1903 | * @dst: buffer filled with the IV | ||
1904 | * @len: length of the buffer dst | ||
1905 | * | ||
1906 | * The caller can obtain the IV set for the block cipher referenced by the | ||
1907 | * cipher handle and store it into the user-provided buffer. If the buffer | ||
1908 | * has insufficient space, the IV is truncated to fit the buffer. | ||
1909 | */ | ||
1012 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, | 1910 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, |
1013 | u8 *dst, unsigned int len) | 1911 | u8 *dst, unsigned int len) |
1014 | { | 1912 | { |
1015 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); | 1913 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); |
1016 | } | 1914 | } |
1017 | 1915 | ||
1916 | /** | ||
1917 | * DOC: Single Block Cipher API | ||
1918 | * | ||
1919 | * The single block cipher API is used with the ciphers of type | ||
1920 | * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). | ||
1921 | * | ||
1922 | * Using the single block cipher API calls, operations with the basic cipher | ||
1923 | * primitive can be implemented. These cipher primitives exclude any block | ||
1924 | * chaining operations including IV handling. | ||
1925 | * | ||
1926 | * The purpose of this single block cipher API is to support the implementation | ||
1927 | * of templates or other concepts that only need to perform the cipher operation | ||
1928 | * on one block at a time. Templates invoke the underlying cipher primitive | ||
1929 | * block-wise and process either the input or the output data of these cipher | ||
1930 | * operations. | ||
1931 | */ | ||
1932 | |||
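To illustrate the single block cipher calls described above (a sketch, not part of the patch; the helper name is invented): encrypting one 16-byte block with the bare AES primitive, without any chaining mode or IV.

#include <linux/crypto.h>
#include <linux/err.h>

/* Encrypt exactly one AES block; dst and src must be at least 16 bytes. */
static int example_cipher_one_block(u8 *dst, const u8 *src, const u8 *key)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, 16);       /* AES-128 */
        if (!ret)
                crypto_cipher_encrypt_one(tfm, dst, src);

        crypto_free_cipher(tfm);
        return ret;
}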
1018 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) | 1933 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) |
1019 | { | 1934 | { |
1020 | return (struct crypto_cipher *)tfm; | 1935 | return (struct crypto_cipher *)tfm; |
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) | |||
1026 | return __crypto_cipher_cast(tfm); | 1941 | return __crypto_cipher_cast(tfm); |
1027 | } | 1942 | } |
1028 | 1943 | ||
1944 | /** | ||
1945 | * crypto_alloc_cipher() - allocate single block cipher handle | ||
1946 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1947 | * single block cipher | ||
1948 | * @type: specifies the type of the cipher | ||
1949 | * @mask: specifies the mask for the cipher | ||
1950 | * | ||
1951 | * Allocate a cipher handle for a single block cipher. The returned struct | ||
1952 | * crypto_cipher is the cipher handle that is required for any subsequent API | ||
1953 | * invocation for that single block cipher. | ||
1954 | * | ||
1955 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
1956 | * of an error, PTR_ERR() returns the error code. | ||
1957 | */ | ||
1029 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, | 1958 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, |
1030 | u32 type, u32 mask) | 1959 | u32 type, u32 mask) |
1031 | { | 1960 | { |
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) | |||
1041 | return &tfm->base; | 1970 | return &tfm->base; |
1042 | } | 1971 | } |
1043 | 1972 | ||
1973 | /** | ||
1974 | * crypto_free_cipher() - zeroize and free the single block cipher handle | ||
1975 | * @tfm: cipher handle to be freed | ||
1976 | */ | ||
1044 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) | 1977 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) |
1045 | { | 1978 | { |
1046 | crypto_free_tfm(crypto_cipher_tfm(tfm)); | 1979 | crypto_free_tfm(crypto_cipher_tfm(tfm)); |
1047 | } | 1980 | } |
1048 | 1981 | ||
1982 | /** | ||
1983 | * crypto_has_cipher() - Search for the availability of a single block cipher | ||
1984 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
1985 | * single block cipher | ||
1986 | * @type: specifies the type of the cipher | ||
1987 | * @mask: specifies the mask for the cipher | ||
1988 | * | ||
1989 | * Return: true when the single block cipher is known to the kernel crypto API; | ||
1990 | * false otherwise | ||
1991 | */ | ||
1049 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) | 1992 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) |
1050 | { | 1993 | { |
1051 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1994 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) | |||
1060 | return &crypto_cipher_tfm(tfm)->crt_cipher; | 2003 | return &crypto_cipher_tfm(tfm)->crt_cipher; |
1061 | } | 2004 | } |
1062 | 2005 | ||
2006 | /** | ||
2007 | * crypto_cipher_blocksize() - obtain block size for cipher | ||
2008 | * @tfm: cipher handle | ||
2009 | * | ||
2010 | * The block size for the single block cipher referenced with the cipher handle | ||
2011 | * tfm is returned. The caller may use that information to allocate appropriate | ||
2012 | * memory for the data returned by the encryption or decryption operation. | ||
2013 | * | ||
2014 | * Return: block size of cipher | ||
2015 | */ | ||
1063 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) | 2016 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) |
1064 | { | 2017 | { |
1065 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); | 2018 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); |
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, | |||
1087 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); | 2040 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); |
1088 | } | 2041 | } |
1089 | 2042 | ||
2043 | /** | ||
2044 | * crypto_cipher_setkey() - set key for cipher | ||
2045 | * @tfm: cipher handle | ||
2046 | * @key: buffer holding the key | ||
2047 | * @keylen: length of the key in bytes | ||
2048 | * | ||
2049 | * The caller provided key is set for the single block cipher referenced by the | ||
2050 | * cipher handle. | ||
2051 | * | ||
2052 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
2053 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
2054 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
2055 | * is performed. | ||
2056 | * | ||
2057 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
2058 | */ | ||
1090 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | 2059 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, |
1091 | const u8 *key, unsigned int keylen) | 2060 | const u8 *key, unsigned int keylen) |
1092 | { | 2061 | { |
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | |||
1094 | key, keylen); | 2063 | key, keylen); |
1095 | } | 2064 | } |
1096 | 2065 | ||
2066 | /** | ||
2067 | * crypto_cipher_encrypt_one() - encrypt one block of plaintext | ||
2068 | * @tfm: cipher handle | ||
2069 | * @dst: points to the buffer that will be filled with the ciphertext | ||
2070 | * @src: buffer holding the plaintext to be encrypted | ||
2071 | * | ||
2072 | * Invoke the encryption operation of one block. The caller must ensure that | ||
2073 | * the plaintext and ciphertext buffers are at least one block in size. | ||
2074 | */ | ||
1097 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | 2075 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, |
1098 | u8 *dst, const u8 *src) | 2076 | u8 *dst, const u8 *src) |
1099 | { | 2077 | { |
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | |||
1101 | dst, src); | 2079 | dst, src); |
1102 | } | 2080 | } |
1103 | 2081 | ||
2082 | /** | ||
2083 | * crypto_cipher_decrypt_one() - decrypt one block of ciphertext | ||
2084 | * @tfm: cipher handle | ||
2085 | * @dst: points to the buffer that will be filled with the plaintext | ||
2086 | * @src: buffer holding the ciphertext to be decrypted | ||
2087 | * | ||
2088 | * Invoke the decryption operation of one block. The caller must ensure that | ||
2089 | * the plaintext and ciphertext buffers are at least one block in size. | ||
2090 | */ | ||
1104 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | 2091 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, |
1105 | u8 *dst, const u8 *src) | 2092 | u8 *dst, const u8 *src) |
1106 | { | 2093 | { |
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | |||
1108 | dst, src); | 2095 | dst, src); |
1109 | } | 2096 | } |
1110 | 2097 | ||
2098 | /** | ||
2099 | * DOC: Synchronous Message Digest API | ||
2100 | * | ||
2101 | * The synchronous message digest API is used with the ciphers of type | ||
2102 | * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) | ||
2103 | */ | ||
2104 | |||
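As an illustration of the synchronous message digest calls documented below (a sketch, not patch code; the helper name and the choice of sha256 are assumptions): the init/update/final sequence over a single buffer.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hash len bytes of data with sha256; out must hold the digest size
 * reported by crypto_hash_digestsize(). */
static int example_hash(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        int ret;

        /* mask out CRYPTO_ALG_ASYNC to get a synchronous hash */
        tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;
        sg_init_one(&sg, data, len);

        ret = crypto_hash_init(&desc);
        if (!ret)
                ret = crypto_hash_update(&desc, &sg, len);
        if (!ret)
                ret = crypto_hash_final(&desc, out);

        crypto_free_hash(tfm);
        return ret;
}

The crypto_hash_digest() short-hand documented further below collapses these three calls into one; see the sketch after that function.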
1111 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) | 2105 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) |
1112 | { | 2106 | { |
1113 | return (struct crypto_hash *)tfm; | 2107 | return (struct crypto_hash *)tfm; |
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) | |||
1120 | return __crypto_hash_cast(tfm); | 2114 | return __crypto_hash_cast(tfm); |
1121 | } | 2115 | } |
1122 | 2116 | ||
2117 | /** | ||
2118 | * crypto_alloc_hash() - allocate synchronous message digest handle | ||
2119 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
2120 | * message digest cipher | ||
2121 | * @type: specifies the type of the cipher | ||
2122 | * @mask: specifies the mask for the cipher | ||
2123 | * | ||
2124 | * Allocate a cipher handle for a message digest. The returned struct | ||
2125 | * crypto_hash is the cipher handle that is required for any subsequent | ||
2126 | * API invocation for that message digest. | ||
2127 | * | ||
2128 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
2129 | * of an error, PTR_ERR() returns the error code. | ||
2130 | */ | ||
1123 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, | 2131 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, |
1124 | u32 type, u32 mask) | 2132 | u32 type, u32 mask) |
1125 | { | 2133 | { |
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) | |||
1136 | return &tfm->base; | 2144 | return &tfm->base; |
1137 | } | 2145 | } |
1138 | 2146 | ||
2147 | /** | ||
2148 | * crypto_free_hash() - zeroize and free message digest handle | ||
2149 | * @tfm: cipher handle to be freed | ||
2150 | */ | ||
1139 | static inline void crypto_free_hash(struct crypto_hash *tfm) | 2151 | static inline void crypto_free_hash(struct crypto_hash *tfm) |
1140 | { | 2152 | { |
1141 | crypto_free_tfm(crypto_hash_tfm(tfm)); | 2153 | crypto_free_tfm(crypto_hash_tfm(tfm)); |
1142 | } | 2154 | } |
1143 | 2155 | ||
2156 | /** | ||
2157 | * crypto_has_hash() - Search for the availability of a message digest | ||
2158 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
2159 | * message digest cipher | ||
2160 | * @type: specifies the type of the cipher | ||
2161 | * @mask: specifies the mask for the cipher | ||
2162 | * | ||
2163 | * Return: true when the message digest cipher is known to the kernel crypto | ||
2164 | * API; false otherwise | ||
2165 | */ | ||
1144 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) | 2166 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) |
1145 | { | 2167 | { |
1146 | type &= ~CRYPTO_ALG_TYPE_MASK; | 2168 | type &= ~CRYPTO_ALG_TYPE_MASK; |
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) | |||
1156 | return &crypto_hash_tfm(tfm)->crt_hash; | 2178 | return &crypto_hash_tfm(tfm)->crt_hash; |
1157 | } | 2179 | } |
1158 | 2180 | ||
2181 | /** | ||
2182 | * crypto_hash_blocksize() - obtain block size for message digest | ||
2183 | * @tfm: cipher handle | ||
2184 | * | ||
2185 | * The block size for the message digest cipher referenced with the cipher | ||
2186 | * handle is returned. | ||
2187 | * | ||
2188 | * Return: block size of cipher | ||
2189 | */ | ||
1159 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) | 2190 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) |
1160 | { | 2191 | { |
1161 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); | 2192 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); |
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) | |||
1166 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); | 2197 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); |
1167 | } | 2198 | } |
1168 | 2199 | ||
2200 | /** | ||
2201 | * crypto_hash_digestsize() - obtain message digest size | ||
2202 | * @tfm: cipher handle | ||
2203 | * | ||
2204 | * The size for the message digest created by the message digest cipher | ||
2205 | * referenced with the cipher handle is returned. | ||
2206 | * | ||
2207 | * Return: message digest size | ||
2208 | */ | ||
1169 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) | 2209 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) |
1170 | { | 2210 | { |
1171 | return crypto_hash_crt(tfm)->digestsize; | 2211 | return crypto_hash_crt(tfm)->digestsize; |
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) | |||
1186 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); | 2226 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); |
1187 | } | 2227 | } |
1188 | 2228 | ||
2229 | /** | ||
2230 | * crypto_hash_init() - (re)initialize message digest handle | ||
2231 | * @desc: cipher request handle to be filled by the caller -- | ||
2232 | * desc.tfm is filled with the hash cipher handle; | ||
2233 | * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
2234 | * | ||
2235 | * The call (re-)initializes the message digest referenced by the hash cipher | ||
2236 | * request handle. Any potentially existing state created by previous | ||
2237 | * operations is discarded. | ||
2238 | * | ||
2239 | * Return: 0 if the message digest initialization was successful; < 0 if an | ||
2240 | * error occurred | ||
2241 | */ | ||
1189 | static inline int crypto_hash_init(struct hash_desc *desc) | 2242 | static inline int crypto_hash_init(struct hash_desc *desc) |
1190 | { | 2243 | { |
1191 | return crypto_hash_crt(desc->tfm)->init(desc); | 2244 | return crypto_hash_crt(desc->tfm)->init(desc); |
1192 | } | 2245 | } |
1193 | 2246 | ||
2247 | /** | ||
2248 | * crypto_hash_update() - add data to message digest for processing | ||
2249 | * @desc: cipher request handle | ||
2250 | * @sg: scatter / gather list pointing to the data to be added to the message | ||
2251 | * digest | ||
2252 | * @nbytes: number of bytes to be processed from @sg | ||
2253 | * | ||
2254 | * Updates the message digest state of the cipher handle pointed to by the | ||
2255 | * hash cipher request handle with the input data pointed to by the | ||
2256 | * scatter/gather list. | ||
2257 | * | ||
2258 | * Return: 0 if the message digest update was successful; < 0 if an error | ||
2259 | * occurred | ||
2260 | */ | ||
1194 | static inline int crypto_hash_update(struct hash_desc *desc, | 2261 | static inline int crypto_hash_update(struct hash_desc *desc, |
1195 | struct scatterlist *sg, | 2262 | struct scatterlist *sg, |
1196 | unsigned int nbytes) | 2263 | unsigned int nbytes) |
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc, | |||
1198 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); | 2265 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); |
1199 | } | 2266 | } |
1200 | 2267 | ||
2268 | /** | ||
2269 | * crypto_hash_final() - calculate message digest | ||
2270 | * @desc: cipher request handle | ||
2271 | * @out: message digest output buffer -- The caller must ensure that the out | ||
2272 | * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize | ||
2273 | * function). | ||
2274 | * | ||
2275 | * Finalize the message digest operation and create the message digest | ||
2276 | * based on all data added to the cipher handle. The message digest is placed | ||
2277 | * into the output buffer. | ||
2278 | * | ||
2279 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
2280 | * occurred | ||
2281 | */ | ||
1201 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) | 2282 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) |
1202 | { | 2283 | { |
1203 | return crypto_hash_crt(desc->tfm)->final(desc, out); | 2284 | return crypto_hash_crt(desc->tfm)->final(desc, out); |
1204 | } | 2285 | } |
1205 | 2286 | ||
2287 | /** | ||
2288 | * crypto_hash_digest() - calculate message digest for a buffer | ||
2289 | * @desc: see crypto_hash_final() | ||
2290 | * @sg: see crypto_hash_update() | ||
2291 | * @nbytes: see crypto_hash_update() | ||
2292 | * @out: see crypto_hash_final() | ||
2293 | * | ||
2294 | * This function is a "short-hand" for the function calls of crypto_hash_init, | ||
2295 | * crypto_hash_update and crypto_hash_final. The parameters have the same | ||
2296 | * meaning as discussed for those three separate functions. | ||
2297 | * | ||
2298 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
2299 | * occurred | ||
2300 | */ | ||
1206 | static inline int crypto_hash_digest(struct hash_desc *desc, | 2301 | static inline int crypto_hash_digest(struct hash_desc *desc, |
1207 | struct scatterlist *sg, | 2302 | struct scatterlist *sg, |
1208 | unsigned int nbytes, u8 *out) | 2303 | unsigned int nbytes, u8 *out) |
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc, | |||
1210 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); | 2305 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); |
1211 | } | 2306 | } |
1212 | 2307 | ||
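The short-hand described above collapses the init/update/final sequence from the earlier sha256 sketch into a single call. A minimal illustration (again not patch code, with an invented helper name and the same includes as that sketch; tfm is an already allocated synchronous hash handle):

/* Same result as the init/update/final sequence, in a single call. */
static int example_hash_digest(struct crypto_hash *tfm,
                               const u8 *data, unsigned int len, u8 *out)
{
        struct hash_desc desc = { .tfm = tfm, .flags = 0 };
        struct scatterlist sg;

        sg_init_one(&sg, data, len);
        return crypto_hash_digest(&desc, &sg, len, out);
}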
2308 | /** | ||
2309 | * crypto_hash_setkey() - set key for message digest | ||
2310 | * @hash: cipher handle | ||
2311 | * @key: buffer holding the key | ||
2312 | * @keylen: length of the key in bytes | ||
2313 | * | ||
2314 | * The caller provided key is set for the message digest cipher. The cipher | ||
2315 | * handle must point to a keyed hash in order for this function to succeed. | ||
2316 | * | ||
2317 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
2318 | */ | ||
1213 | static inline int crypto_hash_setkey(struct crypto_hash *hash, | 2319 | static inline int crypto_hash_setkey(struct crypto_hash *hash, |
1214 | const u8 *key, unsigned int keylen) | 2320 | const u8 *key, unsigned int keylen) |
1215 | { | 2321 | { |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b2a2a08523bf..5a813988e6d4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -124,15 +124,15 @@ struct dentry { | |||
124 | void *d_fsdata; /* fs-specific data */ | 124 | void *d_fsdata; /* fs-specific data */ |
125 | 125 | ||
126 | struct list_head d_lru; /* LRU list */ | 126 | struct list_head d_lru; /* LRU list */ |
127 | struct list_head d_child; /* child of parent list */ | ||
128 | struct list_head d_subdirs; /* our children */ | ||
127 | /* | 129 | /* |
128 | * d_child and d_rcu can share memory | 130 | * d_alias and d_rcu can share memory |
129 | */ | 131 | */ |
130 | union { | 132 | union { |
131 | struct list_head d_child; /* child of parent list */ | 133 | struct hlist_node d_alias; /* inode alias list */ |
132 | struct rcu_head d_rcu; | 134 | struct rcu_head d_rcu; |
133 | } d_u; | 135 | } d_u; |
134 | struct list_head d_subdirs; /* our children */ | ||
135 | struct hlist_node d_alias; /* inode alias list */ | ||
136 | }; | 136 | }; |
137 | 137 | ||
138 | /* | 138 | /* |
@@ -230,7 +230,6 @@ extern seqlock_t rename_lock; | |||
230 | */ | 230 | */ |
231 | extern void d_instantiate(struct dentry *, struct inode *); | 231 | extern void d_instantiate(struct dentry *, struct inode *); |
232 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); | 232 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); |
233 | extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); | ||
234 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); | 233 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); |
235 | extern void __d_drop(struct dentry *dentry); | 234 | extern void __d_drop(struct dentry *dentry); |
236 | extern void d_drop(struct dentry *dentry); | 235 | extern void d_drop(struct dentry *dentry); |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d0b4d1aa132..da4c4983adbe 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | 22 | ||
23 | struct device; | ||
23 | struct file_operations; | 24 | struct file_operations; |
24 | 25 | ||
25 | struct debugfs_blob_wrapper { | 26 | struct debugfs_blob_wrapper { |
@@ -92,20 +93,25 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, | |||
92 | struct dentry *parent, | 93 | struct dentry *parent, |
93 | struct debugfs_regset32 *regset); | 94 | struct debugfs_regset32 *regset); |
94 | 95 | ||
95 | int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, | 96 | void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, |
96 | int nregs, void __iomem *base, char *prefix); | 97 | int nregs, void __iomem *base, char *prefix); |
97 | 98 | ||
98 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, | 99 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, |
99 | struct dentry *parent, | 100 | struct dentry *parent, |
100 | u32 *array, u32 elements); | 101 | u32 *array, u32 elements); |
101 | 102 | ||
103 | struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, | ||
104 | struct dentry *parent, | ||
105 | int (*read_fn)(struct seq_file *s, | ||
106 | void *data)); | ||
107 | |||
102 | bool debugfs_initialized(void); | 108 | bool debugfs_initialized(void); |
103 | 109 | ||
104 | #else | 110 | #else |
105 | 111 | ||
106 | #include <linux/err.h> | 112 | #include <linux/err.h> |
107 | 113 | ||
108 | /* | 114 | /* |
109 | * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled | 115 | * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled |
110 | * so users have a chance to detect if there was a real error or not. We don't | 116 | * so users have a chance to detect if there was a real error or not. We don't |
111 | * want to duplicate the design decision mistakes of procfs and devfs again. | 117 | * want to duplicate the design decision mistakes of procfs and devfs again. |
@@ -233,10 +239,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name, | |||
233 | return ERR_PTR(-ENODEV); | 239 | return ERR_PTR(-ENODEV); |
234 | } | 240 | } |
235 | 241 | ||
236 | static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, | 242 | static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, |
237 | int nregs, void __iomem *base, char *prefix) | 243 | int nregs, void __iomem *base, char *prefix) |
238 | { | 244 | { |
239 | return 0; | ||
240 | } | 245 | } |
241 | 246 | ||
242 | static inline bool debugfs_initialized(void) | 247 | static inline bool debugfs_initialized(void) |
@@ -251,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t | |||
251 | return ERR_PTR(-ENODEV); | 256 | return ERR_PTR(-ENODEV); |
252 | } | 257 | } |
253 | 258 | ||
259 | static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, | ||
260 | const char *name, | ||
261 | struct dentry *parent, | ||
262 | int (*read_fn)(struct seq_file *s, | ||
263 | void *data)) | ||
264 | { | ||
265 | return ERR_PTR(-ENODEV); | ||
266 | } | ||
267 | |||
254 | #endif | 268 | #endif |
255 | 269 | ||
256 | #endif | 270 | #endif |
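Editor's note: a minimal sketch of the new debugfs_create_devm_seqfile() helper, assuming (as the implementation suggests) that the device passed at creation time is handed back as the read_fn's data argument. The "mydrv" names and the file layout are invented; the created file is torn down automatically through devres when the device is unbound.

	#include <linux/debugfs.h>
	#include <linux/device.h>
	#include <linux/seq_file.h>

	/* Hypothetical show routine for a single-value debugfs file. */
	static int mydrv_state_show(struct seq_file *s, void *data)
	{
		struct device *dev = data;	/* assumed: the dev given at creation */

		seq_printf(s, "device %s is alive\n", dev_name(dev));
		return 0;
	}

	static void mydrv_setup_debugfs(struct device *dev, struct dentry *root)
	{
		debugfs_create_devm_seqfile(dev, "state", root, mydrv_state_show);
	}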
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e1707de043ae..ca6d2acc5eb7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti, | |||
64 | union map_info *map_context); | 64 | union map_info *map_context); |
65 | 65 | ||
66 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); | 66 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); |
67 | typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); | ||
67 | typedef void (*dm_postsuspend_fn) (struct dm_target *ti); | 68 | typedef void (*dm_postsuspend_fn) (struct dm_target *ti); |
68 | typedef int (*dm_preresume_fn) (struct dm_target *ti); | 69 | typedef int (*dm_preresume_fn) (struct dm_target *ti); |
69 | typedef void (*dm_resume_fn) (struct dm_target *ti); | 70 | typedef void (*dm_resume_fn) (struct dm_target *ti); |
@@ -145,6 +146,7 @@ struct target_type { | |||
145 | dm_endio_fn end_io; | 146 | dm_endio_fn end_io; |
146 | dm_request_endio_fn rq_end_io; | 147 | dm_request_endio_fn rq_end_io; |
147 | dm_presuspend_fn presuspend; | 148 | dm_presuspend_fn presuspend; |
149 | dm_presuspend_undo_fn presuspend_undo; | ||
148 | dm_postsuspend_fn postsuspend; | 150 | dm_postsuspend_fn postsuspend; |
149 | dm_preresume_fn preresume; | 151 | dm_preresume_fn preresume; |
150 | dm_resume_fn resume; | 152 | dm_resume_fn resume; |
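Editor's note: a hedged sketch of where the new presuspend_undo hook sits in a device-mapper target. The callbacks and target name are invented, and the mandatory ctr/dtr/map hooks are omitted for brevity; the point is only that presuspend_undo reverses whatever presuspend did when a suspend attempt is aborted.

	#include <linux/device-mapper.h>
	#include <linux/module.h>

	static void example_presuspend(struct dm_target *ti)
	{
		/* quiesce I/O, stop timers, flush queued work, ... */
	}

	static void example_presuspend_undo(struct dm_target *ti)
	{
		/* re-arm whatever presuspend tore down: the suspend was aborted */
	}

	static struct target_type example_target = {
		.name            = "example",
		.version         = {1, 0, 0},
		.module          = THIS_MODULE,
		/* .ctr/.dtr/.map omitted in this sketch */
		.presuspend      = example_presuspend,
		.presuspend_undo = example_presuspend_undo,
	};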
diff --git a/include/linux/device.h b/include/linux/device.h index ce1f21608b16..fb506738f7b7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev) | |||
911 | mutex_unlock(&dev->mutex); | 911 | mutex_unlock(&dev->mutex); |
912 | } | 912 | } |
913 | 913 | ||
914 | static inline void device_lock_assert(struct device *dev) | ||
915 | { | ||
916 | lockdep_assert_held(&dev->mutex); | ||
917 | } | ||
918 | |||
914 | void driver_init(void); | 919 | void driver_init(void); |
915 | 920 | ||
916 | /* | 921 | /* |
@@ -1118,6 +1123,41 @@ do { \ | |||
1118 | }) | 1123 | }) |
1119 | #endif | 1124 | #endif |
1120 | 1125 | ||
1126 | #ifdef CONFIG_PRINTK | ||
1127 | #define dev_level_once(dev_level, dev, fmt, ...) \ | ||
1128 | do { \ | ||
1129 | static bool __print_once __read_mostly; \ | ||
1130 | \ | ||
1131 | if (!__print_once) { \ | ||
1132 | __print_once = true; \ | ||
1133 | dev_level(dev, fmt, ##__VA_ARGS__); \ | ||
1134 | } \ | ||
1135 | } while (0) | ||
1136 | #else | ||
1137 | #define dev_level_once(dev_level, dev, fmt, ...) \ | ||
1138 | do { \ | ||
1139 | if (0) \ | ||
1140 | dev_level(dev, fmt, ##__VA_ARGS__); \ | ||
1141 | } while (0) | ||
1142 | #endif | ||
1143 | |||
1144 | #define dev_emerg_once(dev, fmt, ...) \ | ||
1145 | dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) | ||
1146 | #define dev_alert_once(dev, fmt, ...) \ | ||
1147 | dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) | ||
1148 | #define dev_crit_once(dev, fmt, ...) \ | ||
1149 | dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) | ||
1150 | #define dev_err_once(dev, fmt, ...) \ | ||
1151 | dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) | ||
1152 | #define dev_warn_once(dev, fmt, ...) \ | ||
1153 | dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) | ||
1154 | #define dev_notice_once(dev, fmt, ...) \ | ||
1155 | dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) | ||
1156 | #define dev_info_once(dev, fmt, ...) \ | ||
1157 | dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) | ||
1158 | #define dev_dbg_once(dev, fmt, ...) \ | ||
1159 | dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) | ||
1160 | |||
1121 | #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ | 1161 | #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ |
1122 | do { \ | 1162 | do { \ |
1123 | static DEFINE_RATELIMIT_STATE(_rs, \ | 1163 | static DEFINE_RATELIMIT_STATE(_rs, \ |
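Editor's note: a minimal sketch combining the two device.h additions above, the dev_<level>_once() printk variants and device_lock_assert(). The function and message are invented; the assert produces a lockdep complaint if the caller does not hold dev->mutex.

	#include <linux/device.h>

	/* Illustrative only: warn a single time about a quirky device and
	 * document that the caller must hold the device lock. */
	static void example_apply_quirk(struct device *dev, bool quirky)
	{
		device_lock_assert(dev);	/* caller must hold device_lock(dev) */

		if (quirky)
			dev_warn_once(dev, "applying quirk for buggy firmware\n");
	}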
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 653a1fd07ae8..40cd75e21ea2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -447,7 +447,8 @@ struct dmaengine_unmap_data { | |||
447 | * communicate status | 447 | * communicate status |
448 | * @phys: physical address of the descriptor | 448 | * @phys: physical address of the descriptor |
449 | * @chan: target channel for this operation | 449 | * @chan: target channel for this operation |
450 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine | 450 | * @tx_submit: accept the descriptor, assign ordered cookie and mark the |
451 | * descriptor pending. To be pushed on .issue_pending() call | ||
451 | * @callback: routine to call after this operation is complete | 452 | * @callback: routine to call after this operation is complete |
452 | * @callback_param: general parameter to pass to the callback routine | 453 | * @callback_param: general parameter to pass to the callback routine |
453 | * ---async_tx api specific fields--- | 454 | * ---async_tx api specific fields--- |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 593fff99e6bf..30624954dec5 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -30,6 +30,12 @@ | |||
30 | 30 | ||
31 | struct acpi_dmar_header; | 31 | struct acpi_dmar_header; |
32 | 32 | ||
33 | #ifdef CONFIG_X86 | ||
34 | # define DMAR_UNITS_SUPPORTED MAX_IO_APICS | ||
35 | #else | ||
36 | # define DMAR_UNITS_SUPPORTED 64 | ||
37 | #endif | ||
38 | |||
33 | /* DMAR Flags */ | 39 | /* DMAR Flags */ |
34 | #define DMAR_INTR_REMAP 0x1 | 40 | #define DMAR_INTR_REMAP 0x1 |
35 | #define DMAR_X2APIC_OPT_OUT 0x2 | 41 | #define DMAR_X2APIC_OPT_OUT 0x2 |
@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | |||
120 | /* Intel IOMMU detection */ | 126 | /* Intel IOMMU detection */ |
121 | extern int detect_intel_iommu(void); | 127 | extern int detect_intel_iommu(void); |
122 | extern int enable_drhd_fault_handling(void); | 128 | extern int enable_drhd_fault_handling(void); |
129 | extern int dmar_device_add(acpi_handle handle); | ||
130 | extern int dmar_device_remove(acpi_handle handle); | ||
131 | |||
132 | static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) | ||
133 | { | ||
134 | return 0; | ||
135 | } | ||
123 | 136 | ||
124 | #ifdef CONFIG_INTEL_IOMMU | 137 | #ifdef CONFIG_INTEL_IOMMU |
125 | extern int iommu_detected, no_iommu; | 138 | extern int iommu_detected, no_iommu; |
126 | extern int intel_iommu_init(void); | 139 | extern int intel_iommu_init(void); |
127 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | 140 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); |
128 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | 141 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); |
142 | extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
143 | extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
144 | extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
129 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | 145 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); |
130 | #else /* !CONFIG_INTEL_IOMMU: */ | 146 | #else /* !CONFIG_INTEL_IOMMU: */ |
131 | static inline int intel_iommu_init(void) { return -ENODEV; } | 147 | static inline int intel_iommu_init(void) { return -ENODEV; } |
132 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 148 | |
149 | #define dmar_parse_one_rmrr dmar_res_noop | ||
150 | #define dmar_parse_one_atsr dmar_res_noop | ||
151 | #define dmar_check_one_atsr dmar_res_noop | ||
152 | #define dmar_release_one_atsr dmar_res_noop | ||
153 | |||
154 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | ||
133 | { | 155 | { |
134 | return 0; | 156 | return 0; |
135 | } | 157 | } |
136 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | 158 | |
159 | static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
137 | { | 160 | { |
138 | return 0; | 161 | return 0; |
139 | } | 162 | } |
140 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | 163 | #endif /* CONFIG_INTEL_IOMMU */ |
164 | |||
165 | #ifdef CONFIG_IRQ_REMAP | ||
166 | extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
167 | #else /* CONFIG_IRQ_REMAP */ | ||
168 | static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
169 | { return 0; } | ||
170 | #endif /* CONFIG_IRQ_REMAP */ | ||
171 | |||
172 | #else /* CONFIG_DMAR_TABLE */ | ||
173 | |||
174 | static inline int dmar_device_add(void *handle) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static inline int dmar_device_remove(void *handle) | ||
141 | { | 180 | { |
142 | return 0; | 181 | return 0; |
143 | } | 182 | } |
144 | #endif /* CONFIG_INTEL_IOMMU */ | ||
145 | 183 | ||
146 | #endif /* CONFIG_DMAR_TABLE */ | 184 | #endif /* CONFIG_DMAR_TABLE */ |
147 | 185 | ||
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index debb70d40547..8723f2a99e15 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -172,7 +172,7 @@ enum drbd_ret_code { | |||
172 | ERR_RES_NOT_KNOWN = 158, | 172 | ERR_RES_NOT_KNOWN = 158, |
173 | ERR_RES_IN_USE = 159, | 173 | ERR_RES_IN_USE = 159, |
174 | ERR_MINOR_CONFIGURED = 160, | 174 | ERR_MINOR_CONFIGURED = 160, |
175 | ERR_MINOR_EXISTS = 161, | 175 | ERR_MINOR_OR_VOLUME_EXISTS = 161, |
176 | ERR_INVALID_REQUEST = 162, | 176 | ERR_INVALID_REQUEST = 162, |
177 | ERR_NEED_APV_100 = 163, | 177 | ERR_NEED_APV_100 = 163, |
178 | ERR_NEED_ALLOW_TWO_PRI = 164, | 178 | ERR_NEED_ALLOW_TWO_PRI = 164, |
diff --git a/include/linux/edac.h b/include/linux/edac.h index e1e68da6f35c..da3b72e95db3 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) | |||
194 | * @MEM_DDR3: DDR3 RAM | 194 | * @MEM_DDR3: DDR3 RAM |
195 | * @MEM_RDDR3: Registered DDR3 RAM | 195 | * @MEM_RDDR3: Registered DDR3 RAM |
196 | * This is a variant of the DDR3 memories. | 196 | * This is a variant of the DDR3 memories. |
197 | * @MEM_DDR4: DDR4 RAM | 197 | * @MEM_LRDDR3: Load-Reduced DDR3 memory. |
198 | * @MEM_DDR4: Unbuffered DDR4 RAM | ||
198 | * @MEM_RDDR4: Registered DDR4 RAM | 199 | * @MEM_RDDR4: Registered DDR4 RAM |
199 | * This is a variant of the DDR4 memories. | 200 | * This is a variant of the DDR4 memories. |
200 | */ | 201 | */ |
@@ -216,6 +217,7 @@ enum mem_type { | |||
216 | MEM_XDR, | 217 | MEM_XDR, |
217 | MEM_DDR3, | 218 | MEM_DDR3, |
218 | MEM_RDDR3, | 219 | MEM_RDDR3, |
220 | MEM_LRDDR3, | ||
219 | MEM_DDR4, | 221 | MEM_DDR4, |
220 | MEM_RDDR4, | 222 | MEM_RDDR4, |
221 | }; | 223 | }; |
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index e50f98b0297a..eb0b1988050a 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h | |||
@@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, | |||
75 | const u8 word, u16 *data); | 75 | const u8 word, u16 *data); |
76 | extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, | 76 | extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, |
77 | const u8 word, __le16 *data, const u16 words); | 77 | const u8 word, __le16 *data, const u16 words); |
78 | extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, | ||
79 | const u8 byte, u8 *data); | ||
80 | extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, | ||
81 | const u8 byte, u8 *data, const u16 bytes); | ||
78 | 82 | ||
79 | extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); | 83 | extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); |
80 | 84 | ||
diff --git a/include/linux/efi.h b/include/linux/efi.h index 0949f9c7e872..0238d612750e 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -547,6 +547,9 @@ void efi_native_runtime_setup(void); | |||
547 | #define SMBIOS_TABLE_GUID \ | 547 | #define SMBIOS_TABLE_GUID \ |
548 | EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) | 548 | EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) |
549 | 549 | ||
550 | #define SMBIOS3_TABLE_GUID \ | ||
551 | EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 ) | ||
552 | |||
550 | #define SAL_SYSTEM_TABLE_GUID \ | 553 | #define SAL_SYSTEM_TABLE_GUID \ |
551 | EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) | 554 | EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) |
552 | 555 | ||
@@ -810,7 +813,8 @@ extern struct efi { | |||
810 | unsigned long mps; /* MPS table */ | 813 | unsigned long mps; /* MPS table */ |
811 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ | 814 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ |
812 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ | 815 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ |
813 | unsigned long smbios; /* SM BIOS table */ | 816 | unsigned long smbios; /* SMBIOS table (32 bit entry point) */ |
817 | unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ | ||
814 | unsigned long sal_systab; /* SAL system table */ | 818 | unsigned long sal_systab; /* SAL system table */ |
815 | unsigned long boot_info; /* boot info table */ | 819 | unsigned long boot_info; /* boot info table */ |
816 | unsigned long hcdp; /* HCDP table */ | 820 | unsigned long hcdp; /* HCDP table */ |
diff --git a/include/linux/elf.h b/include/linux/elf.h index 67a5fa7830c4..20fa8d8ae313 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h | |||
@@ -15,6 +15,11 @@ | |||
15 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) | 15 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | #ifndef SET_PERSONALITY2 | ||
19 | #define SET_PERSONALITY2(ex, state) \ | ||
20 | SET_PERSONALITY(ex) | ||
21 | #endif | ||
22 | |||
18 | #if ELF_CLASS == ELFCLASS32 | 23 | #if ELF_CLASS == ELFCLASS32 |
19 | 24 | ||
20 | extern Elf32_Dyn _DYNAMIC []; | 25 | extern Elf32_Dyn _DYNAMIC []; |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e3..41c891d05f04 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) | |||
392 | #endif | 392 | #endif |
393 | } | 393 | } |
394 | 394 | ||
395 | /** | ||
396 | * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame | ||
397 | * @skb: Buffer to pad | ||
398 | * | ||
399 | * An Ethernet frame should have a minimum size of 60 bytes. This function | ||
400 | * takes short frames and pads them with zeros up to the 60 byte limit. | ||
401 | */ | ||
402 | static inline int eth_skb_pad(struct sk_buff *skb) | ||
403 | { | ||
404 | return skb_put_padto(skb, ETH_ZLEN); | ||
405 | } | ||
406 | |||
395 | #endif /* _LINUX_ETHERDEVICE_H */ | 407 | #endif /* _LINUX_ETHERDEVICE_H */ |
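Editor's note: a sketch of the new eth_skb_pad() helper in a transmit path. The driver function is invented; eth_skb_pad() is assumed to consume the skb when padding fails, as skb_put_padto()/skb_pad() do, so the caller simply reports the packet as handled.

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Illustrative ndo_start_xmit using the new padding helper. */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
	{
		if (eth_skb_pad(skb))
			return NETDEV_TX_OK;	/* skb already freed on error */

		/* ... hand the now >= ETH_ZLEN frame to the hardware ... */
		return NETDEV_TX_OK;
	}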
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb82..653dc9c4ebac 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -59,6 +59,26 @@ enum ethtool_phys_id_state { | |||
59 | ETHTOOL_ID_OFF | 59 | ETHTOOL_ID_OFF |
60 | }; | 60 | }; |
61 | 61 | ||
62 | enum { | ||
63 | ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ | ||
64 | ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ | ||
65 | |||
66 | /* | ||
67 | * Add your fresh new hash function bits above and remember to update | ||
68 | * rss_hash_func_strings[] in ethtool.c | ||
69 | */ | ||
70 | ETH_RSS_HASH_FUNCS_COUNT | ||
71 | }; | ||
72 | |||
73 | #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) | ||
74 | #define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) | ||
75 | |||
76 | #define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) | ||
77 | #define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) | ||
78 | |||
79 | #define ETH_RSS_HASH_UNKNOWN 0 | ||
80 | #define ETH_RSS_HASH_NO_CHANGE 0 | ||
81 | |||
62 | struct net_device; | 82 | struct net_device; |
63 | 83 | ||
64 | /* Some generic methods drivers may use in their ethtool_ops */ | 84 | /* Some generic methods drivers may use in their ethtool_ops */ |
@@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) | |||
158 | * Returns zero if not supported for this specific device. | 178 | * Returns zero if not supported for this specific device. |
159 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. | 179 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. |
160 | * Returns zero if not supported for this specific device. | 180 | * Returns zero if not supported for this specific device. |
161 | * @get_rxfh: Get the contents of the RX flow hash indirection table and hash | 181 | * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key |
162 | * key. | 182 | * and/or hash function. |
163 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
164 | * @get_rxfh_key_size are implemented and return non-zero. | ||
165 | * Returns a negative error code or zero. | ||
166 | * @set_rxfh: Set the contents of the RX flow hash indirection table and/or | ||
167 | * hash key. In case only the indirection table or hash key is to be | ||
168 | * changed, the other argument will be %NULL. | ||
169 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
170 | * @get_rxfh_key_size are implemented and return non-zero. | ||
171 | * Returns a negative error code or zero. | 183 | * Returns a negative error code or zero. |
184 | * @set_rxfh: Set the contents of the RX flow hash indirection table, hash | ||
185 | * key, and/or hash function. Arguments which are set to %NULL or zero | ||
186 | * will remain unchanged. | ||
187 | * Returns a negative error code or zero. An error code must be returned | ||
188 | * if at least one unsupported change was requested. | ||
172 | * @get_channels: Get number of channels. | 189 | * @get_channels: Get number of channels. |
173 | * @set_channels: Set number of channels. Returns a negative error code or | 190 | * @set_channels: Set number of channels. Returns a negative error code or |
174 | * zero. | 191 | * zero. |
@@ -241,9 +258,10 @@ struct ethtool_ops { | |||
241 | int (*reset)(struct net_device *, u32 *); | 258 | int (*reset)(struct net_device *, u32 *); |
242 | u32 (*get_rxfh_key_size)(struct net_device *); | 259 | u32 (*get_rxfh_key_size)(struct net_device *); |
243 | u32 (*get_rxfh_indir_size)(struct net_device *); | 260 | u32 (*get_rxfh_indir_size)(struct net_device *); |
244 | int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); | 261 | int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, |
262 | u8 *hfunc); | ||
245 | int (*set_rxfh)(struct net_device *, const u32 *indir, | 263 | int (*set_rxfh)(struct net_device *, const u32 *indir, |
246 | const u8 *key); | 264 | const u8 *key, const u8 hfunc); |
247 | void (*get_channels)(struct net_device *, struct ethtool_channels *); | 265 | void (*get_channels)(struct net_device *, struct ethtool_channels *); |
248 | int (*set_channels)(struct net_device *, struct ethtool_channels *); | 266 | int (*set_channels)(struct net_device *, struct ethtool_channels *); |
249 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); | 267 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); |
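Editor's note: a hedged sketch of the updated get_rxfh/set_rxfh callbacks for a driver whose hardware only supports Toeplitz hashing. The private structure, table sizes and names are invented; only the handling of the new hfunc argument follows the kernel-doc above (report the current function, reject unsupported changes, treat NULL/zero arguments as "no change").

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	struct example_priv {			/* invented driver private data */
		u32 rss_indir[128];
		u8  rss_key[40];
	};

	static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				    u8 *hfunc)
	{
		struct example_priv *priv = netdev_priv(dev);

		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (indir)
			memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
		if (key)
			memcpy(key, priv->rss_key, sizeof(priv->rss_key));
		return 0;
	}

	static int example_set_rxfh(struct net_device *dev, const u32 *indir,
				    const u8 *key, const u8 hfunc)
	{
		if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
			return -EOPNOTSUPP;	/* hardware is Toeplitz-only */

		/* ... program indir/key into the NIC when non-NULL ... */
		return 0;
	}

These would be wired into the driver's ethtool_ops as .get_rxfh and .set_rxfh.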
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 860313a33a43..87f14e90e984 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
@@ -33,7 +33,8 @@ | |||
33 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) | 33 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) |
34 | 34 | ||
35 | /* This flag is used by node and meta inodes, and by recovery */ | 35 | /* This flag is used by node and meta inodes, and by recovery */ |
36 | #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) | 36 | #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) |
37 | #define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * For further optimization on multi-head logs, on-disk layout supports maximum | 40 | * For further optimization on multi-head logs, on-disk layout supports maximum |
@@ -170,14 +171,12 @@ struct f2fs_extent { | |||
170 | 171 | ||
171 | #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ | 172 | #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ |
172 | #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ | 173 | #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ |
174 | #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ | ||
175 | #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ | ||
173 | 176 | ||
174 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ | 177 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ |
175 | F2FS_INLINE_XATTR_ADDRS - 1)) | 178 | F2FS_INLINE_XATTR_ADDRS - 1)) |
176 | 179 | ||
177 | #define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ | ||
178 | sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ | ||
179 | DEF_NIDS_PER_INODE - 1)) | ||
180 | |||
181 | struct f2fs_inode { | 180 | struct f2fs_inode { |
182 | __le16 i_mode; /* file mode */ | 181 | __le16 i_mode; /* file mode */ |
183 | __u8 i_advise; /* file hints */ | 182 | __u8 i_advise; /* file hints */ |
@@ -435,6 +434,24 @@ struct f2fs_dentry_block { | |||
435 | __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; | 434 | __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; |
436 | } __packed; | 435 | } __packed; |
437 | 436 | ||
437 | /* for inline dir */ | ||
438 | #define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ | ||
439 | ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ | ||
440 | BITS_PER_BYTE + 1)) | ||
441 | #define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ | ||
442 | BITS_PER_BYTE - 1) / BITS_PER_BYTE) | ||
443 | #define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ | ||
444 | ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ | ||
445 | NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) | ||
446 | |||
447 | /* inline directory entry structure */ | ||
448 | struct f2fs_inline_dentry { | ||
449 | __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; | ||
450 | __u8 reserved[INLINE_RESERVED_SIZE]; | ||
451 | struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; | ||
452 | __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; | ||
453 | } __packed; | ||
454 | |||
438 | /* file types used in inode_info->flags */ | 455 | /* file types used in inode_info->flags */ |
439 | enum { | 456 | enum { |
440 | F2FS_FT_UNKNOWN, | 457 | F2FS_FT_UNKNOWN, |
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c6f996f2abb6..798fad9e420d 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/debugfs.h> | 7 | #include <linux/debugfs.h> |
8 | #include <linux/ratelimit.h> | ||
8 | #include <linux/atomic.h> | 9 | #include <linux/atomic.h> |
9 | 10 | ||
10 | /* | 11 | /* |
@@ -25,14 +26,18 @@ struct fault_attr { | |||
25 | unsigned long reject_end; | 26 | unsigned long reject_end; |
26 | 27 | ||
27 | unsigned long count; | 28 | unsigned long count; |
29 | struct ratelimit_state ratelimit_state; | ||
30 | struct dentry *dname; | ||
28 | }; | 31 | }; |
29 | 32 | ||
30 | #define FAULT_ATTR_INITIALIZER { \ | 33 | #define FAULT_ATTR_INITIALIZER { \ |
31 | .interval = 1, \ | 34 | .interval = 1, \ |
32 | .times = ATOMIC_INIT(1), \ | 35 | .times = ATOMIC_INIT(1), \ |
33 | .require_end = ULONG_MAX, \ | 36 | .require_end = ULONG_MAX, \ |
34 | .stacktrace_depth = 32, \ | 37 | .stacktrace_depth = 32, \ |
35 | .verbose = 2, \ | 38 | .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ |
39 | .verbose = 2, \ | ||
40 | .dname = NULL, \ | ||
36 | } | 41 | } |
37 | 42 | ||
38 | #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER | 43 | #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER |
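Editor's note: fault_attr usage itself is unchanged by this hunk; the sketch below shows the standard pattern, with the new ratelimit_state understood to throttle the verbose reporting emitted when a fault actually triggers. The attribute and function names are invented.

	#include <linux/fault-inject.h>

	static DECLARE_FAULT_ATTR(fail_example_io);	/* invented attribute */

	static int example_submit_io(void)
	{
		if (should_fail(&fail_example_io, 1))
			return -EIO;	/* injected failure */

		/* ... real I/O submission ... */
		return 0;
	}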
diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874b..39efee130d2b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
@@ -128,8 +128,8 @@ struct fence_cb { | |||
128 | * from irq context, so normal spinlocks can be used. | 128 | * from irq context, so normal spinlocks can be used. |
129 | * | 129 | * |
130 | * A return value of false indicates the fence already passed, | 130 | * A return value of false indicates the fence already passed, |
131 | * or some failure occured that made it impossible to enable | 131 | * or some failure occurred that made it impossible to enable |
132 | * signaling. True indicates succesful enabling. | 132 | * signaling. True indicates successful enabling. |
133 | * | 133 | * |
134 | * fence->status may be set in enable_signaling, but only when false is | 134 | * fence->status may be set in enable_signaling, but only when false is |
135 | * returned. | 135 | * returned. |
diff --git a/include/linux/file.h b/include/linux/file.h index 4d69123377a2..f87d30882a24 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
@@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag); | |||
66 | extern bool get_close_on_exec(unsigned int fd); | 66 | extern bool get_close_on_exec(unsigned int fd); |
67 | extern void put_filp(struct file *); | 67 | extern void put_filp(struct file *); |
68 | extern int get_unused_fd_flags(unsigned flags); | 68 | extern int get_unused_fd_flags(unsigned flags); |
69 | #define get_unused_fd() get_unused_fd_flags(0) | ||
70 | extern void put_unused_fd(unsigned int fd); | 69 | extern void put_unused_fd(unsigned int fd); |
71 | 70 | ||
72 | extern void fd_install(unsigned int fd, struct file *file); | 71 | extern void fd_install(unsigned int fd, struct file *file); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed1..caac2087a4d5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); | |||
381 | void bpf_prog_destroy(struct bpf_prog *fp); | 381 | void bpf_prog_destroy(struct bpf_prog *fp); |
382 | 382 | ||
383 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 383 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
384 | int sk_attach_bpf(u32 ufd, struct sock *sk); | ||
384 | int sk_detach_filter(struct sock *sk); | 385 | int sk_detach_filter(struct sock *sk); |
385 | 386 | ||
386 | int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); | 387 | int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 7fd81b8c4897..6b7fd9cf5ea2 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, | |||
246 | * defined in <linux/wait.h> | 246 | * defined in <linux/wait.h> |
247 | */ | 247 | */ |
248 | 248 | ||
249 | #define wait_event_freezekillable(wq, condition) \ | ||
250 | ({ \ | ||
251 | int __retval; \ | ||
252 | freezer_do_not_count(); \ | ||
253 | __retval = wait_event_killable(wq, (condition)); \ | ||
254 | freezer_count(); \ | ||
255 | __retval; \ | ||
256 | }) | ||
257 | |||
258 | /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ | 249 | /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ |
259 | #define wait_event_freezekillable_unsafe(wq, condition) \ | 250 | #define wait_event_freezekillable_unsafe(wq, condition) \ |
260 | ({ \ | 251 | ({ \ |
@@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, | |||
265 | __retval; \ | 256 | __retval; \ |
266 | }) | 257 | }) |
267 | 258 | ||
268 | #define wait_event_freezable(wq, condition) \ | ||
269 | ({ \ | ||
270 | int __retval; \ | ||
271 | freezer_do_not_count(); \ | ||
272 | __retval = wait_event_interruptible(wq, (condition)); \ | ||
273 | freezer_count(); \ | ||
274 | __retval; \ | ||
275 | }) | ||
276 | |||
277 | #define wait_event_freezable_timeout(wq, condition, timeout) \ | ||
278 | ({ \ | ||
279 | long __retval = timeout; \ | ||
280 | freezer_do_not_count(); \ | ||
281 | __retval = wait_event_interruptible_timeout(wq, (condition), \ | ||
282 | __retval); \ | ||
283 | freezer_count(); \ | ||
284 | __retval; \ | ||
285 | }) | ||
286 | |||
287 | #define wait_event_freezable_exclusive(wq, condition) \ | ||
288 | ({ \ | ||
289 | int __retval; \ | ||
290 | freezer_do_not_count(); \ | ||
291 | __retval = wait_event_interruptible_exclusive(wq, condition); \ | ||
292 | freezer_count(); \ | ||
293 | __retval; \ | ||
294 | }) | ||
295 | |||
296 | |||
297 | #else /* !CONFIG_FREEZER */ | 259 | #else /* !CONFIG_FREEZER */ |
298 | static inline bool frozen(struct task_struct *p) { return false; } | 260 | static inline bool frozen(struct task_struct *p) { return false; } |
299 | static inline bool freezing(struct task_struct *p) { return false; } | 261 | static inline bool freezing(struct task_struct *p) { return false; } |
@@ -331,18 +293,6 @@ static inline void set_freezable(void) {} | |||
331 | #define freezable_schedule_hrtimeout_range(expires, delta, mode) \ | 293 | #define freezable_schedule_hrtimeout_range(expires, delta, mode) \ |
332 | schedule_hrtimeout_range(expires, delta, mode) | 294 | schedule_hrtimeout_range(expires, delta, mode) |
333 | 295 | ||
334 | #define wait_event_freezable(wq, condition) \ | ||
335 | wait_event_interruptible(wq, condition) | ||
336 | |||
337 | #define wait_event_freezable_timeout(wq, condition, timeout) \ | ||
338 | wait_event_interruptible_timeout(wq, condition, timeout) | ||
339 | |||
340 | #define wait_event_freezable_exclusive(wq, condition) \ | ||
341 | wait_event_interruptible_exclusive(wq, condition) | ||
342 | |||
343 | #define wait_event_freezekillable(wq, condition) \ | ||
344 | wait_event_killable(wq, condition) | ||
345 | |||
346 | #define wait_event_freezekillable_unsafe(wq, condition) \ | 296 | #define wait_event_freezekillable_unsafe(wq, condition) \ |
347 | wait_event_killable(wq, condition) | 297 | wait_event_killable(wq, condition) |
348 | 298 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ab779e8a63c..88157253b9e6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/pid.h> | 18 | #include <linux/pid.h> |
19 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | #include <linux/rwsem.h> | ||
21 | #include <linux/capability.h> | 22 | #include <linux/capability.h> |
22 | #include <linux/semaphore.h> | 23 | #include <linux/semaphore.h> |
23 | #include <linux/fiemap.h> | 24 | #include <linux/fiemap.h> |
@@ -401,7 +402,7 @@ struct address_space { | |||
401 | atomic_t i_mmap_writable;/* count VM_SHARED mappings */ | 402 | atomic_t i_mmap_writable;/* count VM_SHARED mappings */ |
402 | struct rb_root i_mmap; /* tree of private and shared mappings */ | 403 | struct rb_root i_mmap; /* tree of private and shared mappings */ |
403 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ | 404 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ |
404 | struct mutex i_mmap_mutex; /* protect tree, count, list */ | 405 | struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ |
405 | /* Protected by tree_lock together with the radix tree */ | 406 | /* Protected by tree_lock together with the radix tree */ |
406 | unsigned long nrpages; /* number of total pages */ | 407 | unsigned long nrpages; /* number of total pages */ |
407 | unsigned long nrshadows; /* number of shadow entries */ | 408 | unsigned long nrshadows; /* number of shadow entries */ |
@@ -467,6 +468,26 @@ struct block_device { | |||
467 | 468 | ||
468 | int mapping_tagged(struct address_space *mapping, int tag); | 469 | int mapping_tagged(struct address_space *mapping, int tag); |
469 | 470 | ||
471 | static inline void i_mmap_lock_write(struct address_space *mapping) | ||
472 | { | ||
473 | down_write(&mapping->i_mmap_rwsem); | ||
474 | } | ||
475 | |||
476 | static inline void i_mmap_unlock_write(struct address_space *mapping) | ||
477 | { | ||
478 | up_write(&mapping->i_mmap_rwsem); | ||
479 | } | ||
480 | |||
481 | static inline void i_mmap_lock_read(struct address_space *mapping) | ||
482 | { | ||
483 | down_read(&mapping->i_mmap_rwsem); | ||
484 | } | ||
485 | |||
486 | static inline void i_mmap_unlock_read(struct address_space *mapping) | ||
487 | { | ||
488 | up_read(&mapping->i_mmap_rwsem); | ||
489 | } | ||
490 | |||
470 | /* | 491 | /* |
471 | * Might pages of this file be mapped into userspace? | 492 | * Might pages of this file be mapped into userspace? |
472 | */ | 493 | */ |
@@ -606,9 +627,6 @@ struct inode { | |||
606 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 627 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
607 | struct file_lock *i_flock; | 628 | struct file_lock *i_flock; |
608 | struct address_space i_data; | 629 | struct address_space i_data; |
609 | #ifdef CONFIG_QUOTA | ||
610 | struct dquot *i_dquot[MAXQUOTAS]; | ||
611 | #endif | ||
612 | struct list_head i_devices; | 630 | struct list_head i_devices; |
613 | union { | 631 | union { |
614 | struct pipe_inode_info *i_pipe; | 632 | struct pipe_inode_info *i_pipe; |
@@ -789,7 +807,6 @@ struct file { | |||
789 | struct rcu_head fu_rcuhead; | 807 | struct rcu_head fu_rcuhead; |
790 | } f_u; | 808 | } f_u; |
791 | struct path f_path; | 809 | struct path f_path; |
792 | #define f_dentry f_path.dentry | ||
793 | struct inode *f_inode; /* cached value */ | 810 | struct inode *f_inode; /* cached value */ |
794 | const struct file_operations *f_op; | 811 | const struct file_operations *f_op; |
795 | 812 | ||
@@ -1224,6 +1241,7 @@ struct super_block { | |||
1224 | struct backing_dev_info *s_bdi; | 1241 | struct backing_dev_info *s_bdi; |
1225 | struct mtd_info *s_mtd; | 1242 | struct mtd_info *s_mtd; |
1226 | struct hlist_node s_instances; | 1243 | struct hlist_node s_instances; |
1244 | unsigned int s_quota_types; /* Bitmask of supported quota types */ | ||
1227 | struct quota_info s_dquot; /* Diskquota specific options */ | 1245 | struct quota_info s_dquot; /* Diskquota specific options */ |
1228 | 1246 | ||
1229 | struct sb_writers s_writers; | 1247 | struct sb_writers s_writers; |
@@ -1467,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); | |||
1467 | * This allows the kernel to read directories into kernel space or | 1485 | * This allows the kernel to read directories into kernel space or |
1468 | * to have different dirent layouts depending on the binary type. | 1486 | * to have different dirent layouts depending on the binary type. |
1469 | */ | 1487 | */ |
1470 | typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); | 1488 | struct dir_context; |
1489 | typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, | ||
1490 | unsigned); | ||
1491 | |||
1471 | struct dir_context { | 1492 | struct dir_context { |
1472 | const filldir_t actor; | 1493 | const filldir_t actor; |
1473 | loff_t pos; | 1494 | loff_t pos; |
@@ -1497,6 +1518,7 @@ struct file_operations { | |||
1497 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1518 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
1498 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); | 1519 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); |
1499 | int (*mmap) (struct file *, struct vm_area_struct *); | 1520 | int (*mmap) (struct file *, struct vm_area_struct *); |
1521 | void (*mremap)(struct file *, struct vm_area_struct *); | ||
1500 | int (*open) (struct inode *, struct file *); | 1522 | int (*open) (struct inode *, struct file *); |
1501 | int (*flush) (struct file *, fl_owner_t id); | 1523 | int (*flush) (struct file *, fl_owner_t id); |
1502 | int (*release) (struct inode *, struct file *); | 1524 | int (*release) (struct inode *, struct file *); |
@@ -1513,7 +1535,7 @@ struct file_operations { | |||
1513 | int (*setlease)(struct file *, long, struct file_lock **, void **); | 1535 | int (*setlease)(struct file *, long, struct file_lock **, void **); |
1514 | long (*fallocate)(struct file *file, int mode, loff_t offset, | 1536 | long (*fallocate)(struct file *file, int mode, loff_t offset, |
1515 | loff_t len); | 1537 | loff_t len); |
1516 | int (*show_fdinfo)(struct seq_file *m, struct file *f); | 1538 | void (*show_fdinfo)(struct seq_file *m, struct file *f); |
1517 | }; | 1539 | }; |
1518 | 1540 | ||
1519 | struct inode_operations { | 1541 | struct inode_operations { |
@@ -1560,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | |||
1560 | struct iovec *fast_pointer, | 1582 | struct iovec *fast_pointer, |
1561 | struct iovec **ret_pointer); | 1583 | struct iovec **ret_pointer); |
1562 | 1584 | ||
1585 | extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); | ||
1563 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); | 1586 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); |
1564 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); | 1587 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); |
1565 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, | 1588 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, |
@@ -1577,7 +1600,9 @@ struct super_operations { | |||
1577 | void (*evict_inode) (struct inode *); | 1600 | void (*evict_inode) (struct inode *); |
1578 | void (*put_super) (struct super_block *); | 1601 | void (*put_super) (struct super_block *); |
1579 | int (*sync_fs)(struct super_block *sb, int wait); | 1602 | int (*sync_fs)(struct super_block *sb, int wait); |
1603 | int (*freeze_super) (struct super_block *); | ||
1580 | int (*freeze_fs) (struct super_block *); | 1604 | int (*freeze_fs) (struct super_block *); |
1605 | int (*thaw_super) (struct super_block *); | ||
1581 | int (*unfreeze_fs) (struct super_block *); | 1606 | int (*unfreeze_fs) (struct super_block *); |
1582 | int (*statfs) (struct dentry *, struct kstatfs *); | 1607 | int (*statfs) (struct dentry *, struct kstatfs *); |
1583 | int (*remount_fs) (struct super_block *, int *, char *); | 1608 | int (*remount_fs) (struct super_block *, int *, char *); |
@@ -1590,6 +1615,7 @@ struct super_operations { | |||
1590 | #ifdef CONFIG_QUOTA | 1615 | #ifdef CONFIG_QUOTA |
1591 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); | 1616 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); |
1592 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); | 1617 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); |
1618 | struct dquot **(*get_dquots)(struct inode *); | ||
1593 | #endif | 1619 | #endif |
1594 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); | 1620 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); |
1595 | long (*nr_cached_objects)(struct super_block *, int); | 1621 | long (*nr_cached_objects)(struct super_block *, int); |
@@ -2072,6 +2098,7 @@ extern int vfs_open(const struct path *, struct file *, const struct cred *); | |||
2072 | extern struct file * dentry_open(const struct path *, int, const struct cred *); | 2098 | extern struct file * dentry_open(const struct path *, int, const struct cred *); |
2073 | extern int filp_close(struct file *, fl_owner_t id); | 2099 | extern int filp_close(struct file *, fl_owner_t id); |
2074 | 2100 | ||
2101 | extern struct filename *getname_flags(const char __user *, int, int *); | ||
2075 | extern struct filename *getname(const char __user *); | 2102 | extern struct filename *getname(const char __user *); |
2076 | extern struct filename *getname_kernel(const char *); | 2103 | extern struct filename *getname_kernel(const char *); |
2077 | 2104 | ||
@@ -2786,6 +2813,11 @@ static inline void inode_has_no_xattr(struct inode *inode) | |||
2786 | inode->i_flags |= S_NOSEC; | 2813 | inode->i_flags |= S_NOSEC; |
2787 | } | 2814 | } |
2788 | 2815 | ||
2816 | static inline bool is_root_inode(struct inode *inode) | ||
2817 | { | ||
2818 | return inode == inode->i_sb->s_root->d_inode; | ||
2819 | } | ||
2820 | |||
2789 | static inline bool dir_emit(struct dir_context *ctx, | 2821 | static inline bool dir_emit(struct dir_context *ctx, |
2790 | const char *name, int namelen, | 2822 | const char *name, int namelen, |
2791 | u64 ino, unsigned type) | 2823 | u64 ino, unsigned type) |
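Editor's note: with i_mmap_mutex replaced by i_mmap_rwsem, callers are expected to go through the new wrapper helpers added above. A minimal sketch under that assumption, using the read-side variant for a lookup-only walk; the function is illustrative and the tree iteration is only indicated in a comment.

	#include <linux/fs.h>

	static void example_walk_mappings(struct address_space *mapping)
	{
		i_mmap_lock_read(mapping);
		/* ... e.g. vma_interval_tree_foreach() over mapping->i_mmap ... */
		i_mmap_unlock_read(mapping);
	}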
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ca060d7c4fa6..0f313f93c586 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
@@ -197,24 +197,6 @@ struct fsnotify_group { | |||
197 | #define FSNOTIFY_EVENT_INODE 2 | 197 | #define FSNOTIFY_EVENT_INODE 2 |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Inode specific fields in an fsnotify_mark | ||
201 | */ | ||
202 | struct fsnotify_inode_mark { | ||
203 | struct inode *inode; /* inode this mark is associated with */ | ||
204 | struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ | ||
205 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | ||
206 | }; | ||
207 | |||
208 | /* | ||
209 | * Mount point specific fields in an fsnotify_mark | ||
210 | */ | ||
211 | struct fsnotify_vfsmount_mark { | ||
212 | struct vfsmount *mnt; /* vfsmount this mark is associated with */ | ||
213 | struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */ | ||
214 | struct list_head free_m_list; /* tmp list used when freeing this mark */ | ||
215 | }; | ||
216 | |||
217 | /* | ||
218 | * a mark is simply an object attached to an in core inode which allows an | 200 | * a mark is simply an object attached to an in core inode which allows an |
219 | * fsnotify listener to indicate they are either no longer interested in events | 201 | * fsnotify listener to indicate they are either no longer interested in events |
220 | * of a type matching mask or only interested in those events. | 202 | * of a type matching mask or only interested in those events. |
@@ -230,11 +212,17 @@ struct fsnotify_mark { | |||
230 | * in kernel that found and may be using this mark. */ | 212 | * in kernel that found and may be using this mark. */ |
231 | atomic_t refcnt; /* active things looking at this mark */ | 213 | atomic_t refcnt; /* active things looking at this mark */ |
232 | struct fsnotify_group *group; /* group this mark is for */ | 214 | struct fsnotify_group *group; /* group this mark is for */ |
233 | struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ | 215 | struct list_head g_list; /* list of marks by group->i_fsnotify_marks |
216 | * Also reused for queueing mark into | ||
217 | * destroy_list when it's waiting for | ||
218 | * the end of SRCU period before it can | ||
219 | * be freed */ | ||
234 | spinlock_t lock; /* protect group and inode */ | 220 | spinlock_t lock; /* protect group and inode */ |
221 | struct hlist_node obj_list; /* list of marks for inode / vfsmount */ | ||
222 | struct list_head free_list; /* tmp list used when freeing this mark */ | ||
235 | union { | 223 | union { |
236 | struct fsnotify_inode_mark i; | 224 | struct inode *inode; /* inode this mark is associated with */ |
237 | struct fsnotify_vfsmount_mark m; | 225 | struct vfsmount *mnt; /* vfsmount this mark is associated with */ |
238 | }; | 226 | }; |
239 | __u32 ignored_mask; /* events types to ignore */ | 227 | __u32 ignored_mask; /* events types to ignore */ |
240 | #define FSNOTIFY_MARK_FLAG_INODE 0x01 | 228 | #define FSNOTIFY_MARK_FLAG_INODE 0x01 |
@@ -243,7 +231,6 @@ struct fsnotify_mark { | |||
243 | #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 | 231 | #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 |
244 | #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 | 232 | #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 |
245 | unsigned int flags; /* vfsmount or inode mark? */ | 233 | unsigned int flags; /* vfsmount or inode mark? */ |
246 | struct list_head destroy_list; | ||
247 | void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ | 234 | void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ |
248 | }; | 235 | }; |
249 | 236 | ||
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd48..1da602982cf9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -39,6 +39,12 @@ | |||
39 | # define FTRACE_FORCE_LIST_FUNC 0 | 39 | # define FTRACE_FORCE_LIST_FUNC 0 |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | /* Main tracing buffer and events set up */ | ||
43 | #ifdef CONFIG_TRACING | ||
44 | void trace_init(void); | ||
45 | #else | ||
46 | static inline void trace_init(void) { } | ||
47 | #endif | ||
42 | 48 | ||
43 | struct module; | 49 | struct module; |
44 | struct ftrace_hash; | 50 | struct ftrace_hash; |
@@ -61,6 +67,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
61 | /* | 67 | /* |
62 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are | 68 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are |
63 | * set in the flags member. | 69 | * set in the flags member. |
70 | * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and | ||
71 | * IPMODIFY are a kind of attribute flags which can be set only before | ||
72 | * registering the ftrace_ops, and can not be modified while registered. | ||
73 | * Changing those attribute flags after registering ftrace_ops will | ||
74 | * cause unexpected results. | ||
64 | * | 75 | * |
65 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | 76 | * ENABLED - set/unset when ftrace_ops is registered/unregistered |
66 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically | 77 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
@@ -94,6 +105,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
94 | * ADDING - The ops is in the process of being added. | 105 | * ADDING - The ops is in the process of being added. |
95 | * REMOVING - The ops is in the process of being removed. | 106 | * REMOVING - The ops is in the process of being removed. |
96 | * MODIFYING - The ops is in the process of changing its filter functions. | 107 | * MODIFYING - The ops is in the process of changing its filter functions. |
108 | * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. | ||
109 | * The arch specific code sets this flag when it allocated a | ||
110 | * trampoline. This lets the arch know that it can update the | ||
111 | * trampoline in case the callback function changes. | ||
112 | * The ftrace_ops trampoline can be set by the ftrace users, and | ||
113 | * in such cases the arch must not modify it. Only the arch ftrace | ||
114 | * core code should set this flag. | ||
115 | * IPMODIFY - The ops can modify the IP register. This can only be set with | ||
116 | * SAVE_REGS. If another ops with this flag set is already registered | ||
117 | * for any of the functions that this ops will be registered for, then | ||
118 | * this ops will fail to register or set_filter_ip. | ||
97 | */ | 119 | */ |
98 | enum { | 120 | enum { |
99 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 121 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
@@ -108,6 +130,8 @@ enum { | |||
108 | FTRACE_OPS_FL_ADDING = 1 << 9, | 130 | FTRACE_OPS_FL_ADDING = 1 << 9, |
109 | FTRACE_OPS_FL_REMOVING = 1 << 10, | 131 | FTRACE_OPS_FL_REMOVING = 1 << 10, |
110 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | 132 | FTRACE_OPS_FL_MODIFYING = 1 << 11, |
133 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, | ||
134 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, | ||
111 | }; | 135 | }; |
112 | 136 | ||
113 | #ifdef CONFIG_DYNAMIC_FTRACE | 137 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -142,6 +166,7 @@ struct ftrace_ops { | |||
142 | struct ftrace_ops_hash *func_hash; | 166 | struct ftrace_ops_hash *func_hash; |
143 | struct ftrace_ops_hash old_hash; | 167 | struct ftrace_ops_hash old_hash; |
144 | unsigned long trampoline; | 168 | unsigned long trampoline; |
169 | unsigned long trampoline_size; | ||
145 | #endif | 170 | #endif |
146 | }; | 171 | }; |
147 | 172 | ||
@@ -255,7 +280,9 @@ struct ftrace_func_command { | |||
255 | int ftrace_arch_code_modify_prepare(void); | 280 | int ftrace_arch_code_modify_prepare(void); |
256 | int ftrace_arch_code_modify_post_process(void); | 281 | int ftrace_arch_code_modify_post_process(void); |
257 | 282 | ||
258 | void ftrace_bug(int err, unsigned long ip); | 283 | struct dyn_ftrace; |
284 | |||
285 | void ftrace_bug(int err, struct dyn_ftrace *rec); | ||
259 | 286 | ||
260 | struct seq_file; | 287 | struct seq_file; |
261 | 288 | ||
@@ -287,6 +314,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); | |||
287 | 314 | ||
288 | extern int ftrace_nr_registered_ops(void); | 315 | extern int ftrace_nr_registered_ops(void); |
289 | 316 | ||
317 | bool is_ftrace_trampoline(unsigned long addr); | ||
318 | |||
290 | /* | 319 | /* |
291 | * The dyn_ftrace record's flags field is split into two parts. | 320 | * The dyn_ftrace record's flags field is split into two parts. |
292 | * the first part which is '0-FTRACE_REF_MAX' is a counter of | 321 | * the first part which is '0-FTRACE_REF_MAX' is a counter of |
@@ -297,6 +326,7 @@ extern int ftrace_nr_registered_ops(void); | |||
297 | * ENABLED - the function is being traced | 326 | * ENABLED - the function is being traced |
298 | * REGS - the record wants the function to save regs | 327 | * REGS - the record wants the function to save regs |
299 | * REGS_EN - the function is set up to save regs. | 328 | * REGS_EN - the function is set up to save regs. |
329 | * IPMODIFY - the record allows for the IP address to be changed. | ||
300 | * | 330 | * |
301 | * When a new ftrace_ops is registered and wants a function to save | 331 | * When a new ftrace_ops is registered and wants a function to save |
302 | * pt_regs, the rec->flag REGS is set. When the function has been | 332 | * pt_regs, the rec->flag REGS is set. When the function has been |
@@ -310,10 +340,11 @@ enum { | |||
310 | FTRACE_FL_REGS_EN = (1UL << 29), | 340 | FTRACE_FL_REGS_EN = (1UL << 29), |
311 | FTRACE_FL_TRAMP = (1UL << 28), | 341 | FTRACE_FL_TRAMP = (1UL << 28), |
312 | FTRACE_FL_TRAMP_EN = (1UL << 27), | 342 | FTRACE_FL_TRAMP_EN = (1UL << 27), |
343 | FTRACE_FL_IPMODIFY = (1UL << 26), | ||
313 | }; | 344 | }; |
314 | 345 | ||
315 | #define FTRACE_REF_MAX_SHIFT 27 | 346 | #define FTRACE_REF_MAX_SHIFT 26 |
316 | #define FTRACE_FL_BITS 5 | 347 | #define FTRACE_FL_BITS 6 |
317 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) | 348 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) |
318 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | 349 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) |
319 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | 350 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) |
@@ -586,6 +617,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user | |||
586 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 617 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
587 | static inline int | 618 | static inline int |
588 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | 619 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } |
620 | |||
621 | static inline bool is_ftrace_trampoline(unsigned long addr) | ||
622 | { | ||
623 | return false; | ||
624 | } | ||
589 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 625 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
590 | 626 | ||
591 | /* totally disable ftrace - can not re-enable after this */ | 627 | /* totally disable ftrace - can not re-enable after this */ |
@@ -843,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
843 | enum ftrace_dump_mode; | 879 | enum ftrace_dump_mode; |
844 | 880 | ||
845 | extern enum ftrace_dump_mode ftrace_dump_on_oops; | 881 | extern enum ftrace_dump_mode ftrace_dump_on_oops; |
882 | extern int tracepoint_printk; | ||
846 | 883 | ||
847 | extern void disable_trace_on_warning(void); | 884 | extern void disable_trace_on_warning(void); |
848 | extern int __disable_trace_on_warning; | 885 | extern int __disable_trace_on_warning; |
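Editor's note: a hedged sketch of an IPMODIFY user in the live-patching style that the new flag enables. The handler redirects execution by rewriting regs->ip (the x86 field name), which is why SAVE_REGS must accompany IPMODIFY; all symbol names are invented and error handling is minimal. This is a sketch of the pattern, not a reference implementation.

	#include <linux/ftrace.h>
	#include <linux/ptrace.h>

	static void example_replacement_func(void)
	{
		/* replacement body would go here */
	}

	static void example_handler(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op, struct pt_regs *regs)
	{
		regs->ip = (unsigned long)example_replacement_func;	/* x86-specific */
	}

	static struct ftrace_ops example_ops = {
		.func  = example_handler,
		.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
	};

	static int example_enable(unsigned long target_ip)
	{
		int ret = ftrace_set_filter_ip(&example_ops, target_ip, 0, 0);

		return ret ? ret : register_ftrace_function(&example_ops);
	}

Registration fails if another IPMODIFY ops already claims any of the filtered functions, as described in the flag documentation above.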
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e910..0bebb5c348b8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -138,6 +138,17 @@ enum print_line_t { | |||
138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | 138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ |
139 | }; | 139 | }; |
140 | 140 | ||
141 | /* | ||
142 | * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq | ||
143 | * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function | ||
144 | * simplifies those functions and keeps them in sync. | ||
145 | */ | ||
146 | static inline enum print_line_t trace_handle_return(struct trace_seq *s) | ||
147 | { | ||
148 | return trace_seq_has_overflowed(s) ? | ||
149 | TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; | ||
150 | } | ||
151 | |||
141 | void tracing_generic_entry_update(struct trace_entry *entry, | 152 | void tracing_generic_entry_update(struct trace_entry *entry, |
142 | unsigned long flags, | 153 | unsigned long flags, |
143 | int pc); | 154 | int pc); |
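Editor's note: a sketch of an event output callback converted to the new trace_handle_return() helper. The event structure and printed field are invented; the point is that instead of checking every trace_seq_printf() call, the overflow state is inspected once at the end.

	#include <linux/ftrace_event.h>
	#include <linux/trace_seq.h>

	static enum print_line_t
	example_output(struct trace_iterator *iter, int flags,
		       struct trace_event *event)
	{
		struct trace_seq *s = &iter->seq;

		trace_seq_printf(s, "example value=%d\n", 42);

		return trace_handle_return(s);
	}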
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d041..b840e3b2770d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -110,11 +110,8 @@ struct vm_area_struct; | |||
110 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | 110 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
111 | __GFP_RECLAIMABLE) | 111 | __GFP_RECLAIMABLE) |
112 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) | 112 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
113 | #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ | 113 | #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) |
114 | __GFP_HIGHMEM) | 114 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
115 | #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | ||
116 | __GFP_HARDWALL | __GFP_HIGHMEM | \ | ||
117 | __GFP_MOVABLE) | ||
118 | #define GFP_IOFS (__GFP_IO | __GFP_FS) | 115 | #define GFP_IOFS (__GFP_IO | __GFP_FS) |
119 | #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ | 116 | #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
120 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 117 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
@@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); | |||
381 | 378 | ||
382 | void page_alloc_init(void); | 379 | void page_alloc_init(void); |
383 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); | 380 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); |
384 | void drain_all_pages(void); | 381 | void drain_all_pages(struct zone *zone); |
385 | void drain_local_pages(void *dummy); | 382 | void drain_local_pages(struct zone *zone); |
386 | 383 | ||
387 | /* | 384 | /* |
388 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what | 385 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 85aa5d0b9357..ab81339a8590 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio) | |||
216 | return -EINVAL; | 216 | return -EINVAL; |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | 219 | static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, |
220 | unsigned int offset) | ||
220 | { | 221 | { |
221 | WARN_ON(1); | 222 | WARN_ON(1); |
222 | return -EINVAL; | 223 | return -EINVAL; |
223 | } | 224 | } |
224 | 225 | ||
225 | static inline void gpio_unlock_as_irq(struct gpio_chip *chip, | 226 | static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, |
226 | unsigned int offset) | 227 | unsigned int offset) |
227 | { | 228 | { |
228 | WARN_ON(1); | 229 | WARN_ON(1); |
229 | } | 230 | } |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 12f146fa6604..fd85cb120ee0 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
@@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, | |||
66 | unsigned int index, enum gpiod_flags flags); | 66 | unsigned int index, enum gpiod_flags flags); |
67 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); | 67 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); |
68 | 68 | ||
69 | int gpiod_get_direction(const struct gpio_desc *desc); | 69 | int gpiod_get_direction(struct gpio_desc *desc); |
70 | int gpiod_direction_input(struct gpio_desc *desc); | 70 | int gpiod_direction_input(struct gpio_desc *desc); |
71 | int gpiod_direction_output(struct gpio_desc *desc, int value); | 71 | int gpiod_direction_output(struct gpio_desc *desc, int value); |
72 | int gpiod_direction_output_raw(struct gpio_desc *desc, int value); | 72 | int gpiod_direction_output_raw(struct gpio_desc *desc, int value); |
@@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value); | |||
74 | /* Value get/set from non-sleeping context */ | 74 | /* Value get/set from non-sleeping context */ |
75 | int gpiod_get_value(const struct gpio_desc *desc); | 75 | int gpiod_get_value(const struct gpio_desc *desc); |
76 | void gpiod_set_value(struct gpio_desc *desc, int value); | 76 | void gpiod_set_value(struct gpio_desc *desc, int value); |
77 | void gpiod_set_array(unsigned int array_size, | ||
78 | struct gpio_desc **desc_array, int *value_array); | ||
77 | int gpiod_get_raw_value(const struct gpio_desc *desc); | 79 | int gpiod_get_raw_value(const struct gpio_desc *desc); |
78 | void gpiod_set_raw_value(struct gpio_desc *desc, int value); | 80 | void gpiod_set_raw_value(struct gpio_desc *desc, int value); |
81 | void gpiod_set_raw_array(unsigned int array_size, | ||
82 | struct gpio_desc **desc_array, int *value_array); | ||
79 | 83 | ||
80 | /* Value get/set from sleeping context */ | 84 | /* Value get/set from sleeping context */ |
81 | int gpiod_get_value_cansleep(const struct gpio_desc *desc); | 85 | int gpiod_get_value_cansleep(const struct gpio_desc *desc); |
82 | void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); | 86 | void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); |
87 | void gpiod_set_array_cansleep(unsigned int array_size, | ||
88 | struct gpio_desc **desc_array, | ||
89 | int *value_array); | ||
83 | int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); | 90 | int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); |
84 | void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); | 91 | void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); |
92 | void gpiod_set_raw_array_cansleep(unsigned int array_size, | ||
93 | struct gpio_desc **desc_array, | ||
94 | int *value_array); | ||
85 | 95 | ||
86 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); | 96 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); |
87 | 97 | ||
@@ -94,6 +104,13 @@ int gpiod_to_irq(const struct gpio_desc *desc); | |||
94 | struct gpio_desc *gpio_to_desc(unsigned gpio); | 104 | struct gpio_desc *gpio_to_desc(unsigned gpio); |
95 | int desc_to_gpio(const struct gpio_desc *desc); | 105 | int desc_to_gpio(const struct gpio_desc *desc); |
96 | 106 | ||
107 | /* Child properties interface */ | ||
108 | struct fwnode_handle; | ||
109 | |||
110 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, | ||
111 | const char *propname); | ||
112 | struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, | ||
113 | struct fwnode_handle *child); | ||
97 | #else /* CONFIG_GPIOLIB */ | 114 | #else /* CONFIG_GPIOLIB */ |
98 | 115 | ||
99 | static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, | 116 | static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, |
@@ -210,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value) | |||
210 | /* GPIO can never have been requested */ | 227 | /* GPIO can never have been requested */ |
211 | WARN_ON(1); | 228 | WARN_ON(1); |
212 | } | 229 | } |
230 | static inline void gpiod_set_array(unsigned int array_size, | ||
231 | struct gpio_desc **desc_array, | ||
232 | int *value_array) | ||
233 | { | ||
234 | /* GPIO can never have been requested */ | ||
235 | WARN_ON(1); | ||
236 | } | ||
213 | static inline int gpiod_get_raw_value(const struct gpio_desc *desc) | 237 | static inline int gpiod_get_raw_value(const struct gpio_desc *desc) |
214 | { | 238 | { |
215 | /* GPIO can never have been requested */ | 239 | /* GPIO can never have been requested */ |
@@ -221,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) | |||
221 | /* GPIO can never have been requested */ | 245 | /* GPIO can never have been requested */ |
222 | WARN_ON(1); | 246 | WARN_ON(1); |
223 | } | 247 | } |
248 | static inline void gpiod_set_raw_array(unsigned int array_size, | ||
249 | struct gpio_desc **desc_array, | ||
250 | int *value_array) | ||
251 | { | ||
252 | /* GPIO can never have been requested */ | ||
253 | WARN_ON(1); | ||
254 | } | ||
224 | 255 | ||
225 | static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) | 256 | static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) |
226 | { | 257 | { |
@@ -233,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) | |||
233 | /* GPIO can never have been requested */ | 264 | /* GPIO can never have been requested */ |
234 | WARN_ON(1); | 265 | WARN_ON(1); |
235 | } | 266 | } |
267 | static inline void gpiod_set_array_cansleep(unsigned int array_size, | ||
268 | struct gpio_desc **desc_array, | ||
269 | int *value_array) | ||
270 | { | ||
271 | /* GPIO can never have been requested */ | ||
272 | WARN_ON(1); | ||
273 | } | ||
236 | static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) | 274 | static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) |
237 | { | 275 | { |
238 | /* GPIO can never have been requested */ | 276 | /* GPIO can never have been requested */ |
@@ -245,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, | |||
245 | /* GPIO can never have been requested */ | 283 | /* GPIO can never have been requested */ |
246 | WARN_ON(1); | 284 | WARN_ON(1); |
247 | } | 285 | } |
286 | static inline void gpiod_set_raw_array_cansleep(unsigned int array_size, | ||
287 | struct gpio_desc **desc_array, | ||
288 | int *value_array) | ||
289 | { | ||
290 | /* GPIO can never have been requested */ | ||
291 | WARN_ON(1); | ||
292 | } | ||
248 | 293 | ||
249 | static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) | 294 | static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) |
250 | { | 295 | { |
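The gpiod_set_array() family added above lets a consumer update a group of descriptors in one call instead of looping over gpiod_set_value(), and the !CONFIG_GPIOLIB stubs keep such callers compiling. A hedged sketch of a consumer driving four data lines at once follows; the descriptor array is assumed to have been requested earlier and its name is invented here.

/* Sketch: write a nibble across four previously requested GPIOs. */
static void example_write_nibble(struct gpio_desc **data_gpios, u8 nibble)
{
	int values[4];
	unsigned int i;

	for (i = 0; i < 4; i++)
		values[i] = !!(nibble & BIT(i));

	/* Lets gpiolib use the chip's set_multiple() hook when present. */
	gpiod_set_array(4, data_gpios, values);
}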
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 249db3057e4d..c497c62889d1 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -32,6 +32,7 @@ struct seq_file; | |||
32 | * @get: returns value for signal "offset"; for output signals this | 32 | * @get: returns value for signal "offset"; for output signals this |
33 | * returns either the value actually sensed, or zero | 33 | * returns either the value actually sensed, or zero |
34 | * @set: assigns output value for signal "offset" | 34 | * @set: assigns output value for signal "offset" |
35 | * @set_multiple: assigns output values for multiple signals defined by "mask" | ||
35 | * @set_debounce: optional hook for setting debounce time for specified gpio in | 36 | * @set_debounce: optional hook for setting debounce time for specified gpio in |
36 | * interrupt triggered gpio chips | 37 | * interrupt triggered gpio chips |
37 | * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; | 38 | * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; |
@@ -89,6 +90,9 @@ struct gpio_chip { | |||
89 | unsigned offset); | 90 | unsigned offset); |
90 | void (*set)(struct gpio_chip *chip, | 91 | void (*set)(struct gpio_chip *chip, |
91 | unsigned offset, int value); | 92 | unsigned offset, int value); |
93 | void (*set_multiple)(struct gpio_chip *chip, | ||
94 | unsigned long *mask, | ||
95 | unsigned long *bits); | ||
92 | int (*set_debounce)(struct gpio_chip *chip, | 96 | int (*set_debounce)(struct gpio_chip *chip, |
93 | unsigned offset, | 97 | unsigned offset, |
94 | unsigned debounce); | 98 | unsigned debounce); |
@@ -149,8 +153,8 @@ extern struct gpio_chip *gpiochip_find(void *data, | |||
149 | int (*match)(struct gpio_chip *chip, void *data)); | 153 | int (*match)(struct gpio_chip *chip, void *data)); |
150 | 154 | ||
151 | /* lock/unlock as IRQ */ | 155 | /* lock/unlock as IRQ */ |
152 | int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset); | 156 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); |
153 | void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); | 157 | void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); |
154 | 158 | ||
155 | struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); | 159 | struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); |
156 | 160 | ||
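The new set_multiple() hook receives two bitmaps: "mask" selects which lines to change and "bits" carries the values to write. A hedged sketch of how a simple memory-mapped controller with at most BITS_PER_LONG lines might implement it; struct foo_gpio, its register offset and its lock are assumptions, not part of this patch.

/* Sketch: apply mask[0]/bits[0] to a single output data register. */
static void foo_gpio_set_multiple(struct gpio_chip *chip,
				  unsigned long *mask, unsigned long *bits)
{
	struct foo_gpio *priv = container_of(chip, struct foo_gpio, chip);
	unsigned long flags, val;

	spin_lock_irqsave(&priv->lock, flags);
	val = readl(priv->base + FOO_GPIO_OUT_REG);	/* hypothetical register */
	val = (val & ~mask[0]) | (bits[0] & mask[0]);
	writel(val, priv->base + FOO_GPIO_OUT_REG);
	spin_unlock_irqrestore(&priv->lock, flags);
}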
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 8b622468952c..ee2d8c6f9130 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _GPIO_KEYS_H | 2 | #define _GPIO_KEYS_H |
3 | 3 | ||
4 | struct device; | 4 | struct device; |
5 | struct gpio_desc; | ||
5 | 6 | ||
6 | /** | 7 | /** |
7 | * struct gpio_keys_button - configuration parameters | 8 | * struct gpio_keys_button - configuration parameters |
@@ -17,6 +18,7 @@ struct device; | |||
17 | * disable button via sysfs | 18 | * disable button via sysfs |
18 | * @value: axis value for %EV_ABS | 19 | * @value: axis value for %EV_ABS |
19 | * @irq: Irq number in case of interrupt keys | 20 | * @irq: Irq number in case of interrupt keys |
21 | * @gpiod: GPIO descriptor | ||
20 | */ | 22 | */ |
21 | struct gpio_keys_button { | 23 | struct gpio_keys_button { |
22 | unsigned int code; | 24 | unsigned int code; |
@@ -29,6 +31,7 @@ struct gpio_keys_button { | |||
29 | bool can_disable; | 31 | bool can_disable; |
30 | int value; | 32 | int value; |
31 | unsigned int irq; | 33 | unsigned int irq; |
34 | struct gpio_desc *gpiod; | ||
32 | }; | 35 | }; |
33 | 36 | ||
34 | /** | 37 | /** |
diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392..1afde47e1528 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -15,7 +15,6 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <asm/types.h> | 17 | #include <asm/types.h> |
18 | #include <asm/hash.h> | ||
19 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
20 | 19 | ||
21 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | 20 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) | |||
84 | return (u32)val; | 83 | return (u32)val; |
85 | } | 84 | } |
86 | 85 | ||
87 | struct fast_hash_ops { | ||
88 | u32 (*hash)(const void *data, u32 len, u32 seed); | ||
89 | u32 (*hash2)(const u32 *data, u32 len, u32 seed); | ||
90 | }; | ||
91 | |||
92 | /** | ||
93 | * arch_fast_hash - Caclulates a hash over a given buffer that can have | ||
94 | * arbitrary size. This function will eventually use an | ||
95 | * architecture-optimized hashing implementation if | ||
96 | * available, and trades off distribution for speed. | ||
97 | * | ||
98 | * @data: buffer to hash | ||
99 | * @len: length of buffer in bytes | ||
100 | * @seed: start seed | ||
101 | * | ||
102 | * Returns 32bit hash. | ||
103 | */ | ||
104 | extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); | ||
105 | |||
106 | /** | ||
107 | * arch_fast_hash2 - Caclulates a hash over a given buffer that has a | ||
108 | * size that is of a multiple of 32bit words. This | ||
109 | * function will eventually use an architecture- | ||
110 | * optimized hashing implementation if available, | ||
111 | * and trades off distribution for speed. | ||
112 | * | ||
113 | * @data: buffer to hash (must be 32bit padded) | ||
114 | * @len: number of 32bit words | ||
115 | * @seed: start seed | ||
116 | * | ||
117 | * Returns 32bit hash. | ||
118 | */ | ||
119 | extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); | ||
120 | |||
121 | #endif /* _LINUX_HASH_H */ | 86 | #endif /* _LINUX_HASH_H */ |
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 11c0182a153b..cbb5790a35cd 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h | |||
@@ -1,9 +1,24 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 Avionic Design GmbH | 2 | * Copyright (C) 2012 Avionic Design GmbH |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * copy of this software and associated documentation files (the "Software"), |
6 | * published by the Free Software Foundation. | 6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the | ||
12 | * next paragraph) shall be included in all copies or substantial portions | ||
13 | * of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
7 | */ | 22 | */ |
8 | 23 | ||
9 | #ifndef __LINUX_HDMI_H_ | 24 | #ifndef __LINUX_HDMI_H_ |
diff --git a/include/linux/hid.h b/include/linux/hid.h index 78ea9bf941cd..06c4607744f6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -234,6 +234,33 @@ struct hid_item { | |||
234 | #define HID_DG_BARRELSWITCH 0x000d0044 | 234 | #define HID_DG_BARRELSWITCH 0x000d0044 |
235 | #define HID_DG_ERASER 0x000d0045 | 235 | #define HID_DG_ERASER 0x000d0045 |
236 | #define HID_DG_TABLETPICK 0x000d0046 | 236 | #define HID_DG_TABLETPICK 0x000d0046 |
237 | |||
238 | #define HID_CP_CONSUMERCONTROL 0x000c0001 | ||
239 | #define HID_CP_NUMERICKEYPAD 0x000c0002 | ||
240 | #define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 | ||
241 | #define HID_CP_MICROPHONE 0x000c0004 | ||
242 | #define HID_CP_HEADPHONE 0x000c0005 | ||
243 | #define HID_CP_GRAPHICEQUALIZER 0x000c0006 | ||
244 | #define HID_CP_FUNCTIONBUTTONS 0x000c0036 | ||
245 | #define HID_CP_SELECTION 0x000c0080 | ||
246 | #define HID_CP_MEDIASELECTION 0x000c0087 | ||
247 | #define HID_CP_SELECTDISC 0x000c00ba | ||
248 | #define HID_CP_PLAYBACKSPEED 0x000c00f1 | ||
249 | #define HID_CP_PROXIMITY 0x000c0109 | ||
250 | #define HID_CP_SPEAKERSYSTEM 0x000c0160 | ||
251 | #define HID_CP_CHANNELLEFT 0x000c0161 | ||
252 | #define HID_CP_CHANNELRIGHT 0x000c0162 | ||
253 | #define HID_CP_CHANNELCENTER 0x000c0163 | ||
254 | #define HID_CP_CHANNELFRONT 0x000c0164 | ||
255 | #define HID_CP_CHANNELCENTERFRONT 0x000c0165 | ||
256 | #define HID_CP_CHANNELSIDE 0x000c0166 | ||
257 | #define HID_CP_CHANNELSURROUND 0x000c0167 | ||
258 | #define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 | ||
259 | #define HID_CP_CHANNELTOP 0x000c0169 | ||
260 | #define HID_CP_CHANNELUNKNOWN 0x000c016a | ||
261 | #define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 | ||
262 | #define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 | ||
263 | |||
237 | #define HID_DG_CONFIDENCE 0x000d0047 | 264 | #define HID_DG_CONFIDENCE 0x000d0047 |
238 | #define HID_DG_WIDTH 0x000d0048 | 265 | #define HID_DG_WIDTH 0x000d0048 |
239 | #define HID_DG_HEIGHT 0x000d0049 | 266 | #define HID_DG_HEIGHT 0x000d0049 |
@@ -312,11 +339,8 @@ struct hid_item { | |||
312 | * Vendor specific HID device groups | 339 | * Vendor specific HID device groups |
313 | */ | 340 | */ |
314 | #define HID_GROUP_RMI 0x0100 | 341 | #define HID_GROUP_RMI 0x0100 |
315 | |||
316 | /* | ||
317 | * Vendor specific HID device groups | ||
318 | */ | ||
319 | #define HID_GROUP_WACOM 0x0101 | 342 | #define HID_GROUP_WACOM 0x0101 |
343 | #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 | ||
320 | 344 | ||
321 | /* | 345 | /* |
322 | * This is the global environment of the parser. This information is | 346 | * This is the global environment of the parser. This information is |
@@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev) | |||
1063 | hdev->ll_driver->wait(hdev); | 1087 | hdev->ll_driver->wait(hdev); |
1064 | } | 1088 | } |
1065 | 1089 | ||
1090 | /** | ||
1091 | * hid_report_len - calculate the report length | ||
1092 | * | ||
1093 | * @report: the report whose length we want to know | ||
1094 | */ | ||
1095 | static inline int hid_report_len(struct hid_report *report) | ||
1096 | { | ||
1097 | /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ | ||
1098 | return ((report->size - 1) >> 3) + 1 + (report->id > 0); | ||
1099 | } | ||
1100 | |||
1066 | int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, | 1101 | int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, |
1067 | int interrupt); | 1102 | int interrupt); |
1068 | 1103 | ||
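hid_report_len() rounds the report size (kept in bits) up to whole bytes and adds one byte when a non-zero report ID prefixes the data. A small self-contained check of that arithmetic in plain userspace C:

#include <assert.h>

/* Mirrors ((report->size - 1) >> 3) + 1 + (report->id > 0). */
static int report_len(int size_bits, int report_id)
{
	return ((size_bits - 1) >> 3) + 1 + (report_id > 0);
}

int main(void)
{
	assert(report_len(8, 0) == 1);	/* one full byte, no ID byte  */
	assert(report_len(9, 0) == 2);	/* 9 bits round up to 2 bytes */
	assert(report_len(16, 3) == 3);	/* 2 data bytes + 1 ID byte   */
	return 0;
}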
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6e6d338641fe..431b7fc605c9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | |||
175 | } | 175 | } |
176 | 176 | ||
177 | #endif /* !CONFIG_HUGETLB_PAGE */ | 177 | #endif /* !CONFIG_HUGETLB_PAGE */ |
178 | /* | ||
179 | * hugepages at the page global directory. If an arch supports | ||
180 | * hugepages at the pgd level, it needs to define this. | ||
181 | */ | ||
182 | #ifndef pgd_huge | ||
183 | #define pgd_huge(x) 0 | ||
184 | #endif | ||
185 | |||
186 | #ifndef pgd_write | ||
187 | static inline int pgd_write(pgd_t pgd) | ||
188 | { | ||
189 | BUG(); | ||
190 | return 0; | ||
191 | } | ||
192 | #endif | ||
193 | |||
194 | #ifndef pud_write | ||
195 | static inline int pud_write(pud_t pud) | ||
196 | { | ||
197 | BUG(); | ||
198 | return 0; | ||
199 | } | ||
200 | #endif | ||
201 | |||
202 | #ifndef is_hugepd | ||
203 | /* | ||
204 | * Some architectures require a hugepage directory format that is | ||
205 | * needed to support multiple hugepage sizes. For example, commit | ||
206 | * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" | ||
207 | * introduced such a format on powerpc, allowing for a more flexible | ||
208 | * hugepage pagetable layout. | ||
209 | */ | ||
210 | typedef struct { unsigned long pd; } hugepd_t; | ||
211 | #define is_hugepd(hugepd) (0) | ||
212 | #define __hugepd(x) ((hugepd_t) { (x) }) | ||
213 | static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, | ||
214 | unsigned pdshift, unsigned long end, | ||
215 | int write, struct page **pages, int *nr) | ||
216 | { | ||
217 | return 0; | ||
218 | } | ||
219 | #else | ||
220 | extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, | ||
221 | unsigned pdshift, unsigned long end, | ||
222 | int write, struct page **pages, int *nr); | ||
223 | #endif | ||
178 | 224 | ||
179 | #define HUGETLB_ANON_FILE "anon_hugepage" | 225 | #define HUGETLB_ANON_FILE "anon_hugepage" |
180 | 226 | ||
@@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log) | |||
311 | { | 357 | { |
312 | if (!page_size_log) | 358 | if (!page_size_log) |
313 | return &default_hstate; | 359 | return &default_hstate; |
314 | return size_to_hstate(1 << page_size_log); | 360 | |
361 | return size_to_hstate(1UL << page_size_log); | ||
315 | } | 362 | } |
316 | 363 | ||
317 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) | 364 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98d..bcc853eccc85 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h | |||
@@ -16,7 +16,6 @@ | |||
16 | #define _LINUX_HUGETLB_CGROUP_H | 16 | #define _LINUX_HUGETLB_CGROUP_H |
17 | 17 | ||
18 | #include <linux/mmdebug.h> | 18 | #include <linux/mmdebug.h> |
19 | #include <linux/res_counter.h> | ||
20 | 19 | ||
21 | struct hugetlb_cgroup; | 20 | struct hugetlb_cgroup; |
22 | /* | 21 | /* |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a072..476c685ca6f9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -650,6 +650,8 @@ struct vmbus_channel { | |||
650 | u8 monitor_grp; | 650 | u8 monitor_grp; |
651 | u8 monitor_bit; | 651 | u8 monitor_bit; |
652 | 652 | ||
653 | bool rescind; /* got rescind msg */ | ||
654 | |||
653 | u32 ringbuffer_gpadlhandle; | 655 | u32 ringbuffer_gpadlhandle; |
654 | 656 | ||
655 | /* Allocated memory for ring buffer */ | 657 | /* Allocated memory for ring buffer */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b556e0ab946f..e3a1721c8354 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -46,6 +46,8 @@ struct i2c_client; | |||
46 | struct i2c_driver; | 46 | struct i2c_driver; |
47 | union i2c_smbus_data; | 47 | union i2c_smbus_data; |
48 | struct i2c_board_info; | 48 | struct i2c_board_info; |
49 | enum i2c_slave_event; | ||
50 | typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); | ||
49 | 51 | ||
50 | struct module; | 52 | struct module; |
51 | 53 | ||
@@ -209,6 +211,8 @@ struct i2c_driver { | |||
209 | * @irq: indicates the IRQ generated by this device (if any) | 211 | * @irq: indicates the IRQ generated by this device (if any) |
210 | * @detected: member of an i2c_driver.clients list or i2c-core's | 212 | * @detected: member of an i2c_driver.clients list or i2c-core's |
211 | * userspace_devices list | 213 | * userspace_devices list |
214 | * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter | ||
215 | * calls it to pass on slave events to the slave driver. | ||
212 | * | 216 | * |
213 | * An i2c_client identifies a single device (i.e. chip) connected to an | 217 | * An i2c_client identifies a single device (i.e. chip) connected to an |
214 | * i2c bus. The behaviour exposed to Linux is defined by the driver | 218 | * i2c bus. The behaviour exposed to Linux is defined by the driver |
@@ -224,6 +228,7 @@ struct i2c_client { | |||
224 | struct device dev; /* the device structure */ | 228 | struct device dev; /* the device structure */ |
225 | int irq; /* irq issued by device */ | 229 | int irq; /* irq issued by device */ |
226 | struct list_head detected; | 230 | struct list_head detected; |
231 | i2c_slave_cb_t slave_cb; /* callback for slave mode */ | ||
227 | }; | 232 | }; |
228 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) | 233 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
229 | 234 | ||
@@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
246 | dev_set_drvdata(&dev->dev, data); | 251 | dev_set_drvdata(&dev->dev, data); |
247 | } | 252 | } |
248 | 253 | ||
254 | /* I2C slave support */ | ||
255 | |||
256 | enum i2c_slave_event { | ||
257 | I2C_SLAVE_REQ_READ_START, | ||
258 | I2C_SLAVE_REQ_READ_END, | ||
259 | I2C_SLAVE_REQ_WRITE_START, | ||
260 | I2C_SLAVE_REQ_WRITE_END, | ||
261 | I2C_SLAVE_STOP, | ||
262 | }; | ||
263 | |||
264 | extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); | ||
265 | extern int i2c_slave_unregister(struct i2c_client *client); | ||
266 | |||
267 | static inline int i2c_slave_event(struct i2c_client *client, | ||
268 | enum i2c_slave_event event, u8 *val) | ||
269 | { | ||
270 | return client->slave_cb(client, event, val); | ||
271 | } | ||
272 | |||
249 | /** | 273 | /** |
250 | * struct i2c_board_info - template for device creation | 274 | * struct i2c_board_info - template for device creation |
251 | * @type: chip type, to initialize i2c_client.name | 275 | * @type: chip type, to initialize i2c_client.name |
@@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, | |||
352 | * into I2C transfers instead. | 376 | * into I2C transfers instead. |
353 | * @functionality: Return the flags that this algorithm/adapter pair supports | 377 | * @functionality: Return the flags that this algorithm/adapter pair supports |
354 | * from the I2C_FUNC_* flags. | 378 | * from the I2C_FUNC_* flags. |
379 | * @reg_slave: Register given client to I2C slave mode of this adapter | ||
380 | * @unreg_slave: Unregister given client from I2C slave mode of this adapter | ||
355 | * | 381 | * |
356 | * The following structs are for those who like to implement new bus drivers: | 382 | * The following structs are for those who like to implement new bus drivers: |
357 | * i2c_algorithm is the interface to a class of hardware solutions which can | 383 | * i2c_algorithm is the interface to a class of hardware solutions which can |
@@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, | |||
359 | * to name two of the most common. | 385 | * to name two of the most common. |
360 | * | 386 | * |
361 | * The return codes from the @master_xfer field should indicate the type of | 387 | * The return codes from the @master_xfer field should indicate the type of |
362 | * error code that occured during the transfer, as documented in the kernel | 388 | * error code that occurred during the transfer, as documented in the kernel |
363 | * Documentation file Documentation/i2c/fault-codes. | 389 | * Documentation file Documentation/i2c/fault-codes. |
364 | */ | 390 | */ |
365 | struct i2c_algorithm { | 391 | struct i2c_algorithm { |
@@ -377,6 +403,9 @@ struct i2c_algorithm { | |||
377 | 403 | ||
378 | /* To determine what the adapter supports */ | 404 | /* To determine what the adapter supports */ |
379 | u32 (*functionality) (struct i2c_adapter *); | 405 | u32 (*functionality) (struct i2c_adapter *); |
406 | |||
407 | int (*reg_slave)(struct i2c_client *client); | ||
408 | int (*unreg_slave)(struct i2c_client *client); | ||
380 | }; | 409 | }; |
381 | 410 | ||
382 | /** | 411 | /** |
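The slave support added above lets an adapter that implements reg_slave()/unreg_slave() deliver bus events to a client driver through slave_cb. Below is a hedged sketch of a backend; the single-byte "register" behaviour and the exact per-event use of *val are assumptions for illustration, and only the enum, i2c_slave_register() and i2c_slave_unregister() come from this header.

/* Sketch: a trivial one-byte I2C slave backend. */
static u8 example_slave_val;

static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_WRITE_END:
		example_slave_val = *val;	/* byte received from master */
		break;
	case I2C_SLAVE_REQ_READ_START:
		*val = example_slave_val;	/* byte to send to master */
		break;
	default:
		break;
	}
	return 0;
}

static int example_slave_attach(struct i2c_client *client)
{
	return i2c_slave_register(client, example_slave_cb);
}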
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h index 69280db02c41..ee3c2aba2a8e 100644 --- a/include/linux/i2c/pmbus.h +++ b/include/linux/i2c/pmbus.h | |||
@@ -40,6 +40,10 @@ | |||
40 | 40 | ||
41 | struct pmbus_platform_data { | 41 | struct pmbus_platform_data { |
42 | u32 flags; /* Device specific flags */ | 42 | u32 flags; /* Device specific flags */ |
43 | |||
44 | /* regulator support */ | ||
45 | int num_regulators; | ||
46 | struct regulator_init_data *reg_init_data; | ||
43 | }; | 47 | }; |
44 | 48 | ||
45 | #endif /* _PMBUS_H_ */ | 49 | #endif /* _PMBUS_H_ */ |
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 8cfb50f38529..0bc03f100d04 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
@@ -26,7 +26,6 @@ | |||
26 | #define __TWL_H_ | 26 | #define __TWL_H_ |
27 | 27 | ||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/phy/phy.h> | ||
30 | #include <linux/input/matrix_keypad.h> | 29 | #include <linux/input/matrix_keypad.h> |
31 | 30 | ||
32 | /* | 31 | /* |
@@ -634,7 +633,6 @@ enum twl4030_usb_mode { | |||
634 | struct twl4030_usb_data { | 633 | struct twl4030_usb_data { |
635 | enum twl4030_usb_mode usb_mode; | 634 | enum twl4030_usb_mode usb_mode; |
636 | unsigned long features; | 635 | unsigned long features; |
637 | struct phy_init_data *init_data; | ||
638 | 636 | ||
639 | int (*phy_init)(struct device *dev); | 637 | int (*phy_init)(struct device *dev); |
640 | int (*phy_exit)(struct device *dev); | 638 | int (*phy_exit)(struct device *dev); |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c76931..4f4eea8a6288 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/if_ether.h> | 20 | #include <linux/if_ether.h> |
21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
22 | #include <asm/unaligned.h> | ||
22 | 23 | ||
23 | /* | 24 | /* |
24 | * DS bit usage | 25 | * DS bit usage |
@@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { | |||
1066 | 1067 | ||
1067 | /* TDLS */ | 1068 | /* TDLS */ |
1068 | 1069 | ||
1070 | /* Channel switch timing */ | ||
1071 | struct ieee80211_ch_switch_timing { | ||
1072 | __le16 switch_time; | ||
1073 | __le16 switch_timeout; | ||
1074 | } __packed; | ||
1075 | |||
1069 | /* Link-id information element */ | 1076 | /* Link-id information element */ |
1070 | struct ieee80211_tdls_lnkie { | 1077 | struct ieee80211_tdls_lnkie { |
1071 | u8 ie_type; /* Link Identifier IE */ | 1078 | u8 ie_type; /* Link Identifier IE */ |
@@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { | |||
1107 | u8 dialog_token; | 1114 | u8 dialog_token; |
1108 | u8 variable[0]; | 1115 | u8 variable[0]; |
1109 | } __packed discover_req; | 1116 | } __packed discover_req; |
1117 | struct { | ||
1118 | u8 target_channel; | ||
1119 | u8 oper_class; | ||
1120 | u8 variable[0]; | ||
1121 | } __packed chan_switch_req; | ||
1122 | struct { | ||
1123 | __le16 status_code; | ||
1124 | u8 variable[0]; | ||
1125 | } __packed chan_switch_resp; | ||
1110 | } u; | 1126 | } u; |
1111 | } __packed; | 1127 | } __packed; |
1112 | 1128 | ||
@@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { | |||
1274 | #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 | 1290 | #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 |
1275 | 1291 | ||
1276 | /* | 1292 | /* |
1277 | * Maximum length of AMPDU that the STA can receive. | 1293 | * Maximum length of AMPDU that the STA can receive in high-throughput (HT). |
1278 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) | 1294 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) |
1279 | */ | 1295 | */ |
1280 | enum ieee80211_max_ampdu_length_exp { | 1296 | enum ieee80211_max_ampdu_length_exp { |
@@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { | |||
1284 | IEEE80211_HT_MAX_AMPDU_64K = 3 | 1300 | IEEE80211_HT_MAX_AMPDU_64K = 3 |
1285 | }; | 1301 | }; |
1286 | 1302 | ||
1303 | /* | ||
1304 | * Maximum length of AMPDU that the STA can receive in VHT. | ||
1305 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) | ||
1306 | */ | ||
1307 | enum ieee80211_vht_max_ampdu_length_exp { | ||
1308 | IEEE80211_VHT_MAX_AMPDU_8K = 0, | ||
1309 | IEEE80211_VHT_MAX_AMPDU_16K = 1, | ||
1310 | IEEE80211_VHT_MAX_AMPDU_32K = 2, | ||
1311 | IEEE80211_VHT_MAX_AMPDU_64K = 3, | ||
1312 | IEEE80211_VHT_MAX_AMPDU_128K = 4, | ||
1313 | IEEE80211_VHT_MAX_AMPDU_256K = 5, | ||
1314 | IEEE80211_VHT_MAX_AMPDU_512K = 6, | ||
1315 | IEEE80211_VHT_MAX_AMPDU_1024K = 7 | ||
1316 | }; | ||
1317 | |||
1287 | #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 | 1318 | #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 |
1288 | 1319 | ||
1289 | /* Minimum MPDU start spacing */ | 1320 | /* Minimum MPDU start spacing */ |
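Both the existing HT enum and the new VHT enum above encode the maximum receivable A-MPDU length as an exponent: length = 2^(13 + exp) - 1 octets, so the new IEEE80211_VHT_MAX_AMPDU_1024K (exponent 7) corresponds to 1048575 octets. A self-contained check of the formula:

#include <stdio.h>

/* Length = 2^(13 + max_ampdu_length_exp) - 1 octets */
static unsigned long max_ampdu_len(unsigned int exp)
{
	return (1UL << (13 + exp)) - 1;
}

int main(void)
{
	printf("HT exp 3 (64K):    %lu\n", max_ampdu_len(3));	/* 65535   */
	printf("VHT exp 7 (1024K): %lu\n", max_ampdu_len(7));	/* 1048575 */
	return 0;
}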
@@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { | |||
1998 | WLAN_TDLS_DISCOVERY_REQUEST = 10, | 2029 | WLAN_TDLS_DISCOVERY_REQUEST = 10, |
1999 | }; | 2030 | }; |
2000 | 2031 | ||
2032 | /* Extended Channel Switching capability to be set in the 1st byte of | ||
2033 | * the @WLAN_EID_EXT_CAPABILITY information element | ||
2034 | */ | ||
2035 | #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) | ||
2036 | |||
2037 | /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ | ||
2038 | #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) | ||
2039 | #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) | ||
2040 | #define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) | ||
2041 | |||
2001 | /* Interworking capabilities are set in 7th bit of 4th byte of the | 2042 | /* Interworking capabilities are set in 7th bit of 4th byte of the |
2002 | * @WLAN_EID_EXT_CAPABILITY information element | 2043 | * @WLAN_EID_EXT_CAPABILITY information element |
2003 | */ | 2044 | */ |
@@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { | |||
2009 | */ | 2050 | */ |
2010 | #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) | 2051 | #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) |
2011 | #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) | 2052 | #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) |
2053 | #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) | ||
2012 | 2054 | ||
2013 | #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) | 2055 | #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) |
2014 | #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) | 2056 | #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) |
@@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { | |||
2016 | /* TDLS specific payload type in the LLC/SNAP header */ | 2058 | /* TDLS specific payload type in the LLC/SNAP header */ |
2017 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 | 2059 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 |
2018 | 2060 | ||
2061 | /* BSS Coex IE information field bits */ | ||
2062 | #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) | ||
2063 | |||
2019 | /** | 2064 | /** |
2020 | * enum - mesh synchronization method identifier | 2065 | * enum - mesh synchronization method identifier |
2021 | * | 2066 | * |
@@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, | |||
2398 | return !!(tim->virtual_map[index] & mask); | 2443 | return !!(tim->virtual_map[index] & mask); |
2399 | } | 2444 | } |
2400 | 2445 | ||
2446 | /** | ||
2447 | * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) | ||
2448 | * @skb: the skb containing the frame, length will not be checked | ||
2449 | * @hdr_size: the size of the ieee80211_hdr that starts at skb->data | ||
2450 | * | ||
2451 | * This function assumes the frame is a data frame, and that the network header | ||
2452 | * is in the correct place. | ||
2453 | */ | ||
2454 | static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) | ||
2455 | { | ||
2456 | if (!skb_is_nonlinear(skb) && | ||
2457 | skb->len > (skb_network_offset(skb) + 2)) { | ||
2458 | /* Point to where the indication of TDLS should start */ | ||
2459 | const u8 *tdls_data = skb_network_header(skb) - 2; | ||
2460 | |||
2461 | if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && | ||
2462 | tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && | ||
2463 | tdls_data[3] == WLAN_CATEGORY_TDLS) | ||
2464 | return tdls_data[4]; | ||
2465 | } | ||
2466 | |||
2467 | return -1; | ||
2468 | } | ||
2469 | |||
2401 | /* convert time units */ | 2470 | /* convert time units */ |
2402 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) | 2471 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) |
2403 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) | 2472 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) |
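ieee80211_get_tdls_action() added above looks two bytes before the network header of a linear data frame for the TDLS payload type, category and action code, returning the action or -1. A hedged sketch of an rx path using it follows; the caller is assumed to have computed hdr_len, and only the helper and WLAN_TDLS_DISCOVERY_REQUEST are taken from this header.

/* Sketch: is this data frame a TDLS discovery request? */
static bool example_is_tdls_discovery(struct sk_buff *skb, u32 hdr_len)
{
	int action = ieee80211_get_tdls_action(skb, hdr_len);

	if (action < 0)
		return false;		/* not a TDLS action frame */

	return action == WLAN_TDLS_DISCOVERY_REQUEST;
}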
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h new file mode 100644 index 000000000000..6e82d888287c --- /dev/null +++ b/include/linux/ieee802154.h | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * IEEE802.15.4-2003 specification | ||
3 | * | ||
4 | * Copyright (C) 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * Written by: | ||
16 | * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> | ||
17 | * Maxim Gorbachyov <maxim.gorbachev@siemens.com> | ||
18 | * Maxim Osipov <maxim.osipov@siemens.com> | ||
19 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
20 | * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> | ||
21 | */ | ||
22 | |||
23 | #ifndef LINUX_IEEE802154_H | ||
24 | #define LINUX_IEEE802154_H | ||
25 | |||
26 | #include <linux/types.h> | ||
27 | #include <linux/random.h> | ||
28 | #include <asm/byteorder.h> | ||
29 | |||
30 | #define IEEE802154_MTU 127 | ||
31 | #define IEEE802154_MIN_PSDU_LEN 5 | ||
32 | |||
33 | #define IEEE802154_PAN_ID_BROADCAST 0xffff | ||
34 | #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff | ||
35 | #define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe | ||
36 | |||
37 | #define IEEE802154_EXTENDED_ADDR_LEN 8 | ||
38 | |||
39 | #define IEEE802154_LIFS_PERIOD 40 | ||
40 | #define IEEE802154_SIFS_PERIOD 12 | ||
41 | |||
42 | #define IEEE802154_MAX_CHANNEL 26 | ||
43 | #define IEEE802154_MAX_PAGE 31 | ||
44 | |||
45 | #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ | ||
46 | #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ | ||
47 | #define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ | ||
48 | #define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ | ||
49 | |||
50 | #define IEEE802154_FC_TYPE_SHIFT 0 | ||
51 | #define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) | ||
52 | #define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) | ||
53 | #define IEEE802154_FC_SET_TYPE(v, x) do { \ | ||
54 | v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ | ||
55 | (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ | ||
56 | } while (0) | ||
57 | |||
58 | #define IEEE802154_FC_SECEN_SHIFT 3 | ||
59 | #define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) | ||
60 | #define IEEE802154_FC_FRPEND_SHIFT 4 | ||
61 | #define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) | ||
62 | #define IEEE802154_FC_ACK_REQ_SHIFT 5 | ||
63 | #define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) | ||
64 | #define IEEE802154_FC_INTRA_PAN_SHIFT 6 | ||
65 | #define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT) | ||
66 | |||
67 | #define IEEE802154_FC_SAMODE_SHIFT 14 | ||
68 | #define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) | ||
69 | #define IEEE802154_FC_DAMODE_SHIFT 10 | ||
70 | #define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) | ||
71 | |||
72 | #define IEEE802154_FC_VERSION_SHIFT 12 | ||
73 | #define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) | ||
74 | #define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) | ||
75 | |||
76 | #define IEEE802154_FC_SAMODE(x) \ | ||
77 | (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) | ||
78 | |||
79 | #define IEEE802154_FC_DAMODE(x) \ | ||
80 | (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) | ||
81 | |||
82 | #define IEEE802154_SCF_SECLEVEL_MASK 7 | ||
83 | #define IEEE802154_SCF_SECLEVEL_SHIFT 0 | ||
84 | #define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) | ||
85 | #define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 | ||
86 | #define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) | ||
87 | #define IEEE802154_SCF_KEY_ID_MODE(x) \ | ||
88 | ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) | ||
89 | |||
90 | #define IEEE802154_SCF_KEY_IMPLICIT 0 | ||
91 | #define IEEE802154_SCF_KEY_INDEX 1 | ||
92 | #define IEEE802154_SCF_KEY_SHORT_INDEX 2 | ||
93 | #define IEEE802154_SCF_KEY_HW_INDEX 3 | ||
94 | |||
95 | #define IEEE802154_SCF_SECLEVEL_NONE 0 | ||
96 | #define IEEE802154_SCF_SECLEVEL_MIC32 1 | ||
97 | #define IEEE802154_SCF_SECLEVEL_MIC64 2 | ||
98 | #define IEEE802154_SCF_SECLEVEL_MIC128 3 | ||
99 | #define IEEE802154_SCF_SECLEVEL_ENC 4 | ||
100 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 | ||
101 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 | ||
102 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 | ||
103 | |||
104 | /* MAC footer size */ | ||
105 | #define IEEE802154_MFR_SIZE 2 /* 2 octets */ | ||
106 | |||
107 | /* MAC's Command Frames Identifiers */ | ||
108 | #define IEEE802154_CMD_ASSOCIATION_REQ 0x01 | ||
109 | #define IEEE802154_CMD_ASSOCIATION_RESP 0x02 | ||
110 | #define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 | ||
111 | #define IEEE802154_CMD_DATA_REQ 0x04 | ||
112 | #define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 | ||
113 | #define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 | ||
114 | #define IEEE802154_CMD_BEACON_REQ 0x07 | ||
115 | #define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08 | ||
116 | #define IEEE802154_CMD_GTS_REQ 0x09 | ||
117 | |||
118 | /* | ||
119 | * The return values of MAC operations | ||
120 | */ | ||
121 | enum { | ||
122 | /* | ||
123 | * The requested operation was completed successfully. | ||
124 | * For a transmission request, this value indicates | ||
125 | * a successful transmission. | ||
126 | */ | ||
127 | IEEE802154_SUCCESS = 0x0, | ||
128 | |||
129 | /* The beacon was lost following a synchronization request. */ | ||
130 | IEEE802154_BEACON_LOSS = 0xe0, | ||
131 | /* | ||
132 | * A transmission could not take place due to activity on the | ||
133 | * channel, i.e., the CSMA-CA mechanism has failed. | ||
134 | */ | ||
135 | IEEE802154_CHNL_ACCESS_FAIL = 0xe1, | ||
136 | /* The GTS request has been denied by the PAN coordinator. */ | ||
137 | IEEE802154_DENINED = 0xe2, | ||
138 | /* The attempt to disable the transceiver has failed. */ | ||
139 | IEEE802154_DISABLE_TRX_FAIL = 0xe3, | ||
140 | /* | ||
141 | * The received frame induces a failed security check according to | ||
142 | * the security suite. | ||
143 | */ | ||
144 | IEEE802154_FAILED_SECURITY_CHECK = 0xe4, | ||
145 | /* | ||
146 | * The frame resulting from secure processing has a length that is | ||
147 | * greater than aMACMaxFrameSize. | ||
148 | */ | ||
149 | IEEE802154_FRAME_TOO_LONG = 0xe5, | ||
150 | /* | ||
151 | * The requested GTS transmission failed because the specified GTS | ||
152 | * either did not have a transmit GTS direction or was not defined. | ||
153 | */ | ||
154 | IEEE802154_INVALID_GTS = 0xe6, | ||
155 | /* | ||
156 | * A request to purge an MSDU from the transaction queue was made using | ||
157 | * an MSDU handle that was not found in the transaction table. | ||
158 | */ | ||
159 | IEEE802154_INVALID_HANDLE = 0xe7, | ||
160 | /* A parameter in the primitive is out of the valid range.*/ | ||
161 | IEEE802154_INVALID_PARAMETER = 0xe8, | ||
162 | /* No acknowledgment was received after aMaxFrameRetries. */ | ||
163 | IEEE802154_NO_ACK = 0xe9, | ||
164 | /* A scan operation failed to find any network beacons.*/ | ||
165 | IEEE802154_NO_BEACON = 0xea, | ||
166 | /* No response data were available following a request. */ | ||
167 | IEEE802154_NO_DATA = 0xeb, | ||
168 | /* The operation failed because a short address was not allocated. */ | ||
169 | IEEE802154_NO_SHORT_ADDRESS = 0xec, | ||
170 | /* | ||
171 | * A receiver enable request was unsuccessful because it could not be | ||
172 | * completed within the CAP. | ||
173 | */ | ||
174 | IEEE802154_OUT_OF_CAP = 0xed, | ||
175 | /* | ||
176 | * A PAN identifier conflict has been detected and communicated to the | ||
177 | * PAN coordinator. | ||
178 | */ | ||
179 | IEEE802154_PANID_CONFLICT = 0xee, | ||
180 | /* A coordinator realignment command has been received. */ | ||
181 | IEEE802154_REALIGMENT = 0xef, | ||
182 | /* The transaction has expired and its information discarded. */ | ||
183 | IEEE802154_TRANSACTION_EXPIRED = 0xf0, | ||
184 | /* There is no capacity to store the transaction. */ | ||
185 | IEEE802154_TRANSACTION_OVERFLOW = 0xf1, | ||
186 | /* | ||
187 | * The transceiver was in the transmitter enabled state when the | ||
188 | * receiver was requested to be enabled. | ||
189 | */ | ||
190 | IEEE802154_TX_ACTIVE = 0xf2, | ||
191 | /* The appropriate key is not available in the ACL. */ | ||
192 | IEEE802154_UNAVAILABLE_KEY = 0xf3, | ||
193 | /* | ||
194 | * A SET/GET request was issued with the identifier of a PIB attribute | ||
195 | * that is not supported. | ||
196 | */ | ||
197 | IEEE802154_UNSUPPORTED_ATTR = 0xf4, | ||
198 | /* | ||
199 | * A request to perform a scan operation failed because the MLME was | ||
200 | * in the process of performing a previously initiated scan operation. | ||
201 | */ | ||
202 | IEEE802154_SCAN_IN_PROGRESS = 0xfc, | ||
203 | }; | ||
204 | |||
205 | /** | ||
206 | * ieee802154_is_valid_psdu_len - check if psdu len is valid | ||
207 | * @len: psdu len with (MHR + payload + MFR) | ||
208 | */ | ||
209 | static inline bool ieee802154_is_valid_psdu_len(const u8 len) | ||
210 | { | ||
211 | return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * ieee802154_is_valid_extended_addr - check if extended addr is valid | ||
216 | * @addr: extended addr to check | ||
217 | */ | ||
218 | static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) | ||
219 | { | ||
220 | /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff | ||
221 | * is used internally as extended to short address broadcast mapping. | ||
222 | * This is currently a workaround because neighbor discovery can't | ||
223 | * deal with short address types right now. | ||
224 | */ | ||
225 | return ((addr != cpu_to_le64(0x0000000000000000ULL)) && | ||
226 | (addr != cpu_to_le64(0xffffffffffffffffULL))); | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * ieee802154_random_extended_addr - generates a random extended address | ||
231 | * @addr: extended addr pointer to place the random address | ||
232 | */ | ||
233 | static inline void ieee802154_random_extended_addr(__le64 *addr) | ||
234 | { | ||
235 | get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); | ||
236 | |||
237 | /* toggle some bit if we hit an invalid extended addr */ | ||
238 | if (!ieee802154_is_valid_extended_addr(*addr)) | ||
239 | ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; | ||
240 | } | ||
241 | |||
242 | #endif /* LINUX_IEEE802154_H */ | ||
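The new header collects the IEEE 802.15.4 frame-control and security-control bit layout plus a few validity helpers. A hedged sketch of how a receive path might combine them; the frame-control word and source address are assumed to have been extracted from the MHR by the caller.

/* Sketch: basic sanity checks on a received 802.15.4 frame. */
static bool example_rx_sane(u8 psdu_len, u16 fc, __le64 src_ext_addr)
{
	if (!ieee802154_is_valid_psdu_len(psdu_len))
		return false;

	/* Only accept data frames from a valid extended source address. */
	return IEEE802154_FC_TYPE(fc) == IEEE802154_FC_TYPE_DATA &&
	       ieee802154_is_valid_extended_addr(src_ext_addr);
}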
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04f..0a8ce762a47f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <uapi/linux/if_bridge.h> | 17 | #include <uapi/linux/if_bridge.h> |
18 | #include <linux/bitops.h> | ||
18 | 19 | ||
19 | struct br_ip { | 20 | struct br_ip { |
20 | union { | 21 | union { |
@@ -32,11 +33,41 @@ struct br_ip_list { | |||
32 | struct br_ip addr; | 33 | struct br_ip addr; |
33 | }; | 34 | }; |
34 | 35 | ||
36 | #define BR_HAIRPIN_MODE BIT(0) | ||
37 | #define BR_BPDU_GUARD BIT(1) | ||
38 | #define BR_ROOT_BLOCK BIT(2) | ||
39 | #define BR_MULTICAST_FAST_LEAVE BIT(3) | ||
40 | #define BR_ADMIN_COST BIT(4) | ||
41 | #define BR_LEARNING BIT(5) | ||
42 | #define BR_FLOOD BIT(6) | ||
43 | #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) | ||
44 | #define BR_PROMISC BIT(7) | ||
45 | #define BR_PROXYARP BIT(8) | ||
46 | #define BR_LEARNING_SYNC BIT(9) | ||
47 | |||
35 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 48 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
36 | 49 | ||
37 | typedef int br_should_route_hook_t(struct sk_buff *skb); | 50 | typedef int br_should_route_hook_t(struct sk_buff *skb); |
38 | extern br_should_route_hook_t __rcu *br_should_route_hook; | 51 | extern br_should_route_hook_t __rcu *br_should_route_hook; |
39 | 52 | ||
53 | #if IS_ENABLED(CONFIG_BRIDGE) | ||
54 | int br_fdb_external_learn_add(struct net_device *dev, | ||
55 | const unsigned char *addr, u16 vid); | ||
56 | int br_fdb_external_learn_del(struct net_device *dev, | ||
57 | const unsigned char *addr, u16 vid); | ||
58 | #else | ||
59 | static inline int br_fdb_external_learn_add(struct net_device *dev, | ||
60 | const unsigned char *addr, u16 vid) | ||
61 | { | ||
62 | return 0; | ||
63 | } | ||
64 | static inline int br_fdb_external_learn_del(struct net_device *dev, | ||
65 | const unsigned char *addr, u16 vid) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | #endif | ||
70 | |||
40 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) | 71 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) |
41 | int br_multicast_list_adjacent(struct net_device *dev, | 72 | int br_multicast_list_adjacent(struct net_device *dev, |
42 | struct list_head *br_ip_list); | 73 | struct list_head *br_ip_list); |
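br_fdb_external_learn_add()/_del() above give switch drivers a path to feed addresses learned (or aged out) in hardware into the software bridge FDB, with stubs that compile away when the bridge is not enabled. A hedged sketch of a driver reacting to a hardware notification; the event structure is invented for illustration.

/* Sketch: forward a hardware FDB event to the bridge layer. */
struct example_hw_fdb_event {
	struct net_device *port_dev;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool aged_out;
};

static int example_handle_hw_fdb(const struct example_hw_fdb_event *ev)
{
	if (ev->aged_out)
		return br_fdb_external_learn_del(ev->port_dev, ev->addr,
						 ev->vid);

	return br_fdb_external_learn_add(ev->port_dev, ev->addr, ev->vid);
}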
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a319..515a35e2a48a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, | |||
282 | } | 282 | } |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * vlan_insert_tag - regular VLAN tag inserting | 285 | * __vlan_insert_tag - regular VLAN tag inserting |
286 | * @skb: skbuff to tag | 286 | * @skb: skbuff to tag |
287 | * @vlan_proto: VLAN encapsulation protocol | 287 | * @vlan_proto: VLAN encapsulation protocol |
288 | * @vlan_tci: VLAN TCI to insert | 288 | * @vlan_tci: VLAN TCI to insert |
289 | * | 289 | * |
290 | * Inserts the VLAN tag into @skb as part of the payload | 290 | * Inserts the VLAN tag into @skb as part of the payload |
291 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | 291 | * Returns an error if skb_cow_head() fails. |
292 | * | ||
293 | * Following the skb_unshare() example, in case of error, the calling function | ||
294 | * doesn't have to worry about freeing the original skb. | ||
295 | * | 292 | * |
296 | * Does not change skb->protocol so this function can be used during receive. | 293 | * Does not change skb->protocol so this function can be used during receive. |
297 | */ | 294 | */ |
298 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | 295 | static inline int __vlan_insert_tag(struct sk_buff *skb, |
299 | __be16 vlan_proto, u16 vlan_tci) | 296 | __be16 vlan_proto, u16 vlan_tci) |
300 | { | 297 | { |
301 | struct vlan_ethhdr *veth; | 298 | struct vlan_ethhdr *veth; |
302 | 299 | ||
303 | if (skb_cow_head(skb, VLAN_HLEN) < 0) { | 300 | if (skb_cow_head(skb, VLAN_HLEN) < 0) |
304 | dev_kfree_skb_any(skb); | 301 | return -ENOMEM; |
305 | return NULL; | 302 | |
306 | } | ||
307 | veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); | 303 | veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); |
308 | 304 | ||
309 | /* Move the mac addresses to the beginning of the new header. */ | 305 | /* Move the mac addresses to the beginning of the new header. */ |
@@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | |||
316 | /* now, the TCI */ | 312 | /* now, the TCI */ |
317 | veth->h_vlan_TCI = htons(vlan_tci); | 313 | veth->h_vlan_TCI = htons(vlan_tci); |
318 | 314 | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | /** | ||
319 | * vlan_insert_tag - regular VLAN tag inserting | ||
320 | * @skb: skbuff to tag | ||
321 | * @vlan_proto: VLAN encapsulation protocol | ||
322 | * @vlan_tci: VLAN TCI to insert | ||
323 | * | ||
324 | * Inserts the VLAN tag into @skb as part of the payload | ||
325 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | ||
326 | * | ||
327 | * Following the skb_unshare() example, in case of error, the calling function | ||
328 | * doesn't have to worry about freeing the original skb. | ||
329 | * | ||
330 | * Does not change skb->protocol so this function can be used during receive. | ||
331 | */ | ||
332 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | ||
333 | __be16 vlan_proto, u16 vlan_tci) | ||
334 | { | ||
335 | int err; | ||
336 | |||
337 | err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); | ||
338 | if (err) { | ||
339 | dev_kfree_skb_any(skb); | ||
340 | return NULL; | ||
341 | } | ||
319 | return skb; | 342 | return skb; |
320 | } | 343 | } |
321 | 344 | ||
322 | /** | 345 | /** |
323 | * __vlan_put_tag - regular VLAN tag inserting | 346 | * vlan_insert_tag_set_proto - regular VLAN tag inserting |
324 | * @skb: skbuff to tag | 347 | * @skb: skbuff to tag |
348 | * @vlan_proto: VLAN encapsulation protocol | ||
325 | * @vlan_tci: VLAN TCI to insert | 349 | * @vlan_tci: VLAN TCI to insert |
326 | * | 350 | * |
327 | * Inserts the VLAN tag into @skb as part of the payload | 351 | * Inserts the VLAN tag into @skb as part of the payload |
@@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | |||
330 | * Following the skb_unshare() example, in case of error, the calling function | 354 | * Following the skb_unshare() example, in case of error, the calling function |
331 | * doesn't have to worry about freeing the original skb. | 355 | * doesn't have to worry about freeing the original skb. |
332 | */ | 356 | */ |
333 | static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, | 357 | static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, |
334 | __be16 vlan_proto, u16 vlan_tci) | 358 | __be16 vlan_proto, |
359 | u16 vlan_tci) | ||
335 | { | 360 | { |
336 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); | 361 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); |
337 | if (skb) | 362 | if (skb) |
@@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, | |||
339 | return skb; | 364 | return skb; |
340 | } | 365 | } |
341 | 366 | ||
342 | /** | 367 | /* |
343 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting | 368 | * __vlan_hwaccel_push_inside - pushes vlan tag to the payload |
344 | * @skb: skbuff to tag | 369 | * @skb: skbuff to tag |
345 | * @vlan_proto: VLAN encapsulation protocol | ||
346 | * @vlan_tci: VLAN TCI to insert | ||
347 | * | 370 | * |
348 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest | 371 | * Pushes the VLAN tag from @skb->vlan_tci into the payload. |
372 | * | ||
373 | * Following the skb_unshare() example, in case of error, the calling function | ||
374 | * doesn't have to worry about freeing the original skb. | ||
349 | */ | 375 | */ |
350 | static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, | 376 | static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) |
351 | __be16 vlan_proto, | ||
352 | u16 vlan_tci) | ||
353 | { | 377 | { |
354 | skb->vlan_proto = vlan_proto; | 378 | skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, |
355 | skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; | 379 | vlan_tx_tag_get(skb)); |
380 | if (likely(skb)) | ||
381 | skb->vlan_tci = 0; | ||
382 | return skb; | ||
383 | } | ||
384 | /* | ||
385 | * vlan_hwaccel_push_inside - pushes vlan tag to the payload | ||
386 | * @skb: skbuff to tag | ||
387 | * | ||
388 | * Checks if a tag is present in @skb->vlan_tci and if it is, it pushes the | ||
389 | * VLAN tag from @skb->vlan_tci inside to the payload. | ||
390 | * | ||
391 | * Following the skb_unshare() example, in case of error, the calling function | ||
392 | * doesn't have to worry about freeing the original skb. | ||
393 | */ | ||
394 | static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) | ||
395 | { | ||
396 | if (vlan_tx_tag_present(skb)) | ||
397 | skb = __vlan_hwaccel_push_inside(skb); | ||
356 | return skb; | 398 | return skb; |
357 | } | 399 | } |
358 | 400 | ||
359 | /** | 401 | /** |
360 | * vlan_put_tag - inserts VLAN tag according to device features | 402 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting |
361 | * @skb: skbuff to tag | 403 | * @skb: skbuff to tag |
404 | * @vlan_proto: VLAN encapsulation protocol | ||
362 | * @vlan_tci: VLAN TCI to insert | 405 | * @vlan_tci: VLAN TCI to insert |
363 | * | 406 | * |
364 | * Assumes skb->dev is the target that will xmit this frame. | 407 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest |
365 | * Returns a VLAN tagged skb. | ||
366 | */ | 408 | */ |
367 | static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, | 409 | static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, |
368 | __be16 vlan_proto, u16 vlan_tci) | 410 | __be16 vlan_proto, u16 vlan_tci) |
369 | { | 411 | { |
370 | if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { | 412 | skb->vlan_proto = vlan_proto; |
371 | return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); | 413 | skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; |
372 | } else { | ||
373 | return __vlan_put_tag(skb, vlan_proto, vlan_tci); | ||
374 | } | ||
375 | } | 414 | } |
376 | 415 | ||
377 | /** | 416 | /** |
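
The restructured if_vlan.h helpers split software insertion into an error-returning __vlan_insert_tag() and an skb-consuming vlan_insert_tag(), while the old vlan_put_tag() is dropped in favour of the new vlan_hwaccel_push_inside() helpers. A minimal, hedged sketch of the caller side (not taken from the patch; the xmit function and its comments are hypothetical):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Push a pending out-of-band tag into the packet data, if any. */
	skb = vlan_hwaccel_push_inside(skb);
	if (unlikely(!skb))
		return NETDEV_TX_OK;	/* helper already freed the skb */

	/* ... queue the now self-contained frame to the hardware ... */
	return NETDEV_TX_OK;
}
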
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index d8257ab60bac..2c476acb87d9 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h | |||
@@ -164,7 +164,7 @@ struct st_sensor_transfer_function { | |||
164 | }; | 164 | }; |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * struct st_sensors - ST sensors list | 167 | * struct st_sensor_settings - ST specific sensor settings |
168 | * @wai: Contents of WhoAmI register. | 168 | * @wai: Contents of WhoAmI register. |
169 | * @sensors_supported: List of supported sensors by struct itself. | 169 | * @sensors_supported: List of supported sensors by struct itself. |
170 | * @ch: IIO channels for the sensor. | 170 | * @ch: IIO channels for the sensor. |
@@ -177,7 +177,7 @@ struct st_sensor_transfer_function { | |||
177 | * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. | 177 | * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. |
178 | * @bootime: samples to discard when sensor passing from power-down to power-up. | 178 | * @bootime: samples to discard when sensor passing from power-down to power-up. |
179 | */ | 179 | */ |
180 | struct st_sensors { | 180 | struct st_sensor_settings { |
181 | u8 wai; | 181 | u8 wai; |
182 | char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; | 182 | char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; |
183 | struct iio_chan_spec *ch; | 183 | struct iio_chan_spec *ch; |
@@ -196,7 +196,7 @@ struct st_sensors { | |||
196 | * struct st_sensor_data - ST sensor device status | 196 | * struct st_sensor_data - ST sensor device status |
197 | * @dev: Pointer to instance of struct device (I2C or SPI). | 197 | * @dev: Pointer to instance of struct device (I2C or SPI). |
198 | * @trig: The trigger in use by the core driver. | 198 | * @trig: The trigger in use by the core driver. |
199 | * @sensor: Pointer to the current sensor struct in use. | 199 | * @sensor_settings: Pointer to the specific sensor settings in use. |
200 | * @current_fullscale: Maximum range of measure by the sensor. | 200 | * @current_fullscale: Maximum range of measure by the sensor. |
201 | * @vdd: Pointer to sensor's Vdd power supply | 201 | * @vdd: Pointer to sensor's Vdd power supply |
202 | * @vdd_io: Pointer to sensor's Vdd-IO power supply | 202 | * @vdd_io: Pointer to sensor's Vdd-IO power supply |
@@ -213,7 +213,7 @@ struct st_sensors { | |||
213 | struct st_sensor_data { | 213 | struct st_sensor_data { |
214 | struct device *dev; | 214 | struct device *dev; |
215 | struct iio_trigger *trig; | 215 | struct iio_trigger *trig; |
216 | struct st_sensors *sensor; | 216 | struct st_sensor_settings *sensor_settings; |
217 | struct st_sensor_fullscale_avl *current_fullscale; | 217 | struct st_sensor_fullscale_avl *current_fullscale; |
218 | struct regulator *vdd; | 218 | struct regulator *vdd; |
219 | struct regulator *vdd_io; | 219 | struct regulator *vdd_io; |
@@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, | |||
279 | struct iio_chan_spec const *ch, int *val); | 279 | struct iio_chan_spec const *ch, int *val); |
280 | 280 | ||
281 | int st_sensors_check_device_support(struct iio_dev *indio_dev, | 281 | int st_sensors_check_device_support(struct iio_dev *indio_dev, |
282 | int num_sensors_list, const struct st_sensors *sensors); | 282 | int num_sensors_list, const struct st_sensor_settings *sensor_settings); |
283 | 283 | ||
284 | ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, | 284 | ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, |
285 | struct device_attribute *attr, char *buf); | 285 | struct device_attribute *attr, char *buf); |
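
The struct st_sensors to struct st_sensor_settings rename propagates into the per-driver tables and the st_sensors_check_device_support() call. A hedged sketch of what the driver side looks like afterwards; the WhoAmI value and sensor name below are invented:

static const struct st_sensor_settings example_accel_settings[] = {
	{
		.wai = 0x33,			/* invented WhoAmI value */
		.sensors_supported = { [0] = "example-accel" },
		/* channels, fullscale and ODR tables omitted */
	},
};

static int example_check_support(struct iio_dev *indio_dev)
{
	return st_sensors_check_device_support(indio_dev,
				ARRAY_SIZE(example_accel_settings),
				example_accel_settings);
}
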
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index 8bbd7bc1043d..03fa332ad2a8 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h | |||
@@ -72,7 +72,7 @@ struct iio_event_data { | |||
72 | 72 | ||
73 | #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) | 73 | #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) |
74 | 74 | ||
75 | #define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) | 75 | #define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) |
76 | 76 | ||
77 | #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) | 77 | #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) |
78 | 78 | ||
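
The new 0x7F mask keeps all seven low bits of the extracted direction field, where the old 0xCF silently dropped bits 4 and 5. A small sketch of the round trip with the stock event-code helpers:

u64 code = IIO_UNMOD_EVENT_CODE(IIO_ACCEL, 0, IIO_EV_TYPE_THRESH,
				IIO_EV_DIR_FALLING);
enum iio_event_direction dir = IIO_EVENT_CODE_EXTRACT_DIR(code);
/* dir is IIO_EV_DIR_FALLING; with 0x7F every bit of the seven-bit
 * direction field survives the extraction. */
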
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 15dc6bc2bdd2..3642ce7ef512 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
15 | #include <linux/iio/types.h> | 15 | #include <linux/iio/types.h> |
16 | #include <linux/of.h> | ||
16 | /* IIO TODO LIST */ | 17 | /* IIO TODO LIST */ |
17 | /* | 18 | /* |
18 | * Provide means of adjusting timer accuracy. | 19 | * Provide means of adjusting timer accuracy. |
@@ -326,6 +327,11 @@ struct iio_dev; | |||
326 | * @update_scan_mode: function to configure device and scan buffer when | 327 | * @update_scan_mode: function to configure device and scan buffer when |
327 | * channels have changed | 328 | * channels have changed |
328 | * @debugfs_reg_access: function to read or write register value of device | 329 | * @debugfs_reg_access: function to read or write register value of device |
330 | * @of_xlate: function pointer to obtain channel specifier index. | ||
331 | * When #iio-cells is greater than '0', the driver could | ||
332 | * provide a custom of_xlate function that reads the | ||
333 | * *args* and returns the appropriate index in registered | ||
334 | * IIO channels array. | ||
329 | **/ | 335 | **/ |
330 | struct iio_info { | 336 | struct iio_info { |
331 | struct module *driver_module; | 337 | struct module *driver_module; |
@@ -385,6 +391,8 @@ struct iio_info { | |||
385 | int (*debugfs_reg_access)(struct iio_dev *indio_dev, | 391 | int (*debugfs_reg_access)(struct iio_dev *indio_dev, |
386 | unsigned reg, unsigned writeval, | 392 | unsigned reg, unsigned writeval, |
387 | unsigned *readval); | 393 | unsigned *readval); |
394 | int (*of_xlate)(struct iio_dev *indio_dev, | ||
395 | const struct of_phandle_args *iiospec); | ||
388 | }; | 396 | }; |
389 | 397 | ||
390 | /** | 398 | /** |
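
A hedged sketch of a driver supplying the new of_xlate callback, assuming a one-cell consumer specifier whose single argument is an index into the registered channel array (all names below are invented):

static int example_of_xlate(struct iio_dev *indio_dev,
			    const struct of_phandle_args *iiospec)
{
	if (iiospec->args[0] >= indio_dev->num_channels)
		return -EINVAL;

	return iiospec->args[0];
}

static const struct iio_info example_info = {
	.driver_module	= THIS_MODULE,
	.of_xlate	= example_of_xlate,
};
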
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 77fc43f8fb72..3037fc085e8e 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -102,7 +102,7 @@ extern struct group_info init_groups; | |||
102 | #define INIT_IDS | 102 | #define INIT_IDS |
103 | #endif | 103 | #endif |
104 | 104 | ||
105 | #ifdef CONFIG_TREE_PREEMPT_RCU | 105 | #ifdef CONFIG_PREEMPT_RCU |
106 | #define INIT_TASK_RCU_TREE_PREEMPT() \ | 106 | #define INIT_TASK_RCU_TREE_PREEMPT() \ |
107 | .rcu_blocked_node = NULL, | 107 | .rcu_blocked_node = NULL, |
108 | #else | 108 | #else |
@@ -166,6 +166,15 @@ extern struct task_group root_task_group; | |||
166 | # define INIT_RT_MUTEXES(tsk) | 166 | # define INIT_RT_MUTEXES(tsk) |
167 | #endif | 167 | #endif |
168 | 168 | ||
169 | #ifdef CONFIG_NUMA_BALANCING | ||
170 | # define INIT_NUMA_BALANCING(tsk) \ | ||
171 | .numa_preferred_nid = -1, \ | ||
172 | .numa_group = NULL, \ | ||
173 | .numa_faults = NULL, | ||
174 | #else | ||
175 | # define INIT_NUMA_BALANCING(tsk) | ||
176 | #endif | ||
177 | |||
169 | /* | 178 | /* |
170 | * INIT_TASK is used to set up the first task table, touch at | 179 | * INIT_TASK is used to set up the first task table, touch at |
171 | * your own risk!. Base=0, limit=0x1fffff (=2MB) | 180 | * your own risk!. Base=0, limit=0x1fffff (=2MB) |
@@ -237,6 +246,7 @@ extern struct task_group root_task_group; | |||
237 | INIT_CPUSET_SEQ(tsk) \ | 246 | INIT_CPUSET_SEQ(tsk) \ |
238 | INIT_RT_MUTEXES(tsk) \ | 247 | INIT_RT_MUTEXES(tsk) \ |
239 | INIT_VTIME(tsk) \ | 248 | INIT_VTIME(tsk) \ |
249 | INIT_NUMA_BALANCING(tsk) \ | ||
240 | } | 250 | } |
241 | 251 | ||
242 | 252 | ||
diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 83222cebd47b..c2d6082a1a4c 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h | |||
@@ -24,6 +24,7 @@ enum integrity_status { | |||
24 | #ifdef CONFIG_INTEGRITY | 24 | #ifdef CONFIG_INTEGRITY |
25 | extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); | 25 | extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); |
26 | extern void integrity_inode_free(struct inode *inode); | 26 | extern void integrity_inode_free(struct inode *inode); |
27 | extern void __init integrity_load_keys(void); | ||
27 | 28 | ||
28 | #else | 29 | #else |
29 | static inline struct integrity_iint_cache * | 30 | static inline struct integrity_iint_cache * |
@@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode) | |||
36 | { | 37 | { |
37 | return; | 38 | return; |
38 | } | 39 | } |
40 | |||
41 | static inline void integrity_load_keys(void) | ||
42 | { | ||
43 | } | ||
39 | #endif /* CONFIG_INTEGRITY */ | 44 | #endif /* CONFIG_INTEGRITY */ |
45 | |||
40 | #endif /* _LINUX_INTEGRITY_H */ | 46 | #endif /* _LINUX_INTEGRITY_H */ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 69517a24bc50..d9b05b5bf8c7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) | |||
556 | atomic_dec(&t->count); | 556 | atomic_dec(&t->count); |
557 | } | 557 | } |
558 | 558 | ||
559 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | ||
560 | { | ||
561 | smp_mb__before_atomic(); | ||
562 | atomic_dec(&t->count); | ||
563 | } | ||
564 | |||
565 | extern void tasklet_kill(struct tasklet_struct *t); | 559 | extern void tasklet_kill(struct tasklet_struct *t); |
566 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | 560 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); |
567 | extern void tasklet_init(struct tasklet_struct *t, | 561 | extern void tasklet_init(struct tasklet_struct *t, |
diff --git a/include/linux/io.h b/include/linux/io.h index d5fc9b8d8b03..fa02e55e5a2e 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
61 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | 61 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) |
62 | 62 | ||
63 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, | 63 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, |
64 | unsigned long size); | 64 | resource_size_t size); |
65 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, | 65 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, |
66 | unsigned long size); | 66 | resource_size_t size); |
67 | void devm_iounmap(struct device *dev, void __iomem *addr); | 67 | void devm_iounmap(struct device *dev, void __iomem *addr); |
68 | int check_signature(const volatile void __iomem *io_addr, | 68 | int check_signature(const volatile void __iomem *io_addr, |
69 | const unsigned char *signature, int length); | 69 | const unsigned char *signature, int length); |
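
The devm_ioremap()/devm_ioremap_nocache() size argument is widened to resource_size_t, so the usual probe-time pattern no longer narrows the size on configurations where resource_size_t is 64-bit. A sketch of that pattern, where pdev is a hypothetical platform device:

struct resource *res;
void __iomem *base;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
	return -ENODEV;

base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!base)
	return -ENOMEM;
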
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 415c7613d02c..38daa453f2e5 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -23,12 +23,13 @@ | |||
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/scatterlist.h> | ||
26 | #include <trace/events/iommu.h> | 27 | #include <trace/events/iommu.h> |
27 | 28 | ||
28 | #define IOMMU_READ (1 << 0) | 29 | #define IOMMU_READ (1 << 0) |
29 | #define IOMMU_WRITE (1 << 1) | 30 | #define IOMMU_WRITE (1 << 1) |
30 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ | 31 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ |
31 | #define IOMMU_EXEC (1 << 3) | 32 | #define IOMMU_NOEXEC (1 << 3) |
32 | 33 | ||
33 | struct iommu_ops; | 34 | struct iommu_ops; |
34 | struct iommu_group; | 35 | struct iommu_group; |
@@ -62,6 +63,7 @@ enum iommu_cap { | |||
62 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA | 63 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA |
63 | transactions */ | 64 | transactions */ |
64 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ | 65 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ |
66 | IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ | ||
65 | }; | 67 | }; |
66 | 68 | ||
67 | /* | 69 | /* |
@@ -98,6 +100,8 @@ enum iommu_attr { | |||
98 | * @detach_dev: detach device from an iommu domain | 100 | * @detach_dev: detach device from an iommu domain |
99 | * @map: map a physically contiguous memory region to an iommu domain | 101 | * @map: map a physically contiguous memory region to an iommu domain |
100 | * @unmap: unmap a physically contiguous memory region from an iommu domain | 102 | * @unmap: unmap a physically contiguous memory region from an iommu domain |
103 | * @map_sg: map a scatter-gather list of physically contiguous memory chunks | ||
104 | * to an iommu domain | ||
101 | * @iova_to_phys: translate iova to physical address | 105 | * @iova_to_phys: translate iova to physical address |
102 | * @add_device: add device to iommu grouping | 106 | * @add_device: add device to iommu grouping |
103 | * @remove_device: remove device from iommu grouping | 107 | * @remove_device: remove device from iommu grouping |
@@ -117,6 +121,8 @@ struct iommu_ops { | |||
117 | phys_addr_t paddr, size_t size, int prot); | 121 | phys_addr_t paddr, size_t size, int prot); |
118 | size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, | 122 | size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, |
119 | size_t size); | 123 | size_t size); |
124 | size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, | ||
125 | struct scatterlist *sg, unsigned int nents, int prot); | ||
120 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); | 126 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); |
121 | int (*add_device)(struct device *dev); | 127 | int (*add_device)(struct device *dev); |
122 | void (*remove_device)(struct device *dev); | 128 | void (*remove_device)(struct device *dev); |
@@ -164,6 +170,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
164 | phys_addr_t paddr, size_t size, int prot); | 170 | phys_addr_t paddr, size_t size, int prot); |
165 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, | 171 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
166 | size_t size); | 172 | size_t size); |
173 | extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, | ||
174 | struct scatterlist *sg, unsigned int nents, | ||
175 | int prot); | ||
167 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); | 176 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); |
168 | extern void iommu_set_fault_handler(struct iommu_domain *domain, | 177 | extern void iommu_set_fault_handler(struct iommu_domain *domain, |
169 | iommu_fault_handler_t handler, void *token); | 178 | iommu_fault_handler_t handler, void *token); |
@@ -249,6 +258,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain, | |||
249 | return ret; | 258 | return ret; |
250 | } | 259 | } |
251 | 260 | ||
261 | static inline size_t iommu_map_sg(struct iommu_domain *domain, | ||
262 | unsigned long iova, struct scatterlist *sg, | ||
263 | unsigned int nents, int prot) | ||
264 | { | ||
265 | return domain->ops->map_sg(domain, iova, sg, nents, prot); | ||
266 | } | ||
267 | |||
252 | #else /* CONFIG_IOMMU_API */ | 268 | #else /* CONFIG_IOMMU_API */ |
253 | 269 | ||
254 | struct iommu_ops {}; | 270 | struct iommu_ops {}; |
@@ -301,6 +317,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
301 | return -ENODEV; | 317 | return -ENODEV; |
302 | } | 318 | } |
303 | 319 | ||
320 | static inline size_t iommu_map_sg(struct iommu_domain *domain, | ||
321 | unsigned long iova, struct scatterlist *sg, | ||
322 | unsigned int nents, int prot) | ||
323 | { | ||
324 | return -ENODEV; | ||
325 | } | ||
326 | |||
304 | static inline int iommu_domain_window_enable(struct iommu_domain *domain, | 327 | static inline int iommu_domain_window_enable(struct iommu_domain *domain, |
305 | u32 wnd_nr, phys_addr_t paddr, | 328 | u32 wnd_nr, phys_addr_t paddr, |
306 | u64 size, int prot) | 329 | u64 size, int prot) |
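
A hedged sketch of both sides of the new scatter-gather hook: an IOMMU driver may point the callback at the generic helper, and callers can map a whole scatterlist in one call instead of looping over iommu_map(). The ops structure is abbreviated and the caller-side function is invented:

static struct iommu_ops example_iommu_ops = {
	/* other callbacks omitted */
	.map_sg		= default_iommu_map_sg,
};

static int example_map_buffer(struct iommu_domain *domain, unsigned long iova,
			      struct sg_table *sgt)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	return mapped ? 0 : -ENOMEM;	/* 0 means nothing was mapped */
}
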
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 35e7eca4e33b..e365d5ec69cb 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
@@ -7,15 +7,6 @@ | |||
7 | #include <linux/notifier.h> | 7 | #include <linux/notifier.h> |
8 | #include <linux/nsproxy.h> | 8 | #include <linux/nsproxy.h> |
9 | 9 | ||
10 | /* | ||
11 | * ipc namespace events | ||
12 | */ | ||
13 | #define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ | ||
14 | #define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ | ||
15 | #define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ | ||
16 | |||
17 | #define IPCNS_CALLBACK_PRI 0 | ||
18 | |||
19 | struct user_namespace; | 10 | struct user_namespace; |
20 | 11 | ||
21 | struct ipc_ids { | 12 | struct ipc_ids { |
@@ -38,7 +29,6 @@ struct ipc_namespace { | |||
38 | unsigned int msg_ctlmni; | 29 | unsigned int msg_ctlmni; |
39 | atomic_t msg_bytes; | 30 | atomic_t msg_bytes; |
40 | atomic_t msg_hdrs; | 31 | atomic_t msg_hdrs; |
41 | int auto_msgmni; | ||
42 | 32 | ||
43 | size_t shm_ctlmax; | 33 | size_t shm_ctlmax; |
44 | size_t shm_ctlall; | 34 | size_t shm_ctlall; |
@@ -77,18 +67,8 @@ extern atomic_t nr_ipc_ns; | |||
77 | extern spinlock_t mq_lock; | 67 | extern spinlock_t mq_lock; |
78 | 68 | ||
79 | #ifdef CONFIG_SYSVIPC | 69 | #ifdef CONFIG_SYSVIPC |
80 | extern int register_ipcns_notifier(struct ipc_namespace *); | ||
81 | extern int cond_register_ipcns_notifier(struct ipc_namespace *); | ||
82 | extern void unregister_ipcns_notifier(struct ipc_namespace *); | ||
83 | extern int ipcns_notify(unsigned long); | ||
84 | extern void shm_destroy_orphaned(struct ipc_namespace *ns); | 70 | extern void shm_destroy_orphaned(struct ipc_namespace *ns); |
85 | #else /* CONFIG_SYSVIPC */ | 71 | #else /* CONFIG_SYSVIPC */ |
86 | static inline int register_ipcns_notifier(struct ipc_namespace *ns) | ||
87 | { return 0; } | ||
88 | static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) | ||
89 | { return 0; } | ||
90 | static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } | ||
91 | static inline int ipcns_notify(unsigned long l) { return 0; } | ||
92 | static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} | 72 | static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} |
93 | #endif /* CONFIG_SYSVIPC */ | 73 | #endif /* CONFIG_SYSVIPC */ |
94 | 74 | ||
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 76d2acbfa7c6..838dbfa3c331 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #include <linux/list.h> | 38 | #include <linux/list.h> |
39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
40 | #include <linux/acpi.h> /* For acpi_handle */ | ||
40 | 41 | ||
41 | struct module; | 42 | struct module; |
42 | struct device; | 43 | struct device; |
@@ -278,15 +279,18 @@ enum ipmi_addr_src { | |||
278 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, | 279 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, |
279 | SI_PCI, SI_DEVICETREE, SI_DEFAULT | 280 | SI_PCI, SI_DEVICETREE, SI_DEFAULT |
280 | }; | 281 | }; |
282 | const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); | ||
281 | 283 | ||
282 | union ipmi_smi_info_union { | 284 | union ipmi_smi_info_union { |
285 | #ifdef CONFIG_ACPI | ||
283 | /* | 286 | /* |
284 | * the acpi_info element is defined for the SI_ACPI | 287 | * the acpi_info element is defined for the SI_ACPI |
285 | * address type | 288 | * address type |
286 | */ | 289 | */ |
287 | struct { | 290 | struct { |
288 | void *acpi_handle; | 291 | acpi_handle acpi_handle; |
289 | } acpi_info; | 292 | } acpi_info; |
293 | #endif | ||
290 | }; | 294 | }; |
291 | 295 | ||
292 | struct ipmi_smi_info { | 296 | struct ipmi_smi_info { |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index bd349240d50e..0b1e569f5ff5 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers { | |||
98 | operation is not allowed to fail. If an error occurs, it | 98 | operation is not allowed to fail. If an error occurs, it |
99 | should report back the error in a received message. It may | 99 | should report back the error in a received message. It may |
100 | do this in the current call context, since no write locks | 100 | do this in the current call context, since no write locks |
101 | are held when this is run. If the priority is > 0, the | 101 | are held when this is run. Messages are delivered one at |
102 | message will go into a high-priority queue and be sent | 102 | a time by the message handler; a new message will not be |
103 | first. Otherwise, it goes into a normal-priority queue. */ | 103 | delivered until the previous message is returned. */ |
104 | void (*sender)(void *send_info, | 104 | void (*sender)(void *send_info, |
105 | struct ipmi_smi_msg *msg, | 105 | struct ipmi_smi_msg *msg); |
106 | int priority); | ||
107 | 106 | ||
108 | /* Called by the upper layer to request that we try to get | 107 | /* Called by the upper layer to request that we try to get |
109 | events from the BMC we are attached to. */ | 108 | events from the BMC we are attached to. */ |
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
212 | void *send_info, | 211 | void *send_info, |
213 | struct ipmi_device_id *device_id, | 212 | struct ipmi_device_id *device_id, |
214 | struct device *dev, | 213 | struct device *dev, |
215 | const char *sysfs_name, | ||
216 | unsigned char slave_addr); | 214 | unsigned char slave_addr); |
217 | 215 | ||
218 | /* | 216 | /* |
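
With the priority argument gone, an interface driver's sender() simply takes ownership of the single outstanding message. A hedged sketch of the new shape; the private structure and locking are invented:

struct example_smi {
	spinlock_t		msg_lock;
	struct ipmi_smi_msg	*cur_msg;
};

static void example_sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct example_smi *smi = send_info;
	unsigned long flags;

	spin_lock_irqsave(&smi->msg_lock, flags);
	/* only one message is outstanding at a time, so no priority queue */
	smi->cur_msg = msg;
	spin_unlock_irqrestore(&smi->msg_lock, flags);
	/* kick the hardware state machine here */
}

static struct ipmi_smi_handlers example_handlers = {
	.owner	= THIS_MODULE,
	.sender	= example_sender,
	/* remaining callbacks omitted */
};
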
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61..c694e7baa621 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -42,6 +42,7 @@ struct ipv6_devconf { | |||
42 | __s32 accept_ra_from_local; | 42 | __s32 accept_ra_from_local; |
43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
44 | __s32 optimistic_dad; | 44 | __s32 optimistic_dad; |
45 | __s32 use_optimistic; | ||
45 | #endif | 46 | #endif |
46 | #ifdef CONFIG_IPV6_MROUTE | 47 | #ifdef CONFIG_IPV6_MROUTE |
47 | __s32 mc_forwarding; | 48 | __s32 mc_forwarding; |
@@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) | |||
316 | #define tcp_twsk_ipv6only(__sk) 0 | 317 | #define tcp_twsk_ipv6only(__sk) 0 |
317 | #define inet_v6_ipv6only(__sk) 0 | 318 | #define inet_v6_ipv6only(__sk) 0 |
318 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | 319 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
319 | |||
320 | #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ | ||
321 | (((__sk)->sk_portpair == (__ports)) && \ | ||
322 | ((__sk)->sk_family == AF_INET6) && \ | ||
323 | ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ | ||
324 | ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ | ||
325 | (!(__sk)->sk_bound_dev_if || \ | ||
326 | ((__sk)->sk_bound_dev_if == (__dif))) && \ | ||
327 | net_eq(sock_net(__sk), (__net))) | ||
328 | |||
329 | #endif /* _IPV6_H */ | 320 | #endif /* _IPV6_H */ |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 03f48d936f66..d09ec7a1243e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -15,11 +15,13 @@ | |||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/cpumask.h> | 16 | #include <linux/cpumask.h> |
17 | #include <linux/gfp.h> | 17 | #include <linux/gfp.h> |
18 | #include <linux/irqhandler.h> | ||
18 | #include <linux/irqreturn.h> | 19 | #include <linux/irqreturn.h> |
19 | #include <linux/irqnr.h> | 20 | #include <linux/irqnr.h> |
20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
21 | #include <linux/topology.h> | 22 | #include <linux/topology.h> |
22 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
24 | #include <linux/io.h> | ||
23 | 25 | ||
24 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
25 | #include <asm/ptrace.h> | 27 | #include <asm/ptrace.h> |
@@ -27,11 +29,7 @@ | |||
27 | 29 | ||
28 | struct seq_file; | 30 | struct seq_file; |
29 | struct module; | 31 | struct module; |
30 | struct irq_desc; | 32 | struct msi_msg; |
31 | struct irq_data; | ||
32 | typedef void (*irq_flow_handler_t)(unsigned int irq, | ||
33 | struct irq_desc *desc); | ||
34 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | ||
35 | 33 | ||
36 | /* | 34 | /* |
37 | * IRQ line status. | 35 | * IRQ line status. |
@@ -113,10 +111,14 @@ enum { | |||
113 | * | 111 | * |
114 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 112 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity |
115 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity | 113 | * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity |
114 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to | ||
115 | * support stacked irqchips, which indicates skipping | ||
116 | * all descendent irqchips. | ||
116 | */ | 117 | */ |
117 | enum { | 118 | enum { |
118 | IRQ_SET_MASK_OK = 0, | 119 | IRQ_SET_MASK_OK = 0, |
119 | IRQ_SET_MASK_OK_NOCOPY, | 120 | IRQ_SET_MASK_OK_NOCOPY, |
121 | IRQ_SET_MASK_OK_DONE, | ||
120 | }; | 122 | }; |
121 | 123 | ||
122 | struct msi_desc; | 124 | struct msi_desc; |
@@ -133,6 +135,8 @@ struct irq_domain; | |||
133 | * @chip: low level interrupt hardware access | 135 | * @chip: low level interrupt hardware access |
134 | * @domain: Interrupt translation domain; responsible for mapping | 136 | * @domain: Interrupt translation domain; responsible for mapping |
135 | * between hwirq number and linux irq number. | 137 | * between hwirq number and linux irq number. |
138 | * @parent_data: pointer to parent struct irq_data to support hierarchy | ||
139 | * irq_domain | ||
136 | * @handler_data: per-IRQ data for the irq_chip methods | 140 | * @handler_data: per-IRQ data for the irq_chip methods |
137 | * @chip_data: platform-specific per-chip private data for the chip | 141 | * @chip_data: platform-specific per-chip private data for the chip |
138 | * methods, to allow shared chip implementations | 142 | * methods, to allow shared chip implementations |
@@ -151,6 +155,9 @@ struct irq_data { | |||
151 | unsigned int state_use_accessors; | 155 | unsigned int state_use_accessors; |
152 | struct irq_chip *chip; | 156 | struct irq_chip *chip; |
153 | struct irq_domain *domain; | 157 | struct irq_domain *domain; |
158 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
159 | struct irq_data *parent_data; | ||
160 | #endif | ||
154 | void *handler_data; | 161 | void *handler_data; |
155 | void *chip_data; | 162 | void *chip_data; |
156 | struct msi_desc *msi_desc; | 163 | struct msi_desc *msi_desc; |
@@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | |||
315 | * any other callback related to this irq | 322 | * any other callback related to this irq |
316 | * @irq_release_resources: optional to release resources acquired with | 323 | * @irq_release_resources: optional to release resources acquired with |
317 | * irq_request_resources | 324 | * irq_request_resources |
325 | * @irq_compose_msi_msg: optional to compose message content for MSI | ||
326 | * @irq_write_msi_msg: optional to write message content for MSI | ||
318 | * @flags: chip specific flags | 327 | * @flags: chip specific flags |
319 | */ | 328 | */ |
320 | struct irq_chip { | 329 | struct irq_chip { |
@@ -351,6 +360,9 @@ struct irq_chip { | |||
351 | int (*irq_request_resources)(struct irq_data *data); | 360 | int (*irq_request_resources)(struct irq_data *data); |
352 | void (*irq_release_resources)(struct irq_data *data); | 361 | void (*irq_release_resources)(struct irq_data *data); |
353 | 362 | ||
363 | void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); | ||
364 | void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); | ||
365 | |||
354 | unsigned long flags; | 366 | unsigned long flags; |
355 | }; | 367 | }; |
356 | 368 | ||
@@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); | |||
438 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 450 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
439 | extern void handle_nested_irq(unsigned int irq); | 451 | extern void handle_nested_irq(unsigned int irq); |
440 | 452 | ||
453 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); | ||
454 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
455 | extern void irq_chip_ack_parent(struct irq_data *data); | ||
456 | extern int irq_chip_retrigger_hierarchy(struct irq_data *data); | ||
457 | extern void irq_chip_mask_parent(struct irq_data *data); | ||
458 | extern void irq_chip_unmask_parent(struct irq_data *data); | ||
459 | extern void irq_chip_eoi_parent(struct irq_data *data); | ||
460 | extern int irq_chip_set_affinity_parent(struct irq_data *data, | ||
461 | const struct cpumask *dest, | ||
462 | bool force); | ||
463 | #endif | ||
464 | |||
441 | /* Handling of unhandled and spurious interrupts: */ | 465 | /* Handling of unhandled and spurious interrupts: */ |
442 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 466 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
443 | irqreturn_t action_ret); | 467 | irqreturn_t action_ret); |
@@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq); | |||
639 | void irq_init_desc(unsigned int irq); | 663 | void irq_init_desc(unsigned int irq); |
640 | #endif | 664 | #endif |
641 | 665 | ||
642 | #ifndef irq_reg_writel | ||
643 | # define irq_reg_writel(val, addr) writel(val, addr) | ||
644 | #endif | ||
645 | #ifndef irq_reg_readl | ||
646 | # define irq_reg_readl(addr) readl(addr) | ||
647 | #endif | ||
648 | |||
649 | /** | 666 | /** |
650 | * struct irq_chip_regs - register offsets for struct irq_gci | 667 | * struct irq_chip_regs - register offsets for struct irq_gci |
651 | * @enable: Enable register offset to reg_base | 668 | * @enable: Enable register offset to reg_base |
@@ -692,6 +709,8 @@ struct irq_chip_type { | |||
692 | * struct irq_chip_generic - Generic irq chip data structure | 709 | * struct irq_chip_generic - Generic irq chip data structure |
693 | * @lock: Lock to protect register and cache data access | 710 | * @lock: Lock to protect register and cache data access |
694 | * @reg_base: Register base address (virtual) | 711 | * @reg_base: Register base address (virtual) |
712 | * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) | ||
713 | * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) | ||
695 | * @irq_base: Interrupt base nr for this chip | 714 | * @irq_base: Interrupt base nr for this chip |
696 | * @irq_cnt: Number of interrupts handled by this chip | 715 | * @irq_cnt: Number of interrupts handled by this chip |
697 | * @mask_cache: Cached mask register shared between all chip types | 716 | * @mask_cache: Cached mask register shared between all chip types |
@@ -716,6 +735,8 @@ struct irq_chip_type { | |||
716 | struct irq_chip_generic { | 735 | struct irq_chip_generic { |
717 | raw_spinlock_t lock; | 736 | raw_spinlock_t lock; |
718 | void __iomem *reg_base; | 737 | void __iomem *reg_base; |
738 | u32 (*reg_readl)(void __iomem *addr); | ||
739 | void (*reg_writel)(u32 val, void __iomem *addr); | ||
719 | unsigned int irq_base; | 740 | unsigned int irq_base; |
720 | unsigned int irq_cnt; | 741 | unsigned int irq_cnt; |
721 | u32 mask_cache; | 742 | u32 mask_cache; |
@@ -740,12 +761,14 @@ struct irq_chip_generic { | |||
740 | * the parent irq. Usually GPIO implementations | 761 | * the parent irq. Usually GPIO implementations |
741 | * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private | 762 | * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private |
742 | * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask | 763 | * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask |
764 | * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) | ||
743 | */ | 765 | */ |
744 | enum irq_gc_flags { | 766 | enum irq_gc_flags { |
745 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | 767 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, |
746 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | 768 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, |
747 | IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, | 769 | IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, |
748 | IRQ_GC_NO_MASK = 1 << 3, | 770 | IRQ_GC_NO_MASK = 1 << 3, |
771 | IRQ_GC_BE_IO = 1 << 4, | ||
749 | }; | 772 | }; |
750 | 773 | ||
751 | /* | 774 | /* |
@@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | |||
821 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | 844 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } |
822 | #endif | 845 | #endif |
823 | 846 | ||
847 | static inline void irq_reg_writel(struct irq_chip_generic *gc, | ||
848 | u32 val, int reg_offset) | ||
849 | { | ||
850 | if (gc->reg_writel) | ||
851 | gc->reg_writel(val, gc->reg_base + reg_offset); | ||
852 | else | ||
853 | writel(val, gc->reg_base + reg_offset); | ||
854 | } | ||
855 | |||
856 | static inline u32 irq_reg_readl(struct irq_chip_generic *gc, | ||
857 | int reg_offset) | ||
858 | { | ||
859 | if (gc->reg_readl) | ||
860 | return gc->reg_readl(gc->reg_base + reg_offset); | ||
861 | else | ||
862 | return readl(gc->reg_base + reg_offset); | ||
863 | } | ||
864 | |||
824 | #endif /* _LINUX_IRQ_H */ | 865 | #endif /* _LINUX_IRQ_H */ |
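
irq_reg_readl()/irq_reg_writel() now take the generic chip and honour per-chip accessors, so a driver with big-endian registers can either pass IRQ_GC_BE_IO when allocating its generic chips or install wrappers itself. A hedged sketch of the explicit variant (the function names are invented):

static u32 example_be_readl(void __iomem *addr)
{
	return ioread32be(addr);
}

static void example_be_writel(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

static void example_setup_be_accessors(struct irq_domain *domain)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);

	gc->reg_readl = example_be_readl;
	gc->reg_writel = example_be_writel;
}
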
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 03a4ea37ba86..1e8b0cf30792 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -49,6 +49,10 @@ | |||
49 | #define GICD_CTLR_ENABLE_G1A (1U << 1) | 49 | #define GICD_CTLR_ENABLE_G1A (1U << 1) |
50 | #define GICD_CTLR_ENABLE_G1 (1U << 0) | 50 | #define GICD_CTLR_ENABLE_G1 (1U << 0) |
51 | 51 | ||
52 | #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) | ||
53 | #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) | ||
54 | #define GICD_TYPER_LPIS (1U << 17) | ||
55 | |||
52 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) | 56 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) |
53 | #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) | 57 | #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) |
54 | 58 | ||
@@ -76,9 +80,27 @@ | |||
76 | #define GICR_MOVALLR 0x0110 | 80 | #define GICR_MOVALLR 0x0110 |
77 | #define GICR_PIDR2 GICD_PIDR2 | 81 | #define GICR_PIDR2 GICD_PIDR2 |
78 | 82 | ||
83 | #define GICR_CTLR_ENABLE_LPIS (1UL << 0) | ||
84 | |||
85 | #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) | ||
86 | |||
79 | #define GICR_WAKER_ProcessorSleep (1U << 1) | 87 | #define GICR_WAKER_ProcessorSleep (1U << 1) |
80 | #define GICR_WAKER_ChildrenAsleep (1U << 2) | 88 | #define GICR_WAKER_ChildrenAsleep (1U << 2) |
81 | 89 | ||
90 | #define GICR_PROPBASER_NonShareable (0U << 10) | ||
91 | #define GICR_PROPBASER_InnerShareable (1U << 10) | ||
92 | #define GICR_PROPBASER_OuterShareable (2U << 10) | ||
93 | #define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) | ||
94 | #define GICR_PROPBASER_nCnB (0U << 7) | ||
95 | #define GICR_PROPBASER_nC (1U << 7) | ||
96 | #define GICR_PROPBASER_RaWt (2U << 7) | ||
97 | #define GICR_PROPBASER_RaWb (3U << 7) | ||
98 | #define GICR_PROPBASER_WaWt (4U << 7) | ||
99 | #define GICR_PROPBASER_WaWb (5U << 7) | ||
100 | #define GICR_PROPBASER_RaWaWt (6U << 7) | ||
101 | #define GICR_PROPBASER_RaWaWb (7U << 7) | ||
102 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) | ||
103 | |||
82 | /* | 104 | /* |
83 | * Re-Distributor registers, offsets from SGI_base | 105 | * Re-Distributor registers, offsets from SGI_base |
84 | */ | 106 | */ |
@@ -91,9 +113,93 @@ | |||
91 | #define GICR_IPRIORITYR0 GICD_IPRIORITYR | 113 | #define GICR_IPRIORITYR0 GICD_IPRIORITYR |
92 | #define GICR_ICFGR0 GICD_ICFGR | 114 | #define GICR_ICFGR0 GICD_ICFGR |
93 | 115 | ||
116 | #define GICR_TYPER_PLPIS (1U << 0) | ||
94 | #define GICR_TYPER_VLPIS (1U << 1) | 117 | #define GICR_TYPER_VLPIS (1U << 1) |
95 | #define GICR_TYPER_LAST (1U << 4) | 118 | #define GICR_TYPER_LAST (1U << 4) |
96 | 119 | ||
120 | #define LPI_PROP_GROUP1 (1 << 1) | ||
121 | #define LPI_PROP_ENABLED (1 << 0) | ||
122 | |||
123 | /* | ||
124 | * ITS registers, offsets from ITS_base | ||
125 | */ | ||
126 | #define GITS_CTLR 0x0000 | ||
127 | #define GITS_IIDR 0x0004 | ||
128 | #define GITS_TYPER 0x0008 | ||
129 | #define GITS_CBASER 0x0080 | ||
130 | #define GITS_CWRITER 0x0088 | ||
131 | #define GITS_CREADR 0x0090 | ||
132 | #define GITS_BASER 0x0100 | ||
133 | #define GITS_PIDR2 GICR_PIDR2 | ||
134 | |||
135 | #define GITS_TRANSLATER 0x10040 | ||
136 | |||
137 | #define GITS_TYPER_PTA (1UL << 19) | ||
138 | |||
139 | #define GITS_CBASER_VALID (1UL << 63) | ||
140 | #define GITS_CBASER_nCnB (0UL << 59) | ||
141 | #define GITS_CBASER_nC (1UL << 59) | ||
142 | #define GITS_CBASER_RaWt (2UL << 59) | ||
143 | #define GITS_CBASER_RaWb (3UL << 59) | ||
144 | #define GITS_CBASER_WaWt (4UL << 59) | ||
145 | #define GITS_CBASER_WaWb (5UL << 59) | ||
146 | #define GITS_CBASER_RaWaWt (6UL << 59) | ||
147 | #define GITS_CBASER_RaWaWb (7UL << 59) | ||
148 | #define GITS_CBASER_NonShareable (0UL << 10) | ||
149 | #define GITS_CBASER_InnerShareable (1UL << 10) | ||
150 | #define GITS_CBASER_OuterShareable (2UL << 10) | ||
151 | #define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) | ||
152 | |||
153 | #define GITS_BASER_NR_REGS 8 | ||
154 | |||
155 | #define GITS_BASER_VALID (1UL << 63) | ||
156 | #define GITS_BASER_nCnB (0UL << 59) | ||
157 | #define GITS_BASER_nC (1UL << 59) | ||
158 | #define GITS_BASER_RaWt (2UL << 59) | ||
159 | #define GITS_BASER_RaWb (3UL << 59) | ||
160 | #define GITS_BASER_WaWt (4UL << 59) | ||
161 | #define GITS_BASER_WaWb (5UL << 59) | ||
162 | #define GITS_BASER_RaWaWt (6UL << 59) | ||
163 | #define GITS_BASER_RaWaWb (7UL << 59) | ||
164 | #define GITS_BASER_TYPE_SHIFT (56) | ||
165 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) | ||
166 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | ||
167 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) | ||
168 | #define GITS_BASER_NonShareable (0UL << 10) | ||
169 | #define GITS_BASER_InnerShareable (1UL << 10) | ||
170 | #define GITS_BASER_OuterShareable (2UL << 10) | ||
171 | #define GITS_BASER_SHAREABILITY_SHIFT (10) | ||
172 | #define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) | ||
173 | #define GITS_BASER_PAGE_SIZE_SHIFT (8) | ||
174 | #define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
175 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
176 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
177 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
178 | |||
179 | #define GITS_BASER_TYPE_NONE 0 | ||
180 | #define GITS_BASER_TYPE_DEVICE 1 | ||
181 | #define GITS_BASER_TYPE_VCPU 2 | ||
182 | #define GITS_BASER_TYPE_CPU 3 | ||
183 | #define GITS_BASER_TYPE_COLLECTION 4 | ||
184 | #define GITS_BASER_TYPE_RESERVED5 5 | ||
185 | #define GITS_BASER_TYPE_RESERVED6 6 | ||
186 | #define GITS_BASER_TYPE_RESERVED7 7 | ||
187 | |||
188 | /* | ||
189 | * ITS commands | ||
190 | */ | ||
191 | #define GITS_CMD_MAPD 0x08 | ||
192 | #define GITS_CMD_MAPC 0x09 | ||
193 | #define GITS_CMD_MAPVI 0x0a | ||
194 | #define GITS_CMD_MOVI 0x01 | ||
195 | #define GITS_CMD_DISCARD 0x0f | ||
196 | #define GITS_CMD_INV 0x0c | ||
197 | #define GITS_CMD_MOVALL 0x0e | ||
198 | #define GITS_CMD_INVALL 0x0d | ||
199 | #define GITS_CMD_INT 0x03 | ||
200 | #define GITS_CMD_CLEAR 0x04 | ||
201 | #define GITS_CMD_SYNC 0x05 | ||
202 | |||
97 | /* | 203 | /* |
98 | * CPU interface registers | 204 | * CPU interface registers |
99 | */ | 205 | */ |
@@ -189,12 +295,34 @@ | |||
189 | 295 | ||
190 | #include <linux/stringify.h> | 296 | #include <linux/stringify.h> |
191 | 297 | ||
298 | /* | ||
299 | * We need a value to serve as an irq-type for LPIs. Choose one that will | ||
300 | * hopefully pique the interest of the reviewer. | ||
301 | */ | ||
302 | #define GIC_IRQ_TYPE_LPI 0xa110c8ed | ||
303 | |||
304 | struct rdists { | ||
305 | struct { | ||
306 | void __iomem *rd_base; | ||
307 | struct page *pend_page; | ||
308 | phys_addr_t phys_base; | ||
309 | } __percpu *rdist; | ||
310 | struct page *prop_page; | ||
311 | int id_bits; | ||
312 | u64 flags; | ||
313 | }; | ||
314 | |||
192 | static inline void gic_write_eoir(u64 irq) | 315 | static inline void gic_write_eoir(u64 irq) |
193 | { | 316 | { |
194 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); | 317 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); |
195 | isb(); | 318 | isb(); |
196 | } | 319 | } |
197 | 320 | ||
321 | struct irq_domain; | ||
322 | int its_cpu_init(void); | ||
323 | int its_init(struct device_node *node, struct rdists *rdists, | ||
324 | struct irq_domain *domain); | ||
325 | |||
198 | #endif | 326 | #endif |
199 | 327 | ||
200 | #endif | 328 | #endif |
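
A short sketch of the new GITS_BASER accessor macros decoding the per-table registers; its_base is a hypothetical ioremapped ITS region and a 64-bit readq_relaxed() is assumed to be available:

static void example_dump_its_tables(void __iomem *its_base)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its_base + GITS_BASER + i * 8);
		int type = GITS_BASER_TYPE(val);
		int esz = GITS_BASER_ENTRY_SIZE(val);

		if (type == GITS_BASER_TYPE_NONE)
			continue;	/* table not implemented */

		pr_info("ITS table %d: type %d, %d-byte entries\n",
			i, type, esz);
	}
}
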
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 13eed92c7d24..71d706d5f169 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -91,6 +91,8 @@ | |||
91 | 91 | ||
92 | #ifndef __ASSEMBLY__ | 92 | #ifndef __ASSEMBLY__ |
93 | 93 | ||
94 | #include <linux/irqdomain.h> | ||
95 | |||
94 | struct device_node; | 96 | struct device_node; |
95 | 97 | ||
96 | extern struct irq_chip gic_arch_extn; | 98 | extern struct irq_chip gic_arch_extn; |
@@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start, | |||
106 | gic_init_bases(nr, start, dist, cpu, 0, NULL); | 108 | gic_init_bases(nr, start, dist, cpu, 0, NULL); |
107 | } | 109 | } |
108 | 110 | ||
111 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); | ||
112 | |||
109 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq); | 113 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq); |
110 | int gic_get_cpu_id(unsigned int cpu); | 114 | int gic_get_cpu_id(unsigned int cpu); |
111 | void gic_migrate_target(unsigned int new_cpu_id); | 115 | void gic_migrate_target(unsigned int new_cpu_id); |
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 000000000000..420f77b34d02 --- /dev/null +++ b/include/linux/irqchip/mips-gic.h | |||
@@ -0,0 +1,249 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000, 07 MIPS Technologies, Inc. | ||
7 | */ | ||
8 | #ifndef __LINUX_IRQCHIP_MIPS_GIC_H | ||
9 | #define __LINUX_IRQCHIP_MIPS_GIC_H | ||
10 | |||
11 | #include <linux/clocksource.h> | ||
12 | |||
13 | #define GIC_MAX_INTRS 256 | ||
14 | |||
15 | /* Constants */ | ||
16 | #define GIC_POL_POS 1 | ||
17 | #define GIC_POL_NEG 0 | ||
18 | #define GIC_TRIG_EDGE 1 | ||
19 | #define GIC_TRIG_LEVEL 0 | ||
20 | #define GIC_TRIG_DUAL_ENABLE 1 | ||
21 | #define GIC_TRIG_DUAL_DISABLE 0 | ||
22 | |||
23 | #define MSK(n) ((1 << (n)) - 1) | ||
24 | |||
25 | /* Accessors */ | ||
26 | #define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) | ||
27 | |||
28 | /* GIC Address Space */ | ||
29 | #define SHARED_SECTION_OFS 0x0000 | ||
30 | #define SHARED_SECTION_SIZE 0x8000 | ||
31 | #define VPE_LOCAL_SECTION_OFS 0x8000 | ||
32 | #define VPE_LOCAL_SECTION_SIZE 0x4000 | ||
33 | #define VPE_OTHER_SECTION_OFS 0xc000 | ||
34 | #define VPE_OTHER_SECTION_SIZE 0x4000 | ||
35 | #define USM_VISIBLE_SECTION_OFS 0x10000 | ||
36 | #define USM_VISIBLE_SECTION_SIZE 0x10000 | ||
37 | |||
38 | /* Register Map for Shared Section */ | ||
39 | |||
40 | #define GIC_SH_CONFIG_OFS 0x0000 | ||
41 | |||
42 | /* Shared Global Counter */ | ||
43 | #define GIC_SH_COUNTER_31_00_OFS 0x0010 | ||
44 | #define GIC_SH_COUNTER_63_32_OFS 0x0014 | ||
45 | #define GIC_SH_REVISIONID_OFS 0x0020 | ||
46 | |||
47 | /* Convert an interrupt number to a byte offset/bit for multi-word registers */ | ||
48 | #define GIC_INTR_OFS(intr) (((intr) / 32) * 4) | ||
49 | #define GIC_INTR_BIT(intr) ((intr) % 32) | ||
50 | |||
51 | /* Polarity : Reset Value is always 0 */ | ||
52 | #define GIC_SH_SET_POLARITY_OFS 0x0100 | ||
53 | |||
54 | /* Triggering : Reset Value is always 0 */ | ||
55 | #define GIC_SH_SET_TRIGGER_OFS 0x0180 | ||
56 | |||
57 | /* Dual edge triggering : Reset Value is always 0 */ | ||
58 | #define GIC_SH_SET_DUAL_OFS 0x0200 | ||
59 | |||
60 | /* Set/Clear corresponding bit in Edge Detect Register */ | ||
61 | #define GIC_SH_WEDGE_OFS 0x0280 | ||
62 | |||
63 | /* Mask manipulation */ | ||
64 | #define GIC_SH_RMASK_OFS 0x0300 | ||
65 | #define GIC_SH_SMASK_OFS 0x0380 | ||
66 | |||
67 | /* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ | ||
68 | #define GIC_SH_MASK_OFS 0x0400 | ||
69 | |||
70 | /* Pending Global Interrupts (RO) */ | ||
71 | #define GIC_SH_PEND_OFS 0x0480 | ||
72 | |||
73 | /* Maps Interrupt X to a Pin */ | ||
74 | #define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 | ||
75 | #define GIC_SH_MAP_TO_PIN(intr) (4 * (intr)) | ||
76 | |||
77 | /* Maps Interrupt X to a VPE */ | ||
78 | #define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 | ||
79 | #define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ | ||
80 | ((32 * (intr)) + (((vpe) / 32) * 4)) | ||
81 | #define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) | ||
82 | |||
83 | /* Register Map for Local Section */ | ||
84 | #define GIC_VPE_CTL_OFS 0x0000 | ||
85 | #define GIC_VPE_PEND_OFS 0x0004 | ||
86 | #define GIC_VPE_MASK_OFS 0x0008 | ||
87 | #define GIC_VPE_RMASK_OFS 0x000c | ||
88 | #define GIC_VPE_SMASK_OFS 0x0010 | ||
89 | #define GIC_VPE_WD_MAP_OFS 0x0040 | ||
90 | #define GIC_VPE_COMPARE_MAP_OFS 0x0044 | ||
91 | #define GIC_VPE_TIMER_MAP_OFS 0x0048 | ||
92 | #define GIC_VPE_FDC_MAP_OFS 0x004c | ||
93 | #define GIC_VPE_PERFCTR_MAP_OFS 0x0050 | ||
94 | #define GIC_VPE_SWINT0_MAP_OFS 0x0054 | ||
95 | #define GIC_VPE_SWINT1_MAP_OFS 0x0058 | ||
96 | #define GIC_VPE_OTHER_ADDR_OFS 0x0080 | ||
97 | #define GIC_VPE_WD_CONFIG0_OFS 0x0090 | ||
98 | #define GIC_VPE_WD_COUNT0_OFS 0x0094 | ||
99 | #define GIC_VPE_WD_INITIAL0_OFS 0x0098 | ||
100 | #define GIC_VPE_COMPARE_LO_OFS 0x00a0 | ||
101 | #define GIC_VPE_COMPARE_HI_OFS 0x00a4 | ||
102 | |||
103 | #define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 | ||
104 | #define GIC_VPE_EIC_SS(intr) (4 * (intr)) | ||
105 | |||
106 | #define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 | ||
107 | #define GIC_VPE_EIC_VEC(intr) (4 * (intr)) | ||
108 | |||
109 | #define GIC_VPE_TENABLE_NMI_OFS 0x1000 | ||
110 | #define GIC_VPE_TENABLE_YQ_OFS 0x1004 | ||
111 | #define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 | ||
112 | #define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 | ||
113 | |||
114 | /* User Mode Visible Section Register Map */ | ||
115 | #define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 | ||
116 | #define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 | ||
117 | |||
118 | /* Masks */ | ||
119 | #define GIC_SH_CONFIG_COUNTSTOP_SHF 28 | ||
120 | #define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) | ||
121 | |||
122 | #define GIC_SH_CONFIG_COUNTBITS_SHF 24 | ||
123 | #define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) | ||
124 | |||
125 | #define GIC_SH_CONFIG_NUMINTRS_SHF 16 | ||
126 | #define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) | ||
127 | |||
128 | #define GIC_SH_CONFIG_NUMVPES_SHF 0 | ||
129 | #define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) | ||
130 | |||
131 | #define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) | ||
132 | #define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) | ||
133 | |||
134 | #define GIC_MAP_TO_PIN_SHF 31 | ||
135 | #define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) | ||
136 | #define GIC_MAP_TO_NMI_SHF 30 | ||
137 | #define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) | ||
138 | #define GIC_MAP_TO_YQ_SHF 29 | ||
139 | #define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) | ||
140 | #define GIC_MAP_SHF 0 | ||
141 | #define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) | ||
142 | |||
143 | /* GIC_VPE_CTL Masks */ | ||
144 | #define GIC_VPE_CTL_FDC_RTBL_SHF 4 | ||
145 | #define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) | ||
146 | #define GIC_VPE_CTL_SWINT_RTBL_SHF 3 | ||
147 | #define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) | ||
148 | #define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 | ||
149 | #define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) | ||
150 | #define GIC_VPE_CTL_TIMER_RTBL_SHF 1 | ||
151 | #define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) | ||
152 | #define GIC_VPE_CTL_EIC_MODE_SHF 0 | ||
153 | #define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) | ||
154 | |||
155 | /* GIC_VPE_PEND Masks */ | ||
156 | #define GIC_VPE_PEND_WD_SHF 0 | ||
157 | #define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) | ||
158 | #define GIC_VPE_PEND_CMP_SHF 1 | ||
159 | #define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) | ||
160 | #define GIC_VPE_PEND_TIMER_SHF 2 | ||
161 | #define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) | ||
162 | #define GIC_VPE_PEND_PERFCOUNT_SHF 3 | ||
163 | #define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) | ||
164 | #define GIC_VPE_PEND_SWINT0_SHF 4 | ||
165 | #define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) | ||
166 | #define GIC_VPE_PEND_SWINT1_SHF 5 | ||
167 | #define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) | ||
168 | |||
169 | /* GIC_VPE_RMASK Masks */ | ||
170 | #define GIC_VPE_RMASK_WD_SHF 0 | ||
171 | #define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) | ||
172 | #define GIC_VPE_RMASK_CMP_SHF 1 | ||
173 | #define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) | ||
174 | #define GIC_VPE_RMASK_TIMER_SHF 2 | ||
175 | #define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) | ||
176 | #define GIC_VPE_RMASK_PERFCNT_SHF 3 | ||
177 | #define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) | ||
178 | #define GIC_VPE_RMASK_SWINT0_SHF 4 | ||
179 | #define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) | ||
180 | #define GIC_VPE_RMASK_SWINT1_SHF 5 | ||
181 | #define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) | ||
182 | |||
183 | /* GIC_VPE_SMASK Masks */ | ||
184 | #define GIC_VPE_SMASK_WD_SHF 0 | ||
185 | #define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) | ||
186 | #define GIC_VPE_SMASK_CMP_SHF 1 | ||
187 | #define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) | ||
188 | #define GIC_VPE_SMASK_TIMER_SHF 2 | ||
189 | #define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) | ||
190 | #define GIC_VPE_SMASK_PERFCNT_SHF 3 | ||
191 | #define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) | ||
192 | #define GIC_VPE_SMASK_SWINT0_SHF 4 | ||
193 | #define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) | ||
194 | #define GIC_VPE_SMASK_SWINT1_SHF 5 | ||
195 | #define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) | ||
196 | |||
197 | /* GIC nomenclature for Core Interrupt Pins. */ | ||
198 | #define GIC_CPU_INT0 0 /* Core Interrupt 2 */ | ||
199 | #define GIC_CPU_INT1 1 /* . */ | ||
200 | #define GIC_CPU_INT2 2 /* . */ | ||
201 | #define GIC_CPU_INT3 3 /* . */ | ||
202 | #define GIC_CPU_INT4 4 /* . */ | ||
203 | #define GIC_CPU_INT5 5 /* Core Interrupt 7 */ | ||
204 | |||
205 | /* Add 2 to convert GIC CPU pin to core interrupt */ | ||
206 | #define GIC_CPU_PIN_OFFSET 2 | ||
207 | |||
208 | /* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ | ||
209 | #define GIC_CPU_TO_VEC_OFFSET 2 | ||
210 | |||
211 | /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ | ||
212 | #define GIC_PIN_TO_VEC_OFFSET 1 | ||
213 | |||
214 | /* Local GIC interrupts. */ | ||
215 | #define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ | ||
216 | #define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ | ||
217 | #define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ | ||
218 | #define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ | ||
219 | #define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ | ||
220 | #define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ | ||
221 | #define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ | ||
222 | #define GIC_NUM_LOCAL_INTRS 7 | ||
223 | |||
224 | /* Convert between local/shared IRQ number and GIC HW IRQ number. */ | ||
225 | #define GIC_LOCAL_HWIRQ_BASE 0 | ||
226 | #define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) | ||
227 | #define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) | ||
228 | #define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS | ||
229 | #define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) | ||
230 | #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) | ||
231 | |||
232 | extern unsigned int gic_present; | ||
233 | |||
234 | extern void gic_init(unsigned long gic_base_addr, | ||
235 | unsigned long gic_addrspace_size, unsigned int cpu_vec, | ||
236 | unsigned int irqbase); | ||
237 | extern void gic_clocksource_init(unsigned int); | ||
238 | extern cycle_t gic_read_count(void); | ||
239 | extern unsigned int gic_get_count_width(void); | ||
240 | extern cycle_t gic_read_compare(void); | ||
241 | extern void gic_write_compare(cycle_t cnt); | ||
242 | extern void gic_write_cpu_compare(cycle_t cnt, int cpu); | ||
243 | extern void gic_send_ipi(unsigned int intr); | ||
244 | extern unsigned int plat_ipi_call_int_xlate(unsigned int); | ||
245 | extern unsigned int plat_ipi_resched_int_xlate(unsigned int); | ||
246 | extern unsigned int gic_get_timer_pending(void); | ||
247 | extern int gic_get_c0_compare_int(void); | ||
248 | extern int gic_get_c0_perfcount_int(void); | ||
249 | #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ | ||
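Editorial note: a minimal, hypothetical sketch of how the local/shared hwirq conversion macros above are intended to be used by a driver that registers one linear hwirq space; the function names are invented for illustration and are not part of this commit.

/* Local interrupts occupy hwirqs 0..GIC_NUM_LOCAL_INTRS-1; shared
 * interrupts follow immediately after that block.
 */
static irq_hw_number_t demo_gic_timer_hwirq(void)
{
	return GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER);
}

static irq_hw_number_t demo_gic_shared_hwirq(unsigned int shared_intr)
{
	return GIC_SHARED_TO_HWIRQ(shared_intr);
}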
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index b0f9d16e48f6..676d7306a360 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -33,11 +33,14 @@ | |||
33 | #define _LINUX_IRQDOMAIN_H | 33 | #define _LINUX_IRQDOMAIN_H |
34 | 34 | ||
35 | #include <linux/types.h> | 35 | #include <linux/types.h> |
36 | #include <linux/irqhandler.h> | ||
36 | #include <linux/radix-tree.h> | 37 | #include <linux/radix-tree.h> |
37 | 38 | ||
38 | struct device_node; | 39 | struct device_node; |
39 | struct irq_domain; | 40 | struct irq_domain; |
40 | struct of_device_id; | 41 | struct of_device_id; |
42 | struct irq_chip; | ||
43 | struct irq_data; | ||
41 | 44 | ||
42 | /* Number of irqs reserved for a legacy isa controller */ | 45 | /* Number of irqs reserved for a legacy isa controller */ |
43 | #define NUM_ISA_INTERRUPTS 16 | 46 | #define NUM_ISA_INTERRUPTS 16 |
@@ -64,6 +67,16 @@ struct irq_domain_ops { | |||
64 | int (*xlate)(struct irq_domain *d, struct device_node *node, | 67 | int (*xlate)(struct irq_domain *d, struct device_node *node, |
65 | const u32 *intspec, unsigned int intsize, | 68 | const u32 *intspec, unsigned int intsize, |
66 | unsigned long *out_hwirq, unsigned int *out_type); | 69 | unsigned long *out_hwirq, unsigned int *out_type); |
70 | |||
71 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
72 | /* extended V2 interfaces to support hierarchy irq_domains */ | ||
73 | int (*alloc)(struct irq_domain *d, unsigned int virq, | ||
74 | unsigned int nr_irqs, void *arg); | ||
75 | void (*free)(struct irq_domain *d, unsigned int virq, | ||
76 | unsigned int nr_irqs); | ||
77 | void (*activate)(struct irq_domain *d, struct irq_data *irq_data); | ||
78 | void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); | ||
79 | #endif | ||
67 | }; | 80 | }; |
68 | 81 | ||
69 | extern struct irq_domain_ops irq_generic_chip_ops; | 82 | extern struct irq_domain_ops irq_generic_chip_ops; |
@@ -77,6 +90,7 @@ struct irq_domain_chip_generic; | |||
77 | * @ops: pointer to irq_domain methods | 90 | * @ops: pointer to irq_domain methods |
78 | * @host_data: private data pointer for use by owner. Not touched by irq_domain | 91 | * @host_data: private data pointer for use by owner. Not touched by irq_domain |
79 | * core code. | 92 | * core code. |
93 | * @flags: host per irq_domain flags | ||
80 | * | 94 | * |
81 | * Optional elements | 95 | * Optional elements |
82 | * @of_node: Pointer to device tree nodes associated with the irq_domain. Used | 96 | * @of_node: Pointer to device tree nodes associated with the irq_domain. Used |
@@ -84,6 +98,7 @@ struct irq_domain_chip_generic; | |||
84 | * @gc: Pointer to a list of generic chips. There is a helper function for | 98 | * @gc: Pointer to a list of generic chips. There is a helper function for |
85 | * setting up one or more generic chips for interrupt controllers | 99 | * setting up one or more generic chips for interrupt controllers |
86 | * drivers using the generic chip library which uses this pointer. | 100 | * drivers using the generic chip library which uses this pointer. |
101 | * @parent: Pointer to parent irq_domain to support hierarchy irq_domains | ||
87 | * | 102 | * |
88 | * Revmap data, used internally by irq_domain | 103 | * Revmap data, used internally by irq_domain |
89 | * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that | 104 | * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that |
@@ -97,10 +112,14 @@ struct irq_domain { | |||
97 | const char *name; | 112 | const char *name; |
98 | const struct irq_domain_ops *ops; | 113 | const struct irq_domain_ops *ops; |
99 | void *host_data; | 114 | void *host_data; |
115 | unsigned int flags; | ||
100 | 116 | ||
101 | /* Optional data */ | 117 | /* Optional data */ |
102 | struct device_node *of_node; | 118 | struct device_node *of_node; |
103 | struct irq_domain_chip_generic *gc; | 119 | struct irq_domain_chip_generic *gc; |
120 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
121 | struct irq_domain *parent; | ||
122 | #endif | ||
104 | 123 | ||
105 | /* reverse map data. The linear map gets appended to the irq_domain */ | 124 | /* reverse map data. The linear map gets appended to the irq_domain */ |
106 | irq_hw_number_t hwirq_max; | 125 | irq_hw_number_t hwirq_max; |
@@ -110,6 +129,22 @@ struct irq_domain { | |||
110 | unsigned int linear_revmap[]; | 129 | unsigned int linear_revmap[]; |
111 | }; | 130 | }; |
112 | 131 | ||
132 | /* Irq domain flags */ | ||
133 | enum { | ||
134 | /* Irq domain is hierarchical */ | ||
135 | IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), | ||
136 | |||
137 | /* Core calls alloc/free recursive through the domain hierarchy. */ | ||
138 | IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), | ||
139 | |||
140 | /* | ||
141 | * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved | ||
142 | * for implementation specific purposes and ignored by the | ||
143 | * core code. | ||
144 | */ | ||
145 | IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), | ||
146 | }; | ||
147 | |||
113 | #ifdef CONFIG_IRQ_DOMAIN | 148 | #ifdef CONFIG_IRQ_DOMAIN |
114 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, | 149 | struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, |
115 | irq_hw_number_t hwirq_max, int direct_max, | 150 | irq_hw_number_t hwirq_max, int direct_max, |
@@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, | |||
220 | const u32 *intspec, unsigned int intsize, | 255 | const u32 *intspec, unsigned int intsize, |
221 | irq_hw_number_t *out_hwirq, unsigned int *out_type); | 256 | irq_hw_number_t *out_hwirq, unsigned int *out_type); |
222 | 257 | ||
258 | /* V2 interfaces to support hierarchy IRQ domains. */ | ||
259 | extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, | ||
260 | unsigned int virq); | ||
261 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
262 | extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, | ||
263 | unsigned int flags, unsigned int size, | ||
264 | struct device_node *node, | ||
265 | const struct irq_domain_ops *ops, void *host_data); | ||
266 | extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, | ||
267 | unsigned int nr_irqs, int node, void *arg, | ||
268 | bool realloc); | ||
269 | extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); | ||
270 | extern void irq_domain_activate_irq(struct irq_data *irq_data); | ||
271 | extern void irq_domain_deactivate_irq(struct irq_data *irq_data); | ||
272 | |||
273 | static inline int irq_domain_alloc_irqs(struct irq_domain *domain, | ||
274 | unsigned int nr_irqs, int node, void *arg) | ||
275 | { | ||
276 | return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); | ||
277 | } | ||
278 | |||
279 | extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, | ||
280 | unsigned int virq, | ||
281 | irq_hw_number_t hwirq, | ||
282 | struct irq_chip *chip, | ||
283 | void *chip_data); | ||
284 | extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, | ||
285 | irq_hw_number_t hwirq, struct irq_chip *chip, | ||
286 | void *chip_data, irq_flow_handler_t handler, | ||
287 | void *handler_data, const char *handler_name); | ||
288 | extern void irq_domain_reset_irq_data(struct irq_data *irq_data); | ||
289 | extern void irq_domain_free_irqs_common(struct irq_domain *domain, | ||
290 | unsigned int virq, | ||
291 | unsigned int nr_irqs); | ||
292 | extern void irq_domain_free_irqs_top(struct irq_domain *domain, | ||
293 | unsigned int virq, unsigned int nr_irqs); | ||
294 | |||
295 | extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, | ||
296 | unsigned int irq_base, | ||
297 | unsigned int nr_irqs, void *arg); | ||
298 | |||
299 | extern void irq_domain_free_irqs_parent(struct irq_domain *domain, | ||
300 | unsigned int irq_base, | ||
301 | unsigned int nr_irqs); | ||
302 | |||
303 | static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) | ||
304 | { | ||
305 | return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; | ||
306 | } | ||
307 | #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | ||
308 | static inline void irq_domain_activate_irq(struct irq_data *data) { } | ||
309 | static inline void irq_domain_deactivate_irq(struct irq_data *data) { } | ||
310 | static inline int irq_domain_alloc_irqs(struct irq_domain *domain, | ||
311 | unsigned int nr_irqs, int node, void *arg) | ||
312 | { | ||
313 | return -1; | ||
314 | } | ||
315 | |||
316 | static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) | ||
317 | { | ||
318 | return false; | ||
319 | } | ||
320 | #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ | ||
321 | |||
223 | #else /* CONFIG_IRQ_DOMAIN */ | 322 | #else /* CONFIG_IRQ_DOMAIN */ |
224 | static inline void irq_dispose_mapping(unsigned int virq) { } | 323 | static inline void irq_dispose_mapping(unsigned int virq) { } |
324 | static inline void irq_domain_activate_irq(struct irq_data *data) { } | ||
325 | static inline void irq_domain_deactivate_irq(struct irq_data *data) { } | ||
225 | #endif /* !CONFIG_IRQ_DOMAIN */ | 326 | #endif /* !CONFIG_IRQ_DOMAIN */ |
226 | 327 | ||
227 | #endif /* _LINUX_IRQDOMAIN_H */ | 328 | #endif /* _LINUX_IRQDOMAIN_H */ |
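Editorial note: to make the new hierarchy ("V2") interfaces concrete, here is a minimal, hypothetical child-domain implementation stacked on a parent domain. my_irq_chip, the 64-interrupt size, and the convention that the allocation argument carries the base hwirq are all invented for illustration; only the irq_domain_* calls come from the declarations added above.

static struct irq_chip my_irq_chip = {
	.name = "demo-child",
	/* mask/unmask/eoi callbacks would forward to the parent chip */
};

static int my_domain_alloc(struct irq_domain *d, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = *(irq_hw_number_t *)arg;	/* assumed encoding */
	unsigned int i;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		/* bind each virq to this domain's hwirq and chip */
		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
						    &my_irq_chip, d->host_data);
		if (ret)
			return ret;
	}

	/* let the parent domain allocate its resources for the same virqs */
	return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
}

static const struct irq_domain_ops my_domain_ops = {
	.alloc	= my_domain_alloc,
	.free	= irq_domain_free_irqs_common,
};

/* irq_domain_add_hierarchy() links the child to its parent and sets
 * IRQ_DOMAIN_FLAG_HIERARCHY, so irq_domain_is_hierarchy() returns true.
 */
static struct irq_domain *my_create_domain(struct irq_domain *parent,
					   struct device_node *node,
					   void *host_data)
{
	return irq_domain_add_hierarchy(parent, 0, 64, node,
					&my_domain_ops, host_data);
}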
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h new file mode 100644 index 000000000000..62d543004197 --- /dev/null +++ b/include/linux/irqhandler.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _LINUX_IRQHANDLER_H | ||
2 | #define _LINUX_IRQHANDLER_H | ||
3 | |||
4 | /* | ||
5 | * Interrupt flow handler typedefs are defined here to avoid circular | ||
6 | * include dependencies. | ||
7 | */ | ||
8 | |||
9 | struct irq_desc; | ||
10 | struct irq_data; | ||
11 | typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); | ||
12 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | ||
13 | |||
14 | #endif | ||
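Editorial note: for context, a handler matching the irq_flow_handler_t typedef above might look like the following chained demultiplexer; the pending-register read, demo_child_irq_base and demo_setup() are placeholders, not code from this series.

static unsigned int demo_child_irq_base;	/* hypothetical linear irq base */

static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	/* a real handler would read the controller's pending register */
	unsigned long pending = 0x1;
	unsigned int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(demo_child_irq_base + bit);
}

static void demo_setup(unsigned int parent_irq)
{
	/* install the function as the flow handler for the parent line */
	irq_set_chained_handler(parent_irq, demo_demux_handler);
}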
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h deleted file mode 100644 index 2dcd1b3aafc8..000000000000 --- a/include/linux/kcmp.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | #ifndef _LINUX_KCMP_H | ||
2 | #define _LINUX_KCMP_H | ||
3 | |||
4 | /* Comparison type */ | ||
5 | enum kcmp_type { | ||
6 | KCMP_FILE, | ||
7 | KCMP_VM, | ||
8 | KCMP_FILES, | ||
9 | KCMP_FS, | ||
10 | KCMP_SIGHAND, | ||
11 | KCMP_IO, | ||
12 | KCMP_SYSVSEM, | ||
13 | |||
14 | KCMP_TYPES, | ||
15 | }; | ||
16 | |||
17 | #endif /* _LINUX_KCMP_H */ | ||
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index 866caaa9e2bb..c2ce155d83cc 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h | |||
@@ -22,4 +22,17 @@ | |||
22 | */ | 22 | */ |
23 | #define KERN_CONT "" | 23 | #define KERN_CONT "" |
24 | 24 | ||
25 | /* integer equivalents of KERN_<LEVEL> */ | ||
26 | #define LOGLEVEL_SCHED -2 /* Deferred messages from sched code | ||
27 | * are set to this special level */ | ||
28 | #define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ | ||
29 | #define LOGLEVEL_EMERG 0 /* system is unusable */ | ||
30 | #define LOGLEVEL_ALERT 1 /* action must be taken immediately */ | ||
31 | #define LOGLEVEL_CRIT 2 /* critical conditions */ | ||
32 | #define LOGLEVEL_ERR 3 /* error conditions */ | ||
33 | #define LOGLEVEL_WARNING 4 /* warning conditions */ | ||
34 | #define LOGLEVEL_NOTICE 5 /* normal but significant condition */ | ||
35 | #define LOGLEVEL_INFO 6 /* informational */ | ||
36 | #define LOGLEVEL_DEBUG 7 /* debug-level messages */ | ||
37 | |||
25 | #endif | 38 | #endif |
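Editorial note: a small, hypothetical helper showing how the new integer levels line up with the existing KERN_&lt;LEVEL&gt; prefixes, which encode a digit after KERN_SOH; this is only an illustration of the mapping, not code from the patch.

/* Map the level character of a KERN_<LEVEL> prefix ("\001" followed by
 * '0'..'7') to the LOGLEVEL_* integers defined above.
 */
static int demo_level_from_prefix(const char *msg)
{
	if (msg[0] == KERN_SOH_ASCII && msg[1] >= '0' && msg[1] <= '7')
		return msg[1] - '0';	/* LOGLEVEL_EMERG .. LOGLEVEL_DEBUG */

	return LOGLEVEL_DEFAULT;	/* no prefix: fall back to the default */
}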
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3d770f5564b8..5449d2f4a1ef 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -162,6 +162,7 @@ extern int _cond_resched(void); | |||
162 | #endif | 162 | #endif |
163 | 163 | ||
164 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 164 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
165 | void ___might_sleep(const char *file, int line, int preempt_offset); | ||
165 | void __might_sleep(const char *file, int line, int preempt_offset); | 166 | void __might_sleep(const char *file, int line, int preempt_offset); |
166 | /** | 167 | /** |
167 | * might_sleep - annotation for functions that can sleep | 168 | * might_sleep - annotation for functions that can sleep |
@@ -175,10 +176,14 @@ extern int _cond_resched(void); | |||
175 | */ | 176 | */ |
176 | # define might_sleep() \ | 177 | # define might_sleep() \ |
177 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) | 178 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) |
179 | # define sched_annotate_sleep() __set_current_state(TASK_RUNNING) | ||
178 | #else | 180 | #else |
181 | static inline void ___might_sleep(const char *file, int line, | ||
182 | int preempt_offset) { } | ||
179 | static inline void __might_sleep(const char *file, int line, | 183 | static inline void __might_sleep(const char *file, int line, |
180 | int preempt_offset) { } | 184 | int preempt_offset) { } |
181 | # define might_sleep() do { might_resched(); } while (0) | 185 | # define might_sleep() do { might_resched(); } while (0) |
186 | # define sched_annotate_sleep() do { } while (0) | ||
182 | #endif | 187 | #endif |
183 | 188 | ||
184 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) | 189 | #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) |
@@ -411,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr); | |||
411 | extern int kernel_text_address(unsigned long addr); | 416 | extern int kernel_text_address(unsigned long addr); |
412 | extern int func_ptr_is_kernel_text(void *ptr); | 417 | extern int func_ptr_is_kernel_text(void *ptr); |
413 | 418 | ||
414 | struct pid; | ||
415 | extern struct pid *session_of_pgrp(struct pid *pgrp); | ||
416 | |||
417 | unsigned long int_sqrt(unsigned long); | 419 | unsigned long int_sqrt(unsigned long); |
418 | 420 | ||
419 | extern void bust_spinlocks(int yes); | 421 | extern void bust_spinlocks(int yes); |
@@ -422,6 +424,7 @@ extern int panic_timeout; | |||
422 | extern int panic_on_oops; | 424 | extern int panic_on_oops; |
423 | extern int panic_on_unrecovered_nmi; | 425 | extern int panic_on_unrecovered_nmi; |
424 | extern int panic_on_io_nmi; | 426 | extern int panic_on_io_nmi; |
427 | extern int panic_on_warn; | ||
425 | extern int sysctl_panic_on_stackoverflow; | 428 | extern int sysctl_panic_on_stackoverflow; |
426 | /* | 429 | /* |
427 | * Only to be used by arch init code. If the user over-wrote the default | 430 | * Only to be used by arch init code. If the user over-wrote the default |
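Editorial note: a rough sketch of the debugging annotations touched above, with a hypothetical device helper (demo_chip/demo_bus_read are invented) rather than anything from this patch.

struct demo_chip;
int demo_bus_read(struct demo_chip *chip, unsigned int reg, unsigned int *val);

/* A helper that can block annotates itself, so CONFIG_DEBUG_ATOMIC_SLEEP
 * catches callers running in atomic context.
 */
static int demo_read_status(struct demo_chip *chip, unsigned int *val)
{
	might_sleep();			/* bus access below may block */
	return demo_bus_read(chip, 0x10, val);
}

The new sched_annotate_sleep() goes the other way: code that knowingly calls a blocking primitive while the task is in a sleeping state uses it to reset the state to TASK_RUNNING, keeping the same debug check quiet for that known-safe pattern.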
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30faf797c2c3..d4e01b358341 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
@@ -179,6 +179,7 @@ struct kernfs_open_file { | |||
179 | struct mutex mutex; | 179 | struct mutex mutex; |
180 | int event; | 180 | int event; |
181 | struct list_head list; | 181 | struct list_head list; |
182 | char *prealloc_buf; | ||
182 | 183 | ||
183 | size_t atomic_write_len; | 184 | size_t atomic_write_len; |
184 | bool mmapped; | 185 | bool mmapped; |
@@ -214,6 +215,13 @@ struct kernfs_ops { | |||
214 | * larger ones are rejected with -E2BIG. | 215 | * larger ones are rejected with -E2BIG. |
215 | */ | 216 | */ |
216 | size_t atomic_write_len; | 217 | size_t atomic_write_len; |
218 | /* | ||
219 | * "prealloc" causes a buffer to be allocated at open for | ||
220 | * all read/write requests. As ->seq_show uses seq_read() | ||
221 | * which does its own allocation, it is incompatible with | ||
222 | * ->prealloc. Provide ->read and ->write with ->prealloc. | ||
223 | */ | ||
224 | bool prealloc; | ||
217 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, | 225 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, |
218 | loff_t off); | 226 | loff_t off); |
219 | 227 | ||
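Editorial note: a hypothetical kernfs attribute using the new prealloc flag; per the comment added above it supplies ->read/->write rather than ->seq_show, since seq_read() allocates its own buffer. The handlers and the "42" payload are invented for illustration.

static ssize_t demo_read(struct kernfs_open_file *of, char *buf,
			 size_t bytes, loff_t off)
{
	/* buf is the buffer preallocated at open time; fill it and return
	 * the number of bytes produced */
	return scnprintf(buf, bytes, "%d\n", 42);
}

static ssize_t demo_write(struct kernfs_open_file *of, char *buf,
			  size_t bytes, loff_t off)
{
	/* parse the preallocated, already-copied-in buffer here */
	return bytes;
}

static const struct kernfs_ops demo_kf_ops = {
	.prealloc	= true,
	.read		= demo_read,
	.write		= demo_write,
};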
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 057e95971014..e705467ddb47 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
@@ -21,6 +21,8 @@ | |||
21 | #ifndef __KMEMLEAK_H | 21 | #ifndef __KMEMLEAK_H |
22 | #define __KMEMLEAK_H | 22 | #define __KMEMLEAK_H |
23 | 23 | ||
24 | #include <linux/slab.h> | ||
25 | |||
24 | #ifdef CONFIG_DEBUG_KMEMLEAK | 26 | #ifdef CONFIG_DEBUG_KMEMLEAK |
25 | 27 | ||
26 | extern void kmemleak_init(void) __ref; | 28 | extern void kmemleak_init(void) __ref; |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f7296e57d614..5297f9fa0ef2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | |||
335 | extern int arch_prepare_kprobe_ftrace(struct kprobe *p); | 335 | extern int arch_prepare_kprobe_ftrace(struct kprobe *p); |
336 | #endif | 336 | #endif |
337 | 337 | ||
338 | int arch_check_ftrace_location(struct kprobe *p); | ||
338 | 339 | ||
339 | /* Get the kprobe at this addr (if any) - called with preemption disabled */ | 340 | /* Get the kprobe at this addr (if any) - called with preemption disabled */ |
340 | struct kprobe *get_kprobe(void *addr); | 341 | struct kprobe *get_kprobe(void *addr); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea53b04993f2..a6059bdf7b03 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm); | |||
703 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); | 703 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
704 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 704 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
705 | 705 | ||
706 | bool kvm_is_mmio_pfn(pfn_t pfn); | 706 | bool kvm_is_reserved_pfn(pfn_t pfn); |
707 | 707 | ||
708 | struct kvm_irq_ack_notifier { | 708 | struct kvm_irq_ack_notifier { |
709 | struct hlist_node link; | 709 | struct hlist_node link; |
diff --git a/include/linux/leds.h b/include/linux/leds.h index a57611d0c94e..cfceef32c9b3 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define __LINUX_LEDS_H_INCLUDED | 13 | #define __LINUX_LEDS_H_INCLUDED |
14 | 14 | ||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/mutex.h> | ||
16 | #include <linux/rwsem.h> | 17 | #include <linux/rwsem.h> |
17 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
18 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
@@ -42,11 +43,20 @@ struct led_classdev { | |||
42 | #define LED_BLINK_ONESHOT (1 << 17) | 43 | #define LED_BLINK_ONESHOT (1 << 17) |
43 | #define LED_BLINK_ONESHOT_STOP (1 << 18) | 44 | #define LED_BLINK_ONESHOT_STOP (1 << 18) |
44 | #define LED_BLINK_INVERT (1 << 19) | 45 | #define LED_BLINK_INVERT (1 << 19) |
46 | #define LED_SYSFS_DISABLE (1 << 20) | ||
47 | #define SET_BRIGHTNESS_ASYNC (1 << 21) | ||
48 | #define SET_BRIGHTNESS_SYNC (1 << 22) | ||
45 | 49 | ||
46 | /* Set LED brightness level */ | 50 | /* Set LED brightness level */ |
47 | /* Must not sleep, use a workqueue if needed */ | 51 | /* Must not sleep, use a workqueue if needed */ |
48 | void (*brightness_set)(struct led_classdev *led_cdev, | 52 | void (*brightness_set)(struct led_classdev *led_cdev, |
49 | enum led_brightness brightness); | 53 | enum led_brightness brightness); |
54 | /* | ||
55 | * Set LED brightness level immediately - it can block the caller for | ||
56 | * the time required for accessing a LED device register. | ||
57 | */ | ||
58 | int (*brightness_set_sync)(struct led_classdev *led_cdev, | ||
59 | enum led_brightness brightness); | ||
50 | /* Get LED brightness level */ | 60 | /* Get LED brightness level */ |
51 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); | 61 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); |
52 | 62 | ||
@@ -85,6 +95,9 @@ struct led_classdev { | |||
85 | /* true if activated - deactivate routine uses it to do cleanup */ | 95 | /* true if activated - deactivate routine uses it to do cleanup */ |
86 | bool activated; | 96 | bool activated; |
87 | #endif | 97 | #endif |
98 | |||
99 | /* Ensures consistent access to the LED Flash Class device */ | ||
100 | struct mutex led_access; | ||
88 | }; | 101 | }; |
89 | 102 | ||
90 | extern int led_classdev_register(struct device *parent, | 103 | extern int led_classdev_register(struct device *parent, |
@@ -151,6 +164,33 @@ extern void led_set_brightness(struct led_classdev *led_cdev, | |||
151 | */ | 164 | */ |
152 | extern int led_update_brightness(struct led_classdev *led_cdev); | 165 | extern int led_update_brightness(struct led_classdev *led_cdev); |
153 | 166 | ||
167 | /** | ||
168 | * led_sysfs_disable - disable LED sysfs interface | ||
169 | * @led_cdev: the LED to set | ||
170 | * | ||
171 | * Disable the led_cdev's sysfs interface. | ||
172 | */ | ||
173 | extern void led_sysfs_disable(struct led_classdev *led_cdev); | ||
174 | |||
175 | /** | ||
176 | * led_sysfs_enable - enable LED sysfs interface | ||
177 | * @led_cdev: the LED to set | ||
178 | * | ||
179 | * Enable the led_cdev's sysfs interface. | ||
180 | */ | ||
181 | extern void led_sysfs_enable(struct led_classdev *led_cdev); | ||
182 | |||
183 | /** | ||
184 | * led_sysfs_is_disabled - check if LED sysfs interface is disabled | ||
185 | * @led_cdev: the LED to query | ||
186 | * | ||
187 | * Returns: true if the led_cdev's sysfs interface is disabled. | ||
188 | */ | ||
189 | static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev) | ||
190 | { | ||
191 | return led_cdev->flags & LED_SYSFS_DISABLE; | ||
192 | } | ||
193 | |||
154 | /* | 194 | /* |
155 | * LED Triggers | 195 | * LED Triggers |
156 | */ | 196 | */ |
@@ -261,6 +301,7 @@ struct gpio_led { | |||
261 | unsigned retain_state_suspended : 1; | 301 | unsigned retain_state_suspended : 1; |
262 | unsigned default_state : 2; | 302 | unsigned default_state : 2; |
263 | /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ | 303 | /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ |
304 | struct gpio_desc *gpiod; | ||
264 | }; | 305 | }; |
265 | #define LEDS_GPIO_DEFSTATE_OFF 0 | 306 | #define LEDS_GPIO_DEFSTATE_OFF 0 |
266 | #define LEDS_GPIO_DEFSTATE_ON 1 | 307 | #define LEDS_GPIO_DEFSTATE_ON 1 |
@@ -273,7 +314,7 @@ struct gpio_led_platform_data { | |||
273 | #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ | 314 | #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ |
274 | #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ | 315 | #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ |
275 | #define GPIO_LED_BLINK 2 /* Please, blink */ | 316 | #define GPIO_LED_BLINK 2 /* Please, blink */ |
276 | int (*gpio_blink_set)(unsigned gpio, int state, | 317 | int (*gpio_blink_set)(struct gpio_desc *desc, int state, |
277 | unsigned long *delay_on, | 318 | unsigned long *delay_on, |
278 | unsigned long *delay_off); | 319 | unsigned long *delay_off); |
279 | }; | 320 | }; |
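Editorial note: a hypothetical flash-style driver claiming exclusive control of an LED with the new sysfs-disable helpers and led_access mutex; the locking discipline shown here is an assumption for the sketch, and demo_flash_strobe() is not part of the patch.

static int demo_flash_strobe(struct led_classdev *led_cdev)
{
	int ret = 0;

	mutex_lock(&led_cdev->led_access);

	if (led_sysfs_is_disabled(led_cdev)) {	/* already claimed elsewhere */
		ret = -EBUSY;
		goto out;
	}

	led_sysfs_disable(led_cdev);	/* userspace brightness writes rejected */

	if (led_cdev->brightness_set_sync)	/* blocking, immediate update */
		ret = led_cdev->brightness_set_sync(led_cdev, LED_FULL);

	led_sysfs_enable(led_cdev);	/* hand control back to sysfs */
out:
	mutex_unlock(&led_cdev->led_access);
	return ret;
}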
diff --git a/include/linux/libata.h b/include/linux/libata.h index bd5fefeaf548..2d182413b1db 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -191,7 +191,8 @@ enum { | |||
191 | ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ | 191 | ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ |
192 | ATA_DEV_SEMB = 7, /* SEMB */ | 192 | ATA_DEV_SEMB = 7, /* SEMB */ |
193 | ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ | 193 | ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ |
194 | ATA_DEV_NONE = 9, /* no device */ | 194 | ATA_DEV_ZAC = 9, /* ZAC device */ |
195 | ATA_DEV_NONE = 10, /* no device */ | ||
195 | 196 | ||
196 | /* struct ata_link flags */ | 197 | /* struct ata_link flags */ |
197 | ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ | 198 | ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ |
@@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); | |||
1191 | extern int ata_scsi_slave_config(struct scsi_device *sdev); | 1192 | extern int ata_scsi_slave_config(struct scsi_device *sdev); |
1192 | extern void ata_scsi_slave_destroy(struct scsi_device *sdev); | 1193 | extern void ata_scsi_slave_destroy(struct scsi_device *sdev); |
1193 | extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, | 1194 | extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, |
1194 | int queue_depth, int reason); | 1195 | int queue_depth); |
1195 | extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, | 1196 | extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, |
1196 | int queue_depth, int reason); | 1197 | int queue_depth); |
1197 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); | 1198 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); |
1198 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); | 1199 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
1199 | extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); | 1200 | extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); |
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag) | |||
1491 | static inline unsigned int ata_class_enabled(unsigned int class) | 1492 | static inline unsigned int ata_class_enabled(unsigned int class) |
1492 | { | 1493 | { |
1493 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || | 1494 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || |
1494 | class == ATA_DEV_PMP || class == ATA_DEV_SEMB; | 1495 | class == ATA_DEV_PMP || class == ATA_DEV_SEMB || |
1496 | class == ATA_DEV_ZAC; | ||
1495 | } | 1497 | } |
1496 | 1498 | ||
1497 | static inline unsigned int ata_class_disabled(unsigned int class) | 1499 | static inline unsigned int ata_class_disabled(unsigned int class) |
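Editorial note: a quick, invented illustration of how the new ATA_DEV_ZAC class participates in the enabled-class check extended above; the helpers are not from the patch.

static bool demo_dev_is_zoned_ata(const struct ata_device *dev)
{
	/* host-managed/host-aware zoned devices now probe as their own class */
	return dev->class == ATA_DEV_ZAC;
}

static bool demo_dev_usable(const struct ata_device *dev)
{
	/* true for ATA, ATAPI, PMP, SEMB and, after this change, ZAC */
	return ata_class_enabled(dev->class);
}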
diff --git a/include/linux/list.h b/include/linux/list.h index f33f831eb3c8..feb773c76ee0 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
346 | * list_entry - get the struct for this entry | 346 | * list_entry - get the struct for this entry |
347 | * @ptr: the &struct list_head pointer. | 347 | * @ptr: the &struct list_head pointer. |
348 | * @type: the type of the struct this is embedded in. | 348 | * @type: the type of the struct this is embedded in. |
349 | * @member: the name of the list_struct within the struct. | 349 | * @member: the name of the list_head within the struct. |
350 | */ | 350 | */ |
351 | #define list_entry(ptr, type, member) \ | 351 | #define list_entry(ptr, type, member) \ |
352 | container_of(ptr, type, member) | 352 | container_of(ptr, type, member) |
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
355 | * list_first_entry - get the first element from a list | 355 | * list_first_entry - get the first element from a list |
356 | * @ptr: the list head to take the element from. | 356 | * @ptr: the list head to take the element from. |
357 | * @type: the type of the struct this is embedded in. | 357 | * @type: the type of the struct this is embedded in. |
358 | * @member: the name of the list_struct within the struct. | 358 | * @member: the name of the list_head within the struct. |
359 | * | 359 | * |
360 | * Note, that list is expected to be not empty. | 360 | * Note, that list is expected to be not empty. |
361 | */ | 361 | */ |
@@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
366 | * list_last_entry - get the last element from a list | 366 | * list_last_entry - get the last element from a list |
367 | * @ptr: the list head to take the element from. | 367 | * @ptr: the list head to take the element from. |
368 | * @type: the type of the struct this is embedded in. | 368 | * @type: the type of the struct this is embedded in. |
369 | * @member: the name of the list_struct within the struct. | 369 | * @member: the name of the list_head within the struct. |
370 | * | 370 | * |
371 | * Note, that list is expected to be not empty. | 371 | * Note, that list is expected to be not empty. |
372 | */ | 372 | */ |
@@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
377 | * list_first_entry_or_null - get the first element from a list | 377 | * list_first_entry_or_null - get the first element from a list |
378 | * @ptr: the list head to take the element from. | 378 | * @ptr: the list head to take the element from. |
379 | * @type: the type of the struct this is embedded in. | 379 | * @type: the type of the struct this is embedded in. |
380 | * @member: the name of the list_struct within the struct. | 380 | * @member: the name of the list_head within the struct. |
381 | * | 381 | * |
382 | * Note that if the list is empty, it returns NULL. | 382 | * Note that if the list is empty, it returns NULL. |
383 | */ | 383 | */ |
@@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
387 | /** | 387 | /** |
388 | * list_next_entry - get the next element in list | 388 | * list_next_entry - get the next element in list |
389 | * @pos: the type * to cursor | 389 | * @pos: the type * to cursor |
390 | * @member: the name of the list_struct within the struct. | 390 | * @member: the name of the list_head within the struct. |
391 | */ | 391 | */ |
392 | #define list_next_entry(pos, member) \ | 392 | #define list_next_entry(pos, member) \ |
393 | list_entry((pos)->member.next, typeof(*(pos)), member) | 393 | list_entry((pos)->member.next, typeof(*(pos)), member) |
@@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
395 | /** | 395 | /** |
396 | * list_prev_entry - get the prev element in list | 396 | * list_prev_entry - get the prev element in list |
397 | * @pos: the type * to cursor | 397 | * @pos: the type * to cursor |
398 | * @member: the name of the list_struct within the struct. | 398 | * @member: the name of the list_head within the struct. |
399 | */ | 399 | */ |
400 | #define list_prev_entry(pos, member) \ | 400 | #define list_prev_entry(pos, member) \ |
401 | list_entry((pos)->member.prev, typeof(*(pos)), member) | 401 | list_entry((pos)->member.prev, typeof(*(pos)), member) |
@@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
441 | * list_for_each_entry - iterate over list of given type | 441 | * list_for_each_entry - iterate over list of given type |
442 | * @pos: the type * to use as a loop cursor. | 442 | * @pos: the type * to use as a loop cursor. |
443 | * @head: the head for your list. | 443 | * @head: the head for your list. |
444 | * @member: the name of the list_struct within the struct. | 444 | * @member: the name of the list_head within the struct. |
445 | */ | 445 | */ |
446 | #define list_for_each_entry(pos, head, member) \ | 446 | #define list_for_each_entry(pos, head, member) \ |
447 | for (pos = list_first_entry(head, typeof(*pos), member); \ | 447 | for (pos = list_first_entry(head, typeof(*pos), member); \ |
@@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
452 | * list_for_each_entry_reverse - iterate backwards over list of given type. | 452 | * list_for_each_entry_reverse - iterate backwards over list of given type. |
453 | * @pos: the type * to use as a loop cursor. | 453 | * @pos: the type * to use as a loop cursor. |
454 | * @head: the head for your list. | 454 | * @head: the head for your list. |
455 | * @member: the name of the list_struct within the struct. | 455 | * @member: the name of the list_head within the struct. |
456 | */ | 456 | */ |
457 | #define list_for_each_entry_reverse(pos, head, member) \ | 457 | #define list_for_each_entry_reverse(pos, head, member) \ |
458 | for (pos = list_last_entry(head, typeof(*pos), member); \ | 458 | for (pos = list_last_entry(head, typeof(*pos), member); \ |
@@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
463 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() | 463 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
464 | * @pos: the type * to use as a start point | 464 | * @pos: the type * to use as a start point |
465 | * @head: the head of the list | 465 | * @head: the head of the list |
466 | * @member: the name of the list_struct within the struct. | 466 | * @member: the name of the list_head within the struct. |
467 | * | 467 | * |
468 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). | 468 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
469 | */ | 469 | */ |
@@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
474 | * list_for_each_entry_continue - continue iteration over list of given type | 474 | * list_for_each_entry_continue - continue iteration over list of given type |
475 | * @pos: the type * to use as a loop cursor. | 475 | * @pos: the type * to use as a loop cursor. |
476 | * @head: the head for your list. | 476 | * @head: the head for your list. |
477 | * @member: the name of the list_struct within the struct. | 477 | * @member: the name of the list_head within the struct. |
478 | * | 478 | * |
479 | * Continue to iterate over list of given type, continuing after | 479 | * Continue to iterate over list of given type, continuing after |
480 | * the current position. | 480 | * the current position. |
@@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
488 | * list_for_each_entry_continue_reverse - iterate backwards from the given point | 488 | * list_for_each_entry_continue_reverse - iterate backwards from the given point |
489 | * @pos: the type * to use as a loop cursor. | 489 | * @pos: the type * to use as a loop cursor. |
490 | * @head: the head for your list. | 490 | * @head: the head for your list. |
491 | * @member: the name of the list_struct within the struct. | 491 | * @member: the name of the list_head within the struct. |
492 | * | 492 | * |
493 | * Start to iterate over list of given type backwards, continuing after | 493 | * Start to iterate over list of given type backwards, continuing after |
494 | * the current position. | 494 | * the current position. |
@@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
502 | * list_for_each_entry_from - iterate over list of given type from the current point | 502 | * list_for_each_entry_from - iterate over list of given type from the current point |
503 | * @pos: the type * to use as a loop cursor. | 503 | * @pos: the type * to use as a loop cursor. |
504 | * @head: the head for your list. | 504 | * @head: the head for your list. |
505 | * @member: the name of the list_struct within the struct. | 505 | * @member: the name of the list_head within the struct. |
506 | * | 506 | * |
507 | * Iterate over list of given type, continuing from current position. | 507 | * Iterate over list of given type, continuing from current position. |
508 | */ | 508 | */ |
@@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
515 | * @pos: the type * to use as a loop cursor. | 515 | * @pos: the type * to use as a loop cursor. |
516 | * @n: another type * to use as temporary storage | 516 | * @n: another type * to use as temporary storage |
517 | * @head: the head for your list. | 517 | * @head: the head for your list. |
518 | * @member: the name of the list_struct within the struct. | 518 | * @member: the name of the list_head within the struct. |
519 | */ | 519 | */ |
520 | #define list_for_each_entry_safe(pos, n, head, member) \ | 520 | #define list_for_each_entry_safe(pos, n, head, member) \ |
521 | for (pos = list_first_entry(head, typeof(*pos), member), \ | 521 | for (pos = list_first_entry(head, typeof(*pos), member), \ |
@@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
528 | * @pos: the type * to use as a loop cursor. | 528 | * @pos: the type * to use as a loop cursor. |
529 | * @n: another type * to use as temporary storage | 529 | * @n: another type * to use as temporary storage |
530 | * @head: the head for your list. | 530 | * @head: the head for your list. |
531 | * @member: the name of the list_struct within the struct. | 531 | * @member: the name of the list_head within the struct. |
532 | * | 532 | * |
533 | * Iterate over list of given type, continuing after current point, | 533 | * Iterate over list of given type, continuing after current point, |
534 | * safe against removal of list entry. | 534 | * safe against removal of list entry. |
@@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
544 | * @pos: the type * to use as a loop cursor. | 544 | * @pos: the type * to use as a loop cursor. |
545 | * @n: another type * to use as temporary storage | 545 | * @n: another type * to use as temporary storage |
546 | * @head: the head for your list. | 546 | * @head: the head for your list. |
547 | * @member: the name of the list_struct within the struct. | 547 | * @member: the name of the list_head within the struct. |
548 | * | 548 | * |
549 | * Iterate over list of given type from current point, safe against | 549 | * Iterate over list of given type from current point, safe against |
550 | * removal of list entry. | 550 | * removal of list entry. |
@@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
559 | * @pos: the type * to use as a loop cursor. | 559 | * @pos: the type * to use as a loop cursor. |
560 | * @n: another type * to use as temporary storage | 560 | * @n: another type * to use as temporary storage |
561 | * @head: the head for your list. | 561 | * @head: the head for your list. |
562 | * @member: the name of the list_struct within the struct. | 562 | * @member: the name of the list_head within the struct. |
563 | * | 563 | * |
564 | * Iterate backwards over list of given type, safe against removal | 564 | * Iterate backwards over list of given type, safe against removal |
565 | * of list entry. | 565 | * of list entry. |
@@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
574 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop | 574 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
575 | * @pos: the loop cursor used in the list_for_each_entry_safe loop | 575 | * @pos: the loop cursor used in the list_for_each_entry_safe loop |
576 | * @n: temporary storage used in list_for_each_entry_safe | 576 | * @n: temporary storage used in list_for_each_entry_safe |
577 | * @member: the name of the list_struct within the struct. | 577 | * @member: the name of the list_head within the struct. |
578 | * | 578 | * |
579 | * list_safe_reset_next is not safe to use in general if the list may be | 579 | * list_safe_reset_next is not safe to use in general if the list may be |
580 | * modified concurrently (eg. the lock is dropped in the loop body). An | 580 | * modified concurrently (eg. the lock is dropped in the loop body). An |
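Editorial note: a trivial example (not from the patch) of what the corrected @member wording refers to: it is the name of the struct list_head embedded in the containing structure.

struct demo_item {
	int value;
	struct list_head node;	/* <-- this is @member */
};

static int demo_sum(struct list_head *items)
{
	struct demo_item *it;
	int sum = 0;

	list_for_each_entry(it, items, node)
		sum += it->value;

	return sum;
}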
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index 257d3779f2ab..0ca8109934e4 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h | |||
@@ -17,12 +17,8 @@ | |||
17 | * Enable lockd debugging. | 17 | * Enable lockd debugging. |
18 | * Requires RPC_DEBUG. | 18 | * Requires RPC_DEBUG. |
19 | */ | 19 | */ |
20 | #ifdef RPC_DEBUG | ||
21 | # define LOCKD_DEBUG 1 | ||
22 | #endif | ||
23 | |||
24 | #undef ifdebug | 20 | #undef ifdebug |
25 | #if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) | 21 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
26 | # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) | 22 | # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) |
27 | #else | 23 | #else |
28 | # define ifdebug(flag) if (0) | 24 | # define ifdebug(flag) if (0) |
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 307d9cab2026..1726ccbd8009 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h | |||
@@ -25,6 +25,8 @@ struct mbox_chan; | |||
25 | * if the client receives some ACK packet for transmission. | 25 | * if the client receives some ACK packet for transmission. |
26 | * Unused if the controller already has TX_Done/RTR IRQ. | 26 | * Unused if the controller already has TX_Done/RTR IRQ. |
27 | * @rx_callback: Atomic callback to provide client the data received | 27 | * @rx_callback: Atomic callback to provide client the data received |
28 | * @tx_prepare: Atomic callback to ask client to prepare the payload | ||
29 | * before initiating the transmission if required. | ||
28 | * @tx_done: Atomic callback to tell client of data transmission | 30 | * @tx_done: Atomic callback to tell client of data transmission |
29 | */ | 31 | */ |
30 | struct mbox_client { | 32 | struct mbox_client { |
@@ -34,6 +36,7 @@ struct mbox_client { | |||
34 | bool knows_txdone; | 36 | bool knows_txdone; |
35 | 37 | ||
36 | void (*rx_callback)(struct mbox_client *cl, void *mssg); | 38 | void (*rx_callback)(struct mbox_client *cl, void *mssg); |
39 | void (*tx_prepare)(struct mbox_client *cl, void *mssg); | ||
37 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); | 40 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); |
38 | }; | 41 | }; |
39 | 42 | ||
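Editorial note: a hypothetical mailbox client using the new tx_prepare hook, for controllers that only ring a doorbell and expect the payload to already be in shared memory; struct demo_ctx and its fields are invented for the sketch.

struct demo_ctx {
	struct mbox_client client;
	struct mbox_chan *chan;
	void __iomem *shmem;	/* slot the remote processor reads */
	size_t msg_size;
};

static void demo_tx_prepare(struct mbox_client *cl, void *mssg)
{
	struct demo_ctx *ctx = container_of(cl, struct demo_ctx, client);

	/* copy the message into shared memory just before the doorbell */
	memcpy_toio(ctx->shmem, mssg, ctx->msg_size);
}

static int demo_send(struct demo_ctx *ctx, void *msg)
{
	ctx->client.tx_prepare = demo_tx_prepare;
	ctx->client.tx_block = true;		/* wait for transmission */

	return mbox_send_message(ctx->chan, msg);
}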
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d..e6982ac3200d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 | 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 |
18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 | 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 |
19 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | ||
19 | 20 | ||
20 | /* struct phy_device dev_flags definitions */ | 21 | /* struct phy_device dev_flags definitions */ |
21 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 | 22 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 |
diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 550c88fb0267..611b69fa8594 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h | |||
@@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) | |||
61 | } | 61 | } |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | int mvebu_mbus_save_cpu_target(u32 *store_addr); | ||
64 | void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); | 65 | void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); |
65 | void mvebu_mbus_get_pcie_io_aperture(struct resource *res); | 66 | void mvebu_mbus_get_pcie_io_aperture(struct resource *res); |
66 | int mvebu_mbus_add_window_remap_by_id(unsigned int target, | 67 | int mvebu_mbus_add_window_remap_by_id(unsigned int target, |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5ab..7c95af8d552c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/jump_label.h> | 25 | #include <linux/jump_label.h> |
26 | 26 | ||
27 | struct mem_cgroup; | 27 | struct mem_cgroup; |
28 | struct page_cgroup; | ||
29 | struct page; | 28 | struct page; |
30 | struct mm_struct; | 29 | struct mm_struct; |
31 | struct kmem_cache; | 30 | struct kmem_cache; |
@@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, | |||
68 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); | 67 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); |
69 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); | 68 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); |
70 | 69 | ||
71 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | 70 | bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, |
72 | struct mem_cgroup *memcg); | 71 | struct mem_cgroup *root); |
73 | bool task_in_mem_cgroup(struct task_struct *task, | 72 | bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); |
74 | const struct mem_cgroup *memcg); | ||
75 | 73 | ||
76 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | 74 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); |
77 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 75 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
@@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | |||
79 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | 77 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); |
80 | extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); | 78 | extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); |
81 | 79 | ||
82 | static inline | 80 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
83 | bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) | 81 | struct mem_cgroup *memcg) |
84 | { | 82 | { |
85 | struct mem_cgroup *task_memcg; | 83 | struct mem_cgroup *task_memcg; |
86 | bool match; | 84 | bool match = false; |
87 | 85 | ||
88 | rcu_read_lock(); | 86 | rcu_read_lock(); |
89 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 87 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
90 | match = __mem_cgroup_same_or_subtree(memcg, task_memcg); | 88 | if (task_memcg) |
89 | match = mem_cgroup_is_descendant(task_memcg, memcg); | ||
91 | rcu_read_unlock(); | 90 | rcu_read_unlock(); |
92 | return match; | 91 | return match; |
93 | } | 92 | } |
@@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void) | |||
141 | 140 | ||
142 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, | 141 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, |
143 | unsigned long *flags); | 142 | unsigned long *flags); |
144 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, | 143 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, |
145 | unsigned long flags); | 144 | unsigned long *flags); |
146 | void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, | 145 | void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, |
147 | enum mem_cgroup_stat_index idx, int val); | 146 | enum mem_cgroup_stat_index idx, int val); |
148 | 147 | ||
@@ -174,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, | |||
174 | void mem_cgroup_split_huge_fixup(struct page *head); | 173 | void mem_cgroup_split_huge_fixup(struct page *head); |
175 | #endif | 174 | #endif |
176 | 175 | ||
177 | #ifdef CONFIG_DEBUG_VM | ||
178 | bool mem_cgroup_bad_page_check(struct page *page); | ||
179 | void mem_cgroup_print_bad_page(struct page *page); | ||
180 | #endif | ||
181 | #else /* CONFIG_MEMCG */ | 176 | #else /* CONFIG_MEMCG */ |
182 | struct mem_cgroup; | 177 | struct mem_cgroup; |
183 | 178 | ||
@@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, | |||
297 | } | 292 | } |
298 | 293 | ||
299 | static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, | 294 | static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, |
300 | bool locked, unsigned long flags) | 295 | bool *locked, unsigned long *flags) |
301 | { | 296 | { |
302 | } | 297 | } |
303 | 298 | ||
@@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) | |||
347 | } | 342 | } |
348 | #endif /* CONFIG_MEMCG */ | 343 | #endif /* CONFIG_MEMCG */ |
349 | 344 | ||
350 | #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) | ||
351 | static inline bool | ||
352 | mem_cgroup_bad_page_check(struct page *page) | ||
353 | { | ||
354 | return false; | ||
355 | } | ||
356 | |||
357 | static inline void | ||
358 | mem_cgroup_print_bad_page(struct page *page) | ||
359 | { | ||
360 | } | ||
361 | #endif | ||
362 | |||
363 | enum { | 345 | enum { |
364 | UNDER_LIMIT, | 346 | UNDER_LIMIT, |
365 | SOFT_LIMIT, | 347 | SOFT_LIMIT, |
@@ -418,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); | |||
418 | 400 | ||
419 | void memcg_update_array_size(int num_groups); | 401 | void memcg_update_array_size(int num_groups); |
420 | 402 | ||
421 | struct kmem_cache * | 403 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); |
422 | __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); | 404 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); |
423 | 405 | ||
424 | int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); | 406 | int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); |
425 | void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); | 407 | void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); |
@@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | |||
447 | /* | 429 | /* |
448 | * __GFP_NOFAIL allocations will move on even if charging is not | 430 | * __GFP_NOFAIL allocations will move on even if charging is not |
449 | * possible. Therefore we don't even try, and have this allocation | 431 | * possible. Therefore we don't even try, and have this allocation |
450 | * unaccounted. We could in theory charge it with | 432 | * unaccounted. We could in theory charge it forcibly, but we hope |
451 | * res_counter_charge_nofail, but we hope those allocations are rare, | 433 | * those allocations are rare, and won't be worth the trouble. |
452 | * and won't be worth the trouble. | ||
453 | */ | 434 | */ |
454 | if (gfp & __GFP_NOFAIL) | 435 | if (gfp & __GFP_NOFAIL) |
455 | return true; | 436 | return true; |
@@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | |||
467 | * memcg_kmem_uncharge_pages: uncharge pages from memcg | 448 | * memcg_kmem_uncharge_pages: uncharge pages from memcg |
468 | * @page: pointer to struct page being freed | 449 | * @page: pointer to struct page being freed |
469 | * @order: allocation order. | 450 | * @order: allocation order. |
470 | * | ||
471 | * there is no need to specify memcg here, since it is embedded in page_cgroup | ||
472 | */ | 451 | */ |
473 | static inline void | 452 | static inline void |
474 | memcg_kmem_uncharge_pages(struct page *page, int order) | 453 | memcg_kmem_uncharge_pages(struct page *page, int order) |
@@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) | |||
485 | * | 464 | * |
486 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or | 465 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or |
487 | * failure of the allocation. if @page is NULL, this function will revert the | 466 | * failure of the allocation. if @page is NULL, this function will revert the |
488 | * charges. Otherwise, it will commit the memcg given by @memcg to the | 467 | * charges. Otherwise, it will commit @page to @memcg. |
489 | * corresponding page_cgroup. | ||
490 | */ | 468 | */ |
491 | static inline void | 469 | static inline void |
492 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | 470 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) |
@@ -514,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
514 | if (unlikely(fatal_signal_pending(current))) | 492 | if (unlikely(fatal_signal_pending(current))) |
515 | return cachep; | 493 | return cachep; |
516 | 494 | ||
517 | return __memcg_kmem_get_cache(cachep, gfp); | 495 | return __memcg_kmem_get_cache(cachep); |
496 | } | ||
497 | |||
498 | static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
499 | { | ||
500 | if (memcg_kmem_enabled()) | ||
501 | __memcg_kmem_put_cache(cachep); | ||
518 | } | 502 | } |
519 | #else | 503 | #else |
520 | #define for_each_memcg_cache_index(_idx) \ | 504 | #define for_each_memcg_cache_index(_idx) \ |
@@ -550,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
550 | { | 534 | { |
551 | return cachep; | 535 | return cachep; |
552 | } | 536 | } |
537 | |||
538 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
539 | { | ||
540 | } | ||
553 | #endif /* CONFIG_MEMCG_KMEM */ | 541 | #endif /* CONFIG_MEMCG_KMEM */ |
554 | #endif /* _LINUX_MEMCONTROL_H */ | 542 | #endif /* _LINUX_MEMCONTROL_H */ |
555 | 543 | ||
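Editorial note: a simplified sketch of how the slab allocator is expected to bracket an allocation with the new get/put pair; demo_raw_alloc() stands in for the real internal allocation path and is purely hypothetical.

static void *demo_raw_alloc(struct kmem_cache *s, gfp_t flags);	/* hypothetical */

static void *demo_slab_alloc_hooked(struct kmem_cache *cachep, gfp_t flags)
{
	/* pick the memcg clone of the cache for the current task */
	struct kmem_cache *s = memcg_kmem_get_cache(cachep, flags);
	void *obj = demo_raw_alloc(s, flags);

	memcg_kmem_put_cache(s);	/* drop the reference the lookup took */
	return obj;
}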
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h index adba89d9c660..689312745b2f 100644 --- a/include/linux/mfd/abx500/ab8500-sysctrl.h +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | int ab8500_sysctrl_read(u16 reg, u8 *value); | 13 | int ab8500_sysctrl_read(u16 reg, u8 *value); |
14 | int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); | 14 | int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); |
15 | void ab8500_restart(char mode, const char *cmd); | ||
16 | 15 | ||
17 | #else | 16 | #else |
18 | 17 | ||
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f34723f7663c..910e3aa1e965 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h | |||
@@ -141,6 +141,7 @@ struct arizona { | |||
141 | 141 | ||
142 | uint16_t dac_comp_coeff; | 142 | uint16_t dac_comp_coeff; |
143 | uint8_t dac_comp_enabled; | 143 | uint8_t dac_comp_enabled; |
144 | struct mutex dac_comp_lock; | ||
144 | }; | 145 | }; |
145 | 146 | ||
146 | int arizona_clk32k_enable(struct arizona *arizona); | 147 | int arizona_clk32k_enable(struct arizona *arizona); |
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index c0b075f6bc35..aacc10d7789c 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h | |||
@@ -125,6 +125,8 @@ | |||
125 | #define ARIZONA_MIC_BIAS_CTRL_1 0x218 | 125 | #define ARIZONA_MIC_BIAS_CTRL_1 0x218 |
126 | #define ARIZONA_MIC_BIAS_CTRL_2 0x219 | 126 | #define ARIZONA_MIC_BIAS_CTRL_2 0x219 |
127 | #define ARIZONA_MIC_BIAS_CTRL_3 0x21A | 127 | #define ARIZONA_MIC_BIAS_CTRL_3 0x21A |
128 | #define ARIZONA_HP_CTRL_1L 0x225 | ||
129 | #define ARIZONA_HP_CTRL_1R 0x226 | ||
128 | #define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 | 130 | #define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 |
129 | #define ARIZONA_HEADPHONE_DETECT_1 0x29B | 131 | #define ARIZONA_HEADPHONE_DETECT_1 0x29B |
130 | #define ARIZONA_HEADPHONE_DETECT_2 0x29C | 132 | #define ARIZONA_HEADPHONE_DETECT_2 0x29C |
@@ -279,8 +281,16 @@ | |||
279 | #define ARIZONA_AIF2_FRAME_CTRL_2 0x548 | 281 | #define ARIZONA_AIF2_FRAME_CTRL_2 0x548 |
280 | #define ARIZONA_AIF2_FRAME_CTRL_3 0x549 | 282 | #define ARIZONA_AIF2_FRAME_CTRL_3 0x549 |
281 | #define ARIZONA_AIF2_FRAME_CTRL_4 0x54A | 283 | #define ARIZONA_AIF2_FRAME_CTRL_4 0x54A |
284 | #define ARIZONA_AIF2_FRAME_CTRL_5 0x54B | ||
285 | #define ARIZONA_AIF2_FRAME_CTRL_6 0x54C | ||
286 | #define ARIZONA_AIF2_FRAME_CTRL_7 0x54D | ||
287 | #define ARIZONA_AIF2_FRAME_CTRL_8 0x54E | ||
282 | #define ARIZONA_AIF2_FRAME_CTRL_11 0x551 | 288 | #define ARIZONA_AIF2_FRAME_CTRL_11 0x551 |
283 | #define ARIZONA_AIF2_FRAME_CTRL_12 0x552 | 289 | #define ARIZONA_AIF2_FRAME_CTRL_12 0x552 |
290 | #define ARIZONA_AIF2_FRAME_CTRL_13 0x553 | ||
291 | #define ARIZONA_AIF2_FRAME_CTRL_14 0x554 | ||
292 | #define ARIZONA_AIF2_FRAME_CTRL_15 0x555 | ||
293 | #define ARIZONA_AIF2_FRAME_CTRL_16 0x556 | ||
284 | #define ARIZONA_AIF2_TX_ENABLES 0x559 | 294 | #define ARIZONA_AIF2_TX_ENABLES 0x559 |
285 | #define ARIZONA_AIF2_RX_ENABLES 0x55A | 295 | #define ARIZONA_AIF2_RX_ENABLES 0x55A |
286 | #define ARIZONA_AIF2_FORCE_WRITE 0x55B | 296 | #define ARIZONA_AIF2_FORCE_WRITE 0x55B |
@@ -2245,6 +2255,46 @@ | |||
2245 | #define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ | 2255 | #define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ |
2246 | 2256 | ||
2247 | /* | 2257 | /* |
2258 | * R549 (0x225) - HP Ctrl 1L | ||
2259 | */ | ||
2260 | #define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */ | ||
2261 | #define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */ | ||
2262 | #define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */ | ||
2263 | #define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */ | ||
2264 | #define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */ | ||
2265 | #define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */ | ||
2266 | #define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */ | ||
2267 | #define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */ | ||
2268 | #define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */ | ||
2269 | #define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */ | ||
2270 | #define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */ | ||
2271 | #define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */ | ||
2272 | #define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */ | ||
2273 | #define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */ | ||
2274 | #define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */ | ||
2275 | #define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */ | ||
2276 | |||
2277 | /* | ||
2278 | * R550 (0x226) - HP Ctrl 1R | ||
2279 | */ | ||
2280 | #define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */ | ||
2281 | #define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */ | ||
2282 | #define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */ | ||
2283 | #define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */ | ||
2284 | #define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */ | ||
2285 | #define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */ | ||
2286 | #define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */ | ||
2287 | #define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */ | ||
2288 | #define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */ | ||
2289 | #define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */ | ||
2290 | #define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */ | ||
2291 | #define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */ | ||
2292 | #define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */ | ||
2293 | #define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */ | ||
2294 | #define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */ | ||
2295 | #define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */ | ||
2296 | |||
2297 | /* | ||
2248 | * R659 (0x293) - Accessory Detect Mode 1 | 2298 | * R659 (0x293) - Accessory Detect Mode 1 |
2249 | */ | 2299 | */ |
2250 | #define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ | 2300 | #define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ |
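The new HP Ctrl 1L/1R definitions follow the header's usual value/_MASK/_SHIFT/_WIDTH pattern, so they drop straight into regmap field updates. A hedged sketch of how a codec or extcon driver might use them; the regmap handle and the specific bit operations are illustrative, not taken from this patch:

	static void my_hp_short_check(struct arizona *arizona)
	{
		unsigned int val;

		/* Clear the left-channel short-removal control bit. */
		regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1L,
				   ARIZONA_RMV_SHRT_HP1L_MASK, 0);

		/* Check the right-channel output-short status bit. */
		if (!regmap_read(arizona->regmap, ARIZONA_HP_CTRL_1R, &val) &&
		    (val & ARIZONA_HP1R_SHRTO_MASK))
			pr_debug("HP1R output short detected\n");
	}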
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h new file mode 100644 index 000000000000..1279ab1644b5 --- /dev/null +++ b/include/linux/mfd/atmel-hlcdc.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Free Electrons | ||
3 | * Copyright (C) 2014 Atmel | ||
4 | * | ||
5 | * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published by | ||
9 | * the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_MFD_HLCDC_H | ||
21 | #define __LINUX_MFD_HLCDC_H | ||
22 | |||
23 | #include <linux/clk.h> | ||
24 | #include <linux/regmap.h> | ||
25 | |||
26 | #define ATMEL_HLCDC_CFG(i) ((i) * 0x4) | ||
27 | #define ATMEL_HLCDC_SIG_CFG LCDCFG(5) | ||
28 | #define ATMEL_HLCDC_HSPOL BIT(0) | ||
29 | #define ATMEL_HLCDC_VSPOL BIT(1) | ||
30 | #define ATMEL_HLCDC_VSPDLYS BIT(2) | ||
31 | #define ATMEL_HLCDC_VSPDLYE BIT(3) | ||
32 | #define ATMEL_HLCDC_DISPPOL BIT(4) | ||
33 | #define ATMEL_HLCDC_DITHER BIT(6) | ||
34 | #define ATMEL_HLCDC_DISPDLY BIT(7) | ||
35 | #define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8) | ||
36 | #define ATMEL_HLCDC_PP BIT(10) | ||
37 | #define ATMEL_HLCDC_VSPSU BIT(12) | ||
38 | #define ATMEL_HLCDC_VSPHO BIT(13) | ||
39 | #define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16) | ||
40 | |||
41 | #define ATMEL_HLCDC_EN 0x20 | ||
42 | #define ATMEL_HLCDC_DIS 0x24 | ||
43 | #define ATMEL_HLCDC_SR 0x28 | ||
44 | #define ATMEL_HLCDC_IER 0x2c | ||
45 | #define ATMEL_HLCDC_IDR 0x30 | ||
46 | #define ATMEL_HLCDC_IMR 0x34 | ||
47 | #define ATMEL_HLCDC_ISR 0x38 | ||
48 | |||
49 | #define ATMEL_HLCDC_CLKPOL BIT(0) | ||
50 | #define ATMEL_HLCDC_CLKSEL BIT(2) | ||
51 | #define ATMEL_HLCDC_CLKPWMSEL BIT(3) | ||
52 | #define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i)) | ||
53 | #define ATMEL_HLCDC_CLKDIV_SHFT 16 | ||
54 | #define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16) | ||
55 | #define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT) | ||
56 | |||
57 | #define ATMEL_HLCDC_PIXEL_CLK BIT(0) | ||
58 | #define ATMEL_HLCDC_SYNC BIT(1) | ||
59 | #define ATMEL_HLCDC_DISP BIT(2) | ||
60 | #define ATMEL_HLCDC_PWM BIT(3) | ||
61 | #define ATMEL_HLCDC_SIP BIT(4) | ||
62 | |||
63 | #define ATMEL_HLCDC_SOF BIT(0) | ||
64 | #define ATMEL_HLCDC_SYNCDIS BIT(1) | ||
65 | #define ATMEL_HLCDC_FIFOERR BIT(4) | ||
66 | #define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8) | ||
67 | |||
68 | /** | ||
69 | * Structure shared by the MFD device and its subdevices. | ||
70 | * | ||
71 | * @regmap: register map used to access HLCDC IP registers | ||
72 | * @periph_clk: the hlcdc peripheral clock | ||
73 | * @sys_clk: the hlcdc system clock | ||
74 | * @slow_clk: the system slow clk | ||
75 | * @irq: the hlcdc irq | ||
76 | */ | ||
77 | struct atmel_hlcdc { | ||
78 | struct regmap *regmap; | ||
79 | struct clk *periph_clk; | ||
80 | struct clk *sys_clk; | ||
81 | struct clk *slow_clk; | ||
82 | int irq; | ||
83 | }; | ||
84 | |||
85 | #endif /* __LINUX_MFD_HLCDC_H */ | ||
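The struct at the end of the new header is the point of the MFD split: the parent driver probes the HLCDC once and shares the same regmap and clocks with its display and PWM cells. A sketch of a subdevice picking it up, assuming the parent stores the structure in its driver data (that retrieval convention is an assumption, not something this header guarantees):

	/* Hypothetical subdevice probe: borrow the parent's regmap and
	 * peripheral clock, then enable the pixel clock domain.
	 */
	static int my_hlcdc_sub_probe(struct platform_device *pdev)
	{
		struct atmel_hlcdc *hlcdc = dev_get_drvdata(pdev->dev.parent);
		int ret;

		if (!hlcdc)
			return -EPROBE_DEFER;

		ret = clk_prepare_enable(hlcdc->periph_clk);
		if (ret)
			return ret;

		return regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN,
				    ATMEL_HLCDC_PIXEL_CLK);
	}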
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d0e31a2287ac..81589d176ae8 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h | |||
@@ -14,6 +14,8 @@ | |||
14 | enum { | 14 | enum { |
15 | AXP202_ID = 0, | 15 | AXP202_ID = 0, |
16 | AXP209_ID, | 16 | AXP209_ID, |
17 | AXP288_ID, | ||
18 | NR_AXP20X_VARIANTS, | ||
17 | }; | 19 | }; |
18 | 20 | ||
19 | #define AXP20X_DATACACHE(m) (0x04 + (m)) | 21 | #define AXP20X_DATACACHE(m) (0x04 + (m)) |
@@ -49,11 +51,13 @@ enum { | |||
49 | #define AXP20X_IRQ3_EN 0x42 | 51 | #define AXP20X_IRQ3_EN 0x42 |
50 | #define AXP20X_IRQ4_EN 0x43 | 52 | #define AXP20X_IRQ4_EN 0x43 |
51 | #define AXP20X_IRQ5_EN 0x44 | 53 | #define AXP20X_IRQ5_EN 0x44 |
54 | #define AXP20X_IRQ6_EN 0x45 | ||
52 | #define AXP20X_IRQ1_STATE 0x48 | 55 | #define AXP20X_IRQ1_STATE 0x48 |
53 | #define AXP20X_IRQ2_STATE 0x49 | 56 | #define AXP20X_IRQ2_STATE 0x49 |
54 | #define AXP20X_IRQ3_STATE 0x4a | 57 | #define AXP20X_IRQ3_STATE 0x4a |
55 | #define AXP20X_IRQ4_STATE 0x4b | 58 | #define AXP20X_IRQ4_STATE 0x4b |
56 | #define AXP20X_IRQ5_STATE 0x4c | 59 | #define AXP20X_IRQ5_STATE 0x4c |
60 | #define AXP20X_IRQ6_STATE 0x4d | ||
57 | 61 | ||
58 | /* ADC */ | 62 | /* ADC */ |
59 | #define AXP20X_ACIN_V_ADC_H 0x56 | 63 | #define AXP20X_ACIN_V_ADC_H 0x56 |
@@ -116,6 +120,15 @@ enum { | |||
116 | #define AXP20X_CC_CTRL 0xb8 | 120 | #define AXP20X_CC_CTRL 0xb8 |
117 | #define AXP20X_FG_RES 0xb9 | 121 | #define AXP20X_FG_RES 0xb9 |
118 | 122 | ||
123 | /* AXP288 specific registers */ | ||
124 | #define AXP288_PMIC_ADC_H 0x56 | ||
125 | #define AXP288_PMIC_ADC_L 0x57 | ||
126 | #define AXP288_ADC_TS_PIN_CTRL 0x84 | ||
127 | |||
128 | #define AXP288_PMIC_ADC_EN 0x84 | ||
129 | #define AXP288_FG_TUNE5 0xed | ||
130 | |||
131 | |||
119 | /* Regulators IDs */ | 132 | /* Regulators IDs */ |
120 | enum { | 133 | enum { |
121 | AXP20X_LDO1 = 0, | 134 | AXP20X_LDO1 = 0, |
@@ -169,12 +182,58 @@ enum { | |||
169 | AXP20X_IRQ_GPIO0_INPUT, | 182 | AXP20X_IRQ_GPIO0_INPUT, |
170 | }; | 183 | }; |
171 | 184 | ||
185 | enum axp288_irqs { | ||
186 | AXP288_IRQ_VBUS_FALL = 2, | ||
187 | AXP288_IRQ_VBUS_RISE, | ||
188 | AXP288_IRQ_OV, | ||
189 | AXP288_IRQ_FALLING_ALT, | ||
190 | AXP288_IRQ_RISING_ALT, | ||
191 | AXP288_IRQ_OV_ALT, | ||
192 | AXP288_IRQ_DONE = 10, | ||
193 | AXP288_IRQ_CHARGING, | ||
194 | AXP288_IRQ_SAFE_QUIT, | ||
195 | AXP288_IRQ_SAFE_ENTER, | ||
196 | AXP288_IRQ_ABSENT, | ||
197 | AXP288_IRQ_APPEND, | ||
198 | AXP288_IRQ_QWBTU, | ||
199 | AXP288_IRQ_WBTU, | ||
200 | AXP288_IRQ_QWBTO, | ||
201 | AXP288_IRQ_WBTO, | ||
202 | AXP288_IRQ_QCBTU, | ||
203 | AXP288_IRQ_CBTU, | ||
204 | AXP288_IRQ_QCBTO, | ||
205 | AXP288_IRQ_CBTO, | ||
206 | AXP288_IRQ_WL2, | ||
207 | AXP288_IRQ_WL1, | ||
208 | AXP288_IRQ_GPADC, | ||
209 | AXP288_IRQ_OT = 31, | ||
210 | AXP288_IRQ_GPIO0, | ||
211 | AXP288_IRQ_GPIO1, | ||
212 | AXP288_IRQ_POKO, | ||
213 | AXP288_IRQ_POKL, | ||
214 | AXP288_IRQ_POKS, | ||
215 | AXP288_IRQ_POKN, | ||
216 | AXP288_IRQ_POKP, | ||
217 | AXP288_IRQ_TIMER, | ||
218 | AXP288_IRQ_MV_CHNG, | ||
219 | AXP288_IRQ_BC_USB_CHNG, | ||
220 | }; | ||
221 | |||
222 | #define AXP288_TS_ADC_H 0x58 | ||
223 | #define AXP288_TS_ADC_L 0x59 | ||
224 | #define AXP288_GP_ADC_H 0x5a | ||
225 | #define AXP288_GP_ADC_L 0x5b | ||
226 | |||
172 | struct axp20x_dev { | 227 | struct axp20x_dev { |
173 | struct device *dev; | 228 | struct device *dev; |
174 | struct i2c_client *i2c_client; | 229 | struct i2c_client *i2c_client; |
175 | struct regmap *regmap; | 230 | struct regmap *regmap; |
176 | struct regmap_irq_chip_data *regmap_irqc; | 231 | struct regmap_irq_chip_data *regmap_irqc; |
177 | long variant; | 232 | long variant; |
233 | int nr_cells; | ||
234 | struct mfd_cell *cells; | ||
235 | const struct regmap_config *regmap_cfg; | ||
236 | const struct regmap_irq_chip *regmap_irq_chip; | ||
178 | }; | 237 | }; |
179 | 238 | ||
180 | #endif /* __LINUX_MFD_AXP20X_H */ | 239 | #endif /* __LINUX_MFD_AXP20X_H */ |
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 73e1709d4c09..a76bc100bf97 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h | |||
@@ -111,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id, | |||
111 | struct resource *mem_base, | 111 | struct resource *mem_base, |
112 | int irq_base, struct irq_domain *irq_domain); | 112 | int irq_base, struct irq_domain *irq_domain); |
113 | 113 | ||
114 | static inline int mfd_add_hotplug_devices(struct device *parent, | ||
115 | const struct mfd_cell *cells, int n_devs) | ||
116 | { | ||
117 | return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs, | ||
118 | NULL, 0, NULL); | ||
119 | } | ||
120 | |||
114 | extern void mfd_remove_devices(struct device *parent); | 121 | extern void mfd_remove_devices(struct device *parent); |
115 | 122 | ||
116 | #endif | 123 | #endif |
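mfd_add_hotplug_devices() is simply mfd_add_devices() with PLATFORM_DEVID_AUTO and no memory base, IRQ base or IRQ domain, which is what hot-pluggable (typically USB-attached) MFDs need so that two plugged-in instances never collide on device IDs. A short usage sketch; the cell names and the USB parent are illustrative:

	static const struct mfd_cell my_cells[] = {
		{ .name = "my-hotplug-gpio" },
		{ .name = "my-hotplug-i2c"  },
	};

	/* Called from the USB interface probe of a hypothetical MFD driver. */
	static int my_mfd_probe(struct usb_interface *intf)
	{
		return mfd_add_hotplug_devices(&intf->dev, my_cells,
					       ARRAY_SIZE(my_cells));
	}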
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index cb01496bfa49..8e1cdbef3dad 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h | |||
@@ -99,12 +99,6 @@ struct davinci_vcif { | |||
99 | dma_addr_t dma_rx_addr; | 99 | dma_addr_t dma_rx_addr; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct cq93vc { | ||
103 | struct platform_device *pdev; | ||
104 | struct snd_soc_codec *codec; | ||
105 | u32 sysclk; | ||
106 | }; | ||
107 | |||
108 | struct davinci_vc; | 102 | struct davinci_vc; |
109 | 103 | ||
110 | struct davinci_vc { | 104 | struct davinci_vc { |
@@ -122,7 +116,6 @@ struct davinci_vc { | |||
122 | 116 | ||
123 | /* Client devices */ | 117 | /* Client devices */ |
124 | struct davinci_vcif davinci_vcif; | 118 | struct davinci_vcif davinci_vcif; |
125 | struct cq93vc cq93vc; | ||
126 | }; | 119 | }; |
127 | 120 | ||
128 | #endif | 121 | #endif |
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h new file mode 100644 index 000000000000..004b24576da8 --- /dev/null +++ b/include/linux/mfd/dln2.h | |||
@@ -0,0 +1,103 @@ | |||
1 | #ifndef __LINUX_USB_DLN2_H | ||
2 | #define __LINUX_USB_DLN2_H | ||
3 | |||
4 | #define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8)) | ||
5 | |||
6 | struct dln2_platform_data { | ||
7 | u16 handle; /* sub-driver handle (internally used only) */ | ||
8 | u8 port; /* I2C/SPI port */ | ||
9 | }; | ||
10 | |||
11 | /** | ||
12 | * dln2_event_cb_t - event callback function signature | ||
13 | * | ||
14 | * @pdev - the sub-device that registered this callback | ||
15 | * @echo - the echo header field received in the message | ||
16 | * @data - the data payload | ||
17 | * @len - the data payload length | ||
18 | * | ||
19 | * The callback function is called in interrupt context and the data payload is | ||
20 | * only valid during the call. If the user needs later access of the data, it | ||
21 | * must copy it. | ||
22 | */ | ||
23 | |||
24 | typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo, | ||
25 | const void *data, int len); | ||
26 | |||
27 | /** | ||
28 | * dln2_register_event_cb - register a callback function for an event | ||
29 | * | ||
30 | * @pdev - the sub-device that registers the callback | ||
31 | * @event - the event for which to register a callback | ||
32 | * @event_cb - the callback function | ||
33 | * | ||
34 | * @return 0 in case of success, negative value in case of error | ||
35 | */ | ||
36 | int dln2_register_event_cb(struct platform_device *pdev, u16 event, | ||
37 | dln2_event_cb_t event_cb); | ||
38 | |||
39 | /** | ||
40 | * dln2_unregister_event_cb - unregister the callback function for an event | ||
41 | * | ||
42 | * @pdev - the sub-device that registered the callback | ||
43 | * @event - the event for which to unregister the callback | ||
44 | */ | ||
45 | void dln2_unregister_event_cb(struct platform_device *pdev, u16 event); | ||
46 | |||
47 | /** | ||
48 | * dln2_transfer - issue a DLN2 command and wait for a response and the | ||
49 | * associated data | ||
50 | * | ||
51 | * @pdev - the sub-device which is issuing this transfer | ||
52 | * @cmd - the command to be sent to the device | ||
53 | * @obuf - the buffer to be sent to the device; it can be NULL if the user | ||
54 | * doesn't need to transmit data with this command | ||
55 | * @obuf_len - the size of the buffer to be sent to the device | ||
56 | * @ibuf - any data associated with the response will be copied here; it can be | ||
57 | * NULL if the user doesn't need the response data | ||
58 | * @ibuf_len - must be initialized to the input buffer size; it will be modified | ||
59 | * to indicate the actual data transferred; | ||
60 | * | ||
61 | * @return 0 for success, negative value for errors | ||
62 | */ | ||
63 | int dln2_transfer(struct platform_device *pdev, u16 cmd, | ||
64 | const void *obuf, unsigned obuf_len, | ||
65 | void *ibuf, unsigned *ibuf_len); | ||
66 | |||
67 | /** | ||
68 | * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed | ||
69 | * | ||
70 | * @pdev - the sub-device which is issuing this transfer | ||
71 | * @cmd - the command to be sent to the device | ||
72 | * @ibuf - any data associated with the response will be copied here; it can be | ||
73 | * NULL if the user doesn't need the response data | ||
74 | * @ibuf_len - must be initialized to the input buffer size; it will be modified | ||
75 | * to indicate the actual data transferred; | ||
76 | * | ||
77 | * @return 0 for success, negative value for errors | ||
78 | */ | ||
79 | |||
80 | static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd, | ||
81 | void *ibuf, unsigned *ibuf_len) | ||
82 | { | ||
83 | return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed | ||
88 | * | ||
89 | * @pdev - the sub-device which is issuing this transfer | ||
90 | * @cmd - the command to be sent to the device | ||
91 | * @obuf - the buffer to be sent to the device; it can be NULL if the | ||
92 | * user doesn't need to transmit data with this command | ||
93 | * @obuf_len - the size of the buffer to be sent to the device | ||
94 | * | ||
95 | * @return 0 for success, negative value for errors | ||
96 | */ | ||
97 | static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd, | ||
98 | const void *obuf, unsigned obuf_len) | ||
99 | { | ||
100 | return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL); | ||
101 | } | ||
102 | |||
103 | #endif | ||
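Taken together, the new DLN2 header gives cell drivers two things: a callback hook for unsolicited events and a synchronous command/response path. A hedged sketch of a sub-driver using both; the module, command and event numbers below are placeholders, not values defined by this header:

	#define MY_MODULE_ID	 0x99				/* hypothetical */
	#define MY_CMD_GET_VAL	 DLN2_CMD(0x01, MY_MODULE_ID)	/* hypothetical */
	#define MY_EVENT_CHANGED DLN2_CMD(0x10, MY_MODULE_ID)	/* hypothetical */

	static void my_event_cb(struct platform_device *pdev, u16 echo,
				const void *data, int len)
	{
		/* Runs in interrupt context; copy data if it is needed later. */
	}

	static int my_probe(struct platform_device *pdev)
	{
		__le32 val;
		unsigned int len = sizeof(val);
		int ret;

		ret = dln2_register_event_cb(pdev, MY_EVENT_CHANGED, my_event_cb);
		if (ret)
			return ret;

		/* No TX payload, expect a 32-bit response. */
		ret = dln2_transfer_rx(pdev, MY_CMD_GET_VAL, &val, &len);
		if (ret)
			dln2_unregister_event_cb(pdev, MY_EVENT_CHANGED);
		return ret;
	}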
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index 7e6dc4b2b795..553f7d09258a 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h | |||
@@ -131,13 +131,6 @@ enum max77686_opmode { | |||
131 | MAX77686_OPMODE_STANDBY, | 131 | MAX77686_OPMODE_STANDBY, |
132 | }; | 132 | }; |
133 | 133 | ||
134 | enum max77802_opmode { | ||
135 | MAX77802_OPMODE_OFF, | ||
136 | MAX77802_OPMODE_STANDBY, | ||
137 | MAX77802_OPMODE_LP, | ||
138 | MAX77802_OPMODE_NORMAL, | ||
139 | }; | ||
140 | |||
141 | struct max77686_opmode_data { | 134 | struct max77686_opmode_data { |
142 | int id; | 135 | int id; |
143 | int mode; | 136 | int mode; |
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 582e67f34054..08dae01258b9 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | 28 | ||
29 | #define MAX77693_NUM_IRQ_MUIC_REGS 3 | ||
30 | #define MAX77693_REG_INVALID (0xff) | 29 | #define MAX77693_REG_INVALID (0xff) |
31 | 30 | ||
32 | /* Slave addr = 0xCC: PMIC, Charger, Flash LED */ | 31 | /* Slave addr = 0xCC: PMIC, Charger, Flash LED */ |
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 74346d5e7899..0c12628e91c6 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h | |||
@@ -558,6 +558,7 @@ | |||
558 | #define SD_SAMPLE_POINT_CTL 0xFDA7 | 558 | #define SD_SAMPLE_POINT_CTL 0xFDA7 |
559 | #define SD_PUSH_POINT_CTL 0xFDA8 | 559 | #define SD_PUSH_POINT_CTL 0xFDA8 |
560 | #define SD_CMD0 0xFDA9 | 560 | #define SD_CMD0 0xFDA9 |
561 | #define SD_CMD_START 0x40 | ||
561 | #define SD_CMD1 0xFDAA | 562 | #define SD_CMD1 0xFDAA |
562 | #define SD_CMD2 0xFDAB | 563 | #define SD_CMD2 0xFDAB |
563 | #define SD_CMD3 0xFDAC | 564 | #define SD_CMD3 0xFDAC |
@@ -707,6 +708,14 @@ | |||
707 | #define PM_CTRL1 0xFF44 | 708 | #define PM_CTRL1 0xFF44 |
708 | #define PM_CTRL2 0xFF45 | 709 | #define PM_CTRL2 0xFF45 |
709 | #define PM_CTRL3 0xFF46 | 710 | #define PM_CTRL3 0xFF46 |
711 | #define SDIO_SEND_PME_EN 0x80 | ||
712 | #define FORCE_RC_MODE_ON 0x40 | ||
713 | #define FORCE_RX50_LINK_ON 0x20 | ||
714 | #define D3_DELINK_MODE_EN 0x10 | ||
715 | #define USE_PESRTB_CTL_DELINK 0x08 | ||
716 | #define DELAY_PIN_WAKE 0x04 | ||
717 | #define RESET_PIN_WAKE 0x02 | ||
718 | #define PM_WAKE_EN 0x01 | ||
710 | #define PM_CTRL4 0xFF47 | 719 | #define PM_CTRL4 0xFF47 |
711 | 720 | ||
712 | /* Memory mapping */ | 721 | /* Memory mapping */ |
@@ -752,6 +761,14 @@ | |||
752 | #define PHY_DUM_REG 0x1F | 761 | #define PHY_DUM_REG 0x1F |
753 | 762 | ||
754 | #define LCTLR 0x80 | 763 | #define LCTLR 0x80 |
764 | #define LCTLR_EXT_SYNC 0x80 | ||
765 | #define LCTLR_COMMON_CLOCK_CFG 0x40 | ||
766 | #define LCTLR_RETRAIN_LINK 0x20 | ||
767 | #define LCTLR_LINK_DISABLE 0x10 | ||
768 | #define LCTLR_RCB 0x08 | ||
769 | #define LCTLR_RESERVED 0x04 | ||
770 | #define LCTLR_ASPM_CTL_MASK 0x03 | ||
771 | |||
755 | #define PCR_SETTING_REG1 0x724 | 772 | #define PCR_SETTING_REG1 0x724 |
756 | #define PCR_SETTING_REG2 0x814 | 773 | #define PCR_SETTING_REG2 0x814 |
757 | #define PCR_SETTING_REG3 0x747 | 774 | #define PCR_SETTING_REG3 0x747 |
@@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) | |||
967 | return (u8 *)(pcr->host_cmds_ptr); | 984 | return (u8 *)(pcr->host_cmds_ptr); |
968 | } | 985 | } |
969 | 986 | ||
987 | static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, | ||
988 | u8 mask, u8 append) | ||
989 | { | ||
990 | int err; | ||
991 | u8 val; | ||
992 | |||
993 | err = pci_read_config_byte(pcr->pci, addr, &val); | ||
994 | if (err < 0) | ||
995 | return err; | ||
996 | return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); | ||
997 | } | ||
998 | |||
999 | static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) | ||
1000 | { | ||
1001 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); | ||
1002 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16); | ||
1003 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8); | ||
1004 | rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val); | ||
1005 | } | ||
1006 | |||
970 | #endif | 1007 | #endif |
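The two inline helpers added at the bottom capture recurring patterns in the rtsx sub-drivers: a read-modify-write of a PCI config byte and a byte-wise big-endian register write queued through the host command buffer. A hedged sketch using the new LCTLR bit names; the pcr handle, the ASPM decision and the register choice are illustrative, and the command-queue calls assume the usual rtsx_pci_init_cmd()/rtsx_pci_send_cmd() helpers:

	static int my_send_sd_cmd_arg(struct rtsx_pcr *pcr, u32 cmd_arg)
	{
		/* Clear the ASPM control field in the Link Control register:
		 * keep everything outside LCTLR_ASPM_CTL_MASK, append zeros.
		 */
		rtsx_pci_update_cfg_byte(pcr, LCTLR, ~LCTLR_ASPM_CTL_MASK, 0);

		/* Queue a 32-bit value MSB-first into SD_CMD1..SD_CMD4, then send. */
		rtsx_pci_init_cmd(pcr);
		rtsx_pci_write_be32(pcr, SD_CMD1, cmd_arg);
		return rtsx_pci_send_cmd(pcr, 100);
	}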
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 1825edacbda7..3fdb7cfbffb3 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #define MIN_800_MV 800000 | 28 | #define MIN_800_MV 800000 |
29 | #define MIN_750_MV 750000 | 29 | #define MIN_750_MV 750000 |
30 | #define MIN_600_MV 600000 | 30 | #define MIN_600_MV 600000 |
31 | #define MIN_500_MV 500000 | ||
31 | 32 | ||
32 | /* Macros to represent steps for LDO/BUCK */ | 33 | /* Macros to represent steps for LDO/BUCK */ |
33 | #define STEP_50_MV 50000 | 34 | #define STEP_50_MV 50000 |
@@ -41,6 +42,7 @@ enum sec_device_type { | |||
41 | S5M8767X, | 42 | S5M8767X, |
42 | S2MPA01, | 43 | S2MPA01, |
43 | S2MPS11X, | 44 | S2MPS11X, |
45 | S2MPS13X, | ||
44 | S2MPS14X, | 46 | S2MPS14X, |
45 | S2MPU02, | 47 | S2MPU02, |
46 | }; | 48 | }; |
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h new file mode 100644 index 000000000000..ce5dda8958fe --- /dev/null +++ b/include/linux/mfd/samsung/s2mps13.h | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * s2mps13.h | ||
3 | * | ||
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd | ||
5 | * http://www.samsung.com | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef __LINUX_MFD_S2MPS13_H | ||
20 | #define __LINUX_MFD_S2MPS13_H | ||
21 | |||
22 | /* S2MPS13 registers */ | ||
23 | enum s2mps13_reg { | ||
24 | S2MPS13_REG_ID, | ||
25 | S2MPS13_REG_INT1, | ||
26 | S2MPS13_REG_INT2, | ||
27 | S2MPS13_REG_INT3, | ||
28 | S2MPS13_REG_INT1M, | ||
29 | S2MPS13_REG_INT2M, | ||
30 | S2MPS13_REG_INT3M, | ||
31 | S2MPS13_REG_ST1, | ||
32 | S2MPS13_REG_ST2, | ||
33 | S2MPS13_REG_PWRONSRC, | ||
34 | S2MPS13_REG_OFFSRC, | ||
35 | S2MPS13_REG_BU_CHG, | ||
36 | S2MPS13_REG_RTCCTRL, | ||
37 | S2MPS13_REG_CTRL1, | ||
38 | S2MPS13_REG_CTRL2, | ||
39 | S2MPS13_REG_RSVD1, | ||
40 | S2MPS13_REG_RSVD2, | ||
41 | S2MPS13_REG_RSVD3, | ||
42 | S2MPS13_REG_RSVD4, | ||
43 | S2MPS13_REG_RSVD5, | ||
44 | S2MPS13_REG_RSVD6, | ||
45 | S2MPS13_REG_CTRL3, | ||
46 | S2MPS13_REG_RSVD7, | ||
47 | S2MPS13_REG_RSVD8, | ||
48 | S2MPS13_REG_WRSTBI, | ||
49 | S2MPS13_REG_B1CTRL, | ||
50 | S2MPS13_REG_B1OUT, | ||
51 | S2MPS13_REG_B2CTRL, | ||
52 | S2MPS13_REG_B2OUT, | ||
53 | S2MPS13_REG_B3CTRL, | ||
54 | S2MPS13_REG_B3OUT, | ||
55 | S2MPS13_REG_B4CTRL, | ||
56 | S2MPS13_REG_B4OUT, | ||
57 | S2MPS13_REG_B5CTRL, | ||
58 | S2MPS13_REG_B5OUT, | ||
59 | S2MPS13_REG_B6CTRL, | ||
60 | S2MPS13_REG_B6OUT, | ||
61 | S2MPS13_REG_B7CTRL, | ||
62 | S2MPS13_REG_B7OUT, | ||
63 | S2MPS13_REG_B8CTRL, | ||
64 | S2MPS13_REG_B8OUT, | ||
65 | S2MPS13_REG_B9CTRL, | ||
66 | S2MPS13_REG_B9OUT, | ||
67 | S2MPS13_REG_B10CTRL, | ||
68 | S2MPS13_REG_B10OUT, | ||
69 | S2MPS13_REG_BB1CTRL, | ||
70 | S2MPS13_REG_BB1OUT, | ||
71 | S2MPS13_REG_BUCK_RAMP1, | ||
72 | S2MPS13_REG_BUCK_RAMP2, | ||
73 | S2MPS13_REG_LDO_DVS1, | ||
74 | S2MPS13_REG_LDO_DVS2, | ||
75 | S2MPS13_REG_LDO_DVS3, | ||
76 | S2MPS13_REG_B6OUT2, | ||
77 | S2MPS13_REG_L1CTRL, | ||
78 | S2MPS13_REG_L2CTRL, | ||
79 | S2MPS13_REG_L3CTRL, | ||
80 | S2MPS13_REG_L4CTRL, | ||
81 | S2MPS13_REG_L5CTRL, | ||
82 | S2MPS13_REG_L6CTRL, | ||
83 | S2MPS13_REG_L7CTRL, | ||
84 | S2MPS13_REG_L8CTRL, | ||
85 | S2MPS13_REG_L9CTRL, | ||
86 | S2MPS13_REG_L10CTRL, | ||
87 | S2MPS13_REG_L11CTRL, | ||
88 | S2MPS13_REG_L12CTRL, | ||
89 | S2MPS13_REG_L13CTRL, | ||
90 | S2MPS13_REG_L14CTRL, | ||
91 | S2MPS13_REG_L15CTRL, | ||
92 | S2MPS13_REG_L16CTRL, | ||
93 | S2MPS13_REG_L17CTRL, | ||
94 | S2MPS13_REG_L18CTRL, | ||
95 | S2MPS13_REG_L19CTRL, | ||
96 | S2MPS13_REG_L20CTRL, | ||
97 | S2MPS13_REG_L21CTRL, | ||
98 | S2MPS13_REG_L22CTRL, | ||
99 | S2MPS13_REG_L23CTRL, | ||
100 | S2MPS13_REG_L24CTRL, | ||
101 | S2MPS13_REG_L25CTRL, | ||
102 | S2MPS13_REG_L26CTRL, | ||
103 | S2MPS13_REG_L27CTRL, | ||
104 | S2MPS13_REG_L28CTRL, | ||
105 | S2MPS13_REG_L30CTRL, | ||
106 | S2MPS13_REG_L31CTRL, | ||
107 | S2MPS13_REG_L32CTRL, | ||
108 | S2MPS13_REG_L33CTRL, | ||
109 | S2MPS13_REG_L34CTRL, | ||
110 | S2MPS13_REG_L35CTRL, | ||
111 | S2MPS13_REG_L36CTRL, | ||
112 | S2MPS13_REG_L37CTRL, | ||
113 | S2MPS13_REG_L38CTRL, | ||
114 | S2MPS13_REG_L39CTRL, | ||
115 | S2MPS13_REG_L40CTRL, | ||
116 | S2MPS13_REG_LDODSCH1, | ||
117 | S2MPS13_REG_LDODSCH2, | ||
118 | S2MPS13_REG_LDODSCH3, | ||
119 | S2MPS13_REG_LDODSCH4, | ||
120 | S2MPS13_REG_LDODSCH5, | ||
121 | }; | ||
122 | |||
123 | /* regulator ids */ | ||
124 | enum s2mps13_regulators { | ||
125 | S2MPS13_LDO1, | ||
126 | S2MPS13_LDO2, | ||
127 | S2MPS13_LDO3, | ||
128 | S2MPS13_LDO4, | ||
129 | S2MPS13_LDO5, | ||
130 | S2MPS13_LDO6, | ||
131 | S2MPS13_LDO7, | ||
132 | S2MPS13_LDO8, | ||
133 | S2MPS13_LDO9, | ||
134 | S2MPS13_LDO10, | ||
135 | S2MPS13_LDO11, | ||
136 | S2MPS13_LDO12, | ||
137 | S2MPS13_LDO13, | ||
138 | S2MPS13_LDO14, | ||
139 | S2MPS13_LDO15, | ||
140 | S2MPS13_LDO16, | ||
141 | S2MPS13_LDO17, | ||
142 | S2MPS13_LDO18, | ||
143 | S2MPS13_LDO19, | ||
144 | S2MPS13_LDO20, | ||
145 | S2MPS13_LDO21, | ||
146 | S2MPS13_LDO22, | ||
147 | S2MPS13_LDO23, | ||
148 | S2MPS13_LDO24, | ||
149 | S2MPS13_LDO25, | ||
150 | S2MPS13_LDO26, | ||
151 | S2MPS13_LDO27, | ||
152 | S2MPS13_LDO28, | ||
153 | S2MPS13_LDO29, | ||
154 | S2MPS13_LDO30, | ||
155 | S2MPS13_LDO31, | ||
156 | S2MPS13_LDO32, | ||
157 | S2MPS13_LDO33, | ||
158 | S2MPS13_LDO34, | ||
159 | S2MPS13_LDO35, | ||
160 | S2MPS13_LDO36, | ||
161 | S2MPS13_LDO37, | ||
162 | S2MPS13_LDO38, | ||
163 | S2MPS13_LDO39, | ||
164 | S2MPS13_LDO40, | ||
165 | S2MPS13_BUCK1, | ||
166 | S2MPS13_BUCK2, | ||
167 | S2MPS13_BUCK3, | ||
168 | S2MPS13_BUCK4, | ||
169 | S2MPS13_BUCK5, | ||
170 | S2MPS13_BUCK6, | ||
171 | S2MPS13_BUCK7, | ||
172 | S2MPS13_BUCK8, | ||
173 | S2MPS13_BUCK9, | ||
174 | S2MPS13_BUCK10, | ||
175 | |||
176 | S2MPS13_REGULATOR_MAX, | ||
177 | }; | ||
178 | |||
179 | /* | ||
180 | * Default ramp delay in uv/us. Datasheet says that ramp delay can be | ||
181 | * controlled however it does not specify which register is used for that. | ||
182 | * Let's assume that default value will be set. | ||
183 | */ | ||
184 | #define S2MPS13_BUCK_RAMP_DELAY 12500 | ||
185 | |||
186 | #endif /* __LINUX_MFD_S2MPS13_H */ | ||
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index ff44374a1a4e..c877cad61a13 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | |||
@@ -395,4 +395,43 @@ | |||
395 | #define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) | 395 | #define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) |
396 | #define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) | 396 | #define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) |
397 | 397 | ||
398 | /* For imx6sx iomux gpr register field define */ | ||
399 | #define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20) | ||
400 | #define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20) | ||
401 | #define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20) | ||
402 | #define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19) | ||
403 | #define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19) | ||
404 | #define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19) | ||
405 | #define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13) | ||
406 | #define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17) | ||
407 | #define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13) | ||
408 | |||
409 | #define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3) | ||
410 | #define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4) | ||
411 | |||
412 | #define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3) | ||
413 | #define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3) | ||
414 | #define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3) | ||
415 | |||
416 | #define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27) | ||
417 | #define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27) | ||
418 | #define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27) | ||
419 | #define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27) | ||
420 | #define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27) | ||
421 | #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) | ||
422 | #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) | ||
423 | #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) | ||
424 | #define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) | ||
425 | #define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) | ||
426 | #define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) | ||
427 | #define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4) | ||
428 | #define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4) | ||
429 | |||
430 | #define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2) | ||
431 | #define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2) | ||
432 | #define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2) | ||
433 | #define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1) | ||
434 | #define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) | ||
435 | #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) | ||
436 | |||
398 | #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ | 437 | #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ |
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index e6088c2e2092..e1c12d84c26a 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h | |||
@@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data { | |||
164 | 164 | ||
165 | /** | 165 | /** |
166 | * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data | 166 | * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data |
167 | * @gpio_base: first gpio number assigned to TC3589x. A maximum of | ||
168 | * %TC3589x_NR_GPIOS GPIOs will be allocated. | ||
169 | * @setup: callback for board-specific initialization | 167 | * @setup: callback for board-specific initialization |
170 | * @remove: callback for board-specific teardown | 168 | * @remove: callback for board-specific teardown |
171 | */ | 169 | */ |
172 | struct tc3589x_gpio_platform_data { | 170 | struct tc3589x_gpio_platform_data { |
173 | int gpio_base; | ||
174 | void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); | 171 | void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); |
175 | void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); | 172 | void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); |
176 | }; | 173 | }; |
@@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data { | |||
178 | /** | 175 | /** |
179 | * struct tc3589x_platform_data - TC3589x platform data | 176 | * struct tc3589x_platform_data - TC3589x platform data |
180 | * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) | 177 | * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) |
181 | * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used. | ||
182 | * @gpio: GPIO-specific platform data | 178 | * @gpio: GPIO-specific platform data |
183 | * @keypad: keypad-specific platform data | 179 | * @keypad: keypad-specific platform data |
184 | */ | 180 | */ |
185 | struct tc3589x_platform_data { | 181 | struct tc3589x_platform_data { |
186 | unsigned int block; | 182 | unsigned int block; |
187 | int irq_base; | ||
188 | struct tc3589x_gpio_platform_data *gpio; | 183 | struct tc3589x_gpio_platform_data *gpio; |
189 | const struct tc3589x_keypad_platform_data *keypad; | 184 | const struct tc3589x_keypad_platform_data *keypad; |
190 | }; | 185 | }; |
191 | 186 | ||
192 | #define TC3589x_NR_GPIOS 24 | ||
193 | #define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS) | ||
194 | |||
195 | #endif | 187 | #endif |
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e1..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h | |||
@@ -37,7 +37,6 @@ | |||
37 | 37 | ||
38 | /* struct phy_device dev_flags definitions */ | 38 | /* struct phy_device dev_flags definitions */ |
39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 | 39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 |
40 | #define MICREL_PHY_25MHZ_CLK 0x00000002 | ||
41 | 40 | ||
42 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB | 41 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB |
43 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC | 42 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC |
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3..64d25941b329 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -67,6 +67,8 @@ enum { | |||
67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, | 67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, |
68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, | 68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, |
69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, | 69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, |
70 | MLX4_CMD_ACCESS_REG = 0x3b, | ||
71 | |||
70 | /*master notify fw on finish for slave's flr*/ | 72 | /*master notify fw on finish for slave's flr*/ |
71 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, | 73 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, |
72 | MLX4_CMD_GET_OP_REQ = 0x59, | 74 | MLX4_CMD_GET_OP_REQ = 0x59, |
@@ -197,6 +199,33 @@ enum { | |||
197 | MLX4_CMD_NATIVE | 199 | MLX4_CMD_NATIVE |
198 | }; | 200 | }; |
199 | 201 | ||
202 | /* | ||
203 | * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - | ||
204 | * Receive checksum value is reported in CQE also for non TCP/UDP packets. | ||
205 | * | ||
206 | * MLX4_RX_CSUM_MODE_L4 - | ||
207 | * L4_CSUM bit in CQE, which indicates whether or not L4 checksum | ||
208 | * was validated correctly, is supported. | ||
209 | * | ||
210 | * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - | ||
211 | * IP_OK CQE's field is supported also for non TCP/UDP IP packets. | ||
212 | * | ||
213 | * MLX4_RX_CSUM_MODE_MULTI_VLAN - | ||
214 | * Receive Checksum offload is supported for packets with more than 2 vlan headers. | ||
215 | */ | ||
216 | enum mlx4_rx_csum_mode { | ||
217 | MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, | ||
218 | MLX4_RX_CSUM_MODE_L4 = 1UL << 1, | ||
219 | MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, | ||
220 | MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 | ||
221 | }; | ||
222 | |||
223 | struct mlx4_config_dev_params { | ||
224 | u16 vxlan_udp_dport; | ||
225 | u8 rx_csum_flags_port_1; | ||
226 | u8 rx_csum_flags_port_2; | ||
227 | }; | ||
228 | |||
200 | struct mlx4_dev; | 229 | struct mlx4_dev; |
201 | 230 | ||
202 | struct mlx4_cmd_mailbox { | 231 | struct mlx4_cmd_mailbox { |
@@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); | |||
248 | int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); | 277 | int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); |
249 | int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); | 278 | int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); |
250 | int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); | 279 | int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); |
280 | int mlx4_config_dev_retrieval(struct mlx4_dev *dev, | ||
281 | struct mlx4_config_dev_params *params); | ||
251 | /* | 282 | /* |
252 | * mlx4_get_slave_default_vlan - | 283 | * mlx4_get_slave_default_vlan - |
253 | * return true if VST ( default vlan) | 284 | * return true if VST ( default vlan) |
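The new mlx4_config_dev_retrieval() call and the mlx4_rx_csum_mode flags let the Ethernet driver discover, in a single query, the VXLAN UDP port and the per-port RX checksum behaviour described in the comment block above. A hedged sketch of consuming the result; dev is the usual struct mlx4_dev handle and the decision logic is illustrative:

	static bool my_rx_csum_non_tcp_udp(struct mlx4_dev *dev, u16 *vxlan_port)
	{
		struct mlx4_config_dev_params params;

		if (mlx4_config_dev_retrieval(dev, &params))
			return false;

		*vxlan_port = params.vxlan_udp_dport;
		/* CQE checksum is reported even for non-TCP/UDP packets. */
		return params.rx_csum_flags_port_1 &
		       MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP;
	}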
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227..25c791e295fd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -95,7 +95,7 @@ enum { | |||
95 | 95 | ||
96 | enum { | 96 | enum { |
97 | MLX4_MAX_NUM_PF = 16, | 97 | MLX4_MAX_NUM_PF = 16, |
98 | MLX4_MAX_NUM_VF = 64, | 98 | MLX4_MAX_NUM_VF = 126, |
99 | MLX4_MAX_NUM_VF_P_PORT = 64, | 99 | MLX4_MAX_NUM_VF_P_PORT = 64, |
100 | MLX4_MFUNC_MAX = 80, | 100 | MLX4_MFUNC_MAX = 80, |
101 | MLX4_MAX_EQ_NUM = 1024, | 101 | MLX4_MAX_EQ_NUM = 1024, |
@@ -117,6 +117,14 @@ enum { | |||
117 | MLX4_STEERING_MODE_DEVICE_MANAGED | 117 | MLX4_STEERING_MODE_DEVICE_MANAGED |
118 | }; | 118 | }; |
119 | 119 | ||
120 | enum { | ||
121 | MLX4_STEERING_DMFS_A0_DEFAULT, | ||
122 | MLX4_STEERING_DMFS_A0_DYNAMIC, | ||
123 | MLX4_STEERING_DMFS_A0_STATIC, | ||
124 | MLX4_STEERING_DMFS_A0_DISABLE, | ||
125 | MLX4_STEERING_DMFS_A0_NOT_SUPPORTED | ||
126 | }; | ||
127 | |||
120 | static inline const char *mlx4_steering_mode_str(int steering_mode) | 128 | static inline const char *mlx4_steering_mode_str(int steering_mode) |
121 | { | 129 | { |
122 | switch (steering_mode) { | 130 | switch (steering_mode) { |
@@ -186,7 +194,31 @@ enum { | |||
186 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, | 194 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, |
187 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, | 195 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, |
188 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, | 196 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, |
189 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 | 197 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, |
198 | MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, | ||
199 | MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, | ||
200 | MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, | ||
201 | MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, | ||
202 | MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, | ||
203 | MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 | ||
204 | }; | ||
205 | |||
206 | enum { | ||
207 | MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, | ||
208 | MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 | ||
209 | }; | ||
210 | |||
211 | /* bit enums for an 8-bit flags field indicating special use | ||
212 | * QPs which require special handling in qp_reserve_range. | ||
213 | * Currently, this only includes QPs used by the ETH interface, | ||
214 | * where we expect to use blueflame. These QPs must not have | ||
215 | * bits 6 and 7 set in their qp number. | ||
216 | * | ||
217 | * This enum may use only bits 0..7. | ||
218 | */ | ||
219 | enum { | ||
220 | MLX4_RESERVE_A0_QP = 1 << 6, | ||
221 | MLX4_RESERVE_ETH_BF_QP = 1 << 7, | ||
190 | }; | 222 | }; |
191 | 223 | ||
192 | enum { | 224 | enum { |
@@ -202,7 +234,8 @@ enum { | |||
202 | 234 | ||
203 | enum { | 235 | enum { |
204 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, | 236 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, |
205 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 | 237 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, |
238 | MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 | ||
206 | }; | 239 | }; |
207 | 240 | ||
208 | 241 | ||
@@ -328,6 +361,8 @@ enum { | |||
328 | 361 | ||
329 | enum mlx4_qp_region { | 362 | enum mlx4_qp_region { |
330 | MLX4_QP_REGION_FW = 0, | 363 | MLX4_QP_REGION_FW = 0, |
364 | MLX4_QP_REGION_RSS_RAW_ETH, | ||
365 | MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, | ||
331 | MLX4_QP_REGION_ETH_ADDR, | 366 | MLX4_QP_REGION_ETH_ADDR, |
332 | MLX4_QP_REGION_FC_ADDR, | 367 | MLX4_QP_REGION_FC_ADDR, |
333 | MLX4_QP_REGION_FC_EXCH, | 368 | MLX4_QP_REGION_FC_EXCH, |
@@ -379,6 +414,13 @@ enum { | |||
379 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ | 414 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ |
380 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) | 415 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) |
381 | 416 | ||
417 | enum mlx4_module_id { | ||
418 | MLX4_MODULE_ID_SFP = 0x3, | ||
419 | MLX4_MODULE_ID_QSFP = 0xC, | ||
420 | MLX4_MODULE_ID_QSFP_PLUS = 0xD, | ||
421 | MLX4_MODULE_ID_QSFP28 = 0x11, | ||
422 | }; | ||
423 | |||
382 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 424 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
383 | { | 425 | { |
384 | return (major << 32) | (minor << 16) | subminor; | 426 | return (major << 32) | (minor << 16) | subminor; |
@@ -433,6 +475,7 @@ struct mlx4_caps { | |||
433 | int num_cqs; | 475 | int num_cqs; |
434 | int max_cqes; | 476 | int max_cqes; |
435 | int reserved_cqs; | 477 | int reserved_cqs; |
478 | int num_sys_eqs; | ||
436 | int num_eqs; | 479 | int num_eqs; |
437 | int reserved_eqs; | 480 | int reserved_eqs; |
438 | int num_comp_vectors; | 481 | int num_comp_vectors; |
@@ -449,6 +492,7 @@ struct mlx4_caps { | |||
449 | int reserved_mcgs; | 492 | int reserved_mcgs; |
450 | int num_qp_per_mgm; | 493 | int num_qp_per_mgm; |
451 | int steering_mode; | 494 | int steering_mode; |
495 | int dmfs_high_steer_mode; | ||
452 | int fs_log_max_ucast_qp_range_size; | 496 | int fs_log_max_ucast_qp_range_size; |
453 | int num_pds; | 497 | int num_pds; |
454 | int reserved_pds; | 498 | int reserved_pds; |
@@ -487,6 +531,10 @@ struct mlx4_caps { | |||
487 | u16 hca_core_clock; | 531 | u16 hca_core_clock; |
488 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; | 532 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; |
489 | int tunnel_offload_mode; | 533 | int tunnel_offload_mode; |
534 | u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; | ||
535 | u8 alloc_res_qp_mask; | ||
536 | u32 dmfs_high_rate_qpn_base; | ||
537 | u32 dmfs_high_rate_qpn_range; | ||
490 | }; | 538 | }; |
491 | 539 | ||
492 | struct mlx4_buf_list { | 540 | struct mlx4_buf_list { |
@@ -607,6 +655,11 @@ struct mlx4_cq { | |||
607 | 655 | ||
608 | atomic_t refcount; | 656 | atomic_t refcount; |
609 | struct completion free; | 657 | struct completion free; |
658 | struct { | ||
659 | struct list_head list; | ||
660 | void (*comp)(struct mlx4_cq *); | ||
661 | void *priv; | ||
662 | } tasklet_ctx; | ||
610 | }; | 663 | }; |
611 | 664 | ||
612 | struct mlx4_qp { | 665 | struct mlx4_qp { |
@@ -799,6 +852,26 @@ struct mlx4_init_port_param { | |||
799 | u64 si_guid; | 852 | u64 si_guid; |
800 | }; | 853 | }; |
801 | 854 | ||
855 | #define MAD_IFC_DATA_SZ 192 | ||
856 | /* MAD IFC Mailbox */ | ||
857 | struct mlx4_mad_ifc { | ||
858 | u8 base_version; | ||
859 | u8 mgmt_class; | ||
860 | u8 class_version; | ||
861 | u8 method; | ||
862 | __be16 status; | ||
863 | __be16 class_specific; | ||
864 | __be64 tid; | ||
865 | __be16 attr_id; | ||
866 | __be16 resv; | ||
867 | __be32 attr_mod; | ||
868 | __be64 mkey; | ||
869 | __be16 dr_slid; | ||
870 | __be16 dr_dlid; | ||
871 | u8 reserved[28]; | ||
872 | u8 data[MAD_IFC_DATA_SZ]; | ||
873 | } __packed; | ||
874 | |||
802 | #define mlx4_foreach_port(port, dev, type) \ | 875 | #define mlx4_foreach_port(port, dev, type) \ |
803 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | 876 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ |
804 | if ((type) == (dev)->caps.port_mask[(port)]) | 877 | if ((type) == (dev)->caps.port_mask[(port)]) |
@@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) | |||
835 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) | 908 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) |
836 | { | 909 | { |
837 | return (qpn < dev->phys_caps.base_sqpn + 8 + | 910 | return (qpn < dev->phys_caps.base_sqpn + 8 + |
838 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); | 911 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && |
912 | qpn >= dev->phys_caps.base_sqpn) || | ||
913 | (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); | ||
839 | } | 914 | } |
840 | 915 | ||
841 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) | 916 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) |
@@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
911 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | 986 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, |
912 | unsigned vector, int collapsed, int timestamp_en); | 987 | unsigned vector, int collapsed, int timestamp_en); |
913 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 988 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
914 | 989 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, | |
915 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 990 | int *base, u8 flags); |
916 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 991 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
917 | 992 | ||
918 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, | 993 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, |
@@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
1283 | u64 iova, u64 size, int npages, | 1358 | u64 iova, u64 size, int npages, |
1284 | int page_shift, struct mlx4_mpt_entry *mpt_entry); | 1359 | int page_shift, struct mlx4_mpt_entry *mpt_entry); |
1285 | 1360 | ||
1361 | int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, | ||
1362 | u16 offset, u16 size, u8 *data); | ||
1363 | |||
1286 | /* Returns true if running in low memory profile (kdump kernel) */ | 1364 | /* Returns true if running in low memory profile (kdump kernel) */ |
1287 | static inline bool mlx4_low_memory_profile(void) | 1365 | static inline bool mlx4_low_memory_profile(void) |
1288 | { | 1366 | { |
1289 | return is_kdump_kernel(); | 1367 | return is_kdump_kernel(); |
1290 | } | 1368 | } |
1291 | 1369 | ||
1370 | /* ACCESS REG commands */ | ||
1371 | enum mlx4_access_reg_method { | ||
1372 | MLX4_ACCESS_REG_QUERY = 0x1, | ||
1373 | MLX4_ACCESS_REG_WRITE = 0x2, | ||
1374 | }; | ||
1375 | |||
1376 | /* ACCESS PTYS Reg command */ | ||
1377 | enum mlx4_ptys_proto { | ||
1378 | MLX4_PTYS_IB = 1<<0, | ||
1379 | MLX4_PTYS_EN = 1<<2, | ||
1380 | }; | ||
1381 | |||
1382 | struct mlx4_ptys_reg { | ||
1383 | u8 resrvd1; | ||
1384 | u8 local_port; | ||
1385 | u8 resrvd2; | ||
1386 | u8 proto_mask; | ||
1387 | __be32 resrvd3[2]; | ||
1388 | __be32 eth_proto_cap; | ||
1389 | __be16 ib_width_cap; | ||
1390 | __be16 ib_speed_cap; | ||
1391 | __be32 resrvd4; | ||
1392 | __be32 eth_proto_admin; | ||
1393 | __be16 ib_width_admin; | ||
1394 | __be16 ib_speed_admin; | ||
1395 | __be32 resrvd5; | ||
1396 | __be32 eth_proto_oper; | ||
1397 | __be16 ib_width_oper; | ||
1398 | __be16 ib_speed_oper; | ||
1399 | __be32 resrvd6; | ||
1400 | __be32 eth_proto_lp_adv; | ||
1401 | } __packed; | ||
1402 | |||
1403 | int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, | ||
1404 | enum mlx4_access_reg_method method, | ||
1405 | struct mlx4_ptys_reg *ptys_reg); | ||
1406 | |||
1292 | #endif /* MLX4_DEVICE_H */ | 1407 | #endif /* MLX4_DEVICE_H */ |
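With the extra flags byte on mlx4_qp_reserve_range(), a caller that intends to ring BlueFlame doorbells now says so at reservation time, so the allocator can keep bits 6 and 7 clear in the returned QP numbers, as the comment above the MLX4_RESERVE_* enum explains. A short sketch; the count and alignment are illustrative, and the range is still released with mlx4_qp_release_range(dev, base_qpn, 8) when done:

	/* Reserve eight aligned QPs suitable for BlueFlame TX rings. */
	static int my_reserve_bf_qps(struct mlx4_dev *dev, int *base_qpn)
	{
		return mlx4_qp_reserve_range(dev, 8, 8, base_qpn,
					     MLX4_RESERVE_ETH_BF_QP);
	}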
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf0091..467ccdf94c98 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -120,13 +120,15 @@ enum { | |||
120 | MLX4_RSS_QPC_FLAG_OFFSET = 13, | 120 | MLX4_RSS_QPC_FLAG_OFFSET = 13, |
121 | }; | 121 | }; |
122 | 122 | ||
123 | #define MLX4_EN_RSS_KEY_SIZE 40 | ||
124 | |||
123 | struct mlx4_rss_context { | 125 | struct mlx4_rss_context { |
124 | __be32 base_qpn; | 126 | __be32 base_qpn; |
125 | __be32 default_qpn; | 127 | __be32 default_qpn; |
126 | u16 reserved; | 128 | u16 reserved; |
127 | u8 hash_fn; | 129 | u8 hash_fn; |
128 | u8 flags; | 130 | u8 flags; |
129 | __be32 rss_key[10]; | 131 | __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; |
130 | __be32 base_qpn_udp; | 132 | __be32 base_qpn_udp; |
131 | }; | 133 | }; |
132 | 134 | ||
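MLX4_EN_RSS_KEY_SIZE names the 40-byte RSS hash key that was previously implied by the rss_key[10] array of __be32. A hedged sketch of filling the context, assuming netdev_rss_key_fill() is available and that a plain byte copy matches the configured hash function (a real driver may need per-word byte swapping):

	static void my_fill_rss_key(struct mlx4_rss_context *ctx)
	{
		u8 key[MLX4_EN_RSS_KEY_SIZE];

		netdev_rss_key_fill(key, MLX4_EN_RSS_KEY_SIZE);
		memcpy(ctx->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	}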
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71c..ea4f1c46f761 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -219,23 +219,15 @@ enum { | |||
219 | }; | 219 | }; |
220 | 220 | ||
221 | enum { | 221 | enum { |
222 | MLX5_DEV_CAP_FLAG_RC = 1LL << 0, | ||
223 | MLX5_DEV_CAP_FLAG_UC = 1LL << 1, | ||
224 | MLX5_DEV_CAP_FLAG_UD = 1LL << 2, | ||
225 | MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, | 222 | MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, |
226 | MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, | ||
227 | MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, | 223 | MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, |
228 | MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, | 224 | MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, |
229 | MLX5_DEV_CAP_FLAG_APM = 1LL << 17, | 225 | MLX5_DEV_CAP_FLAG_APM = 1LL << 17, |
230 | MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, | 226 | MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, |
231 | MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, | 227 | MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, |
232 | MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, | ||
233 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, | 228 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, |
234 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, | 229 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, |
235 | MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, | ||
236 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, | 230 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, |
237 | MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, | ||
238 | MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, | ||
239 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, | 231 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, |
240 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, | 232 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, |
241 | }; | 233 | }; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8bef..b1bf41556b32 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -633,14 +633,6 @@ static inline void *mlx5_vzalloc(unsigned long size) | |||
633 | return rtn; | 633 | return rtn; |
634 | } | 634 | } |
635 | 635 | ||
636 | static inline void mlx5_vfree(const void *addr) | ||
637 | { | ||
638 | if (addr && is_vmalloc_addr(addr)) | ||
639 | vfree(addr); | ||
640 | else | ||
641 | kfree(addr); | ||
642 | } | ||
643 | |||
644 | static inline u32 mlx5_base_mkey(const u32 key) | 636 | static inline u32 mlx5_base_mkey(const u32 key) |
645 | { | 637 | { |
646 | return key & 0xffffff00u; | 638 | return key & 0xffffff00u; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index b46461116cd2..c0a67b894c4c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/bit_spinlock.h> | 19 | #include <linux/bit_spinlock.h> |
20 | #include <linux/shrinker.h> | 20 | #include <linux/shrinker.h> |
21 | #include <linux/resource.h> | 21 | #include <linux/resource.h> |
22 | #include <linux/page_ext.h> | ||
22 | 23 | ||
23 | struct mempolicy; | 24 | struct mempolicy; |
24 | struct anon_vma; | 25 | struct anon_vma; |
@@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout; | |||
56 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 57 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
57 | #endif | 58 | #endif |
58 | 59 | ||
60 | /* | ||
61 | * To prevent common memory management code establishing | ||
62 | * a zero page mapping on a read fault. | ||
63 | * This macro should be defined within <asm/pgtable.h>. | ||
64 | * s390 does this to prevent multiplexing of hardware bits | ||
65 | * related to the physical page in case of virtualization. | ||
66 | */ | ||
67 | #ifndef mm_forbids_zeropage | ||
68 | #define mm_forbids_zeropage(X) (0) | ||
69 | #endif | ||
70 | |||
59 | extern unsigned long sysctl_user_reserve_kbytes; | 71 | extern unsigned long sysctl_user_reserve_kbytes; |
60 | extern unsigned long sysctl_admin_reserve_kbytes; | 72 | extern unsigned long sysctl_admin_reserve_kbytes; |
61 | 73 | ||
@@ -128,6 +140,7 @@ extern unsigned int kobjsize(const void *objp); | |||
128 | #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ | 140 | #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ |
129 | #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ | 141 | #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ |
130 | #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ | 142 | #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ |
143 | #define VM_ARCH_2 0x02000000 | ||
131 | #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ | 144 | #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ |
132 | 145 | ||
133 | #ifdef CONFIG_MEM_SOFT_DIRTY | 146 | #ifdef CONFIG_MEM_SOFT_DIRTY |
@@ -155,6 +168,11 @@ extern unsigned int kobjsize(const void *objp); | |||
155 | # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ | 168 | # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ |
156 | #endif | 169 | #endif |
157 | 170 | ||
171 | #if defined(CONFIG_X86) | ||
172 | /* MPX specific bounds table or bounds directory */ | ||
173 | # define VM_MPX VM_ARCH_2 | ||
174 | #endif | ||
175 | |||
158 | #ifndef VM_GROWSUP | 176 | #ifndef VM_GROWSUP |
159 | # define VM_GROWSUP VM_NONE | 177 | # define VM_GROWSUP VM_NONE |
160 | #endif | 178 | #endif |
@@ -2043,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm, | |||
2043 | #endif /* CONFIG_PROC_FS */ | 2061 | #endif /* CONFIG_PROC_FS */ |
2044 | 2062 | ||
2045 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2063 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2046 | extern void kernel_map_pages(struct page *page, int numpages, int enable); | 2064 | extern bool _debug_pagealloc_enabled; |
2065 | extern void __kernel_map_pages(struct page *page, int numpages, int enable); | ||
2066 | |||
2067 | static inline bool debug_pagealloc_enabled(void) | ||
2068 | { | ||
2069 | return _debug_pagealloc_enabled; | ||
2070 | } | ||
2071 | |||
2072 | static inline void | ||
2073 | kernel_map_pages(struct page *page, int numpages, int enable) | ||
2074 | { | ||
2075 | if (!debug_pagealloc_enabled()) | ||
2076 | return; | ||
2077 | |||
2078 | __kernel_map_pages(page, numpages, enable); | ||
2079 | } | ||
2047 | #ifdef CONFIG_HIBERNATION | 2080 | #ifdef CONFIG_HIBERNATION |
2048 | extern bool kernel_page_present(struct page *page); | 2081 | extern bool kernel_page_present(struct page *page); |
2049 | #endif /* CONFIG_HIBERNATION */ | 2082 | #endif /* CONFIG_HIBERNATION */ |
@@ -2077,9 +2110,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, | |||
2077 | void __user *, size_t *, loff_t *); | 2110 | void __user *, size_t *, loff_t *); |
2078 | #endif | 2111 | #endif |
2079 | 2112 | ||
2080 | unsigned long shrink_slab(struct shrink_control *shrink, | 2113 | unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, |
2081 | unsigned long nr_pages_scanned, | 2114 | unsigned long nr_scanned, |
2082 | unsigned long lru_pages); | 2115 | unsigned long nr_eligible); |
2083 | 2116 | ||
2084 | #ifndef CONFIG_MMU | 2117 | #ifndef CONFIG_MMU |
2085 | #define randomize_va_space 0 | 2118 | #define randomize_va_space 0 |
@@ -2138,20 +2171,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, | |||
2138 | unsigned int pages_per_huge_page); | 2171 | unsigned int pages_per_huge_page); |
2139 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | 2172 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
2140 | 2173 | ||
2174 | extern struct page_ext_operations debug_guardpage_ops; | ||
2175 | extern struct page_ext_operations page_poisoning_ops; | ||
2176 | |||
2141 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2177 | #ifdef CONFIG_DEBUG_PAGEALLOC |
2142 | extern unsigned int _debug_guardpage_minorder; | 2178 | extern unsigned int _debug_guardpage_minorder; |
2179 | extern bool _debug_guardpage_enabled; | ||
2143 | 2180 | ||
2144 | static inline unsigned int debug_guardpage_minorder(void) | 2181 | static inline unsigned int debug_guardpage_minorder(void) |
2145 | { | 2182 | { |
2146 | return _debug_guardpage_minorder; | 2183 | return _debug_guardpage_minorder; |
2147 | } | 2184 | } |
2148 | 2185 | ||
2186 | static inline bool debug_guardpage_enabled(void) | ||
2187 | { | ||
2188 | return _debug_guardpage_enabled; | ||
2189 | } | ||
2190 | |||
2149 | static inline bool page_is_guard(struct page *page) | 2191 | static inline bool page_is_guard(struct page *page) |
2150 | { | 2192 | { |
2151 | return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); | 2193 | struct page_ext *page_ext; |
2194 | |||
2195 | if (!debug_guardpage_enabled()) | ||
2196 | return false; | ||
2197 | |||
2198 | page_ext = lookup_page_ext(page); | ||
2199 | return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | ||
2152 | } | 2200 | } |
2153 | #else | 2201 | #else |
2154 | static inline unsigned int debug_guardpage_minorder(void) { return 0; } | 2202 | static inline unsigned int debug_guardpage_minorder(void) { return 0; } |
2203 | static inline bool debug_guardpage_enabled(void) { return false; } | ||
2155 | static inline bool page_is_guard(struct page *page) { return false; } | 2204 | static inline bool page_is_guard(struct page *page) { return false; } |
2156 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 2205 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
2157 | 2206 | ||
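The hunks above turn kernel_map_pages() into a wrapper that only reaches __kernel_map_pages() when debug_pagealloc is enabled at runtime, and move the guard-page bit out of page->debug_flags into struct page_ext. A minimal sketch of the resulting call pattern; demo_free_pages_check() is a hypothetical caller, not code from this diff.

#include <linux/mm.h>
#include <linux/page_ext.h>

static void demo_free_pages_check(struct page *page, int order)
{
	/* Guard status now lives in the page_ext flags, not in struct page. */
	if (page_is_guard(page))
		return;

	/*
	 * With CONFIG_DEBUG_PAGEALLOC the pages are unmapped here; if
	 * debug_pagealloc was left disabled at boot, the wrapper returns
	 * early and __kernel_map_pages() is never called.
	 */
	kernel_map_pages(page, 1 << order, 0);
}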
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e0b286649f1..6d34aa266a8c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/rwsem.h> | 10 | #include <linux/rwsem.h> |
11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/page-debug-flags.h> | ||
14 | #include <linux/uprobes.h> | 13 | #include <linux/uprobes.h> |
15 | #include <linux/page-flags-layout.h> | 14 | #include <linux/page-flags-layout.h> |
16 | #include <asm/page.h> | 15 | #include <asm/page.h> |
@@ -22,6 +21,7 @@ | |||
22 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) | 21 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) |
23 | 22 | ||
24 | struct address_space; | 23 | struct address_space; |
24 | struct mem_cgroup; | ||
25 | 25 | ||
26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) | 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) |
27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ | 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ |
@@ -167,6 +167,10 @@ struct page { | |||
167 | struct page *first_page; /* Compound tail pages */ | 167 | struct page *first_page; /* Compound tail pages */ |
168 | }; | 168 | }; |
169 | 169 | ||
170 | #ifdef CONFIG_MEMCG | ||
171 | struct mem_cgroup *mem_cgroup; | ||
172 | #endif | ||
173 | |||
170 | /* | 174 | /* |
171 | * On machines where all RAM is mapped into kernel address space, | 175 | * On machines where all RAM is mapped into kernel address space, |
172 | * we can simply calculate the virtual address. On machines with | 176 | * we can simply calculate the virtual address. On machines with |
@@ -181,9 +185,6 @@ struct page { | |||
181 | void *virtual; /* Kernel virtual address (NULL if | 185 | void *virtual; /* Kernel virtual address (NULL if |
182 | not kmapped, ie. highmem) */ | 186 | not kmapped, ie. highmem) */ |
183 | #endif /* WANT_PAGE_VIRTUAL */ | 187 | #endif /* WANT_PAGE_VIRTUAL */ |
184 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
185 | unsigned long debug_flags; /* Use atomic bitops on this */ | ||
186 | #endif | ||
187 | 188 | ||
188 | #ifdef CONFIG_KMEMCHECK | 189 | #ifdef CONFIG_KMEMCHECK |
189 | /* | 190 | /* |
@@ -454,6 +455,10 @@ struct mm_struct { | |||
454 | bool tlb_flush_pending; | 455 | bool tlb_flush_pending; |
455 | #endif | 456 | #endif |
456 | struct uprobes_state uprobes_state; | 457 | struct uprobes_state uprobes_state; |
458 | #ifdef CONFIG_X86_INTEL_MPX | ||
459 | /* address of the bounds directory */ | ||
460 | void __user *bd_addr; | ||
461 | #endif | ||
457 | }; | 462 | }; |
458 | 463 | ||
459 | static inline void mm_init_cpumask(struct mm_struct *mm) | 464 | static inline void mm_init_cpumask(struct mm_struct *mm) |
@@ -525,4 +530,12 @@ enum tlb_flush_reason { | |||
525 | NR_TLB_FLUSH_REASONS, | 530 | NR_TLB_FLUSH_REASONS, |
526 | }; | 531 | }; |
527 | 532 | ||
533 | /* | ||
534 | * A swap entry has to fit into a "unsigned long", as the entry is hidden | ||
535 | * in the "index" field of the swapper address space. | ||
536 | */ | ||
537 | typedef struct { | ||
538 | unsigned long val; | ||
539 | } swp_entry_t; | ||
540 | |||
528 | #endif /* _LINUX_MM_TYPES_H */ | 541 | #endif /* _LINUX_MM_TYPES_H */ |
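swp_entry_t now lives in mm_types.h as a bare unsigned long wrapper, sized so a swap entry can be hidden in the page-cache index field. An illustrative round-trip through the accessors from <linux/swapops.h> (which are unchanged by this diff); the demo function itself is hypothetical.

#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/swapops.h>	/* swp_entry(), swp_type(), swp_offset() */

static void demo_swp_entry_roundtrip(void)
{
	/* Pack swap type 1, offset 0x1234 into the opaque unsigned long. */
	swp_entry_t entry = swp_entry(1, 0x1234);

	WARN_ON(swp_type(entry) != 1);
	WARN_ON(swp_offset(entry) != 0x1234);
	/* entry.val is the raw value that fits in page->index. */
}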
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index b0692d28f8e6..4d69c00497bd 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -88,6 +88,9 @@ struct mmc_ext_csd { | |||
88 | unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ | 88 | unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ |
89 | unsigned int boot_ro_lock; /* ro lock support */ | 89 | unsigned int boot_ro_lock; /* ro lock support */ |
90 | bool boot_ro_lockable; | 90 | bool boot_ro_lockable; |
91 | bool ffu_capable; /* Firmware upgrade support */ | ||
92 | #define MMC_FIRMWARE_LEN 8 | ||
93 | u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ | ||
91 | u8 raw_exception_status; /* 54 */ | 94 | u8 raw_exception_status; /* 54 */ |
92 | u8 raw_partition_support; /* 160 */ | 95 | u8 raw_partition_support; /* 160 */ |
93 | u8 raw_rpmb_size_mult; /* 168 */ | 96 | u8 raw_rpmb_size_mult; /* 168 */ |
@@ -509,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) | |||
509 | 512 | ||
510 | #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) | 513 | #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) |
511 | 514 | ||
512 | #define mmc_list_to_card(l) container_of(l, struct mmc_card, node) | 515 | extern int mmc_register_driver(struct device_driver *); |
513 | #define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) | 516 | extern void mmc_unregister_driver(struct device_driver *); |
514 | #define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d) | ||
515 | |||
516 | /* | ||
517 | * MMC device driver (e.g., Flash card, I/O card...) | ||
518 | */ | ||
519 | struct mmc_driver { | ||
520 | struct device_driver drv; | ||
521 | int (*probe)(struct mmc_card *); | ||
522 | void (*remove)(struct mmc_card *); | ||
523 | int (*suspend)(struct mmc_card *); | ||
524 | int (*resume)(struct mmc_card *); | ||
525 | void (*shutdown)(struct mmc_card *); | ||
526 | }; | ||
527 | |||
528 | extern int mmc_register_driver(struct mmc_driver *); | ||
529 | extern void mmc_unregister_driver(struct mmc_driver *); | ||
530 | 517 | ||
531 | extern void mmc_fixup_device(struct mmc_card *card, | 518 | extern void mmc_fixup_device(struct mmc_card *card, |
532 | const struct mmc_fixup *table); | 519 | const struct mmc_fixup *table); |
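With struct mmc_driver and its accessor macros gone, mmc_register_driver() now takes a plain struct device_driver. A hedged sketch of a card driver registering against the new prototype; the driver name and probe body are hypothetical, and real drivers wire up PM and shutdown through the usual driver-core fields rather than the removed mmc_driver callbacks.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mmc/card.h>

static int demo_card_probe(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	dev_info(dev, "bound to card with RCA %u\n", card->rca);
	return 0;
}

static struct device_driver demo_card_driver = {
	.name	= "demo_mmc_card",
	.owner	= THIS_MODULE,
	.probe	= demo_card_probe,
};

static int __init demo_init(void)
{
	return mmc_register_driver(&demo_card_driver);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	mmc_unregister_driver(&demo_card_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");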
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index f206e29f94d7..cb2b0400d284 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h | |||
@@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); | |||
154 | extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, | 154 | extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, |
155 | bool, bool); | 155 | bool, bool); |
156 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); | 156 | extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); |
157 | extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); | 157 | extern int mmc_send_tuning(struct mmc_host *host); |
158 | extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); | ||
158 | 159 | ||
159 | #define MMC_ERASE_ARG 0x00000000 | 160 | #define MMC_ERASE_ARG 0x00000000 |
160 | #define MMC_SECURE_ERASE_ARG 0x80000000 | 161 | #define MMC_SECURE_ERASE_ARG 0x80000000 |
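mmc_send_ext_csd(), which filled a caller-provided buffer, is replaced by mmc_get_ext_csd(), which hands back a freshly allocated EXT_CSD through the **new_ext_csd argument. A hedged usage sketch; the assumption that the caller owns the buffer and releases it with kfree() is not visible in this header, and the demo function is hypothetical.

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

static int demo_read_fw_config(struct mmc_card *card)
{
	u8 *ext_csd;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	pr_info("FW_CONFIG: 0x%02x\n", ext_csd[EXT_CSD_FW_CONFIG]);

	kfree(ext_csd);		/* assumption: the caller frees the buffer */
	return 0;
}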
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 001366927cf4..42b724e8d503 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h | |||
@@ -54,6 +54,7 @@ struct mmc_data; | |||
54 | * transfer is in progress. | 54 | * transfer is in progress. |
55 | * @use_dma: Whether DMA channel is initialized or not. | 55 | * @use_dma: Whether DMA channel is initialized or not. |
56 | * @using_dma: Whether DMA is in use for the current transfer. | 56 | * @using_dma: Whether DMA is in use for the current transfer. |
57 | * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. | ||
57 | * @sg_dma: Bus address of DMA buffer. | 58 | * @sg_dma: Bus address of DMA buffer. |
58 | * @sg_cpu: Virtual address of DMA buffer. | 59 | * @sg_cpu: Virtual address of DMA buffer. |
59 | * @dma_ops: Pointer to platform-specific DMA callbacks. | 60 | * @dma_ops: Pointer to platform-specific DMA callbacks. |
@@ -96,6 +97,7 @@ struct mmc_data; | |||
96 | * @quirks: Set of quirks that apply to specific versions of the IP. | 97 | * @quirks: Set of quirks that apply to specific versions of the IP. |
97 | * @irq_flags: The flags to be passed to request_irq. | 98 | * @irq_flags: The flags to be passed to request_irq. |
98 | * @irq: The irq value to be passed to request_irq. | 99 | * @irq: The irq value to be passed to request_irq. |
100 | * @sdio_id0: Number of slot0 in the SDIO interrupt registers. | ||
99 | * | 101 | * |
100 | * Locking | 102 | * Locking |
101 | * ======= | 103 | * ======= |
@@ -135,11 +137,11 @@ struct dw_mci { | |||
135 | struct mmc_command stop_abort; | 137 | struct mmc_command stop_abort; |
136 | unsigned int prev_blksz; | 138 | unsigned int prev_blksz; |
137 | unsigned char timing; | 139 | unsigned char timing; |
138 | struct workqueue_struct *card_workqueue; | ||
139 | 140 | ||
140 | /* DMA interface members*/ | 141 | /* DMA interface members*/ |
141 | int use_dma; | 142 | int use_dma; |
142 | int using_dma; | 143 | int using_dma; |
144 | int dma_64bit_address; | ||
143 | 145 | ||
144 | dma_addr_t sg_dma; | 146 | dma_addr_t sg_dma; |
145 | void *sg_cpu; | 147 | void *sg_cpu; |
@@ -154,7 +156,6 @@ struct dw_mci { | |||
154 | u32 stop_cmdr; | 156 | u32 stop_cmdr; |
155 | u32 dir_status; | 157 | u32 dir_status; |
156 | struct tasklet_struct tasklet; | 158 | struct tasklet_struct tasklet; |
157 | struct work_struct card_work; | ||
158 | unsigned long pending_events; | 159 | unsigned long pending_events; |
159 | unsigned long completed_events; | 160 | unsigned long completed_events; |
160 | enum dw_mci_state state; | 161 | enum dw_mci_state state; |
@@ -193,6 +194,8 @@ struct dw_mci { | |||
193 | bool vqmmc_enabled; | 194 | bool vqmmc_enabled; |
194 | unsigned long irq_flags; /* IRQ flags */ | 195 | unsigned long irq_flags; /* IRQ flags */ |
195 | int irq; | 196 | int irq; |
197 | |||
198 | int sdio_id0; | ||
196 | }; | 199 | }; |
197 | 200 | ||
198 | /* DMA ops for Internal/External DMAC interface */ | 201 | /* DMA ops for Internal/External DMAC interface */ |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index df0c15396bbf..9f322706f7cb 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -289,6 +289,7 @@ struct mmc_host { | |||
289 | #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ | 289 | #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ |
290 | #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ | 290 | #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ |
291 | MMC_CAP2_HS400_1_2V) | 291 | MMC_CAP2_HS400_1_2V) |
292 | #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) | ||
292 | #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) | 293 | #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) |
293 | 294 | ||
294 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 295 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 1cd00b3a75b9..49ad7a943638 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
@@ -296,6 +296,7 @@ struct _mmc_csd { | |||
296 | #define EXT_CSD_SANITIZE_START 165 /* W */ | 296 | #define EXT_CSD_SANITIZE_START 165 /* W */ |
297 | #define EXT_CSD_WR_REL_PARAM 166 /* RO */ | 297 | #define EXT_CSD_WR_REL_PARAM 166 /* RO */ |
298 | #define EXT_CSD_RPMB_MULT 168 /* RO */ | 298 | #define EXT_CSD_RPMB_MULT 168 /* RO */ |
299 | #define EXT_CSD_FW_CONFIG 169 /* R/W */ | ||
299 | #define EXT_CSD_BOOT_WP 173 /* R/W */ | 300 | #define EXT_CSD_BOOT_WP 173 /* R/W */ |
300 | #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ | 301 | #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ |
301 | #define EXT_CSD_PART_CONFIG 179 /* R/W */ | 302 | #define EXT_CSD_PART_CONFIG 179 /* R/W */ |
@@ -332,6 +333,8 @@ struct _mmc_csd { | |||
332 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ | 333 | #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ |
333 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ | 334 | #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ |
334 | #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ | 335 | #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ |
336 | #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ | ||
337 | #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ | ||
335 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ | 338 | #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ |
336 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ | 339 | #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ |
337 | #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ | 340 | #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ |
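EXT_CSD_FIRMWARE_VERSION pairs with the new fwrev[MMC_FIRMWARE_LEN] field added to struct mmc_ext_csd above. A hedged sketch of how a decoder could copy the eight firmware-version bytes; the actual parsing code in the core is not part of this diff.

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

static void demo_decode_fw_version(struct mmc_card *card, const u8 *ext_csd)
{
	BUILD_BUG_ON(MMC_FIRMWARE_LEN != 8);

	/* Bytes 254..261 of the EXT_CSD carry the firmware version. */
	memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
	       MMC_FIRMWARE_LEN);
}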
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index dba793e3a331..375af80bde7d 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h | |||
@@ -100,6 +100,12 @@ struct sdhci_host { | |||
100 | #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) | 100 | #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) |
101 | /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ | 101 | /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ |
102 | #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) | 102 | #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) |
103 | /* Controller does not support 64-bit DMA */ | ||
104 | #define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) | ||
105 | /* Need to clear the transfer mode register before sending a command */ | ||
106 | #define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10) | ||
107 | /* Capability register bit-63 indicates HS400 support */ | ||
108 | #define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11) | ||
103 | 109 | ||
104 | int irq; /* Device IRQ */ | 110 | int irq; /* Device IRQ */ |
105 | void __iomem *ioaddr; /* Mapped address */ | 111 | void __iomem *ioaddr; /* Mapped address */ |
@@ -130,6 +136,7 @@ struct sdhci_host { | |||
130 | #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ | 136 | #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ |
131 | #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ | 137 | #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ |
132 | #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ | 138 | #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ |
139 | #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ | ||
133 | 140 | ||
134 | unsigned int version; /* SDHCI spec. version */ | 141 | unsigned int version; /* SDHCI spec. version */ |
135 | 142 | ||
@@ -155,12 +162,19 @@ struct sdhci_host { | |||
155 | 162 | ||
156 | int sg_count; /* Mapped sg entries */ | 163 | int sg_count; /* Mapped sg entries */ |
157 | 164 | ||
158 | u8 *adma_desc; /* ADMA descriptor table */ | 165 | void *adma_table; /* ADMA descriptor table */ |
159 | u8 *align_buffer; /* Bounce buffer */ | 166 | void *align_buffer; /* Bounce buffer */ |
167 | |||
168 | size_t adma_table_sz; /* ADMA descriptor table size */ | ||
169 | size_t align_buffer_sz; /* Bounce buffer size */ | ||
160 | 170 | ||
161 | dma_addr_t adma_addr; /* Mapped ADMA descr. table */ | 171 | dma_addr_t adma_addr; /* Mapped ADMA descr. table */ |
162 | dma_addr_t align_addr; /* Mapped bounce buffer */ | 172 | dma_addr_t align_addr; /* Mapped bounce buffer */ |
163 | 173 | ||
174 | unsigned int desc_sz; /* ADMA descriptor size */ | ||
175 | unsigned int align_sz; /* ADMA alignment */ | ||
176 | unsigned int align_mask; /* ADMA alignment mask */ | ||
177 | |||
164 | struct tasklet_struct finish_tasklet; /* Tasklet structures */ | 178 | struct tasklet_struct finish_tasklet; /* Tasklet structures */ |
165 | 179 | ||
166 | struct timer_list timer; /* Timer for timeouts */ | 180 | struct timer_list timer; /* Timer for timeouts */ |
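SDHCI_USE_64_BIT_DMA and SDHCI_QUIRK2_BROKEN_64_BIT_DMA let the core choose 64-bit ADMA descriptors while platform glue can veto it. A hedged sketch of the selection; caps_say_64bit stands in for whatever capability bit the driver really checks, and the descriptor sizes are illustrative, not taken from this diff.

#include <linux/mmc/sdhci.h>

static void demo_pick_dma_mode(struct sdhci_host *host, bool caps_say_64bit)
{
	if (caps_say_64bit &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA))
		host->flags |= SDHCI_USE_64_BIT_DMA;

	/* Illustrative geometry: 64-bit descriptors are simply larger. */
	host->desc_sz = (host->flags & SDHCI_USE_64_BIT_DMA) ? 12 : 8;
}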
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 50f0bc952328..aab032a6ae61 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h | |||
@@ -84,8 +84,6 @@ struct sdio_driver { | |||
84 | struct device_driver drv; | 84 | struct device_driver drv; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | #define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) | ||
88 | |||
89 | /** | 87 | /** |
90 | * SDIO_DEVICE - macro used to describe a specific SDIO device | 88 | * SDIO_DEVICE - macro used to describe a specific SDIO device |
91 | * @vend: the 16 bit manufacturer code | 89 | * @vend: the 16 bit manufacturer code |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 88787bb4b3b9..95243d28a0ee 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -98,11 +98,11 @@ struct mmu_notifier_ops { | |||
98 | /* | 98 | /* |
99 | * invalidate_range_start() and invalidate_range_end() must be | 99 | * invalidate_range_start() and invalidate_range_end() must be |
100 | * paired and are called only when the mmap_sem and/or the | 100 | * paired and are called only when the mmap_sem and/or the |
101 | * locks protecting the reverse maps are held. The subsystem | 101 | * locks protecting the reverse maps are held. If the subsystem |
102 | * must guarantee that no additional references are taken to | 102 | * can't guarantee that no additional references are taken to |
103 | * the pages in the range established between the call to | 103 | * the pages in the range, it has to implement the |
104 | * invalidate_range_start() and the matching call to | 104 | * invalidate_range() notifier to remove any references taken |
105 | * invalidate_range_end(). | 105 | * after invalidate_range_start(). |
106 | * | 106 | * |
107 | * Invalidation of multiple concurrent ranges may be | 107 | * Invalidation of multiple concurrent ranges may be |
108 | * optionally permitted by the driver. Either way the | 108 | * optionally permitted by the driver. Either way the |
@@ -144,6 +144,29 @@ struct mmu_notifier_ops { | |||
144 | void (*invalidate_range_end)(struct mmu_notifier *mn, | 144 | void (*invalidate_range_end)(struct mmu_notifier *mn, |
145 | struct mm_struct *mm, | 145 | struct mm_struct *mm, |
146 | unsigned long start, unsigned long end); | 146 | unsigned long start, unsigned long end); |
147 | |||
148 | /* | ||
149 | * invalidate_range() is either called between | ||
150 | * invalidate_range_start() and invalidate_range_end() when the | ||
151 | * VM has to free pages that were unmapped, but before the | ||
152 | * pages are actually freed, or outside of _start()/_end() when | ||
153 | * a (remote) TLB flush is necessary. | ||
154 | * | ||
155 | * If invalidate_range() is used to manage a non-CPU TLB with | ||
156 | * shared page-tables, it is not necessary to implement the | ||
157 | * invalidate_range_start()/end() notifiers, as | ||
158 | * invalidate_range() already catches the points in time when an | ||
159 | * external TLB range needs to be flushed. | ||
160 | * | ||
161 | * The invalidate_range() function is called under the ptl | ||
162 | * spin-lock and not allowed to sleep. | ||
163 | * | ||
164 | * Note that this function might be called with just a sub-range | ||
165 | * of what was passed to invalidate_range_start()/end(), if | ||
166 | * called between those functions. | ||
167 | */ | ||
168 | void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, | ||
169 | unsigned long start, unsigned long end); | ||
147 | }; | 170 | }; |
148 | 171 | ||
149 | /* | 172 | /* |
@@ -154,7 +177,7 @@ struct mmu_notifier_ops { | |||
154 | * Therefore notifier chains can only be traversed when either | 177 | * Therefore notifier chains can only be traversed when either |
155 | * | 178 | * |
156 | * 1. mmap_sem is held. | 179 | * 1. mmap_sem is held. |
157 | * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). | 180 | * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). |
158 | * 3. No other concurrent thread can access the list (release) | 181 | * 3. No other concurrent thread can access the list (release) |
159 | */ | 182 | */ |
160 | struct mmu_notifier { | 183 | struct mmu_notifier { |
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, | |||
190 | unsigned long start, unsigned long end); | 213 | unsigned long start, unsigned long end); |
191 | extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | 214 | extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, |
192 | unsigned long start, unsigned long end); | 215 | unsigned long start, unsigned long end); |
216 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
217 | unsigned long start, unsigned long end); | ||
193 | 218 | ||
194 | static inline void mmu_notifier_release(struct mm_struct *mm) | 219 | static inline void mmu_notifier_release(struct mm_struct *mm) |
195 | { | 220 | { |
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
242 | __mmu_notifier_invalidate_range_end(mm, start, end); | 267 | __mmu_notifier_invalidate_range_end(mm, start, end); |
243 | } | 268 | } |
244 | 269 | ||
270 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
271 | unsigned long start, unsigned long end) | ||
272 | { | ||
273 | if (mm_has_notifiers(mm)) | ||
274 | __mmu_notifier_invalidate_range(mm, start, end); | ||
275 | } | ||
276 | |||
245 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) | 277 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) |
246 | { | 278 | { |
247 | mm->mmu_notifier_mm = NULL; | 279 | mm->mmu_notifier_mm = NULL; |
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
279 | __young; \ | 311 | __young; \ |
280 | }) | 312 | }) |
281 | 313 | ||
314 | #define ptep_clear_flush_notify(__vma, __address, __ptep) \ | ||
315 | ({ \ | ||
316 | unsigned long ___addr = __address & PAGE_MASK; \ | ||
317 | struct mm_struct *___mm = (__vma)->vm_mm; \ | ||
318 | pte_t ___pte; \ | ||
319 | \ | ||
320 | ___pte = ptep_clear_flush(__vma, __address, __ptep); \ | ||
321 | mmu_notifier_invalidate_range(___mm, ___addr, \ | ||
322 | ___addr + PAGE_SIZE); \ | ||
323 | \ | ||
324 | ___pte; \ | ||
325 | }) | ||
326 | |||
327 | #define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ | ||
328 | ({ \ | ||
329 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | ||
330 | struct mm_struct *___mm = (__vma)->vm_mm; \ | ||
331 | pmd_t ___pmd; \ | ||
332 | \ | ||
333 | ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ | ||
334 | mmu_notifier_invalidate_range(___mm, ___haddr, \ | ||
335 | ___haddr + HPAGE_PMD_SIZE); \ | ||
336 | \ | ||
337 | ___pmd; \ | ||
338 | }) | ||
339 | |||
340 | #define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ | ||
341 | ({ \ | ||
342 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | ||
343 | pmd_t ___pmd; \ | ||
344 | \ | ||
345 | ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ | ||
346 | mmu_notifier_invalidate_range(__mm, ___haddr, \ | ||
347 | ___haddr + HPAGE_PMD_SIZE); \ | ||
348 | \ | ||
349 | ___pmd; \ | ||
350 | }) | ||
351 | |||
282 | /* | 352 | /* |
283 | * set_pte_at_notify() sets the pte _after_ running the notifier. | 353 | * set_pte_at_notify() sets the pte _after_ running the notifier. |
284 | * This is safe to start by updating the secondary MMUs, because the primary MMU | 354 | * This is safe to start by updating the secondary MMUs, because the primary MMU |
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
342 | { | 412 | { |
343 | } | 413 | } |
344 | 414 | ||
415 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
416 | unsigned long start, unsigned long end) | ||
417 | { | ||
418 | } | ||
419 | |||
345 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) | 420 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) |
346 | { | 421 | { |
347 | } | 422 | } |
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
352 | 427 | ||
353 | #define ptep_clear_flush_young_notify ptep_clear_flush_young | 428 | #define ptep_clear_flush_young_notify ptep_clear_flush_young |
354 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young | 429 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young |
430 | #define ptep_clear_flush_notify ptep_clear_flush | ||
431 | #define pmdp_clear_flush_notify pmdp_clear_flush | ||
432 | #define pmdp_get_and_clear_notify pmdp_get_and_clear | ||
355 | #define set_pte_at_notify set_pte_at | 433 | #define set_pte_at_notify set_pte_at |
356 | 434 | ||
357 | #endif /* CONFIG_MMU_NOTIFIER */ | 435 | #endif /* CONFIG_MMU_NOTIFIER */ |
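The new invalidate_range() callback targets secondary TLBs that share the CPU page tables (an IOMMU with PASID support, for instance): a user that takes no extra page references can implement just this hook and skip the _start()/_end() pair. A hedged registration sketch; demo_flush_dev_tlb() and the surrounding driver are hypothetical.

#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

/* Hypothetical device glue that flushes the external TLB for a range. */
static void demo_flush_dev_tlb(unsigned long start, unsigned long end);

static void demo_invalidate_range(struct mmu_notifier *mn,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	/* Called under the ptl spinlock, so this must not sleep. */
	demo_flush_dev_tlb(start, end);
}

static const struct mmu_notifier_ops demo_mn_ops = {
	.invalidate_range = demo_invalidate_range,
};

static struct mmu_notifier demo_mn = {
	.ops = &demo_mn_ops,
};

static int demo_attach(struct mm_struct *mm)
{
	return mmu_notifier_register(&demo_mn, mm);
}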
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c04..2f0856d14b21 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -722,8 +722,8 @@ typedef struct pglist_data { | |||
722 | int nr_zones; | 722 | int nr_zones; |
723 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ | 723 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ |
724 | struct page *node_mem_map; | 724 | struct page *node_mem_map; |
725 | #ifdef CONFIG_MEMCG | 725 | #ifdef CONFIG_PAGE_EXTENSION |
726 | struct page_cgroup *node_page_cgroup; | 726 | struct page_ext *node_page_ext; |
727 | #endif | 727 | #endif |
728 | #endif | 728 | #endif |
729 | #ifndef CONFIG_NO_BOOTMEM | 729 | #ifndef CONFIG_NO_BOOTMEM |
@@ -1078,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |||
1078 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | 1078 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
1079 | 1079 | ||
1080 | struct page; | 1080 | struct page; |
1081 | struct page_cgroup; | 1081 | struct page_ext; |
1082 | struct mem_section { | 1082 | struct mem_section { |
1083 | /* | 1083 | /* |
1084 | * This is, logically, a pointer to an array of struct | 1084 | * This is, logically, a pointer to an array of struct |
@@ -1096,12 +1096,12 @@ struct mem_section { | |||
1096 | 1096 | ||
1097 | /* See declaration of similar field in struct zone */ | 1097 | /* See declaration of similar field in struct zone */ |
1098 | unsigned long *pageblock_flags; | 1098 | unsigned long *pageblock_flags; |
1099 | #ifdef CONFIG_MEMCG | 1099 | #ifdef CONFIG_PAGE_EXTENSION |
1100 | /* | 1100 | /* |
1101 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use | 1101 | * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use |
1102 | * section. (see memcontrol.h/page_cgroup.h about this.) | 1102 | * section. (see page_ext.h about this.) |
1103 | */ | 1103 | */ |
1104 | struct page_cgroup *page_cgroup; | 1104 | struct page_ext *page_ext; |
1105 | unsigned long pad; | 1105 | unsigned long pad; |
1106 | #endif | 1106 | #endif |
1107 | /* | 1107 | /* |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 44eeef0da186..745def862580 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -69,7 +69,7 @@ struct ieee1394_device_id { | |||
69 | * @bDeviceClass: Class of device; numbers are assigned | 69 | * @bDeviceClass: Class of device; numbers are assigned |
70 | * by the USB forum. Products may choose to implement classes, | 70 | * by the USB forum. Products may choose to implement classes, |
71 | * or be vendor-specific. Device classes specify behavior of all | 71 | * or be vendor-specific. Device classes specify behavior of all |
72 | * the interfaces on a devices. | 72 | * the interfaces on a device. |
73 | * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. | 73 | * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. |
74 | * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. | 74 | * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. |
75 | * @bInterfaceClass: Class of interface; numbers are assigned | 75 | * @bInterfaceClass: Class of interface; numbers are assigned |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 44f4746d033b..8ac4a68ffae2 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -10,17 +10,12 @@ struct msi_msg { | |||
10 | u32 data; /* 16 bits of msi message data */ | 10 | u32 data; /* 16 bits of msi message data */ |
11 | }; | 11 | }; |
12 | 12 | ||
13 | extern int pci_msi_ignore_mask; | ||
13 | /* Helper functions */ | 14 | /* Helper functions */ |
14 | struct irq_data; | 15 | struct irq_data; |
15 | struct msi_desc; | 16 | struct msi_desc; |
16 | void mask_msi_irq(struct irq_data *data); | ||
17 | void unmask_msi_irq(struct irq_data *data); | ||
18 | void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
19 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | 17 | void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
20 | void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
21 | void read_msi_msg(unsigned int irq, struct msi_msg *msg); | ||
22 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); | 18 | void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
23 | void write_msi_msg(unsigned int irq, struct msi_msg *msg); | ||
24 | 19 | ||
25 | struct msi_desc { | 20 | struct msi_desc { |
26 | struct { | 21 | struct { |
@@ -48,6 +43,52 @@ struct msi_desc { | |||
48 | struct msi_msg msg; | 43 | struct msi_msg msg; |
49 | }; | 44 | }; |
50 | 45 | ||
46 | /* Helpers to hide struct msi_desc implementation details */ | ||
47 | #define msi_desc_to_dev(desc) (&(desc)->dev.dev) | ||
48 | #define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) | ||
49 | #define first_msi_entry(dev) \ | ||
50 | list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) | ||
51 | #define for_each_msi_entry(desc, dev) \ | ||
52 | list_for_each_entry((desc), dev_to_msi_list((dev)), list) | ||
53 | |||
54 | #ifdef CONFIG_PCI_MSI | ||
55 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) | ||
56 | #define for_each_pci_msi_entry(desc, pdev) \ | ||
57 | for_each_msi_entry((desc), &(pdev)->dev) | ||
58 | |||
59 | static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) | ||
60 | { | ||
61 | return desc->dev; | ||
62 | } | ||
63 | #endif /* CONFIG_PCI_MSI */ | ||
64 | |||
65 | void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
66 | void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
67 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); | ||
68 | |||
69 | u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); | ||
70 | u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); | ||
71 | void pci_msi_mask_irq(struct irq_data *data); | ||
72 | void pci_msi_unmask_irq(struct irq_data *data); | ||
73 | |||
74 | /* Conversion helpers. Should be removed after merging */ | ||
75 | static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | ||
76 | { | ||
77 | __pci_write_msi_msg(entry, msg); | ||
78 | } | ||
79 | static inline void write_msi_msg(int irq, struct msi_msg *msg) | ||
80 | { | ||
81 | pci_write_msi_msg(irq, msg); | ||
82 | } | ||
83 | static inline void mask_msi_irq(struct irq_data *data) | ||
84 | { | ||
85 | pci_msi_mask_irq(data); | ||
86 | } | ||
87 | static inline void unmask_msi_irq(struct irq_data *data) | ||
88 | { | ||
89 | pci_msi_unmask_irq(data); | ||
90 | } | ||
91 | |||
51 | /* | 92 | /* |
52 | * The arch hooks to set up msi irqs. Those functions are | 93 | * The arch hooks to set up msi irqs. Those functions are |
53 | * implemented as weak symbols so that they /can/ be overridden by | 94 | * implemented as weak symbols so that they /can/ be overridden by |
@@ -61,18 +102,142 @@ void arch_restore_msi_irqs(struct pci_dev *dev); | |||
61 | 102 | ||
62 | void default_teardown_msi_irqs(struct pci_dev *dev); | 103 | void default_teardown_msi_irqs(struct pci_dev *dev); |
63 | void default_restore_msi_irqs(struct pci_dev *dev); | 104 | void default_restore_msi_irqs(struct pci_dev *dev); |
64 | u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); | ||
65 | u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); | ||
66 | 105 | ||
67 | struct msi_chip { | 106 | struct msi_controller { |
68 | struct module *owner; | 107 | struct module *owner; |
69 | struct device *dev; | 108 | struct device *dev; |
70 | struct device_node *of_node; | 109 | struct device_node *of_node; |
71 | struct list_head list; | 110 | struct list_head list; |
111 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
112 | struct irq_domain *domain; | ||
113 | #endif | ||
72 | 114 | ||
73 | int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, | 115 | int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, |
74 | struct msi_desc *desc); | 116 | struct msi_desc *desc); |
75 | void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); | 117 | void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); |
118 | }; | ||
119 | |||
120 | #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN | ||
121 | |||
122 | #include <linux/irqhandler.h> | ||
123 | #include <asm/msi.h> | ||
124 | |||
125 | struct irq_domain; | ||
126 | struct irq_chip; | ||
127 | struct device_node; | ||
128 | struct msi_domain_info; | ||
129 | |||
130 | /** | ||
131 | * struct msi_domain_ops - MSI interrupt domain callbacks | ||
132 | * @get_hwirq: Retrieve the resulting hw irq number | ||
133 | * @msi_init: Domain specific init function for MSI interrupts | ||
135 | * @msi_free: Domain specific function to free an MSI interrupt | ||
135 | * @msi_check: Callback for verification of the domain/info/dev data | ||
136 | * @msi_prepare: Prepare the allocation of the interrupts in the domain | ||
137 | * @msi_finish: Optional callback to finalize the allocation | ||
138 | * @set_desc: Set the msi descriptor for an interrupt | ||
139 | * @handle_error: Optional error handler if the allocation fails | ||
140 | * | ||
141 | * @get_hwirq, @msi_init and @msi_free are callbacks used by | ||
142 | * msi_create_irq_domain() and related interfaces | ||
143 | * | ||
144 | * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error | ||
145 | * are callbacks used by msi_irq_domain_alloc_irqs() and related | ||
146 | * interfaces which are based on msi_desc. | ||
147 | */ | ||
148 | struct msi_domain_ops { | ||
149 | irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, | ||
150 | msi_alloc_info_t *arg); | ||
151 | int (*msi_init)(struct irq_domain *domain, | ||
152 | struct msi_domain_info *info, | ||
153 | unsigned int virq, irq_hw_number_t hwirq, | ||
154 | msi_alloc_info_t *arg); | ||
155 | void (*msi_free)(struct irq_domain *domain, | ||
156 | struct msi_domain_info *info, | ||
157 | unsigned int virq); | ||
158 | int (*msi_check)(struct irq_domain *domain, | ||
159 | struct msi_domain_info *info, | ||
160 | struct device *dev); | ||
161 | int (*msi_prepare)(struct irq_domain *domain, | ||
162 | struct device *dev, int nvec, | ||
163 | msi_alloc_info_t *arg); | ||
164 | void (*msi_finish)(msi_alloc_info_t *arg, int retval); | ||
165 | void (*set_desc)(msi_alloc_info_t *arg, | ||
166 | struct msi_desc *desc); | ||
167 | int (*handle_error)(struct irq_domain *domain, | ||
168 | struct msi_desc *desc, int error); | ||
169 | }; | ||
170 | |||
171 | /** | ||
172 | * struct msi_domain_info - MSI interrupt domain data | ||
173 | * @flags: Flags to describe features and capabilities | ||
174 | * @ops: The callback data structure | ||
175 | * @chip: Optional: associated interrupt chip | ||
176 | * @chip_data: Optional: associated interrupt chip data | ||
177 | * @handler: Optional: associated interrupt flow handler | ||
178 | * @handler_data: Optional: associated interrupt flow handler data | ||
179 | * @handler_name: Optional: associated interrupt flow handler name | ||
180 | * @data: Optional: domain specific data | ||
181 | */ | ||
182 | struct msi_domain_info { | ||
183 | u32 flags; | ||
184 | struct msi_domain_ops *ops; | ||
185 | struct irq_chip *chip; | ||
186 | void *chip_data; | ||
187 | irq_flow_handler_t handler; | ||
188 | void *handler_data; | ||
189 | const char *handler_name; | ||
190 | void *data; | ||
191 | }; | ||
192 | |||
193 | /* Flags for msi_domain_info */ | ||
194 | enum { | ||
195 | /* | ||
196 | * Init non-implemented ops callbacks with default MSI domain | ||
197 | * callbacks. | ||
198 | */ | ||
199 | MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), | ||
200 | /* | ||
201 | * Init non-implemented chip callbacks with default MSI chip | ||
202 | * callbacks. | ||
203 | */ | ||
204 | MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), | ||
205 | /* Build identity map between hwirq and irq */ | ||
206 | MSI_FLAG_IDENTITY_MAP = (1 << 2), | ||
207 | /* Support multiple PCI MSI interrupts */ | ||
208 | MSI_FLAG_MULTI_PCI_MSI = (1 << 3), | ||
209 | /* Support PCI MSIX interrupts */ | ||
210 | MSI_FLAG_PCI_MSIX = (1 << 4), | ||
76 | }; | 211 | }; |
77 | 212 | ||
213 | int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
214 | bool force); | ||
215 | |||
216 | struct irq_domain *msi_create_irq_domain(struct device_node *of_node, | ||
217 | struct msi_domain_info *info, | ||
218 | struct irq_domain *parent); | ||
219 | int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, | ||
220 | int nvec); | ||
221 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); | ||
222 | struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); | ||
223 | |||
224 | #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ | ||
225 | |||
226 | #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN | ||
227 | void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); | ||
228 | struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, | ||
229 | struct msi_domain_info *info, | ||
230 | struct irq_domain *parent); | ||
231 | int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, | ||
232 | int nvec, int type); | ||
233 | void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); | ||
234 | struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, | ||
235 | struct msi_domain_info *info, struct irq_domain *parent); | ||
236 | |||
237 | irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, | ||
238 | struct msi_desc *desc); | ||
239 | int pci_msi_domain_check_cap(struct irq_domain *domain, | ||
240 | struct msi_domain_info *info, struct device *dev); | ||
241 | #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ | ||
242 | |||
78 | #endif /* LINUX_MSI_H */ | 243 | #endif /* LINUX_MSI_H */ |
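The renamed msi_controller and the new msi_domain_info/msi_domain_ops plumbing are meant to be consumed by irqchip drivers building stacked MSI domains. A hedged sketch of the PCI/MSI case using only symbols declared in the hunks above; the parent domain, device-tree node and chip name are placeholders, and it assumes CONFIG_PCI_MSI_IRQ_DOMAIN is enabled.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static struct irq_chip demo_msi_chip = {
	.name		= "demo-MSI",
	.irq_mask	= pci_msi_mask_irq,	/* new names for mask_msi_irq */
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info demo_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &demo_msi_chip,
};

static struct irq_domain *demo_create_msi_domain(struct device_node *node,
						 struct irq_domain *parent)
{
	/* Default domain ops fill in anything demo_msi_info leaves NULL. */
	return pci_msi_create_irq_domain(node, &demo_msi_info, parent);
}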
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b7..8e30685affeb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -47,9 +47,9 @@ enum { | |||
47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ | 47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ |
48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ | 48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ |
49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ | 49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ |
50 | NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ | 50 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ |
51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ | 51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ |
52 | NETIF_F_GSO_MPLS_BIT, | 52 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, |
53 | 53 | ||
54 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ | 54 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ |
55 | NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ | 55 | NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ |
@@ -118,7 +118,7 @@ enum { | |||
118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) | 118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) |
119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) | 119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) |
120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) | 120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) |
121 | #define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) | 121 | #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) |
122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) | 122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) |
123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) | 123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) |
124 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) | 124 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) |
@@ -181,7 +181,6 @@ enum { | |||
181 | NETIF_F_GSO_IPIP | \ | 181 | NETIF_F_GSO_IPIP | \ |
182 | NETIF_F_GSO_SIT | \ | 182 | NETIF_F_GSO_SIT | \ |
183 | NETIF_F_GSO_UDP_TUNNEL | \ | 183 | NETIF_F_GSO_UDP_TUNNEL | \ |
184 | NETIF_F_GSO_UDP_TUNNEL_CSUM | \ | 184 | NETIF_F_GSO_UDP_TUNNEL_CSUM) |
185 | NETIF_F_GSO_MPLS) | ||
186 | 185 | ||
187 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 186 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a..c31f74d76ebd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -57,6 +57,8 @@ struct device; | |||
57 | struct phy_device; | 57 | struct phy_device; |
58 | /* 802.11 specific */ | 58 | /* 802.11 specific */ |
59 | struct wireless_dev; | 59 | struct wireless_dev; |
60 | /* 802.15.4 specific */ | ||
61 | struct wpan_dev; | ||
60 | 62 | ||
61 | void netdev_set_default_ethtool_ops(struct net_device *dev, | 63 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
62 | const struct ethtool_ops *ops); | 64 | const struct ethtool_ops *ops); |
@@ -314,6 +316,7 @@ struct napi_struct { | |||
314 | struct net_device *dev; | 316 | struct net_device *dev; |
315 | struct sk_buff *gro_list; | 317 | struct sk_buff *gro_list; |
316 | struct sk_buff *skb; | 318 | struct sk_buff *skb; |
319 | struct hrtimer timer; | ||
317 | struct list_head dev_list; | 320 | struct list_head dev_list; |
318 | struct hlist_node napi_hash_node; | 321 | struct hlist_node napi_hash_node; |
319 | unsigned int napi_id; | 322 | unsigned int napi_id; |
@@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; | |||
386 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | 389 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
387 | 390 | ||
388 | void __napi_schedule(struct napi_struct *n); | 391 | void __napi_schedule(struct napi_struct *n); |
392 | void __napi_schedule_irqoff(struct napi_struct *n); | ||
389 | 393 | ||
390 | static inline bool napi_disable_pending(struct napi_struct *n) | 394 | static inline bool napi_disable_pending(struct napi_struct *n) |
391 | { | 395 | { |
@@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) | |||
420 | __napi_schedule(n); | 424 | __napi_schedule(n); |
421 | } | 425 | } |
422 | 426 | ||
427 | /** | ||
428 | * napi_schedule_irqoff - schedule NAPI poll | ||
429 | * @n: napi context | ||
430 | * | ||
431 | * Variant of napi_schedule(), assuming hard irqs are masked. | ||
432 | */ | ||
433 | static inline void napi_schedule_irqoff(struct napi_struct *n) | ||
434 | { | ||
435 | if (napi_schedule_prep(n)) | ||
436 | __napi_schedule_irqoff(n); | ||
437 | } | ||
438 | |||
423 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ | 439 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
424 | static inline bool napi_reschedule(struct napi_struct *napi) | 440 | static inline bool napi_reschedule(struct napi_struct *napi) |
425 | { | 441 | { |
@@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) | |||
430 | return false; | 446 | return false; |
431 | } | 447 | } |
432 | 448 | ||
449 | void __napi_complete(struct napi_struct *n); | ||
450 | void napi_complete_done(struct napi_struct *n, int work_done); | ||
433 | /** | 451 | /** |
434 | * napi_complete - NAPI processing complete | 452 | * napi_complete - NAPI processing complete |
435 | * @n: napi context | 453 | * @n: napi context |
436 | * | 454 | * |
437 | * Mark NAPI processing as complete. | 455 | * Mark NAPI processing as complete. |
456 | * Consider using napi_complete_done() instead. | ||
438 | */ | 457 | */ |
439 | void __napi_complete(struct napi_struct *n); | 458 | static inline void napi_complete(struct napi_struct *n) |
440 | void napi_complete(struct napi_struct *n); | 459 | { |
460 | return napi_complete_done(n, 0); | ||
461 | } | ||
441 | 462 | ||
442 | /** | 463 | /** |
443 | * napi_by_id - lookup a NAPI by napi_id | 464 | * napi_by_id - lookup a NAPI by napi_id |
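napi_schedule_irqoff() and napi_complete_done() are intended for the classic interrupt/poll split: schedule from the hard interrupt handler (where irqs are already masked) and report the work done on completion so the stack can defer the GRO flush via the new gro_flush_timeout field. A hedged driver-side sketch; demo_rx_clean() and struct demo_priv are hypothetical.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_priv {
	struct napi_struct napi;
};

static int demo_rx_clean(struct napi_struct *napi, int budget);	/* hypothetical RX cleanup */

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_priv *priv = data;

	/* Hard irqs are masked here, so the cheaper variant is safe. */
	napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work = demo_rx_clean(napi, budget);

	if (work < budget)
		napi_complete_done(napi, work);	/* lets GRO honour gro_flush_timeout */

	return work;
}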
@@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); | |||
472 | * Stop NAPI from being scheduled on this context. | 493 | * Stop NAPI from being scheduled on this context. |
473 | * Waits till any outstanding processing completes. | 494 | * Waits till any outstanding processing completes. |
474 | */ | 495 | */ |
475 | static inline void napi_disable(struct napi_struct *n) | 496 | void napi_disable(struct napi_struct *n); |
476 | { | ||
477 | might_sleep(); | ||
478 | set_bit(NAPI_STATE_DISABLE, &n->state); | ||
479 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | ||
480 | msleep(1); | ||
481 | clear_bit(NAPI_STATE_DISABLE, &n->state); | ||
482 | } | ||
483 | 497 | ||
484 | /** | 498 | /** |
485 | * napi_enable - enable NAPI scheduling | 499 | * napi_enable - enable NAPI scheduling |
@@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { | |||
740 | }; | 754 | }; |
741 | #endif | 755 | #endif |
742 | 756 | ||
743 | #define MAX_PHYS_PORT_ID_LEN 32 | 757 | #define MAX_PHYS_ITEM_ID_LEN 32 |
744 | 758 | ||
745 | /* This structure holds a unique identifier to identify the | 759 | /* This structure holds a unique identifier to identify some |
746 | * physical port used by a netdevice. | 760 | * physical item (port for example) used by a netdevice. |
747 | */ | 761 | */ |
748 | struct netdev_phys_port_id { | 762 | struct netdev_phys_item_id { |
749 | unsigned char id[MAX_PHYS_PORT_ID_LEN]; | 763 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; |
750 | unsigned char id_len; | 764 | unsigned char id_len; |
751 | }; | 765 | }; |
752 | 766 | ||
@@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
937 | * | 951 | * |
938 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], | 952 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
939 | * struct net_device *dev, | 953 | * struct net_device *dev, |
940 | * const unsigned char *addr, u16 flags) | 954 | * const unsigned char *addr, u16 vid, u16 flags) |
941 | * Adds an FDB entry to dev for addr. | 955 | * Adds an FDB entry to dev for addr. |
942 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], | 956 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
943 | * struct net_device *dev, | 957 | * struct net_device *dev, |
944 | * const unsigned char *addr) | 958 | * const unsigned char *addr, u16 vid) |
945 | * Deletes the FDB entry from dev corresponding to addr. | 959 | * Deletes the FDB entry from dev corresponding to addr. |
946 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | 960 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, |
947 | * struct net_device *dev, struct net_device *filter_dev, | 961 | * struct net_device *dev, struct net_device *filter_dev, |
@@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
962 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | 976 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. |
963 | * | 977 | * |
964 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | 978 | * int (*ndo_get_phys_port_id)(struct net_device *dev, |
965 | * struct netdev_phys_port_id *ppid); | 979 | * struct netdev_phys_item_id *ppid); |
966 | * Called to get ID of physical port of this device. If driver does | 980 | * Called to get ID of physical port of this device. If driver does |
967 | * not implement this, it is assumed that the hw is not able to have | 981 | * not implement this, it is assumed that the hw is not able to have |
968 | * multiple net devices on single physical port. | 982 | * multiple net devices on single physical port. |
@@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
1004 | * performing GSO on a packet. The device returns true if it is | 1018 | * performing GSO on a packet. The device returns true if it is |
1005 | * able to GSO the packet, false otherwise. If the return value is | 1019 | * able to GSO the packet, false otherwise. If the return value is |
1006 | * false the stack will do software GSO. | 1020 | * false the stack will do software GSO. |
1021 | * | ||
1022 | * int (*ndo_switch_parent_id_get)(struct net_device *dev, | ||
1023 | * struct netdev_phys_item_id *psid); | ||
1024 | * Called to get an ID of the switch chip this port is part of. | ||
1025 | * If driver implements this, it indicates that it represents a port | ||
1026 | * of a switch chip. | ||
1027 | * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); | ||
1028 | * Called to notify switch device port of bridge port STP | ||
1029 | * state change. | ||
1007 | */ | 1030 | */ |
1008 | struct net_device_ops { | 1031 | struct net_device_ops { |
1009 | int (*ndo_init)(struct net_device *dev); | 1032 | int (*ndo_init)(struct net_device *dev); |
@@ -1114,11 +1137,13 @@ struct net_device_ops { | |||
1114 | struct nlattr *tb[], | 1137 | struct nlattr *tb[], |
1115 | struct net_device *dev, | 1138 | struct net_device *dev, |
1116 | const unsigned char *addr, | 1139 | const unsigned char *addr, |
1140 | u16 vid, | ||
1117 | u16 flags); | 1141 | u16 flags); |
1118 | int (*ndo_fdb_del)(struct ndmsg *ndm, | 1142 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
1119 | struct nlattr *tb[], | 1143 | struct nlattr *tb[], |
1120 | struct net_device *dev, | 1144 | struct net_device *dev, |
1121 | const unsigned char *addr); | 1145 | const unsigned char *addr, |
1146 | u16 vid); | ||
1122 | int (*ndo_fdb_dump)(struct sk_buff *skb, | 1147 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1123 | struct netlink_callback *cb, | 1148 | struct netlink_callback *cb, |
1124 | struct net_device *dev, | 1149 | struct net_device *dev, |
@@ -1136,7 +1161,7 @@ struct net_device_ops { | |||
1136 | int (*ndo_change_carrier)(struct net_device *dev, | 1161 | int (*ndo_change_carrier)(struct net_device *dev, |
1137 | bool new_carrier); | 1162 | bool new_carrier); |
1138 | int (*ndo_get_phys_port_id)(struct net_device *dev, | 1163 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
1139 | struct netdev_phys_port_id *ppid); | 1164 | struct netdev_phys_item_id *ppid); |
1140 | void (*ndo_add_vxlan_port)(struct net_device *dev, | 1165 | void (*ndo_add_vxlan_port)(struct net_device *dev, |
1141 | sa_family_t sa_family, | 1166 | sa_family_t sa_family, |
1142 | __be16 port); | 1167 | __be16 port); |
@@ -1155,6 +1180,12 @@ struct net_device_ops { | |||
1155 | int (*ndo_get_lock_subclass)(struct net_device *dev); | 1180 | int (*ndo_get_lock_subclass)(struct net_device *dev); |
1156 | bool (*ndo_gso_check) (struct sk_buff *skb, | 1181 | bool (*ndo_gso_check) (struct sk_buff *skb, |
1157 | struct net_device *dev); | 1182 | struct net_device *dev); |
1183 | #ifdef CONFIG_NET_SWITCHDEV | ||
1184 | int (*ndo_switch_parent_id_get)(struct net_device *dev, | ||
1185 | struct netdev_phys_item_id *psid); | ||
1186 | int (*ndo_switch_port_stp_update)(struct net_device *dev, | ||
1187 | u8 state); | ||
1188 | #endif | ||
1158 | }; | 1189 | }; |
1159 | 1190 | ||
1160 | /** | 1191 | /** |
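The two switchdev ndos added above are only compiled in with CONFIG_NET_SWITCHDEV. A hedged sketch of a port driver filling them in; the 7-byte chip ID and the ops layout are made up for illustration.

#include <linux/netdevice.h>
#include <linux/string.h>

static int demo_switch_parent_id_get(struct net_device *dev,
				     struct netdev_phys_item_id *psid)
{
	/* Any stable per-chip identifier works; this one is invented. */
	static const u8 chip_id[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	memcpy(psid->id, chip_id, sizeof(chip_id));
	psid->id_len = sizeof(chip_id);
	return 0;
}

static int demo_switch_port_stp_update(struct net_device *dev, u8 state)
{
	netdev_dbg(dev, "STP state %u\n", state);
	return 0;	/* the real driver would program the switch here */
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_switch_parent_id_get	= demo_switch_parent_id_get,
	.ndo_switch_port_stp_update	= demo_switch_port_stp_update,
};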
@@ -1216,6 +1247,8 @@ enum netdev_priv_flags { | |||
1216 | IFF_LIVE_ADDR_CHANGE = 1<<20, | 1247 | IFF_LIVE_ADDR_CHANGE = 1<<20, |
1217 | IFF_MACVLAN = 1<<21, | 1248 | IFF_MACVLAN = 1<<21, |
1218 | IFF_XMIT_DST_RELEASE_PERM = 1<<22, | 1249 | IFF_XMIT_DST_RELEASE_PERM = 1<<22, |
1250 | IFF_IPVLAN_MASTER = 1<<23, | ||
1251 | IFF_IPVLAN_SLAVE = 1<<24, | ||
1219 | }; | 1252 | }; |
1220 | 1253 | ||
1221 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1254 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1241,6 +1274,8 @@ enum netdev_priv_flags { | |||
1241 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE | 1274 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE |
1242 | #define IFF_MACVLAN IFF_MACVLAN | 1275 | #define IFF_MACVLAN IFF_MACVLAN |
1243 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM | 1276 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
1277 | #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER | ||
1278 | #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE | ||
1244 | 1279 | ||
1245 | /** | 1280 | /** |
1246 | * struct net_device - The DEVICE structure. | 1281 | * struct net_device - The DEVICE structure. |
@@ -1572,6 +1607,7 @@ struct net_device { | |||
1572 | struct inet6_dev __rcu *ip6_ptr; | 1607 | struct inet6_dev __rcu *ip6_ptr; |
1573 | void *ax25_ptr; | 1608 | void *ax25_ptr; |
1574 | struct wireless_dev *ieee80211_ptr; | 1609 | struct wireless_dev *ieee80211_ptr; |
1610 | struct wpan_dev *ieee802154_ptr; | ||
1575 | 1611 | ||
1576 | /* | 1612 | /* |
1577 | * Cache lines mostly used on receive path (including eth_type_trans()) | 1613 | * Cache lines mostly used on receive path (including eth_type_trans()) |
@@ -1590,6 +1626,7 @@ struct net_device { | |||
1590 | 1626 | ||
1591 | #endif | 1627 | #endif |
1592 | 1628 | ||
1629 | unsigned long gro_flush_timeout; | ||
1593 | rx_handler_func_t __rcu *rx_handler; | 1630 | rx_handler_func_t __rcu *rx_handler; |
1594 | void __rcu *rx_handler_data; | 1631 | void __rcu *rx_handler_data; |
1595 | 1632 | ||
@@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; | |||
2316 | * Incoming packets are placed on per-cpu queues | 2353 | * Incoming packets are placed on per-cpu queues |
2317 | */ | 2354 | */ |
2318 | struct softnet_data { | 2355 | struct softnet_data { |
2319 | struct Qdisc *output_queue; | ||
2320 | struct Qdisc **output_queue_tailp; | ||
2321 | struct list_head poll_list; | 2356 | struct list_head poll_list; |
2322 | struct sk_buff *completion_queue; | ||
2323 | struct sk_buff_head process_queue; | 2357 | struct sk_buff_head process_queue; |
2324 | 2358 | ||
2325 | /* stats */ | 2359 | /* stats */ |
@@ -2327,10 +2361,17 @@ struct softnet_data { | |||
2327 | unsigned int time_squeeze; | 2361 | unsigned int time_squeeze; |
2328 | unsigned int cpu_collision; | 2362 | unsigned int cpu_collision; |
2329 | unsigned int received_rps; | 2363 | unsigned int received_rps; |
2330 | |||
2331 | #ifdef CONFIG_RPS | 2364 | #ifdef CONFIG_RPS |
2332 | struct softnet_data *rps_ipi_list; | 2365 | struct softnet_data *rps_ipi_list; |
2366 | #endif | ||
2367 | #ifdef CONFIG_NET_FLOW_LIMIT | ||
2368 | struct sd_flow_limit __rcu *flow_limit; | ||
2369 | #endif | ||
2370 | struct Qdisc *output_queue; | ||
2371 | struct Qdisc **output_queue_tailp; | ||
2372 | struct sk_buff *completion_queue; | ||
2333 | 2373 | ||
2374 | #ifdef CONFIG_RPS | ||
2334 | /* Elements below can be accessed between CPUs for RPS */ | 2375 | /* Elements below can be accessed between CPUs for RPS */ |
2335 | struct call_single_data csd ____cacheline_aligned_in_smp; | 2376 | struct call_single_data csd ____cacheline_aligned_in_smp; |
2336 | struct softnet_data *rps_ipi_next; | 2377 | struct softnet_data *rps_ipi_next; |
@@ -2342,9 +2383,6 @@ struct softnet_data { | |||
2342 | struct sk_buff_head input_pkt_queue; | 2383 | struct sk_buff_head input_pkt_queue; |
2343 | struct napi_struct backlog; | 2384 | struct napi_struct backlog; |
2344 | 2385 | ||
2345 | #ifdef CONFIG_NET_FLOW_LIMIT | ||
2346 | struct sd_flow_limit __rcu *flow_limit; | ||
2347 | #endif | ||
2348 | }; | 2386 | }; |
2349 | 2387 | ||
2350 | static inline void input_queue_head_incr(struct softnet_data *sd) | 2388 | static inline void input_queue_head_incr(struct softnet_data *sd) |
@@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |||
2748 | } | 2786 | } |
2749 | #endif | 2787 | #endif |
2750 | 2788 | ||
2751 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | ||
2752 | const struct net_device *from_dev) | ||
2753 | { | ||
2754 | int err; | ||
2755 | |||
2756 | err = netif_set_real_num_tx_queues(to_dev, | ||
2757 | from_dev->real_num_tx_queues); | ||
2758 | if (err) | ||
2759 | return err; | ||
2760 | #ifdef CONFIG_SYSFS | ||
2761 | return netif_set_real_num_rx_queues(to_dev, | ||
2762 | from_dev->real_num_rx_queues); | ||
2763 | #else | ||
2764 | return 0; | ||
2765 | #endif | ||
2766 | } | ||
2767 | |||
2768 | #ifdef CONFIG_SYSFS | 2789 | #ifdef CONFIG_SYSFS |
2769 | static inline unsigned int get_netdev_rx_queue_index( | 2790 | static inline unsigned int get_netdev_rx_queue_index( |
2770 | struct netdev_rx_queue *queue) | 2791 | struct netdev_rx_queue *queue) |
@@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); | |||
2864 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | 2885 | int dev_set_mac_address(struct net_device *, struct sockaddr *); |
2865 | int dev_change_carrier(struct net_device *, bool new_carrier); | 2886 | int dev_change_carrier(struct net_device *, bool new_carrier); |
2866 | int dev_get_phys_port_id(struct net_device *dev, | 2887 | int dev_get_phys_port_id(struct net_device *dev, |
2867 | struct netdev_phys_port_id *ppid); | 2888 | struct netdev_phys_item_id *ppid); |
2868 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); | 2889 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); |
2869 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2890 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2870 | struct netdev_queue *txq, int *ret); | 2891 | struct netdev_queue *txq, int *ret); |
@@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, | |||
3425 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); | 3446 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
3426 | void *netdev_lower_dev_get_private(struct net_device *dev, | 3447 | void *netdev_lower_dev_get_private(struct net_device *dev, |
3427 | struct net_device *lower_dev); | 3448 | struct net_device *lower_dev); |
3449 | |||
3450 | /* RSS keys are 40 or 52 bytes long */ | ||
3451 | #define NETDEV_RSS_KEY_LEN 52 | ||
3452 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; | ||
3453 | void netdev_rss_key_fill(void *buffer, size_t len); | ||
3454 | |||
3428 | int dev_get_nest_level(struct net_device *dev, | 3455 | int dev_get_nest_level(struct net_device *dev, |
3429 | bool (*type_check)(struct net_device *dev)); | 3456 | bool (*type_check)(struct net_device *dev)); |
3430 | int skb_checksum_help(struct sk_buff *skb); | 3457 | int skb_checksum_help(struct sk_buff *skb); |
@@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) | |||
3569 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); | 3596 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); |
3570 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); | 3597 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
3571 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | 3598 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); |
3572 | BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); | 3599 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
3573 | 3600 | ||
3574 | return (features & feature) == feature; | 3601 | return (features & feature) == feature; |
3575 | } | 3602 | } |
@@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) | |||
3614 | return dev->priv_flags & IFF_MACVLAN; | 3641 | return dev->priv_flags & IFF_MACVLAN; |
3615 | } | 3642 | } |
3616 | 3643 | ||
3644 | static inline bool netif_is_macvlan_port(struct net_device *dev) | ||
3645 | { | ||
3646 | return dev->priv_flags & IFF_MACVLAN_PORT; | ||
3647 | } | ||
3648 | |||
3649 | static inline bool netif_is_ipvlan(struct net_device *dev) | ||
3650 | { | ||
3651 | return dev->priv_flags & IFF_IPVLAN_SLAVE; | ||
3652 | } | ||
3653 | |||
3654 | static inline bool netif_is_ipvlan_port(struct net_device *dev) | ||
3655 | { | ||
3656 | return dev->priv_flags & IFF_IPVLAN_MASTER; | ||
3657 | } | ||
3658 | |||
3617 | static inline bool netif_is_bond_master(struct net_device *dev) | 3659 | static inline bool netif_is_bond_master(struct net_device *dev) |
3618 | { | 3660 | { |
3619 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | 3661 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; |
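The netdevice.h hunk above adds IFF_IPVLAN_MASTER/IFF_IPVLAN_SLAVE flags with matching netif_is_ipvlan*() helpers, plus a shared RSS key exposed through netdev_rss_key_fill(). A minimal sketch of how a driver might consume these helpers, assuming an illustrative device pointer and local key buffer (the function name is not from the diff):

#include <linux/netdevice.h>

/* Minimal sketch: skip virtual uppers and fill a local RSS key buffer. */
static void example_setup_rss(struct net_device *dev)
{
	u8 rss_key[NETDEV_RSS_KEY_LEN];

	/* ipvlan slaves and macvlan ports carry no RSS state of their own */
	if (netif_is_ipvlan(dev) || netif_is_macvlan_port(dev))
		return;

	/* Copies from the lazily generated system-wide netdev_rss_key */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));

	/* ... program rss_key into the device-specific indirection setup ... */
}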
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 356acc2846fd..022b761dbf0a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -490,6 +490,8 @@ enum { | |||
490 | 490 | ||
491 | /* nfs42 */ | 491 | /* nfs42 */ |
492 | NFSPROC4_CLNT_SEEK, | 492 | NFSPROC4_CLNT_SEEK, |
493 | NFSPROC4_CLNT_ALLOCATE, | ||
494 | NFSPROC4_CLNT_DEALLOCATE, | ||
493 | }; | 495 | }; |
494 | 496 | ||
495 | /* nfs41 types */ | 497 | /* nfs41 types */ |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c72d1ad41ad4..6d627b92df53 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -163,7 +163,7 @@ struct nfs_inode { | |||
163 | */ | 163 | */ |
164 | __be32 cookieverf[2]; | 164 | __be32 cookieverf[2]; |
165 | 165 | ||
166 | unsigned long npages; | 166 | unsigned long nrequests; |
167 | struct nfs_mds_commit_info commit_info; | 167 | struct nfs_mds_commit_info commit_info; |
168 | 168 | ||
169 | /* Open contexts for shared mmap writes */ | 169 | /* Open contexts for shared mmap writes */ |
@@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); | |||
520 | static inline int | 520 | static inline int |
521 | nfs_have_writebacks(struct inode *inode) | 521 | nfs_have_writebacks(struct inode *inode) |
522 | { | 522 | { |
523 | return NFS_I(inode)->npages != 0; | 523 | return NFS_I(inode)->nrequests != 0; |
524 | } | 524 | } |
525 | 525 | ||
526 | /* | 526 | /* |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a32ba0d7a98f..1e37fbb78f7a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -231,5 +231,7 @@ struct nfs_server { | |||
231 | #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) | 231 | #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) |
232 | #define NFS_CAP_SECURITY_LABEL (1U << 18) | 232 | #define NFS_CAP_SECURITY_LABEL (1U << 18) |
233 | #define NFS_CAP_SEEK (1U << 19) | 233 | #define NFS_CAP_SEEK (1U << 19) |
234 | #define NFS_CAP_ALLOCATE (1U << 20) | ||
235 | #define NFS_CAP_DEALLOCATE (1U << 21) | ||
234 | 236 | ||
235 | #endif | 237 | #endif |
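NFS_CAP_ALLOCATE and NFS_CAP_DEALLOCATE follow the existing per-server capability-bit pattern (compare NFS_CAP_SEEK above). A hedged sketch of the usual capability test before issuing the new operations; struct nfs_server and its caps field already exist, the helper itself is illustrative:

#include <linux/nfs_fs_sb.h>

/* Sketch: check the server advertises both fallocate-related caps. */
static bool example_server_can_fallocate(const struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ALLOCATE) &&
	       (server->caps & NFS_CAP_DEALLOCATE);
}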
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 47ebb4fafd87..467c84efb596 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -1243,6 +1243,20 @@ nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) | |||
1243 | #endif /* CONFIG_NFS_V4_1 */ | 1243 | #endif /* CONFIG_NFS_V4_1 */ |
1244 | 1244 | ||
1245 | #ifdef CONFIG_NFS_V4_2 | 1245 | #ifdef CONFIG_NFS_V4_2 |
1246 | struct nfs42_falloc_args { | ||
1247 | struct nfs4_sequence_args seq_args; | ||
1248 | |||
1249 | struct nfs_fh *falloc_fh; | ||
1250 | nfs4_stateid falloc_stateid; | ||
1251 | u64 falloc_offset; | ||
1252 | u64 falloc_length; | ||
1253 | }; | ||
1254 | |||
1255 | struct nfs42_falloc_res { | ||
1256 | struct nfs4_sequence_res seq_res; | ||
1257 | unsigned int status; | ||
1258 | }; | ||
1259 | |||
1246 | struct nfs42_seek_args { | 1260 | struct nfs42_seek_args { |
1247 | struct nfs4_sequence_args seq_args; | 1261 | struct nfs4_sequence_args seq_args; |
1248 | 1262 | ||
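The new nfs42_falloc_args/nfs42_falloc_res pair carries an ALLOCATE or DEALLOCATE request through the NFSv4.2 XDR layer, mirroring the seek structures that follow. A sketch of how the arguments might be populated before encoding; the helper name and the caller-supplied stateid are assumptions, not part of the diff:

#include <linux/nfs_xdr.h>
#include <linux/string.h>

/* Sketch: populate the new argument struct before XDR encoding. */
static void example_fill_falloc_args(struct nfs42_falloc_args *args,
				     struct nfs_fh *fh,
				     const nfs4_stateid *stateid,
				     u64 offset, u64 length)
{
	args->falloc_fh = fh;
	memcpy(&args->falloc_stateid, stateid, sizeof(args->falloc_stateid));
	args->falloc_offset = offset;
	args->falloc_length = length;
}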
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae..167342c2ce6b 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h | |||
@@ -12,10 +12,6 @@ | |||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | 15 | */ |
20 | 16 | ||
21 | #ifndef NL802154_H | 17 | #ifndef NL802154_H |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 2bf403195c09..258945fcabf1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/miscdevice.h> | 20 | #include <linux/miscdevice.h> |
21 | #include <linux/kref.h> | 21 | #include <linux/kref.h> |
22 | #include <linux/blk-mq.h> | ||
22 | 23 | ||
23 | struct nvme_bar { | 24 | struct nvme_bar { |
24 | __u64 cap; /* Controller Capabilities */ | 25 | __u64 cap; /* Controller Capabilities */ |
@@ -38,6 +39,7 @@ struct nvme_bar { | |||
38 | #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) | 39 | #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) |
39 | #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) | 40 | #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) |
40 | #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) | 41 | #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) |
42 | #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) | ||
41 | 43 | ||
42 | enum { | 44 | enum { |
43 | NVME_CC_ENABLE = 1 << 0, | 45 | NVME_CC_ENABLE = 1 << 0, |
@@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout; | |||
70 | */ | 72 | */ |
71 | struct nvme_dev { | 73 | struct nvme_dev { |
72 | struct list_head node; | 74 | struct list_head node; |
73 | struct nvme_queue __rcu **queues; | 75 | struct nvme_queue **queues; |
74 | unsigned short __percpu *io_queue; | 76 | struct request_queue *admin_q; |
77 | struct blk_mq_tag_set tagset; | ||
78 | struct blk_mq_tag_set admin_tagset; | ||
75 | u32 __iomem *dbs; | 79 | u32 __iomem *dbs; |
76 | struct pci_dev *pci_dev; | 80 | struct pci_dev *pci_dev; |
77 | struct dma_pool *prp_page_pool; | 81 | struct dma_pool *prp_page_pool; |
@@ -90,15 +94,16 @@ struct nvme_dev { | |||
90 | struct miscdevice miscdev; | 94 | struct miscdevice miscdev; |
91 | work_func_t reset_workfn; | 95 | work_func_t reset_workfn; |
92 | struct work_struct reset_work; | 96 | struct work_struct reset_work; |
93 | struct work_struct cpu_work; | ||
94 | char name[12]; | 97 | char name[12]; |
95 | char serial[20]; | 98 | char serial[20]; |
96 | char model[40]; | 99 | char model[40]; |
97 | char firmware_rev[8]; | 100 | char firmware_rev[8]; |
98 | u32 max_hw_sectors; | 101 | u32 max_hw_sectors; |
99 | u32 stripe_size; | 102 | u32 stripe_size; |
103 | u32 page_size; | ||
100 | u16 oncs; | 104 | u16 oncs; |
101 | u16 abort_limit; | 105 | u16 abort_limit; |
106 | u8 event_limit; | ||
102 | u8 vwc; | 107 | u8 vwc; |
103 | u8 initialized; | 108 | u8 initialized; |
104 | }; | 109 | }; |
@@ -132,7 +137,6 @@ struct nvme_iod { | |||
132 | int offset; /* Of PRP list */ | 137 | int offset; /* Of PRP list */ |
133 | int nents; /* Used in scatterlist */ | 138 | int nents; /* Used in scatterlist */ |
134 | int length; /* Of data, in bytes */ | 139 | int length; /* Of data, in bytes */ |
135 | unsigned long start_time; | ||
136 | dma_addr_t first_dma; | 140 | dma_addr_t first_dma; |
137 | struct list_head node; | 141 | struct list_head node; |
138 | struct scatterlist sg[0]; | 142 | struct scatterlist sg[0]; |
@@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | |||
150 | */ | 154 | */ |
151 | void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); | 155 | void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); |
152 | 156 | ||
153 | int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t); | 157 | int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); |
154 | struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, | 158 | struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, |
155 | unsigned long addr, unsigned length); | 159 | unsigned long addr, unsigned length); |
156 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, | 160 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, |
157 | struct nvme_iod *iod); | 161 | struct nvme_iod *iod); |
158 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); | 162 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *, |
163 | struct nvme_command *, u32 *); | ||
164 | int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); | ||
159 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, | 165 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, |
160 | u32 *result); | 166 | u32 *result); |
161 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, | 167 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, |
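NVME_CAP_MPSMAX complements the existing NVME_CAP_MPSMIN accessor; both fields encode a host page size as a power of two above 4 KiB. A small sketch of turning a CAP register value into byte sizes (variable names and the print-out are illustrative):

#include <linux/kernel.h>
#include <linux/nvme.h>

/* Sketch: CAP.MPSMIN/MPSMAX encode page sizes as 2^(12 + n) bytes. */
static void example_nvme_page_sizes(u64 cap)
{
	unsigned long min_page = 1UL << (12 + NVME_CAP_MPSMIN(cap));
	unsigned long max_page = 1UL << (12 + NVME_CAP_MPSMAX(cap));

	pr_info("NVMe host page size range: %lu..%lu bytes\n",
		min_page, max_page);
}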
diff --git a/include/linux/of.h b/include/linux/of.h index 29f0adc5f3e4..dfde07e77a63 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/topology.h> | 24 | #include <linux/topology.h> |
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/property.h> | ||
27 | #include <linux/list.h> | ||
26 | 28 | ||
27 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
28 | #include <asm/errno.h> | 30 | #include <asm/errno.h> |
@@ -49,14 +51,13 @@ struct device_node { | |||
49 | const char *type; | 51 | const char *type; |
50 | phandle phandle; | 52 | phandle phandle; |
51 | const char *full_name; | 53 | const char *full_name; |
54 | struct fwnode_handle fwnode; | ||
52 | 55 | ||
53 | struct property *properties; | 56 | struct property *properties; |
54 | struct property *deadprops; /* removed properties */ | 57 | struct property *deadprops; /* removed properties */ |
55 | struct device_node *parent; | 58 | struct device_node *parent; |
56 | struct device_node *child; | 59 | struct device_node *child; |
57 | struct device_node *sibling; | 60 | struct device_node *sibling; |
58 | struct device_node *next; /* next device of same type */ | ||
59 | struct device_node *allnext; /* next in list of all nodes */ | ||
60 | struct kobject kobj; | 61 | struct kobject kobj; |
61 | unsigned long _flags; | 62 | unsigned long _flags; |
62 | void *data; | 63 | void *data; |
@@ -74,11 +75,18 @@ struct of_phandle_args { | |||
74 | uint32_t args[MAX_PHANDLE_ARGS]; | 75 | uint32_t args[MAX_PHANDLE_ARGS]; |
75 | }; | 76 | }; |
76 | 77 | ||
78 | struct of_reconfig_data { | ||
79 | struct device_node *dn; | ||
80 | struct property *prop; | ||
81 | struct property *old_prop; | ||
82 | }; | ||
83 | |||
77 | /* initialize a node */ | 84 | /* initialize a node */ |
78 | extern struct kobj_type of_node_ktype; | 85 | extern struct kobj_type of_node_ktype; |
79 | static inline void of_node_init(struct device_node *node) | 86 | static inline void of_node_init(struct device_node *node) |
80 | { | 87 | { |
81 | kobject_init(&node->kobj, &of_node_ktype); | 88 | kobject_init(&node->kobj, &of_node_ktype); |
89 | node->fwnode.type = FWNODE_OF; | ||
82 | } | 90 | } |
83 | 91 | ||
84 | /* true when node is initialized */ | 92 | /* true when node is initialized */ |
@@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node) | |||
105 | static inline void of_node_put(struct device_node *node) { } | 113 | static inline void of_node_put(struct device_node *node) { } |
106 | #endif /* !CONFIG_OF_DYNAMIC */ | 114 | #endif /* !CONFIG_OF_DYNAMIC */ |
107 | 115 | ||
108 | #ifdef CONFIG_OF | ||
109 | |||
110 | /* Pointer for first entry in chain of all nodes. */ | 116 | /* Pointer for first entry in chain of all nodes. */ |
111 | extern struct device_node *of_allnodes; | 117 | extern struct device_node *of_root; |
112 | extern struct device_node *of_chosen; | 118 | extern struct device_node *of_chosen; |
113 | extern struct device_node *of_aliases; | 119 | extern struct device_node *of_aliases; |
114 | extern struct device_node *of_stdout; | 120 | extern struct device_node *of_stdout; |
115 | extern raw_spinlock_t devtree_lock; | 121 | extern raw_spinlock_t devtree_lock; |
116 | 122 | ||
123 | #ifdef CONFIG_OF | ||
124 | static inline bool is_of_node(struct fwnode_handle *fwnode) | ||
125 | { | ||
126 | return fwnode && fwnode->type == FWNODE_OF; | ||
127 | } | ||
128 | |||
129 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | ||
130 | { | ||
131 | return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; | ||
132 | } | ||
133 | |||
117 | static inline bool of_have_populated_dt(void) | 134 | static inline bool of_have_populated_dt(void) |
118 | { | 135 | { |
119 | return of_allnodes != NULL; | 136 | return of_root != NULL; |
120 | } | 137 | } |
121 | 138 | ||
122 | static inline bool of_node_is_root(const struct device_node *node) | 139 | static inline bool of_node_is_root(const struct device_node *node) |
@@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag | |||
160 | clear_bit(flag, &p->_flags); | 177 | clear_bit(flag, &p->_flags); |
161 | } | 178 | } |
162 | 179 | ||
180 | extern struct device_node *__of_find_all_nodes(struct device_node *prev); | ||
163 | extern struct device_node *of_find_all_nodes(struct device_node *prev); | 181 | extern struct device_node *of_find_all_nodes(struct device_node *prev); |
164 | 182 | ||
165 | /* | 183 | /* |
@@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np) | |||
215 | return np ? np->full_name : "<no-node>"; | 233 | return np ? np->full_name : "<no-node>"; |
216 | } | 234 | } |
217 | 235 | ||
218 | #define for_each_of_allnodes(dn) \ | 236 | #define for_each_of_allnodes_from(from, dn) \ |
219 | for (dn = of_allnodes; dn; dn = dn->allnext) | 237 | for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) |
238 | #define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) | ||
220 | extern struct device_node *of_find_node_by_name(struct device_node *from, | 239 | extern struct device_node *of_find_node_by_name(struct device_node *from, |
221 | const char *name); | 240 | const char *name); |
222 | extern struct device_node *of_find_node_by_type(struct device_node *from, | 241 | extern struct device_node *of_find_node_by_type(struct device_node *from, |
@@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match( | |||
228 | const struct of_device_id *matches, | 247 | const struct of_device_id *matches, |
229 | const struct of_device_id **match); | 248 | const struct of_device_id **match); |
230 | 249 | ||
231 | extern struct device_node *of_find_node_by_path(const char *path); | 250 | extern struct device_node *of_find_node_opts_by_path(const char *path, |
251 | const char **opts); | ||
252 | static inline struct device_node *of_find_node_by_path(const char *path) | ||
253 | { | ||
254 | return of_find_node_opts_by_path(path, NULL); | ||
255 | } | ||
256 | |||
232 | extern struct device_node *of_find_node_by_phandle(phandle handle); | 257 | extern struct device_node *of_find_node_by_phandle(phandle handle); |
233 | extern struct device_node *of_get_parent(const struct device_node *node); | 258 | extern struct device_node *of_get_parent(const struct device_node *node); |
234 | extern struct device_node *of_get_next_parent(struct device_node *node); | 259 | extern struct device_node *of_get_next_parent(struct device_node *node); |
@@ -263,6 +288,10 @@ extern int of_property_read_u32_array(const struct device_node *np, | |||
263 | size_t sz); | 288 | size_t sz); |
264 | extern int of_property_read_u64(const struct device_node *np, | 289 | extern int of_property_read_u64(const struct device_node *np, |
265 | const char *propname, u64 *out_value); | 290 | const char *propname, u64 *out_value); |
291 | extern int of_property_read_u64_array(const struct device_node *np, | ||
292 | const char *propname, | ||
293 | u64 *out_values, | ||
294 | size_t sz); | ||
266 | 295 | ||
267 | extern int of_property_read_string(struct device_node *np, | 296 | extern int of_property_read_string(struct device_node *np, |
268 | const char *propname, | 297 | const char *propname, |
@@ -275,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np, | |||
275 | const char **out_strs, size_t sz, int index); | 304 | const char **out_strs, size_t sz, int index); |
276 | extern int of_device_is_compatible(const struct device_node *device, | 305 | extern int of_device_is_compatible(const struct device_node *device, |
277 | const char *); | 306 | const char *); |
278 | extern int of_device_is_available(const struct device_node *device); | 307 | extern bool of_device_is_available(const struct device_node *device); |
279 | extern const void *of_get_property(const struct device_node *node, | 308 | extern const void *of_get_property(const struct device_node *node, |
280 | const char *name, | 309 | const char *name, |
281 | int *lenp); | 310 | int *lenp); |
@@ -317,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop); | |||
317 | #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 | 346 | #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 |
318 | #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 | 347 | #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 |
319 | 348 | ||
320 | struct of_prop_reconfig { | ||
321 | struct device_node *dn; | ||
322 | struct property *prop; | ||
323 | struct property *old_prop; | ||
324 | }; | ||
325 | |||
326 | extern int of_reconfig_notifier_register(struct notifier_block *); | ||
327 | extern int of_reconfig_notifier_unregister(struct notifier_block *); | ||
328 | extern int of_reconfig_notify(unsigned long, void *); | ||
329 | |||
330 | extern int of_attach_node(struct device_node *); | 349 | extern int of_attach_node(struct device_node *); |
331 | extern int of_detach_node(struct device_node *); | 350 | extern int of_detach_node(struct device_node *); |
332 | 351 | ||
@@ -355,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index); | |||
355 | 374 | ||
356 | #else /* CONFIG_OF */ | 375 | #else /* CONFIG_OF */ |
357 | 376 | ||
377 | static inline bool is_of_node(struct fwnode_handle *fwnode) | ||
378 | { | ||
379 | return false; | ||
380 | } | ||
381 | |||
382 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | ||
383 | { | ||
384 | return NULL; | ||
385 | } | ||
386 | |||
358 | static inline const char* of_node_full_name(const struct device_node *np) | 387 | static inline const char* of_node_full_name(const struct device_node *np) |
359 | { | 388 | { |
360 | return "<no-node>"; | 389 | return "<no-node>"; |
@@ -385,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path) | |||
385 | return NULL; | 414 | return NULL; |
386 | } | 415 | } |
387 | 416 | ||
417 | static inline struct device_node *of_find_node_opts_by_path(const char *path, | ||
418 | const char **opts) | ||
419 | { | ||
420 | return NULL; | ||
421 | } | ||
422 | |||
388 | static inline struct device_node *of_get_parent(const struct device_node *node) | 423 | static inline struct device_node *of_get_parent(const struct device_node *node) |
389 | { | 424 | { |
390 | return NULL; | 425 | return NULL; |
@@ -426,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device, | |||
426 | return 0; | 461 | return 0; |
427 | } | 462 | } |
428 | 463 | ||
429 | static inline int of_device_is_available(const struct device_node *device) | 464 | static inline bool of_device_is_available(const struct device_node *device) |
430 | { | 465 | { |
431 | return 0; | 466 | return false; |
432 | } | 467 | } |
433 | 468 | ||
434 | static inline struct property *of_find_property(const struct device_node *np, | 469 | static inline struct property *of_find_property(const struct device_node *np, |
@@ -477,6 +512,13 @@ static inline int of_property_read_u32_array(const struct device_node *np, | |||
477 | return -ENOSYS; | 512 | return -ENOSYS; |
478 | } | 513 | } |
479 | 514 | ||
515 | static inline int of_property_read_u64_array(const struct device_node *np, | ||
516 | const char *propname, | ||
517 | u64 *out_values, size_t sz) | ||
518 | { | ||
519 | return -ENOSYS; | ||
520 | } | ||
521 | |||
480 | static inline int of_property_read_string(struct device_node *np, | 522 | static inline int of_property_read_string(struct device_node *np, |
481 | const char *propname, | 523 | const char *propname, |
482 | const char **out_string) | 524 | const char **out_string) |
@@ -760,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np, | |||
760 | return of_property_read_u32_array(np, propname, out_value, 1); | 802 | return of_property_read_u32_array(np, propname, out_value, 1); |
761 | } | 803 | } |
762 | 804 | ||
805 | static inline int of_property_read_s32(const struct device_node *np, | ||
806 | const char *propname, | ||
807 | s32 *out_value) | ||
808 | { | ||
809 | return of_property_read_u32(np, propname, (u32 *) out_value); | ||
810 | } | ||
811 | |||
763 | #define of_property_for_each_u32(np, propname, prop, p, u) \ | 812 | #define of_property_for_each_u32(np, propname, prop, p, u) \ |
764 | for (prop = of_find_property(np, propname, NULL), \ | 813 | for (prop = of_find_property(np, propname, NULL), \ |
765 | p = of_prop_next_u32(prop, NULL, &u); \ | 814 | p = of_prop_next_u32(prop, NULL, &u); \ |
@@ -828,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np) | |||
828 | = { .compatible = compat, \ | 877 | = { .compatible = compat, \ |
829 | .data = (fn == (fn_type)NULL) ? fn : fn } | 878 | .data = (fn == (fn_type)NULL) ? fn : fn } |
830 | #else | 879 | #else |
831 | #define _OF_DECLARE(table, name, compat, fn, fn_type) \ | 880 | #define _OF_DECLARE(table, name, compat, fn, fn_type) \ |
832 | static const struct of_device_id __of_table_##name \ | 881 | static const struct of_device_id __of_table_##name \ |
833 | __attribute__((unused)) \ | 882 | __attribute__((unused)) \ |
834 | = { .compatible = compat, \ | 883 | = { .compatible = compat, \ |
@@ -879,7 +928,19 @@ struct of_changeset { | |||
879 | struct list_head entries; | 928 | struct list_head entries; |
880 | }; | 929 | }; |
881 | 930 | ||
931 | enum of_reconfig_change { | ||
932 | OF_RECONFIG_NO_CHANGE = 0, | ||
933 | OF_RECONFIG_CHANGE_ADD, | ||
934 | OF_RECONFIG_CHANGE_REMOVE, | ||
935 | }; | ||
936 | |||
882 | #ifdef CONFIG_OF_DYNAMIC | 937 | #ifdef CONFIG_OF_DYNAMIC |
938 | extern int of_reconfig_notifier_register(struct notifier_block *); | ||
939 | extern int of_reconfig_notifier_unregister(struct notifier_block *); | ||
940 | extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); | ||
941 | extern int of_reconfig_get_state_change(unsigned long action, | ||
942 | struct of_reconfig_data *arg); | ||
943 | |||
883 | extern void of_changeset_init(struct of_changeset *ocs); | 944 | extern void of_changeset_init(struct of_changeset *ocs); |
884 | extern void of_changeset_destroy(struct of_changeset *ocs); | 945 | extern void of_changeset_destroy(struct of_changeset *ocs); |
885 | extern int of_changeset_apply(struct of_changeset *ocs); | 946 | extern int of_changeset_apply(struct of_changeset *ocs); |
@@ -917,9 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, | |||
917 | { | 978 | { |
918 | return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); | 979 | return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); |
919 | } | 980 | } |
920 | #endif | 981 | #else /* CONFIG_OF_DYNAMIC */ |
982 | static inline int of_reconfig_notifier_register(struct notifier_block *nb) | ||
983 | { | ||
984 | return -EINVAL; | ||
985 | } | ||
986 | static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) | ||
987 | { | ||
988 | return -EINVAL; | ||
989 | } | ||
990 | static inline int of_reconfig_notify(unsigned long action, | ||
991 | struct of_reconfig_data *arg) | ||
992 | { | ||
993 | return -EINVAL; | ||
994 | } | ||
995 | static inline int of_reconfig_get_state_change(unsigned long action, | ||
996 | struct of_reconfig_data *arg) | ||
997 | { | ||
998 | return -EINVAL; | ||
999 | } | ||
1000 | #endif /* CONFIG_OF_DYNAMIC */ | ||
921 | 1001 | ||
922 | /* CONFIG_OF_RESOLVE api */ | 1002 | /* CONFIG_OF_RESOLVE api */ |
923 | extern int of_resolve_phandles(struct device_node *tree); | 1003 | extern int of_resolve_phandles(struct device_node *tree); |
924 | 1004 | ||
1005 | /** | ||
1006 | * of_device_is_system_power_controller - Tells whether a "system-power-controller" property is present for the device_node | ||
1007 | * @np: Pointer to the given device_node | ||
1008 | * | ||
1009 | * Return: true if present, false otherwise | ||
1010 | */ | ||
1011 | static inline bool of_device_is_system_power_controller(const struct device_node *np) | ||
1012 | { | ||
1013 | return of_property_read_bool(np, "system-power-controller"); | ||
1014 | } | ||
1015 | |||
1016 | /** | ||
1017 | * Overlay support | ||
1018 | */ | ||
1019 | |||
1020 | #ifdef CONFIG_OF_OVERLAY | ||
1021 | |||
1022 | /* ID based overlays; the API for external users */ | ||
1023 | int of_overlay_create(struct device_node *tree); | ||
1024 | int of_overlay_destroy(int id); | ||
1025 | int of_overlay_destroy_all(void); | ||
1026 | |||
1027 | #else | ||
1028 | |||
1029 | static inline int of_overlay_create(struct device_node *tree) | ||
1030 | { | ||
1031 | return -ENOTSUPP; | ||
1032 | } | ||
1033 | |||
1034 | static inline int of_overlay_destroy(int id) | ||
1035 | { | ||
1036 | return -ENOTSUPP; | ||
1037 | } | ||
1038 | |||
1039 | static inline int of_overlay_destroy_all(void) | ||
1040 | { | ||
1041 | return -ENOTSUPP; | ||
1042 | } | ||
1043 | |||
1044 | #endif | ||
1045 | |||
925 | #endif /* _LINUX_OF_H */ | 1046 | #endif /* _LINUX_OF_H */ |
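Several of the of.h additions are easiest to read from the caller's side: of_find_node_opts_by_path() splits an optional ":options" suffix off a path or alias, of_property_read_u64_array()/of_property_read_s32() extend the property readers, and of_overlay_create()/of_overlay_destroy() manage ID-based overlays. A hedged usage sketch; the node path, property names and error handling below are illustrative only:

#include <linux/of.h>

static int example_of_usage(struct device_node *overlay)
{
	struct device_node *np;
	const char *opts = NULL;
	u64 ranges[2];
	s32 temp_offset;
	int ovl_id;

	/* "serial0:115200n8"-style paths return the options part in opts */
	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
	if (!np)
		return -ENODEV;

	/* New 64-bit array and signed 32-bit property accessors */
	if (of_property_read_u64_array(np, "example-ranges", ranges, 2))
		ranges[0] = ranges[1] = 0;
	if (of_property_read_s32(np, "example-offset", &temp_offset))
		temp_offset = 0;
	of_node_put(np);

	/* ID-based overlay API: create returns an id, destroy takes it back */
	ovl_id = of_overlay_create(overlay);
	if (ovl_id < 0)
		return ovl_id;
	return of_overlay_destroy(ovl_id);
}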
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8cb14eb393d6..d88e81be6368 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h | |||
@@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index, | |||
106 | struct resource *r); | 106 | struct resource *r); |
107 | void __iomem *of_iomap(struct device_node *node, int index); | 107 | void __iomem *of_iomap(struct device_node *node, int index); |
108 | void __iomem *of_io_request_and_map(struct device_node *device, | 108 | void __iomem *of_io_request_and_map(struct device_node *device, |
109 | int index, char *name); | 109 | int index, const char *name); |
110 | #else | 110 | #else |
111 | 111 | ||
112 | #include <linux/io.h> | 112 | #include <linux/io.h> |
@@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) | |||
123 | } | 123 | } |
124 | 124 | ||
125 | static inline void __iomem *of_io_request_and_map(struct device_node *device, | 125 | static inline void __iomem *of_io_request_and_map(struct device_node *device, |
126 | int index, char *name) | 126 | int index, const char *name) |
127 | { | 127 | { |
128 | return IOMEM_ERR_PTR(-EINVAL); | 128 | return IOMEM_ERR_PTR(-EINVAL); |
129 | } | 129 | } |
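of_io_request_and_map() now takes a const char *name, matching how callers typically pass a string literal for the requested region. A minimal caller sketch; the node and resource name are illustrative:

#include <linux/of_address.h>

static void __iomem *example_map_regs(struct device_node *np)
{
	/* Requests the memory region and ioremaps it in one call;
	 * returns an ERR_PTR() value on failure. */
	return of_io_request_and_map(np, 0, "example-regs");
}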
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 1fd207e7a847..ce0e5abeb454 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -59,13 +59,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, | |||
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) | 61 | #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) |
62 | int of_pci_msi_chip_add(struct msi_chip *chip); | 62 | int of_pci_msi_chip_add(struct msi_controller *chip); |
63 | void of_pci_msi_chip_remove(struct msi_chip *chip); | 63 | void of_pci_msi_chip_remove(struct msi_controller *chip); |
64 | struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); | 64 | struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); |
65 | #else | 65 | #else |
66 | static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } | 66 | static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } |
67 | static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } | 67 | static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } |
68 | static inline struct msi_chip * | 68 | static inline struct msi_controller * |
69 | of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } | 69 | of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } |
70 | #endif | 70 | #endif |
71 | 71 | ||
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index c65a18a0cfdf..7e09244bb679 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h | |||
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); | |||
39 | /* for building the device tree */ | 39 | /* for building the device tree */ |
40 | extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); | 40 | extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); |
41 | 41 | ||
42 | extern void (*of_pdt_build_more)(struct device_node *dp, | 42 | extern void (*of_pdt_build_more)(struct device_node *dp); |
43 | struct device_node ***nextp); | ||
44 | 43 | ||
45 | #endif /* _LINUX_OF_PDT_H */ | 44 | #endif /* _LINUX_OF_PDT_H */ |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index c2b0627a2317..8a860f096c35 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, | |||
84 | static inline void of_platform_depopulate(struct device *parent) { } | 84 | static inline void of_platform_depopulate(struct device *parent) { } |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | #ifdef CONFIG_OF_DYNAMIC | ||
88 | extern void of_platform_register_reconfig_notifier(void); | ||
89 | #else | ||
90 | static inline void of_platform_register_reconfig_notifier(void) { } | ||
91 | #endif | ||
92 | |||
87 | #endif /* _LINUX_OF_PLATFORM_H */ | 93 | #endif /* _LINUX_OF_PLATFORM_H */ |
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h new file mode 100644 index 000000000000..c2080eebbb47 --- /dev/null +++ b/include/linux/omap-gpmc.h | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * OMAP GPMC (General Purpose Memory Controller) defines | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the | ||
6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
7 | * option) any later version. | ||
8 | */ | ||
9 | |||
10 | /* Maximum Number of Chip Selects */ | ||
11 | #define GPMC_CS_NUM 8 | ||
12 | |||
13 | #define GPMC_CONFIG_WP 0x00000005 | ||
14 | |||
15 | #define GPMC_IRQ_FIFOEVENTENABLE 0x01 | ||
16 | #define GPMC_IRQ_COUNT_EVENT 0x02 | ||
17 | |||
18 | #define GPMC_BURST_4 4 /* 4 word burst */ | ||
19 | #define GPMC_BURST_8 8 /* 8 word burst */ | ||
20 | #define GPMC_BURST_16 16 /* 16 word burst */ | ||
21 | #define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ | ||
22 | #define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ | ||
23 | #define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ | ||
24 | #define GPMC_MUX_AD 2 /* Addr-Data multiplex */ | ||
25 | |||
26 | /* bool type time settings */ | ||
27 | struct gpmc_bool_timings { | ||
28 | bool cycle2cyclediffcsen; | ||
29 | bool cycle2cyclesamecsen; | ||
30 | bool we_extra_delay; | ||
31 | bool oe_extra_delay; | ||
32 | bool adv_extra_delay; | ||
33 | bool cs_extra_delay; | ||
34 | bool time_para_granularity; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * Note that all values in this struct are in nanoseconds except sync_clk | ||
39 | * (which is in picoseconds), while the register values are in gpmc_fck cycles. | ||
40 | */ | ||
41 | struct gpmc_timings { | ||
42 | /* Minimum clock period for synchronous mode (in picoseconds) */ | ||
43 | u32 sync_clk; | ||
44 | |||
45 | /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ | ||
46 | u32 cs_on; /* Assertion time */ | ||
47 | u32 cs_rd_off; /* Read deassertion time */ | ||
48 | u32 cs_wr_off; /* Write deassertion time */ | ||
49 | |||
50 | /* ADV signal timings corresponding to GPMC_CONFIG3 */ | ||
51 | u32 adv_on; /* Assertion time */ | ||
52 | u32 adv_rd_off; /* Read deassertion time */ | ||
53 | u32 adv_wr_off; /* Write deassertion time */ | ||
54 | |||
55 | /* WE signals timings corresponding to GPMC_CONFIG4 */ | ||
56 | u32 we_on; /* WE assertion time */ | ||
57 | u32 we_off; /* WE deassertion time */ | ||
58 | |||
59 | /* OE signals timings corresponding to GPMC_CONFIG4 */ | ||
60 | u32 oe_on; /* OE assertion time */ | ||
61 | u32 oe_off; /* OE deassertion time */ | ||
62 | |||
63 | /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ | ||
64 | u32 page_burst_access; /* Multiple access word delay */ | ||
65 | u32 access; /* Start-cycle to first data valid delay */ | ||
66 | u32 rd_cycle; /* Total read cycle time */ | ||
67 | u32 wr_cycle; /* Total write cycle time */ | ||
68 | |||
69 | u32 bus_turnaround; | ||
70 | u32 cycle2cycle_delay; | ||
71 | |||
72 | u32 wait_monitoring; | ||
73 | u32 clk_activation; | ||
74 | |||
75 | /* The following are only on OMAP3430 */ | ||
76 | u32 wr_access; /* WRACCESSTIME */ | ||
77 | u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ | ||
78 | |||
79 | struct gpmc_bool_timings bool_timings; | ||
80 | }; | ||
81 | |||
82 | /* Device timings in picoseconds */ | ||
83 | struct gpmc_device_timings { | ||
84 | u32 t_ceasu; /* address setup to CS valid */ | ||
85 | u32 t_avdasu; /* address setup to ADV valid */ | ||
86 | /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is | ||
87 | * of tusb using these timings even for sync whilst | ||
88 | * ideally for adv_rd/(wr)_off it should have considered | ||
89 | * t_avdh instead. This indirectly necessitates r/w | ||
90 | * variations of t_avdp as it is possible to have one | ||
91 | * sync & other async | ||
92 | */ | ||
93 | u32 t_avdp_r; /* ADV low time (what about t_cer ?) */ | ||
94 | u32 t_avdp_w; | ||
95 | u32 t_aavdh; /* address hold time */ | ||
96 | u32 t_oeasu; /* address setup to OE valid */ | ||
97 | u32 t_aa; /* access time from ADV assertion */ | ||
98 | u32 t_iaa; /* initial access time */ | ||
99 | u32 t_oe; /* access time from OE assertion */ | ||
100 | u32 t_ce; /* access time from CS assertion */ | ||
101 | u32 t_rd_cycle; /* read cycle time */ | ||
102 | u32 t_cez_r; /* read CS deassertion to high Z */ | ||
103 | u32 t_cez_w; /* write CS deassertion to high Z */ | ||
104 | u32 t_oez; /* OE deassertion to high Z */ | ||
105 | u32 t_weasu; /* address setup to WE valid */ | ||
106 | u32 t_wpl; /* write assertion time */ | ||
107 | u32 t_wph; /* write deassertion time */ | ||
108 | u32 t_wr_cycle; /* write cycle time */ | ||
109 | |||
110 | u32 clk; | ||
111 | u32 t_bacc; /* burst access valid clock to output delay */ | ||
112 | u32 t_ces; /* CS setup time to clk */ | ||
113 | u32 t_avds; /* ADV setup time to clk */ | ||
114 | u32 t_avdh; /* ADV hold time from clk */ | ||
115 | u32 t_ach; /* address hold time from clk */ | ||
116 | u32 t_rdyo; /* clk to ready valid */ | ||
117 | |||
118 | u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ | ||
119 | u32 t_ce_avd; /* CS on to ADV on delay */ | ||
120 | |||
121 | /* XXX: check the possibility of combining | ||
122 | * cyc_aavdh_oe & cyc_aavdh_we | ||
123 | */ | ||
124 | u8 cyc_aavdh_oe;/* read address hold time in cycles */ | ||
125 | u8 cyc_aavdh_we;/* write address hold time in cycles */ | ||
126 | u8 cyc_oe; /* access time from OE assertion in cycles */ | ||
127 | u8 cyc_wpl; /* write deassertion time in cycles */ | ||
128 | u32 cyc_iaa; /* initial access time in cycles */ | ||
129 | |||
130 | /* extra delays */ | ||
131 | bool ce_xdelay; | ||
132 | bool avd_xdelay; | ||
133 | bool oe_xdelay; | ||
134 | bool we_xdelay; | ||
135 | }; | ||
136 | |||
137 | struct gpmc_settings { | ||
138 | bool burst_wrap; /* enables wrap bursting */ | ||
139 | bool burst_read; /* enables read page/burst mode */ | ||
140 | bool burst_write; /* enables write page/burst mode */ | ||
141 | bool device_nand; /* device is NAND */ | ||
142 | bool sync_read; /* enables synchronous reads */ | ||
143 | bool sync_write; /* enables synchronous writes */ | ||
144 | bool wait_on_read; /* monitor wait on reads */ | ||
145 | bool wait_on_write; /* monitor wait on writes */ | ||
146 | u32 burst_len; /* page/burst length */ | ||
147 | u32 device_width; /* device bus width (8 or 16 bit) */ | ||
148 | u32 mux_add_data; /* multiplex address & data */ | ||
149 | u32 wait_pin; /* wait-pin to be used */ | ||
150 | }; | ||
151 | |||
152 | extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, | ||
153 | struct gpmc_settings *gpmc_s, | ||
154 | struct gpmc_device_timings *dev_t); | ||
155 | |||
156 | struct gpmc_nand_regs; | ||
157 | struct device_node; | ||
158 | |||
159 | extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); | ||
160 | extern int gpmc_get_client_irq(unsigned irq_config); | ||
161 | |||
162 | extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); | ||
163 | |||
164 | extern void gpmc_cs_write_reg(int cs, int idx, u32 val); | ||
165 | extern int gpmc_calc_divider(unsigned int sync_clk); | ||
166 | extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t); | ||
167 | extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); | ||
168 | extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); | ||
169 | extern void gpmc_cs_free(int cs); | ||
170 | extern int gpmc_configure(int cmd, int wval); | ||
171 | extern void gpmc_read_settings_dt(struct device_node *np, | ||
172 | struct gpmc_settings *p); | ||
173 | |||
174 | extern void omap3_gpmc_save_context(void); | ||
175 | extern void omap3_gpmc_restore_context(void); | ||
176 | |||
177 | struct gpmc_timings; | ||
178 | struct omap_nand_platform_data; | ||
179 | struct omap_onenand_platform_data; | ||
180 | |||
181 | #if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) | ||
182 | extern int gpmc_nand_init(struct omap_nand_platform_data *d, | ||
183 | struct gpmc_timings *gpmc_t); | ||
184 | #else | ||
185 | static inline int gpmc_nand_init(struct omap_nand_platform_data *d, | ||
186 | struct gpmc_timings *gpmc_t) | ||
187 | { | ||
188 | return 0; | ||
189 | } | ||
190 | #endif | ||
191 | |||
192 | #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) | ||
193 | extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); | ||
194 | #else | ||
195 | #define board_onenand_data NULL | ||
196 | static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) | ||
197 | { | ||
198 | } | ||
199 | #endif | ||
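The gpmc_settings and gpmc_timings structures above are normally filled from the device tree and then programmed per chip-select. A hedged sketch of the typical sequence using the exported helpers; the chip-select number, DT node and device timings are assumptions supplied by the caller:

#include <linux/omap-gpmc.h>

static int example_gpmc_setup(struct device_node *np, int cs,
			      struct gpmc_device_timings *dev_t)
{
	struct gpmc_settings s;
	struct gpmc_timings t;
	int ret;

	/* Populate sync/burst/mux/wait settings from the DT node */
	gpmc_read_settings_dt(np, &s);

	ret = gpmc_cs_program_settings(cs, &s);
	if (ret)
		return ret;

	/* Convert device timings (picoseconds) into register timings */
	ret = gpmc_calc_timings(&t, &s, dev_t);
	if (ret)
		return ret;

	return gpmc_cs_set_timings(cs, &t);
}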
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index f8322d9cd235..587bbdd31f5a 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h | |||
@@ -10,20 +10,20 @@ | |||
10 | #define OMAP_MAILBOX_H | 10 | #define OMAP_MAILBOX_H |
11 | 11 | ||
12 | typedef u32 mbox_msg_t; | 12 | typedef u32 mbox_msg_t; |
13 | struct omap_mbox; | ||
14 | 13 | ||
15 | typedef int __bitwise omap_mbox_irq_t; | 14 | typedef int __bitwise omap_mbox_irq_t; |
16 | #define IRQ_TX ((__force omap_mbox_irq_t) 1) | 15 | #define IRQ_TX ((__force omap_mbox_irq_t) 1) |
17 | #define IRQ_RX ((__force omap_mbox_irq_t) 2) | 16 | #define IRQ_RX ((__force omap_mbox_irq_t) 2) |
18 | 17 | ||
19 | int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); | 18 | struct mbox_chan; |
19 | struct mbox_client; | ||
20 | 20 | ||
21 | struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); | 21 | struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, |
22 | void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); | 22 | const char *chan_name); |
23 | 23 | ||
24 | void omap_mbox_save_ctx(struct omap_mbox *mbox); | 24 | void omap_mbox_save_ctx(struct mbox_chan *chan); |
25 | void omap_mbox_restore_ctx(struct omap_mbox *mbox); | 25 | void omap_mbox_restore_ctx(struct mbox_chan *chan); |
26 | void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); | 26 | void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); |
27 | void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); | 27 | void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); |
28 | 28 | ||
29 | #endif /* OMAP_MAILBOX_H */ | 29 | #endif /* OMAP_MAILBOX_H */ |
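The mailbox interface now hands out generic struct mbox_chan channels obtained through a struct mbox_client, replacing the old omap_mbox handle and omap_mbox_msg_send(). A simplified sketch of a client acquiring a channel and sending one message; the client setup and channel name are illustrative, and mbox_send_message()/mbox_free_channel() come from the generic mailbox framework (a real client keeps the mbox_client alive for the channel's lifetime):

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/omap-mailbox.h>

/* Sketch: request a channel through a client, send one message, release it. */
static int example_send(struct device *dev, mbox_msg_t msg)
{
	struct mbox_client client = {
		.dev      = dev,
		.tx_block = true,
	};
	struct mbox_chan *chan;
	int ret;

	chan = omap_mbox_request_channel(&client, "example-mbox");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, (void *)(unsigned long)msg);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}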
diff --git a/include/linux/oom.h b/include/linux/oom.h index e8d6e1058723..853698c721f7 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask) | |||
92 | 92 | ||
93 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); | 93 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); |
94 | 94 | ||
95 | static inline bool task_will_free_mem(struct task_struct *task) | ||
96 | { | ||
97 | /* | ||
98 | * A coredumping process may sleep for an extended period in exit_mm(), | ||
99 | * so the oom killer cannot assume that the process will promptly exit | ||
100 | * and release memory. | ||
101 | */ | ||
102 | return (task->flags & PF_EXITING) && | ||
103 | !(task->signal->flags & SIGNAL_GROUP_COREDUMP); | ||
104 | } | ||
105 | |||
95 | /* sysctls */ | 106 | /* sysctls */ |
96 | extern int sysctl_oom_dump_tasks; | 107 | extern int sysctl_oom_dump_tasks; |
97 | extern int sysctl_oom_kill_allocating_task; | 108 | extern int sysctl_oom_kill_allocating_task; |
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h deleted file mode 100644 index 22691f614043..000000000000 --- a/include/linux/page-debug-flags.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | #ifndef LINUX_PAGE_DEBUG_FLAGS_H | ||
2 | #define LINUX_PAGE_DEBUG_FLAGS_H | ||
3 | |||
4 | /* | ||
5 | * page->debug_flags bits: | ||
6 | * | ||
7 | * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to | ||
8 | * implement generic debug pagealloc feature. The pages are filled with | ||
9 | * poison patterns and set this flag after free_pages(). The poisoned | ||
10 | * pages are verified whether the patterns are not corrupted and clear | ||
11 | * the flag before alloc_pages(). | ||
12 | */ | ||
13 | |||
14 | enum page_debug_flags { | ||
15 | PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ | ||
16 | PAGE_DEBUG_FLAG_GUARD, | ||
17 | }; | ||
18 | |||
19 | /* | ||
20 | * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably | ||
21 | * gets turned off when no debug features are enabling it! | ||
22 | */ | ||
23 | |||
24 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
25 | #if !defined(CONFIG_PAGE_POISONING) && \ | ||
26 | !defined(CONFIG_PAGE_GUARD) \ | ||
27 | /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ | ||
28 | #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! | ||
29 | #endif | ||
30 | #endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ | ||
31 | |||
32 | #endif /* LINUX_PAGE_DEBUG_FLAGS_H */ | ||
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 5c831f1eca79..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null | |||
@@ -1,105 +0,0 @@ | |||
1 | #ifndef __LINUX_PAGE_CGROUP_H | ||
2 | #define __LINUX_PAGE_CGROUP_H | ||
3 | |||
4 | enum { | ||
5 | /* flags for mem_cgroup */ | ||
6 | PCG_USED = 0x01, /* This page is charged to a memcg */ | ||
7 | PCG_MEM = 0x02, /* This page holds a memory charge */ | ||
8 | PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ | ||
9 | }; | ||
10 | |||
11 | struct pglist_data; | ||
12 | |||
13 | #ifdef CONFIG_MEMCG | ||
14 | struct mem_cgroup; | ||
15 | |||
16 | /* | ||
17 | * Page Cgroup can be considered as an extended mem_map. | ||
18 | * A page_cgroup page is associated with every page descriptor. The | ||
19 | * page_cgroup helps us identify information about the cgroup | ||
20 | * All page cgroups are allocated at boot or memory hotplug event, | ||
21 | * then the page cgroup for pfn always exists. | ||
22 | */ | ||
23 | struct page_cgroup { | ||
24 | unsigned long flags; | ||
25 | struct mem_cgroup *mem_cgroup; | ||
26 | }; | ||
27 | |||
28 | extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); | ||
29 | |||
30 | #ifdef CONFIG_SPARSEMEM | ||
31 | static inline void page_cgroup_init_flatmem(void) | ||
32 | { | ||
33 | } | ||
34 | extern void page_cgroup_init(void); | ||
35 | #else | ||
36 | extern void page_cgroup_init_flatmem(void); | ||
37 | static inline void page_cgroup_init(void) | ||
38 | { | ||
39 | } | ||
40 | #endif | ||
41 | |||
42 | struct page_cgroup *lookup_page_cgroup(struct page *page); | ||
43 | |||
44 | static inline int PageCgroupUsed(struct page_cgroup *pc) | ||
45 | { | ||
46 | return !!(pc->flags & PCG_USED); | ||
47 | } | ||
48 | #else /* !CONFIG_MEMCG */ | ||
49 | struct page_cgroup; | ||
50 | |||
51 | static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | static inline struct page_cgroup *lookup_page_cgroup(struct page *page) | ||
56 | { | ||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | static inline void page_cgroup_init(void) | ||
61 | { | ||
62 | } | ||
63 | |||
64 | static inline void page_cgroup_init_flatmem(void) | ||
65 | { | ||
66 | } | ||
67 | #endif /* CONFIG_MEMCG */ | ||
68 | |||
69 | #include <linux/swap.h> | ||
70 | |||
71 | #ifdef CONFIG_MEMCG_SWAP | ||
72 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | ||
73 | unsigned short old, unsigned short new); | ||
74 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); | ||
75 | extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); | ||
76 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | ||
77 | extern void swap_cgroup_swapoff(int type); | ||
78 | #else | ||
79 | |||
80 | static inline | ||
81 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) | ||
82 | { | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static inline | ||
87 | unsigned short lookup_swap_cgroup_id(swp_entry_t ent) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static inline int | ||
93 | swap_cgroup_swapon(int type, unsigned long max_pages) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static inline void swap_cgroup_swapoff(int type) | ||
99 | { | ||
100 | return; | ||
101 | } | ||
102 | |||
103 | #endif /* CONFIG_MEMCG_SWAP */ | ||
104 | |||
105 | #endif /* __LINUX_PAGE_CGROUP_H */ | ||
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 000000000000..955421575d16 --- /dev/null +++ b/include/linux/page_counter.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef _LINUX_PAGE_COUNTER_H | ||
2 | #define _LINUX_PAGE_COUNTER_H | ||
3 | |||
4 | #include <linux/atomic.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <asm/page.h> | ||
7 | |||
8 | struct page_counter { | ||
9 | atomic_long_t count; | ||
10 | unsigned long limit; | ||
11 | struct page_counter *parent; | ||
12 | |||
13 | /* legacy */ | ||
14 | unsigned long watermark; | ||
15 | unsigned long failcnt; | ||
16 | }; | ||
17 | |||
18 | #if BITS_PER_LONG == 32 | ||
19 | #define PAGE_COUNTER_MAX LONG_MAX | ||
20 | #else | ||
21 | #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) | ||
22 | #endif | ||
23 | |||
24 | static inline void page_counter_init(struct page_counter *counter, | ||
25 | struct page_counter *parent) | ||
26 | { | ||
27 | atomic_long_set(&counter->count, 0); | ||
28 | counter->limit = PAGE_COUNTER_MAX; | ||
29 | counter->parent = parent; | ||
30 | } | ||
31 | |||
32 | static inline unsigned long page_counter_read(struct page_counter *counter) | ||
33 | { | ||
34 | return atomic_long_read(&counter->count); | ||
35 | } | ||
36 | |||
37 | void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); | ||
38 | void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); | ||
39 | int page_counter_try_charge(struct page_counter *counter, | ||
40 | unsigned long nr_pages, | ||
41 | struct page_counter **fail); | ||
42 | void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); | ||
43 | int page_counter_limit(struct page_counter *counter, unsigned long limit); | ||
44 | int page_counter_memparse(const char *buf, unsigned long *nr_pages); | ||
45 | |||
46 | static inline void page_counter_reset_watermark(struct page_counter *counter) | ||
47 | { | ||
48 | counter->watermark = page_counter_read(counter); | ||
49 | } | ||
50 | |||
51 | #endif /* _LINUX_PAGE_COUNTER_H */ | ||
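page_counter provides lockless charge accounting with hierarchical propagation to a parent counter. A small sketch of the intended init/charge/uncharge pattern; the counter instances and the wrapper function are illustrative:

#include <linux/errno.h>
#include <linux/page_counter.h>

static int example_charge(struct page_counter *parent,
			  struct page_counter *child,
			  unsigned long nr_pages)
{
	struct page_counter *fail;

	/* Child counters propagate charges up to their parent */
	page_counter_init(child, parent);

	/* try_charge fails against the first ancestor over its limit */
	if (page_counter_try_charge(child, nr_pages, &fail))
		return -ENOMEM;

	/* ... use the pages ... */

	page_counter_uncharge(child, nr_pages);
	return 0;
}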
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h new file mode 100644 index 000000000000..d2a2c84c72d0 --- /dev/null +++ b/include/linux/page_ext.h | |||
@@ -0,0 +1,84 @@ | |||
1 | #ifndef __LINUX_PAGE_EXT_H | ||
2 | #define __LINUX_PAGE_EXT_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/stacktrace.h> | ||
6 | |||
7 | struct pglist_data; | ||
8 | struct page_ext_operations { | ||
9 | bool (*need)(void); | ||
10 | void (*init)(void); | ||
11 | }; | ||
12 | |||
13 | #ifdef CONFIG_PAGE_EXTENSION | ||
14 | |||
15 | /* | ||
16 | * page_ext->flags bits: | ||
17 | * | ||
18 | * PAGE_EXT_DEBUG_POISON is set for poisoned pages. It is used to | ||
19 | * implement the generic debug pagealloc feature: pages are filled with | ||
20 | * poison patterns and this flag is set after free_pages(), then the | ||
21 | * patterns are verified and the flag is cleared again before the | ||
22 | * pages are handed back out by alloc_pages(). | ||
23 | */ | ||
24 | |||
25 | enum page_ext_flags { | ||
26 | PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ | ||
27 | PAGE_EXT_DEBUG_GUARD, | ||
28 | PAGE_EXT_OWNER, | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * Page Extension can be considered as an extended mem_map. | ||
33 | * A page_ext page is associated with every page descriptor. The | ||
34 | * page_ext helps us add more information about the page. | ||
35 | * All page_ext structures are allocated at boot or on a memory hotplug | ||
36 | * event, so the page_ext for a given pfn always exists. | ||
37 | */ | ||
38 | struct page_ext { | ||
39 | unsigned long flags; | ||
40 | #ifdef CONFIG_PAGE_OWNER | ||
41 | unsigned int order; | ||
42 | gfp_t gfp_mask; | ||
43 | struct stack_trace trace; | ||
44 | unsigned long trace_entries[8]; | ||
45 | #endif | ||
46 | }; | ||
47 | |||
48 | extern void pgdat_page_ext_init(struct pglist_data *pgdat); | ||
49 | |||
50 | #ifdef CONFIG_SPARSEMEM | ||
51 | static inline void page_ext_init_flatmem(void) | ||
52 | { | ||
53 | } | ||
54 | extern void page_ext_init(void); | ||
55 | #else | ||
56 | extern void page_ext_init_flatmem(void); | ||
57 | static inline void page_ext_init(void) | ||
58 | { | ||
59 | } | ||
60 | #endif | ||
61 | |||
62 | struct page_ext *lookup_page_ext(struct page *page); | ||
63 | |||
64 | #else /* !CONFIG_PAGE_EXTENSION */ | ||
65 | struct page_ext; | ||
66 | |||
67 | static inline void pgdat_page_ext_init(struct pglist_data *pgdat) | ||
68 | { | ||
69 | } | ||
70 | |||
71 | static inline struct page_ext *lookup_page_ext(struct page *page) | ||
72 | { | ||
73 | return NULL; | ||
74 | } | ||
75 | |||
76 | static inline void page_ext_init(void) | ||
77 | { | ||
78 | } | ||
79 | |||
80 | static inline void page_ext_init_flatmem(void) | ||
81 | { | ||
82 | } | ||
83 | #endif /* CONFIG_PAGE_EXTENSION */ | ||
84 | #endif /* __LINUX_PAGE_EXT_H */ | ||
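page_ext generalizes the old page->debug_flags mechanism: each struct page gets a companion struct page_ext reached via lookup_page_ext(). A hedged sketch of testing one of the extension flags; the helper name is illustrative, and the bit operations follow the usual test_bit() pattern on page_ext->flags:

#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/page_ext.h>

static bool example_page_is_poisoned(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	/* NULL when CONFIG_PAGE_EXTENSION is off or not yet initialized */
	if (!page_ext)
		return false;

	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}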
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h new file mode 100644 index 000000000000..b48c3471c254 --- /dev/null +++ b/include/linux/page_owner.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef __LINUX_PAGE_OWNER_H | ||
2 | #define __LINUX_PAGE_OWNER_H | ||
3 | |||
4 | #ifdef CONFIG_PAGE_OWNER | ||
5 | extern bool page_owner_inited; | ||
6 | extern struct page_ext_operations page_owner_ops; | ||
7 | |||
8 | extern void __reset_page_owner(struct page *page, unsigned int order); | ||
9 | extern void __set_page_owner(struct page *page, | ||
10 | unsigned int order, gfp_t gfp_mask); | ||
11 | |||
12 | static inline void reset_page_owner(struct page *page, unsigned int order) | ||
13 | { | ||
14 | if (likely(!page_owner_inited)) | ||
15 | return; | ||
16 | |||
17 | __reset_page_owner(page, order); | ||
18 | } | ||
19 | |||
20 | static inline void set_page_owner(struct page *page, | ||
21 | unsigned int order, gfp_t gfp_mask) | ||
22 | { | ||
23 | if (likely(!page_owner_inited)) | ||
24 | return; | ||
25 | |||
26 | __set_page_owner(page, order, gfp_mask); | ||
27 | } | ||
28 | #else | ||
29 | static inline void reset_page_owner(struct page *page, unsigned int order) | ||
30 | { | ||
31 | } | ||
32 | static inline void set_page_owner(struct page *page, | ||
33 | unsigned int order, gfp_t gfp_mask) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | #endif /* CONFIG_PAGE_OWNER */ | ||
38 | #endif /* __LINUX_PAGE_OWNER_H */ | ||
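A condensed sketch of how the allocator side is expected to call into this header (simplified; the real call sites live in the page allocator, and the example_* wrappers are illustrative). Both helpers are no-ops until page_owner_inited is set, so the fast paths only pay for a branch.

#include <linux/page_owner.h>

/* on the allocation path, after a page of the given order is obtained */
static void example_post_alloc(struct page *page, unsigned int order,
			       gfp_t gfp_mask)
{
	set_page_owner(page, order, gfp_mask);
}

/* on the free path, before the page goes back to the buddy allocator */
static void example_pre_free(struct page *page, unsigned int order)
{
	reset_page_owner(page, order);
}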
diff --git a/include/linux/pci.h b/include/linux/pci.h index 5be8db45e368..44a27696ab6c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -331,6 +331,7 @@ struct pci_dev { | |||
331 | unsigned int is_added:1; | 331 | unsigned int is_added:1; |
332 | unsigned int is_busmaster:1; /* device is busmaster */ | 332 | unsigned int is_busmaster:1; /* device is busmaster */ |
333 | unsigned int no_msi:1; /* device may not use msi */ | 333 | unsigned int no_msi:1; /* device may not use msi */ |
334 | unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ | ||
334 | unsigned int block_cfg_access:1; /* config space access is blocked */ | 335 | unsigned int block_cfg_access:1; /* config space access is blocked */ |
335 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ | 336 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ |
336 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ | 337 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ |
@@ -449,7 +450,7 @@ struct pci_bus { | |||
449 | struct resource busn_res; /* bus numbers routed to this bus */ | 450 | struct resource busn_res; /* bus numbers routed to this bus */ |
450 | 451 | ||
451 | struct pci_ops *ops; /* configuration access functions */ | 452 | struct pci_ops *ops; /* configuration access functions */ |
452 | struct msi_chip *msi; /* MSI controller */ | 453 | struct msi_controller *msi; /* MSI controller */ |
453 | void *sysdata; /* hook for sys-specific extension */ | 454 | void *sysdata; /* hook for sys-specific extension */ |
454 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ | 455 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ |
455 | 456 | ||
@@ -1003,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); | |||
1003 | int pci_save_state(struct pci_dev *dev); | 1004 | int pci_save_state(struct pci_dev *dev); |
1004 | void pci_restore_state(struct pci_dev *dev); | 1005 | void pci_restore_state(struct pci_dev *dev); |
1005 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); | 1006 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); |
1007 | int pci_load_saved_state(struct pci_dev *dev, | ||
1008 | struct pci_saved_state *state); | ||
1006 | int pci_load_and_free_saved_state(struct pci_dev *dev, | 1009 | int pci_load_and_free_saved_state(struct pci_dev *dev, |
1007 | struct pci_saved_state **state); | 1010 | struct pci_saved_state **state); |
1008 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); | 1011 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); |
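The new pci_load_saved_state() complements pci_store_saved_state() without freeing the stored copy, unlike pci_load_and_free_saved_state(). A hedged sketch of the intended pattern (error handling trimmed, names illustrative):

#include <linux/pci.h>
#include <linux/slab.h>

static int example_reset_with_restore(struct pci_dev *pdev)
{
	struct pci_saved_state *state;
	int ret;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* kmalloc'ed snapshot */

	/* ... perform a reset that clobbers config space ... */

	ret = pci_load_saved_state(pdev, state);	/* 'state' stays valid */
	if (!ret)
		pci_restore_state(pdev);

	kfree(state);
	return ret;
}

Because the snapshot is not consumed, a caller can restore from the same saved state repeatedly, which is the point of adding this variant alongside pci_load_and_free_saved_state().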
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2706ee9a4327..8c7895061121 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
@@ -109,7 +109,6 @@ struct hotplug_slot { | |||
109 | struct list_head slot_list; | 109 | struct list_head slot_list; |
110 | struct pci_slot *pci_slot; | 110 | struct pci_slot *pci_slot; |
111 | }; | 111 | }; |
112 | #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) | ||
113 | 112 | ||
114 | static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) | 113 | static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) |
115 | { | 114 | { |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1fa99a301817..e63c02a93f6b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -522,6 +522,8 @@ | |||
522 | #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 | 522 | #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 |
523 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d | 523 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d |
524 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e | 524 | #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e |
525 | #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 | ||
526 | #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 | ||
525 | #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 | 527 | #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 |
526 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 | 528 | #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 |
527 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 | 529 | #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 |
@@ -562,6 +564,7 @@ | |||
562 | #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 | 564 | #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 |
563 | #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 | 565 | #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 |
564 | #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 | 566 | #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 |
567 | #define PCI_DEVICE_ID_AMD_NL_USB 0x7912 | ||
565 | #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F | 568 | #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F |
566 | #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 | 569 | #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 |
567 | #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 | 570 | #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 420032d41d27..57f3a1c550dc 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -254,8 +254,6 @@ do { \ | |||
254 | #endif /* CONFIG_SMP */ | 254 | #endif /* CONFIG_SMP */ |
255 | 255 | ||
256 | #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) | 256 | #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) |
257 | #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) | ||
258 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | ||
259 | 257 | ||
260 | /* | 258 | /* |
261 | * Must be an lvalue. Since @var must be a simple identifier, | 259 | * Must be an lvalue. Since @var must be a simple identifier, |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..b4337646388b 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) | |||
128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, | 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, |
129 | unsigned long __percpu **percpu_countp) | 129 | unsigned long __percpu **percpu_countp) |
130 | { | 130 | { |
131 | unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); | ||
132 | |||
133 | /* paired with smp_store_release() in percpu_ref_reinit() */ | 131 | /* paired with smp_store_release() in percpu_ref_reinit() */ |
134 | smp_read_barrier_depends(); | 132 | unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); |
135 | 133 | ||
136 | /* | 134 | /* |
137 | * Theoretically, the following could test just ATOMIC; however, | 135 | * Theoretically, the following could test just ATOMIC; however, |
@@ -147,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, | |||
147 | } | 145 | } |
148 | 146 | ||
149 | /** | 147 | /** |
150 | * percpu_ref_get - increment a percpu refcount | 148 | * percpu_ref_get_many - increment a percpu refcount |
151 | * @ref: percpu_ref to get | 149 | * @ref: percpu_ref to get |
150 | * @nr: number of references to get | ||
152 | * | 151 | * |
153 | * Analagous to atomic_long_inc(). | 152 | * Analogous to atomic_long_add(). |
154 | * | 153 | * |
155 | * This function is safe to call as long as @ref is between init and exit. | 154 | * This function is safe to call as long as @ref is between init and exit. |
156 | */ | 155 | */ |
157 | static inline void percpu_ref_get(struct percpu_ref *ref) | 156 | static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) |
158 | { | 157 | { |
159 | unsigned long __percpu *percpu_count; | 158 | unsigned long __percpu *percpu_count; |
160 | 159 | ||
161 | rcu_read_lock_sched(); | 160 | rcu_read_lock_sched(); |
162 | 161 | ||
163 | if (__ref_is_percpu(ref, &percpu_count)) | 162 | if (__ref_is_percpu(ref, &percpu_count)) |
164 | this_cpu_inc(*percpu_count); | 163 | this_cpu_add(*percpu_count, nr); |
165 | else | 164 | else |
166 | atomic_long_inc(&ref->count); | 165 | atomic_long_add(nr, &ref->count); |
167 | 166 | ||
168 | rcu_read_unlock_sched(); | 167 | rcu_read_unlock_sched(); |
169 | } | 168 | } |
170 | 169 | ||
171 | /** | 170 | /** |
171 | * percpu_ref_get - increment a percpu refcount | ||
172 | * @ref: percpu_ref to get | ||
173 | * | ||
174 | * Analogous to atomic_long_inc(). | ||
175 | * | ||
176 | * This function is safe to call as long as @ref is between init and exit. | ||
177 | */ | ||
178 | static inline void percpu_ref_get(struct percpu_ref *ref) | ||
179 | { | ||
180 | percpu_ref_get_many(ref, 1); | ||
181 | } | ||
182 | |||
183 | /** | ||
172 | * percpu_ref_tryget - try to increment a percpu refcount | 184 | * percpu_ref_tryget - try to increment a percpu refcount |
173 | * @ref: percpu_ref to try-get | 185 | * @ref: percpu_ref to try-get |
174 | * | 186 | * |
@@ -231,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) | |||
231 | } | 243 | } |
232 | 244 | ||
233 | /** | 245 | /** |
234 | * percpu_ref_put - decrement a percpu refcount | 246 | * percpu_ref_put_many - decrement a percpu refcount |
235 | * @ref: percpu_ref to put | 247 | * @ref: percpu_ref to put |
248 | * @nr: number of references to put | ||
236 | * | 249 | * |
237 | * Decrement the refcount, and if 0, call the release function (which was passed | 250 | * Decrement the refcount, and if 0, call the release function (which was passed |
238 | * to percpu_ref_init()) | 251 | * to percpu_ref_init()) |
239 | * | 252 | * |
240 | * This function is safe to call as long as @ref is between init and exit. | 253 | * This function is safe to call as long as @ref is between init and exit. |
241 | */ | 254 | */ |
242 | static inline void percpu_ref_put(struct percpu_ref *ref) | 255 | static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) |
243 | { | 256 | { |
244 | unsigned long __percpu *percpu_count; | 257 | unsigned long __percpu *percpu_count; |
245 | 258 | ||
246 | rcu_read_lock_sched(); | 259 | rcu_read_lock_sched(); |
247 | 260 | ||
248 | if (__ref_is_percpu(ref, &percpu_count)) | 261 | if (__ref_is_percpu(ref, &percpu_count)) |
249 | this_cpu_dec(*percpu_count); | 262 | this_cpu_sub(*percpu_count, nr); |
250 | else if (unlikely(atomic_long_dec_and_test(&ref->count))) | 263 | else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) |
251 | ref->release(ref); | 264 | ref->release(ref); |
252 | 265 | ||
253 | rcu_read_unlock_sched(); | 266 | rcu_read_unlock_sched(); |
254 | } | 267 | } |
255 | 268 | ||
256 | /** | 269 | /** |
270 | * percpu_ref_put - decrement a percpu refcount | ||
271 | * @ref: percpu_ref to put | ||
272 | * | ||
273 | * Decrement the refcount, and if 0, call the release function (which was passed | ||
274 | * to percpu_ref_init()) | ||
275 | * | ||
276 | * This function is safe to call as long as @ref is between init and exit. | ||
277 | */ | ||
278 | static inline void percpu_ref_put(struct percpu_ref *ref) | ||
279 | { | ||
280 | percpu_ref_put_many(ref, 1); | ||
281 | } | ||
282 | |||
283 | /** | ||
257 | * percpu_ref_is_zero - test whether a percpu refcount reached zero | 284 | * percpu_ref_is_zero - test whether a percpu refcount reached zero |
258 | * @ref: percpu_ref to test | 285 | * @ref: percpu_ref to test |
259 | * | 286 | * |
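The *_many() variants let a caller take or drop a batch of references with a single percpu operation instead of looping, and percpu_ref_get()/percpu_ref_put() are now thin wrappers around them with nr == 1. A small usage sketch (hypothetical names):

#include <linux/percpu-refcount.h>

/* pin one reference for each of 'nr' queued items in one go */
static void example_pin_batch(struct percpu_ref *ref, unsigned long nr)
{
	percpu_ref_get_many(ref, nr);
}

/* drop them all once the whole batch has completed */
static void example_unpin_batch(struct percpu_ref *ref, unsigned long nr)
{
	percpu_ref_put_many(ref, nr);	/* may invoke ref->release() */
}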
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index a3aa63e47637..caebf2a758dc 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/preempt.h> | 5 | #include <linux/preempt.h> |
6 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <linux/printk.h> | ||
8 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
9 | #include <linux/init.h> | 10 | #include <linux/init.h> |
10 | 11 | ||
@@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | |||
134 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ | 135 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ |
135 | __alignof__(type)) | 136 | __alignof__(type)) |
136 | 137 | ||
138 | /* To avoid include hell, as printk cannot declare this, we declare it here */ | ||
139 | DECLARE_PER_CPU(printk_func_t, printk_func); | ||
140 | |||
137 | #endif /* __LINUX_PERCPU_H */ | 141 | #endif /* __LINUX_PERCPU_H */ |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 893a0d07986f..486e84ccb1f9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -79,7 +79,7 @@ struct perf_branch_stack { | |||
79 | struct perf_branch_entry entries[0]; | 79 | struct perf_branch_entry entries[0]; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | struct perf_regs_user { | 82 | struct perf_regs { |
83 | __u64 abi; | 83 | __u64 abi; |
84 | struct pt_regs *regs; | 84 | struct pt_regs *regs; |
85 | }; | 85 | }; |
@@ -580,34 +580,40 @@ extern u64 perf_event_read_value(struct perf_event *event, | |||
580 | 580 | ||
581 | 581 | ||
582 | struct perf_sample_data { | 582 | struct perf_sample_data { |
583 | u64 type; | 583 | /* |
584 | * Fields set by perf_sample_data_init(), grouped so as to | ||
585 | * minimize the cachelines touched. | ||
586 | */ | ||
587 | u64 addr; | ||
588 | struct perf_raw_record *raw; | ||
589 | struct perf_branch_stack *br_stack; | ||
590 | u64 period; | ||
591 | u64 weight; | ||
592 | u64 txn; | ||
593 | union perf_mem_data_src data_src; | ||
584 | 594 | ||
595 | /* | ||
596 | * The other fields, optionally {set,used} by | ||
597 | * perf_{prepare,output}_sample(). | ||
598 | */ | ||
599 | u64 type; | ||
585 | u64 ip; | 600 | u64 ip; |
586 | struct { | 601 | struct { |
587 | u32 pid; | 602 | u32 pid; |
588 | u32 tid; | 603 | u32 tid; |
589 | } tid_entry; | 604 | } tid_entry; |
590 | u64 time; | 605 | u64 time; |
591 | u64 addr; | ||
592 | u64 id; | 606 | u64 id; |
593 | u64 stream_id; | 607 | u64 stream_id; |
594 | struct { | 608 | struct { |
595 | u32 cpu; | 609 | u32 cpu; |
596 | u32 reserved; | 610 | u32 reserved; |
597 | } cpu_entry; | 611 | } cpu_entry; |
598 | u64 period; | ||
599 | union perf_mem_data_src data_src; | ||
600 | struct perf_callchain_entry *callchain; | 612 | struct perf_callchain_entry *callchain; |
601 | struct perf_raw_record *raw; | 613 | struct perf_regs regs_user; |
602 | struct perf_branch_stack *br_stack; | 614 | struct perf_regs regs_intr; |
603 | struct perf_regs_user regs_user; | ||
604 | u64 stack_user_size; | 615 | u64 stack_user_size; |
605 | u64 weight; | 616 | } ____cacheline_aligned; |
606 | /* | ||
607 | * Transaction flags for abort events: | ||
608 | */ | ||
609 | u64 txn; | ||
610 | }; | ||
611 | 617 | ||
612 | /* default value for data source */ | 618 | /* default value for data source */ |
613 | #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ | 619 | #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ |
@@ -624,9 +630,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, | |||
624 | data->raw = NULL; | 630 | data->raw = NULL; |
625 | data->br_stack = NULL; | 631 | data->br_stack = NULL; |
626 | data->period = period; | 632 | data->period = period; |
627 | data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE; | ||
628 | data->regs_user.regs = NULL; | ||
629 | data->stack_user_size = 0; | ||
630 | data->weight = 0; | 633 | data->weight = 0; |
631 | data->data_src.val = PERF_MEM_NA; | 634 | data->data_src.val = PERF_MEM_NA; |
632 | data->txn = 0; | 635 | data->txn = 0; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167..22af8f8f5802 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -433,6 +433,7 @@ struct phy_device { | |||
433 | * by this PHY | 433 | * by this PHY |
434 | * flags: A bitfield defining certain other features this PHY | 434 | * flags: A bitfield defining certain other features this PHY |
435 | * supports (like interrupts) | 435 | * supports (like interrupts) |
436 | * driver_data: static driver data | ||
436 | * | 437 | * |
437 | * The drivers must implement config_aneg and read_status. All | 438 | * The drivers must implement config_aneg and read_status. All |
438 | * other functions are optional. Note that none of these | 439 | * other functions are optional. Note that none of these |
@@ -448,6 +449,7 @@ struct phy_driver { | |||
448 | unsigned int phy_id_mask; | 449 | unsigned int phy_id_mask; |
449 | u32 features; | 450 | u32 features; |
450 | u32 flags; | 451 | u32 flags; |
452 | const void *driver_data; | ||
451 | 453 | ||
452 | /* | 454 | /* |
453 | * Called to issue a PHY software reset | 455 | * Called to issue a PHY software reset |
@@ -772,4 +774,28 @@ int __init mdio_bus_init(void); | |||
772 | void mdio_bus_exit(void); | 774 | void mdio_bus_exit(void); |
773 | 775 | ||
774 | extern struct bus_type mdio_bus_type; | 776 | extern struct bus_type mdio_bus_type; |
777 | |||
778 | /** | ||
779 | * module_phy_driver() - Helper macro for registering PHY drivers | ||
780 | * @__phy_drivers: array of PHY drivers to register | ||
781 | * | ||
782 | * Helper macro for PHY drivers which do not do anything special in module | ||
783 | * init/exit. Each module may only use this macro once, and calling it | ||
784 | * replaces module_init() and module_exit(). | ||
785 | */ | ||
786 | #define phy_module_driver(__phy_drivers, __count) \ | ||
787 | static int __init phy_module_init(void) \ | ||
788 | { \ | ||
789 | return phy_drivers_register(__phy_drivers, __count); \ | ||
790 | } \ | ||
791 | module_init(phy_module_init); \ | ||
792 | static void __exit phy_module_exit(void) \ | ||
793 | { \ | ||
794 | phy_drivers_unregister(__phy_drivers, __count); \ | ||
795 | } \ | ||
796 | module_exit(phy_module_exit) | ||
797 | |||
798 | #define module_phy_driver(__phy_drivers) \ | ||
799 | phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) | ||
800 | |||
775 | #endif /* __PHY_H */ | 801 | #endif /* __PHY_H */ |
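A minimal sketch of the boilerplate module_phy_driver() removes; the PHY ID and driver fields below are illustrative, not a real device:

#include <linux/phy.h>
#include <linux/module.h>

static struct phy_driver example_phy_drivers[] = {
	{
		.phy_id		= 0x01234567,	/* hypothetical PHY ID */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.features	= PHY_BASIC_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
		.driver		= { .owner = THIS_MODULE },
	},
};

/* expands to module_init()/module_exit() calling phy_drivers_register/unregister */
module_phy_driver(example_phy_drivers);

MODULE_LICENSE("GPL");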
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 8cb6f815475b..a0197fa1b116 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
@@ -61,7 +61,6 @@ struct phy { | |||
61 | struct device dev; | 61 | struct device dev; |
62 | int id; | 62 | int id; |
63 | const struct phy_ops *ops; | 63 | const struct phy_ops *ops; |
64 | struct phy_init_data *init_data; | ||
65 | struct mutex mutex; | 64 | struct mutex mutex; |
66 | int init_count; | 65 | int init_count; |
67 | int power_count; | 66 | int power_count; |
@@ -84,33 +83,14 @@ struct phy_provider { | |||
84 | struct of_phandle_args *args); | 83 | struct of_phandle_args *args); |
85 | }; | 84 | }; |
86 | 85 | ||
87 | /** | 86 | struct phy_lookup { |
88 | * struct phy_consumer - represents the phy consumer | 87 | struct list_head node; |
89 | * @dev_name: the device name of the controller that will use this PHY device | 88 | const char *dev_id; |
90 | * @port: name given to the consumer port | 89 | const char *con_id; |
91 | */ | 90 | struct phy *phy; |
92 | struct phy_consumer { | ||
93 | const char *dev_name; | ||
94 | const char *port; | ||
95 | }; | ||
96 | |||
97 | /** | ||
98 | * struct phy_init_data - contains the list of PHY consumers | ||
99 | * @num_consumers: number of consumers for this PHY device | ||
100 | * @consumers: list of PHY consumers | ||
101 | */ | ||
102 | struct phy_init_data { | ||
103 | unsigned int num_consumers; | ||
104 | struct phy_consumer *consumers; | ||
105 | }; | 91 | }; |
106 | 92 | ||
107 | #define PHY_CONSUMER(_dev_name, _port) \ | 93 | #define to_phy(a) (container_of((a), struct phy, dev)) |
108 | { \ | ||
109 | .dev_name = _dev_name, \ | ||
110 | .port = _port, \ | ||
111 | } | ||
112 | |||
113 | #define to_phy(dev) (container_of((dev), struct phy, dev)) | ||
114 | 94 | ||
115 | #define of_phy_provider_register(dev, xlate) \ | 95 | #define of_phy_provider_register(dev, xlate) \ |
116 | __of_phy_provider_register((dev), THIS_MODULE, (xlate)) | 96 | __of_phy_provider_register((dev), THIS_MODULE, (xlate)) |
@@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id); | |||
159 | struct phy *of_phy_simple_xlate(struct device *dev, | 139 | struct phy *of_phy_simple_xlate(struct device *dev, |
160 | struct of_phandle_args *args); | 140 | struct of_phandle_args *args); |
161 | struct phy *phy_create(struct device *dev, struct device_node *node, | 141 | struct phy *phy_create(struct device *dev, struct device_node *node, |
162 | const struct phy_ops *ops, | 142 | const struct phy_ops *ops); |
163 | struct phy_init_data *init_data); | ||
164 | struct phy *devm_phy_create(struct device *dev, struct device_node *node, | 143 | struct phy *devm_phy_create(struct device *dev, struct device_node *node, |
165 | const struct phy_ops *ops, struct phy_init_data *init_data); | 144 | const struct phy_ops *ops); |
166 | void phy_destroy(struct phy *phy); | 145 | void phy_destroy(struct phy *phy); |
167 | void devm_phy_destroy(struct device *dev, struct phy *phy); | 146 | void devm_phy_destroy(struct device *dev, struct phy *phy); |
168 | struct phy_provider *__of_phy_provider_register(struct device *dev, | 147 | struct phy_provider *__of_phy_provider_register(struct device *dev, |
@@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev, | |||
174 | void of_phy_provider_unregister(struct phy_provider *phy_provider); | 153 | void of_phy_provider_unregister(struct phy_provider *phy_provider); |
175 | void devm_of_phy_provider_unregister(struct device *dev, | 154 | void devm_of_phy_provider_unregister(struct device *dev, |
176 | struct phy_provider *phy_provider); | 155 | struct phy_provider *phy_provider); |
156 | int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id); | ||
157 | void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id); | ||
177 | #else | 158 | #else |
178 | static inline int phy_pm_runtime_get(struct phy *phy) | 159 | static inline int phy_pm_runtime_get(struct phy *phy) |
179 | { | 160 | { |
@@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev, | |||
301 | 282 | ||
302 | static inline struct phy *phy_create(struct device *dev, | 283 | static inline struct phy *phy_create(struct device *dev, |
303 | struct device_node *node, | 284 | struct device_node *node, |
304 | const struct phy_ops *ops, | 285 | const struct phy_ops *ops) |
305 | struct phy_init_data *init_data) | ||
306 | { | 286 | { |
307 | return ERR_PTR(-ENOSYS); | 287 | return ERR_PTR(-ENOSYS); |
308 | } | 288 | } |
309 | 289 | ||
310 | static inline struct phy *devm_phy_create(struct device *dev, | 290 | static inline struct phy *devm_phy_create(struct device *dev, |
311 | struct device_node *node, | 291 | struct device_node *node, |
312 | const struct phy_ops *ops, | 292 | const struct phy_ops *ops) |
313 | struct phy_init_data *init_data) | ||
314 | { | 293 | { |
315 | return ERR_PTR(-ENOSYS); | 294 | return ERR_PTR(-ENOSYS); |
316 | } | 295 | } |
@@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct device *dev, | |||
345 | struct phy_provider *phy_provider) | 324 | struct phy_provider *phy_provider) |
346 | { | 325 | { |
347 | } | 326 | } |
327 | static inline int | ||
328 | phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id) | ||
329 | { | ||
330 | return 0; | ||
331 | } | ||
332 | static inline void phy_remove_lookup(struct phy *phy, const char *con_id, | ||
333 | const char *dev_id) { } | ||
348 | #endif | 334 | #endif |
349 | 335 | ||
350 | #endif /* __DRIVERS_PHY_H */ | 336 | #endif /* __DRIVERS_PHY_H */ |
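phy_create_lookup()/phy_remove_lookup() replace the removed phy_init_data consumer tables: the provider registers a lookup keyed by connection ID and consumer device name, and consumers keep using phy_get(). A hedged sketch with made-up names and ops:

#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/err.h>

static const struct phy_ops example_phy_ops = {
	.owner = THIS_MODULE,
};

static int example_provider_probe(struct platform_device *pdev)
{
	struct phy *phy;

	phy = devm_phy_create(&pdev->dev, NULL, &example_phy_ops);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* let the consumer "usb-controller.0" find this PHY under con_id "usb" */
	return phy_create_lookup(phy, "usb", "usb-controller.0");
}

The consumer side is unchanged: it calls phy_get(dev, "usb") and the lookup table resolves the association that used to come from platform init data.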
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index a6591c693ebb..5e0bc779e6c5 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h | |||
@@ -27,6 +27,7 @@ struct samsung_i2s { | |||
27 | #define QUIRK_NO_MUXPSR (1 << 2) | 27 | #define QUIRK_NO_MUXPSR (1 << 2) |
28 | #define QUIRK_NEED_RSTCLR (1 << 3) | 28 | #define QUIRK_NEED_RSTCLR (1 << 3) |
29 | #define QUIRK_SUPPORTS_TDM (1 << 4) | 29 | #define QUIRK_SUPPORTS_TDM (1 << 4) |
30 | #define QUIRK_SUPPORTS_IDMA (1 << 5) | ||
30 | /* Quirks of the I2S controller */ | 31 | /* Quirks of the I2S controller */ |
31 | u32 quirks; | 32 | u32 quirks; |
32 | dma_addr_t idma_addr; | 33 | dma_addr_t idma_addr; |
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 000000000000..26af54321958 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ | ||
2 | #define __LINUX_PLATFORM_DATA_BCMGENET_H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/if_ether.h> | ||
6 | #include <linux/phy.h> | ||
7 | |||
8 | struct bcmgenet_platform_data { | ||
9 | bool mdio_enabled; | ||
10 | phy_interface_t phy_interface; | ||
11 | int phy_address; | ||
12 | int phy_speed; | ||
13 | int phy_duplex; | ||
14 | u8 mac_address[ETH_ALEN]; | ||
15 | int genet_version; | ||
16 | }; | ||
17 | |||
18 | #endif | ||
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 6a1357d31871..7d964e787299 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
@@ -41,6 +41,7 @@ enum sdma_peripheral_type { | |||
41 | IMX_DMATYPE_ESAI, /* ESAI */ | 41 | IMX_DMATYPE_ESAI, /* ESAI */ |
42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ | 42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ |
43 | IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ | 43 | IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ |
44 | IMX_DMATYPE_SAI, /* SAI */ | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | enum imx_dma_prio { | 47 | enum imx_dma_prio { |
diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h deleted file mode 100644 index 5eb7da9b3772..000000000000 --- a/include/linux/platform_data/dwc3-exynos.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /** | ||
2 | * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header. | ||
3 | * | ||
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
5 | * http://www.samsung.com | ||
6 | * | ||
7 | * Author: Anton Tikhomirov <av.tikhomirov@samsung.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #ifndef _DWC3_EXYNOS_H_ | ||
16 | #define _DWC3_EXYNOS_H_ | ||
17 | |||
18 | struct dwc3_exynos_data { | ||
19 | int phy_type; | ||
20 | int (*phy_init)(struct platform_device *pdev, int type); | ||
21 | int (*phy_exit)(struct platform_device *pdev, int type); | ||
22 | }; | ||
23 | |||
24 | #endif /* _DWC3_EXYNOS_H_ */ | ||
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h new file mode 100644 index 000000000000..67bbcf0785f6 --- /dev/null +++ b/include/linux/platform_data/hsmmc-omap.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * MMC definitions for OMAP2 | ||
3 | * | ||
4 | * Copyright (C) 2006 Nokia Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * struct omap_hsmmc_dev_attr.flags possibilities | ||
13 | * | ||
14 | * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can | ||
15 | * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag | ||
16 | * should be set if this is the case. See for example Section 22.5.3 | ||
17 | * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia | ||
18 | * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). | ||
19 | * | ||
20 | * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers | ||
21 | * don't work correctly on some MMC controller instances on some | ||
22 | * OMAP3 SoCs; this flag should be set if this is the case. See | ||
23 | * for example Advisory 2.1.1.128 "MMC: Multiple Block Read | ||
24 | * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ | ||
25 | * Revision F (October 2010) (SPRZ278F). | ||
26 | */ | ||
27 | #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) | ||
28 | #define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) | ||
29 | #define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) | ||
30 | |||
31 | struct omap_hsmmc_dev_attr { | ||
32 | u8 flags; | ||
33 | }; | ||
34 | |||
35 | struct mmc_card; | ||
36 | |||
37 | struct omap_hsmmc_platform_data { | ||
38 | /* back-link to device */ | ||
39 | struct device *dev; | ||
40 | |||
41 | /* set if your board has components or wiring that limit the | ||
42 | * maximum frequency on the MMC bus */ | ||
43 | unsigned int max_freq; | ||
44 | |||
45 | /* Integrating attributes from the omap_hwmod layer */ | ||
46 | u8 controller_flags; | ||
47 | |||
48 | /* Register offset deviation */ | ||
49 | u16 reg_offset; | ||
50 | |||
51 | /* | ||
52 | * 4/8 wires and any additional host capabilities; | ||
53 | * all capabilities need to be OR'd together (see linux/mmc/host.h) | ||
54 | */ | ||
55 | u32 caps; /* Used for the MMC driver on 2430 and later */ | ||
56 | u32 pm_caps; /* PM capabilities of the mmc */ | ||
57 | |||
58 | /* switch pin can be for card detect (default) or card cover */ | ||
59 | unsigned cover:1; | ||
60 | |||
61 | /* use the internal clock */ | ||
62 | unsigned internal_clock:1; | ||
63 | |||
64 | /* nonremovable e.g. eMMC */ | ||
65 | unsigned nonremovable:1; | ||
66 | |||
67 | /* eMMC does not handle power off when not in sleep state */ | ||
68 | unsigned no_regulator_off_init:1; | ||
69 | |||
70 | /* we can put the features above into this variable */ | ||
71 | #define HSMMC_HAS_PBIAS (1 << 0) | ||
72 | #define HSMMC_HAS_UPDATED_RESET (1 << 1) | ||
73 | #define HSMMC_HAS_HSPE_SUPPORT (1 << 2) | ||
74 | unsigned features; | ||
75 | |||
76 | int switch_pin; /* gpio (card detect) */ | ||
77 | int gpio_wp; /* gpio (write protect) */ | ||
78 | |||
79 | int (*set_power)(struct device *dev, int power_on, int vdd); | ||
80 | void (*remux)(struct device *dev, int power_on); | ||
81 | /* Call back before enabling / disabling regulators */ | ||
82 | void (*before_set_reg)(struct device *dev, int power_on, int vdd); | ||
83 | /* Call back after enabling / disabling regulators */ | ||
84 | void (*after_set_reg)(struct device *dev, int power_on, int vdd); | ||
86 | /* if we have a special card, init it using this callback */ | ||
86 | void (*init_card)(struct mmc_card *card); | ||
87 | |||
88 | const char *name; | ||
89 | u32 ocr_mask; | ||
90 | }; | ||
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index 1b2ba24e4e03..9c7fd1efe495 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h | |||
@@ -136,6 +136,7 @@ struct lp855x_rom_data { | |||
136 | Only valid when mode is PWM_BASED. | 136 | Only valid when mode is PWM_BASED. |
137 | * @size_program : total size of lp855x_rom_data | 137 | * @size_program : total size of lp855x_rom_data |
138 | * @rom_data : list of new eeprom/eprom registers | 138 | * @rom_data : list of new eeprom/eprom registers |
139 | * @supply : regulator that supplies 3V input | ||
139 | */ | 140 | */ |
140 | struct lp855x_platform_data { | 141 | struct lp855x_platform_data { |
141 | const char *name; | 142 | const char *name; |
@@ -144,6 +145,7 @@ struct lp855x_platform_data { | |||
144 | unsigned int period_ns; | 145 | unsigned int period_ns; |
145 | int size_program; | 146 | int size_program; |
146 | struct lp855x_rom_data *rom_data; | 147 | struct lp855x_rom_data *rom_data; |
148 | struct regulator *supply; | ||
147 | }; | 149 | }; |
148 | 150 | ||
149 | #endif | 151 | #endif |
diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h new file mode 100644 index 000000000000..399a2d5a14bd --- /dev/null +++ b/include/linux/platform_data/mmc-atmel-mci.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __MMC_ATMEL_MCI_H | ||
2 | #define __MMC_ATMEL_MCI_H | ||
3 | |||
4 | #include <linux/platform_data/dma-atmel.h> | ||
5 | #include <linux/platform_data/dma-dw.h> | ||
6 | |||
7 | /** | ||
8 | * struct mci_dma_data - DMA data for MCI interface | ||
9 | */ | ||
10 | struct mci_dma_data { | ||
11 | #ifdef CONFIG_ARM | ||
12 | struct at_dma_slave sdata; | ||
13 | #else | ||
14 | struct dw_dma_slave sdata; | ||
15 | #endif | ||
16 | }; | ||
17 | |||
18 | /* accessor macros */ | ||
19 | #define slave_data_ptr(s) (&(s)->sdata) | ||
20 | #define find_slave_dev(s) ((s)->sdata.dma_dev) | ||
21 | |||
22 | #endif /* __MMC_ATMEL_MCI_H */ | ||
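The accessor macros hide which DMA slave-config type is embedded, so driver code compiles unchanged on both the ARM (at_dma_slave) and non-ARM (dw_dma_slave) variants. A tiny illustrative sketch:

#include <linux/platform_data/mmc-atmel-mci.h>

/* board code fills in the slave config; driver code only uses the accessors */
static struct mci_dma_data example_dma_data;

static struct device *example_dma_dev(struct mci_dma_data *pdata)
{
	/* resolves to pdata->sdata.dma_dev for either slave-config type */
	return find_slave_dev(pdata);
}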
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 51e70cf25cbc..5c188f4e9bec 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h | |||
@@ -10,32 +10,8 @@ | |||
10 | 10 | ||
11 | #define OMAP_MMC_MAX_SLOTS 2 | 11 | #define OMAP_MMC_MAX_SLOTS 2 |
12 | 12 | ||
13 | /* | ||
14 | * struct omap_mmc_dev_attr.flags possibilities | ||
15 | * | ||
16 | * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can | ||
17 | * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag | ||
18 | * should be set if this is the case. See for example Section 22.5.3 | ||
19 | * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia | ||
20 | * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). | ||
21 | * | ||
22 | * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers | ||
23 | * don't work correctly on some MMC controller instances on some | ||
24 | * OMAP3 SoCs; this flag should be set if this is the case. See | ||
25 | * for example Advisory 2.1.1.128 "MMC: Multiple Block Read | ||
26 | * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ | ||
27 | * Revision F (October 2010) (SPRZ278F). | ||
28 | */ | ||
29 | #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) | ||
30 | #define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) | ||
31 | #define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) | ||
32 | |||
33 | struct mmc_card; | 13 | struct mmc_card; |
34 | 14 | ||
35 | struct omap_mmc_dev_attr { | ||
36 | u8 flags; | ||
37 | }; | ||
38 | |||
39 | struct omap_mmc_platform_data { | 15 | struct omap_mmc_platform_data { |
40 | /* back-link to device */ | 16 | /* back-link to device */ |
41 | struct device *dev; | 17 | struct device *dev; |
@@ -106,9 +82,6 @@ struct omap_mmc_platform_data { | |||
106 | unsigned vcc_aux_disable_is_sleep:1; | 82 | unsigned vcc_aux_disable_is_sleep:1; |
107 | 83 | ||
108 | /* we can put the features above into this variable */ | 84 | /* we can put the features above into this variable */ |
109 | #define HSMMC_HAS_PBIAS (1 << 0) | ||
110 | #define HSMMC_HAS_UPDATED_RESET (1 << 1) | ||
111 | #define HSMMC_HAS_HSPE_SUPPORT (1 << 2) | ||
112 | #define MMC_OMAP7XX (1 << 3) | 85 | #define MMC_OMAP7XX (1 << 3) |
113 | #define MMC_OMAP15XX (1 << 4) | 86 | #define MMC_OMAP15XX (1 << 4) |
114 | #define MMC_OMAP16XX (1 << 5) | 87 | #define MMC_OMAP16XX (1 << 5) |
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 27d3156d093a..9e20c2fb4ffd 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h | |||
@@ -55,9 +55,4 @@ struct sdhci_pxa_platdata { | |||
55 | unsigned int quirks2; | 55 | unsigned int quirks2; |
56 | unsigned int pm_caps; | 56 | unsigned int pm_caps; |
57 | }; | 57 | }; |
58 | |||
59 | struct sdhci_pxa { | ||
60 | u8 clk_enable; | ||
61 | u8 power_mode; | ||
62 | }; | ||
63 | #endif /* _PXA_SDHCI_H_ */ | 58 | #endif /* _PXA_SDHCI_H_ */ |
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h deleted file mode 100644 index a5f045e1d8fe..000000000000 --- a/include/linux/platform_data/rcar-du.h +++ /dev/null | |||
@@ -1,74 +0,0 @@ | |||
1 | /* | ||
2 | * rcar_du.h -- R-Car Display Unit DRM driver | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Corporation | ||
5 | * | ||
6 | * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef __RCAR_DU_H__ | ||
15 | #define __RCAR_DU_H__ | ||
16 | |||
17 | #include <video/videomode.h> | ||
18 | |||
19 | enum rcar_du_output { | ||
20 | RCAR_DU_OUTPUT_DPAD0, | ||
21 | RCAR_DU_OUTPUT_DPAD1, | ||
22 | RCAR_DU_OUTPUT_LVDS0, | ||
23 | RCAR_DU_OUTPUT_LVDS1, | ||
24 | RCAR_DU_OUTPUT_TCON, | ||
25 | RCAR_DU_OUTPUT_MAX, | ||
26 | }; | ||
27 | |||
28 | enum rcar_du_encoder_type { | ||
29 | RCAR_DU_ENCODER_UNUSED = 0, | ||
30 | RCAR_DU_ENCODER_NONE, | ||
31 | RCAR_DU_ENCODER_VGA, | ||
32 | RCAR_DU_ENCODER_LVDS, | ||
33 | }; | ||
34 | |||
35 | struct rcar_du_panel_data { | ||
36 | unsigned int width_mm; /* Panel width in mm */ | ||
37 | unsigned int height_mm; /* Panel height in mm */ | ||
38 | struct videomode mode; | ||
39 | }; | ||
40 | |||
41 | struct rcar_du_connector_lvds_data { | ||
42 | struct rcar_du_panel_data panel; | ||
43 | }; | ||
44 | |||
45 | struct rcar_du_connector_vga_data { | ||
46 | /* TODO: Add DDC information for EDID retrieval */ | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * struct rcar_du_encoder_data - Encoder platform data | ||
51 | * @type: the encoder type (RCAR_DU_ENCODER_*) | ||
52 | * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*) | ||
53 | * @connector.lvds: platform data for LVDS connectors | ||
54 | * @connector.vga: platform data for VGA connectors | ||
55 | * | ||
56 | * Encoder platform data describes an on-board encoder, its associated DU SoC | ||
57 | * output, and the connector. | ||
58 | */ | ||
59 | struct rcar_du_encoder_data { | ||
60 | enum rcar_du_encoder_type type; | ||
61 | enum rcar_du_output output; | ||
62 | |||
63 | union { | ||
64 | struct rcar_du_connector_lvds_data lvds; | ||
65 | struct rcar_du_connector_vga_data vga; | ||
66 | } connector; | ||
67 | }; | ||
68 | |||
69 | struct rcar_du_platform_data { | ||
70 | struct rcar_du_encoder_data *encoders; | ||
71 | unsigned int num_encoders; | ||
72 | }; | ||
73 | |||
74 | #endif /* __RCAR_DU_H__ */ | ||
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index c860c1b314c0..d09275f3cde3 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h | |||
@@ -38,9 +38,6 @@ struct omap_uart_port_info { | |||
38 | unsigned int dma_rx_timeout; | 38 | unsigned int dma_rx_timeout; |
39 | unsigned int autosuspend_timeout; | 39 | unsigned int autosuspend_timeout; |
40 | unsigned int dma_rx_poll_rate; | 40 | unsigned int dma_rx_poll_rate; |
41 | int DTR_gpio; | ||
42 | int DTR_inverted; | ||
43 | int DTR_present; | ||
44 | 41 | ||
45 | int (*get_context_loss_count)(struct device *); | 42 | int (*get_context_loss_count)(struct device *); |
46 | void (*enable_wakeup)(struct device *, bool); | 43 | void (*enable_wakeup)(struct device *, bool); |
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398ff..5087fff96d86 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" | 24 | #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" |
25 | 25 | ||
26 | struct st21nfca_nfc_platform_data { | 26 | struct st21nfca_nfc_platform_data { |
27 | unsigned int gpio_irq; | ||
28 | unsigned int gpio_ena; | 27 | unsigned int gpio_ena; |
29 | unsigned int irq_polarity; | 28 | unsigned int irq_polarity; |
30 | }; | 29 | }; |
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efab..c3b432f5b63e 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" | 24 | #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" |
25 | 25 | ||
26 | struct st21nfcb_nfc_platform_data { | 26 | struct st21nfcb_nfc_platform_data { |
27 | unsigned int gpio_irq; | ||
28 | unsigned int gpio_reset; | 27 | unsigned int gpio_reset; |
29 | unsigned int irq_polarity; | 28 | unsigned int irq_polarity; |
30 | }; | 29 | }; |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 153d303af7eb..ae4882ca4a64 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *); | |||
197 | /* non-hotpluggable platform devices may use this so that probe() and | 197 | /* non-hotpluggable platform devices may use this so that probe() and |
198 | * its support may live in __init sections, conserving runtime memory. | 198 | * its support may live in __init sections, conserving runtime memory. |
199 | */ | 199 | */ |
200 | extern int platform_driver_probe(struct platform_driver *driver, | 200 | #define platform_driver_probe(drv, probe) \ |
201 | int (*probe)(struct platform_device *)); | 201 | __platform_driver_probe(drv, probe, THIS_MODULE) |
202 | extern int __platform_driver_probe(struct platform_driver *driver, | ||
203 | int (*probe)(struct platform_device *), struct module *module); | ||
202 | 204 | ||
203 | static inline void *platform_get_drvdata(const struct platform_device *pdev) | 205 | static inline void *platform_get_drvdata(const struct platform_device *pdev) |
204 | { | 206 | { |
@@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \ | |||
238 | } \ | 240 | } \ |
239 | module_exit(__platform_driver##_exit); | 241 | module_exit(__platform_driver##_exit); |
240 | 242 | ||
241 | extern struct platform_device *platform_create_bundle( | 243 | #define platform_create_bundle(driver, probe, res, n_res, data, size) \ |
244 | __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) | ||
245 | extern struct platform_device *__platform_create_bundle( | ||
242 | struct platform_driver *driver, int (*probe)(struct platform_device *), | 246 | struct platform_driver *driver, int (*probe)(struct platform_device *), |
243 | struct resource *res, unsigned int n_res, | 247 | struct resource *res, unsigned int n_res, |
244 | const void *data, size_t size); | 248 | const void *data, size_t size, struct module *module); |
245 | 249 | ||
246 | /* early platform driver interface */ | 250 | /* early platform driver interface */ |
247 | struct early_platform_driver { | 251 | struct early_platform_driver { |
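platform_driver_probe() and platform_create_bundle() are now macros that capture THIS_MODULE at the call site and forward to the new __-prefixed functions, so existing callers keep the same syntax. For illustration, a one-shot probe pattern (names hypothetical):

#include <linux/platform_device.h>
#include <linux/module.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* non-hotpluggable device: probe code may live in __init */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-device",
	},
};

static int __init example_init(void)
{
	/* expands to __platform_driver_probe(&example_driver, example_probe, THIS_MODULE) */
	return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

MODULE_LICENSE("GPL");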
diff --git a/include/linux/plist.h b/include/linux/plist.h index 8b6c970cff6c..97883604a3c5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h | |||
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
176 | * plist_for_each_entry - iterate over list of given type | 176 | * plist_for_each_entry - iterate over list of given type |
177 | * @pos: the type * to use as a loop counter | 177 | * @pos: the type * to use as a loop counter |
178 | * @head: the head for your list | 178 | * @head: the head for your list |
179 | * @mem: the name of the list_struct within the struct | 179 | * @mem: the name of the list_head within the struct |
180 | */ | 180 | */ |
181 | #define plist_for_each_entry(pos, head, mem) \ | 181 | #define plist_for_each_entry(pos, head, mem) \ |
182 | list_for_each_entry(pos, &(head)->node_list, mem.node_list) | 182 | list_for_each_entry(pos, &(head)->node_list, mem.node_list) |
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
185 | * plist_for_each_entry_continue - continue iteration over list of given type | 185 | * plist_for_each_entry_continue - continue iteration over list of given type |
186 | * @pos: the type * to use as a loop cursor | 186 | * @pos: the type * to use as a loop cursor |
187 | * @head: the head for your list | 187 | * @head: the head for your list |
188 | * @m: the name of the list_struct within the struct | 188 | * @m: the name of the list_head within the struct |
189 | * | 189 | * |
190 | * Continue to iterate over list of given type, continuing after | 190 | * Continue to iterate over list of given type, continuing after |
191 | * the current position. | 191 | * the current position. |
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
198 | * @pos: the type * to use as a loop counter | 198 | * @pos: the type * to use as a loop counter |
199 | * @n: another type * to use as temporary storage | 199 | * @n: another type * to use as temporary storage |
200 | * @head: the head for your list | 200 | * @head: the head for your list |
201 | * @m: the name of the list_struct within the struct | 201 | * @m: the name of the list_head within the struct |
202 | * | 202 | * |
203 | * Iterate over list of given type, safe against removal of list entry. | 203 | * Iterate over list of given type, safe against removal of list entry. |
204 | */ | 204 | */ |
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
229 | * plist_first_entry - get the struct for the first entry | 229 | * plist_first_entry - get the struct for the first entry |
230 | * @head: the &struct plist_head pointer | 230 | * @head: the &struct plist_head pointer |
231 | * @type: the type of the struct this is embedded in | 231 | * @type: the type of the struct this is embedded in |
232 | * @member: the name of the list_struct within the struct | 232 | * @member: the name of the list_head within the struct |
233 | */ | 233 | */ |
234 | #ifdef CONFIG_DEBUG_PI_LIST | 234 | #ifdef CONFIG_DEBUG_PI_LIST |
235 | # define plist_first_entry(head, type, member) \ | 235 | # define plist_first_entry(head, type, member) \ |
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
246 | * plist_last_entry - get the struct for the last entry | 246 | * plist_last_entry - get the struct for the last entry |
247 | * @head: the &struct plist_head pointer | 247 | * @head: the &struct plist_head pointer |
248 | * @type: the type of the struct this is embedded in | 248 | * @type: the type of the struct this is embedded in |
249 | * @member: the name of the list_struct within the struct | 249 | * @member: the name of the list_head within the struct |
250 | */ | 250 | */ |
251 | #ifdef CONFIG_DEBUG_PI_LIST | 251 | #ifdef CONFIG_DEBUG_PI_LIST |
252 | # define plist_last_entry(head, type, member) \ | 252 | # define plist_last_entry(head, type, member) \ |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 383fd68aaee1..66a656eb335b 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -342,7 +342,7 @@ struct dev_pm_ops { | |||
342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) | 342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) |
343 | #endif | 343 | #endif |
344 | 344 | ||
345 | #ifdef CONFIG_PM_RUNTIME | 345 | #ifdef CONFIG_PM |
346 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ | 346 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
347 | .runtime_suspend = suspend_fn, \ | 347 | .runtime_suspend = suspend_fn, \ |
348 | .runtime_resume = resume_fn, \ | 348 | .runtime_resume = resume_fn, \ |
@@ -351,14 +351,7 @@ struct dev_pm_ops { | |||
351 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) | 351 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) |
352 | #endif | 352 | #endif |
353 | 353 | ||
354 | #ifdef CONFIG_PM | 354 | #define SET_PM_RUNTIME_PM_OPS SET_RUNTIME_PM_OPS |
355 | #define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ | ||
356 | .runtime_suspend = suspend_fn, \ | ||
357 | .runtime_resume = resume_fn, \ | ||
358 | .runtime_idle = idle_fn, | ||
359 | #else | ||
360 | #define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) | ||
361 | #endif | ||
362 | 355 | ||
363 | /* | 356 | /* |
364 | * Use this if you want to use the same suspend and resume callbacks for suspend | 357 | * Use this if you want to use the same suspend and resume callbacks for suspend |
@@ -538,11 +531,7 @@ enum rpm_request { | |||
538 | }; | 531 | }; |
539 | 532 | ||
540 | struct wakeup_source; | 533 | struct wakeup_source; |
541 | 534 | struct pm_domain_data; | |
542 | struct pm_domain_data { | ||
543 | struct list_head list_node; | ||
544 | struct device *dev; | ||
545 | }; | ||
546 | 535 | ||
547 | struct pm_subsys_data { | 536 | struct pm_subsys_data { |
548 | spinlock_t lock; | 537 | spinlock_t lock; |
@@ -576,7 +565,7 @@ struct dev_pm_info { | |||
576 | #else | 565 | #else |
577 | unsigned int should_wakeup:1; | 566 | unsigned int should_wakeup:1; |
578 | #endif | 567 | #endif |
579 | #ifdef CONFIG_PM_RUNTIME | 568 | #ifdef CONFIG_PM |
580 | struct timer_list suspend_timer; | 569 | struct timer_list suspend_timer; |
581 | unsigned long timer_expires; | 570 | unsigned long timer_expires; |
582 | struct work_struct work; | 571 | struct work_struct work; |
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8348866e7b05..0b0039634410 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h | |||
@@ -18,6 +18,8 @@ struct pm_clk_notifier_block { | |||
18 | char *con_ids[]; | 18 | char *con_ids[]; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct clk; | ||
22 | |||
21 | #ifdef CONFIG_PM_CLK | 23 | #ifdef CONFIG_PM_CLK |
22 | static inline bool pm_clk_no_clocks(struct device *dev) | 24 | static inline bool pm_clk_no_clocks(struct device *dev) |
23 | { | 25 | { |
@@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev); | |||
29 | extern int pm_clk_create(struct device *dev); | 31 | extern int pm_clk_create(struct device *dev); |
30 | extern void pm_clk_destroy(struct device *dev); | 32 | extern void pm_clk_destroy(struct device *dev); |
31 | extern int pm_clk_add(struct device *dev, const char *con_id); | 33 | extern int pm_clk_add(struct device *dev, const char *con_id); |
34 | extern int pm_clk_add_clk(struct device *dev, struct clk *clk); | ||
32 | extern void pm_clk_remove(struct device *dev, const char *con_id); | 35 | extern void pm_clk_remove(struct device *dev, const char *con_id); |
33 | extern int pm_clk_suspend(struct device *dev); | 36 | extern int pm_clk_suspend(struct device *dev); |
34 | extern int pm_clk_resume(struct device *dev); | 37 | extern int pm_clk_resume(struct device *dev); |
@@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id) | |||
51 | { | 54 | { |
52 | return -EINVAL; | 55 | return -EINVAL; |
53 | } | 56 | } |
57 | |||
58 | static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) | ||
59 | { | ||
60 | return -EINVAL; | ||
61 | } | ||
54 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | 62 | static inline void pm_clk_remove(struct device *dev, const char *con_id) |
55 | { | 63 | { |
56 | } | 64 | } |
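pm_clk_add_clk() lets a caller that already holds a struct clk (for example one obtained from the clock framework) register it directly instead of going through a con_id string lookup. A hedged sketch (names illustrative; note the !CONFIG_PM_CLK stub returns -EINVAL just like pm_clk_add()):

#include <linux/pm_clock.h>
#include <linux/clk.h>

static int example_attach_clock(struct device *dev, struct clk *clk)
{
	int ret;

	ret = pm_clk_create(dev);
	if (ret)
		return ret;

	/* hand over an already-obtained clk rather than a con_id string */
	return pm_clk_add_clk(dev, clk);
}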
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2e0e06daf8c0..6cd20d5e651b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -17,6 +17,9 @@ | |||
17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
18 | #include <linux/cpuidle.h> | 18 | #include <linux/cpuidle.h> |
19 | 19 | ||
20 | /* Defines used for the flags field in the struct generic_pm_domain */ | ||
21 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ | ||
22 | |||
20 | enum gpd_status { | 23 | enum gpd_status { |
21 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 24 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
22 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ | 25 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ |
@@ -76,6 +79,7 @@ struct generic_pm_domain { | |||
76 | struct device *dev); | 79 | struct device *dev); |
77 | void (*detach_dev)(struct generic_pm_domain *domain, | 80 | void (*detach_dev)(struct generic_pm_domain *domain, |
78 | struct device *dev); | 81 | struct device *dev); |
82 | unsigned int flags; /* Bit field of configs for genpd */ | ||
79 | }; | 83 | }; |
80 | 84 | ||
81 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | 85 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) |
@@ -100,6 +104,11 @@ struct gpd_timing_data { | |||
100 | bool cached_stop_ok; | 104 | bool cached_stop_ok; |
101 | }; | 105 | }; |
102 | 106 | ||
107 | struct pm_domain_data { | ||
108 | struct list_head list_node; | ||
109 | struct device *dev; | ||
110 | }; | ||
111 | |||
103 | struct generic_pm_domain_data { | 112 | struct generic_pm_domain_data { |
104 | struct pm_domain_data base; | 113 | struct pm_domain_data base; |
105 | struct gpd_timing_data td; | 114 | struct gpd_timing_data td; |
@@ -147,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, | |||
147 | 156 | ||
148 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); | 157 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); |
149 | extern int pm_genpd_name_poweron(const char *domain_name); | 158 | extern int pm_genpd_name_poweron(const char *domain_name); |
159 | extern void pm_genpd_poweroff_unused(void); | ||
150 | 160 | ||
151 | extern struct dev_power_governor simple_qos_governor; | 161 | extern struct dev_power_governor simple_qos_governor; |
152 | extern struct dev_power_governor pm_domain_always_on_gov; | 162 | extern struct dev_power_governor pm_domain_always_on_gov; |
@@ -221,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name) | |||
221 | { | 231 | { |
222 | return -ENOSYS; | 232 | return -ENOSYS; |
223 | } | 233 | } |
234 | static inline void pm_genpd_poweroff_unused(void) {} | ||
224 | #define simple_qos_governor NULL | 235 | #define simple_qos_governor NULL |
225 | #define pm_domain_always_on_gov NULL | 236 | #define pm_domain_always_on_gov NULL |
226 | #endif | 237 | #endif |
@@ -237,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name, | |||
237 | return __pm_genpd_name_add_device(domain_name, dev, NULL); | 248 | return __pm_genpd_name_add_device(domain_name, dev, NULL); |
238 | } | 249 | } |
239 | 250 | ||
240 | #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME | ||
241 | extern void pm_genpd_poweroff_unused(void); | ||
242 | #else | ||
243 | static inline void pm_genpd_poweroff_unused(void) {} | ||
244 | #endif | ||
245 | |||
246 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP | 251 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP |
247 | extern void pm_genpd_syscore_poweroff(struct device *dev); | 252 | extern void pm_genpd_syscore_poweroff(struct device *dev); |
248 | extern void pm_genpd_syscore_poweron(struct device *dev); | 253 | extern void pm_genpd_syscore_poweron(struct device *dev); |
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0330217abfad..cec2d4540914 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
@@ -21,7 +21,7 @@ struct dev_pm_opp; | |||
21 | struct device; | 21 | struct device; |
22 | 22 | ||
23 | enum dev_pm_opp_event { | 23 | enum dev_pm_opp_event { |
24 | OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, | 24 | OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, |
25 | }; | 25 | }; |
26 | 26 | ||
27 | #if defined(CONFIG_PM_OPP) | 27 | #if defined(CONFIG_PM_OPP) |
@@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | |||
44 | 44 | ||
45 | int dev_pm_opp_add(struct device *dev, unsigned long freq, | 45 | int dev_pm_opp_add(struct device *dev, unsigned long freq, |
46 | unsigned long u_volt); | 46 | unsigned long u_volt); |
47 | void dev_pm_opp_remove(struct device *dev, unsigned long freq); | ||
47 | 48 | ||
48 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); | 49 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); |
49 | 50 | ||
@@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, | |||
90 | return -EINVAL; | 91 | return -EINVAL; |
91 | } | 92 | } |
92 | 93 | ||
94 | static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) | ||
95 | { | ||
96 | } | ||
97 | |||
93 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 98 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
94 | { | 99 | { |
95 | return 0; | 100 | return 0; |
@@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( | |||
109 | 114 | ||
110 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) | 115 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) |
111 | int of_init_opp_table(struct device *dev); | 116 | int of_init_opp_table(struct device *dev); |
117 | void of_free_opp_table(struct device *dev); | ||
112 | #else | 118 | #else |
113 | static inline int of_init_opp_table(struct device *dev) | 119 | static inline int of_init_opp_table(struct device *dev) |
114 | { | 120 | { |
115 | return -EINVAL; | 121 | return -EINVAL; |
116 | } | 122 | } |
123 | |||
124 | static inline void of_free_opp_table(struct device *dev) | ||
125 | { | ||
126 | } | ||
117 | #endif | 127 | #endif |
118 | 128 | ||
119 | #endif /* __LINUX_OPP_H__ */ | 129 | #endif /* __LINUX_OPP_H__ */ |
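For context, a hedged sketch of how the new dev_pm_opp_remove() and of_free_opp_table() helpers pair with the pre-existing dev_pm_opp_add()/of_init_opp_table(); the driver callbacks and the frequency/voltage values are made up:

#include <linux/pm_opp.h>

static int my_probe(struct device *dev)
{
	int ret;

	/* parse operating points from the device's DT node ... */
	ret = of_init_opp_table(dev);
	if (ret)
		return ret;

	/* ... and optionally add one more by hand: 1 GHz @ 1.10 V */
	ret = dev_pm_opp_add(dev, 1000000000, 1100000);
	if (ret)
		of_free_opp_table(dev);

	return ret;
}

static void my_remove(struct device *dev)
{
	dev_pm_opp_remove(dev, 1000000000);	/* drop the hand-added OPP */
	of_free_opp_table(dev);			/* drop the DT-provided ones */
}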
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 636e82834506..7b3ae0cffc05 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
@@ -154,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev); | |||
154 | int dev_pm_qos_add_ancestor_request(struct device *dev, | 154 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
155 | struct dev_pm_qos_request *req, | 155 | struct dev_pm_qos_request *req, |
156 | enum dev_pm_qos_req_type type, s32 value); | 156 | enum dev_pm_qos_req_type type, s32 value); |
157 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | ||
158 | void dev_pm_qos_hide_latency_limit(struct device *dev); | ||
159 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
160 | void dev_pm_qos_hide_flags(struct device *dev); | ||
161 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
162 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); | ||
163 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); | ||
164 | |||
165 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) | ||
166 | { | ||
167 | return dev->power.qos->resume_latency_req->data.pnode.prio; | ||
168 | } | ||
169 | |||
170 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
171 | { | ||
172 | return dev->power.qos->flags_req->data.flr.flags; | ||
173 | } | ||
157 | #else | 174 | #else |
158 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, | 175 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, |
159 | s32 mask) | 176 | s32 mask) |
@@ -200,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
200 | enum dev_pm_qos_req_type type, | 217 | enum dev_pm_qos_req_type type, |
201 | s32 value) | 218 | s32 value) |
202 | { return 0; } | 219 | { return 0; } |
203 | #endif | ||
204 | |||
205 | #ifdef CONFIG_PM_RUNTIME | ||
206 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | ||
207 | void dev_pm_qos_hide_latency_limit(struct device *dev); | ||
208 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
209 | void dev_pm_qos_hide_flags(struct device *dev); | ||
210 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
211 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); | ||
212 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); | ||
213 | |||
214 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) | ||
215 | { | ||
216 | return dev->power.qos->resume_latency_req->data.pnode.prio; | ||
217 | } | ||
218 | |||
219 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
220 | { | ||
221 | return dev->power.qos->flags_req->data.flr.flags; | ||
222 | } | ||
223 | #else | ||
224 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | 220 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
225 | { return 0; } | 221 | { return 0; } |
226 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | 222 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} |
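With the #ifdef shuffle above, the latency-limit and flags helpers now depend on CONFIG_PM rather than CONFIG_PM_RUNTIME. A minimal illustration of the usual expose/hide pattern; the callback names and the 100 us value are invented:

#include <linux/pm_qos.h>

static int my_probe(struct device *dev)
{
	/* create the per-device resume-latency QoS sysfs attribute */
	return dev_pm_qos_expose_latency_limit(dev, 100);
}

static void my_remove(struct device *dev)
{
	dev_pm_qos_hide_latency_limit(dev);
}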
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 367f49b9a1c9..30e84d48bfea 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev); | |||
35 | extern int pm_generic_runtime_resume(struct device *dev); | 35 | extern int pm_generic_runtime_resume(struct device *dev); |
36 | extern int pm_runtime_force_suspend(struct device *dev); | 36 | extern int pm_runtime_force_suspend(struct device *dev); |
37 | extern int pm_runtime_force_resume(struct device *dev); | 37 | extern int pm_runtime_force_resume(struct device *dev); |
38 | #else | ||
39 | static inline bool queue_pm_work(struct work_struct *work) { return false; } | ||
40 | |||
41 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | ||
42 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | ||
43 | static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } | ||
44 | static inline int pm_runtime_force_resume(struct device *dev) { return 0; } | ||
45 | #endif | ||
46 | |||
47 | #ifdef CONFIG_PM_RUNTIME | ||
48 | 38 | ||
49 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); | 39 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
50 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); | 40 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
@@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev) | |||
128 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | 118 | ACCESS_ONCE(dev->power.last_busy) = jiffies; |
129 | } | 119 | } |
130 | 120 | ||
131 | #else /* !CONFIG_PM_RUNTIME */ | 121 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
122 | { | ||
123 | return dev->power.irq_safe; | ||
124 | } | ||
125 | |||
126 | #else /* !CONFIG_PM */ | ||
127 | |||
128 | static inline bool queue_pm_work(struct work_struct *work) { return false; } | ||
129 | |||
130 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | ||
131 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | ||
132 | static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } | ||
133 | static inline int pm_runtime_force_resume(struct device *dev) { return 0; } | ||
132 | 134 | ||
133 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) | 135 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) |
134 | { | 136 | { |
@@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; } | |||
167 | 169 | ||
168 | static inline void pm_runtime_no_callbacks(struct device *dev) {} | 170 | static inline void pm_runtime_no_callbacks(struct device *dev) {} |
169 | static inline void pm_runtime_irq_safe(struct device *dev) {} | 171 | static inline void pm_runtime_irq_safe(struct device *dev) {} |
172 | static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } | ||
170 | 173 | ||
171 | static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } | 174 | static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } |
172 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} | 175 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} |
@@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration( | |||
179 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, | 182 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, |
180 | bool enable){} | 183 | bool enable){} |
181 | 184 | ||
182 | #endif /* !CONFIG_PM_RUNTIME */ | 185 | #endif /* !CONFIG_PM */ |
183 | 186 | ||
184 | static inline int pm_runtime_idle(struct device *dev) | 187 | static inline int pm_runtime_idle(struct device *dev) |
185 | { | 188 | { |
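The new pm_runtime_is_irq_safe() accessor simply reports dev->power.irq_safe. A sketch of how a PM domain or bus runtime callback might branch on it; my_fast_stop() and my_full_stop() are hypothetical helpers:

#include <linux/pm_runtime.h>

static int my_domain_runtime_suspend(struct device *dev)
{
	/*
	 * If the consumer called pm_runtime_irq_safe(), this callback may
	 * run with interrupts disabled and must not sleep.
	 */
	if (pm_runtime_is_irq_safe(dev))
		return my_fast_stop(dev);	/* hypothetical, atomic-safe */

	return my_full_stop(dev);		/* hypothetical, may sleep */
}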
diff --git a/include/linux/printk.h b/include/linux/printk.h index d78125f73ac4..c8f170324e64 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...) | |||
118 | #ifdef CONFIG_EARLY_PRINTK | 118 | #ifdef CONFIG_EARLY_PRINTK |
119 | extern asmlinkage __printf(1, 2) | 119 | extern asmlinkage __printf(1, 2) |
120 | void early_printk(const char *fmt, ...); | 120 | void early_printk(const char *fmt, ...); |
121 | void early_vprintk(const char *fmt, va_list ap); | ||
122 | #else | 121 | #else |
123 | static inline __printf(1, 2) __cold | 122 | static inline __printf(1, 2) __cold |
124 | void early_printk(const char *s, ...) { } | 123 | void early_printk(const char *s, ...) { } |
125 | #endif | 124 | #endif |
126 | 125 | ||
126 | typedef int(*printk_func_t)(const char *fmt, va_list args); | ||
127 | |||
127 | #ifdef CONFIG_PRINTK | 128 | #ifdef CONFIG_PRINTK |
128 | asmlinkage __printf(5, 0) | 129 | asmlinkage __printf(5, 0) |
129 | int vprintk_emit(int facility, int level, | 130 | int vprintk_emit(int facility, int level, |
diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 000000000000..a6a3d98bd7e9 --- /dev/null +++ b/include/linux/property.h | |||
@@ -0,0 +1,143 @@ | |||
1 | /* | ||
2 | * property.h - Unified device property interface. | ||
3 | * | ||
4 | * Copyright (C) 2014, Intel Corporation | ||
5 | * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
6 | * Mika Westerberg <mika.westerberg@linux.intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef _LINUX_PROPERTY_H_ | ||
14 | #define _LINUX_PROPERTY_H_ | ||
15 | |||
16 | #include <linux/types.h> | ||
17 | |||
18 | struct device; | ||
19 | |||
20 | enum dev_prop_type { | ||
21 | DEV_PROP_U8, | ||
22 | DEV_PROP_U16, | ||
23 | DEV_PROP_U32, | ||
24 | DEV_PROP_U64, | ||
25 | DEV_PROP_STRING, | ||
26 | DEV_PROP_MAX, | ||
27 | }; | ||
28 | |||
29 | bool device_property_present(struct device *dev, const char *propname); | ||
30 | int device_property_read_u8_array(struct device *dev, const char *propname, | ||
31 | u8 *val, size_t nval); | ||
32 | int device_property_read_u16_array(struct device *dev, const char *propname, | ||
33 | u16 *val, size_t nval); | ||
34 | int device_property_read_u32_array(struct device *dev, const char *propname, | ||
35 | u32 *val, size_t nval); | ||
36 | int device_property_read_u64_array(struct device *dev, const char *propname, | ||
37 | u64 *val, size_t nval); | ||
38 | int device_property_read_string_array(struct device *dev, const char *propname, | ||
39 | const char **val, size_t nval); | ||
40 | int device_property_read_string(struct device *dev, const char *propname, | ||
41 | const char **val); | ||
42 | |||
43 | enum fwnode_type { | ||
44 | FWNODE_INVALID = 0, | ||
45 | FWNODE_OF, | ||
46 | FWNODE_ACPI, | ||
47 | }; | ||
48 | |||
49 | struct fwnode_handle { | ||
50 | enum fwnode_type type; | ||
51 | }; | ||
52 | |||
53 | bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); | ||
54 | int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, | ||
55 | const char *propname, u8 *val, | ||
56 | size_t nval); | ||
57 | int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, | ||
58 | const char *propname, u16 *val, | ||
59 | size_t nval); | ||
60 | int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, | ||
61 | const char *propname, u32 *val, | ||
62 | size_t nval); | ||
63 | int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, | ||
64 | const char *propname, u64 *val, | ||
65 | size_t nval); | ||
66 | int fwnode_property_read_string_array(struct fwnode_handle *fwnode, | ||
67 | const char *propname, const char **val, | ||
68 | size_t nval); | ||
69 | int fwnode_property_read_string(struct fwnode_handle *fwnode, | ||
70 | const char *propname, const char **val); | ||
71 | |||
72 | struct fwnode_handle *device_get_next_child_node(struct device *dev, | ||
73 | struct fwnode_handle *child); | ||
74 | |||
75 | #define device_for_each_child_node(dev, child) \ | ||
76 | for (child = device_get_next_child_node(dev, NULL); child; \ | ||
77 | child = device_get_next_child_node(dev, child)) | ||
78 | |||
79 | void fwnode_handle_put(struct fwnode_handle *fwnode); | ||
80 | |||
81 | unsigned int device_get_child_node_count(struct device *dev); | ||
82 | |||
83 | static inline bool device_property_read_bool(struct device *dev, | ||
84 | const char *propname) | ||
85 | { | ||
86 | return device_property_present(dev, propname); | ||
87 | } | ||
88 | |||
89 | static inline int device_property_read_u8(struct device *dev, | ||
90 | const char *propname, u8 *val) | ||
91 | { | ||
92 | return device_property_read_u8_array(dev, propname, val, 1); | ||
93 | } | ||
94 | |||
95 | static inline int device_property_read_u16(struct device *dev, | ||
96 | const char *propname, u16 *val) | ||
97 | { | ||
98 | return device_property_read_u16_array(dev, propname, val, 1); | ||
99 | } | ||
100 | |||
101 | static inline int device_property_read_u32(struct device *dev, | ||
102 | const char *propname, u32 *val) | ||
103 | { | ||
104 | return device_property_read_u32_array(dev, propname, val, 1); | ||
105 | } | ||
106 | |||
107 | static inline int device_property_read_u64(struct device *dev, | ||
108 | const char *propname, u64 *val) | ||
109 | { | ||
110 | return device_property_read_u64_array(dev, propname, val, 1); | ||
111 | } | ||
112 | |||
113 | static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, | ||
114 | const char *propname) | ||
115 | { | ||
116 | return fwnode_property_present(fwnode, propname); | ||
117 | } | ||
118 | |||
119 | static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, | ||
120 | const char *propname, u8 *val) | ||
121 | { | ||
122 | return fwnode_property_read_u8_array(fwnode, propname, val, 1); | ||
123 | } | ||
124 | |||
125 | static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, | ||
126 | const char *propname, u16 *val) | ||
127 | { | ||
128 | return fwnode_property_read_u16_array(fwnode, propname, val, 1); | ||
129 | } | ||
130 | |||
131 | static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, | ||
132 | const char *propname, u32 *val) | ||
133 | { | ||
134 | return fwnode_property_read_u32_array(fwnode, propname, val, 1); | ||
135 | } | ||
136 | |||
137 | static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, | ||
138 | const char *propname, u64 *val) | ||
139 | { | ||
140 | return fwnode_property_read_u64_array(fwnode, propname, val, 1); | ||
141 | } | ||
142 | |||
143 | #endif /* _LINUX_PROPERTY_H_ */ | ||
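Since property.h is an entirely new header, a short consumer-side sketch may help. It uses only calls declared above; the property names, the "my" driver function, and the child-node label handling are invented for illustration:

#include <linux/device.h>
#include <linux/property.h>

static int my_probe(struct device *dev)
{
	struct fwnode_handle *child;
	u32 freq;
	int ret;

	/* works the same whether the data comes from DT or ACPI */
	ret = device_property_read_u32(dev, "clock-frequency", &freq);
	if (ret)
		return ret;

	device_for_each_child_node(dev, child) {
		const char *label;

		if (!fwnode_property_read_string(child, "label", &label))
			dev_info(dev, "child node labelled %s\n", label);
	}

	return 0;
}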
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9974975d40db..4af3fdc85b01 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h | |||
@@ -53,7 +53,8 @@ struct persistent_ram_zone { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, | 55 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
56 | u32 sig, struct persistent_ram_ecc_info *ecc_info); | 56 | u32 sig, struct persistent_ram_ecc_info *ecc_info, |
57 | unsigned int memtype); | ||
57 | void persistent_ram_free(struct persistent_ram_zone *prz); | 58 | void persistent_ram_free(struct persistent_ram_zone *prz); |
58 | void persistent_ram_zap(struct persistent_ram_zone *prz); | 59 | void persistent_ram_zap(struct persistent_ram_zone *prz); |
59 | 60 | ||
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, | |||
76 | struct ramoops_platform_data { | 77 | struct ramoops_platform_data { |
77 | unsigned long mem_size; | 78 | unsigned long mem_size; |
78 | unsigned long mem_address; | 79 | unsigned long mem_address; |
80 | unsigned int mem_type; | ||
79 | unsigned long record_size; | 81 | unsigned long record_size; |
80 | unsigned long console_size; | 82 | unsigned long console_size; |
81 | unsigned long ftrace_size; | 83 | unsigned long ftrace_size; |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index cc79eff4a1ad..987a73a40ef8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code); | |||
52 | extern void __ptrace_link(struct task_struct *child, | 52 | extern void __ptrace_link(struct task_struct *child, |
53 | struct task_struct *new_parent); | 53 | struct task_struct *new_parent); |
54 | extern void __ptrace_unlink(struct task_struct *child); | 54 | extern void __ptrace_unlink(struct task_struct *child); |
55 | extern void exit_ptrace(struct task_struct *tracer); | 55 | extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); |
56 | #define PTRACE_MODE_READ 0x01 | 56 | #define PTRACE_MODE_READ 0x01 |
57 | #define PTRACE_MODE_ATTACH 0x02 | 57 | #define PTRACE_MODE_ATTACH 0x02 |
58 | #define PTRACE_MODE_NOAUDIT 0x04 | 58 | #define PTRACE_MODE_NOAUDIT 0x04 |
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606..e1ab6e86cdb3 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #ifndef __LINUX_PXA168_ETH_H | 4 | #ifndef __LINUX_PXA168_ETH_H |
5 | #define __LINUX_PXA168_ETH_H | 5 | #define __LINUX_PXA168_ETH_H |
6 | 6 | ||
7 | #include <linux/phy.h> | ||
8 | |||
7 | struct pxa168_eth_platform_data { | 9 | struct pxa168_eth_platform_data { |
8 | int port_number; | 10 | int port_number; |
9 | int phy_addr; | 11 | int phy_addr; |
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { | |||
13 | */ | 15 | */ |
14 | int speed; /* 0, SPEED_10, SPEED_100 */ | 16 | int speed; /* 0, SPEED_10, SPEED_100 */ |
15 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | 17 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ |
18 | phy_interface_t intf; | ||
16 | 19 | ||
17 | /* | 20 | /* |
18 | * Override default RX/TX queue sizes if nonzero. | 21 | * Override default RX/TX queue sizes if nonzero. |
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index f2b405116166..77aed9ea1d26 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
@@ -108,6 +108,25 @@ | |||
108 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ | 108 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ |
109 | #endif | 109 | #endif |
110 | 110 | ||
111 | /* QUARK_X1000 SSCR0 bit definition */ | ||
112 | #define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ | ||
113 | #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ | ||
114 | #define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ | ||
115 | #define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ | ||
116 | |||
117 | #define RX_THRESH_QUARK_X1000_DFLT 1 | ||
118 | #define TX_THRESH_QUARK_X1000_DFLT 16 | ||
119 | |||
120 | #define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ | ||
121 | #define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ | ||
122 | |||
123 | #define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ | ||
124 | #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ | ||
125 | #define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ | ||
126 | #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ | ||
127 | #define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ | ||
128 | #define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ | ||
129 | |||
111 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ | 130 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ |
112 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ | 131 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ |
113 | #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ | 132 | #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ |
@@ -175,6 +194,7 @@ enum pxa_ssp_type { | |||
175 | PXA910_SSP, | 194 | PXA910_SSP, |
176 | CE4100_SSP, | 195 | CE4100_SSP, |
177 | LPSS_SSP, | 196 | LPSS_SSP, |
197 | QUARK_X1000_SSP, | ||
178 | }; | 198 | }; |
179 | 199 | ||
180 | struct ssp_device { | 200 | struct ssp_device { |
diff --git a/include/linux/quota.h b/include/linux/quota.h index 80d345a3524c..50978b781a19 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -56,6 +56,11 @@ enum quota_type { | |||
56 | PRJQUOTA = 2, /* element used for project quotas */ | 56 | PRJQUOTA = 2, /* element used for project quotas */ |
57 | }; | 57 | }; |
58 | 58 | ||
59 | /* Masks for quota types when used as a bitmask */ | ||
60 | #define QTYPE_MASK_USR (1 << USRQUOTA) | ||
61 | #define QTYPE_MASK_GRP (1 << GRPQUOTA) | ||
62 | #define QTYPE_MASK_PRJ (1 << PRJQUOTA) | ||
63 | |||
59 | typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ | 64 | typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ |
60 | typedef long long qsize_t; /* Type in which we store sizes */ | 65 | typedef long long qsize_t; /* Type in which we store sizes */ |
61 | 66 | ||
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 1d3eee594cd6..f23538a6e411 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot); | |||
64 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); | 64 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); |
65 | void __dquot_free_space(struct inode *inode, qsize_t number, int flags); | 65 | void __dquot_free_space(struct inode *inode, qsize_t number, int flags); |
66 | 66 | ||
67 | int dquot_alloc_inode(const struct inode *inode); | 67 | int dquot_alloc_inode(struct inode *inode); |
68 | 68 | ||
69 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); | 69 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); |
70 | void dquot_free_inode(const struct inode *inode); | 70 | void dquot_free_inode(struct inode *inode); |
71 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); | 71 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); |
72 | 72 | ||
73 | int dquot_disable(struct super_block *sb, int type, unsigned int flags); | 73 | int dquot_disable(struct super_block *sb, int type, unsigned int flags); |
@@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode) | |||
213 | { | 213 | { |
214 | } | 214 | } |
215 | 215 | ||
216 | static inline int dquot_alloc_inode(const struct inode *inode) | 216 | static inline int dquot_alloc_inode(struct inode *inode) |
217 | { | 217 | { |
218 | return 0; | 218 | return 0; |
219 | } | 219 | } |
220 | 220 | ||
221 | static inline void dquot_free_inode(const struct inode *inode) | 221 | static inline void dquot_free_inode(struct inode *inode) |
222 | { | 222 | { |
223 | } | 223 | } |
224 | 224 | ||
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 0a260d8a18bf..18102529254e 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
@@ -17,14 +17,20 @@ struct ratelimit_state { | |||
17 | unsigned long begin; | 17 | unsigned long begin; |
18 | }; | 18 | }; |
19 | 19 | ||
20 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ | 20 | #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ |
21 | \ | ||
22 | struct ratelimit_state name = { \ | ||
23 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ | 21 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
24 | .interval = interval_init, \ | 22 | .interval = interval_init, \ |
25 | .burst = burst_init, \ | 23 | .burst = burst_init, \ |
26 | } | 24 | } |
27 | 25 | ||
26 | #define RATELIMIT_STATE_INIT_DISABLED \ | ||
27 | RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) | ||
28 | |||
29 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ | ||
30 | \ | ||
31 | struct ratelimit_state name = \ | ||
32 | RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ | ||
33 | |||
28 | static inline void ratelimit_state_init(struct ratelimit_state *rs, | 34 | static inline void ratelimit_state_init(struct ratelimit_state *rs, |
29 | int interval, int burst) | 35 | int interval, int burst) |
30 | { | 36 | { |
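Splitting RATELIMIT_STATE_INIT() out of DEFINE_RATELIMIT_STATE() lets a ratelimit_state be initialized statically inside another structure, which the old macro could not express. A small sketch; struct my_dev and the warning message are illustrative:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

struct my_dev {
	struct ratelimit_state rs;
};

static struct my_dev my_dev = {
	.rs = RATELIMIT_STATE_INIT(my_dev.rs, DEFAULT_RATELIMIT_INTERVAL,
				   DEFAULT_RATELIMIT_BURST),
};

static void my_dev_complain(void)
{
	if (__ratelimit(&my_dev.rs))
		pr_warn("my_dev: something went wrong\n");
}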
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb8..529bc946f450 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
241 | * list_entry_rcu - get the struct for this entry | 241 | * list_entry_rcu - get the struct for this entry |
242 | * @ptr: the &struct list_head pointer. | 242 | * @ptr: the &struct list_head pointer. |
243 | * @type: the type of the struct this is embedded in. | 243 | * @type: the type of the struct this is embedded in. |
244 | * @member: the name of the list_struct within the struct. | 244 | * @member: the name of the list_head within the struct. |
245 | * | 245 | * |
246 | * This primitive may safely run concurrently with the _rcu list-mutation | 246 | * This primitive may safely run concurrently with the _rcu list-mutation |
247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
278 | * list_first_or_null_rcu - get the first element from a list | 278 | * list_first_or_null_rcu - get the first element from a list |
279 | * @ptr: the list head to take the element from. | 279 | * @ptr: the list head to take the element from. |
280 | * @type: the type of the struct this is embedded in. | 280 | * @type: the type of the struct this is embedded in. |
281 | * @member: the name of the list_struct within the struct. | 281 | * @member: the name of the list_head within the struct. |
282 | * | 282 | * |
283 | * Note that if the list is empty, it returns NULL. | 283 | * Note that if the list is empty, it returns NULL. |
284 | * | 284 | * |
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
296 | * list_for_each_entry_rcu - iterate over rcu list of given type | 296 | * list_for_each_entry_rcu - iterate over rcu list of given type |
297 | * @pos: the type * to use as a loop cursor. | 297 | * @pos: the type * to use as a loop cursor. |
298 | * @head: the head for your list. | 298 | * @head: the head for your list. |
299 | * @member: the name of the list_struct within the struct. | 299 | * @member: the name of the list_head within the struct. |
300 | * | 300 | * |
301 | * This list-traversal primitive may safely run concurrently with | 301 | * This list-traversal primitive may safely run concurrently with |
302 | * the _rcu list-mutation primitives such as list_add_rcu() | 302 | * the _rcu list-mutation primitives such as list_add_rcu() |
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
311 | * list_for_each_entry_continue_rcu - continue iteration over list of given type | 311 | * list_for_each_entry_continue_rcu - continue iteration over list of given type |
312 | * @pos: the type * to use as a loop cursor. | 312 | * @pos: the type * to use as a loop cursor. |
313 | * @head: the head for your list. | 313 | * @head: the head for your list. |
314 | * @member: the name of the list_struct within the struct. | 314 | * @member: the name of the list_head within the struct. |
315 | * | 315 | * |
316 | * Continue to iterate over list of given type, continuing after | 316 | * Continue to iterate over list of given type, continuing after |
317 | * the current position. | 317 | * the current position. |
@@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, | |||
542 | pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ | 542 | pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ |
543 | typeof(*(pos)), member)) | 543 | typeof(*(pos)), member)) |
544 | 544 | ||
545 | /** | ||
546 | * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point | ||
547 | * @pos: the type * to use as a loop cursor. | ||
548 | * @member: the name of the hlist_node within the struct. | ||
549 | */ | ||
550 | #define hlist_for_each_entry_from_rcu(pos, member) \ | ||
551 | for (; pos; \ | ||
552 | pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ | ||
553 | typeof(*(pos)), member)) | ||
545 | 554 | ||
546 | #endif /* __KERNEL__ */ | 555 | #endif /* __KERNEL__ */ |
547 | #endif | 556 | #endif |
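The new hlist_for_each_entry_from_rcu() resumes an RCU-protected hlist walk at, and including, the current cursor. A sketch under the usual RCU reader rules; struct my_obj and the key lookup are illustrative only:

#include <linux/rculist.h>

struct my_obj {
	struct hlist_node node;
	int key;
};

/* caller holds rcu_read_lock(); searches from @pos inclusive */
static struct my_obj *my_find_from(struct my_obj *pos, int key)
{
	hlist_for_each_entry_from_rcu(pos, node) {
		if (pos->key == key)
			return pos;
	}
	return NULL;
}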
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 53ff1a752d7e..ed4f5939a452 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -57,7 +57,7 @@ enum rcutorture_type { | |||
57 | INVALID_RCU_FLAVOR | 57 | INVALID_RCU_FLAVOR |
58 | }; | 58 | }; |
59 | 59 | ||
60 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 60 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
61 | void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, | 61 | void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, |
62 | unsigned long *gpnum, unsigned long *completed); | 62 | unsigned long *gpnum, unsigned long *completed); |
63 | void rcutorture_record_test_transition(void); | 63 | void rcutorture_record_test_transition(void); |
@@ -260,7 +260,7 @@ static inline int rcu_preempt_depth(void) | |||
260 | void rcu_init(void); | 260 | void rcu_init(void); |
261 | void rcu_sched_qs(void); | 261 | void rcu_sched_qs(void); |
262 | void rcu_bh_qs(void); | 262 | void rcu_bh_qs(void); |
263 | void rcu_check_callbacks(int cpu, int user); | 263 | void rcu_check_callbacks(int user); |
264 | struct notifier_block; | 264 | struct notifier_block; |
265 | void rcu_idle_enter(void); | 265 | void rcu_idle_enter(void); |
266 | void rcu_idle_exit(void); | 266 | void rcu_idle_exit(void); |
@@ -348,8 +348,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu; | |||
348 | */ | 348 | */ |
349 | #define cond_resched_rcu_qs() \ | 349 | #define cond_resched_rcu_qs() \ |
350 | do { \ | 350 | do { \ |
351 | rcu_note_voluntary_context_switch(current); \ | 351 | if (!cond_resched()) \ |
352 | cond_resched(); \ | 352 | rcu_note_voluntary_context_switch(current); \ |
353 | } while (0) | 353 | } while (0) |
354 | 354 | ||
355 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) | 355 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) |
@@ -365,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head, | |||
365 | void (*func)(struct rcu_head *head)); | 365 | void (*func)(struct rcu_head *head)); |
366 | void wait_rcu_gp(call_rcu_func_t crf); | 366 | void wait_rcu_gp(call_rcu_func_t crf); |
367 | 367 | ||
368 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 368 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
369 | #include <linux/rcutree.h> | 369 | #include <linux/rcutree.h> |
370 | #elif defined(CONFIG_TINY_RCU) | 370 | #elif defined(CONFIG_TINY_RCU) |
371 | #include <linux/rcutiny.h> | 371 | #include <linux/rcutiny.h> |
@@ -867,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void) | |||
867 | * | 867 | * |
868 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), | 868 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), |
869 | * it is illegal to block while in an RCU read-side critical section. | 869 | * it is illegal to block while in an RCU read-side critical section. |
870 | * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT | 870 | * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT |
871 | * kernel builds, RCU read-side critical sections may be preempted, | 871 | * kernel builds, RCU read-side critical sections may be preempted, |
872 | * but explicit blocking is illegal. Finally, in preemptible RCU | 872 | * but explicit blocking is illegal. Finally, in preemptible RCU |
873 | * implementations in real-time (with -rt patchset) kernel builds, RCU | 873 | * implementations in real-time (with -rt patchset) kernel builds, RCU |
@@ -902,7 +902,9 @@ static inline void rcu_read_lock(void) | |||
902 | * Unfortunately, this function acquires the scheduler's runqueue and | 902 | * Unfortunately, this function acquires the scheduler's runqueue and |
903 | * priority-inheritance spinlocks. This means that deadlock could result | 903 | * priority-inheritance spinlocks. This means that deadlock could result |
904 | * if the caller of rcu_read_unlock() already holds one of these locks or | 904 | * if the caller of rcu_read_unlock() already holds one of these locks or |
905 | * any lock that is ever acquired while holding them. | 905 | * any lock that is ever acquired while holding them; or any lock which |
906 | * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() | ||
907 | * does not disable irqs while taking ->wait_lock. | ||
906 | * | 908 | * |
907 | * That said, RCU readers are never priority boosted unless they were | 909 | * That said, RCU readers are never priority boosted unless they were |
908 | * preempted. Therefore, one way to avoid deadlock is to make sure | 910 | * preempted. Therefore, one way to avoid deadlock is to make sure |
@@ -1062,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
1062 | */ | 1064 | */ |
1063 | #define RCU_INIT_POINTER(p, v) \ | 1065 | #define RCU_INIT_POINTER(p, v) \ |
1064 | do { \ | 1066 | do { \ |
1067 | rcu_dereference_sparse(p, __rcu); \ | ||
1065 | p = RCU_INITIALIZER(v); \ | 1068 | p = RCU_INITIALIZER(v); \ |
1066 | } while (0) | 1069 | } while (0) |
1067 | 1070 | ||
@@ -1118,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
1118 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) | 1121 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
1119 | 1122 | ||
1120 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) | 1123 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) |
1121 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | 1124 | static inline int rcu_needs_cpu(unsigned long *delta_jiffies) |
1122 | { | 1125 | { |
1123 | *delta_jiffies = ULONG_MAX; | 1126 | *delta_jiffies = ULONG_MAX; |
1124 | return 0; | 1127 | return 0; |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 38cc5b1e252d..0e5366200154 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, | |||
78 | call_rcu(head, func); | 78 | call_rcu(head, func); |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline void rcu_note_context_switch(int cpu) | 81 | static inline void rcu_note_context_switch(void) |
82 | { | 82 | { |
83 | rcu_sched_qs(); | 83 | rcu_sched_qs(); |
84 | } | 84 | } |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3e2f5d432743..52953790dcca 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -30,9 +30,9 @@ | |||
30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
32 | 32 | ||
33 | void rcu_note_context_switch(int cpu); | 33 | void rcu_note_context_switch(void); |
34 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | 34 | #ifndef CONFIG_RCU_NOCB_CPU_ALL |
35 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); | 35 | int rcu_needs_cpu(unsigned long *delta_jiffies); |
36 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | 36 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ |
37 | void rcu_cpu_stall_reset(void); | 37 | void rcu_cpu_stall_reset(void); |
38 | 38 | ||
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void); | |||
43 | */ | 43 | */ |
44 | static inline void rcu_virt_note_context_switch(int cpu) | 44 | static inline void rcu_virt_note_context_switch(int cpu) |
45 | { | 45 | { |
46 | rcu_note_context_switch(cpu); | 46 | rcu_note_context_switch(); |
47 | } | 47 | } |
48 | 48 | ||
49 | void synchronize_rcu_bh(void); | 49 | void synchronize_rcu_bh(void); |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index c5ed83f49c4e..4419b99d8d6e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
@@ -27,6 +27,7 @@ struct spmi_device; | |||
27 | struct regmap; | 27 | struct regmap; |
28 | struct regmap_range_cfg; | 28 | struct regmap_range_cfg; |
29 | struct regmap_field; | 29 | struct regmap_field; |
30 | struct snd_ac97; | ||
30 | 31 | ||
31 | /* An enum of all the supported cache types */ | 32 | /* An enum of all the supported cache types */ |
32 | enum regcache_type { | 33 | enum regcache_type { |
@@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev, | |||
340 | struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, | 341 | struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, |
341 | void __iomem *regs, | 342 | void __iomem *regs, |
342 | const struct regmap_config *config); | 343 | const struct regmap_config *config); |
344 | struct regmap *regmap_init_ac97(struct snd_ac97 *ac97, | ||
345 | const struct regmap_config *config); | ||
343 | 346 | ||
344 | struct regmap *devm_regmap_init(struct device *dev, | 347 | struct regmap *devm_regmap_init(struct device *dev, |
345 | const struct regmap_bus *bus, | 348 | const struct regmap_bus *bus, |
@@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev, | |||
356 | struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, | 359 | struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, |
357 | void __iomem *regs, | 360 | void __iomem *regs, |
358 | const struct regmap_config *config); | 361 | const struct regmap_config *config); |
362 | struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97, | ||
363 | const struct regmap_config *config); | ||
364 | |||
365 | bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | ||
359 | 366 | ||
360 | /** | 367 | /** |
361 | * regmap_init_mmio(): Initialise register map | 368 | * regmap_init_mmio(): Initialise register map |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f540b1496e2f..d17e1ff7ad01 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -101,6 +101,8 @@ struct regmap; | |||
101 | * Data passed is "struct pre_voltage_change_data" | 101 | * Data passed is "struct pre_voltage_change_data" |
102 | * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. | 102 | * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. |
103 | * Data passed is old voltage cast to (void *). | 103 | * Data passed is old voltage cast to (void *). |
104 | * PRE_DISABLE Regulator is about to be disabled | ||
105 | * ABORT_DISABLE Regulator disable failed for some reason | ||
104 | * | 106 | * |
105 | * NOTE: These events can be OR'ed together when passed into handler. | 107 | * NOTE: These events can be OR'ed together when passed into handler. |
106 | */ | 108 | */ |
@@ -115,6 +117,8 @@ struct regmap; | |||
115 | #define REGULATOR_EVENT_DISABLE 0x80 | 117 | #define REGULATOR_EVENT_DISABLE 0x80 |
116 | #define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 | 118 | #define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 |
117 | #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 | 119 | #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 |
120 | #define REGULATOR_EVENT_PRE_DISABLE 0x400 | ||
121 | #define REGULATOR_EVENT_ABORT_DISABLE 0x800 | ||
118 | 122 | ||
119 | /** | 123 | /** |
120 | * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event | 124 | * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event |
@@ -284,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id) | |||
284 | static inline struct regulator *__must_check | 288 | static inline struct regulator *__must_check |
285 | regulator_get_exclusive(struct device *dev, const char *id) | 289 | regulator_get_exclusive(struct device *dev, const char *id) |
286 | { | 290 | { |
287 | return NULL; | 291 | return ERR_PTR(-ENODEV); |
288 | } | 292 | } |
289 | 293 | ||
290 | static inline struct regulator *__must_check | 294 | static inline struct regulator *__must_check |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index fc0ee0ce8325..5f1e9ca47417 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -243,6 +243,8 @@ enum regulator_type { | |||
243 | * | 243 | * |
244 | * @enable_time: Time taken for initial enable of regulator (in uS). | 244 | * @enable_time: Time taken for initial enable of regulator (in uS). |
245 | * @off_on_delay: guard time (in uS), before re-enabling a regulator | 245 | * @off_on_delay: guard time (in uS), before re-enabling a regulator |
246 | * | ||
247 | * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode | ||
246 | */ | 248 | */ |
247 | struct regulator_desc { | 249 | struct regulator_desc { |
248 | const char *name; | 250 | const char *name; |
@@ -285,6 +287,8 @@ struct regulator_desc { | |||
285 | unsigned int enable_time; | 287 | unsigned int enable_time; |
286 | 288 | ||
287 | unsigned int off_on_delay; | 289 | unsigned int off_on_delay; |
290 | |||
291 | unsigned int (*of_map_mode)(unsigned int mode); | ||
288 | }; | 292 | }; |
289 | 293 | ||
290 | /** | 294 | /** |
@@ -301,6 +305,9 @@ struct regulator_desc { | |||
301 | * NULL). | 305 | * NULL). |
302 | * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is | 306 | * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is |
303 | * insufficient. | 307 | * insufficient. |
308 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly | ||
309 | * initialized, meaning that >= 0 is a valid gpio | ||
310 | * identifier and < 0 is a non existent gpio. | ||
304 | * @ena_gpio: GPIO controlling regulator enable. | 311 | * @ena_gpio: GPIO controlling regulator enable. |
305 | * @ena_gpio_invert: Sense for GPIO enable control. | 312 | * @ena_gpio_invert: Sense for GPIO enable control. |
306 | * @ena_gpio_flags: Flags to use when calling gpio_request_one() | 313 | * @ena_gpio_flags: Flags to use when calling gpio_request_one() |
@@ -312,6 +319,7 @@ struct regulator_config { | |||
312 | struct device_node *of_node; | 319 | struct device_node *of_node; |
313 | struct regmap *regmap; | 320 | struct regmap *regmap; |
314 | 321 | ||
322 | bool ena_gpio_initialized; | ||
315 | int ena_gpio; | 323 | int ena_gpio; |
316 | unsigned int ena_gpio_invert:1; | 324 | unsigned int ena_gpio_invert:1; |
317 | unsigned int ena_gpio_flags; | 325 | unsigned int ena_gpio_flags; |
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h index f9217965aaa3..763953f7e3b8 100644 --- a/include/linux/regulator/of_regulator.h +++ b/include/linux/regulator/of_regulator.h | |||
@@ -6,24 +6,29 @@ | |||
6 | #ifndef __LINUX_OF_REG_H | 6 | #ifndef __LINUX_OF_REG_H |
7 | #define __LINUX_OF_REG_H | 7 | #define __LINUX_OF_REG_H |
8 | 8 | ||
9 | struct regulator_desc; | ||
10 | |||
9 | struct of_regulator_match { | 11 | struct of_regulator_match { |
10 | const char *name; | 12 | const char *name; |
11 | void *driver_data; | 13 | void *driver_data; |
12 | struct regulator_init_data *init_data; | 14 | struct regulator_init_data *init_data; |
13 | struct device_node *of_node; | 15 | struct device_node *of_node; |
16 | const struct regulator_desc *desc; | ||
14 | }; | 17 | }; |
15 | 18 | ||
16 | #if defined(CONFIG_OF) | 19 | #if defined(CONFIG_OF) |
17 | extern struct regulator_init_data | 20 | extern struct regulator_init_data |
18 | *of_get_regulator_init_data(struct device *dev, | 21 | *of_get_regulator_init_data(struct device *dev, |
19 | struct device_node *node); | 22 | struct device_node *node, |
23 | const struct regulator_desc *desc); | ||
20 | extern int of_regulator_match(struct device *dev, struct device_node *node, | 24 | extern int of_regulator_match(struct device *dev, struct device_node *node, |
21 | struct of_regulator_match *matches, | 25 | struct of_regulator_match *matches, |
22 | unsigned int num_matches); | 26 | unsigned int num_matches); |
23 | #else | 27 | #else |
24 | static inline struct regulator_init_data | 28 | static inline struct regulator_init_data |
25 | *of_get_regulator_init_data(struct device *dev, | 29 | *of_get_regulator_init_data(struct device *dev, |
26 | struct device_node *node) | 30 | struct device_node *node, |
31 | const struct regulator_desc *desc) | ||
27 | { | 32 | { |
28 | return NULL; | 33 | return NULL; |
29 | } | 34 | } |
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h deleted file mode 100644 index 56b7bc32db4f..000000000000 --- a/include/linux/res_counter.h +++ /dev/null | |||
@@ -1,223 +0,0 @@ | |||
1 | #ifndef __RES_COUNTER_H__ | ||
2 | #define __RES_COUNTER_H__ | ||
3 | |||
4 | /* | ||
5 | * Resource Counters | ||
6 | * Contain common data types and routines for resource accounting | ||
7 | * | ||
8 | * Copyright 2007 OpenVZ SWsoft Inc | ||
9 | * | ||
10 | * Author: Pavel Emelianov <xemul@openvz.org> | ||
11 | * | ||
12 | * See Documentation/cgroups/resource_counter.txt for more | ||
13 | * info about what this counter is. | ||
14 | */ | ||
15 | |||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/errno.h> | ||
18 | |||
19 | /* | ||
20 | * The core object. the cgroup that wishes to account for some | ||
21 | * resource may include this counter into its structures and use | ||
22 | * the helpers described beyond | ||
23 | */ | ||
24 | |||
25 | struct res_counter { | ||
26 | /* | ||
27 | * the current resource consumption level | ||
28 | */ | ||
29 | unsigned long long usage; | ||
30 | /* | ||
31 | * the maximal value of the usage from the counter creation | ||
32 | */ | ||
33 | unsigned long long max_usage; | ||
34 | /* | ||
35 | * the limit that usage cannot exceed | ||
36 | */ | ||
37 | unsigned long long limit; | ||
38 | /* | ||
39 | * the limit that usage can be exceed | ||
40 | */ | ||
41 | unsigned long long soft_limit; | ||
42 | /* | ||
43 | * the number of unsuccessful attempts to consume the resource | ||
44 | */ | ||
45 | unsigned long long failcnt; | ||
46 | /* | ||
47 | * the lock to protect all of the above. | ||
48 | * the routines below consider this to be IRQ-safe | ||
49 | */ | ||
50 | spinlock_t lock; | ||
51 | /* | ||
52 | * Parent counter, used for hierarchial resource accounting | ||
53 | */ | ||
54 | struct res_counter *parent; | ||
55 | }; | ||
56 | |||
57 | #define RES_COUNTER_MAX ULLONG_MAX | ||
58 | |||
59 | /** | ||
60 | * Helpers to interact with userspace | ||
61 | * res_counter_read_u64() - returns the value of the specified member. | ||
62 | * res_counter_read/_write - put/get the specified fields from the | ||
63 | * res_counter struct to/from the user | ||
64 | * | ||
65 | * @counter: the counter in question | ||
66 | * @member: the field to work with (see RES_xxx below) | ||
67 | * @buf: the buffer to opeate on,... | ||
68 | * @nbytes: its size... | ||
69 | * @pos: and the offset. | ||
70 | */ | ||
71 | |||
72 | u64 res_counter_read_u64(struct res_counter *counter, int member); | ||
73 | |||
74 | ssize_t res_counter_read(struct res_counter *counter, int member, | ||
75 | const char __user *buf, size_t nbytes, loff_t *pos, | ||
76 | int (*read_strategy)(unsigned long long val, char *s)); | ||
77 | |||
78 | int res_counter_memparse_write_strategy(const char *buf, | ||
79 | unsigned long long *res); | ||
80 | |||
81 | /* | ||
82 | * the field descriptors. one for each member of res_counter | ||
83 | */ | ||
84 | |||
85 | enum { | ||
86 | RES_USAGE, | ||
87 | RES_MAX_USAGE, | ||
88 | RES_LIMIT, | ||
89 | RES_FAILCNT, | ||
90 | RES_SOFT_LIMIT, | ||
91 | }; | ||
92 | |||
93 | /* | ||
94 | * helpers for accounting | ||
95 | */ | ||
96 | |||
97 | void res_counter_init(struct res_counter *counter, struct res_counter *parent); | ||
98 | |||
99 | /* | ||
100 | * charge - try to consume more resource. | ||
101 | * | ||
102 | * @counter: the counter | ||
103 | * @val: the amount of the resource. each controller defines its own | ||
104 | * units, e.g. numbers, bytes, Kbytes, etc | ||
105 | * | ||
106 | * returns 0 on success and <0 if the counter->usage will exceed the | ||
107 | * counter->limit | ||
108 | * | ||
109 | * charge_nofail works the same, except that it charges the resource | ||
110 | * counter unconditionally, and returns < 0 if the after the current | ||
111 | * charge we are over limit. | ||
112 | */ | ||
113 | |||
114 | int __must_check res_counter_charge(struct res_counter *counter, | ||
115 | unsigned long val, struct res_counter **limit_fail_at); | ||
116 | int res_counter_charge_nofail(struct res_counter *counter, | ||
117 | unsigned long val, struct res_counter **limit_fail_at); | ||
118 | |||
119 | /* | ||
120 | * uncharge - tell that some portion of the resource is released | ||
121 | * | ||
122 | * @counter: the counter | ||
123 | * @val: the amount of the resource | ||
124 | * | ||
125 | * these calls check for usage underflow and show a warning on the console | ||
126 | * | ||
127 | * returns the total charges still present in @counter. | ||
128 | */ | ||
129 | |||
130 | u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); | ||
131 | |||
132 | u64 res_counter_uncharge_until(struct res_counter *counter, | ||
133 | struct res_counter *top, | ||
134 | unsigned long val); | ||
135 | /** | ||
136 | * res_counter_margin - calculate chargeable space of a counter | ||
137 | * @cnt: the counter | ||
138 | * | ||
139 | * Returns the difference between the hard limit and the current usage | ||
140 | * of resource counter @cnt. | ||
141 | */ | ||
142 | static inline unsigned long long res_counter_margin(struct res_counter *cnt) | ||
143 | { | ||
144 | unsigned long long margin; | ||
145 | unsigned long flags; | ||
146 | |||
147 | spin_lock_irqsave(&cnt->lock, flags); | ||
148 | if (cnt->limit > cnt->usage) | ||
149 | margin = cnt->limit - cnt->usage; | ||
150 | else | ||
151 | margin = 0; | ||
152 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
153 | return margin; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * Get the difference between the usage and the soft limit | ||
158 | * @cnt: The counter | ||
159 | * | ||
160 | * Returns 0 if usage is less than or equal to soft limit | ||
161 | * The difference between usage and soft limit, otherwise. | ||
162 | */ | ||
163 | static inline unsigned long long | ||
164 | res_counter_soft_limit_excess(struct res_counter *cnt) | ||
165 | { | ||
166 | unsigned long long excess; | ||
167 | unsigned long flags; | ||
168 | |||
169 | spin_lock_irqsave(&cnt->lock, flags); | ||
170 | if (cnt->usage <= cnt->soft_limit) | ||
171 | excess = 0; | ||
172 | else | ||
173 | excess = cnt->usage - cnt->soft_limit; | ||
174 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
175 | return excess; | ||
176 | } | ||
177 | |||
178 | static inline void res_counter_reset_max(struct res_counter *cnt) | ||
179 | { | ||
180 | unsigned long flags; | ||
181 | |||
182 | spin_lock_irqsave(&cnt->lock, flags); | ||
183 | cnt->max_usage = cnt->usage; | ||
184 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
185 | } | ||
186 | |||
187 | static inline void res_counter_reset_failcnt(struct res_counter *cnt) | ||
188 | { | ||
189 | unsigned long flags; | ||
190 | |||
191 | spin_lock_irqsave(&cnt->lock, flags); | ||
192 | cnt->failcnt = 0; | ||
193 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
194 | } | ||
195 | |||
196 | static inline int res_counter_set_limit(struct res_counter *cnt, | ||
197 | unsigned long long limit) | ||
198 | { | ||
199 | unsigned long flags; | ||
200 | int ret = -EBUSY; | ||
201 | |||
202 | spin_lock_irqsave(&cnt->lock, flags); | ||
203 | if (cnt->usage <= limit) { | ||
204 | cnt->limit = limit; | ||
205 | ret = 0; | ||
206 | } | ||
207 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | static inline int | ||
212 | res_counter_set_soft_limit(struct res_counter *cnt, | ||
213 | unsigned long long soft_limit) | ||
214 | { | ||
215 | unsigned long flags; | ||
216 | |||
217 | spin_lock_irqsave(&cnt->lock, flags); | ||
218 | cnt->soft_limit = soft_limit; | ||
219 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | #endif | ||
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 41a4695fde08..ce6b962ffed4 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h | |||
@@ -12,11 +12,13 @@ struct reset_controller_dev; | |||
12 | * things to reset the device | 12 | * things to reset the device |
13 | * @assert: manually assert the reset line, if supported | 13 | * @assert: manually assert the reset line, if supported |
14 | * @deassert: manually deassert the reset line, if supported | 14 | * @deassert: manually deassert the reset line, if supported |
15 | * @status: return the status of the reset line, if supported | ||
15 | */ | 16 | */ |
16 | struct reset_control_ops { | 17 | struct reset_control_ops { |
17 | int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); | 18 | int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); |
18 | int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); | 19 | int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); |
19 | int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); | 20 | int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); |
21 | int (*status)(struct reset_controller_dev *rcdev, unsigned long id); | ||
20 | }; | 22 | }; |
21 | 23 | ||
22 | struct module; | 24 | struct module; |
diff --git a/include/linux/reset.h b/include/linux/reset.h index 349f150ae12c..da5602bd77d7 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h | |||
@@ -10,6 +10,7 @@ struct reset_control; | |||
10 | int reset_control_reset(struct reset_control *rstc); | 10 | int reset_control_reset(struct reset_control *rstc); |
11 | int reset_control_assert(struct reset_control *rstc); | 11 | int reset_control_assert(struct reset_control *rstc); |
12 | int reset_control_deassert(struct reset_control *rstc); | 12 | int reset_control_deassert(struct reset_control *rstc); |
13 | int reset_control_status(struct reset_control *rstc); | ||
13 | 14 | ||
14 | struct reset_control *reset_control_get(struct device *dev, const char *id); | 15 | struct reset_control *reset_control_get(struct device *dev, const char *id); |
15 | void reset_control_put(struct reset_control *rstc); | 16 | void reset_control_put(struct reset_control *rstc); |
@@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc) | |||
57 | return 0; | 58 | return 0; |
58 | } | 59 | } |
59 | 60 | ||
61 | static inline int reset_control_status(struct reset_control *rstc) | ||
62 | { | ||
63 | WARN_ON(1); | ||
64 | return 0; | ||
65 | } | ||
66 | |||
60 | static inline void reset_control_put(struct reset_control *rstc) | 67 | static inline void reset_control_put(struct reset_control *rstc) |
61 | { | 68 | { |
62 | WARN_ON(1); | 69 | WARN_ON(1); |
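On the consumer side, reset_control_status() makes it possible to poll a self-clearing reset rather than sleeping for a fixed time. A hedged example; the retry count and delays are arbitrary choices, not taken from the header:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/reset.h>

/* Trigger a self-clearing reset, then wait for it to report deasserted. */
static int example_reset_and_wait(struct device *dev)
{
	struct reset_control *rstc;
	int i, ret;

	rstc = reset_control_get(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);
	if (ret)
		goto out;

	for (i = 0; i < 100; i++) {
		ret = reset_control_status(rstc);	/* > 0: still asserted */
		if (ret <= 0)
			break;
		usleep_range(100, 200);
	}
	if (ret > 0)
		ret = -ETIMEDOUT;
out:
	reset_control_put(rstc);
	return ret;
}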
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a..b93fd89b2e5e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -65,7 +65,10 @@ struct rhashtable_params { | |||
65 | size_t new_size); | 65 | size_t new_size); |
66 | bool (*shrink_decision)(const struct rhashtable *ht, | 66 | bool (*shrink_decision)(const struct rhashtable *ht, |
67 | size_t new_size); | 67 | size_t new_size); |
68 | int (*mutex_is_held)(void); | 68 | #ifdef CONFIG_PROVE_LOCKING |
69 | int (*mutex_is_held)(void *parent); | ||
70 | void *parent; | ||
71 | #endif | ||
69 | }; | 72 | }; |
70 | 73 | ||
71 | /** | 74 | /** |
@@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); | |||
96 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); | 99 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); |
97 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); | 100 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); |
98 | 101 | ||
99 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 102 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); |
100 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 103 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); |
101 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | 104 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, |
102 | struct rhash_head __rcu **pprev, gfp_t flags); | 105 | struct rhash_head __rcu **pprev); |
103 | 106 | ||
104 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); | 107 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); |
105 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); | 108 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); |
106 | 109 | ||
107 | int rhashtable_expand(struct rhashtable *ht, gfp_t flags); | 110 | int rhashtable_expand(struct rhashtable *ht); |
108 | int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); | 111 | int rhashtable_shrink(struct rhashtable *ht); |
109 | 112 | ||
110 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key); | 113 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key); |
111 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, | 114 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, |
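The gfp_t parameters disappear from the insert/remove/resize API, and the lockdep helper gains a @parent cookie under CONFIG_PROVE_LOCKING. A sketch of how a table user might fill in the new fields; struct my_obj and my_table_mutex are illustrative only:

#include <linux/jhash.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static DEFINE_MUTEX(my_table_mutex);	/* serialises table updates */

#ifdef CONFIG_PROVE_LOCKING
/* Lockdep callback: is the mutex guarding the table actually held? */
static int my_table_mutex_is_held(void *parent)
{
	return lockdep_is_held(&my_table_mutex);
}
#endif

static struct rhashtable_params my_params = {
	.nelem_hint	= 1024,
	.head_offset	= offsetof(struct my_obj, node),
	.key_offset	= offsetof(struct my_obj, key),
	.key_len	= sizeof(u32),
	.hashfn		= jhash,
#ifdef CONFIG_PROVE_LOCKING
	.mutex_is_held	= my_table_mutex_is_held,
	.parent		= &my_table_mutex,
#endif
};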
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index c2c28975293c..6d6be09a2fe5 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -19,11 +19,28 @@ | |||
19 | extern int rtc_month_days(unsigned int month, unsigned int year); | 19 | extern int rtc_month_days(unsigned int month, unsigned int year); |
20 | extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); | 20 | extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); |
21 | extern int rtc_valid_tm(struct rtc_time *tm); | 21 | extern int rtc_valid_tm(struct rtc_time *tm); |
22 | extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); | 22 | extern time64_t rtc_tm_to_time64(struct rtc_time *tm); |
23 | extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); | 23 | extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); |
24 | ktime_t rtc_tm_to_ktime(struct rtc_time tm); | 24 | ktime_t rtc_tm_to_ktime(struct rtc_time tm); |
25 | struct rtc_time rtc_ktime_to_tm(ktime_t kt); | 25 | struct rtc_time rtc_ktime_to_tm(ktime_t kt); |
26 | 26 | ||
27 | /** | ||
28 | * Deprecated. Use rtc_time64_to_tm(). | ||
29 | */ | ||
30 | static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) | ||
31 | { | ||
32 | rtc_time64_to_tm(time, tm); | ||
33 | } | ||
34 | |||
35 | /** | ||
36 | * Deprecated. Use rtc_tm_to_time64(). | ||
37 | */ | ||
38 | static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) | ||
39 | { | ||
40 | *time = rtc_tm_to_time64(tm); | ||
41 | |||
42 | return 0; | ||
43 | } | ||
27 | 44 | ||
28 | #include <linux/device.h> | 45 | #include <linux/device.h> |
29 | #include <linux/seq_file.h> | 46 | #include <linux/seq_file.h> |
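The time64_t based conversions avoid the 32-bit overflow in 2038, and the old names survive only as wrappers. A short example of the direction new code is expected to take; the hardware read is faked:

#include <linux/rtc.h>

/* Build broken-down time from a 64-bit seconds count (faked here). */
static int example_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = 1600000000;	/* would be read from the RTC hardware */

	rtc_time64_to_tm(secs, tm);
	return rtc_valid_tm(tm);
}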
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06c..5db76a32fcab 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, | |||
17 | u32 id, long expires, u32 error); | 17 | u32 id, long expires, u32 error); |
18 | 18 | ||
19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); | 19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); |
20 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, | ||
21 | unsigned change, gfp_t flags); | ||
22 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, | ||
23 | gfp_t flags); | ||
24 | |||
20 | 25 | ||
21 | /* RTNL is used as a global lock for all changes to network configuration */ | 26 | /* RTNL is used as a global lock for all changes to network configuration */ |
22 | extern void rtnl_lock(void); | 27 | extern void rtnl_lock(void); |
@@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, | |||
94 | struct nlattr *tb[], | 99 | struct nlattr *tb[], |
95 | struct net_device *dev, | 100 | struct net_device *dev, |
96 | const unsigned char *addr, | 101 | const unsigned char *addr, |
97 | u16 flags); | 102 | u16 vid, |
103 | u16 flags); | ||
98 | extern int ndo_dflt_fdb_del(struct ndmsg *ndm, | 104 | extern int ndo_dflt_fdb_del(struct ndmsg *ndm, |
99 | struct nlattr *tb[], | 105 | struct nlattr *tb[], |
100 | struct net_device *dev, | 106 | struct net_device *dev, |
101 | const unsigned char *addr); | 107 | const unsigned char *addr, |
108 | u16 vid); | ||
102 | 109 | ||
103 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 110 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
104 | struct net_device *dev, u16 mode); | 111 | struct net_device *dev, u16 mode, |
112 | u32 flags, u32 mask); | ||
105 | #endif /* __LINUX_RTNETLINK_H */ | 113 | #endif /* __LINUX_RTNETLINK_H */ |
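Splitting rtmsg_ifinfo() into a build step and a send step lets a caller construct the RTM_NEWLINK notification in one context and emit it in another. A hedged sketch of the calling pattern:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Build the RTM_NEWLINK skb first, send it once it is convenient. */
static void example_notify_link(struct net_device *dev)
{
	struct sk_buff *skb;

	skb = rtmsg_ifinfo_build_skb(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
}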
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5e344bbe63ec..8db31ef98d2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!( | |||
243 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ | 243 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
244 | (task->flags & PF_FROZEN) == 0) | 244 | (task->flags & PF_FROZEN) == 0) |
245 | 245 | ||
246 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | ||
247 | |||
248 | #define __set_task_state(tsk, state_value) \ | ||
249 | do { \ | ||
250 | (tsk)->task_state_change = _THIS_IP_; \ | ||
251 | (tsk)->state = (state_value); \ | ||
252 | } while (0) | ||
253 | #define set_task_state(tsk, state_value) \ | ||
254 | do { \ | ||
255 | (tsk)->task_state_change = _THIS_IP_; \ | ||
256 | set_mb((tsk)->state, (state_value)); \ | ||
257 | } while (0) | ||
258 | |||
259 | /* | ||
260 | * set_current_state() includes a barrier so that the write of current->state | ||
261 | * is correctly serialised wrt the caller's subsequent test of whether to | ||
262 | * actually sleep: | ||
263 | * | ||
264 | * set_current_state(TASK_UNINTERRUPTIBLE); | ||
265 | * if (do_i_need_to_sleep()) | ||
266 | * schedule(); | ||
267 | * | ||
268 | * If the caller does not need such serialisation then use __set_current_state() | ||
269 | */ | ||
270 | #define __set_current_state(state_value) \ | ||
271 | do { \ | ||
272 | current->task_state_change = _THIS_IP_; \ | ||
273 | current->state = (state_value); \ | ||
274 | } while (0) | ||
275 | #define set_current_state(state_value) \ | ||
276 | do { \ | ||
277 | current->task_state_change = _THIS_IP_; \ | ||
278 | set_mb(current->state, (state_value)); \ | ||
279 | } while (0) | ||
280 | |||
281 | #else | ||
282 | |||
246 | #define __set_task_state(tsk, state_value) \ | 283 | #define __set_task_state(tsk, state_value) \ |
247 | do { (tsk)->state = (state_value); } while (0) | 284 | do { (tsk)->state = (state_value); } while (0) |
248 | #define set_task_state(tsk, state_value) \ | 285 | #define set_task_state(tsk, state_value) \ |
@@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!( | |||
259 | * | 296 | * |
260 | * If the caller does not need such serialisation then use __set_current_state() | 297 | * If the caller does not need such serialisation then use __set_current_state() |
261 | */ | 298 | */ |
262 | #define __set_current_state(state_value) \ | 299 | #define __set_current_state(state_value) \ |
263 | do { current->state = (state_value); } while (0) | 300 | do { current->state = (state_value); } while (0) |
264 | #define set_current_state(state_value) \ | 301 | #define set_current_state(state_value) \ |
265 | set_mb(current->state, (state_value)) | 302 | set_mb(current->state, (state_value)) |
266 | 303 | ||
304 | #endif | ||
305 | |||
267 | /* Task command name length */ | 306 | /* Task command name length */ |
268 | #define TASK_COMM_LEN 16 | 307 | #define TASK_COMM_LEN 16 |
269 | 308 | ||
@@ -1278,9 +1317,9 @@ struct task_struct { | |||
1278 | union rcu_special rcu_read_unlock_special; | 1317 | union rcu_special rcu_read_unlock_special; |
1279 | struct list_head rcu_node_entry; | 1318 | struct list_head rcu_node_entry; |
1280 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | 1319 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
1281 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1320 | #ifdef CONFIG_PREEMPT_RCU |
1282 | struct rcu_node *rcu_blocked_node; | 1321 | struct rcu_node *rcu_blocked_node; |
1283 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1322 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
1284 | #ifdef CONFIG_TASKS_RCU | 1323 | #ifdef CONFIG_TASKS_RCU |
1285 | unsigned long rcu_tasks_nvcsw; | 1324 | unsigned long rcu_tasks_nvcsw; |
1286 | bool rcu_tasks_holdout; | 1325 | bool rcu_tasks_holdout; |
@@ -1325,6 +1364,10 @@ struct task_struct { | |||
1325 | unsigned sched_reset_on_fork:1; | 1364 | unsigned sched_reset_on_fork:1; |
1326 | unsigned sched_contributes_to_load:1; | 1365 | unsigned sched_contributes_to_load:1; |
1327 | 1366 | ||
1367 | #ifdef CONFIG_MEMCG_KMEM | ||
1368 | unsigned memcg_kmem_skip_account:1; | ||
1369 | #endif | ||
1370 | |||
1328 | unsigned long atomic_flags; /* Flags needing atomic access. */ | 1371 | unsigned long atomic_flags; /* Flags needing atomic access. */ |
1329 | 1372 | ||
1330 | pid_t pid; | 1373 | pid_t pid; |
@@ -1558,28 +1601,23 @@ struct task_struct { | |||
1558 | struct numa_group *numa_group; | 1601 | struct numa_group *numa_group; |
1559 | 1602 | ||
1560 | /* | 1603 | /* |
1561 | * Exponential decaying average of faults on a per-node basis. | 1604 | * numa_faults is an array split into four regions: |
1562 | * Scheduling placement decisions are made based on the these counts. | 1605 | * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer |
1563 | * The values remain static for the duration of a PTE scan | 1606 | * in this precise order. |
1607 | * | ||
1608 | * faults_memory: Exponential decaying average of faults on a per-node | ||
1609 | * basis. Scheduling placement decisions are made based on these | ||
1610 | * counts. The values remain static for the duration of a PTE scan. | ||
1611 | * faults_cpu: Track the nodes the process was running on when a NUMA | ||
1612 | * hinting fault was incurred. | ||
1613 | * faults_memory_buffer and faults_cpu_buffer: Record faults per node | ||
1614 | * during the current scan window. When the scan completes, the counts | ||
1615 | * in faults_memory and faults_cpu decay and these values are copied. | ||
1564 | */ | 1616 | */ |
1565 | unsigned long *numa_faults_memory; | 1617 | unsigned long *numa_faults; |
1566 | unsigned long total_numa_faults; | 1618 | unsigned long total_numa_faults; |
1567 | 1619 | ||
1568 | /* | 1620 | /* |
1569 | * numa_faults_buffer records faults per node during the current | ||
1570 | * scan window. When the scan completes, the counts in | ||
1571 | * numa_faults_memory decay and these values are copied. | ||
1572 | */ | ||
1573 | unsigned long *numa_faults_buffer_memory; | ||
1574 | |||
1575 | /* | ||
1576 | * Track the nodes the process was running on when a NUMA hinting | ||
1577 | * fault was incurred. | ||
1578 | */ | ||
1579 | unsigned long *numa_faults_cpu; | ||
1580 | unsigned long *numa_faults_buffer_cpu; | ||
1581 | |||
1582 | /* | ||
1583 | * numa_faults_locality tracks if faults recorded during the last | 1621 | * numa_faults_locality tracks if faults recorded during the last |
1584 | * scan window were remote/local. The task scan period is adapted | 1622 | * scan window were remote/local. The task scan period is adapted |
1585 | * based on the locality of the faults with different weights | 1623 | * based on the locality of the faults with different weights |
@@ -1645,8 +1683,7 @@ struct task_struct { | |||
1645 | /* bitmask and counter of trace recursion */ | 1683 | /* bitmask and counter of trace recursion */ |
1646 | unsigned long trace_recursion; | 1684 | unsigned long trace_recursion; |
1647 | #endif /* CONFIG_TRACING */ | 1685 | #endif /* CONFIG_TRACING */ |
1648 | #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ | 1686 | #ifdef CONFIG_MEMCG |
1649 | unsigned int memcg_kmem_skip_account; | ||
1650 | struct memcg_oom_info { | 1687 | struct memcg_oom_info { |
1651 | struct mem_cgroup *memcg; | 1688 | struct mem_cgroup *memcg; |
1652 | gfp_t gfp_mask; | 1689 | gfp_t gfp_mask; |
@@ -1661,6 +1698,9 @@ struct task_struct { | |||
1661 | unsigned int sequential_io; | 1698 | unsigned int sequential_io; |
1662 | unsigned int sequential_io_avg; | 1699 | unsigned int sequential_io_avg; |
1663 | #endif | 1700 | #endif |
1701 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | ||
1702 | unsigned long task_state_change; | ||
1703 | #endif | ||
1664 | }; | 1704 | }; |
1665 | 1705 | ||
1666 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1706 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
@@ -2052,6 +2092,10 @@ static inline void tsk_restore_flags(struct task_struct *task, | |||
2052 | task->flags |= orig_flags & flags; | 2092 | task->flags |= orig_flags & flags; |
2053 | } | 2093 | } |
2054 | 2094 | ||
2095 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, | ||
2096 | const struct cpumask *trial); | ||
2097 | extern int task_can_attach(struct task_struct *p, | ||
2098 | const struct cpumask *cs_cpus_allowed); | ||
2055 | #ifdef CONFIG_SMP | 2099 | #ifdef CONFIG_SMP |
2056 | extern void do_set_cpus_allowed(struct task_struct *p, | 2100 | extern void do_set_cpus_allowed(struct task_struct *p, |
2057 | const struct cpumask *new_mask); | 2101 | const struct cpumask *new_mask); |
@@ -2441,6 +2485,10 @@ extern void do_group_exit(int); | |||
2441 | extern int do_execve(struct filename *, | 2485 | extern int do_execve(struct filename *, |
2442 | const char __user * const __user *, | 2486 | const char __user * const __user *, |
2443 | const char __user * const __user *); | 2487 | const char __user * const __user *); |
2488 | extern int do_execveat(int, struct filename *, | ||
2489 | const char __user * const __user *, | ||
2490 | const char __user * const __user *, | ||
2491 | int); | ||
2444 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); | 2492 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); |
2445 | struct task_struct *fork_idle(int); | 2493 | struct task_struct *fork_idle(int); |
2446 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 2494 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
@@ -2760,7 +2808,7 @@ static inline int signal_pending_state(long state, struct task_struct *p) | |||
2760 | extern int _cond_resched(void); | 2808 | extern int _cond_resched(void); |
2761 | 2809 | ||
2762 | #define cond_resched() ({ \ | 2810 | #define cond_resched() ({ \ |
2763 | __might_sleep(__FILE__, __LINE__, 0); \ | 2811 | ___might_sleep(__FILE__, __LINE__, 0); \ |
2764 | _cond_resched(); \ | 2812 | _cond_resched(); \ |
2765 | }) | 2813 | }) |
2766 | 2814 | ||
@@ -2773,14 +2821,14 @@ extern int __cond_resched_lock(spinlock_t *lock); | |||
2773 | #endif | 2821 | #endif |
2774 | 2822 | ||
2775 | #define cond_resched_lock(lock) ({ \ | 2823 | #define cond_resched_lock(lock) ({ \ |
2776 | __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ | 2824 | ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ |
2777 | __cond_resched_lock(lock); \ | 2825 | __cond_resched_lock(lock); \ |
2778 | }) | 2826 | }) |
2779 | 2827 | ||
2780 | extern int __cond_resched_softirq(void); | 2828 | extern int __cond_resched_softirq(void); |
2781 | 2829 | ||
2782 | #define cond_resched_softirq() ({ \ | 2830 | #define cond_resched_softirq() ({ \ |
2783 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ | 2831 | ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
2784 | __cond_resched_softirq(); \ | 2832 | __cond_resched_softirq(); \ |
2785 | }) | 2833 | }) |
2786 | 2834 | ||
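The DEBUG_ATOMIC_SLEEP variants record _THIS_IP_ in the new task_state_change field so ___might_sleep() can report where the task last changed state; the sleep pattern itself is unchanged. For reference, the canonical wait loop looks like this (queue and condition names are illustrative):

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_condition;

static void example_wait_for_condition(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&example_wq, &wait);
	for (;;) {
		/* Under DEBUG_ATOMIC_SLEEP this also records _THIS_IP_. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (example_condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&example_wq, &wait);
}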
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 000000000000..9aafe0e24c68 --- /dev/null +++ b/include/linux/seq_buf.h | |||
@@ -0,0 +1,136 @@ | |||
1 | #ifndef _LINUX_SEQ_BUF_H | ||
2 | #define _LINUX_SEQ_BUF_H | ||
3 | |||
4 | #include <linux/fs.h> | ||
5 | |||
6 | /* | ||
7 | * A seq_buf is a descriptor for a buffer that lets a function call several | ||
8 | * other helpers to build up a single string of data. | ||
9 | */ | ||
10 | |||
11 | /** | ||
12 | * seq_buf - seq buffer structure | ||
13 | * @buffer: pointer to the buffer | ||
14 | * @size: size of the buffer | ||
15 | * @len: the amount of data inside the buffer | ||
16 | * @readpos: The next position to read in the buffer. | ||
17 | */ | ||
18 | struct seq_buf { | ||
19 | char *buffer; | ||
20 | size_t size; | ||
21 | size_t len; | ||
22 | loff_t readpos; | ||
23 | }; | ||
24 | |||
25 | static inline void seq_buf_clear(struct seq_buf *s) | ||
26 | { | ||
27 | s->len = 0; | ||
28 | s->readpos = 0; | ||
29 | } | ||
30 | |||
31 | static inline void | ||
32 | seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) | ||
33 | { | ||
34 | s->buffer = buf; | ||
35 | s->size = size; | ||
36 | seq_buf_clear(s); | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * A seq_buf has a buffer that might overflow. When this happens, | ||
41 | * len is set past size (len > size) to mark the overflow. | ||
42 | */ | ||
43 | static inline bool | ||
44 | seq_buf_has_overflowed(struct seq_buf *s) | ||
45 | { | ||
46 | return s->len > s->size; | ||
47 | } | ||
48 | |||
49 | static inline void | ||
50 | seq_buf_set_overflow(struct seq_buf *s) | ||
51 | { | ||
52 | s->len = s->size + 1; | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * How much buffer is left on the seq_buf? | ||
57 | */ | ||
58 | static inline unsigned int | ||
59 | seq_buf_buffer_left(struct seq_buf *s) | ||
60 | { | ||
61 | if (seq_buf_has_overflowed(s)) | ||
62 | return 0; | ||
63 | |||
64 | return s->size - s->len; | ||
65 | } | ||
66 | |||
67 | /* How much buffer was written? */ | ||
68 | static inline unsigned int seq_buf_used(struct seq_buf *s) | ||
69 | { | ||
70 | return min(s->len, s->size); | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * seq_buf_get_buf - get buffer to write arbitrary data to | ||
75 | * @s: the seq_buf handle | ||
76 | * @bufp: the beginning of the buffer is stored here | ||
77 | * | ||
78 | * Return the number of bytes available in the buffer, or zero if | ||
79 | * there's no space. | ||
80 | */ | ||
81 | static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) | ||
82 | { | ||
83 | WARN_ON(s->len > s->size + 1); | ||
84 | |||
85 | if (s->len < s->size) { | ||
86 | *bufp = s->buffer + s->len; | ||
87 | return s->size - s->len; | ||
88 | } | ||
89 | |||
90 | *bufp = NULL; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * seq_buf_commit - commit data to the buffer | ||
96 | * @s: the seq_buf handle | ||
97 | * @num: the number of bytes to commit | ||
98 | * | ||
99 | * Commit @num bytes of data written to a buffer previously acquired | ||
100 | * by seq_buf_get_buf(). To signal an error condition, or that the data | ||
101 | * didn't fit in the available space, pass a negative @num value. | ||
102 | */ | ||
103 | static inline void seq_buf_commit(struct seq_buf *s, int num) | ||
104 | { | ||
105 | if (num < 0) { | ||
106 | seq_buf_set_overflow(s); | ||
107 | } else { | ||
108 | /* num must be negative on overflow */ | ||
109 | BUG_ON(s->len + num > s->size); | ||
110 | s->len += num; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | extern __printf(2, 3) | ||
115 | int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); | ||
116 | extern __printf(2, 0) | ||
117 | int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); | ||
118 | extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); | ||
119 | extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, | ||
120 | int cnt); | ||
121 | extern int seq_buf_puts(struct seq_buf *s, const char *str); | ||
122 | extern int seq_buf_putc(struct seq_buf *s, unsigned char c); | ||
123 | extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); | ||
124 | extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, | ||
125 | unsigned int len); | ||
126 | extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); | ||
127 | |||
128 | extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, | ||
129 | int nmaskbits); | ||
130 | |||
131 | #ifdef CONFIG_BINARY_PRINTF | ||
132 | extern int | ||
133 | seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); | ||
134 | #endif | ||
135 | |||
136 | #endif /* _LINUX_SEQ_BUF_H */ | ||
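A minimal sketch of driving the seq_buf API above from a caller-supplied buffer; the formatted content is arbitrary:

#include <linux/errno.h>
#include <linux/seq_buf.h>

static int example_fill(unsigned char *out, unsigned int size)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, out, size);

	for (i = 0; i < 4; i++)
		seq_buf_printf(&s, "value[%d]=%d\n", i, i * i);

	/* Writes that did not fit push len past size. */
	if (seq_buf_has_overflowed(&s))
		return -E2BIG;

	return seq_buf_used(&s);
}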
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 52e0097f61f0..cf6a9daaaf6d 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -43,6 +43,21 @@ struct seq_operations { | |||
43 | #define SEQ_SKIP 1 | 43 | #define SEQ_SKIP 1 |
44 | 44 | ||
45 | /** | 45 | /** |
46 | * seq_has_overflowed - check if the buffer has overflowed | ||
47 | * @m: the seq_file handle | ||
48 | * | ||
49 | * seq_files have a buffer which may overflow. When this happens a larger | ||
50 | * buffer is reallocated and all the data will be printed again. | ||
51 | * The overflow state is true when m->count == m->size. | ||
52 | * | ||
53 | * Returns true if the buffer received more than it can hold. | ||
54 | */ | ||
55 | static inline bool seq_has_overflowed(struct seq_file *m) | ||
56 | { | ||
57 | return m->count == m->size; | ||
58 | } | ||
59 | |||
60 | /** | ||
46 | * seq_get_buf - get buffer to write arbitrary data to | 61 | * seq_get_buf - get buffer to write arbitrary data to |
47 | * @m: the seq_file handle | 62 | * @m: the seq_file handle |
48 | * @bufp: the beginning of the buffer is stored here | 63 | * @bufp: the beginning of the buffer is stored here |
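seq_has_overflowed() replaces open-coded checks of m->count against m->size in ->show() style handlers. A hedged example; the stats structure is hypothetical:

#include <linux/seq_file.h>

struct example_stats {
	unsigned long rx;
	unsigned long tx;
};

static int example_stats_show(struct seq_file *m, void *v)
{
	const struct example_stats *st = m->private;

	seq_printf(m, "rx: %lu\n", st->rx);
	seq_printf(m, "tx: %lu\n", st->tx);

	/* Ask seq_file to retry with a larger buffer if we ran out of room. */
	return seq_has_overflowed(m) ? -1 : 0;
}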
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3df10d5f154b..e02acf0a0ec9 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
@@ -97,13 +97,10 @@ struct uart_8250_port { | |||
97 | unsigned char msr_saved_flags; | 97 | unsigned char msr_saved_flags; |
98 | 98 | ||
99 | struct uart_8250_dma *dma; | 99 | struct uart_8250_dma *dma; |
100 | struct serial_rs485 rs485; | ||
101 | 100 | ||
102 | /* 8250 specific callbacks */ | 101 | /* 8250 specific callbacks */ |
103 | int (*dl_read)(struct uart_8250_port *); | 102 | int (*dl_read)(struct uart_8250_port *); |
104 | void (*dl_write)(struct uart_8250_port *, int); | 103 | void (*dl_write)(struct uart_8250_port *, int); |
105 | int (*rs485_config)(struct uart_8250_port *, | ||
106 | struct serial_rs485 *rs485); | ||
107 | }; | 104 | }; |
108 | 105 | ||
109 | static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) | 106 | static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) |
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h index a80aa1a5bee2..570e964dc899 100644 --- a/include/linux/serial_bcm63xx.h +++ b/include/linux/serial_bcm63xx.h | |||
@@ -116,6 +116,4 @@ | |||
116 | UART_FIFO_PARERR_MASK | \ | 116 | UART_FIFO_PARERR_MASK | \ |
117 | UART_FIFO_BRKDET_MASK) | 117 | UART_FIFO_BRKDET_MASK) |
118 | 118 | ||
119 | #define UART_REG_SIZE 24 | ||
120 | |||
121 | #endif /* _LINUX_SERIAL_BCM63XX_H */ | 119 | #endif /* _LINUX_SERIAL_BCM63XX_H */ |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 21c2e05c1bc3..057038cf2788 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -63,7 +63,7 @@ struct uart_ops { | |||
63 | void (*flush_buffer)(struct uart_port *); | 63 | void (*flush_buffer)(struct uart_port *); |
64 | void (*set_termios)(struct uart_port *, struct ktermios *new, | 64 | void (*set_termios)(struct uart_port *, struct ktermios *new, |
65 | struct ktermios *old); | 65 | struct ktermios *old); |
66 | void (*set_ldisc)(struct uart_port *, int new); | 66 | void (*set_ldisc)(struct uart_port *, struct ktermios *); |
67 | void (*pm)(struct uart_port *, unsigned int state, | 67 | void (*pm)(struct uart_port *, unsigned int state, |
68 | unsigned int oldstate); | 68 | unsigned int oldstate); |
69 | 69 | ||
@@ -131,6 +131,8 @@ struct uart_port { | |||
131 | void (*pm)(struct uart_port *, unsigned int state, | 131 | void (*pm)(struct uart_port *, unsigned int state, |
132 | unsigned int old); | 132 | unsigned int old); |
133 | void (*handle_break)(struct uart_port *); | 133 | void (*handle_break)(struct uart_port *); |
134 | int (*rs485_config)(struct uart_port *, | ||
135 | struct serial_rs485 *rs485); | ||
134 | unsigned int irq; /* irq number */ | 136 | unsigned int irq; /* irq number */ |
135 | unsigned long irqflags; /* irq flags */ | 137 | unsigned long irqflags; /* irq flags */ |
136 | unsigned int uartclk; /* base uart clock */ | 138 | unsigned int uartclk; /* base uart clock */ |
@@ -140,12 +142,13 @@ struct uart_port { | |||
140 | unsigned char iotype; /* io access style */ | 142 | unsigned char iotype; /* io access style */ |
141 | unsigned char unused1; | 143 | unsigned char unused1; |
142 | 144 | ||
143 | #define UPIO_PORT (0) | 145 | #define UPIO_PORT (0) /* 8b I/O port access */ |
144 | #define UPIO_HUB6 (1) | 146 | #define UPIO_HUB6 (1) /* Hub6 ISA card */ |
145 | #define UPIO_MEM (2) | 147 | #define UPIO_MEM (2) /* 8b MMIO access */ |
146 | #define UPIO_MEM32 (3) | 148 | #define UPIO_MEM32 (3) /* 32b little endian */ |
147 | #define UPIO_AU (4) /* Au1x00 and RT288x type IO */ | 149 | #define UPIO_MEM32BE (4) /* 32b big endian */ |
148 | #define UPIO_TSI (5) /* Tsi108/109 type IO */ | 150 | #define UPIO_AU (5) /* Au1x00 and RT288x type IO */ |
151 | #define UPIO_TSI (6) /* Tsi108/109 type IO */ | ||
149 | 152 | ||
150 | unsigned int read_status_mask; /* driver specific */ | 153 | unsigned int read_status_mask; /* driver specific */ |
151 | unsigned int ignore_status_mask; /* driver specific */ | 154 | unsigned int ignore_status_mask; /* driver specific */ |
@@ -160,21 +163,33 @@ struct uart_port { | |||
160 | /* flags must be updated while holding port mutex */ | 163 | /* flags must be updated while holding port mutex */ |
161 | upf_t flags; | 164 | upf_t flags; |
162 | 165 | ||
163 | #define UPF_FOURPORT ((__force upf_t) (1 << 1)) | 166 | /* |
164 | #define UPF_SAK ((__force upf_t) (1 << 2)) | 167 | * These flags must be equivalent to the flags defined in |
165 | #define UPF_SPD_MASK ((__force upf_t) (0x1030)) | 168 | * include/uapi/linux/tty_flags.h which are the userspace definitions |
166 | #define UPF_SPD_HI ((__force upf_t) (0x0010)) | 169 | * assigned from the serial_struct flags in uart_set_info() |
167 | #define UPF_SPD_VHI ((__force upf_t) (0x0020)) | 170 | * [for bit definitions in the UPF_CHANGE_MASK] |
168 | #define UPF_SPD_CUST ((__force upf_t) (0x0030)) | 171 | * |
169 | #define UPF_SPD_SHI ((__force upf_t) (0x1000)) | 172 | * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable |
170 | #define UPF_SPD_WARP ((__force upf_t) (0x1010)) | 173 | * except bit 15 (UPF_NO_TXEN_TEST) which is masked off. |
171 | #define UPF_SKIP_TEST ((__force upf_t) (1 << 6)) | 174 | * The remaining bits are serial-core specific and not modifiable by |
172 | #define UPF_AUTO_IRQ ((__force upf_t) (1 << 7)) | 175 | * userspace. |
173 | #define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) | 176 | */ |
174 | #define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) | 177 | #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) |
175 | #define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) | 178 | #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) |
179 | #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) | ||
180 | #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) | ||
181 | #define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) | ||
182 | #define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) | ||
183 | #define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) | ||
184 | #define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) | ||
185 | #define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) | ||
186 | #define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) | ||
187 | #define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) | ||
188 | #define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) | ||
189 | #define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) | ||
176 | #define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) | 190 | #define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) |
177 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) | 191 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) |
192 | |||
178 | /* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ | 193 | /* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ |
179 | #define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) | 194 | #define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) |
180 | /* Port has hardware-assisted s/w flow control */ | 195 | /* Port has hardware-assisted s/w flow control */ |
@@ -190,9 +205,14 @@ struct uart_port { | |||
190 | #define UPF_DEAD ((__force upf_t) (1 << 30)) | 205 | #define UPF_DEAD ((__force upf_t) (1 << 30)) |
191 | #define UPF_IOREMAP ((__force upf_t) (1 << 31)) | 206 | #define UPF_IOREMAP ((__force upf_t) (1 << 31)) |
192 | 207 | ||
193 | #define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) | 208 | #define __UPF_CHANGE_MASK 0x17fff |
209 | #define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) | ||
194 | #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) | 210 | #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) |
195 | 211 | ||
212 | #if __UPF_CHANGE_MASK > ASYNC_FLAGS | ||
213 | #error Change mask not equivalent to userspace-visible bit defines | ||
214 | #endif | ||
215 | |||
196 | /* status must be updated while holding port lock */ | 216 | /* status must be updated while holding port lock */ |
197 | upstat_t status; | 217 | upstat_t status; |
198 | 218 | ||
@@ -214,6 +234,7 @@ struct uart_port { | |||
214 | unsigned char unused[2]; | 234 | unsigned char unused[2]; |
215 | struct attribute_group *attr_group; /* port specific attributes */ | 235 | struct attribute_group *attr_group; /* port specific attributes */ |
216 | const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ | 236 | const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ |
237 | struct serial_rs485 rs485; | ||
217 | void *private_data; /* generic platform data pointer */ | 238 | void *private_data; /* generic platform data pointer */ |
218 | }; | 239 | }; |
219 | 240 | ||
@@ -367,7 +388,7 @@ static inline int uart_tx_stopped(struct uart_port *port) | |||
367 | 388 | ||
368 | static inline bool uart_cts_enabled(struct uart_port *uport) | 389 | static inline bool uart_cts_enabled(struct uart_port *uport) |
369 | { | 390 | { |
370 | return uport->status & UPSTAT_CTS_ENABLE; | 391 | return !!(uport->status & UPSTAT_CTS_ENABLE); |
371 | } | 392 | } |
372 | 393 | ||
373 | /* | 394 | /* |
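With rs485_config and the serial_rs485 state moved into struct uart_port, RS-485 handling becomes generic to the serial core. A sketch of how a UART driver might wire it up; the register programming is elided and the example_* names are assumptions:

#include <linux/serial_core.h>

/* Program the hardware, then record the active settings in the port. */
static int example_rs485_config(struct uart_port *port,
				struct serial_rs485 *rs485)
{
	/* ... write RTS polarity and delays to device registers here ... */

	port->rs485 = *rs485;	/* the core reports this back on TIOCGRS485 */
	return 0;
}

static void example_setup_port(struct uart_port *port)
{
	port->rs485_config = example_rs485_config;
}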
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 68c097077ef0..f4aee75f00b1 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h | |||
@@ -18,8 +18,6 @@ struct shrink_control { | |||
18 | */ | 18 | */ |
19 | unsigned long nr_to_scan; | 19 | unsigned long nr_to_scan; |
20 | 20 | ||
21 | /* shrink from these nodes */ | ||
22 | nodemask_t nodes_to_scan; | ||
23 | /* current node being shrunk (for NUMA aware shrinkers) */ | 21 | /* current node being shrunk (for NUMA aware shrinkers) */ |
24 | int nid; | 22 | int nid; |
25 | }; | 23 | }; |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e76..85ab7d72b54c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/time.h> | 20 | #include <linux/time.h> |
21 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
22 | #include <linux/cache.h> | 22 | #include <linux/cache.h> |
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/socket.h> | ||
23 | 25 | ||
24 | #include <linux/atomic.h> | 26 | #include <linux/atomic.h> |
25 | #include <asm/types.h> | 27 | #include <asm/types.h> |
@@ -148,6 +150,8 @@ | |||
148 | struct net_device; | 150 | struct net_device; |
149 | struct scatterlist; | 151 | struct scatterlist; |
150 | struct pipe_inode_info; | 152 | struct pipe_inode_info; |
153 | struct iov_iter; | ||
154 | struct napi_struct; | ||
151 | 155 | ||
152 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 156 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
153 | struct nf_conntrack { | 157 | struct nf_conntrack { |
@@ -341,7 +345,6 @@ enum { | |||
341 | SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ | 345 | SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ |
342 | SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ | 346 | SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ |
343 | SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ | 347 | SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ |
344 | SKB_FCLONE_FREE, /* this companion fclone skb is available */ | ||
345 | }; | 348 | }; |
346 | 349 | ||
347 | enum { | 350 | enum { |
@@ -370,8 +373,7 @@ enum { | |||
370 | 373 | ||
371 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, | 374 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, |
372 | 375 | ||
373 | SKB_GSO_MPLS = 1 << 12, | 376 | SKB_GSO_TUNNEL_REMCSUM = 1 << 12, |
374 | |||
375 | }; | 377 | }; |
376 | 378 | ||
377 | #if BITS_PER_LONG > 32 | 379 | #if BITS_PER_LONG > 32 |
@@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
440 | * @next: Next buffer in list | 442 | * @next: Next buffer in list |
441 | * @prev: Previous buffer in list | 443 | * @prev: Previous buffer in list |
442 | * @tstamp: Time we arrived/left | 444 | * @tstamp: Time we arrived/left |
445 | * @rbnode: RB tree node, alternative to next/prev for netem/tcp | ||
443 | * @sk: Socket we are owned by | 446 | * @sk: Socket we are owned by |
444 | * @dev: Device we arrived on/are leaving by | 447 | * @dev: Device we arrived on/are leaving by |
445 | * @cb: Control buffer. Free for use by every layer. Put private vars here | 448 | * @cb: Control buffer. Free for use by every layer. Put private vars here |
@@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
504 | */ | 507 | */ |
505 | 508 | ||
506 | struct sk_buff { | 509 | struct sk_buff { |
507 | /* These two members must be first. */ | ||
508 | struct sk_buff *next; | ||
509 | struct sk_buff *prev; | ||
510 | |||
511 | union { | 510 | union { |
512 | ktime_t tstamp; | 511 | struct { |
513 | struct skb_mstamp skb_mstamp; | 512 | /* These two members must be first. */ |
513 | struct sk_buff *next; | ||
514 | struct sk_buff *prev; | ||
515 | |||
516 | union { | ||
517 | ktime_t tstamp; | ||
518 | struct skb_mstamp skb_mstamp; | ||
519 | }; | ||
520 | }; | ||
521 | struct rb_node rbnode; /* used in netem & tcp stack */ | ||
514 | }; | 522 | }; |
515 | |||
516 | struct sock *sk; | 523 | struct sock *sk; |
517 | struct net_device *dev; | 524 | struct net_device *dev; |
518 | 525 | ||
@@ -597,7 +604,8 @@ struct sk_buff { | |||
597 | #endif | 604 | #endif |
598 | __u8 ipvs_property:1; | 605 | __u8 ipvs_property:1; |
599 | __u8 inner_protocol_type:1; | 606 | __u8 inner_protocol_type:1; |
600 | /* 4 or 6 bit hole */ | 607 | __u8 remcsum_offload:1; |
608 | /* 3 or 5 bit hole */ | ||
601 | 609 | ||
602 | #ifdef CONFIG_NET_SCHED | 610 | #ifdef CONFIG_NET_SCHED |
603 | __u16 tc_index; /* traffic control index */ | 611 | __u16 tc_index; /* traffic control index */ |
@@ -666,6 +674,7 @@ struct sk_buff { | |||
666 | 674 | ||
667 | #define SKB_ALLOC_FCLONE 0x01 | 675 | #define SKB_ALLOC_FCLONE 0x01 |
668 | #define SKB_ALLOC_RX 0x02 | 676 | #define SKB_ALLOC_RX 0x02 |
677 | #define SKB_ALLOC_NAPI 0x04 | ||
669 | 678 | ||
670 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ | 679 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ |
671 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) | 680 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) |
@@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) | |||
710 | skb->_skb_refdst = (unsigned long)dst; | 719 | skb->_skb_refdst = (unsigned long)dst; |
711 | } | 720 | } |
712 | 721 | ||
713 | void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, | ||
714 | bool force); | ||
715 | |||
716 | /** | 722 | /** |
717 | * skb_dst_set_noref - sets skb dst, hopefully, without taking reference | 723 | * skb_dst_set_noref - sets skb dst, hopefully, without taking reference |
718 | * @skb: buffer | 724 | * @skb: buffer |
@@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, | |||
725 | */ | 731 | */ |
726 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) | 732 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) |
727 | { | 733 | { |
728 | __skb_dst_set_noref(skb, dst, false); | 734 | WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); |
729 | } | 735 | skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; |
730 | |||
731 | /** | ||
732 | * skb_dst_set_noref_force - sets skb dst, without taking reference | ||
733 | * @skb: buffer | ||
734 | * @dst: dst entry | ||
735 | * | ||
736 | * Sets skb dst, assuming a reference was not taken on dst. | ||
737 | * No reference is taken and no dst_release will be called. While for | ||
738 | * cached dsts deferred reclaim is a basic feature, for entries that are | ||
739 | * not cached it is caller's job to guarantee that last dst_release for | ||
740 | * provided dst happens when nobody uses it, eg. after a RCU grace period. | ||
741 | */ | ||
742 | static inline void skb_dst_set_noref_force(struct sk_buff *skb, | ||
743 | struct dst_entry *dst) | ||
744 | { | ||
745 | __skb_dst_set_noref(skb, dst, true); | ||
746 | } | 736 | } |
747 | 737 | ||
748 | /** | 738 | /** |
@@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, | |||
810 | fclones = container_of(skb, struct sk_buff_fclones, skb1); | 800 | fclones = container_of(skb, struct sk_buff_fclones, skb1); |
811 | 801 | ||
812 | return skb->fclone == SKB_FCLONE_ORIG && | 802 | return skb->fclone == SKB_FCLONE_ORIG && |
813 | fclones->skb2.fclone == SKB_FCLONE_CLONE && | 803 | atomic_read(&fclones->fclone_ref) > 1 && |
814 | fclones->skb2.sk == sk; | 804 | fclones->skb2.sk == sk; |
815 | } | 805 | } |
816 | 806 | ||
@@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
2176 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); | 2166 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); |
2177 | } | 2167 | } |
2178 | 2168 | ||
2169 | void *napi_alloc_frag(unsigned int fragsz); | ||
2170 | struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, | ||
2171 | unsigned int length, gfp_t gfp_mask); | ||
2172 | static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, | ||
2173 | unsigned int length) | ||
2174 | { | ||
2175 | return __napi_alloc_skb(napi, length, GFP_ATOMIC); | ||
2176 | } | ||
2177 | |||
2179 | /** | 2178 | /** |
2180 | * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data | 2179 | * __dev_alloc_pages - allocate page for network Rx |
2181 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | 2180 | * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx |
2182 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | 2181 | * @order: size of the allocation |
2183 | * @order: size of the allocation | ||
2184 | * | 2182 | * |
2185 | * Allocate a new page. | 2183 | * Allocate a new page. |
2186 | * | 2184 | * |
2187 | * %NULL is returned if there is no free memory. | 2185 | * %NULL is returned if there is no free memory. |
2188 | */ | 2186 | */ |
2189 | static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, | 2187 | static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, |
2190 | struct sk_buff *skb, | 2188 | unsigned int order) |
2191 | unsigned int order) | 2189 | { |
2192 | { | 2190 | /* This piece of code contains several assumptions. |
2193 | struct page *page; | 2191 | * 1. This is for device Rx, therefore a cold page is preferred. |
2194 | 2192 | * 2. The expectation is the user wants a compound page. | |
2195 | gfp_mask |= __GFP_COLD; | 2193 | * 3. If requesting an order 0 page it will not be compound |
2196 | 2194 | * due to the check to see if order has a value in prep_new_page | |
2197 | if (!(gfp_mask & __GFP_NOMEMALLOC)) | 2195 | * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to |
2198 | gfp_mask |= __GFP_MEMALLOC; | 2196 | * code in gfp_to_alloc_flags that should be enforcing this. |
2197 | */ | ||
2198 | gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; | ||
2199 | 2199 | ||
2200 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); | 2200 | return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); |
2201 | if (skb && page && page->pfmemalloc) | 2201 | } |
2202 | skb->pfmemalloc = true; | ||
2203 | 2202 | ||
2204 | return page; | 2203 | static inline struct page *dev_alloc_pages(unsigned int order) |
2204 | { | ||
2205 | return __dev_alloc_pages(GFP_ATOMIC, order); | ||
2205 | } | 2206 | } |
2206 | 2207 | ||
2207 | /** | 2208 | /** |
2208 | * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data | 2209 | * __dev_alloc_page - allocate a page for network Rx |
2209 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | 2210 | * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx |
2210 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | ||
2211 | * | 2211 | * |
2212 | * Allocate a new page. | 2212 | * Allocate a new page. |
2213 | * | 2213 | * |
2214 | * %NULL is returned if there is no free memory. | 2214 | * %NULL is returned if there is no free memory. |
2215 | */ | 2215 | */ |
2216 | static inline struct page *__skb_alloc_page(gfp_t gfp_mask, | 2216 | static inline struct page *__dev_alloc_page(gfp_t gfp_mask) |
2217 | struct sk_buff *skb) | 2217 | { |
2218 | return __dev_alloc_pages(gfp_mask, 0); | ||
2219 | } | ||
2220 | |||
2221 | static inline struct page *dev_alloc_page(void) | ||
2218 | { | 2222 | { |
2219 | return __skb_alloc_pages(gfp_mask, skb, 0); | 2223 | return __dev_alloc_page(GFP_ATOMIC); |
2220 | } | 2224 | } |
2221 | 2225 | ||
2222 | /** | 2226 | /** |
@@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) | |||
2448 | * is untouched. Otherwise it is extended. Returns zero on | 2452 | * is untouched. Otherwise it is extended. Returns zero on |
2449 | * success. The skb is freed on error. | 2453 | * success. The skb is freed on error. |
2450 | */ | 2454 | */ |
2451 | |||
2452 | static inline int skb_padto(struct sk_buff *skb, unsigned int len) | 2455 | static inline int skb_padto(struct sk_buff *skb, unsigned int len) |
2453 | { | 2456 | { |
2454 | unsigned int size = skb->len; | 2457 | unsigned int size = skb->len; |
@@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) | |||
2457 | return skb_pad(skb, len - size); | 2460 | return skb_pad(skb, len - size); |
2458 | } | 2461 | } |
2459 | 2462 | ||
2463 | /** | ||
2464 | * skb_put_padto - increase size and pad an skbuff up to a minimal size | ||
2465 | * @skb: buffer to pad | ||
2466 | * @len: minimal length | ||
2467 | * | ||
2468 | * Pads up a buffer to ensure the trailing bytes exist and are | ||
2469 | * blanked. If the buffer already contains sufficient data it | ||
2470 | * is untouched. Otherwise it is extended. Returns zero on | ||
2471 | * success. The skb is freed on error. | ||
2472 | */ | ||
2473 | static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) | ||
2474 | { | ||
2475 | unsigned int size = skb->len; | ||
2476 | |||
2477 | if (unlikely(size < len)) { | ||
2478 | len -= size; | ||
2479 | if (skb_pad(skb, len)) | ||
2480 | return -ENOMEM; | ||
2481 | __skb_put(skb, len); | ||
2482 | } | ||
2483 | return 0; | ||
2484 | } | ||
2485 | |||
2460 | static inline int skb_add_data(struct sk_buff *skb, | 2486 | static inline int skb_add_data(struct sk_buff *skb, |
2461 | char __user *from, int copy) | 2487 | char __user *from, int copy) |
2462 | { | 2488 | { |
@@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, | |||
2629 | int *err); | 2655 | int *err); |
2630 | unsigned int datagram_poll(struct file *file, struct socket *sock, | 2656 | unsigned int datagram_poll(struct file *file, struct socket *sock, |
2631 | struct poll_table_struct *wait); | 2657 | struct poll_table_struct *wait); |
2632 | int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, | 2658 | int skb_copy_datagram_iter(const struct sk_buff *from, int offset, |
2633 | struct iovec *to, int size); | 2659 | struct iov_iter *to, int size); |
2634 | int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, | 2660 | static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, |
2635 | struct iovec *iov); | 2661 | struct msghdr *msg, int size) |
2636 | int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, | 2662 | { |
2637 | const struct iovec *from, int from_offset, | 2663 | return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); |
2638 | int len); | 2664 | } |
2639 | int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, | 2665 | int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, |
2640 | int offset, size_t count); | 2666 | struct msghdr *msg); |
2641 | int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, | 2667 | int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, |
2642 | const struct iovec *to, int to_offset, | 2668 | struct iov_iter *from, int len); |
2643 | int size); | 2669 | int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); |
2644 | void skb_free_datagram(struct sock *sk, struct sk_buff *skb); | 2670 | void skb_free_datagram(struct sock *sk, struct sk_buff *skb); |
2645 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); | 2671 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); |
2646 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); | 2672 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); |
@@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); | |||
2661 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); | 2687 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); |
2662 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); | 2688 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
2663 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); | 2689 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
2690 | int skb_ensure_writable(struct sk_buff *skb, int write_len); | ||
2691 | int skb_vlan_pop(struct sk_buff *skb); | ||
2692 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); | ||
2693 | |||
2694 | static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) | ||
2695 | { | ||
2696 | /* XXX: stripping const */ | ||
2697 | return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); | ||
2698 | } | ||
2699 | |||
2700 | static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) | ||
2701 | { | ||
2702 | return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; | ||
2703 | } | ||
2664 | 2704 | ||
2665 | struct skb_checksum_ops { | 2705 | struct skb_checksum_ops { |
2666 | __wsum (*update)(const void *mem, int len, __wsum wsum); | 2706 | __wsum (*update)(const void *mem, int len, __wsum wsum); |
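For protocol code, the iovec based copy helpers give way to msghdr/iov_iter aware ones such as skb_copy_datagram_msg(). A hedged sketch of a datagram receive path using them; the function signature is illustrative, not a proto_ops callback:

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

static int example_recv_one(struct sock *sk, struct msghdr *msg, size_t len,
			    int noblock, int flags)
{
	struct sk_buff *skb;
	int err, copied;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = min_t(int, skb->len, len);
	/* Copies straight into msg->msg_iter, whatever segments it holds. */
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	skb_free_datagram(sk, skb);
	return err ? err : copied;
}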
diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..9a139b637069 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
493 | * @memcg: pointer to the memcg this cache belongs to | 493 | * @memcg: pointer to the memcg this cache belongs to |
494 | * @list: list_head for the list of all caches in this memcg | 494 | * @list: list_head for the list of all caches in this memcg |
495 | * @root_cache: pointer to the global, root cache, this cache was derived from | 495 | * @root_cache: pointer to the global, root cache, this cache was derived from |
496 | * @nr_pages: number of pages that belongs to this cache. | ||
497 | */ | 496 | */ |
498 | struct memcg_cache_params { | 497 | struct memcg_cache_params { |
499 | bool is_root_cache; | 498 | bool is_root_cache; |
@@ -506,17 +505,12 @@ struct memcg_cache_params { | |||
506 | struct mem_cgroup *memcg; | 505 | struct mem_cgroup *memcg; |
507 | struct list_head list; | 506 | struct list_head list; |
508 | struct kmem_cache *root_cache; | 507 | struct kmem_cache *root_cache; |
509 | atomic_t nr_pages; | ||
510 | }; | 508 | }; |
511 | }; | 509 | }; |
512 | }; | 510 | }; |
513 | 511 | ||
514 | int memcg_update_all_caches(int num_memcgs); | 512 | int memcg_update_all_caches(int num_memcgs); |
515 | 513 | ||
516 | struct seq_file; | ||
517 | int cache_show(struct kmem_cache *s, struct seq_file *m); | ||
518 | void print_slabinfo_header(struct seq_file *m); | ||
519 | |||
520 | /** | 514 | /** |
521 | * kmalloc_array - allocate memory for an array. | 515 | * kmalloc_array - allocate memory for an array. |
522 | * @n: number of elements. | 516 | * @n: number of elements. |
diff --git a/include/linux/socket.h b/include/linux/socket.h index bb9b83640070..6e49a14365dc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -47,16 +47,25 @@ struct linger { | |||
47 | struct msghdr { | 47 | struct msghdr { |
48 | void *msg_name; /* ptr to socket address structure */ | 48 | void *msg_name; /* ptr to socket address structure */ |
49 | int msg_namelen; /* size of socket address structure */ | 49 | int msg_namelen; /* size of socket address structure */ |
50 | struct iovec *msg_iov; /* scatter/gather array */ | 50 | struct iov_iter msg_iter; /* data */ |
51 | __kernel_size_t msg_iovlen; /* # elements in msg_iov */ | ||
52 | void *msg_control; /* ancillary data */ | 51 | void *msg_control; /* ancillary data */ |
53 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ | 52 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ |
54 | unsigned int msg_flags; /* flags on received message */ | 53 | unsigned int msg_flags; /* flags on received message */ |
55 | }; | 54 | }; |
55 | |||
56 | struct user_msghdr { | ||
57 | void __user *msg_name; /* ptr to socket address structure */ | ||
58 | int msg_namelen; /* size of socket address structure */ | ||
59 | struct iovec __user *msg_iov; /* scatter/gather array */ | ||
60 | __kernel_size_t msg_iovlen; /* # elements in msg_iov */ | ||
61 | void __user *msg_control; /* ancillary data */ | ||
62 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ | ||
63 | unsigned int msg_flags; /* flags on received message */ | ||
64 | }; | ||
56 | 65 | ||
57 | /* For recvmmsg/sendmmsg */ | 66 | /* For recvmmsg/sendmmsg */ |
58 | struct mmsghdr { | 67 | struct mmsghdr { |
59 | struct msghdr msg_hdr; | 68 | struct user_msghdr msg_hdr; |
60 | unsigned int msg_len; | 69 | unsigned int msg_len; |
61 | }; | 70 | }; |
62 | 71 | ||
@@ -94,6 +103,10 @@ struct cmsghdr { | |||
94 | (cmsg)->cmsg_len <= (unsigned long) \ | 103 | (cmsg)->cmsg_len <= (unsigned long) \ |
95 | ((mhdr)->msg_controllen - \ | 104 | ((mhdr)->msg_controllen - \ |
96 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) | 105 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) |
106 | #define for_each_cmsghdr(cmsg, msg) \ | ||
107 | for (cmsg = CMSG_FIRSTHDR(msg); \ | ||
108 | cmsg; \ | ||
109 | cmsg = CMSG_NXTHDR(msg, cmsg)) | ||
97 | 110 | ||
98 | /* | 111 | /* |
99 | * Get the next cmsg header | 112 | * Get the next cmsg header |
@@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, | |||
312 | extern unsigned long iov_pages(const struct iovec *iov, int offset, | 325 | extern unsigned long iov_pages(const struct iovec *iov, int offset, |
313 | unsigned long nr_segs); | 326 | unsigned long nr_segs); |
314 | 327 | ||
315 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); | ||
316 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); | 328 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); |
317 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 329 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
318 | 330 | ||
319 | struct timespec; | 331 | struct timespec; |
320 | 332 | ||
321 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ | 333 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ |
322 | extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); | 334 | extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
323 | extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); | 335 | extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
324 | extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | 336 | extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
325 | unsigned int flags, struct timespec *timeout); | 337 | unsigned int flags, struct timespec *timeout); |
326 | extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, | 338 | extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, |
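The new for_each_cmsghdr() wraps the usual CMSG_FIRSTHDR()/CMSG_NXTHDR() walk. A small example of scanning ancillary data for one level/type pair:

#include <linux/socket.h>

/* Return the first control message matching @level and @type, if any. */
static struct cmsghdr *example_find_cmsg(struct msghdr *msg, int level, int type)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return NULL;		/* malformed ancillary data */
		if (cmsg->cmsg_level == level && cmsg->cmsg_type == type)
			return cmsg;
	}
	return NULL;
}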
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 46d188a9947c..a6ef2a8e6de4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi) | |||
1049 | extern const struct spi_device_id * | 1049 | extern const struct spi_device_id * |
1050 | spi_get_device_id(const struct spi_device *sdev); | 1050 | spi_get_device_id(const struct spi_device *sdev); |
1051 | 1051 | ||
1052 | static inline bool | ||
1053 | spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) | ||
1054 | { | ||
1055 | return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); | ||
1056 | } | ||
1057 | |||
1052 | #endif /* __LINUX_SPI_H */ | 1058 | #endif /* __LINUX_SPI_H */ |
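Note: spi_transfer_is_last() above is aimed at controller drivers that manage chip select themselves. A hedged sketch of a transfer_one() callback using it; the driver body is elided and the GPIO-based CS handling is an assumption, not part of this patch:

#include <linux/gpio.h>
#include <linux/spi/spi.h>

static int foo_transfer_one(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Keep chip select asserted between transfers of one message and
	 * drop it only after the last one, unless cs_change overrides. */
	bool keep_cs = !spi_transfer_is_last(master, xfer) || xfer->cs_change;

	/* ... program the controller and wait for completion here ... */

	if (!keep_cs)
		gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}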
diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 91f5eab9e428..f84212cd3b7d 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h | |||
@@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl); | |||
134 | * this structure. | 134 | * this structure. |
135 | * @probe: binds this driver to a SPMI device. | 135 | * @probe: binds this driver to a SPMI device. |
136 | * @remove: unbinds this driver from the SPMI device. | 136 | * @remove: unbinds this driver from the SPMI device. |
137 | * @shutdown: standard shutdown callback used during powerdown/halt. | ||
138 | * @suspend: standard suspend callback used during system suspend. | ||
139 | * @resume: standard resume callback used during system resume. | ||
140 | * | 137 | * |
141 | * If PM runtime support is desired for a slave, a device driver can call | 138 | * If PM runtime support is desired for a slave, a device driver can call |
142 | * pm_runtime_put() from their probe() routine (and a balancing | 139 | * pm_runtime_put() from their probe() routine (and a balancing |
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 115b570e3bff..669045ab73f3 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_STACKTRACE_H | 1 | #ifndef __LINUX_STACKTRACE_H |
2 | #define __LINUX_STACKTRACE_H | 2 | #define __LINUX_STACKTRACE_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | struct task_struct; | 6 | struct task_struct; |
5 | struct pt_regs; | 7 | struct pt_regs; |
6 | 8 | ||
@@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, | |||
20 | struct stack_trace *trace); | 22 | struct stack_trace *trace); |
21 | 23 | ||
22 | extern void print_stack_trace(struct stack_trace *trace, int spaces); | 24 | extern void print_stack_trace(struct stack_trace *trace, int spaces); |
25 | extern int snprint_stack_trace(char *buf, size_t size, | ||
26 | struct stack_trace *trace, int spaces); | ||
23 | 27 | ||
24 | #ifdef CONFIG_USER_STACKTRACE_SUPPORT | 28 | #ifdef CONFIG_USER_STACKTRACE_SUPPORT |
25 | extern void save_stack_trace_user(struct stack_trace *trace); | 29 | extern void save_stack_trace_user(struct stack_trace *trace); |
@@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace); | |||
32 | # define save_stack_trace_tsk(tsk, trace) do { } while (0) | 36 | # define save_stack_trace_tsk(tsk, trace) do { } while (0) |
33 | # define save_stack_trace_user(trace) do { } while (0) | 37 | # define save_stack_trace_user(trace) do { } while (0) |
34 | # define print_stack_trace(trace, spaces) do { } while (0) | 38 | # define print_stack_trace(trace, spaces) do { } while (0) |
39 | # define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) | ||
35 | #endif | 40 | #endif |
36 | 41 | ||
37 | #endif | 42 | #endif |
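Note: snprint_stack_trace() added above mirrors print_stack_trace() but renders into a caller-supplied buffer. A minimal sketch, assuming CONFIG_STACKTRACE is enabled; the helper name and depth are hypothetical:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define FOO_STACK_DEPTH	16

/* Capture the current call chain and format it into buf. */
static int foo_snapshot_stack(char *buf, size_t size)
{
	unsigned long entries[FOO_STACK_DEPTH];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= FOO_STACK_DEPTH,
		.skip		= 1,	/* skip foo_snapshot_stack() itself */
	};

	save_stack_trace(&trace);
	return snprint_stack_trace(buf, size, &trace, 0);
}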
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 8e030075fe79..a7cbb570cc5c 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
@@ -53,7 +53,7 @@ struct rpc_cred { | |||
53 | struct rcu_head cr_rcu; | 53 | struct rcu_head cr_rcu; |
54 | struct rpc_auth * cr_auth; | 54 | struct rpc_auth * cr_auth; |
55 | const struct rpc_credops *cr_ops; | 55 | const struct rpc_credops *cr_ops; |
56 | #ifdef RPC_DEBUG | 56 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
57 | unsigned long cr_magic; /* 0x0f4aa4f0 */ | 57 | unsigned long cr_magic; /* 0x0f4aa4f0 */ |
58 | #endif | 58 | #endif |
59 | unsigned long cr_expire; /* when to gc */ | 59 | unsigned long cr_expire; /* when to gc */ |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 70736b98c721..d86acc63b25f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -63,6 +63,9 @@ struct rpc_clnt { | |||
63 | struct rpc_rtt cl_rtt_default; | 63 | struct rpc_rtt cl_rtt_default; |
64 | struct rpc_timeout cl_timeout_default; | 64 | struct rpc_timeout cl_timeout_default; |
65 | const struct rpc_program *cl_program; | 65 | const struct rpc_program *cl_program; |
66 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
67 | struct dentry *cl_debugfs; /* debugfs directory */ | ||
68 | #endif | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | /* | 71 | /* |
@@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); | |||
176 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); | 179 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); |
177 | int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); | 180 | int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); |
178 | 181 | ||
182 | const char *rpc_proc_name(const struct rpc_task *task); | ||
179 | #endif /* __KERNEL__ */ | 183 | #endif /* __KERNEL__ */ |
180 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 184 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 9385bd74c860..c57d8ea0716c 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
@@ -10,22 +10,10 @@ | |||
10 | 10 | ||
11 | #include <uapi/linux/sunrpc/debug.h> | 11 | #include <uapi/linux/sunrpc/debug.h> |
12 | 12 | ||
13 | |||
14 | /* | ||
15 | * Enable RPC debugging/profiling. | ||
16 | */ | ||
17 | #ifdef CONFIG_SUNRPC_DEBUG | ||
18 | #define RPC_DEBUG | ||
19 | #endif | ||
20 | #ifdef CONFIG_TRACEPOINTS | ||
21 | #define RPC_TRACEPOINTS | ||
22 | #endif | ||
23 | /* #define RPC_PROFILE */ | ||
24 | |||
25 | /* | 13 | /* |
26 | * Debugging macros etc | 14 | * Debugging macros etc |
27 | */ | 15 | */ |
28 | #ifdef RPC_DEBUG | 16 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
29 | extern unsigned int rpc_debug; | 17 | extern unsigned int rpc_debug; |
30 | extern unsigned int nfs_debug; | 18 | extern unsigned int nfs_debug; |
31 | extern unsigned int nfsd_debug; | 19 | extern unsigned int nfsd_debug; |
@@ -36,7 +24,7 @@ extern unsigned int nlm_debug; | |||
36 | #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) | 24 | #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) |
37 | 25 | ||
38 | #undef ifdebug | 26 | #undef ifdebug |
39 | #ifdef RPC_DEBUG | 27 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
40 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) | 28 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) |
41 | 29 | ||
42 | # define dfprintk(fac, args...) \ | 30 | # define dfprintk(fac, args...) \ |
@@ -65,9 +53,55 @@ extern unsigned int nlm_debug; | |||
65 | /* | 53 | /* |
66 | * Sysctl interface for RPC debugging | 54 | * Sysctl interface for RPC debugging |
67 | */ | 55 | */ |
68 | #ifdef RPC_DEBUG | 56 | |
57 | struct rpc_clnt; | ||
58 | struct rpc_xprt; | ||
59 | |||
60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
69 | void rpc_register_sysctl(void); | 61 | void rpc_register_sysctl(void); |
70 | void rpc_unregister_sysctl(void); | 62 | void rpc_unregister_sysctl(void); |
63 | int sunrpc_debugfs_init(void); | ||
64 | void sunrpc_debugfs_exit(void); | ||
65 | int rpc_clnt_debugfs_register(struct rpc_clnt *); | ||
66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); | ||
67 | int rpc_xprt_debugfs_register(struct rpc_xprt *); | ||
68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); | ||
69 | #else | ||
70 | static inline int | ||
71 | sunrpc_debugfs_init(void) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static inline void | ||
77 | sunrpc_debugfs_exit(void) | ||
78 | { | ||
79 | return; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | ||
84 | { | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static inline void | ||
89 | rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) | ||
90 | { | ||
91 | return; | ||
92 | } | ||
93 | |||
94 | static inline int | ||
95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static inline void | ||
101 | rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) | ||
102 | { | ||
103 | return; | ||
104 | } | ||
71 | #endif | 105 | #endif |
72 | 106 | ||
73 | #endif /* _LINUX_SUNRPC_DEBUG_H_ */ | 107 | #endif /* _LINUX_SUNRPC_DEBUG_H_ */ |
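Note: the !CONFIG_SUNRPC_DEBUG branch above provides static inline no-ops for the new debugfs hooks, so call sites need no ifdefs of their own. A hedged sketch of the calling pattern this enables; foo_setup_client()/foo_teardown_client() are hypothetical, the real callers live in net/sunrpc:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/debug.h>

/* Compiles identically with debugging disabled: the register and
 * unregister calls reduce to the inline stubs. */
static int foo_setup_client(struct rpc_clnt *clnt)
{
	int err;

	err = rpc_clnt_debugfs_register(clnt);
	if (err)
		return err;

	/* ... remainder of client setup ... */
	return 0;
}

static void foo_teardown_client(struct rpc_clnt *clnt)
{
	rpc_clnt_debugfs_unregister(clnt);
}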
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1565bbe86d51..eecb5a71e6c0 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h | |||
@@ -27,10 +27,13 @@ | |||
27 | 27 | ||
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include <linux/ktime.h> | 29 | #include <linux/ktime.h> |
30 | #include <linux/spinlock.h> | ||
30 | 31 | ||
31 | #define RPC_IOSTATS_VERS "1.0" | 32 | #define RPC_IOSTATS_VERS "1.0" |
32 | 33 | ||
33 | struct rpc_iostats { | 34 | struct rpc_iostats { |
35 | spinlock_t om_lock; | ||
36 | |||
34 | /* | 37 | /* |
35 | * These counters give an idea about how many request | 38 | * These counters give an idea about how many request |
36 | * transmissions are required, on average, to complete that | 39 | * transmissions are required, on average, to complete that |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1a8959944c5f..5f1e6bd4c316 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -79,7 +79,7 @@ struct rpc_task { | |||
79 | unsigned short tk_flags; /* misc flags */ | 79 | unsigned short tk_flags; /* misc flags */ |
80 | unsigned short tk_timeouts; /* maj timeouts */ | 80 | unsigned short tk_timeouts; /* maj timeouts */ |
81 | 81 | ||
82 | #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) | 82 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
83 | unsigned short tk_pid; /* debugging aid */ | 83 | unsigned short tk_pid; /* debugging aid */ |
84 | #endif | 84 | #endif |
85 | unsigned char tk_priority : 2,/* Task priority */ | 85 | unsigned char tk_priority : 2,/* Task priority */ |
@@ -187,7 +187,7 @@ struct rpc_wait_queue { | |||
187 | unsigned char nr; /* # tasks remaining for cookie */ | 187 | unsigned char nr; /* # tasks remaining for cookie */ |
188 | unsigned short qlen; /* total # tasks waiting in queue */ | 188 | unsigned short qlen; /* total # tasks waiting in queue */ |
189 | struct rpc_timer timer_list; | 189 | struct rpc_timer timer_list; |
190 | #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) | 190 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
191 | const char * name; | 191 | const char * name; |
192 | #endif | 192 | #endif |
193 | }; | 193 | }; |
@@ -237,7 +237,7 @@ void rpc_free(void *); | |||
237 | int rpciod_up(void); | 237 | int rpciod_up(void); |
238 | void rpciod_down(void); | 238 | void rpciod_down(void); |
239 | int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); | 239 | int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); |
240 | #ifdef RPC_DEBUG | 240 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
241 | struct net; | 241 | struct net; |
242 | void rpc_show_tasks(struct net *); | 242 | void rpc_show_tasks(struct net *); |
243 | #endif | 243 | #endif |
@@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) | |||
251 | return __rpc_wait_for_completion_task(task, NULL); | 251 | return __rpc_wait_for_completion_task(task, NULL); |
252 | } | 252 | } |
253 | 253 | ||
254 | #if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) | 254 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
255 | static inline const char * rpc_qname(const struct rpc_wait_queue *q) | 255 | static inline const char * rpc_qname(const struct rpc_wait_queue *q) |
256 | { | 256 | { |
257 | return ((q && q->name) ? q->name : "unknown"); | 257 | return ((q && q->name) ? q->name : "unknown"); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index cf391eef2e6d..9d27ac45b909 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -239,6 +239,9 @@ struct rpc_xprt { | |||
239 | struct net *xprt_net; | 239 | struct net *xprt_net; |
240 | const char *servername; | 240 | const char *servername; |
241 | const char *address_strings[RPC_DISPLAY_MAX]; | 241 | const char *address_strings[RPC_DISPLAY_MAX]; |
242 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
243 | struct dentry *debugfs; /* debugfs directory */ | ||
244 | #endif | ||
242 | }; | 245 | }; |
243 | 246 | ||
244 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | 247 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 1ad36cc25b2e..7591788e9fbf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
@@ -17,6 +17,65 @@ void cleanup_socket_xprt(void); | |||
17 | #define RPC_DEF_MIN_RESVPORT (665U) | 17 | #define RPC_DEF_MIN_RESVPORT (665U) |
18 | #define RPC_DEF_MAX_RESVPORT (1023U) | 18 | #define RPC_DEF_MAX_RESVPORT (1023U) |
19 | 19 | ||
20 | struct sock_xprt { | ||
21 | struct rpc_xprt xprt; | ||
22 | |||
23 | /* | ||
24 | * Network layer | ||
25 | */ | ||
26 | struct socket * sock; | ||
27 | struct sock * inet; | ||
28 | |||
29 | /* | ||
30 | * State of TCP reply receive | ||
31 | */ | ||
32 | __be32 tcp_fraghdr, | ||
33 | tcp_xid, | ||
34 | tcp_calldir; | ||
35 | |||
36 | u32 tcp_offset, | ||
37 | tcp_reclen; | ||
38 | |||
39 | unsigned long tcp_copied, | ||
40 | tcp_flags; | ||
41 | |||
42 | /* | ||
43 | * Connection of transports | ||
44 | */ | ||
45 | struct delayed_work connect_worker; | ||
46 | struct sockaddr_storage srcaddr; | ||
47 | unsigned short srcport; | ||
48 | |||
49 | /* | ||
50 | * UDP socket buffer size parameters | ||
51 | */ | ||
52 | size_t rcvsize, | ||
53 | sndsize; | ||
54 | |||
55 | /* | ||
56 | * Saved socket callback addresses | ||
57 | */ | ||
58 | void (*old_data_ready)(struct sock *); | ||
59 | void (*old_state_change)(struct sock *); | ||
60 | void (*old_write_space)(struct sock *); | ||
61 | void (*old_error_report)(struct sock *); | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * TCP receive state flags | ||
66 | */ | ||
67 | #define TCP_RCV_LAST_FRAG (1UL << 0) | ||
68 | #define TCP_RCV_COPY_FRAGHDR (1UL << 1) | ||
69 | #define TCP_RCV_COPY_XID (1UL << 2) | ||
70 | #define TCP_RCV_COPY_DATA (1UL << 3) | ||
71 | #define TCP_RCV_READ_CALLDIR (1UL << 4) | ||
72 | #define TCP_RCV_COPY_CALLDIR (1UL << 5) | ||
73 | |||
74 | /* | ||
75 | * TCP RPC flags | ||
76 | */ | ||
77 | #define TCP_RPC_REPLY (1UL << 6) | ||
78 | |||
20 | #endif /* __KERNEL__ */ | 79 | #endif /* __KERNEL__ */ |
21 | 80 | ||
22 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ | 81 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 37a585beef5c..34e8b60ab973 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -102,14 +102,6 @@ union swap_header { | |||
102 | } info; | 102 | } info; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | /* A swap entry has to fit into a "unsigned long", as | ||
106 | * the entry is hidden in the "index" field of the | ||
107 | * swapper address space. | ||
108 | */ | ||
109 | typedef struct { | ||
110 | unsigned long val; | ||
111 | } swp_entry_t; | ||
112 | |||
113 | /* | 105 | /* |
114 | * current->reclaim_state points to one of these when a task is running | 106 | * current->reclaim_state points to one of these when a task is running |
115 | * memory reclaim | 107 | * memory reclaim |
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef __LINUX_SWAP_CGROUP_H | ||
2 | #define __LINUX_SWAP_CGROUP_H | ||
3 | |||
4 | #include <linux/swap.h> | ||
5 | |||
6 | #ifdef CONFIG_MEMCG_SWAP | ||
7 | |||
8 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | ||
9 | unsigned short old, unsigned short new); | ||
10 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); | ||
11 | extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); | ||
12 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | ||
13 | extern void swap_cgroup_swapoff(int type); | ||
14 | |||
15 | #else | ||
16 | |||
17 | static inline | ||
18 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) | ||
19 | { | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | static inline | ||
24 | unsigned short lookup_swap_cgroup_id(swp_entry_t ent) | ||
25 | { | ||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | static inline int | ||
30 | swap_cgroup_swapon(int type, unsigned long max_pages) | ||
31 | { | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | static inline void swap_cgroup_swapoff(int type) | ||
36 | { | ||
37 | return; | ||
38 | } | ||
39 | |||
40 | #endif /* CONFIG_MEMCG_SWAP */ | ||
41 | |||
42 | #endif /* __LINUX_SWAP_CGROUP_H */ | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357cc..85893d744901 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -25,7 +25,7 @@ struct linux_dirent64; | |||
25 | struct list_head; | 25 | struct list_head; |
26 | struct mmap_arg_struct; | 26 | struct mmap_arg_struct; |
27 | struct msgbuf; | 27 | struct msgbuf; |
28 | struct msghdr; | 28 | struct user_msghdr; |
29 | struct mmsghdr; | 29 | struct mmsghdr; |
30 | struct msqid_ds; | 30 | struct msqid_ds; |
31 | struct new_utsname; | 31 | struct new_utsname; |
@@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); | |||
601 | asmlinkage long sys_send(int, void __user *, size_t, unsigned); | 601 | asmlinkage long sys_send(int, void __user *, size_t, unsigned); |
602 | asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, | 602 | asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, |
603 | struct sockaddr __user *, int); | 603 | struct sockaddr __user *, int); |
604 | asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); | 604 | asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
605 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, | 605 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, |
606 | unsigned int vlen, unsigned flags); | 606 | unsigned int vlen, unsigned flags); |
607 | asmlinkage long sys_recv(int, void __user *, size_t, unsigned); | 607 | asmlinkage long sys_recv(int, void __user *, size_t, unsigned); |
608 | asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, | 608 | asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, |
609 | struct sockaddr __user *, int __user *); | 609 | struct sockaddr __user *, int __user *); |
610 | asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); | 610 | asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
611 | asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, | 611 | asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, |
612 | unsigned int vlen, unsigned flags, | 612 | unsigned int vlen, unsigned flags, |
613 | struct timespec __user *timeout); | 613 | struct timespec __user *timeout); |
@@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, | |||
877 | asmlinkage long sys_getrandom(char __user *buf, size_t count, | 877 | asmlinkage long sys_getrandom(char __user *buf, size_t count, |
878 | unsigned int flags); | 878 | unsigned int flags); |
879 | asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); | 879 | asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); |
880 | |||
881 | asmlinkage long sys_execveat(int dfd, const char __user *filename, | ||
882 | const char __user *const __user *argv, | ||
883 | const char __user *const __user *envp, int flags); | ||
884 | |||
880 | #endif | 885 | #endif |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index f97d0dbb59fa..ddad16148bd6 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -70,6 +70,8 @@ struct attribute_group { | |||
70 | * for examples.. | 70 | * for examples.. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | #define SYSFS_PREALLOC 010000 | ||
74 | |||
73 | #define __ATTR(_name, _mode, _show, _store) { \ | 75 | #define __ATTR(_name, _mode, _show, _store) { \ |
74 | .attr = {.name = __stringify(_name), \ | 76 | .attr = {.name = __stringify(_name), \ |
75 | .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ | 77 | .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ |
@@ -77,6 +79,13 @@ struct attribute_group { | |||
77 | .store = _store, \ | 79 | .store = _store, \ |
78 | } | 80 | } |
79 | 81 | ||
82 | #define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ | ||
83 | .attr = {.name = __stringify(_name), \ | ||
84 | .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ | ||
85 | .show = _show, \ | ||
86 | .store = _store, \ | ||
87 | } | ||
88 | |||
80 | #define __ATTR_RO(_name) { \ | 89 | #define __ATTR_RO(_name) { \ |
81 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ | 90 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ |
82 | .show = _name##_show, \ | 91 | .show = _name##_show, \ |
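Note: SYSFS_PREALLOC and __ATTR_PREALLOC above mark an attribute whose backing buffer is allocated up front, so show()/store() can run without further allocation. A sketch of declaring such a device attribute; the attribute name and callbacks are hypothetical:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "ready\n");
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* parse and apply the new state here */
	return count;
}

/* The seq_file buffer for this attribute is preallocated at creation time. */
static struct device_attribute dev_attr_state =
	__ATTR_PREALLOC(state, S_IRUGO | S_IWUSR, state_show, state_store);

The attribute would then be registered as usual, for example through device_create_file() or an attribute group.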
diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 98a3153c0f96..4b7b875a7ce1 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h | |||
@@ -49,4 +49,13 @@ | |||
49 | 49 | ||
50 | int do_syslog(int type, char __user *buf, int count, bool from_file); | 50 | int do_syslog(int type, char __user *buf, int count, bool from_file); |
51 | 51 | ||
52 | #ifdef CONFIG_PRINTK | ||
53 | int check_syslog_permissions(int type, bool from_file); | ||
54 | #else | ||
55 | static inline int check_syslog_permissions(int type, bool from_file) | ||
56 | { | ||
57 | return 0; | ||
58 | } | ||
59 | #endif | ||
60 | |||
52 | #endif /* _LINUX_SYSLOG_H */ | 61 | #endif /* _LINUX_SYSLOG_H */ |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa8..67309ece0772 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -130,7 +130,7 @@ struct tcp_sock { | |||
130 | /* inet_connection_sock has to be the first member of tcp_sock */ | 130 | /* inet_connection_sock has to be the first member of tcp_sock */ |
131 | struct inet_connection_sock inet_conn; | 131 | struct inet_connection_sock inet_conn; |
132 | u16 tcp_header_len; /* Bytes of tcp header to send */ | 132 | u16 tcp_header_len; /* Bytes of tcp header to send */ |
133 | u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ | 133 | u16 gso_segs; /* Max number of segs per GSO packet */ |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * Header prediction flags | 136 | * Header prediction flags |
@@ -162,7 +162,7 @@ struct tcp_sock { | |||
162 | struct { | 162 | struct { |
163 | struct sk_buff_head prequeue; | 163 | struct sk_buff_head prequeue; |
164 | struct task_struct *task; | 164 | struct task_struct *task; |
165 | struct iovec *iov; | 165 | struct msghdr *msg; |
166 | int memory; | 166 | int memory; |
167 | int len; | 167 | int len; |
168 | } ucopy; | 168 | } ucopy; |
@@ -204,10 +204,10 @@ struct tcp_sock { | |||
204 | 204 | ||
205 | u16 urg_data; /* Saved octet of OOB data and control flags */ | 205 | u16 urg_data; /* Saved octet of OOB data and control flags */ |
206 | u8 ecn_flags; /* ECN status bits. */ | 206 | u8 ecn_flags; /* ECN status bits. */ |
207 | u8 reordering; /* Packet reordering metric. */ | 207 | u8 keepalive_probes; /* num of allowed keep alive probes */ |
208 | u32 reordering; /* Packet reordering metric. */ | ||
208 | u32 snd_up; /* Urgent pointer */ | 209 | u32 snd_up; /* Urgent pointer */ |
209 | 210 | ||
210 | u8 keepalive_probes; /* num of allowed keep alive probes */ | ||
211 | /* | 211 | /* |
212 | * Options received (usually on last packet, some only on SYN packets). | 212 | * Options received (usually on last packet, some only on SYN packets). |
213 | */ | 213 | */ |
diff --git a/include/linux/time.h b/include/linux/time.h index 8c42cf8d2444..203c2ad40d71 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva | |||
39 | return lhs->tv_usec - rhs->tv_usec; | 39 | return lhs->tv_usec - rhs->tv_usec; |
40 | } | 40 | } |
41 | 41 | ||
42 | extern unsigned long mktime(const unsigned int year, const unsigned int mon, | 42 | extern time64_t mktime64(const unsigned int year, const unsigned int mon, |
43 | const unsigned int day, const unsigned int hour, | 43 | const unsigned int day, const unsigned int hour, |
44 | const unsigned int min, const unsigned int sec); | 44 | const unsigned int min, const unsigned int sec); |
45 | |||
46 | /** | ||
47 | * Deprecated. Use mktime64(). | ||
48 | */ | ||
49 | static inline unsigned long mktime(const unsigned int year, | ||
50 | const unsigned int mon, const unsigned int day, | ||
51 | const unsigned int hour, const unsigned int min, | ||
52 | const unsigned int sec) | ||
53 | { | ||
54 | return mktime64(year, mon, day, hour, min, sec); | ||
55 | } | ||
45 | 56 | ||
46 | extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); | 57 | extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
47 | 58 | ||
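Note: mktime64() above is the 2038-safe replacement; the old mktime() survives only as an inline wrapper that truncates on 32-bit. A short sketch converting a broken-down RTC time with the new helper; the function name is hypothetical, and struct rtc_time stores years since 1900 with zero-based months:

#include <linux/rtc.h>
#include <linux/time.h>

/* Convert a broken-down RTC reading to seconds since the Unix epoch
 * without truncating to unsigned long on 32-bit. */
static time64_t foo_rtc_to_time64(const struct rtc_time *tm)
{
	return mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
			tm->tm_hour, tm->tm_min, tm->tm_sec);
}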
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 95640dcd1899..05af9a334893 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h | |||
@@ -42,6 +42,7 @@ struct tk_read_base { | |||
42 | * struct timekeeper - Structure holding internal timekeeping values. | 42 | * struct timekeeper - Structure holding internal timekeeping values. |
43 | * @tkr: The readout base structure | 43 | * @tkr: The readout base structure |
44 | * @xtime_sec: Current CLOCK_REALTIME time in seconds | 44 | * @xtime_sec: Current CLOCK_REALTIME time in seconds |
45 | * @ktime_sec: Current CLOCK_MONOTONIC time in seconds | ||
45 | * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset | 46 | * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset |
46 | * @offs_real: Offset clock monotonic -> clock realtime | 47 | * @offs_real: Offset clock monotonic -> clock realtime |
47 | * @offs_boot: Offset clock monotonic -> clock boottime | 48 | * @offs_boot: Offset clock monotonic -> clock boottime |
@@ -77,6 +78,7 @@ struct tk_read_base { | |||
77 | struct timekeeper { | 78 | struct timekeeper { |
78 | struct tk_read_base tkr; | 79 | struct tk_read_base tkr; |
79 | u64 xtime_sec; | 80 | u64 xtime_sec; |
81 | unsigned long ktime_sec; | ||
80 | struct timespec64 wall_to_monotonic; | 82 | struct timespec64 wall_to_monotonic; |
81 | ktime_t offs_real; | 83 | ktime_t offs_real; |
82 | ktime_t offs_boot; | 84 | ktime_t offs_boot; |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 1caa6b04fdc5..9b63d13ba82b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
@@ -10,7 +10,7 @@ extern int timekeeping_suspended; | |||
10 | * Get and set timeofday | 10 | * Get and set timeofday |
11 | */ | 11 | */ |
12 | extern void do_gettimeofday(struct timeval *tv); | 12 | extern void do_gettimeofday(struct timeval *tv); |
13 | extern int do_settimeofday(const struct timespec *tv); | 13 | extern int do_settimeofday64(const struct timespec64 *ts); |
14 | extern int do_sys_settimeofday(const struct timespec *tv, | 14 | extern int do_sys_settimeofday(const struct timespec *tv, |
15 | const struct timezone *tz); | 15 | const struct timezone *tz); |
16 | 16 | ||
@@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void); | |||
25 | /* | 25 | /* |
26 | * timespec based interfaces | 26 | * timespec based interfaces |
27 | */ | 27 | */ |
28 | struct timespec get_monotonic_coarse(void); | 28 | struct timespec64 get_monotonic_coarse64(void); |
29 | extern void getrawmonotonic(struct timespec *ts); | 29 | extern void getrawmonotonic64(struct timespec64 *ts); |
30 | extern void ktime_get_ts64(struct timespec64 *ts); | 30 | extern void ktime_get_ts64(struct timespec64 *ts); |
31 | extern time64_t ktime_get_seconds(void); | ||
32 | extern time64_t ktime_get_real_seconds(void); | ||
31 | 33 | ||
32 | extern int __getnstimeofday64(struct timespec64 *tv); | 34 | extern int __getnstimeofday64(struct timespec64 *tv); |
33 | extern void getnstimeofday64(struct timespec64 *tv); | 35 | extern void getnstimeofday64(struct timespec64 *tv); |
34 | 36 | ||
35 | #if BITS_PER_LONG == 64 | 37 | #if BITS_PER_LONG == 64 |
38 | /** | ||
39 | * Deprecated. Use do_settimeofday64(). | ||
40 | */ | ||
41 | static inline int do_settimeofday(const struct timespec *ts) | ||
42 | { | ||
43 | return do_settimeofday64(ts); | ||
44 | } | ||
45 | |||
36 | static inline int __getnstimeofday(struct timespec *ts) | 46 | static inline int __getnstimeofday(struct timespec *ts) |
37 | { | 47 | { |
38 | return __getnstimeofday64(ts); | 48 | return __getnstimeofday64(ts); |
@@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts) | |||
53 | getnstimeofday64(ts); | 63 | getnstimeofday64(ts); |
54 | } | 64 | } |
55 | 65 | ||
66 | static inline void getrawmonotonic(struct timespec *ts) | ||
67 | { | ||
68 | getrawmonotonic64(ts); | ||
69 | } | ||
70 | |||
71 | static inline struct timespec get_monotonic_coarse(void) | ||
72 | { | ||
73 | return get_monotonic_coarse64(); | ||
74 | } | ||
56 | #else | 75 | #else |
76 | /** | ||
77 | * Deprecated. Use do_settimeofday64(). | ||
78 | */ | ||
79 | static inline int do_settimeofday(const struct timespec *ts) | ||
80 | { | ||
81 | struct timespec64 ts64; | ||
82 | |||
83 | ts64 = timespec_to_timespec64(*ts); | ||
84 | return do_settimeofday64(&ts64); | ||
85 | } | ||
86 | |||
57 | static inline int __getnstimeofday(struct timespec *ts) | 87 | static inline int __getnstimeofday(struct timespec *ts) |
58 | { | 88 | { |
59 | struct timespec64 ts64; | 89 | struct timespec64 ts64; |
@@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts) | |||
86 | getnstimeofday64(&ts64); | 116 | getnstimeofday64(&ts64); |
87 | *ts = timespec64_to_timespec(ts64); | 117 | *ts = timespec64_to_timespec(ts64); |
88 | } | 118 | } |
119 | |||
120 | static inline void getrawmonotonic(struct timespec *ts) | ||
121 | { | ||
122 | struct timespec64 ts64; | ||
123 | |||
124 | getrawmonotonic64(&ts64); | ||
125 | *ts = timespec64_to_timespec(ts64); | ||
126 | } | ||
127 | |||
128 | static inline struct timespec get_monotonic_coarse(void) | ||
129 | { | ||
130 | return timespec64_to_timespec(get_monotonic_coarse64()); | ||
131 | } | ||
89 | #endif | 132 | #endif |
90 | 133 | ||
91 | extern void getboottime(struct timespec *ts); | 134 | extern void getboottime(struct timespec *ts); |
@@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts) | |||
182 | /* | 225 | /* |
183 | * RTC specific | 226 | * RTC specific |
184 | */ | 227 | */ |
185 | extern void timekeeping_inject_sleeptime(struct timespec *delta); | 228 | extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); |
186 | 229 | ||
187 | /* | 230 | /* |
188 | * PPS accessor | 231 | * PPS accessor |
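Note: the timekeeping.h hunk above adds timespec64 variants and keeps the old timespec names as inline wrappers on both 32- and 64-bit. A hedged sketch of an RTC resume path using the 64-bit sleeptime injection; the foo_* names and the persistent-clock helper are assumptions, not part of this patch:

#include <linux/time64.h>
#include <linux/timekeeping.h>

static struct timespec64 foo_suspend_time;	/* captured at suspend */

/* Hypothetical helper provided elsewhere by the driver. */
extern void foo_read_persistent_clock(struct timespec64 *ts);

static void foo_rtc_resume(void)
{
	struct timespec64 resume_time, sleep_time;

	foo_read_persistent_clock(&resume_time);
	sleep_time = timespec64_sub(resume_time, foo_suspend_time);

	/* Only credit time that actually elapsed while suspended. */
	if (sleep_time.tv_sec > 0)
		timekeeping_inject_sleeptime64(&sleep_time);
}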
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e3..cfaf5a1d4bad 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef _LINUX_TRACE_SEQ_H | 1 | #ifndef _LINUX_TRACE_SEQ_H |
2 | #define _LINUX_TRACE_SEQ_H | 2 | #define _LINUX_TRACE_SEQ_H |
3 | 3 | ||
4 | #include <linux/fs.h> | 4 | #include <linux/seq_buf.h> |
5 | 5 | ||
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | 7 | ||
@@ -12,20 +12,36 @@ | |||
12 | 12 | ||
13 | struct trace_seq { | 13 | struct trace_seq { |
14 | unsigned char buffer[PAGE_SIZE]; | 14 | unsigned char buffer[PAGE_SIZE]; |
15 | unsigned int len; | 15 | struct seq_buf seq; |
16 | unsigned int readpos; | ||
17 | int full; | 16 | int full; |
18 | }; | 17 | }; |
19 | 18 | ||
20 | static inline void | 19 | static inline void |
21 | trace_seq_init(struct trace_seq *s) | 20 | trace_seq_init(struct trace_seq *s) |
22 | { | 21 | { |
23 | s->len = 0; | 22 | seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); |
24 | s->readpos = 0; | ||
25 | s->full = 0; | 23 | s->full = 0; |
26 | } | 24 | } |
27 | 25 | ||
28 | /** | 26 | /** |
27 | * trace_seq_used - amount of actual data written to buffer | ||
28 | * @s: trace sequence descriptor | ||
29 | * | ||
30 | * Returns the amount of data written to the buffer. | ||
31 | * | ||
32 | * IMPORTANT! | ||
33 | * | ||
34 | * Use this instead of @s->seq.len if you need to pass the amount | ||
35 | * of data from the buffer to another buffer (userspace, or what not). | ||
36 | * The @s->seq.len on overflow is bigger than the buffer size and | ||
37 | * using it can cause access to undefined memory. | ||
38 | */ | ||
39 | static inline int trace_seq_used(struct trace_seq *s) | ||
40 | { | ||
41 | return seq_buf_used(&s->seq); | ||
42 | } | ||
43 | |||
44 | /** | ||
29 | * trace_seq_buffer_ptr - return pointer to next location in buffer | 45 | * trace_seq_buffer_ptr - return pointer to next location in buffer |
30 | * @s: trace sequence descriptor | 46 | * @s: trace sequence descriptor |
31 | * | 47 | * |
@@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s) | |||
37 | static inline unsigned char * | 53 | static inline unsigned char * |
38 | trace_seq_buffer_ptr(struct trace_seq *s) | 54 | trace_seq_buffer_ptr(struct trace_seq *s) |
39 | { | 55 | { |
40 | return s->buffer + s->len; | 56 | return s->buffer + seq_buf_used(&s->seq); |
57 | } | ||
58 | |||
59 | /** | ||
60 | * trace_seq_has_overflowed - return true if the trace_seq took too much | ||
61 | * @s: trace sequence descriptor | ||
62 | * | ||
63 | * Returns true if too much data was added to the trace_seq and it is | ||
64 | * now full and will not take anymore. | ||
65 | */ | ||
66 | static inline bool trace_seq_has_overflowed(struct trace_seq *s) | ||
67 | { | ||
68 | return s->full || seq_buf_has_overflowed(&s->seq); | ||
41 | } | 69 | } |
42 | 70 | ||
43 | /* | 71 | /* |
@@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s) | |||
45 | */ | 73 | */ |
46 | #ifdef CONFIG_TRACING | 74 | #ifdef CONFIG_TRACING |
47 | extern __printf(2, 3) | 75 | extern __printf(2, 3) |
48 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 76 | void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
49 | extern __printf(2, 0) | 77 | extern __printf(2, 0) |
50 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); | 78 | void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); |
51 | extern int | 79 | extern void |
52 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 80 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
53 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); | 81 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
54 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 82 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
55 | int cnt); | 83 | int cnt); |
56 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 84 | extern void trace_seq_puts(struct trace_seq *s, const char *str); |
57 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | 85 | extern void trace_seq_putc(struct trace_seq *s, unsigned char c); |
58 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); | 86 | extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); |
59 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 87 | extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
60 | unsigned int len); | 88 | unsigned int len); |
61 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); | 89 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); |
62 | 90 | ||
63 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 91 | extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
64 | int nmaskbits); | 92 | int nmaskbits); |
65 | 93 | ||
66 | #else /* CONFIG_TRACING */ | 94 | #else /* CONFIG_TRACING */ |
67 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 95 | static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
68 | { | 96 | { |
69 | return 0; | ||
70 | } | 97 | } |
71 | static inline int | 98 | static inline void |
72 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | 99 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) |
73 | { | 100 | { |
74 | return 0; | ||
75 | } | 101 | } |
76 | 102 | ||
77 | static inline int | 103 | static inline void |
78 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 104 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
79 | int nmaskbits) | 105 | int nmaskbits) |
80 | { | 106 | { |
81 | return 0; | ||
82 | } | 107 | } |
83 | 108 | ||
84 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) | 109 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
@@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | |||
90 | { | 115 | { |
91 | return 0; | 116 | return 0; |
92 | } | 117 | } |
93 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | 118 | static inline void trace_seq_puts(struct trace_seq *s, const char *str) |
94 | { | 119 | { |
95 | return 0; | ||
96 | } | 120 | } |
97 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | 121 | static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) |
98 | { | 122 | { |
99 | return 0; | ||
100 | } | 123 | } |
101 | static inline int | 124 | static inline void |
102 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | 125 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
103 | { | 126 | { |
104 | return 0; | ||
105 | } | 127 | } |
106 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 128 | static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
107 | unsigned int len) | 129 | unsigned int len) |
108 | { | 130 | { |
109 | return 0; | ||
110 | } | 131 | } |
111 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) | 132 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) |
112 | { | 133 | { |
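Note: with trace_seq now wrapping a seq_buf, the write helpers above return void and callers check trace_seq_has_overflowed() once at the end instead of testing every return value. A sketch of an output callback under that model; the foo_* names are hypothetical and the TRACE_TYPE_* handling follows the usual ftrace convention:

#include <linux/ftrace_event.h>
#include <linux/trace_seq.h>

/* Hypothetical print callback: append unconditionally, then report a
 * partial line only if the sequence buffer overflowed. */
static enum print_line_t foo_trace_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "foo: cpu=%d ts=%llu\n",
			 iter->cpu, (unsigned long long)iter->ts);
	trace_seq_puts(s, "  (no per-call error checks needed)\n");

	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}

When the formatted output is handed to another buffer, trace_seq_used() is the length to pass on, since seq.len can exceed the buffer size after an overflow.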
diff --git a/include/linux/tty.h b/include/linux/tty.h index 5171ef8f7b85..7d66ae508e5c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -284,7 +284,7 @@ struct tty_struct { | |||
284 | 284 | ||
285 | #define N_TTY_BUF_SIZE 4096 | 285 | #define N_TTY_BUF_SIZE 4096 |
286 | 286 | ||
287 | unsigned char closing:1; | 287 | int closing; |
288 | unsigned char *write_buf; | 288 | unsigned char *write_buf; |
289 | int write_cnt; | 289 | int write_cnt; |
290 | /* If the tty has a pending do_SAK, queue it here - akpm */ | 290 | /* If the tty has a pending do_SAK, queue it here - akpm */ |
@@ -316,12 +316,10 @@ struct tty_file_private { | |||
316 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ | 316 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ |
317 | #define TTY_DEBUG 4 /* Debugging */ | 317 | #define TTY_DEBUG 4 /* Debugging */ |
318 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ | 318 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ |
319 | #define TTY_CLOSING 7 /* ->close() in progress */ | ||
320 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | 319 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ |
321 | #define TTY_PTY_LOCK 16 /* pty private */ | 320 | #define TTY_PTY_LOCK 16 /* pty private */ |
322 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ | 321 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ |
323 | #define TTY_HUPPED 18 /* Post driver->hangup() */ | 322 | #define TTY_HUPPED 18 /* Post driver->hangup() */ |
324 | #define TTY_HUPPING 21 /* ->hangup() in progress */ | ||
325 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ | 323 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ |
326 | 324 | ||
327 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) | 325 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) |
@@ -437,14 +435,13 @@ extern int is_ignored(int sig); | |||
437 | extern int tty_signal(int sig, struct tty_struct *tty); | 435 | extern int tty_signal(int sig, struct tty_struct *tty); |
438 | extern void tty_hangup(struct tty_struct *tty); | 436 | extern void tty_hangup(struct tty_struct *tty); |
439 | extern void tty_vhangup(struct tty_struct *tty); | 437 | extern void tty_vhangup(struct tty_struct *tty); |
440 | extern void tty_unhangup(struct file *filp); | ||
441 | extern int tty_hung_up_p(struct file *filp); | 438 | extern int tty_hung_up_p(struct file *filp); |
442 | extern void do_SAK(struct tty_struct *tty); | 439 | extern void do_SAK(struct tty_struct *tty); |
443 | extern void __do_SAK(struct tty_struct *tty); | 440 | extern void __do_SAK(struct tty_struct *tty); |
444 | extern void no_tty(void); | 441 | extern void no_tty(void); |
445 | extern void tty_flush_to_ldisc(struct tty_struct *tty); | 442 | extern void tty_flush_to_ldisc(struct tty_struct *tty); |
446 | extern void tty_buffer_free_all(struct tty_port *port); | 443 | extern void tty_buffer_free_all(struct tty_port *port); |
447 | extern void tty_buffer_flush(struct tty_struct *tty); | 444 | extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); |
448 | extern void tty_buffer_init(struct tty_port *port); | 445 | extern void tty_buffer_init(struct tty_port *port); |
449 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); | 446 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); |
450 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); | 447 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); |
@@ -498,9 +495,6 @@ extern int tty_init_termios(struct tty_struct *tty); | |||
498 | extern int tty_standard_install(struct tty_driver *driver, | 495 | extern int tty_standard_install(struct tty_driver *driver, |
499 | struct tty_struct *tty); | 496 | struct tty_struct *tty); |
500 | 497 | ||
501 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | ||
502 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | ||
503 | |||
504 | extern struct mutex tty_mutex; | 498 | extern struct mutex tty_mutex; |
505 | extern spinlock_t tty_files_lock; | 499 | extern spinlock_t tty_files_lock; |
506 | 500 | ||
@@ -562,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); | |||
562 | extern int tty_unregister_ldisc(int disc); | 556 | extern int tty_unregister_ldisc(int disc); |
563 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); | 557 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); |
564 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); | 558 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); |
565 | extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); | 559 | extern void tty_ldisc_release(struct tty_struct *tty); |
566 | extern void tty_ldisc_init(struct tty_struct *tty); | 560 | extern void tty_ldisc_init(struct tty_struct *tty); |
567 | extern void tty_ldisc_deinit(struct tty_struct *tty); | 561 | extern void tty_ldisc_deinit(struct tty_struct *tty); |
568 | extern void tty_ldisc_begin(void); | 562 | extern void tty_ldisc_begin(void); |
@@ -623,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | |||
623 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, | 617 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, |
624 | unsigned int cmd, unsigned long arg); | 618 | unsigned int cmd, unsigned long arg); |
625 | 619 | ||
626 | /* serial.c */ | ||
627 | |||
628 | extern void serial_console_init(void); | ||
629 | |||
630 | /* pcxx.c */ | ||
631 | |||
632 | extern int pcxe_open(struct tty_struct *tty, struct file *filp); | ||
633 | |||
634 | /* vt.c */ | 620 | /* vt.c */ |
635 | 621 | ||
636 | extern int vt_ioctl(struct tty_struct *tty, | 622 | extern int vt_ioctl(struct tty_struct *tty, |
@@ -643,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty, | |||
643 | /* functions for preparation of BKL removal */ | 629 | /* functions for preparation of BKL removal */ |
644 | extern void __lockfunc tty_lock(struct tty_struct *tty); | 630 | extern void __lockfunc tty_lock(struct tty_struct *tty); |
645 | extern void __lockfunc tty_unlock(struct tty_struct *tty); | 631 | extern void __lockfunc tty_unlock(struct tty_struct *tty); |
646 | extern void __lockfunc tty_lock_pair(struct tty_struct *tty, | 632 | extern void __lockfunc tty_lock_slave(struct tty_struct *tty); |
647 | struct tty_struct *tty2); | 633 | extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); |
648 | extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, | 634 | extern void tty_set_lock_subclass(struct tty_struct *tty); |
649 | struct tty_struct *tty2); | ||
650 | |||
651 | /* | 635 | /* |
652 | * this shall be called only from where BTM is held (like close) | 636 | * this shall be called only from where BTM is held (like close) |
653 | * | 637 | * |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 9b1581414cd4..a41e252396c0 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -31,6 +31,7 @@ struct iov_iter { | |||
31 | size_t count; | 31 | size_t count; |
32 | union { | 32 | union { |
33 | const struct iovec *iov; | 33 | const struct iovec *iov; |
34 | const struct kvec *kvec; | ||
34 | const struct bio_vec *bvec; | 35 | const struct bio_vec *bvec; |
35 | }; | 36 | }; |
36 | unsigned long nr_segs; | 37 | unsigned long nr_segs; |
@@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |||
82 | struct iov_iter *i); | 83 | struct iov_iter *i); |
83 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); | 84 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); |
84 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); | 85 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); |
86 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); | ||
85 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); | 87 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); |
86 | unsigned long iov_iter_alignment(const struct iov_iter *i); | 88 | unsigned long iov_iter_alignment(const struct iov_iter *i); |
87 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, | 89 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, |
88 | unsigned long nr_segs, size_t count); | 90 | unsigned long nr_segs, size_t count); |
91 | void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, | ||
92 | unsigned long nr_segs, size_t count); | ||
89 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, | 93 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, |
90 | size_t maxsize, unsigned maxpages, size_t *start); | 94 | size_t maxsize, unsigned maxpages, size_t *start); |
91 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | 95 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, |
@@ -123,9 +127,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) | |||
123 | { | 127 | { |
124 | i->count = count; | 128 | i->count = count; |
125 | } | 129 | } |
130 | size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | ||
131 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | ||
126 | 132 | ||
127 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); | 133 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); |
128 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); | ||
129 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | 134 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
130 | int offset, int len); | 135 | int offset, int len); |
131 | int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | 136 | int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, |
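Note: iov_iter_kvec() above lets purely kernel-resident buffers go through the same iov_iter machinery as user iovecs. A hedged sketch of wrapping one kernel buffer as a data source and draining it with copy_from_iter(); the direction flag convention (WRITE-direction iterators act as sources, OR'd with ITER_KVEC) and the helper name are assumptions based on contemporaneous callers:

#include <linux/fs.h>	/* READ/WRITE direction constants */
#include <linux/uio.h>

/* Copy len bytes out of a kernel buffer through an ITER_KVEC iterator. */
static size_t foo_copy_out(void *dst, void *src, size_t len)
{
	struct kvec kv = {
		.iov_base = src,
		.iov_len  = len,
	};
	struct iov_iter from;

	iov_iter_kvec(&from, ITER_KVEC | WRITE, &kv, 1, len);
	return copy_from_iter(dst, len, &from);
}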
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index baa81718d985..32c0e83d6239 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h | |||
@@ -35,7 +35,7 @@ struct uio_map; | |||
35 | struct uio_mem { | 35 | struct uio_mem { |
36 | const char *name; | 36 | const char *name; |
37 | phys_addr_t addr; | 37 | phys_addr_t addr; |
38 | unsigned long size; | 38 | resource_size_t size; |
39 | int memtype; | 39 | int memtype; |
40 | void __iomem *internal_addr; | 40 | void __iomem *internal_addr; |
41 | struct uio_map *map; | 41 | struct uio_map *map; |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 447a7e2fc19b..f89c24a03bd9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) | |||
637 | #endif | 637 | #endif |
638 | 638 | ||
639 | /* USB autosuspend and autoresume */ | 639 | /* USB autosuspend and autoresume */ |
640 | #ifdef CONFIG_PM_RUNTIME | 640 | #ifdef CONFIG_PM |
641 | extern void usb_enable_autosuspend(struct usb_device *udev); | 641 | extern void usb_enable_autosuspend(struct usb_device *udev); |
642 | extern void usb_disable_autosuspend(struct usb_device *udev); | 642 | extern void usb_disable_autosuspend(struct usb_device *udev); |
643 | 643 | ||
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index e14c09a45c5a..535997a6681b 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h | |||
@@ -13,11 +13,12 @@ struct ci_hdrc_platform_data { | |||
13 | /* offset of the capability registers */ | 13 | /* offset of the capability registers */ |
14 | uintptr_t capoffset; | 14 | uintptr_t capoffset; |
15 | unsigned power_budget; | 15 | unsigned power_budget; |
16 | struct usb_phy *phy; | 16 | struct phy *phy; |
17 | /* old usb_phy interface */ | ||
18 | struct usb_phy *usb_phy; | ||
17 | enum usb_phy_interface phy_mode; | 19 | enum usb_phy_interface phy_mode; |
18 | unsigned long flags; | 20 | unsigned long flags; |
19 | #define CI_HDRC_REGS_SHARED BIT(0) | 21 | #define CI_HDRC_REGS_SHARED BIT(0) |
20 | #define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1) | ||
21 | #define CI_HDRC_DISABLE_STREAMING BIT(3) | 22 | #define CI_HDRC_DISABLE_STREAMING BIT(3) |
22 | /* | 23 | /* |
23 | * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, | 24 | * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, |
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index c330f5ef42cf..3d87defcc527 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
@@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver( | |||
427 | * @b_vendor_code: bMS_VendorCode part of the OS string | 427 | * @b_vendor_code: bMS_VendorCode part of the OS string |
428 | * @use_os_string: false by default, interested gadgets set it | 428 | * @use_os_string: false by default, interested gadgets set it |
429 | * @os_desc_config: the configuration to be used with OS descriptors | 429 | * @os_desc_config: the configuration to be used with OS descriptors |
430 | * @setup_pending: true when setup request is queued but not completed | ||
431 | * @os_desc_pending: true when os_desc request is queued but not completed | ||
430 | * | 432 | * |
431 | * One of these devices is allocated and initialized before the | 433 | * One of these devices is allocated and initialized before the |
432 | * associated device driver's bind() is called. | 434 | * associated device driver's bind() is called. |
@@ -488,6 +490,9 @@ struct usb_composite_dev { | |||
488 | 490 | ||
489 | /* protects deactivations and delayed_status counts*/ | 491 | /* protects deactivations and delayed_status counts*/ |
490 | spinlock_t lock; | 492 | spinlock_t lock; |
493 | |||
494 | unsigned setup_pending:1; | ||
495 | unsigned os_desc_pending:1; | ||
491 | }; | 496 | }; |
492 | 497 | ||
493 | extern int usb_string_id(struct usb_composite_dev *c); | 498 | extern int usb_string_id(struct usb_composite_dev *c); |
@@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); | |||
501 | extern void composite_disconnect(struct usb_gadget *gadget); | 506 | extern void composite_disconnect(struct usb_gadget *gadget); |
502 | extern int composite_setup(struct usb_gadget *gadget, | 507 | extern int composite_setup(struct usb_gadget *gadget, |
503 | const struct usb_ctrlrequest *ctrl); | 508 | const struct usb_ctrlrequest *ctrl); |
509 | extern void composite_suspend(struct usb_gadget *gadget); | ||
510 | extern void composite_resume(struct usb_gadget *gadget); | ||
504 | 511 | ||
505 | /* | 512 | /* |
506 | * Some systems will need runtime overrides for the product identifiers | 513 | * Some systems will need runtime overrides for the product identifiers |
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h new file mode 100644 index 000000000000..7344d9e591cc --- /dev/null +++ b/include/linux/usb/ehci-dbgp.h | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Standalone EHCI usb debug driver | ||
3 | * | ||
4 | * Originally written by: | ||
5 | * Eric W. Biederman" <ebiederm@xmission.com> and | ||
6 | * Yinghai Lu <yhlu.kernel@gmail.com> | ||
7 | * | ||
8 | * Changes for early/late printk and HW errata: | ||
9 | * Jason Wessel <jason.wessel@windriver.com> | ||
10 | * Copyright (C) 2009 Wind River Systems, Inc. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef __LINUX_USB_EHCI_DBGP_H | ||
15 | #define __LINUX_USB_EHCI_DBGP_H | ||
16 | |||
17 | #include <linux/console.h> | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | /* Appendix C, Debug port ... intended for use with special "debug devices" | ||
21 | * that can help if there's no serial console. (nonstandard enumeration.) | ||
22 | */ | ||
23 | struct ehci_dbg_port { | ||
24 | u32 control; | ||
25 | #define DBGP_OWNER (1<<30) | ||
26 | #define DBGP_ENABLED (1<<28) | ||
27 | #define DBGP_DONE (1<<16) | ||
28 | #define DBGP_INUSE (1<<10) | ||
29 | #define DBGP_ERRCODE(x) (((x)>>7)&0x07) | ||
30 | # define DBGP_ERR_BAD 1 | ||
31 | # define DBGP_ERR_SIGNAL 2 | ||
32 | #define DBGP_ERROR (1<<6) | ||
33 | #define DBGP_GO (1<<5) | ||
34 | #define DBGP_OUT (1<<4) | ||
35 | #define DBGP_LEN(x) (((x)>>0)&0x0f) | ||
36 | u32 pids; | ||
37 | #define DBGP_PID_GET(x) (((x)>>16)&0xff) | ||
38 | #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) | ||
39 | u32 data03; | ||
40 | u32 data47; | ||
41 | u32 address; | ||
42 | #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) | ||
43 | }; | ||
44 | |||
45 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
46 | extern int early_dbgp_init(char *s); | ||
47 | extern struct console early_dbgp_console; | ||
48 | #endif /* CONFIG_EARLY_PRINTK_DBGP */ | ||
49 | |||
50 | struct usb_hcd; | ||
51 | |||
52 | #ifdef CONFIG_XEN_DOM0 | ||
53 | extern int xen_dbgp_reset_prep(struct usb_hcd *); | ||
54 | extern int xen_dbgp_external_startup(struct usb_hcd *); | ||
55 | #else | ||
56 | static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) | ||
57 | { | ||
58 | return 1; /* Shouldn't this be 0? */ | ||
59 | } | ||
60 | |||
61 | static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) | ||
62 | { | ||
63 | return -1; | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
68 | /* Call backs from ehci host driver to ehci debug driver */ | ||
69 | extern int dbgp_external_startup(struct usb_hcd *); | ||
70 | extern int dbgp_reset_prep(struct usb_hcd *); | ||
71 | #else | ||
72 | static inline int dbgp_reset_prep(struct usb_hcd *hcd) | ||
73 | { | ||
74 | return xen_dbgp_reset_prep(hcd); | ||
75 | } | ||
76 | |||
77 | static inline int dbgp_external_startup(struct usb_hcd *hcd) | ||
78 | { | ||
79 | return xen_dbgp_external_startup(hcd); | ||
80 | } | ||
81 | #endif | ||
82 | |||
83 | #endif /* __LINUX_USB_EHCI_DBGP_H */ | ||
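The DBGP_* accessors above pack and unpack fields of the EHCI debug-port control word. As a rough illustration only (the helper below is hypothetical and not part of this header), a caller polling the port might interpret one control-register snapshot like this:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/usb/ehci-dbgp.h>

/* Hypothetical helper: decode one snapshot of ehci_dbg_port.control. */
static int dbgp_decode_control(u32 ctrl)
{
	if (!(ctrl & DBGP_DONE))
		return -EBUSY;			/* transaction still in flight */
	if (ctrl & DBGP_ERROR)
		return DBGP_ERRCODE(ctrl) == DBGP_ERR_BAD ? -EIO : -EPROTO;
	return DBGP_LEN(ctrl);			/* bytes transferred on success */
}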
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index daec99af5d54..966889a20ea3 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h | |||
@@ -19,6 +19,8 @@ | |||
19 | #ifndef __LINUX_USB_EHCI_DEF_H | 19 | #ifndef __LINUX_USB_EHCI_DEF_H |
20 | #define __LINUX_USB_EHCI_DEF_H | 20 | #define __LINUX_USB_EHCI_DEF_H |
21 | 21 | ||
22 | #include <linux/usb/ehci-dbgp.h> | ||
23 | |||
22 | /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ | 24 | /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ |
23 | 25 | ||
24 | /* Section 2.2 Host Controller Capability Registers */ | 26 | /* Section 2.2 Host Controller Capability Registers */ |
@@ -190,67 +192,4 @@ struct ehci_regs { | |||
190 | #define USBMODE_EX_HC (3<<0) /* host controller mode */ | 192 | #define USBMODE_EX_HC (3<<0) /* host controller mode */ |
191 | }; | 193 | }; |
192 | 194 | ||
193 | /* Appendix C, Debug port ... intended for use with special "debug devices" | ||
194 | * that can help if there's no serial console. (nonstandard enumeration.) | ||
195 | */ | ||
196 | struct ehci_dbg_port { | ||
197 | u32 control; | ||
198 | #define DBGP_OWNER (1<<30) | ||
199 | #define DBGP_ENABLED (1<<28) | ||
200 | #define DBGP_DONE (1<<16) | ||
201 | #define DBGP_INUSE (1<<10) | ||
202 | #define DBGP_ERRCODE(x) (((x)>>7)&0x07) | ||
203 | # define DBGP_ERR_BAD 1 | ||
204 | # define DBGP_ERR_SIGNAL 2 | ||
205 | #define DBGP_ERROR (1<<6) | ||
206 | #define DBGP_GO (1<<5) | ||
207 | #define DBGP_OUT (1<<4) | ||
208 | #define DBGP_LEN(x) (((x)>>0)&0x0f) | ||
209 | u32 pids; | ||
210 | #define DBGP_PID_GET(x) (((x)>>16)&0xff) | ||
211 | #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) | ||
212 | u32 data03; | ||
213 | u32 data47; | ||
214 | u32 address; | ||
215 | #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) | ||
216 | }; | ||
217 | |||
218 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
219 | #include <linux/init.h> | ||
220 | extern int __init early_dbgp_init(char *s); | ||
221 | extern struct console early_dbgp_console; | ||
222 | #endif /* CONFIG_EARLY_PRINTK_DBGP */ | ||
223 | |||
224 | struct usb_hcd; | ||
225 | |||
226 | #ifdef CONFIG_XEN_DOM0 | ||
227 | extern int xen_dbgp_reset_prep(struct usb_hcd *); | ||
228 | extern int xen_dbgp_external_startup(struct usb_hcd *); | ||
229 | #else | ||
230 | static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) | ||
231 | { | ||
232 | return 1; /* Shouldn't this be 0? */ | ||
233 | } | ||
234 | |||
235 | static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) | ||
236 | { | ||
237 | return -1; | ||
238 | } | ||
239 | #endif | ||
240 | |||
241 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
242 | /* Call backs from ehci host driver to ehci debug driver */ | ||
243 | extern int dbgp_external_startup(struct usb_hcd *); | ||
244 | extern int dbgp_reset_prep(struct usb_hcd *hcd); | ||
245 | #else | ||
246 | static inline int dbgp_reset_prep(struct usb_hcd *hcd) | ||
247 | { | ||
248 | return xen_dbgp_reset_prep(hcd); | ||
249 | } | ||
250 | static inline int dbgp_external_startup(struct usb_hcd *hcd) | ||
251 | { | ||
252 | return xen_dbgp_external_startup(hcd); | ||
253 | } | ||
254 | #endif | ||
255 | |||
256 | #endif /* __LINUX_USB_EHCI_DEF_H */ | 195 | #endif /* __LINUX_USB_EHCI_DEF_H */ |
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 522cafe26790..70ddb3943b62 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
@@ -490,8 +490,7 @@ struct usb_gadget_ops { | |||
490 | void (*get_config_params)(struct usb_dcd_config_params *); | 490 | void (*get_config_params)(struct usb_dcd_config_params *); |
491 | int (*udc_start)(struct usb_gadget *, | 491 | int (*udc_start)(struct usb_gadget *, |
492 | struct usb_gadget_driver *); | 492 | struct usb_gadget_driver *); |
493 | int (*udc_stop)(struct usb_gadget *, | 493 | int (*udc_stop)(struct usb_gadget *); |
494 | struct usb_gadget_driver *); | ||
495 | }; | 494 | }; |
496 | 495 | ||
497 | /** | 496 | /** |
@@ -925,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent, | |||
925 | struct usb_gadget *gadget, void (*release)(struct device *dev)); | 924 | struct usb_gadget *gadget, void (*release)(struct device *dev)); |
926 | extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); | 925 | extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); |
927 | extern void usb_del_gadget_udc(struct usb_gadget *gadget); | 926 | extern void usb_del_gadget_udc(struct usb_gadget *gadget); |
928 | extern int udc_attach_driver(const char *name, | 927 | extern int usb_udc_attach_driver(const char *name, |
929 | struct usb_gadget_driver *driver); | 928 | struct usb_gadget_driver *driver); |
930 | 929 | ||
931 | /*-------------------------------------------------------------------------*/ | 930 | /*-------------------------------------------------------------------------*/ |
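With udc_stop() losing its gadget-driver argument and udc_attach_driver() renamed to usb_udc_attach_driver(), a UDC driver's ops table now looks roughly like the sketch below (all my_* names are placeholders, not real drivers):

#include <linux/usb/gadget.h>

/* Hypothetical UDC driver fragment matching the new callback signatures. */
static int my_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)
{
	/* enable the controller and remember 'driver' in private data */
	return 0;
}

static int my_udc_stop(struct usb_gadget *g)
{
	/* quiesce the controller; no driver pointer is passed any more */
	return 0;
}

static const struct usb_gadget_ops my_udc_ops = {
	.udc_start	= my_udc_start,
	.udc_stop	= my_udc_stop,
};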
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd96a2bc3388..086bf13307e6 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -93,7 +93,7 @@ struct usb_hcd { | |||
93 | 93 | ||
94 | struct timer_list rh_timer; /* drives root-hub polling */ | 94 | struct timer_list rh_timer; /* drives root-hub polling */ |
95 | struct urb *status_urb; /* the current status urb */ | 95 | struct urb *status_urb; /* the current status urb */ |
96 | #ifdef CONFIG_PM_RUNTIME | 96 | #ifdef CONFIG_PM |
97 | struct work_struct wakeup_work; /* for remote wakeup */ | 97 | struct work_struct wakeup_work; /* for remote wakeup */ |
98 | #endif | 98 | #endif |
99 | 99 | ||
@@ -379,6 +379,9 @@ struct hc_driver { | |||
379 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, | 379 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, |
380 | struct usb_device *, enum usb3_link_state state); | 380 | struct usb_device *, enum usb3_link_state state); |
381 | int (*find_raw_port_number)(struct usb_hcd *, int); | 381 | int (*find_raw_port_number)(struct usb_hcd *, int); |
382 | /* Call for power on/off the port if necessary */ | ||
383 | int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); | ||
384 | |||
382 | }; | 385 | }; |
383 | 386 | ||
384 | static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) | 387 | static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) |
@@ -625,16 +628,13 @@ extern int usb_find_interface_driver(struct usb_device *dev, | |||
625 | extern void usb_root_hub_lost_power(struct usb_device *rhdev); | 628 | extern void usb_root_hub_lost_power(struct usb_device *rhdev); |
626 | extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); | 629 | extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); |
627 | extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); | 630 | extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); |
628 | #endif /* CONFIG_PM */ | ||
629 | |||
630 | #ifdef CONFIG_PM_RUNTIME | ||
631 | extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); | 631 | extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); |
632 | #else | 632 | #else |
633 | static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) | 633 | static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) |
634 | { | 634 | { |
635 | return; | 635 | return; |
636 | } | 636 | } |
637 | #endif /* CONFIG_PM_RUNTIME */ | 637 | #endif /* CONFIG_PM */ |
638 | 638 | ||
639 | /*-------------------------------------------------------------------------*/ | 639 | /*-------------------------------------------------------------------------*/ |
640 | 640 | ||
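The new ->port_power hook lets the core ask a host-controller driver to switch power on an individual root-hub port. A minimal, hypothetical way to wire it up (my_* names are placeholders):

#include <linux/usb/hcd.h>

/* Hypothetical HCD fragment implementing the new ->port_power callback. */
static int my_hcd_port_power(struct usb_hcd *hcd, int portnum, bool enable)
{
	/* program the controller's per-port power control here */
	return 0;
}

static const struct hc_driver my_hc_driver = {
	.description	= "my-hcd",
	.port_power	= my_hcd_port_power,
};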
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 154332b7c8c0..52661c5da690 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
@@ -9,15 +9,20 @@ | |||
9 | #ifndef __LINUX_USB_OTG_H | 9 | #ifndef __LINUX_USB_OTG_H |
10 | #define __LINUX_USB_OTG_H | 10 | #define __LINUX_USB_OTG_H |
11 | 11 | ||
12 | #include <linux/phy/phy.h> | ||
12 | #include <linux/usb/phy.h> | 13 | #include <linux/usb/phy.h> |
13 | 14 | ||
14 | struct usb_otg { | 15 | struct usb_otg { |
15 | u8 default_a; | 16 | u8 default_a; |
16 | 17 | ||
17 | struct usb_phy *phy; | 18 | struct phy *phy; |
19 | /* old usb_phy interface */ | ||
20 | struct usb_phy *usb_phy; | ||
18 | struct usb_bus *host; | 21 | struct usb_bus *host; |
19 | struct usb_gadget *gadget; | 22 | struct usb_gadget *gadget; |
20 | 23 | ||
24 | enum usb_otg_state state; | ||
25 | |||
21 | /* bind/unbind the host controller */ | 26 | /* bind/unbind the host controller */ |
22 | int (*set_host)(struct usb_otg *otg, struct usb_bus *host); | 27 | int (*set_host)(struct usb_otg *otg, struct usb_bus *host); |
23 | 28 | ||
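struct usb_otg now carries the OTG state itself and points at a generic PHY, keeping the old usb_phy pointer only for legacy users. A hypothetical state-machine fragment therefore updates otg->state directly:

#include <linux/usb/otg.h>

/* Hypothetical OTG fragment: the FSM state now lives in struct usb_otg. */
static void my_otg_enter_peripheral(struct usb_otg *otg)
{
	otg->state = OTG_STATE_B_PERIPHERAL;	/* was tracked in usb_phy before */
}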
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index 353053a33f21..f499c23e6342 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h | |||
@@ -77,7 +77,6 @@ struct usb_phy { | |||
77 | unsigned int flags; | 77 | unsigned int flags; |
78 | 78 | ||
79 | enum usb_phy_type type; | 79 | enum usb_phy_type type; |
80 | enum usb_otg_state state; | ||
81 | enum usb_phy_events last_event; | 80 | enum usb_phy_events last_event; |
82 | 81 | ||
83 | struct usb_otg *otg; | 82 | struct usb_otg *otg; |
@@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *); | |||
210 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); | 209 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); |
211 | extern int usb_bind_phy(const char *dev_name, u8 index, | 210 | extern int usb_bind_phy(const char *dev_name, u8 index, |
212 | const char *phy_dev_name); | 211 | const char *phy_dev_name); |
212 | extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); | ||
213 | #else | 213 | #else |
214 | static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) | 214 | static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) |
215 | { | 215 | { |
@@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index, | |||
251 | { | 251 | { |
252 | return -EOPNOTSUPP; | 252 | return -EOPNOTSUPP; |
253 | } | 253 | } |
254 | |||
255 | static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) | ||
256 | { | ||
257 | } | ||
254 | #endif | 258 | #endif |
255 | 259 | ||
256 | static inline int | 260 | static inline int |
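The new usb_phy_set_event() helper records the most recent event on a PHY (and is a stub when USB PHY support is disabled). A hypothetical PHY driver interrupt handler might use it like this:

#include <linux/interrupt.h>
#include <linux/usb/phy.h>

/* Hypothetical PHY driver fragment: record a VBUS event via the new helper. */
static irqreturn_t my_phy_vbus_irq(int irq, void *data)
{
	struct usb_phy *phy = data;

	usb_phy_set_event(phy, USB_EVENT_VBUS);
	return IRQ_HANDLED;
}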
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d5952bb66752..9fd9e481ea98 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
@@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param { | |||
145 | int d0_rx_id; | 145 | int d0_rx_id; |
146 | int d1_tx_id; | 146 | int d1_tx_id; |
147 | int d1_rx_id; | 147 | int d1_rx_id; |
148 | int d2_tx_id; | ||
149 | int d2_rx_id; | ||
150 | int d3_tx_id; | ||
151 | int d3_rx_id; | ||
148 | 152 | ||
149 | /* | 153 | /* |
150 | * option: | 154 | * option: |
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index a4c9547aae64..f8e76e08ebe4 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h | |||
@@ -15,8 +15,6 @@ | |||
15 | #define _LINUX_VEXPRESS_H | 15 | #define _LINUX_VEXPRESS_H |
16 | 16 | ||
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/regmap.h> | 18 | #include <linux/regmap.h> |
21 | 19 | ||
22 | #define VEXPRESS_SITE_MB 0 | 20 | #define VEXPRESS_SITE_MB 0 |
@@ -24,13 +22,6 @@ | |||
24 | #define VEXPRESS_SITE_DB2 2 | 22 | #define VEXPRESS_SITE_DB2 2 |
25 | #define VEXPRESS_SITE_MASTER 0xf | 23 | #define VEXPRESS_SITE_MASTER 0xf |
26 | 24 | ||
27 | #define VEXPRESS_RES_FUNC(_site, _func) \ | ||
28 | { \ | ||
29 | .start = (_site), \ | ||
30 | .end = (_func), \ | ||
31 | .flags = IORESOURCE_BUS, \ | ||
32 | } | ||
33 | |||
34 | /* Config infrastructure */ | 25 | /* Config infrastructure */ |
35 | 26 | ||
36 | void vexpress_config_set_master(u32 site); | 27 | void vexpress_config_set_master(u32 site); |
@@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); | |||
58 | 49 | ||
59 | /* Platform control */ | 50 | /* Platform control */ |
60 | 51 | ||
61 | unsigned int vexpress_get_mci_cardin(struct device *dev); | ||
62 | u32 vexpress_get_procid(int site); | ||
63 | void *vexpress_get_24mhz_clock_base(void); | ||
64 | void vexpress_flags_set(u32 data); | 52 | void vexpress_flags_set(u32 data); |
65 | 53 | ||
66 | void vexpress_sysreg_early_init(void __iomem *base); | ||
67 | int vexpress_syscfg_device_register(struct platform_device *pdev); | ||
68 | |||
69 | /* Clocks */ | ||
70 | |||
71 | void vexpress_clk_init(void __iomem *sp810_base); | ||
72 | |||
73 | #endif | 54 | #endif |
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 65261a7244fc..d09e0938fd60 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
@@ -75,6 +75,9 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq); | |||
75 | 75 | ||
76 | bool virtqueue_is_broken(struct virtqueue *vq); | 76 | bool virtqueue_is_broken(struct virtqueue *vq); |
77 | 77 | ||
78 | void *virtqueue_get_avail(struct virtqueue *vq); | ||
79 | void *virtqueue_get_used(struct virtqueue *vq); | ||
80 | |||
78 | /** | 81 | /** |
79 | * virtio_device - representation of a device using virtio | 82 | * virtio_device - representation of a device using virtio |
80 | * @index: unique position on the virtio bus | 83 | * @index: unique position on the virtio bus |
@@ -101,11 +104,12 @@ struct virtio_device { | |||
101 | const struct virtio_config_ops *config; | 104 | const struct virtio_config_ops *config; |
102 | const struct vringh_config_ops *vringh_config; | 105 | const struct vringh_config_ops *vringh_config; |
103 | struct list_head vqs; | 106 | struct list_head vqs; |
104 | /* Note that this is a Linux set_bit-style bitmap. */ | 107 | u64 features; |
105 | unsigned long features[1]; | ||
106 | void *priv; | 108 | void *priv; |
107 | }; | 109 | }; |
108 | 110 | ||
111 | bool virtio_device_is_legacy_only(struct virtio_device_id id); | ||
112 | |||
109 | static inline struct virtio_device *dev_to_virtio(struct device *_dev) | 113 | static inline struct virtio_device *dev_to_virtio(struct device *_dev) |
110 | { | 114 | { |
111 | return container_of(_dev, struct virtio_device, dev); | 115 | return container_of(_dev, struct virtio_device, dev); |
@@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev); | |||
128 | * @id_table: the ids serviced by this driver. | 132 | * @id_table: the ids serviced by this driver. |
129 | * @feature_table: an array of feature numbers supported by this driver. | 133 | * @feature_table: an array of feature numbers supported by this driver. |
130 | * @feature_table_size: number of entries in the feature table array. | 134 | * @feature_table_size: number of entries in the feature table array. |
135 | * @feature_table_legacy: same as feature_table but when working in legacy mode. | ||
136 | * @feature_table_size_legacy: number of entries in feature table legacy array. | ||
131 | * @probe: the function to call when a device is found. Returns 0 or -errno. | 137 | * @probe: the function to call when a device is found. Returns 0 or -errno. |
132 | * @remove: the function to call when a device is removed. | 138 | * @remove: the function to call when a device is removed. |
133 | * @config_changed: optional function to call when the device configuration | 139 | * @config_changed: optional function to call when the device configuration |
@@ -138,6 +144,8 @@ struct virtio_driver { | |||
138 | const struct virtio_device_id *id_table; | 144 | const struct virtio_device_id *id_table; |
139 | const unsigned int *feature_table; | 145 | const unsigned int *feature_table; |
140 | unsigned int feature_table_size; | 146 | unsigned int feature_table_size; |
147 | const unsigned int *feature_table_legacy; | ||
148 | unsigned int feature_table_size_legacy; | ||
141 | int (*probe)(struct virtio_device *dev); | 149 | int (*probe)(struct virtio_device *dev); |
142 | void (*scan)(struct virtio_device *dev); | 150 | void (*scan)(struct virtio_device *dev); |
143 | void (*remove)(struct virtio_device *dev); | 151 | void (*remove)(struct virtio_device *dev); |
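With device features widened to a plain u64 and a separate legacy feature table in struct virtio_driver, a driver that supports both VIRTIO 1.0 and legacy devices might declare its tables roughly as follows (driver name and feature choices are illustrative only):

#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static const unsigned int my_features[] = {
	VIRTIO_F_VERSION_1,		/* modern (1.0) devices */
};

static const unsigned int my_features_legacy[] = {
	VIRTIO_F_ANY_LAYOUT,		/* meaningful to legacy devices only */
};

static struct virtio_driver my_virtio_driver = {
	.feature_table			= my_features,
	.feature_table_size		= ARRAY_SIZE(my_features),
	.feature_table_legacy		= my_features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(my_features_legacy),
	/* .id_table, .probe, .remove, ... */
};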
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h new file mode 100644 index 000000000000..51865d05b267 --- /dev/null +++ b/include/linux/virtio_byteorder.h | |||
@@ -0,0 +1,59 @@ | |||
1 | #ifndef _LINUX_VIRTIO_BYTEORDER_H | ||
2 | #define _LINUX_VIRTIO_BYTEORDER_H | ||
3 | #include <linux/types.h> | ||
4 | #include <uapi/linux/virtio_types.h> | ||
5 | |||
6 | /* | ||
7 | * Low-level memory accessors for handling virtio in modern little endian and in | ||
8 | * compatibility native endian format. | ||
9 | */ | ||
10 | |||
11 | static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) | ||
12 | { | ||
13 | if (little_endian) | ||
14 | return le16_to_cpu((__force __le16)val); | ||
15 | else | ||
16 | return (__force u16)val; | ||
17 | } | ||
18 | |||
19 | static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) | ||
20 | { | ||
21 | if (little_endian) | ||
22 | return (__force __virtio16)cpu_to_le16(val); | ||
23 | else | ||
24 | return (__force __virtio16)val; | ||
25 | } | ||
26 | |||
27 | static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) | ||
28 | { | ||
29 | if (little_endian) | ||
30 | return le32_to_cpu((__force __le32)val); | ||
31 | else | ||
32 | return (__force u32)val; | ||
33 | } | ||
34 | |||
35 | static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) | ||
36 | { | ||
37 | if (little_endian) | ||
38 | return (__force __virtio32)cpu_to_le32(val); | ||
39 | else | ||
40 | return (__force __virtio32)val; | ||
41 | } | ||
42 | |||
43 | static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) | ||
44 | { | ||
45 | if (little_endian) | ||
46 | return le64_to_cpu((__force __le64)val); | ||
47 | else | ||
48 | return (__force u64)val; | ||
49 | } | ||
50 | |||
51 | static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) | ||
52 | { | ||
53 | if (little_endian) | ||
54 | return (__force __virtio64)cpu_to_le64(val); | ||
55 | else | ||
56 | return (__force __virtio64)val; | ||
57 | } | ||
58 | |||
59 | #endif /* _LINUX_VIRTIO_BYTEORDER */ | ||
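These accessors take an explicit flag: VIRTIO 1.0 ("modern") devices are always little endian on the wire, while legacy devices use native endianness. A trivial, purely illustrative round trip:

#include <linux/virtio_byteorder.h>

/* Illustration only: convert a value to wire format and back. */
static u16 demo_virtio16_roundtrip(bool modern)
{
	__virtio16 wire = __cpu_to_virtio16(modern, 0x1234);

	return __virtio16_to_cpu(modern, wire);		/* always 0x1234 */
}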
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7f4ef66873ef..7979f850e7ac 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
5 | #include <linux/bug.h> | 5 | #include <linux/bug.h> |
6 | #include <linux/virtio.h> | 6 | #include <linux/virtio.h> |
7 | #include <linux/virtio_byteorder.h> | ||
7 | #include <uapi/linux/virtio_config.h> | 8 | #include <uapi/linux/virtio_config.h> |
8 | 9 | ||
9 | /** | 10 | /** |
@@ -46,6 +47,7 @@ | |||
46 | * vdev: the virtio_device | 47 | * vdev: the virtio_device |
47 | * This gives the final feature bits for the device: it can change | 48 | * This gives the final feature bits for the device: it can change |
48 | * the dev->feature bits if it wants. | 49 | * the dev->feature bits if it wants. |
50 | * Returns 0 on success or error status | ||
49 | * @bus_name: return the bus name associated with the device | 51 | * @bus_name: return the bus name associated with the device |
50 | * vdev: the virtio_device | 52 | * vdev: the virtio_device |
51 | * This returns a pointer to the bus name a la pci_name from which | 53 | * This returns a pointer to the bus name a la pci_name from which |
@@ -66,8 +68,8 @@ struct virtio_config_ops { | |||
66 | vq_callback_t *callbacks[], | 68 | vq_callback_t *callbacks[], |
67 | const char *names[]); | 69 | const char *names[]); |
68 | void (*del_vqs)(struct virtio_device *); | 70 | void (*del_vqs)(struct virtio_device *); |
69 | u32 (*get_features)(struct virtio_device *vdev); | 71 | u64 (*get_features)(struct virtio_device *vdev); |
70 | void (*finalize_features)(struct virtio_device *vdev); | 72 | int (*finalize_features)(struct virtio_device *vdev); |
71 | const char *(*bus_name)(struct virtio_device *vdev); | 73 | const char *(*bus_name)(struct virtio_device *vdev); |
72 | int (*set_vq_affinity)(struct virtqueue *vq, int cpu); | 74 | int (*set_vq_affinity)(struct virtqueue *vq, int cpu); |
73 | }; | 75 | }; |
@@ -77,23 +79,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, | |||
77 | unsigned int fbit); | 79 | unsigned int fbit); |
78 | 80 | ||
79 | /** | 81 | /** |
80 | * virtio_has_feature - helper to determine if this device has this feature. | 82 | * __virtio_test_bit - helper to test feature bits. For use by transports. |
83 | * Devices should normally use virtio_has_feature, | ||
84 | * which includes more checks. | ||
81 | * @vdev: the device | 85 | * @vdev: the device |
82 | * @fbit: the feature bit | 86 | * @fbit: the feature bit |
83 | */ | 87 | */ |
84 | static inline bool virtio_has_feature(const struct virtio_device *vdev, | 88 | static inline bool __virtio_test_bit(const struct virtio_device *vdev, |
89 | unsigned int fbit) | ||
90 | { | ||
91 | /* Did you forget to fix assumptions on max features? */ | ||
92 | if (__builtin_constant_p(fbit)) | ||
93 | BUILD_BUG_ON(fbit >= 64); | ||
94 | else | ||
95 | BUG_ON(fbit >= 64); | ||
96 | |||
97 | return vdev->features & BIT_ULL(fbit); | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * __virtio_set_bit - helper to set feature bits. For use by transports. | ||
102 | * @vdev: the device | ||
103 | * @fbit: the feature bit | ||
104 | */ | ||
105 | static inline void __virtio_set_bit(struct virtio_device *vdev, | ||
106 | unsigned int fbit) | ||
107 | { | ||
108 | /* Did you forget to fix assumptions on max features? */ | ||
109 | if (__builtin_constant_p(fbit)) | ||
110 | BUILD_BUG_ON(fbit >= 64); | ||
111 | else | ||
112 | BUG_ON(fbit >= 64); | ||
113 | |||
114 | vdev->features |= BIT_ULL(fbit); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * __virtio_clear_bit - helper to clear feature bits. For use by transports. | ||
119 | * @vdev: the device | ||
120 | * @fbit: the feature bit | ||
121 | */ | ||
122 | static inline void __virtio_clear_bit(struct virtio_device *vdev, | ||
85 | unsigned int fbit) | 123 | unsigned int fbit) |
86 | { | 124 | { |
87 | /* Did you forget to fix assumptions on max features? */ | 125 | /* Did you forget to fix assumptions on max features? */ |
88 | if (__builtin_constant_p(fbit)) | 126 | if (__builtin_constant_p(fbit)) |
89 | BUILD_BUG_ON(fbit >= 32); | 127 | BUILD_BUG_ON(fbit >= 64); |
90 | else | 128 | else |
91 | BUG_ON(fbit >= 32); | 129 | BUG_ON(fbit >= 64); |
92 | 130 | ||
131 | vdev->features &= ~BIT_ULL(fbit); | ||
132 | } | ||
133 | |||
134 | /** | ||
135 | * virtio_has_feature - helper to determine if this device has this feature. | ||
136 | * @vdev: the device | ||
137 | * @fbit: the feature bit | ||
138 | */ | ||
139 | static inline bool virtio_has_feature(const struct virtio_device *vdev, | ||
140 | unsigned int fbit) | ||
141 | { | ||
93 | if (fbit < VIRTIO_TRANSPORT_F_START) | 142 | if (fbit < VIRTIO_TRANSPORT_F_START) |
94 | virtio_check_driver_offered_feature(vdev, fbit); | 143 | virtio_check_driver_offered_feature(vdev, fbit); |
95 | 144 | ||
96 | return test_bit(fbit, vdev->features); | 145 | return __virtio_test_bit(vdev, fbit); |
97 | } | 146 | } |
98 | 147 | ||
99 | static inline | 148 | static inline |
@@ -152,6 +201,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) | |||
152 | return 0; | 201 | return 0; |
153 | } | 202 | } |
154 | 203 | ||
204 | /* Memory accessors */ | ||
205 | static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) | ||
206 | { | ||
207 | return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
208 | } | ||
209 | |||
210 | static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) | ||
211 | { | ||
212 | return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
213 | } | ||
214 | |||
215 | static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) | ||
216 | { | ||
217 | return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
218 | } | ||
219 | |||
220 | static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) | ||
221 | { | ||
222 | return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
223 | } | ||
224 | |||
225 | static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) | ||
226 | { | ||
227 | return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
228 | } | ||
229 | |||
230 | static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | ||
231 | { | ||
232 | return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
233 | } | ||
234 | |||
155 | /* Config space accessors. */ | 235 | /* Config space accessors. */ |
156 | #define virtio_cread(vdev, structname, member, ptr) \ | 236 | #define virtio_cread(vdev, structname, member, ptr) \ |
157 | do { \ | 237 | do { \ |
@@ -239,12 +319,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, | |||
239 | { | 319 | { |
240 | u16 ret; | 320 | u16 ret; |
241 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 321 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
242 | return ret; | 322 | return virtio16_to_cpu(vdev, (__force __virtio16)ret); |
243 | } | 323 | } |
244 | 324 | ||
245 | static inline void virtio_cwrite16(struct virtio_device *vdev, | 325 | static inline void virtio_cwrite16(struct virtio_device *vdev, |
246 | unsigned int offset, u16 val) | 326 | unsigned int offset, u16 val) |
247 | { | 327 | { |
328 | val = (__force u16)cpu_to_virtio16(vdev, val); | ||
248 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 329 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
249 | } | 330 | } |
250 | 331 | ||
@@ -253,12 +334,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, | |||
253 | { | 334 | { |
254 | u32 ret; | 335 | u32 ret; |
255 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 336 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
256 | return ret; | 337 | return virtio32_to_cpu(vdev, (__force __virtio32)ret); |
257 | } | 338 | } |
258 | 339 | ||
259 | static inline void virtio_cwrite32(struct virtio_device *vdev, | 340 | static inline void virtio_cwrite32(struct virtio_device *vdev, |
260 | unsigned int offset, u32 val) | 341 | unsigned int offset, u32 val) |
261 | { | 342 | { |
343 | val = (__force u32)cpu_to_virtio32(vdev, val); | ||
262 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 344 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
263 | } | 345 | } |
264 | 346 | ||
@@ -267,12 +349,13 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, | |||
267 | { | 349 | { |
268 | u64 ret; | 350 | u64 ret; |
269 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 351 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
270 | return ret; | 352 | return virtio64_to_cpu(vdev, (__force __virtio64)ret); |
271 | } | 353 | } |
272 | 354 | ||
273 | static inline void virtio_cwrite64(struct virtio_device *vdev, | 355 | static inline void virtio_cwrite64(struct virtio_device *vdev, |
274 | unsigned int offset, u64 val) | 356 | unsigned int offset, u64 val) |
275 | { | 357 | { |
358 | val = (__force u64)cpu_to_virtio64(vdev, val); | ||
276 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 359 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
277 | } | 360 | } |
278 | 361 | ||
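Transports now report a u64 feature set and ->finalize_features() returns a status, with the __virtio_*_bit() helpers manipulating vdev->features directly. A hypothetical transport that cannot negotiate VIRTIO 1.0 might finalize features like this:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical transport fragment using the new 64-bit feature helpers. */
static int my_transport_finalize_features(struct virtio_device *vdev)
{
	/* this (made-up) transport has no VIRTIO 1.0 support */
	__virtio_clear_bit(vdev, VIRTIO_F_VERSION_1);
	return 0;
}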
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h deleted file mode 100644 index de429d1f4357..000000000000 --- a/include/linux/virtio_scsi.h +++ /dev/null | |||
@@ -1,162 +0,0 @@ | |||
1 | /* | ||
2 | * This header is BSD licensed so anyone can use the definitions to implement | ||
3 | * compatible drivers/servers. | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions | ||
7 | * are met: | ||
8 | * 1. Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * | ||
14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND | ||
15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE | ||
18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
24 | * SUCH DAMAGE. | ||
25 | */ | ||
26 | |||
27 | #ifndef _LINUX_VIRTIO_SCSI_H | ||
28 | #define _LINUX_VIRTIO_SCSI_H | ||
29 | |||
30 | #define VIRTIO_SCSI_CDB_SIZE 32 | ||
31 | #define VIRTIO_SCSI_SENSE_SIZE 96 | ||
32 | |||
33 | /* SCSI command request, followed by data-out */ | ||
34 | struct virtio_scsi_cmd_req { | ||
35 | u8 lun[8]; /* Logical Unit Number */ | ||
36 | u64 tag; /* Command identifier */ | ||
37 | u8 task_attr; /* Task attribute */ | ||
38 | u8 prio; /* SAM command priority field */ | ||
39 | u8 crn; | ||
40 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | ||
41 | } __packed; | ||
42 | |||
43 | /* SCSI command request, followed by protection information */ | ||
44 | struct virtio_scsi_cmd_req_pi { | ||
45 | u8 lun[8]; /* Logical Unit Number */ | ||
46 | u64 tag; /* Command identifier */ | ||
47 | u8 task_attr; /* Task attribute */ | ||
48 | u8 prio; /* SAM command priority field */ | ||
49 | u8 crn; | ||
50 | u32 pi_bytesout; /* DataOUT PI Number of bytes */ | ||
51 | u32 pi_bytesin; /* DataIN PI Number of bytes */ | ||
52 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | ||
53 | } __packed; | ||
54 | |||
55 | /* Response, followed by sense data and data-in */ | ||
56 | struct virtio_scsi_cmd_resp { | ||
57 | u32 sense_len; /* Sense data length */ | ||
58 | u32 resid; /* Residual bytes in data buffer */ | ||
59 | u16 status_qualifier; /* Status qualifier */ | ||
60 | u8 status; /* Command completion status */ | ||
61 | u8 response; /* Response values */ | ||
62 | u8 sense[VIRTIO_SCSI_SENSE_SIZE]; | ||
63 | } __packed; | ||
64 | |||
65 | /* Task Management Request */ | ||
66 | struct virtio_scsi_ctrl_tmf_req { | ||
67 | u32 type; | ||
68 | u32 subtype; | ||
69 | u8 lun[8]; | ||
70 | u64 tag; | ||
71 | } __packed; | ||
72 | |||
73 | struct virtio_scsi_ctrl_tmf_resp { | ||
74 | u8 response; | ||
75 | } __packed; | ||
76 | |||
77 | /* Asynchronous notification query/subscription */ | ||
78 | struct virtio_scsi_ctrl_an_req { | ||
79 | u32 type; | ||
80 | u8 lun[8]; | ||
81 | u32 event_requested; | ||
82 | } __packed; | ||
83 | |||
84 | struct virtio_scsi_ctrl_an_resp { | ||
85 | u32 event_actual; | ||
86 | u8 response; | ||
87 | } __packed; | ||
88 | |||
89 | struct virtio_scsi_event { | ||
90 | u32 event; | ||
91 | u8 lun[8]; | ||
92 | u32 reason; | ||
93 | } __packed; | ||
94 | |||
95 | struct virtio_scsi_config { | ||
96 | u32 num_queues; | ||
97 | u32 seg_max; | ||
98 | u32 max_sectors; | ||
99 | u32 cmd_per_lun; | ||
100 | u32 event_info_size; | ||
101 | u32 sense_size; | ||
102 | u32 cdb_size; | ||
103 | u16 max_channel; | ||
104 | u16 max_target; | ||
105 | u32 max_lun; | ||
106 | } __packed; | ||
107 | |||
108 | /* Feature Bits */ | ||
109 | #define VIRTIO_SCSI_F_INOUT 0 | ||
110 | #define VIRTIO_SCSI_F_HOTPLUG 1 | ||
111 | #define VIRTIO_SCSI_F_CHANGE 2 | ||
112 | #define VIRTIO_SCSI_F_T10_PI 3 | ||
113 | |||
114 | /* Response codes */ | ||
115 | #define VIRTIO_SCSI_S_OK 0 | ||
116 | #define VIRTIO_SCSI_S_OVERRUN 1 | ||
117 | #define VIRTIO_SCSI_S_ABORTED 2 | ||
118 | #define VIRTIO_SCSI_S_BAD_TARGET 3 | ||
119 | #define VIRTIO_SCSI_S_RESET 4 | ||
120 | #define VIRTIO_SCSI_S_BUSY 5 | ||
121 | #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6 | ||
122 | #define VIRTIO_SCSI_S_TARGET_FAILURE 7 | ||
123 | #define VIRTIO_SCSI_S_NEXUS_FAILURE 8 | ||
124 | #define VIRTIO_SCSI_S_FAILURE 9 | ||
125 | #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10 | ||
126 | #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11 | ||
127 | #define VIRTIO_SCSI_S_INCORRECT_LUN 12 | ||
128 | |||
129 | /* Controlq type codes. */ | ||
130 | #define VIRTIO_SCSI_T_TMF 0 | ||
131 | #define VIRTIO_SCSI_T_AN_QUERY 1 | ||
132 | #define VIRTIO_SCSI_T_AN_SUBSCRIBE 2 | ||
133 | |||
134 | /* Valid TMF subtypes. */ | ||
135 | #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0 | ||
136 | #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1 | ||
137 | #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2 | ||
138 | #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3 | ||
139 | #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4 | ||
140 | #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5 | ||
141 | #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6 | ||
142 | #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7 | ||
143 | |||
144 | /* Events. */ | ||
145 | #define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000 | ||
146 | #define VIRTIO_SCSI_T_NO_EVENT 0 | ||
147 | #define VIRTIO_SCSI_T_TRANSPORT_RESET 1 | ||
148 | #define VIRTIO_SCSI_T_ASYNC_NOTIFY 2 | ||
149 | #define VIRTIO_SCSI_T_PARAM_CHANGE 3 | ||
150 | |||
151 | /* Reasons of transport reset event */ | ||
152 | #define VIRTIO_SCSI_EVT_RESET_HARD 0 | ||
153 | #define VIRTIO_SCSI_EVT_RESET_RESCAN 1 | ||
154 | #define VIRTIO_SCSI_EVT_RESET_REMOVED 2 | ||
155 | |||
156 | #define VIRTIO_SCSI_S_SIMPLE 0 | ||
157 | #define VIRTIO_SCSI_S_ORDERED 1 | ||
158 | #define VIRTIO_SCSI_S_HEAD 2 | ||
159 | #define VIRTIO_SCSI_S_ACA 3 | ||
160 | |||
161 | |||
162 | #endif /* _LINUX_VIRTIO_SCSI_H */ | ||
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 730334cdf037..9246d32dc973 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
@@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
90 | #ifdef CONFIG_DEBUG_VM_VMACACHE | 90 | #ifdef CONFIG_DEBUG_VM_VMACACHE |
91 | VMACACHE_FIND_CALLS, | 91 | VMACACHE_FIND_CALLS, |
92 | VMACACHE_FIND_HITS, | 92 | VMACACHE_FIND_HITS, |
93 | VMACACHE_FULL_FLUSHES, | ||
93 | #endif | 94 | #endif |
94 | NR_VM_EVENT_ITEMS | 95 | NR_VM_EVENT_ITEMS |
95 | }; | 96 | }; |
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 023430e265fe..5691f752ce8f 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #define VMCI_KERNEL_API_VERSION_2 2 | 24 | #define VMCI_KERNEL_API_VERSION_2 2 |
25 | #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 | 25 | #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 |
26 | 26 | ||
27 | struct msghdr; | ||
27 | typedef void (vmci_device_shutdown_fn) (void *device_registration, | 28 | typedef void (vmci_device_shutdown_fn) (void *device_registration, |
28 | void *user_data); | 29 | void *user_data); |
29 | 30 | ||
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, | |||
75 | ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, | 76 | ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, |
76 | void *iov, size_t iov_size, int mode); | 77 | void *iov, size_t iov_size, int mode); |
77 | ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, | 78 | ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, |
78 | void *iov, size_t iov_size, int mode); | 79 | struct msghdr *msg, size_t iov_size, int mode); |
79 | ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, | 80 | ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, |
80 | int mode); | 81 | int mode); |
81 | 82 | ||
82 | #endif /* !__VMW_VMCI_API_H__ */ | 83 | #endif /* !__VMW_VMCI_API_H__ */ |
diff --git a/include/linux/wait.h b/include/linux/wait.h index e4a8eb9312ea..2232ed16635a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t; | |||
13 | typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); | 13 | typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); |
14 | int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); | 14 | int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); |
15 | 15 | ||
16 | /* __wait_queue::flags */ | ||
17 | #define WQ_FLAG_EXCLUSIVE 0x01 | ||
18 | #define WQ_FLAG_WOKEN 0x02 | ||
19 | |||
16 | struct __wait_queue { | 20 | struct __wait_queue { |
17 | unsigned int flags; | 21 | unsigned int flags; |
18 | #define WQ_FLAG_EXCLUSIVE 0x01 | ||
19 | void *private; | 22 | void *private; |
20 | wait_queue_func_t func; | 23 | wait_queue_func_t func; |
21 | struct list_head task_list; | 24 | struct list_head task_list; |
@@ -258,11 +261,37 @@ __out: __ret; \ | |||
258 | */ | 261 | */ |
259 | #define wait_event(wq, condition) \ | 262 | #define wait_event(wq, condition) \ |
260 | do { \ | 263 | do { \ |
264 | might_sleep(); \ | ||
261 | if (condition) \ | 265 | if (condition) \ |
262 | break; \ | 266 | break; \ |
263 | __wait_event(wq, condition); \ | 267 | __wait_event(wq, condition); \ |
264 | } while (0) | 268 | } while (0) |
265 | 269 | ||
270 | #define __wait_event_freezable(wq, condition) \ | ||
271 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ | ||
272 | schedule(); try_to_freeze()) | ||
273 | |||
274 | /** | ||
275 | * wait_event_freezable - sleep (or freeze) until a condition gets true | ||
276 | * @wq: the waitqueue to wait on | ||
277 | * @condition: a C expression for the event to wait for | ||
278 | * | ||
279 | * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute | ||
280 | * to system load) until the @condition evaluates to true. The | ||
281 | * @condition is checked each time the waitqueue @wq is woken up. | ||
282 | * | ||
283 | * wake_up() has to be called after changing any variable that could | ||
284 | * change the result of the wait condition. | ||
285 | */ | ||
286 | #define wait_event_freezable(wq, condition) \ | ||
287 | ({ \ | ||
288 | int __ret = 0; \ | ||
289 | might_sleep(); \ | ||
290 | if (!(condition)) \ | ||
291 | __ret = __wait_event_freezable(wq, condition); \ | ||
292 | __ret; \ | ||
293 | }) | ||
294 | |||
266 | #define __wait_event_timeout(wq, condition, timeout) \ | 295 | #define __wait_event_timeout(wq, condition, timeout) \ |
267 | ___wait_event(wq, ___wait_cond_timeout(condition), \ | 296 | ___wait_event(wq, ___wait_cond_timeout(condition), \ |
268 | TASK_UNINTERRUPTIBLE, 0, timeout, \ | 297 | TASK_UNINTERRUPTIBLE, 0, timeout, \ |
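wait_event_freezable() behaves like wait_event_interruptible() but also lets the task enter the freezer while it waits. A hypothetical freezable kthread could be structured like this (my_wq and my_work_pending() are placeholders for the real queue and condition):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);		/* hypothetical wait queue */

static int my_kthread(void *data)
{
	set_freezable();
	while (!kthread_should_stop()) {
		/* my_work_pending() stands in for the real wakeup condition */
		wait_event_freezable(my_wq, my_work_pending() ||
					    kthread_should_stop());
		/* process pending work here */
	}
	return 0;
}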
@@ -290,11 +319,30 @@ do { \ | |||
290 | #define wait_event_timeout(wq, condition, timeout) \ | 319 | #define wait_event_timeout(wq, condition, timeout) \ |
291 | ({ \ | 320 | ({ \ |
292 | long __ret = timeout; \ | 321 | long __ret = timeout; \ |
322 | might_sleep(); \ | ||
293 | if (!___wait_cond_timeout(condition)) \ | 323 | if (!___wait_cond_timeout(condition)) \ |
294 | __ret = __wait_event_timeout(wq, condition, timeout); \ | 324 | __ret = __wait_event_timeout(wq, condition, timeout); \ |
295 | __ret; \ | 325 | __ret; \ |
296 | }) | 326 | }) |
297 | 327 | ||
328 | #define __wait_event_freezable_timeout(wq, condition, timeout) \ | ||
329 | ___wait_event(wq, ___wait_cond_timeout(condition), \ | ||
330 | TASK_INTERRUPTIBLE, 0, timeout, \ | ||
331 | __ret = schedule_timeout(__ret); try_to_freeze()) | ||
332 | |||
333 | /* | ||
334 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid | ||
335 | * increasing load and is freezable. | ||
336 | */ | ||
337 | #define wait_event_freezable_timeout(wq, condition, timeout) \ | ||
338 | ({ \ | ||
339 | long __ret = timeout; \ | ||
340 | might_sleep(); \ | ||
341 | if (!___wait_cond_timeout(condition)) \ | ||
342 | __ret = __wait_event_freezable_timeout(wq, condition, timeout); \ | ||
343 | __ret; \ | ||
344 | }) | ||
345 | |||
298 | #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ | 346 | #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ |
299 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ | 347 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
300 | cmd1; schedule(); cmd2) | 348 | cmd1; schedule(); cmd2) |
@@ -315,6 +363,7 @@ do { \ | |||
315 | */ | 363 | */ |
316 | #define wait_event_cmd(wq, condition, cmd1, cmd2) \ | 364 | #define wait_event_cmd(wq, condition, cmd1, cmd2) \ |
317 | do { \ | 365 | do { \ |
366 | might_sleep(); \ | ||
318 | if (condition) \ | 367 | if (condition) \ |
319 | break; \ | 368 | break; \ |
320 | __wait_event_cmd(wq, condition, cmd1, cmd2); \ | 369 | __wait_event_cmd(wq, condition, cmd1, cmd2); \ |
@@ -342,6 +391,7 @@ do { \ | |||
342 | #define wait_event_interruptible(wq, condition) \ | 391 | #define wait_event_interruptible(wq, condition) \ |
343 | ({ \ | 392 | ({ \ |
344 | int __ret = 0; \ | 393 | int __ret = 0; \ |
394 | might_sleep(); \ | ||
345 | if (!(condition)) \ | 395 | if (!(condition)) \ |
346 | __ret = __wait_event_interruptible(wq, condition); \ | 396 | __ret = __wait_event_interruptible(wq, condition); \ |
347 | __ret; \ | 397 | __ret; \ |
@@ -375,6 +425,7 @@ do { \ | |||
375 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ | 425 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ |
376 | ({ \ | 426 | ({ \ |
377 | long __ret = timeout; \ | 427 | long __ret = timeout; \ |
428 | might_sleep(); \ | ||
378 | if (!___wait_cond_timeout(condition)) \ | 429 | if (!___wait_cond_timeout(condition)) \ |
379 | __ret = __wait_event_interruptible_timeout(wq, \ | 430 | __ret = __wait_event_interruptible_timeout(wq, \ |
380 | condition, timeout); \ | 431 | condition, timeout); \ |
@@ -425,6 +476,7 @@ do { \ | |||
425 | #define wait_event_hrtimeout(wq, condition, timeout) \ | 476 | #define wait_event_hrtimeout(wq, condition, timeout) \ |
426 | ({ \ | 477 | ({ \ |
427 | int __ret = 0; \ | 478 | int __ret = 0; \ |
479 | might_sleep(); \ | ||
428 | if (!(condition)) \ | 480 | if (!(condition)) \ |
429 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ | 481 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ |
430 | TASK_UNINTERRUPTIBLE); \ | 482 | TASK_UNINTERRUPTIBLE); \ |
@@ -450,6 +502,7 @@ do { \ | |||
450 | #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ | 502 | #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ |
451 | ({ \ | 503 | ({ \ |
452 | long __ret = 0; \ | 504 | long __ret = 0; \ |
505 | might_sleep(); \ | ||
453 | if (!(condition)) \ | 506 | if (!(condition)) \ |
454 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ | 507 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ |
455 | TASK_INTERRUPTIBLE); \ | 508 | TASK_INTERRUPTIBLE); \ |
@@ -463,12 +516,27 @@ do { \ | |||
463 | #define wait_event_interruptible_exclusive(wq, condition) \ | 516 | #define wait_event_interruptible_exclusive(wq, condition) \ |
464 | ({ \ | 517 | ({ \ |
465 | int __ret = 0; \ | 518 | int __ret = 0; \ |
519 | might_sleep(); \ | ||
466 | if (!(condition)) \ | 520 | if (!(condition)) \ |
467 | __ret = __wait_event_interruptible_exclusive(wq, condition);\ | 521 | __ret = __wait_event_interruptible_exclusive(wq, condition);\ |
468 | __ret; \ | 522 | __ret; \ |
469 | }) | 523 | }) |
470 | 524 | ||
471 | 525 | ||
526 | #define __wait_event_freezable_exclusive(wq, condition) \ | ||
527 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ | ||
528 | schedule(); try_to_freeze()) | ||
529 | |||
530 | #define wait_event_freezable_exclusive(wq, condition) \ | ||
531 | ({ \ | ||
532 | int __ret = 0; \ | ||
533 | might_sleep(); \ | ||
534 | if (!(condition)) \ | ||
535 | __ret = __wait_event_freezable_exclusive(wq, condition);\ | ||
536 | __ret; \ | ||
537 | }) | ||
538 | |||
539 | |||
472 | #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ | 540 | #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ |
473 | ({ \ | 541 | ({ \ |
474 | int __ret = 0; \ | 542 | int __ret = 0; \ |
@@ -637,6 +705,7 @@ do { \ | |||
637 | #define wait_event_killable(wq, condition) \ | 705 | #define wait_event_killable(wq, condition) \ |
638 | ({ \ | 706 | ({ \ |
639 | int __ret = 0; \ | 707 | int __ret = 0; \ |
708 | might_sleep(); \ | ||
640 | if (!(condition)) \ | 709 | if (!(condition)) \ |
641 | __ret = __wait_event_killable(wq, condition); \ | 710 | __ret = __wait_event_killable(wq, condition); \ |
642 | __ret; \ | 711 | __ret; \ |
@@ -830,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta | |||
830 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); | 899 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); |
831 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); | 900 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); |
832 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); | 901 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); |
902 | long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); | ||
903 | int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | ||
833 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 904 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
834 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 905 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
835 | 906 | ||
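wait_woken() and woken_wake_function() are meant to close the race between testing a condition and going to sleep by tracking WQ_FLAG_WOKEN on the wait entry. The intended usage pattern is roughly the sketch below (queue head and condition are hypothetical):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int my_wait_for_event(wait_queue_head_t *wq, bool *event_seen)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = 0;

	add_wait_queue(wq, &wait);
	while (!*event_seen) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		/* sleeps unless WQ_FLAG_WOKEN was set by a wakeup in between */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(wq, &wait);

	return err;
}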
@@ -886,6 +957,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *); | |||
886 | static inline int | 957 | static inline int |
887 | wait_on_bit(void *word, int bit, unsigned mode) | 958 | wait_on_bit(void *word, int bit, unsigned mode) |
888 | { | 959 | { |
960 | might_sleep(); | ||
889 | if (!test_bit(bit, word)) | 961 | if (!test_bit(bit, word)) |
890 | return 0; | 962 | return 0; |
891 | return out_of_line_wait_on_bit(word, bit, | 963 | return out_of_line_wait_on_bit(word, bit, |
@@ -910,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode) | |||
910 | static inline int | 982 | static inline int |
911 | wait_on_bit_io(void *word, int bit, unsigned mode) | 983 | wait_on_bit_io(void *word, int bit, unsigned mode) |
912 | { | 984 | { |
985 | might_sleep(); | ||
913 | if (!test_bit(bit, word)) | 986 | if (!test_bit(bit, word)) |
914 | return 0; | 987 | return 0; |
915 | return out_of_line_wait_on_bit(word, bit, | 988 | return out_of_line_wait_on_bit(word, bit, |
@@ -936,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode) | |||
936 | static inline int | 1009 | static inline int |
937 | wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) | 1010 | wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) |
938 | { | 1011 | { |
1012 | might_sleep(); | ||
939 | if (!test_bit(bit, word)) | 1013 | if (!test_bit(bit, word)) |
940 | return 0; | 1014 | return 0; |
941 | return out_of_line_wait_on_bit(word, bit, action, mode); | 1015 | return out_of_line_wait_on_bit(word, bit, action, mode); |
@@ -963,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode | |||
963 | static inline int | 1037 | static inline int |
964 | wait_on_bit_lock(void *word, int bit, unsigned mode) | 1038 | wait_on_bit_lock(void *word, int bit, unsigned mode) |
965 | { | 1039 | { |
1040 | might_sleep(); | ||
966 | if (!test_and_set_bit(bit, word)) | 1041 | if (!test_and_set_bit(bit, word)) |
967 | return 0; | 1042 | return 0; |
968 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); | 1043 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); |
@@ -986,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode) | |||
986 | static inline int | 1061 | static inline int |
987 | wait_on_bit_lock_io(void *word, int bit, unsigned mode) | 1062 | wait_on_bit_lock_io(void *word, int bit, unsigned mode) |
988 | { | 1063 | { |
1064 | might_sleep(); | ||
989 | if (!test_and_set_bit(bit, word)) | 1065 | if (!test_and_set_bit(bit, word)) |
990 | return 0; | 1066 | return 0; |
991 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); | 1067 | return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); |
@@ -1011,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode) | |||
1011 | static inline int | 1087 | static inline int |
1012 | wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) | 1088 | wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) |
1013 | { | 1089 | { |
1090 | might_sleep(); | ||
1014 | if (!test_and_set_bit(bit, word)) | 1091 | if (!test_and_set_bit(bit, word)) |
1015 | return 0; | 1092 | return 0; |
1016 | return out_of_line_wait_on_bit_lock(word, bit, action, mode); | 1093 | return out_of_line_wait_on_bit_lock(word, bit, action, mode); |
@@ -1029,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned | |||
1029 | static inline | 1106 | static inline |
1030 | int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) | 1107 | int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) |
1031 | { | 1108 | { |
1109 | might_sleep(); | ||
1032 | if (atomic_read(val) == 0) | 1110 | if (atomic_read(val) == 0) |
1033 | return 0; | 1111 | return 0; |
1034 | return out_of_line_wait_on_atomic_t(val, action, mode); | 1112 | return out_of_line_wait_on_atomic_t(val, action, mode); |