Diffstat (limited to 'include/linux')
237 files changed, 5989 insertions, 1885 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 407a12f663eb..856d381b1d5b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
| 29 | #include <linux/ioport.h> /* for struct resource */ | 29 | #include <linux/ioport.h> /* for struct resource */ |
| 30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
| 31 | #include <linux/property.h> | ||
| 31 | 32 | ||
| 32 | #ifndef _LINUX | 33 | #ifndef _LINUX |
| 33 | #define _LINUX | 34 | #define _LINUX |
| @@ -123,6 +124,10 @@ int acpi_numa_init (void); | |||
| 123 | 124 | ||
| 124 | int acpi_table_init (void); | 125 | int acpi_table_init (void); |
| 125 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); | 126 | int acpi_table_parse(char *id, acpi_tbl_table_handler handler); |
| 127 | int __init acpi_parse_entries(char *id, unsigned long table_size, | ||
| 128 | acpi_tbl_entry_handler handler, | ||
| 129 | struct acpi_table_header *table_header, | ||
| 130 | int entry_id, unsigned int max_entries); | ||
| 126 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, | 131 | int __init acpi_table_parse_entries(char *id, unsigned long table_size, |
| 127 | int entry_id, | 132 | int entry_id, |
| 128 | acpi_tbl_entry_handler handler, | 133 | acpi_tbl_entry_handler handler, |
| @@ -148,6 +153,7 @@ int acpi_unmap_lsapic(int cpu); | |||
| 148 | 153 | ||
| 149 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); | 154 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); |
| 150 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); | 155 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); |
| 156 | int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); | ||
| 151 | void acpi_irq_stats_init(void); | 157 | void acpi_irq_stats_init(void); |
| 152 | extern u32 acpi_irq_handled; | 158 | extern u32 acpi_irq_handled; |
| 153 | extern u32 acpi_irq_not_handled; | 159 | extern u32 acpi_irq_not_handled; |
| @@ -423,14 +429,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | |||
| 423 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | 429 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
| 424 | const struct device *dev); | 430 | const struct device *dev); |
| 425 | 431 | ||
| 426 | static inline bool acpi_driver_match_device(struct device *dev, | 432 | extern bool acpi_driver_match_device(struct device *dev, |
| 427 | const struct device_driver *drv) | 433 | const struct device_driver *drv); |
| 428 | { | ||
| 429 | return !!acpi_match_device(drv->acpi_match_table, dev); | ||
| 430 | } | ||
| 431 | |||
| 432 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); | 434 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); |
| 433 | int acpi_device_modalias(struct device *, char *, int); | 435 | int acpi_device_modalias(struct device *, char *, int); |
| 436 | void acpi_walk_dep_device_list(acpi_handle handle); | ||
| 434 | 437 | ||
| 435 | struct platform_device *acpi_create_platform_device(struct acpi_device *); | 438 | struct platform_device *acpi_create_platform_device(struct acpi_device *); |
| 436 | #define ACPI_PTR(_ptr) (_ptr) | 439 | #define ACPI_PTR(_ptr) (_ptr) |
| @@ -443,6 +446,23 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); | |||
| 443 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) | 446 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
| 444 | #define ACPI_HANDLE(dev) (NULL) | 447 | #define ACPI_HANDLE(dev) (NULL) |
| 445 | 448 | ||
| 449 | struct fwnode_handle; | ||
| 450 | |||
| 451 | static inline bool is_acpi_node(struct fwnode_handle *fwnode) | ||
| 452 | { | ||
| 453 | return false; | ||
| 454 | } | ||
| 455 | |||
| 456 | static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) | ||
| 457 | { | ||
| 458 | return NULL; | ||
| 459 | } | ||
| 460 | |||
| 461 | static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) | ||
| 462 | { | ||
| 463 | return NULL; | ||
| 464 | } | ||
| 465 | |||
| 446 | static inline const char *acpi_dev_name(struct acpi_device *adev) | 466 | static inline const char *acpi_dev_name(struct acpi_device *adev) |
| 447 | { | 467 | { |
| 448 | return NULL; | 468 | return NULL; |
| @@ -553,16 +573,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, | |||
| 553 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) | 573 | #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
| 554 | #endif | 574 | #endif |
| 555 | 575 | ||
| 556 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) | 576 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) |
| 557 | int acpi_dev_runtime_suspend(struct device *dev); | 577 | int acpi_dev_runtime_suspend(struct device *dev); |
| 558 | int acpi_dev_runtime_resume(struct device *dev); | 578 | int acpi_dev_runtime_resume(struct device *dev); |
| 559 | int acpi_subsys_runtime_suspend(struct device *dev); | 579 | int acpi_subsys_runtime_suspend(struct device *dev); |
| 560 | int acpi_subsys_runtime_resume(struct device *dev); | 580 | int acpi_subsys_runtime_resume(struct device *dev); |
| 581 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | ||
| 582 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
| 561 | #else | 583 | #else |
| 562 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } | 584 | static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } |
| 563 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } | 585 | static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } |
| 564 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } | 586 | static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } |
| 565 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } | 587 | static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } |
| 588 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
| 589 | { | ||
| 590 | return NULL; | ||
| 591 | } | ||
| 592 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
| 593 | { | ||
| 594 | return -ENODEV; | ||
| 595 | } | ||
| 566 | #endif | 596 | #endif |
| 567 | 597 | ||
| 568 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) | 598 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) |
| @@ -585,20 +615,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } | |||
| 585 | static inline int acpi_subsys_freeze(struct device *dev) { return 0; } | 615 | static inline int acpi_subsys_freeze(struct device *dev) { return 0; } |
| 586 | #endif | 616 | #endif |
| 587 | 617 | ||
| 588 | #if defined(CONFIG_ACPI) && defined(CONFIG_PM) | ||
| 589 | struct acpi_device *acpi_dev_pm_get_node(struct device *dev); | ||
| 590 | int acpi_dev_pm_attach(struct device *dev, bool power_on); | ||
| 591 | #else | ||
| 592 | static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) | ||
| 593 | { | ||
| 594 | return NULL; | ||
| 595 | } | ||
| 596 | static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) | ||
| 597 | { | ||
| 598 | return -ENODEV; | ||
| 599 | } | ||
| 600 | #endif | ||
| 601 | |||
| 602 | #ifdef CONFIG_ACPI | 618 | #ifdef CONFIG_ACPI |
| 603 | __printf(3, 4) | 619 | __printf(3, 4) |
| 604 | void acpi_handle_printk(const char *level, acpi_handle handle, | 620 | void acpi_handle_printk(const char *level, acpi_handle handle, |
| @@ -659,4 +675,114 @@ do { \ | |||
| 659 | #endif | 675 | #endif |
| 660 | #endif | 676 | #endif |
| 661 | 677 | ||
| 678 | struct acpi_gpio_params { | ||
| 679 | unsigned int crs_entry_index; | ||
| 680 | unsigned int line_index; | ||
| 681 | bool active_low; | ||
| 682 | }; | ||
| 683 | |||
| 684 | struct acpi_gpio_mapping { | ||
| 685 | const char *name; | ||
| 686 | const struct acpi_gpio_params *data; | ||
| 687 | unsigned int size; | ||
| 688 | }; | ||
| 689 | |||
| 690 | #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) | ||
| 691 | int acpi_dev_add_driver_gpios(struct acpi_device *adev, | ||
| 692 | const struct acpi_gpio_mapping *gpios); | ||
| 693 | |||
| 694 | static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) | ||
| 695 | { | ||
| 696 | if (adev) | ||
| 697 | adev->driver_gpios = NULL; | ||
| 698 | } | ||
| 699 | #else | ||
| 700 | static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, | ||
| 701 | const struct acpi_gpio_mapping *gpios) | ||
| 702 | { | ||
| 703 | return -ENXIO; | ||
| 704 | } | ||
| 705 | static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} | ||
| 706 | #endif | ||
| 707 | |||
| 708 | /* Device properties */ | ||
| 709 | |||
| 710 | #define MAX_ACPI_REFERENCE_ARGS 8 | ||
| 711 | struct acpi_reference_args { | ||
| 712 | struct acpi_device *adev; | ||
| 713 | size_t nargs; | ||
| 714 | u64 args[MAX_ACPI_REFERENCE_ARGS]; | ||
| 715 | }; | ||
| 716 | |||
| 717 | #ifdef CONFIG_ACPI | ||
| 718 | int acpi_dev_get_property(struct acpi_device *adev, const char *name, | ||
| 719 | acpi_object_type type, const union acpi_object **obj); | ||
| 720 | int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, | ||
| 721 | acpi_object_type type, | ||
| 722 | const union acpi_object **obj); | ||
| 723 | int acpi_dev_get_property_reference(struct acpi_device *adev, | ||
| 724 | const char *name, size_t index, | ||
| 725 | struct acpi_reference_args *args); | ||
| 726 | |||
| 727 | int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, | ||
| 728 | void **valptr); | ||
| 729 | int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, | ||
| 730 | enum dev_prop_type proptype, void *val); | ||
| 731 | int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, | ||
| 732 | enum dev_prop_type proptype, void *val, size_t nval); | ||
| 733 | |||
| 734 | struct acpi_device *acpi_get_next_child(struct device *dev, | ||
| 735 | struct acpi_device *child); | ||
| 736 | #else | ||
| 737 | static inline int acpi_dev_get_property(struct acpi_device *adev, | ||
| 738 | const char *name, acpi_object_type type, | ||
| 739 | const union acpi_object **obj) | ||
| 740 | { | ||
| 741 | return -ENXIO; | ||
| 742 | } | ||
| 743 | static inline int acpi_dev_get_property_array(struct acpi_device *adev, | ||
| 744 | const char *name, | ||
| 745 | acpi_object_type type, | ||
| 746 | const union acpi_object **obj) | ||
| 747 | { | ||
| 748 | return -ENXIO; | ||
| 749 | } | ||
| 750 | static inline int acpi_dev_get_property_reference(struct acpi_device *adev, | ||
| 751 | const char *name, const char *cells_name, | ||
| 752 | size_t index, struct acpi_reference_args *args) | ||
| 753 | { | ||
| 754 | return -ENXIO; | ||
| 755 | } | ||
| 756 | |||
| 757 | static inline int acpi_dev_prop_get(struct acpi_device *adev, | ||
| 758 | const char *propname, | ||
| 759 | void **valptr) | ||
| 760 | { | ||
| 761 | return -ENXIO; | ||
| 762 | } | ||
| 763 | |||
| 764 | static inline int acpi_dev_prop_read_single(struct acpi_device *adev, | ||
| 765 | const char *propname, | ||
| 766 | enum dev_prop_type proptype, | ||
| 767 | void *val) | ||
| 768 | { | ||
| 769 | return -ENXIO; | ||
| 770 | } | ||
| 771 | |||
| 772 | static inline int acpi_dev_prop_read(struct acpi_device *adev, | ||
| 773 | const char *propname, | ||
| 774 | enum dev_prop_type proptype, | ||
| 775 | void *val, size_t nval) | ||
| 776 | { | ||
| 777 | return -ENXIO; | ||
| 778 | } | ||
| 779 | |||
| 780 | static inline struct acpi_device *acpi_get_next_child(struct device *dev, | ||
| 781 | struct acpi_device *child) | ||
| 782 | { | ||
| 783 | return NULL; | ||
| 784 | } | ||
| 785 | |||
| 786 | #endif | ||
| 787 | |||
| 662 | #endif /*_LINUX_ACPI_H*/ | 788 | #endif /*_LINUX_ACPI_H*/ |
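The acpi_gpio_params/acpi_gpio_mapping pair added above lets a driver attach connection-ID names to GpioIo/GpioInt resources that firmware only describes by index in _CRS, so that named gpiod lookups also work on ACPI systems. A minimal sketch of how a driver might use it (device, property and function names here are made up):

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* fields: _CRS entry index, line within that entry, active_low */
static const struct acpi_gpio_params foo_reset_gpio = { 0, 0, false };
static const struct acpi_gpio_params foo_shutdown_gpio = { 1, 0, false };

static const struct acpi_gpio_mapping foo_acpi_gpios[] = {
	{ "reset-gpios", &foo_reset_gpio, 1 },
	{ "shutdown-gpios", &foo_shutdown_gpio, 1 },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;
	int ret;

	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
					foo_acpi_gpios);
	if (ret)
		return ret;

	/* "reset" now resolves through the "reset-gpios" entry above */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset)) {
		acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
		return PTR_ERR(reset);
	}

	return 0;
}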
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c324f5700d1a..2afc618b15ce 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
| @@ -23,6 +23,7 @@ | |||
| 23 | 23 | ||
| 24 | #define AMBA_NR_IRQS 9 | 24 | #define AMBA_NR_IRQS 9 |
| 25 | #define AMBA_CID 0xb105f00d | 25 | #define AMBA_CID 0xb105f00d |
| 26 | #define CORESIGHT_CID 0xb105900d | ||
| 26 | 27 | ||
| 27 | struct clk; | 28 | struct clk; |
| 28 | 29 | ||
| @@ -97,6 +98,16 @@ void amba_release_regions(struct amba_device *); | |||
| 97 | #define amba_pclk_disable(d) \ | 98 | #define amba_pclk_disable(d) \ |
| 98 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) | 99 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) |
| 99 | 100 | ||
| 101 | static inline int amba_pclk_prepare(struct amba_device *dev) | ||
| 102 | { | ||
| 103 | return clk_prepare(dev->pclk); | ||
| 104 | } | ||
| 105 | |||
| 106 | static inline void amba_pclk_unprepare(struct amba_device *dev) | ||
| 107 | { | ||
| 108 | clk_unprepare(dev->pclk); | ||
| 109 | } | ||
| 110 | |||
| 100 | /* Some drivers don't use the struct amba_device */ | 111 | /* Some drivers don't use the struct amba_device */ |
| 101 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) | 112 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) |
| 102 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) | 113 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) |
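The new amba_pclk_prepare()/amba_pclk_unprepare() helpers complete the existing enable/disable macros, so code that manages the bus clock by hand can split the sleepable prepare step from the atomic enable step. A rough pairing, with hypothetical runtime PM callbacks:

#include <linux/amba/bus.h>

static int foo_runtime_resume(struct amba_device *adev)
{
	int ret;

	ret = amba_pclk_prepare(adev);
	if (ret)
		return ret;

	ret = amba_pclk_enable(adev);
	if (ret)
		amba_pclk_unprepare(adev);

	return ret;
}

static int foo_runtime_suspend(struct amba_device *adev)
{
	amba_pclk_disable(adev);
	amba_pclk_unprepare(adev);
	return 0;
}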
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
| @@ -31,8 +31,11 @@ struct ath9k_platform_data { | |||
| 31 | u32 gpio_mask; | 31 | u32 gpio_mask; |
| 32 | u32 gpio_val; | 32 | u32 gpio_val; |
| 33 | 33 | ||
| 34 | bool endian_check; | ||
| 34 | bool is_clk_25mhz; | 35 | bool is_clk_25mhz; |
| 35 | bool tx_gain_buffalo; | 36 | bool tx_gain_buffalo; |
| 37 | bool disable_2ghz; | ||
| 38 | bool disable_5ghz; | ||
| 36 | 39 | ||
| 37 | int (*get_mac_revision)(void); | 40 | int (*get_mac_revision)(void); |
| 38 | int (*external_reset)(void); | 41 | int (*external_reset)(void); |
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e58fe7df8b9c..0c04917c2f12 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
| @@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name); | |||
| 130 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ | 130 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ |
| 131 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, | 131 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, |
| 132 | unsigned int flags); | 132 | unsigned int flags); |
| 133 | extern void __audit_file(const struct file *); | ||
| 133 | extern void __audit_inode_child(const struct inode *parent, | 134 | extern void __audit_inode_child(const struct inode *parent, |
| 134 | const struct dentry *dentry, | 135 | const struct dentry *dentry, |
| 135 | const unsigned char type); | 136 | const unsigned char type); |
| @@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name, | |||
| 183 | __audit_inode(name, dentry, flags); | 184 | __audit_inode(name, dentry, flags); |
| 184 | } | 185 | } |
| 185 | } | 186 | } |
| 187 | static inline void audit_file(struct file *file) | ||
| 188 | { | ||
| 189 | if (unlikely(!audit_dummy_context())) | ||
| 190 | __audit_file(file); | ||
| 191 | } | ||
| 186 | static inline void audit_inode_parent_hidden(struct filename *name, | 192 | static inline void audit_inode_parent_hidden(struct filename *name, |
| 187 | const struct dentry *dentry) | 193 | const struct dentry *dentry) |
| 188 | { | 194 | { |
| @@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name, | |||
| 357 | const struct dentry *dentry, | 363 | const struct dentry *dentry, |
| 358 | unsigned int parent) | 364 | unsigned int parent) |
| 359 | { } | 365 | { } |
| 366 | static inline void audit_file(struct file *file) | ||
| 367 | { | ||
| 368 | } | ||
| 360 | static inline void audit_inode_parent_hidden(struct filename *name, | 369 | static inline void audit_inode_parent_hidden(struct filename *name, |
| 361 | const struct dentry *dentry) | 370 | const struct dentry *dentry) |
| 362 | { } | 371 | { } |
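audit_file() mirrors audit_inode() for callers that already hold an open struct file; like the other wrappers it is skipped entirely when the task has a dummy audit context. A hypothetical call site:

#include <linux/audit.h>
#include <linux/fs.h>

/* sketch: log the inode behind an already-open file before acting on it */
static long foo_do_op(struct file *file)
{
	audit_file(file);

	return 0;
}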
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 729f48e6b20b..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
| @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); | |||
| 447 | #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ | 447 | #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ |
| 448 | extern u32 bcma_core_dma_translation(struct bcma_device *core); | 448 | extern u32 bcma_core_dma_translation(struct bcma_device *core); |
| 449 | 449 | ||
| 450 | extern unsigned int bcma_core_irq(struct bcma_device *core, int num); | ||
| 451 | |||
| 450 | #endif /* LINUX_BCMA_H_ */ | 452 | #endif /* LINUX_BCMA_H_ */ |
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
| @@ -43,12 +43,12 @@ struct bcma_drv_mips { | |||
| 43 | extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); | 43 | extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); |
| 44 | extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); | 44 | extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); |
| 45 | 45 | ||
| 46 | extern unsigned int bcma_core_irq(struct bcma_device *core); | 46 | extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); |
| 47 | #else | 47 | #else |
| 48 | static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } | 48 | static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } |
| 49 | static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } | 49 | static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } |
| 50 | 50 | ||
| 51 | static inline unsigned int bcma_core_irq(struct bcma_device *core) | 51 | static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) |
| 52 | { | 52 | { |
| 53 | return 0; | 53 | return 0; |
| 54 | } | 54 | } |
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
| @@ -53,6 +53,10 @@ struct linux_binprm { | |||
| 53 | #define BINPRM_FLAGS_EXECFD_BIT 1 | 53 | #define BINPRM_FLAGS_EXECFD_BIT 1 |
| 54 | #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) | 54 | #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) |
| 55 | 55 | ||
| 56 | /* filename of the binary will be inaccessible after exec */ | ||
| 57 | #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 | ||
| 58 | #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) | ||
| 59 | |||
| 56 | /* Function parameter for binfmt->coredump */ | 60 | /* Function parameter for binfmt->coredump */ |
| 57 | struct coredump_params { | 61 | struct coredump_params { |
| 58 | const siginfo_t *siginfo; | 62 | const siginfo_t *siginfo; |
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7347f486ceca..efead0b532c4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
| @@ -443,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, | |||
| 443 | extern void bio_set_pages_dirty(struct bio *bio); | 443 | extern void bio_set_pages_dirty(struct bio *bio); |
| 444 | extern void bio_check_pages_dirty(struct bio *bio); | 444 | extern void bio_check_pages_dirty(struct bio *bio); |
| 445 | 445 | ||
| 446 | void generic_start_io_acct(int rw, unsigned long sectors, | ||
| 447 | struct hd_struct *part); | ||
| 448 | void generic_end_io_acct(int rw, struct hd_struct *part, | ||
| 449 | unsigned long start_time); | ||
| 450 | |||
| 446 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 451 | #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
| 447 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" | 452 | # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" |
| 448 | #endif | 453 | #endif |
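generic_start_io_acct()/generic_end_io_acct() export the per-partition I/O accounting that bio-based drivers (zram, rsxx, drbd) previously open-coded, keyed on direction, sector count and the jiffies start time. A sketch of the intended pattern (disk handling is simplified):

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

static void foo_handle_bio(struct gendisk *disk, struct bio *bio)
{
	unsigned long start_time = jiffies;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);

	/* ... carry out the transfer described by the bio ... */

	generic_end_io_acct(rw, &disk->part0, start_time);
	bio_endio(bio, 0);
}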
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..202e4034fe26 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
| @@ -45,6 +45,7 @@ | |||
| 45 | * bitmap_set(dst, pos, nbits) Set specified bit area | 45 | * bitmap_set(dst, pos, nbits) Set specified bit area |
| 46 | * bitmap_clear(dst, pos, nbits) Clear specified bit area | 46 | * bitmap_clear(dst, pos, nbits) Clear specified bit area |
| 47 | * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area | 47 | * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area |
| 48 | * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above | ||
| 48 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n | 49 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
| 49 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n | 50 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
| 50 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) | 51 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
| @@ -60,6 +61,7 @@ | |||
| 60 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region | 61 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
| 61 | * bitmap_release_region(bitmap, pos, order) Free specified bit region | 62 | * bitmap_release_region(bitmap, pos, order) Free specified bit region |
| 62 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region | 63 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
| 64 | * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex | ||
| 63 | */ | 65 | */ |
| 64 | 66 | ||
| 65 | /* | 67 | /* |
| @@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); | |||
| 114 | 116 | ||
| 115 | extern void bitmap_set(unsigned long *map, unsigned int start, int len); | 117 | extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
| 116 | extern void bitmap_clear(unsigned long *map, unsigned int start, int len); | 118 | extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
| 117 | extern unsigned long bitmap_find_next_zero_area(unsigned long *map, | 119 | |
| 118 | unsigned long size, | 120 | extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
| 119 | unsigned long start, | 121 | unsigned long size, |
| 120 | unsigned int nr, | 122 | unsigned long start, |
| 121 | unsigned long align_mask); | 123 | unsigned int nr, |
| 124 | unsigned long align_mask, | ||
| 125 | unsigned long align_offset); | ||
| 126 | |||
| 127 | /** | ||
| 128 | * bitmap_find_next_zero_area - find a contiguous aligned zero area | ||
| 129 | * @map: The address to base the search on | ||
| 130 | * @size: The bitmap size in bits | ||
| 131 | * @start: The bitnumber to start searching at | ||
| 132 | * @nr: The number of zeroed bits we're looking for | ||
| 133 | * @align_mask: Alignment mask for zero area | ||
| 134 | * | ||
| 135 | * The @align_mask should be one less than a power of 2; the effect is that | ||
| 136 | * the bit offset of all zero areas this function finds is multiples of that | ||
| 137 | * power of 2. A @align_mask of 0 means no alignment is required. | ||
| 138 | */ | ||
| 139 | static inline unsigned long | ||
| 140 | bitmap_find_next_zero_area(unsigned long *map, | ||
| 141 | unsigned long size, | ||
| 142 | unsigned long start, | ||
| 143 | unsigned int nr, | ||
| 144 | unsigned long align_mask) | ||
| 145 | { | ||
| 146 | return bitmap_find_next_zero_area_off(map, size, start, nr, | ||
| 147 | align_mask, 0); | ||
| 148 | } | ||
| 122 | 149 | ||
| 123 | extern int bitmap_scnprintf(char *buf, unsigned int len, | 150 | extern int bitmap_scnprintf(char *buf, unsigned int len, |
| 124 | const unsigned long *src, int nbits); | 151 | const unsigned long *src, int nbits); |
| @@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o | |||
| 145 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); | 172 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
| 146 | extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); | 173 | extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
| 147 | extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); | 174 | extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
| 175 | extern int bitmap_print_to_pagebuf(bool list, char *buf, | ||
| 176 | const unsigned long *maskp, int nmaskbits); | ||
| 148 | 177 | ||
| 149 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) | 178 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) |
| 150 | #define BITMAP_LAST_WORD_MASK(nbits) \ | 179 | #define BITMAP_LAST_WORD_MASK(nbits) \ |
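bitmap_find_next_zero_area_off() differs from the old interface only in the extra align_offset: the area is chosen so that (returned position + align_offset) is aligned to align_mask + 1, which lets a caller align against a base that is not itself a multiple of the alignment; the old call is now a wrapper with offset 0. A small sketch on a static 1024-bit map:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define FOO_BITS 1024

static unsigned long foo_map[BITS_TO_LONGS(FOO_BITS)];

/* claim 16 clear bits such that (pos + 4) is a multiple of 8 */
static long foo_alloc_region(void)
{
	unsigned long pos;

	pos = bitmap_find_next_zero_area_off(foo_map, FOO_BITS, 0, 16, 7, 4);
	if (pos >= FOO_BITS)
		return -ENOMEM;

	bitmap_set(foo_map, pos, 16);
	return pos;
}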
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 15f7034aa377..8aded9ab2e4e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
| @@ -79,7 +79,13 @@ struct blk_mq_tag_set { | |||
| 79 | struct list_head tag_list; | 79 | struct list_head tag_list; |
| 80 | }; | 80 | }; |
| 81 | 81 | ||
| 82 | typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); | 82 | struct blk_mq_queue_data { |
| 83 | struct request *rq; | ||
| 84 | struct list_head *list; | ||
| 85 | bool last; | ||
| 86 | }; | ||
| 87 | |||
| 88 | typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); | ||
| 83 | typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); | 89 | typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); |
| 84 | typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); | 90 | typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); |
| 85 | typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); | 91 | typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); |
| @@ -140,6 +146,7 @@ enum { | |||
| 140 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
| 141 | BLK_MQ_F_SG_MERGE = 1 << 2, | 147 | BLK_MQ_F_SG_MERGE = 1 << 2, |
| 142 | BLK_MQ_F_SYSFS_UP = 1 << 3, | 148 | BLK_MQ_F_SYSFS_UP = 1 << 3, |
| 149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | ||
| 143 | 150 | ||
| 144 | BLK_MQ_S_STOPPED = 0, | 151 | BLK_MQ_S_STOPPED = 0, |
| 145 | BLK_MQ_S_TAG_ACTIVE = 1, | 152 | BLK_MQ_S_TAG_ACTIVE = 1, |
| @@ -162,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); | |||
| 162 | void blk_mq_insert_request(struct request *, bool, bool, bool); | 169 | void blk_mq_insert_request(struct request *, bool, bool, bool); |
| 163 | void blk_mq_run_queues(struct request_queue *q, bool async); | 170 | void blk_mq_run_queues(struct request_queue *q, bool async); |
| 164 | void blk_mq_free_request(struct request *rq); | 171 | void blk_mq_free_request(struct request *rq); |
| 172 | void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); | ||
| 165 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | 173 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); |
| 166 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | 174 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, |
| 167 | gfp_t gfp, bool reserved); | 175 | gfp_t gfp, bool reserved); |
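With this change queue_rq() receives a struct blk_mq_queue_data instead of a bare request plus "last" flag, so the last-in-batch hint (and, for BLK_MQ_F_DEFER_ISSUE style batching, the remaining list) travels in one argument. A hedged sketch of the new prototype in a driver; the doorbell helper is made up:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void foo_kick_hw(struct blk_mq_hw_ctx *hctx)
{
	/* placeholder for ringing the device doorbell */
}

static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	/* submit rq to the hardware queue here ... */

	/* only notify the device once per batch */
	if (bd->last)
		foo_kick_hw(hctx);

	return BLK_MQ_RQ_QUEUE_OK;
}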
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6d76b8b4aa2b..92f4b4b288dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
| @@ -398,7 +398,7 @@ struct request_queue { | |||
| 398 | */ | 398 | */ |
| 399 | struct kobject mq_kobj; | 399 | struct kobject mq_kobj; |
| 400 | 400 | ||
| 401 | #ifdef CONFIG_PM_RUNTIME | 401 | #ifdef CONFIG_PM |
| 402 | struct device *dev; | 402 | struct device *dev; |
| 403 | int rpm_status; | 403 | int rpm_status; |
| 404 | unsigned int nr_pending; | 404 | unsigned int nr_pending; |
| @@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *); | |||
| 1057 | /* | 1057 | /* |
| 1058 | * block layer runtime pm functions | 1058 | * block layer runtime pm functions |
| 1059 | */ | 1059 | */ |
| 1060 | #ifdef CONFIG_PM_RUNTIME | 1060 | #ifdef CONFIG_PM |
| 1061 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); | 1061 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); |
| 1062 | extern int blk_pre_runtime_suspend(struct request_queue *q); | 1062 | extern int blk_pre_runtime_suspend(struct request_queue *q); |
| 1063 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); | 1063 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); |
| @@ -1184,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | |||
| 1184 | enum blk_default_limits { | 1184 | enum blk_default_limits { |
| 1185 | BLK_MAX_SEGMENTS = 128, | 1185 | BLK_MAX_SEGMENTS = 128, |
| 1186 | BLK_SAFE_MAX_SECTORS = 255, | 1186 | BLK_SAFE_MAX_SECTORS = 255, |
| 1187 | BLK_DEF_MAX_SECTORS = 1024, | ||
| 1188 | BLK_MAX_SEGMENT_SIZE = 65536, | 1187 | BLK_MAX_SEGMENT_SIZE = 65536, |
| 1189 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, | 1188 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, |
| 1190 | }; | 1189 | }; |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3cf91754a957..bbfceb756452 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
| @@ -22,7 +22,7 @@ struct bpf_map_ops { | |||
| 22 | 22 | ||
| 23 | /* funcs callable from userspace and from eBPF programs */ | 23 | /* funcs callable from userspace and from eBPF programs */ |
| 24 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | 24 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); |
| 25 | int (*map_update_elem)(struct bpf_map *map, void *key, void *value); | 25 | int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); |
| 26 | int (*map_delete_elem)(struct bpf_map *map, void *key); | 26 | int (*map_delete_elem)(struct bpf_map *map, void *key); |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| @@ -128,9 +128,18 @@ struct bpf_prog_aux { | |||
| 128 | struct work_struct work; | 128 | struct work_struct work; |
| 129 | }; | 129 | }; |
| 130 | 130 | ||
| 131 | #ifdef CONFIG_BPF_SYSCALL | ||
| 131 | void bpf_prog_put(struct bpf_prog *prog); | 132 | void bpf_prog_put(struct bpf_prog *prog); |
| 133 | #else | ||
| 134 | static inline void bpf_prog_put(struct bpf_prog *prog) {} | ||
| 135 | #endif | ||
| 132 | struct bpf_prog *bpf_prog_get(u32 ufd); | 136 | struct bpf_prog *bpf_prog_get(u32 ufd); |
| 133 | /* verify correctness of eBPF program */ | 137 | /* verify correctness of eBPF program */ |
| 134 | int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); | 138 | int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); |
| 135 | 139 | ||
| 140 | /* verifier prototypes for helper functions called from eBPF programs */ | ||
| 141 | extern struct bpf_func_proto bpf_map_lookup_elem_proto; | ||
| 142 | extern struct bpf_func_proto bpf_map_update_elem_proto; | ||
| 143 | extern struct bpf_func_proto bpf_map_delete_elem_proto; | ||
| 144 | |||
| 136 | #endif /* _LINUX_BPF_H */ | 145 | #endif /* _LINUX_BPF_H */ |
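map_update_elem() now receives the user-supplied flags word, so each map type can enforce the BPF_ANY/BPF_NOEXIST/BPF_EXIST semantics (from uapi/linux/bpf.h) itself. A rough sketch of the checks, with a made-up lookup helper standing in for the map's real storage:

#include <linux/bpf.h>
#include <linux/errno.h>

/* stand-in for the map type's real lookup of an existing element */
static void *foo_map_find(struct bpf_map *map, void *key)
{
	return NULL;
}

static int foo_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 flags)
{
	void *old = foo_map_find(map, key);

	if (flags > BPF_EXIST)
		return -EINVAL;			/* unknown flags */
	if (old && flags == BPF_NOEXIST)
		return -EEXIST;			/* must not exist yet */
	if (!old && flags == BPF_EXIST)
		return -ENOENT;			/* must already exist */

	/* ... insert or replace the element under the map's lock ... */
	return 0;
}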
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000000..3daf5ed392c9
--- /dev/null
+++ b/include/linux/cacheinfo.h
| @@ -0,0 +1,100 @@ | |||
| 1 | #ifndef _LINUX_CACHEINFO_H | ||
| 2 | #define _LINUX_CACHEINFO_H | ||
| 3 | |||
| 4 | #include <linux/bitops.h> | ||
| 5 | #include <linux/cpumask.h> | ||
| 6 | #include <linux/smp.h> | ||
| 7 | |||
| 8 | struct device_node; | ||
| 9 | struct attribute; | ||
| 10 | |||
| 11 | enum cache_type { | ||
| 12 | CACHE_TYPE_NOCACHE = 0, | ||
| 13 | CACHE_TYPE_INST = BIT(0), | ||
| 14 | CACHE_TYPE_DATA = BIT(1), | ||
| 15 | CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, | ||
| 16 | CACHE_TYPE_UNIFIED = BIT(2), | ||
| 17 | }; | ||
| 18 | |||
| 19 | /** | ||
| 20 | * struct cacheinfo - represent a cache leaf node | ||
| 21 | * @type: type of the cache - data, inst or unified | ||
| 22 | * @level: represents the hierarcy in the multi-level cache | ||
| 23 | * @coherency_line_size: size of each cache line usually representing | ||
| 24 | * the minimum amount of data that gets transferred from memory | ||
| 25 | * @number_of_sets: total number of sets, a set is a collection of cache | ||
| 26 | * lines sharing the same index | ||
| 27 | * @ways_of_associativity: number of ways in which a particular memory | ||
| 28 | * block can be placed in the cache | ||
| 29 | * @physical_line_partition: number of physical cache lines sharing the | ||
| 30 | * same cachetag | ||
| 31 | * @size: Total size of the cache | ||
| 32 | * @shared_cpu_map: logical cpumask representing all the cpus sharing | ||
| 33 | * this cache node | ||
| 34 | * @attributes: bitfield representing various cache attributes | ||
| 35 | * @of_node: if devicetree is used, this represents either the cpu node in | ||
| 36 | * case there's no explicit cache node or the cache node itself in the | ||
| 37 | * device tree | ||
| 38 | * @disable_sysfs: indicates whether this node is visible to the user via | ||
| 39 | * sysfs or not | ||
| 40 | * @priv: pointer to any private data structure specific to particular | ||
| 41 | * cache design | ||
| 42 | * | ||
| 43 | * While @of_node, @disable_sysfs and @priv are used for internal book | ||
| 44 | * keeping, the remaining members form the core properties of the cache | ||
| 45 | */ | ||
| 46 | struct cacheinfo { | ||
| 47 | enum cache_type type; | ||
| 48 | unsigned int level; | ||
| 49 | unsigned int coherency_line_size; | ||
| 50 | unsigned int number_of_sets; | ||
| 51 | unsigned int ways_of_associativity; | ||
| 52 | unsigned int physical_line_partition; | ||
| 53 | unsigned int size; | ||
| 54 | cpumask_t shared_cpu_map; | ||
| 55 | unsigned int attributes; | ||
| 56 | #define CACHE_WRITE_THROUGH BIT(0) | ||
| 57 | #define CACHE_WRITE_BACK BIT(1) | ||
| 58 | #define CACHE_WRITE_POLICY_MASK \ | ||
| 59 | (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) | ||
| 60 | #define CACHE_READ_ALLOCATE BIT(2) | ||
| 61 | #define CACHE_WRITE_ALLOCATE BIT(3) | ||
| 62 | #define CACHE_ALLOCATE_POLICY_MASK \ | ||
| 63 | (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) | ||
| 64 | |||
| 65 | struct device_node *of_node; | ||
| 66 | bool disable_sysfs; | ||
| 67 | void *priv; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct cpu_cacheinfo { | ||
| 71 | struct cacheinfo *info_list; | ||
| 72 | unsigned int num_levels; | ||
| 73 | unsigned int num_leaves; | ||
| 74 | }; | ||
| 75 | |||
| 76 | /* | ||
| 77 | * Helpers to make sure "func" is executed on the cpu whose cache | ||
| 78 | * attributes are being detected | ||
| 79 | */ | ||
| 80 | #define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ | ||
| 81 | static inline void _##func(void *ret) \ | ||
| 82 | { \ | ||
| 83 | int cpu = smp_processor_id(); \ | ||
| 84 | *(int *)ret = __##func(cpu); \ | ||
| 85 | } \ | ||
| 86 | \ | ||
| 87 | int func(unsigned int cpu) \ | ||
| 88 | { \ | ||
| 89 | int ret; \ | ||
| 90 | smp_call_function_single(cpu, _##func, &ret, true); \ | ||
| 91 | return ret; \ | ||
| 92 | } | ||
| 93 | |||
| 94 | struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); | ||
| 95 | int init_cache_level(unsigned int cpu); | ||
| 96 | int populate_cache_leaves(unsigned int cpu); | ||
| 97 | |||
| 98 | const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); | ||
| 99 | |||
| 100 | #endif /* _LINUX_CACHEINFO_H */ | ||
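DEFINE_SMP_CALL_CACHE_FUNCTION() turns an architecture's __init_cache_level()/__populate_cache_leaves() (which must run on the CPU being probed) into the init_cache_level()/populate_cache_leaves() entry points declared above. A sketch of a minimal architecture back-end; the cache topology here is invented:

#include <linux/cacheinfo.h>

/* pretend every CPU has split L1 caches plus a unified L2 */
static int __init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	this_cpu_ci->num_levels = 2;
	this_cpu_ci->num_leaves = 3;
	return 0;
}

static int __populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *leaf = this_cpu_ci->info_list;

	leaf[0].type = CACHE_TYPE_DATA;    leaf[0].level = 1;
	leaf[1].type = CACHE_TYPE_INST;    leaf[1].level = 1;
	leaf[2].type = CACHE_TYPE_UNIFIED; leaf[2].level = 2;
	return 0;
}

/* generate the cross-CPU wrappers that run the above on 'cpu' */
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)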
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index b37ea95bc348..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
| @@ -127,6 +127,9 @@ void unregister_candev(struct net_device *dev); | |||
| 127 | int can_restart_now(struct net_device *dev); | 127 | int can_restart_now(struct net_device *dev); |
| 128 | void can_bus_off(struct net_device *dev); | 128 | void can_bus_off(struct net_device *dev); |
| 129 | 129 | ||
| 130 | void can_change_state(struct net_device *dev, struct can_frame *cf, | ||
| 131 | enum can_state tx_state, enum can_state rx_state); | ||
| 132 | |||
| 130 | void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, | 133 | void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, |
| 131 | unsigned int idx); | 134 | unsigned int idx); |
| 132 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); | 135 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); |
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 5f3386844134..260d78b587c4 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
| @@ -13,6 +13,7 @@ | |||
| 13 | 13 | ||
| 14 | struct ceph_auth_client; | 14 | struct ceph_auth_client; |
| 15 | struct ceph_authorizer; | 15 | struct ceph_authorizer; |
| 16 | struct ceph_msg; | ||
| 16 | 17 | ||
| 17 | struct ceph_auth_handshake { | 18 | struct ceph_auth_handshake { |
| 18 | struct ceph_authorizer *authorizer; | 19 | struct ceph_authorizer *authorizer; |
| @@ -20,6 +21,10 @@ struct ceph_auth_handshake { | |||
| 20 | size_t authorizer_buf_len; | 21 | size_t authorizer_buf_len; |
| 21 | void *authorizer_reply_buf; | 22 | void *authorizer_reply_buf; |
| 22 | size_t authorizer_reply_buf_len; | 23 | size_t authorizer_reply_buf_len; |
| 24 | int (*sign_message)(struct ceph_auth_handshake *auth, | ||
| 25 | struct ceph_msg *msg); | ||
| 26 | int (*check_message_signature)(struct ceph_auth_handshake *auth, | ||
| 27 | struct ceph_msg *msg); | ||
| 23 | }; | 28 | }; |
| 24 | 29 | ||
| 25 | struct ceph_auth_client_ops { | 30 | struct ceph_auth_client_ops { |
| @@ -66,6 +71,11 @@ struct ceph_auth_client_ops { | |||
| 66 | void (*reset)(struct ceph_auth_client *ac); | 71 | void (*reset)(struct ceph_auth_client *ac); |
| 67 | 72 | ||
| 68 | void (*destroy)(struct ceph_auth_client *ac); | 73 | void (*destroy)(struct ceph_auth_client *ac); |
| 74 | |||
| 75 | int (*sign_message)(struct ceph_auth_handshake *auth, | ||
| 76 | struct ceph_msg *msg); | ||
| 77 | int (*check_message_signature)(struct ceph_auth_handshake *auth, | ||
| 78 | struct ceph_msg *msg); | ||
| 69 | }; | 79 | }; |
| 70 | 80 | ||
| 71 | struct ceph_auth_client { | 81 | struct ceph_auth_client { |
| @@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, | |||
| 113 | extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, | 123 | extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, |
| 114 | int peer_type); | 124 | int peer_type); |
| 115 | 125 | ||
| 126 | static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth, | ||
| 127 | struct ceph_msg *msg) | ||
| 128 | { | ||
| 129 | if (auth->sign_message) | ||
| 130 | return auth->sign_message(auth, msg); | ||
| 131 | return 0; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline | ||
| 135 | int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, | ||
| 136 | struct ceph_msg *msg) | ||
| 137 | { | ||
| 138 | if (auth->check_message_signature) | ||
| 139 | return auth->check_message_signature(auth, msg); | ||
| 140 | return 0; | ||
| 141 | } | ||
| 116 | #endif | 142 | #endif |
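The two static inlines above simply dispatch to the per-handshake hooks when the negotiated auth scheme installed them (cephx with CEPH_FEATURE_MSG_AUTH) and report success otherwise. A connection owner would typically just forward its messenger callbacks to them, roughly as below; the structure and field names are illustrative:

#include <linux/ceph/auth.h>
#include <linux/ceph/messenger.h>

/* hypothetical per-connection state that owns a ceph_auth_handshake */
struct foo_session {
	struct ceph_auth_handshake auth;
};

static int foo_sign_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct foo_session *s = con->private;

	return ceph_auth_sign_message(&s->auth, msg);
}

static int foo_check_message_signature(struct ceph_connection *con,
					struct ceph_msg *msg)
{
	struct foo_session *s = con->private;

	return ceph_auth_check_message_signature(&s->auth, msg);
}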
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ad423cc37f..07ca15e76100 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
| @@ -10,8 +10,7 @@ | |||
| 10 | /* | 10 | /* |
| 11 | * a simple reference counted buffer. | 11 | * a simple reference counted buffer. |
| 12 | * | 12 | * |
| 13 | * use kmalloc for small sizes (<= one page), vmalloc for larger | 13 | * use kmalloc for smaller sizes, vmalloc for larger sizes. |
| 14 | * sizes. | ||
| 15 | */ | 14 | */ |
| 16 | struct ceph_buffer { | 15 | struct ceph_buffer { |
| 17 | struct kref kref; | 16 | struct kref kref; |
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index d12659ce550d..71e05bbf8ceb 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
| @@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features) | |||
| 84 | CEPH_FEATURE_PGPOOL3 | \ | 84 | CEPH_FEATURE_PGPOOL3 | \ |
| 85 | CEPH_FEATURE_OSDENC | \ | 85 | CEPH_FEATURE_OSDENC | \ |
| 86 | CEPH_FEATURE_CRUSH_TUNABLES | \ | 86 | CEPH_FEATURE_CRUSH_TUNABLES | \ |
| 87 | CEPH_FEATURE_MSG_AUTH | \ | ||
| 87 | CEPH_FEATURE_CRUSH_TUNABLES2 | \ | 88 | CEPH_FEATURE_CRUSH_TUNABLES2 | \ |
| 88 | CEPH_FEATURE_REPLY_CREATE_INODE | \ | 89 | CEPH_FEATURE_REPLY_CREATE_INODE | \ |
| 89 | CEPH_FEATURE_OSDHASHPSPOOL | \ | 90 | CEPH_FEATURE_OSDHASHPSPOOL | \ |
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3c97d5e9b951..c0dadaac26e3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
| @@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag { | |||
| 522 | __le32 dist[]; | 522 | __le32 dist[]; |
| 523 | } __attribute__ ((packed)); | 523 | } __attribute__ ((packed)); |
| 524 | 524 | ||
| 525 | #define CEPH_LOCK_FCNTL 1 | 525 | #define CEPH_LOCK_FCNTL 1 |
| 526 | #define CEPH_LOCK_FLOCK 2 | 526 | #define CEPH_LOCK_FLOCK 2 |
| 527 | #define CEPH_LOCK_FCNTL_INTR 3 | ||
| 528 | #define CEPH_LOCK_FLOCK_INTR 4 | ||
| 529 | |||
| 527 | 530 | ||
| 528 | #define CEPH_LOCK_SHARED 1 | 531 | #define CEPH_LOCK_SHARED 1 |
| 529 | #define CEPH_LOCK_EXCL 2 | 532 | #define CEPH_LOCK_EXCL 2 |
| @@ -549,6 +552,7 @@ struct ceph_filelock { | |||
| 549 | 552 | ||
| 550 | int ceph_flags_to_mode(int flags); | 553 | int ceph_flags_to_mode(int flags); |
| 551 | 554 | ||
| 555 | #define CEPH_INLINE_NONE ((__u64)-1) | ||
| 552 | 556 | ||
| 553 | /* capability bits */ | 557 | /* capability bits */ |
| 554 | #define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ | 558 | #define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ |
| @@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags); | |||
| 613 | CEPH_CAP_LINK_SHARED | \ | 617 | CEPH_CAP_LINK_SHARED | \ |
| 614 | CEPH_CAP_FILE_SHARED | \ | 618 | CEPH_CAP_FILE_SHARED | \ |
| 615 | CEPH_CAP_XATTR_SHARED) | 619 | CEPH_CAP_XATTR_SHARED) |
| 620 | #define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \ | ||
| 621 | CEPH_CAP_FILE_RD) | ||
| 616 | 622 | ||
| 617 | #define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ | 623 | #define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ |
| 618 | CEPH_CAP_LINK_SHARED | \ | 624 | CEPH_CAP_LINK_SHARED | \ |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 07bc359b88ac..8b11a79ca1cb 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
| @@ -29,6 +29,7 @@ | |||
| 29 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | 29 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ |
| 30 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | 30 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ |
| 31 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ | 31 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ |
| 32 | #define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */ | ||
| 32 | 33 | ||
| 33 | #define CEPH_OPT_DEFAULT (0) | 34 | #define CEPH_OPT_DEFAULT (0) |
| 34 | 35 | ||
| @@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data); | |||
| 184 | extern const char *ceph_msg_type_name(int type); | 185 | extern const char *ceph_msg_type_name(int type); |
| 185 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | 186 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); |
| 186 | extern void *ceph_kvmalloc(size_t size, gfp_t flags); | 187 | extern void *ceph_kvmalloc(size_t size, gfp_t flags); |
| 187 | extern void ceph_kvfree(const void *ptr); | ||
| 188 | 188 | ||
| 189 | extern struct ceph_options *ceph_parse_options(char *options, | 189 | extern struct ceph_options *ceph_parse_options(char *options, |
| 190 | const char *dev_name, const char *dev_name_end, | 190 | const char *dev_name, const char *dev_name_end, |
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 40ae58e3e9db..d9d396c16503 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
| @@ -42,6 +42,10 @@ struct ceph_connection_operations { | |||
| 42 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, | 42 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, |
| 43 | struct ceph_msg_header *hdr, | 43 | struct ceph_msg_header *hdr, |
| 44 | int *skip); | 44 | int *skip); |
| 45 | int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg); | ||
| 46 | |||
| 47 | int (*check_message_signature) (struct ceph_connection *con, | ||
| 48 | struct ceph_msg *msg); | ||
| 45 | }; | 49 | }; |
| 46 | 50 | ||
| 47 | /* use format string %s%d */ | 51 | /* use format string %s%d */ |
| @@ -142,7 +146,10 @@ struct ceph_msg_data_cursor { | |||
| 142 | */ | 146 | */ |
| 143 | struct ceph_msg { | 147 | struct ceph_msg { |
| 144 | struct ceph_msg_header hdr; /* header */ | 148 | struct ceph_msg_header hdr; /* header */ |
| 145 | struct ceph_msg_footer footer; /* footer */ | 149 | union { |
| 150 | struct ceph_msg_footer footer; /* footer */ | ||
| 151 | struct ceph_msg_footer_old old_footer; /* old format footer */ | ||
| 152 | }; | ||
| 146 | struct kvec front; /* unaligned blobs of message */ | 153 | struct kvec front; /* unaligned blobs of message */ |
| 147 | struct ceph_buffer *middle; | 154 | struct ceph_buffer *middle; |
| 148 | 155 | ||
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 3d94a73b5f30..1c1887206ffa 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
| @@ -152,7 +152,8 @@ struct ceph_msg_header { | |||
| 152 | receiver: mask against ~PAGE_MASK */ | 152 | receiver: mask against ~PAGE_MASK */ |
| 153 | 153 | ||
| 154 | struct ceph_entity_name src; | 154 | struct ceph_entity_name src; |
| 155 | __le32 reserved; | 155 | __le16 compat_version; |
| 156 | __le16 reserved; | ||
| 156 | __le32 crc; /* header crc32c */ | 157 | __le32 crc; /* header crc32c */ |
| 157 | } __attribute__ ((packed)); | 158 | } __attribute__ ((packed)); |
| 158 | 159 | ||
| @@ -164,13 +165,21 @@ struct ceph_msg_header { | |||
| 164 | /* | 165 | /* |
| 165 | * follows data payload | 166 | * follows data payload |
| 166 | */ | 167 | */ |
| 168 | struct ceph_msg_footer_old { | ||
| 169 | __le32 front_crc, middle_crc, data_crc; | ||
| 170 | __u8 flags; | ||
| 171 | } __attribute__ ((packed)); | ||
| 172 | |||
| 167 | struct ceph_msg_footer { | 173 | struct ceph_msg_footer { |
| 168 | __le32 front_crc, middle_crc, data_crc; | 174 | __le32 front_crc, middle_crc, data_crc; |
| 175 | // sig holds the 64 bits of the digital signature for the message PLR | ||
| 176 | __le64 sig; | ||
| 169 | __u8 flags; | 177 | __u8 flags; |
| 170 | } __attribute__ ((packed)); | 178 | } __attribute__ ((packed)); |
| 171 | 179 | ||
| 172 | #define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ | 180 | #define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ |
| 173 | #define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ | 181 | #define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ |
| 182 | #define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */ | ||
| 174 | 183 | ||
| 175 | 184 | ||
| 176 | #endif | 185 | #endif |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03aeb27fcc69..5d86416d35f2 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
| @@ -87,6 +87,13 @@ struct ceph_osd_req_op { | |||
| 87 | struct ceph_osd_data osd_data; | 87 | struct ceph_osd_data osd_data; |
| 88 | } extent; | 88 | } extent; |
| 89 | struct { | 89 | struct { |
| 90 | __le32 name_len; | ||
| 91 | __le32 value_len; | ||
| 92 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
| 93 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
| 94 | struct ceph_osd_data osd_data; | ||
| 95 | } xattr; | ||
| 96 | struct { | ||
| 90 | const char *class_name; | 97 | const char *class_name; |
| 91 | const char *method_name; | 98 | const char *method_name; |
| 92 | struct ceph_osd_data request_info; | 99 | struct ceph_osd_data request_info; |
| @@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, | |||
| 295 | extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, | 302 | extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, |
| 296 | unsigned int which, u16 opcode, | 303 | unsigned int which, u16 opcode, |
| 297 | const char *class, const char *method); | 304 | const char *class, const char *method); |
| 305 | extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, | ||
| 306 | u16 opcode, const char *name, const void *value, | ||
| 307 | size_t size, u8 cmp_op, u8 cmp_mode); | ||
| 298 | extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, | 308 | extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, |
| 299 | unsigned int which, u16 opcode, | 309 | unsigned int which, u16 opcode, |
| 300 | u64 cookie, u64 version, int flag); | 310 | u64 cookie, u64 version, int flag); |
| @@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, | |||
| 318 | struct ceph_file_layout *layout, | 328 | struct ceph_file_layout *layout, |
| 319 | struct ceph_vino vino, | 329 | struct ceph_vino vino, |
| 320 | u64 offset, u64 *len, | 330 | u64 offset, u64 *len, |
| 321 | int num_ops, int opcode, int flags, | 331 | unsigned int which, int num_ops, |
| 332 | int opcode, int flags, | ||
| 322 | struct ceph_snap_context *snapc, | 333 | struct ceph_snap_context *snapc, |
| 323 | u32 truncate_seq, u64 truncate_size, | 334 | u32 truncate_seq, u64 truncate_size, |
| 324 | bool use_mempool); | 335 | bool use_mempool); |
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 5f871d84ddce..13d71fe18b0c 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
| @@ -1,8 +1,10 @@ | |||
| 1 | #ifndef __FS_CEPH_PAGELIST_H | 1 | #ifndef __FS_CEPH_PAGELIST_H |
| 2 | #define __FS_CEPH_PAGELIST_H | 2 | #define __FS_CEPH_PAGELIST_H |
| 3 | 3 | ||
| 4 | #include <linux/list.h> | 4 | #include <asm/byteorder.h> |
| 5 | #include <linux/atomic.h> | 5 | #include <linux/atomic.h> |
| 6 | #include <linux/list.h> | ||
| 7 | #include <linux/types.h> | ||
| 6 | 8 | ||
| 7 | struct ceph_pagelist { | 9 | struct ceph_pagelist { |
| 8 | struct list_head head; | 10 | struct list_head head; |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1d5196889048..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
| @@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) | |||
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /** | 115 | /** |
| 116 | * css_get_many - obtain references on the specified css | ||
| 117 | * @css: target css | ||
| 118 | * @n: number of references to get | ||
| 119 | * | ||
| 120 | * The caller must already have a reference. | ||
| 121 | */ | ||
| 122 | static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) | ||
| 123 | { | ||
| 124 | if (!(css->flags & CSS_NO_REF)) | ||
| 125 | percpu_ref_get_many(&css->refcnt, n); | ||
| 126 | } | ||
| 127 | |||
| 128 | /** | ||
| 116 | * css_tryget - try to obtain a reference on the specified css | 129 | * css_tryget - try to obtain a reference on the specified css |
| 117 | * @css: target css | 130 | * @css: target css |
| 118 | * | 131 | * |
| @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) | |||
| 159 | percpu_ref_put(&css->refcnt); | 172 | percpu_ref_put(&css->refcnt); |
| 160 | } | 173 | } |
| 161 | 174 | ||
| 175 | /** | ||
| 176 | * css_put_many - put css references | ||
| 177 | * @css: target css | ||
| 178 | * @n: number of references to put | ||
| 179 | * | ||
| 180 | * Put references obtained via css_get() and css_tryget_online(). | ||
| 181 | */ | ||
| 182 | static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) | ||
| 183 | { | ||
| 184 | if (!(css->flags & CSS_NO_REF)) | ||
| 185 | percpu_ref_put_many(&css->refcnt, n); | ||
| 186 | } | ||
| 187 | |||
| 162 | /* bits in struct cgroup flags field */ | 188 | /* bits in struct cgroup flags field */ |
| 163 | enum { | 189 | enum { |
| 164 | /* Control Group requires release notifications to userspace */ | 190 | /* Control Group requires release notifications to userspace */ |
| @@ -367,8 +393,8 @@ struct css_set { | |||
| 367 | * struct cftype: handler definitions for cgroup control files | 393 | * struct cftype: handler definitions for cgroup control files |
| 368 | * | 394 | * |
| 369 | * When reading/writing to a file: | 395 | * When reading/writing to a file: |
| 370 | * - the cgroup to use is file->f_dentry->d_parent->d_fsdata | 396 | * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata |
| 371 | * - the 'cftype' of the file is file->f_dentry->d_fsdata | 397 | * - the 'cftype' of the file is file->f_path.dentry->d_fsdata |
| 372 | */ | 398 | */ |
| 373 | 399 | ||
| 374 | /* cftype->flags */ | 400 | /* cftype->flags */ |
| @@ -612,8 +638,10 @@ struct cgroup_subsys { | |||
| 612 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); | 638 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); |
| 613 | int (*css_online)(struct cgroup_subsys_state *css); | 639 | int (*css_online)(struct cgroup_subsys_state *css); |
| 614 | void (*css_offline)(struct cgroup_subsys_state *css); | 640 | void (*css_offline)(struct cgroup_subsys_state *css); |
| 641 | void (*css_released)(struct cgroup_subsys_state *css); | ||
| 615 | void (*css_free)(struct cgroup_subsys_state *css); | 642 | void (*css_free)(struct cgroup_subsys_state *css); |
| 616 | void (*css_reset)(struct cgroup_subsys_state *css); | 643 | void (*css_reset)(struct cgroup_subsys_state *css); |
| 644 | void (*css_e_css_changed)(struct cgroup_subsys_state *css); | ||
| 617 | 645 | ||
| 618 | int (*can_attach)(struct cgroup_subsys_state *css, | 646 | int (*can_attach)(struct cgroup_subsys_state *css, |
| 619 | struct cgroup_taskset *tset); | 647 | struct cgroup_taskset *tset); |
| @@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it); | |||
| 908 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); | 936 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
| 909 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); | 937 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); |
| 910 | 938 | ||
| 939 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, | ||
| 940 | struct cgroup_subsys *ss); | ||
| 911 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | 941 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, |
| 912 | struct cgroup_subsys *ss); | 942 | struct cgroup_subsys *ss); |
| 913 | 943 | ||
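css_get_many()/css_put_many() avoid issuing n separate percpu-ref operations when a controller charges or uncharges many units at once (memcg pages being the motivating case); as the kernel-doc above notes, the caller must already hold a reference before bumping the count in bulk. A trivial sketch with an illustrative controller helper:

#include <linux/cgroup.h>

/* pin one css reference per charged page, drop them again in bulk */
static void foo_charge_pages(struct cgroup_subsys_state *css,
			     unsigned int nr_pages)
{
	css_get_many(css, nr_pages);
}

static void foo_uncharge_pages(struct cgroup_subsys_state *css,
			       unsigned int nr_pages)
{
	css_put_many(css, nr_pages);
}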
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2839c639f092..d936409520f8 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
| @@ -176,7 +176,7 @@ struct clk_ops { | |||
| 176 | unsigned long *parent_rate); | 176 | unsigned long *parent_rate); |
| 177 | long (*determine_rate)(struct clk_hw *hw, unsigned long rate, | 177 | long (*determine_rate)(struct clk_hw *hw, unsigned long rate, |
| 178 | unsigned long *best_parent_rate, | 178 | unsigned long *best_parent_rate, |
| 179 | struct clk **best_parent_clk); | 179 | struct clk_hw **best_parent_hw); |
| 180 | int (*set_parent)(struct clk_hw *hw, u8 index); | 180 | int (*set_parent)(struct clk_hw *hw, u8 index); |
| 181 | u8 (*get_parent)(struct clk_hw *hw); | 181 | u8 (*get_parent)(struct clk_hw *hw); |
| 182 | int (*set_rate)(struct clk_hw *hw, unsigned long rate, | 182 | int (*set_rate)(struct clk_hw *hw, unsigned long rate, |
| @@ -544,16 +544,14 @@ u8 __clk_get_num_parents(struct clk *clk); | |||
| 544 | struct clk *__clk_get_parent(struct clk *clk); | 544 | struct clk *__clk_get_parent(struct clk *clk); |
| 545 | struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); | 545 | struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); |
| 546 | unsigned int __clk_get_enable_count(struct clk *clk); | 546 | unsigned int __clk_get_enable_count(struct clk *clk); |
| 547 | unsigned int __clk_get_prepare_count(struct clk *clk); | ||
| 548 | unsigned long __clk_get_rate(struct clk *clk); | 547 | unsigned long __clk_get_rate(struct clk *clk); |
| 549 | unsigned long __clk_get_accuracy(struct clk *clk); | ||
| 550 | unsigned long __clk_get_flags(struct clk *clk); | 548 | unsigned long __clk_get_flags(struct clk *clk); |
| 551 | bool __clk_is_prepared(struct clk *clk); | 549 | bool __clk_is_prepared(struct clk *clk); |
| 552 | bool __clk_is_enabled(struct clk *clk); | 550 | bool __clk_is_enabled(struct clk *clk); |
| 553 | struct clk *__clk_lookup(const char *name); | 551 | struct clk *__clk_lookup(const char *name); |
| 554 | long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, | 552 | long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, |
| 555 | unsigned long *best_parent_rate, | 553 | unsigned long *best_parent_rate, |
| 556 | struct clk **best_parent_p); | 554 | struct clk_hw **best_parent_p); |
| 557 | 555 | ||
| 558 | /* | 556 | /* |
| 559 | * FIXME clock api without lock protection | 557 | * FIXME clock api without lock protection |
| @@ -652,7 +650,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg) | |||
| 652 | #endif /* platform dependent I/O accessors */ | 650 | #endif /* platform dependent I/O accessors */ |
| 653 | 651 | ||
| 654 | #ifdef CONFIG_DEBUG_FS | 652 | #ifdef CONFIG_DEBUG_FS |
| 655 | struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, | 653 | struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, |
| 656 | void *data, const struct file_operations *fops); | 654 | void *data, const struct file_operations *fops); |
| 657 | #endif | 655 | #endif |
| 658 | 656 | ||
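The ->determine_rate() callback now reports the preferred parent as a struct clk_hw pointer rather than a struct clk. A minimal sketch of a mux clock adapting to the new prototype, using hypothetical my_mux_* names and delegating to the generic helper declared in the hunk above:

#include <linux/clk-provider.h>

static long my_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *best_parent_rate,
                                  struct clk_hw **best_parent_hw)
{
        /* The generic mux helper already follows the new signature and
         * hands back the chosen parent as a struct clk_hw. */
        return __clk_mux_determine_rate(hw, rate, best_parent_rate,
                                        best_parent_hw);
}

static const struct clk_ops my_mux_ops = {
        .determine_rate = my_mux_determine_rate,
};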
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 74e5341463c9..55ef529a0dbf 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h | |||
| @@ -264,7 +264,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, | |||
| 264 | long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, | 264 | long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, |
| 265 | unsigned long rate, | 265 | unsigned long rate, |
| 266 | unsigned long *best_parent_rate, | 266 | unsigned long *best_parent_rate, |
| 267 | struct clk **best_parent_clk); | 267 | struct clk_hw **best_parent_clk); |
| 268 | unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, | 268 | unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, |
| 269 | unsigned long parent_rate); | 269 | unsigned long parent_rate); |
| 270 | long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, | 270 | long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, |
| @@ -273,7 +273,7 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, | |||
| 273 | long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, | 273 | long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, |
| 274 | unsigned long rate, | 274 | unsigned long rate, |
| 275 | unsigned long *best_parent_rate, | 275 | unsigned long *best_parent_rate, |
| 276 | struct clk **best_parent_clk); | 276 | struct clk_hw **best_parent_clk); |
| 277 | u8 omap2_init_dpll_parent(struct clk_hw *hw); | 277 | u8 omap2_init_dpll_parent(struct clk_hw *hw); |
| 278 | unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); | 278 | unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); |
| 279 | long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, | 279 | long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, |
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h new file mode 100644 index 000000000000..4d1019d56f7f --- /dev/null +++ b/include/linux/clock_cooling.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | /* | ||
| 2 | * linux/include/linux/clock_cooling.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com> | ||
| 5 | * | ||
| 6 | * Copyright (C) 2013 Texas Instruments Inc. | ||
| 7 | * Contact: Eduardo Valentin <eduardo.valentin@ti.com> | ||
| 8 | * | ||
| 9 | * Heavily based on cpu_cooling.c. | ||
| 10 | * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) | ||
| 11 | * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify | ||
| 14 | * it under the terms of the GNU General Public License as published by | ||
| 15 | * the Free Software Foundation; version 2 of the License. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope that it will be useful, but | ||
| 18 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 20 | * General Public License for more details. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef __CPU_COOLING_H__ | ||
| 24 | #define __CPU_COOLING_H__ | ||
| 25 | |||
| 26 | #include <linux/of.h> | ||
| 27 | #include <linux/thermal.h> | ||
| 28 | #include <linux/cpumask.h> | ||
| 29 | |||
| 30 | #ifdef CONFIG_CLOCK_THERMAL | ||
| 31 | /** | ||
| 32 | * clock_cooling_register - function to create clock cooling device. | ||
| 33 | * @dev: struct device pointer to the device used as clock cooling device. | ||
| 34 | * @clock_name: name of the clock used as the cooling mechanism. | ||
| 35 | */ | ||
| 36 | struct thermal_cooling_device * | ||
| 37 | clock_cooling_register(struct device *dev, const char *clock_name); | ||
| 38 | |||
| 39 | /** | ||
| 40 | * clock_cooling_unregister - function to remove clock cooling device. | ||
| 41 | * @cdev: thermal cooling device pointer. | ||
| 42 | */ | ||
| 43 | void clock_cooling_unregister(struct thermal_cooling_device *cdev); | ||
| 44 | |||
| 45 | unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, | ||
| 46 | unsigned long freq); | ||
| 47 | #else /* !CONFIG_CLOCK_THERMAL */ | ||
| 48 | static inline struct thermal_cooling_device * | ||
| 49 | clock_cooling_register(struct device *dev, const char *clock_name) | ||
| 50 | { | ||
| 51 | return NULL; | ||
| 52 | } | ||
| 53 | static inline | ||
| 54 | void clock_cooling_unregister(struct thermal_cooling_device *cdev) | ||
| 55 | { | ||
| 56 | } | ||
| 57 | static inline | ||
| 58 | unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, | ||
| 59 | unsigned long freq) | ||
| 60 | { | ||
| 61 | return THERMAL_CSTATE_INVALID; | ||
| 62 | } | ||
| 63 | #endif /* CONFIG_CLOCK_THERMAL */ | ||
| 64 | |||
| 65 | #endif /* __CPU_COOLING_H__ */ | ||
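A rough usage sketch of the new API, assuming a driver with a struct device *dev and a clock named "throttle_clk" (both names are invented for the example; error handling follows the usual ERR_PTR convention of the thermal core):

#include <linux/clock_cooling.h>
#include <linux/err.h>

static struct thermal_cooling_device *my_cdev;

static int my_cooling_setup(struct device *dev)
{
        /* Bind a cooling device to the clock used to throttle the block. */
        my_cdev = clock_cooling_register(dev, "throttle_clk");
        if (IS_ERR(my_cdev))
                return PTR_ERR(my_cdev);
        return 0;
}

static void my_cooling_teardown(void)
{
        clock_cooling_unregister(my_cdev);
}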
diff --git a/include/linux/cma.h b/include/linux/cma.h index a93438beb33c..9384ba66e975 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | struct cma; | 16 | struct cma; |
| 17 | 17 | ||
| 18 | extern unsigned long totalcma_pages; | ||
| 18 | extern phys_addr_t cma_get_base(struct cma *cma); | 19 | extern phys_addr_t cma_get_base(struct cma *cma); |
| 19 | extern unsigned long cma_get_size(struct cma *cma); | 20 | extern unsigned long cma_get_size(struct cma *cma); |
| 20 | 21 | ||
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 60bdf8dc02a3..3238ffa33f68 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); | |||
| 33 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, | 33 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, |
| 34 | int order, gfp_t gfp_mask, nodemask_t *mask, | 34 | int order, gfp_t gfp_mask, nodemask_t *mask, |
| 35 | enum migrate_mode mode, int *contended, | 35 | enum migrate_mode mode, int *contended, |
| 36 | struct zone **candidate_zone); | 36 | int alloc_flags, int classzone_idx); |
| 37 | extern void compact_pgdat(pg_data_t *pgdat, int order); | 37 | extern void compact_pgdat(pg_data_t *pgdat, int order); |
| 38 | extern void reset_isolation_suitable(pg_data_t *pgdat); | 38 | extern void reset_isolation_suitable(pg_data_t *pgdat); |
| 39 | extern unsigned long compaction_suitable(struct zone *zone, int order); | 39 | extern unsigned long compaction_suitable(struct zone *zone, int order, |
| 40 | int alloc_flags, int classzone_idx); | ||
| 40 | 41 | ||
| 41 | /* Do not skip compaction more than 64 times */ | 42 | /* Do not skip compaction more than 64 times */ |
| 42 | #define COMPACT_MAX_DEFER_SHIFT 6 | 43 | #define COMPACT_MAX_DEFER_SHIFT 6 |
| @@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) | |||
| 103 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, | 104 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, |
| 104 | int order, gfp_t gfp_mask, nodemask_t *nodemask, | 105 | int order, gfp_t gfp_mask, nodemask_t *nodemask, |
| 105 | enum migrate_mode mode, int *contended, | 106 | enum migrate_mode mode, int *contended, |
| 106 | struct zone **candidate_zone) | 107 | int alloc_flags, int classzone_idx) |
| 107 | { | 108 | { |
| 108 | return COMPACT_CONTINUE; | 109 | return COMPACT_CONTINUE; |
| 109 | } | 110 | } |
| @@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) | |||
| 116 | { | 117 | { |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | static inline unsigned long compaction_suitable(struct zone *zone, int order) | 120 | static inline unsigned long compaction_suitable(struct zone *zone, int order, |
| 121 | int alloc_flags, int classzone_idx) | ||
| 120 | { | 122 | { |
| 121 | return COMPACT_SKIPPED; | 123 | return COMPACT_SKIPPED; |
| 122 | } | 124 | } |
diff --git a/include/linux/compat.h b/include/linux/compat.h index e6494261eaff..7450ca2ac1fc 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
| @@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); | |||
| 357 | 357 | ||
| 358 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, | 358 | asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, |
| 359 | const compat_uptr_t __user *envp); | 359 | const compat_uptr_t __user *envp); |
| 360 | asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, | ||
| 361 | const compat_uptr_t __user *argv, | ||
| 362 | const compat_uptr_t __user *envp, int flags); | ||
| 360 | 363 | ||
| 361 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, | 364 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, |
| 362 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, | 365 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d5ad7b1118fc..a1c81f80978e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 186 | # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) | 186 | # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
| 187 | #endif | 187 | #endif |
| 188 | 188 | ||
| 189 | #include <uapi/linux/types.h> | ||
| 190 | |||
| 191 | static __always_inline void data_access_exceeds_word_size(void) | ||
| 192 | #ifdef __compiletime_warning | ||
| 193 | __compiletime_warning("data access exceeds word size and won't be atomic") | ||
| 194 | #endif | ||
| 195 | ; | ||
| 196 | |||
| 197 | static __always_inline void data_access_exceeds_word_size(void) | ||
| 198 | { | ||
| 199 | } | ||
| 200 | |||
| 201 | static __always_inline void __read_once_size(volatile void *p, void *res, int size) | ||
| 202 | { | ||
| 203 | switch (size) { | ||
| 204 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | ||
| 205 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | ||
| 206 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | ||
| 207 | #ifdef CONFIG_64BIT | ||
| 208 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | ||
| 209 | #endif | ||
| 210 | default: | ||
| 211 | barrier(); | ||
| 212 | __builtin_memcpy((void *)res, (const void *)p, size); | ||
| 213 | data_access_exceeds_word_size(); | ||
| 214 | barrier(); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | static __always_inline void __assign_once_size(volatile void *p, void *res, int size) | ||
| 219 | { | ||
| 220 | switch (size) { | ||
| 221 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | ||
| 222 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; | ||
| 223 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; | ||
| 224 | #ifdef CONFIG_64BIT | ||
| 225 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | ||
| 226 | #endif | ||
| 227 | default: | ||
| 228 | barrier(); | ||
| 229 | __builtin_memcpy((void *)p, (const void *)res, size); | ||
| 230 | data_access_exceeds_word_size(); | ||
| 231 | barrier(); | ||
| 232 | } | ||
| 233 | } | ||
| 234 | |||
| 235 | /* | ||
| 236 | * Prevent the compiler from merging or refetching reads or writes. The | ||
| 237 | * compiler is also forbidden from reordering successive instances of | ||
| 238 | * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the | ||
| 239 | * compiler is aware of some particular ordering. One way to make the | ||
| 240 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | ||
| 241 | * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. | ||
| 242 | * | ||
| 243 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | ||
| 244 | * data types like structs or unions. If the size of the accessed data | ||
| 245 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | ||
| 246 | * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a | ||
| 247 | * compile-time warning. | ||
| 248 | * | ||
| 249 | * Their two major use cases are: (1) Mediating communication between | ||
| 250 | * process-level code and irq/NMI handlers, all running on the same CPU, | ||
| 251 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | ||
| 252 | * mutilate accesses that either do not require ordering or that interact | ||
| 253 | * with an explicit memory barrier or atomic instruction that provides the | ||
| 254 | * required ordering. | ||
| 255 | */ | ||
| 256 | |||
| 257 | #define READ_ONCE(x) \ | ||
| 258 | ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) | ||
| 259 | |||
| 260 | #define ASSIGN_ONCE(val, x) \ | ||
| 261 | ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) | ||
| 262 | |||
| 189 | #endif /* __KERNEL__ */ | 263 | #endif /* __KERNEL__ */ |
| 190 | 264 | ||
| 191 | #endif /* __ASSEMBLY__ */ | 265 | #endif /* __ASSEMBLY__ */ |
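To make the comment above concrete, a lockless flag shared between process context and an interrupt handler could be accessed with the new macros roughly like this (shared_state and the helper names are invented for the illustration):

#include <linux/compiler.h>

static struct shared_state {
        int ready;
} state;

/* Writer side (process context): publish the flag exactly once. */
static void publish(void)
{
        ASSIGN_ONCE(1, state.ready);
}

/* Reader side (e.g. irq handler): the compiler must not cache the load. */
static int is_published(void)
{
        return READ_ONCE(state.ready);
}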
diff --git a/include/linux/coresight.h b/include/linux/coresight.h new file mode 100644 index 000000000000..5d3c54311f7a --- /dev/null +++ b/include/linux/coresight.h | |||
| @@ -0,0 +1,263 @@ | |||
| 1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License version 2 and | ||
| 5 | * only version 2 as published by the Free Software Foundation. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef _LINUX_CORESIGHT_H | ||
| 14 | #define _LINUX_CORESIGHT_H | ||
| 15 | |||
| 16 | #include <linux/device.h> | ||
| 17 | |||
| 18 | /* Peripheral id registers (0xFD0-0xFEC) */ | ||
| 19 | #define CORESIGHT_PERIPHIDR4 0xfd0 | ||
| 20 | #define CORESIGHT_PERIPHIDR5 0xfd4 | ||
| 21 | #define CORESIGHT_PERIPHIDR6 0xfd8 | ||
| 22 | #define CORESIGHT_PERIPHIDR7 0xfdC | ||
| 23 | #define CORESIGHT_PERIPHIDR0 0xfe0 | ||
| 24 | #define CORESIGHT_PERIPHIDR1 0xfe4 | ||
| 25 | #define CORESIGHT_PERIPHIDR2 0xfe8 | ||
| 26 | #define CORESIGHT_PERIPHIDR3 0xfeC | ||
| 27 | /* Component id registers (0xFF0-0xFFC) */ | ||
| 28 | #define CORESIGHT_COMPIDR0 0xff0 | ||
| 29 | #define CORESIGHT_COMPIDR1 0xff4 | ||
| 30 | #define CORESIGHT_COMPIDR2 0xff8 | ||
| 31 | #define CORESIGHT_COMPIDR3 0xffC | ||
| 32 | |||
| 33 | #define ETM_ARCH_V3_3 0x23 | ||
| 34 | #define ETM_ARCH_V3_5 0x25 | ||
| 35 | #define PFT_ARCH_V1_0 0x30 | ||
| 36 | #define PFT_ARCH_V1_1 0x31 | ||
| 37 | |||
| 38 | #define CORESIGHT_UNLOCK 0xc5acce55 | ||
| 39 | |||
| 40 | extern struct bus_type coresight_bustype; | ||
| 41 | |||
| 42 | enum coresight_dev_type { | ||
| 43 | CORESIGHT_DEV_TYPE_NONE, | ||
| 44 | CORESIGHT_DEV_TYPE_SINK, | ||
| 45 | CORESIGHT_DEV_TYPE_LINK, | ||
| 46 | CORESIGHT_DEV_TYPE_LINKSINK, | ||
| 47 | CORESIGHT_DEV_TYPE_SOURCE, | ||
| 48 | }; | ||
| 49 | |||
| 50 | enum coresight_dev_subtype_sink { | ||
| 51 | CORESIGHT_DEV_SUBTYPE_SINK_NONE, | ||
| 52 | CORESIGHT_DEV_SUBTYPE_SINK_PORT, | ||
| 53 | CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, | ||
| 54 | }; | ||
| 55 | |||
| 56 | enum coresight_dev_subtype_link { | ||
| 57 | CORESIGHT_DEV_SUBTYPE_LINK_NONE, | ||
| 58 | CORESIGHT_DEV_SUBTYPE_LINK_MERG, | ||
| 59 | CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, | ||
| 60 | CORESIGHT_DEV_SUBTYPE_LINK_FIFO, | ||
| 61 | }; | ||
| 62 | |||
| 63 | enum coresight_dev_subtype_source { | ||
| 64 | CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, | ||
| 65 | CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, | ||
| 66 | CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, | ||
| 67 | CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, | ||
| 68 | }; | ||
| 69 | |||
| 70 | /** | ||
| 71 | * struct coresight_dev_subtype - further characterisation of a type | ||
| 72 | * @sink_subtype: type of sink this component is, as defined | ||
| 73 | by @coresight_dev_subtype_sink. | ||
| 74 | * @link_subtype: type of link this component is, as defined | ||
| 75 | by @coresight_dev_subtype_link. | ||
| 76 | * @source_subtype: type of source this component is, as defined | ||
| 77 | by @coresight_dev_subtype_source. | ||
| 78 | */ | ||
| 79 | struct coresight_dev_subtype { | ||
| 80 | enum coresight_dev_subtype_sink sink_subtype; | ||
| 81 | enum coresight_dev_subtype_link link_subtype; | ||
| 82 | enum coresight_dev_subtype_source source_subtype; | ||
| 83 | }; | ||
| 84 | |||
| 85 | /** | ||
| 86 | * struct coresight_platform_data - data harvested from the DT specification | ||
| 87 | * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. | ||
| 88 | * @name: name of the component as shown under sysfs. | ||
| 89 | * @nr_inport: number of input ports for this component. | ||
| 90 | * @outports: list of remote endpoint port numbers. | ||
| 91 | * @child_names:names of all child components connected to this device. | ||
| 92 | * @child_ports:child component port numbers the current component is | ||
| 93 | connected to. | ||
| 94 | * @nr_outport: number of output ports for this component. | ||
| 95 | * @clk: The clock this component is associated to. | ||
| 96 | */ | ||
| 97 | struct coresight_platform_data { | ||
| 98 | int cpu; | ||
| 99 | const char *name; | ||
| 100 | int nr_inport; | ||
| 101 | int *outports; | ||
| 102 | const char **child_names; | ||
| 103 | int *child_ports; | ||
| 104 | int nr_outport; | ||
| 105 | struct clk *clk; | ||
| 106 | }; | ||
| 107 | |||
| 108 | /** | ||
| 109 | * struct coresight_desc - description of a component required from drivers | ||
| 110 | * @type: as defined by @coresight_dev_type. | ||
| 111 | * @subtype: as defined by @coresight_dev_subtype. | ||
| 112 | * @ops: generic operations for this component, as defined | ||
| 113 | by @coresight_ops. | ||
| 114 | * @pdata: platform data collected from DT. | ||
| 115 | * @dev: The device entity associated to this component. | ||
| 116 | * @groups: operations specific to this component. These will end up | ||
| 117 | in the component's sysfs sub-directory. | ||
| 118 | */ | ||
| 119 | struct coresight_desc { | ||
| 120 | enum coresight_dev_type type; | ||
| 121 | struct coresight_dev_subtype subtype; | ||
| 122 | const struct coresight_ops *ops; | ||
| 123 | struct coresight_platform_data *pdata; | ||
| 124 | struct device *dev; | ||
| 125 | const struct attribute_group **groups; | ||
| 126 | }; | ||
| 127 | |||
| 128 | /** | ||
| 129 | * struct coresight_connection - representation of a single connection | ||
| 130 | * @outport: a connection's output port number. | ||
| 131 | * @child_name: remote component's name. | ||
| 132 | * @child_port: remote component's port number @output is connected to. | ||
| 133 | * @child_dev: a @coresight_device representation of the component | ||
| 134 | connected to @outport. | ||
| 135 | */ | ||
| 136 | struct coresight_connection { | ||
| 137 | int outport; | ||
| 138 | const char *child_name; | ||
| 139 | int child_port; | ||
| 140 | struct coresight_device *child_dev; | ||
| 141 | }; | ||
| 142 | |||
| 143 | /** | ||
| 144 | * struct coresight_device - representation of a device as used by the framework | ||
| 145 | * @conns: array of coresight_connections associated to this component. | ||
| 146 | * @nr_inport: number of input ports associated to this component. | ||
| 147 | * @nr_outport: number of output ports associated to this component. | ||
| 148 | * @type: as defined by @coresight_dev_type. | ||
| 149 | * @subtype: as defined by @coresight_dev_subtype. | ||
| 150 | * @ops: generic operations for this component, as defined | ||
| 151 | by @coresight_ops. | ||
| 152 | * @dev: The device entity associated to this component. | ||
| 153 | * @refcnt: keep track of what is in use. | ||
| 154 | * @path_link: link of current component into the path being enabled. | ||
| 155 | * @orphan: true if the component has connections that haven't been linked. | ||
| 156 | * @enable: 'true' if component is currently part of an active path. | ||
| 157 | * @activated: 'true' only if a _sink_ has been activated. A sink can be | ||
| 158 | activated but not yet enabled. Enabling for a _sink_ | ||
| 159 | happens when a source has been selected for it. | ||
| 160 | */ | ||
| 161 | struct coresight_device { | ||
| 162 | struct coresight_connection *conns; | ||
| 163 | int nr_inport; | ||
| 164 | int nr_outport; | ||
| 165 | enum coresight_dev_type type; | ||
| 166 | struct coresight_dev_subtype subtype; | ||
| 167 | const struct coresight_ops *ops; | ||
| 168 | struct device dev; | ||
| 169 | atomic_t *refcnt; | ||
| 170 | struct list_head path_link; | ||
| 171 | bool orphan; | ||
| 172 | bool enable; /* true only if configured as part of a path */ | ||
| 173 | bool activated; /* true only if a sink is part of a path */ | ||
| 174 | }; | ||
| 175 | |||
| 176 | #define to_coresight_device(d) container_of(d, struct coresight_device, dev) | ||
| 177 | |||
| 178 | #define source_ops(csdev) csdev->ops->source_ops | ||
| 179 | #define sink_ops(csdev) csdev->ops->sink_ops | ||
| 180 | #define link_ops(csdev) csdev->ops->link_ops | ||
| 181 | |||
| 182 | #define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \ | ||
| 183 | __mode, __get, __set, __fmt) \ | ||
| 184 | DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \ | ||
| 185 | static const struct coresight_ops_entry __name ## _entry = { \ | ||
| 186 | .name = __entry_name, \ | ||
| 187 | .mode = __mode, \ | ||
| 188 | .ops = &__name ## _ops \ | ||
| 189 | } | ||
| 190 | |||
| 191 | /** | ||
| 192 | * struct coresight_ops_sink - basic operations for a sink | ||
| 193 | * Operations available for sinks | ||
| 194 | * @enable: enables the sink. | ||
| 195 | * @disable: disables the sink. | ||
| 196 | */ | ||
| 197 | struct coresight_ops_sink { | ||
| 198 | int (*enable)(struct coresight_device *csdev); | ||
| 199 | void (*disable)(struct coresight_device *csdev); | ||
| 200 | }; | ||
| 201 | |||
| 202 | /** | ||
| 203 | * struct coresight_ops_link - basic operations for a link | ||
| 204 | * Operations available for links. | ||
| 205 | * @enable: enables flow between iport and oport. | ||
| 206 | * @disable: disables flow between iport and oport. | ||
| 207 | */ | ||
| 208 | struct coresight_ops_link { | ||
| 209 | int (*enable)(struct coresight_device *csdev, int iport, int oport); | ||
| 210 | void (*disable)(struct coresight_device *csdev, int iport, int oport); | ||
| 211 | }; | ||
| 212 | |||
| 213 | /** | ||
| 214 | * struct coresight_ops_source - basic operations for a source | ||
| 215 | * Operations available for sources. | ||
| 216 | * @trace_id: returns the value of the component's trace ID as known | ||
| 217 | to the HW. | ||
| 218 | * @enable: enables tracing from a source. | ||
| 219 | * @disable: disables tracing for a source. | ||
| 220 | */ | ||
| 221 | struct coresight_ops_source { | ||
| 222 | int (*trace_id)(struct coresight_device *csdev); | ||
| 223 | int (*enable)(struct coresight_device *csdev); | ||
| 224 | void (*disable)(struct coresight_device *csdev); | ||
| 225 | }; | ||
| 226 | |||
| 227 | struct coresight_ops { | ||
| 228 | const struct coresight_ops_sink *sink_ops; | ||
| 229 | const struct coresight_ops_link *link_ops; | ||
| 230 | const struct coresight_ops_source *source_ops; | ||
| 231 | }; | ||
| 232 | |||
| 233 | #ifdef CONFIG_CORESIGHT | ||
| 234 | extern struct coresight_device * | ||
| 235 | coresight_register(struct coresight_desc *desc); | ||
| 236 | extern void coresight_unregister(struct coresight_device *csdev); | ||
| 237 | extern int coresight_enable(struct coresight_device *csdev); | ||
| 238 | extern void coresight_disable(struct coresight_device *csdev); | ||
| 239 | extern int coresight_is_bit_set(u32 val, int position, int value); | ||
| 240 | extern int coresight_timeout(void __iomem *addr, u32 offset, | ||
| 241 | int position, int value); | ||
| 242 | #ifdef CONFIG_OF | ||
| 243 | extern struct coresight_platform_data *of_get_coresight_platform_data( | ||
| 244 | struct device *dev, struct device_node *node); | ||
| 245 | #endif | ||
| 246 | #else | ||
| 247 | static inline struct coresight_device * | ||
| 248 | coresight_register(struct coresight_desc *desc) { return NULL; } | ||
| 249 | static inline void coresight_unregister(struct coresight_device *csdev) {} | ||
| 250 | static inline int | ||
| 251 | coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } | ||
| 252 | static inline void coresight_disable(struct coresight_device *csdev) {} | ||
| 253 | static inline int coresight_is_bit_set(u32 val, int position, int value) | ||
| 254 | { return 0; } | ||
| 255 | static inline int coresight_timeout(void __iomem *addr, u32 offset, | ||
| 256 | int position, int value) { return 1; } | ||
| 257 | #ifdef CONFIG_OF | ||
| 258 | static inline struct coresight_platform_data *of_get_coresight_platform_data( | ||
| 259 | struct device *dev, struct device_node *node) { return NULL; } | ||
| 260 | #endif | ||
| 261 | #endif | ||
| 262 | |||
| 263 | #endif | ||
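For orientation, a sink driver would typically fill a coresight_desc and hand it to coresight_register() along these lines; all my_* identifiers are hypothetical, and the DT helper is only available when CONFIG_OF is set:

#include <linux/coresight.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_sink_enable(struct coresight_device *csdev) { return 0; }
static void my_sink_disable(struct coresight_device *csdev) { }

static const struct coresight_ops_sink my_sink_ops = {
        .enable  = my_sink_enable,
        .disable = my_sink_disable,
};

static const struct coresight_ops my_cs_ops = {
        .sink_ops = &my_sink_ops,
};

static int my_probe(struct platform_device *pdev)
{
        struct coresight_desc desc = { };
        struct coresight_device *csdev;

        desc.type = CORESIGHT_DEV_TYPE_SINK;
        desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
        desc.ops = &my_cs_ops;
        desc.pdata = of_get_coresight_platform_data(&pdev->dev,
                                                    pdev->dev.of_node);
        desc.dev = &pdev->dev;

        csdev = coresight_register(&desc);
        return IS_ERR_OR_NULL(csdev) ? -ENODEV : 0;
}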
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b2d9a43012b2..4260e8594bd7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | struct device; | 20 | struct device; |
| 21 | struct device_node; | 21 | struct device_node; |
| 22 | struct attribute_group; | ||
| 22 | 23 | ||
| 23 | struct cpu { | 24 | struct cpu { |
| 24 | int node_id; /* The node which contains the CPU */ | 25 | int node_id; /* The node which contains the CPU */ |
| @@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); | |||
| 39 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); | 40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); |
| 40 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); | 41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
| 41 | 42 | ||
| 43 | extern struct device *cpu_device_create(struct device *parent, void *drvdata, | ||
| 44 | const struct attribute_group **groups, | ||
| 45 | const char *fmt, ...); | ||
| 42 | #ifdef CONFIG_HOTPLUG_CPU | 46 | #ifdef CONFIG_HOTPLUG_CPU |
| 43 | extern void unregister_cpu(struct cpu *cpu); | 47 | extern void unregister_cpu(struct cpu *cpu); |
| 44 | extern ssize_t arch_cpu_probe(const char *, size_t); | 48 | extern ssize_t arch_cpu_probe(const char *, size_t); |
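The new cpu_device_create() helper creates an auxiliary device parented to a CPU's device. A hedged sketch of how a per-cpu device might be added (the attribute group array and device name are illustrative only):

#include <linux/cpu.h>
#include <linux/err.h>

static const struct attribute_group *my_attr_groups[] = { NULL };

static int my_add_aux_dev(unsigned int cpu)
{
        struct device *dev;

        /* Create "my_auxN" under the CPU device, attaching the groups. */
        dev = cpu_device_create(get_cpu_device(cpu), NULL,
                                my_attr_groups, "my_aux%u", cpu);
        return IS_ERR(dev) ? PTR_ERR(dev) : 0;
}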
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 503b085b7832..4d078cebafd2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name) | |||
| 217 | 217 | ||
| 218 | 218 | ||
| 219 | struct cpufreq_driver { | 219 | struct cpufreq_driver { |
| 220 | char name[CPUFREQ_NAME_LEN]; | 220 | char name[CPUFREQ_NAME_LEN]; |
| 221 | u8 flags; | 221 | u8 flags; |
| 222 | void *driver_data; | 222 | void *driver_data; |
| 223 | 223 | ||
| 224 | /* needed by all drivers */ | 224 | /* needed by all drivers */ |
| 225 | int (*init) (struct cpufreq_policy *policy); | 225 | int (*init)(struct cpufreq_policy *policy); |
| 226 | int (*verify) (struct cpufreq_policy *policy); | 226 | int (*verify)(struct cpufreq_policy *policy); |
| 227 | 227 | ||
| 228 | /* define one out of two */ | 228 | /* define one out of two */ |
| 229 | int (*setpolicy) (struct cpufreq_policy *policy); | 229 | int (*setpolicy)(struct cpufreq_policy *policy); |
| 230 | 230 | ||
| 231 | /* | 231 | /* |
| 232 | * On failure, should always restore frequency to policy->restore_freq | 232 | * On failure, should always restore frequency to policy->restore_freq |
| 233 | * (i.e. old freq). | 233 | * (i.e. old freq). |
| 234 | */ | 234 | */ |
| 235 | int (*target) (struct cpufreq_policy *policy, /* Deprecated */ | 235 | int (*target)(struct cpufreq_policy *policy, |
| 236 | unsigned int target_freq, | 236 | unsigned int target_freq, |
| 237 | unsigned int relation); | 237 | unsigned int relation); /* Deprecated */ |
| 238 | int (*target_index) (struct cpufreq_policy *policy, | 238 | int (*target_index)(struct cpufreq_policy *policy, |
| 239 | unsigned int index); | 239 | unsigned int index); |
| 240 | /* | 240 | /* |
| 241 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION | 241 | * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION |
| 242 | * unset. | 242 | * unset. |
| @@ -252,27 +252,31 @@ struct cpufreq_driver { | |||
| 252 | * wish to switch to intermediate frequency for some target frequency. | 252 | * wish to switch to intermediate frequency for some target frequency. |
| 253 | * In that case core will directly call ->target_index(). | 253 | * In that case core will directly call ->target_index(). |
| 254 | */ | 254 | */ |
| 255 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, | 255 | unsigned int (*get_intermediate)(struct cpufreq_policy *policy, |
| 256 | unsigned int index); | 256 | unsigned int index); |
| 257 | int (*target_intermediate)(struct cpufreq_policy *policy, | 257 | int (*target_intermediate)(struct cpufreq_policy *policy, |
| 258 | unsigned int index); | 258 | unsigned int index); |
| 259 | 259 | ||
| 260 | /* should be defined, if possible */ | 260 | /* should be defined, if possible */ |
| 261 | unsigned int (*get) (unsigned int cpu); | 261 | unsigned int (*get)(unsigned int cpu); |
| 262 | 262 | ||
| 263 | /* optional */ | 263 | /* optional */ |
| 264 | int (*bios_limit) (int cpu, unsigned int *limit); | 264 | int (*bios_limit)(int cpu, unsigned int *limit); |
| 265 | |||
| 266 | int (*exit)(struct cpufreq_policy *policy); | ||
| 267 | void (*stop_cpu)(struct cpufreq_policy *policy); | ||
| 268 | int (*suspend)(struct cpufreq_policy *policy); | ||
| 269 | int (*resume)(struct cpufreq_policy *policy); | ||
| 270 | |||
| 271 | /* Will be called after the driver is fully initialized */ | ||
| 272 | void (*ready)(struct cpufreq_policy *policy); | ||
| 265 | 273 | ||
| 266 | int (*exit) (struct cpufreq_policy *policy); | 274 | struct freq_attr **attr; |
| 267 | void (*stop_cpu) (struct cpufreq_policy *policy); | ||
| 268 | int (*suspend) (struct cpufreq_policy *policy); | ||
| 269 | int (*resume) (struct cpufreq_policy *policy); | ||
| 270 | struct freq_attr **attr; | ||
| 271 | 275 | ||
| 272 | /* platform specific boost support code */ | 276 | /* platform specific boost support code */ |
| 273 | bool boost_supported; | 277 | bool boost_supported; |
| 274 | bool boost_enabled; | 278 | bool boost_enabled; |
| 275 | int (*set_boost) (int state); | 279 | int (*set_boost)(int state); |
| 276 | }; | 280 | }; |
| 277 | 281 | ||
| 278 | /* flags */ | 282 | /* flags */ |
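With the callbacks regrouped and ->ready() added, a minimal table-based driver skeleton might now look as follows; the my_* functions are placeholders, while the cpufreq_generic_* helpers are the existing generic ones exported by the core:

#include <linux/cpufreq.h>

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Set up the frequency table and clock for this policy here,
         * e.g. via cpufreq_table_validate_and_show(). */
        return 0;
}

static int my_cpufreq_target_index(struct cpufreq_policy *policy,
                                   unsigned int index)
{
        /* Program the hardware to the frequency selected by @index. */
        return 0;
}

static void my_cpufreq_ready(struct cpufreq_policy *policy)
{
        /* Called once the core has fully initialised the policy; a
         * convenient spot to register a cooling device, for example. */
}

static struct cpufreq_driver my_cpufreq_driver = {
        .name         = "my-cpufreq",
        .init         = my_cpufreq_init,
        .verify       = cpufreq_generic_frequency_table_verify,
        .target_index = my_cpufreq_target_index,
        .get          = cpufreq_generic_get,
        .ready        = my_cpufreq_ready,
        .attr         = cpufreq_generic_attr,
};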
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 25e0df6155a4..a07e087f54b2 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -53,7 +53,7 @@ struct cpuidle_state { | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | /* Idle State Flags */ | 55 | /* Idle State Flags */ |
| 56 | #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ | 56 | #define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */ |
| 57 | #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ | 57 | #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ |
| 58 | #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ | 58 | #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ |
| 59 | 59 | ||
| @@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); | |||
| 90 | * cpuidle_get_last_residency - retrieves the last state's residency time | 90 | * cpuidle_get_last_residency - retrieves the last state's residency time |
| 91 | * @dev: the target CPU | 91 | * @dev: the target CPU |
| 92 | * | 92 | * |
| 93 | * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set | 93 | * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set |
| 94 | */ | 94 | */ |
| 95 | static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | 95 | static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) |
| 96 | { | 96 | { |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0a9a6da21e74..b950e9d6008b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
| @@ -803,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) | |||
| 803 | } | 803 | } |
| 804 | #endif /* NR_CPUS > BITS_PER_LONG */ | 804 | #endif /* NR_CPUS > BITS_PER_LONG */ |
| 805 | 805 | ||
| 806 | /** | ||
| 807 | * cpumap_print_to_pagebuf - copies the cpumask into the buffer either | ||
| 808 | * as comma-separated list of cpus or hex values of cpumask | ||
| 809 | * @list: print a comma-separated cpu list if true, a hex mask if false | ||
| 810 | * @mask: the cpumask to copy | ||
| 811 | * @buf: the buffer to copy into | ||
| 812 | * | ||
| 813 | * Returns the length of the (null-terminated) @buf string, zero if | ||
| 814 | * nothing is copied. | ||
| 815 | */ | ||
| 816 | static inline ssize_t | ||
| 817 | cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) | ||
| 818 | { | ||
| 819 | return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), | ||
| 820 | nr_cpumask_bits); | ||
| 821 | } | ||
| 822 | |||
| 806 | /* | 823 | /* |
| 807 | * | 824 | * |
| 808 | * From here down, all obsolete. Use cpumask_ variants! | 825 | * From here down, all obsolete. Use cpumask_ variants! |
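For example, a sysfs show() method exposing a driver-private mask can now be reduced to a single call (the attribute and mask names here are invented):

#include <linux/cpumask.h>
#include <linux/device.h>

static struct cpumask my_mask;

static ssize_t affinity_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        /* Pass true instead of false to emit a cpu list such as "0-3,8". */
        return cpumap_print_to_pagebuf(false, buf, &my_mask);
}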
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 2f073db7392e..1b357997cac5 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | |||
| 48 | void cpuset_init_current_mems_allowed(void); | 48 | void cpuset_init_current_mems_allowed(void); |
| 49 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); | 49 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); |
| 50 | 50 | ||
| 51 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); | 51 | extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); |
| 52 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | ||
| 53 | 52 | ||
| 54 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 53 | static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) |
| 55 | { | 54 | { |
| 56 | return nr_cpusets() <= 1 || | 55 | return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); |
| 57 | __cpuset_node_allowed_softwall(node, gfp_mask); | ||
| 58 | } | 56 | } |
| 59 | 57 | ||
| 60 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | 58 | static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
| 61 | { | 59 | { |
| 62 | return nr_cpusets() <= 1 || | 60 | return cpuset_node_allowed(zone_to_nid(z), gfp_mask); |
| 63 | __cpuset_node_allowed_hardwall(node, gfp_mask); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
| 67 | { | ||
| 68 | return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | ||
| 72 | { | ||
| 73 | return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | ||
| 74 | } | 61 | } |
| 75 | 62 | ||
| 76 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | 63 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
| @@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |||
| 179 | return 1; | 166 | return 1; |
| 180 | } | 167 | } |
| 181 | 168 | ||
| 182 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 169 | static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) |
| 183 | { | ||
| 184 | return 1; | ||
| 185 | } | ||
| 186 | |||
| 187 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | ||
| 188 | { | ||
| 189 | return 1; | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
| 193 | { | 170 | { |
| 194 | return 1; | 171 | return 1; |
| 195 | } | 172 | } |
| 196 | 173 | ||
| 197 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | 174 | static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
| 198 | { | 175 | { |
| 199 | return 1; | 176 | return 1; |
| 200 | } | 177 | } |
diff --git a/include/linux/cred.h b/include/linux/cred.h index b2d0820837c4..2fb2ca2127ed 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
| @@ -68,6 +68,7 @@ extern void groups_free(struct group_info *); | |||
| 68 | extern int set_current_groups(struct group_info *); | 68 | extern int set_current_groups(struct group_info *); |
| 69 | extern void set_groups(struct cred *, struct group_info *); | 69 | extern void set_groups(struct cred *, struct group_info *); |
| 70 | extern int groups_search(const struct group_info *, kgid_t); | 70 | extern int groups_search(const struct group_info *, kgid_t); |
| 71 | extern bool may_setgroups(void); | ||
| 71 | 72 | ||
| 72 | /* access the groups "array" with this macro */ | 73 | /* access the groups "array" with this macro */ |
| 73 | #define GROUP_AT(gi, i) \ | 74 | #define GROUP_AT(gi, i) \ |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d45e949699ea..9c8776d0ada8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -26,6 +26,19 @@ | |||
| 26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
| 27 | 27 | ||
| 28 | /* | 28 | /* |
| 29 | * Autoloaded crypto modules should only use a prefixed name to avoid allowing | ||
| 30 | * arbitrary modules to be loaded. Loading from userspace may still need the | ||
| 31 | * unprefixed names, so those aliases are retained as well. | ||
| 32 | * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 | ||
| 33 | * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro | ||
| 34 | * expands twice on the same line. Instead, use a separate base name for the | ||
| 35 | * alias. | ||
| 36 | */ | ||
| 37 | #define MODULE_ALIAS_CRYPTO(name) \ | ||
| 38 | __MODULE_INFO(alias, alias_userspace, name); \ | ||
| 39 | __MODULE_INFO(alias, alias_crypto, "crypto-" name) | ||
| 40 | |||
| 41 | /* | ||
| 29 | * Algorithm masks and types. | 42 | * Algorithm masks and types. |
| 30 | */ | 43 | */ |
| 31 | #define CRYPTO_ALG_TYPE_MASK 0x0000000f | 44 | #define CRYPTO_ALG_TYPE_MASK 0x0000000f |
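As a usage illustration, a module implementing a hypothetical "foo" algorithm declares both the bare and the prefixed alias with one line:

/* Allows the module to be autoloaded as "crypto-foo" by the crypto core
 * while keeping the legacy "foo" alias for userspace requests. */
MODULE_ALIAS_CRYPTO("foo");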
| @@ -127,6 +140,13 @@ struct skcipher_givcrypt_request; | |||
| 127 | 140 | ||
| 128 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); | 141 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); |
| 129 | 142 | ||
| 143 | /** | ||
| 144 | * DOC: Block Cipher Context Data Structures | ||
| 145 | * | ||
| 146 | * These data structures define the operating context for each block cipher | ||
| 147 | * type. | ||
| 148 | */ | ||
| 149 | |||
| 130 | struct crypto_async_request { | 150 | struct crypto_async_request { |
| 131 | struct list_head list; | 151 | struct list_head list; |
| 132 | crypto_completion_t complete; | 152 | crypto_completion_t complete; |
| @@ -194,9 +214,63 @@ struct hash_desc { | |||
| 194 | u32 flags; | 214 | u32 flags; |
| 195 | }; | 215 | }; |
| 196 | 216 | ||
| 197 | /* | 217 | /** |
| 198 | * Algorithms: modular crypto algorithm implementations, managed | 218 | * DOC: Block Cipher Algorithm Definitions |
| 199 | * via crypto_register_alg() and crypto_unregister_alg(). | 219 | * |
| 220 | * These data structures define modular crypto algorithm implementations, | ||
| 221 | * managed via crypto_register_alg() and crypto_unregister_alg(). | ||
| 222 | */ | ||
| 223 | |||
| 224 | /** | ||
| 225 | * struct ablkcipher_alg - asynchronous block cipher definition | ||
| 226 | * @min_keysize: Minimum key size supported by the transformation. This is the | ||
| 227 | * smallest key length supported by this transformation algorithm. | ||
| 228 | * This must be set to one of the pre-defined values as this is | ||
| 229 | * not hardware specific. Possible values for this field can be | ||
| 230 | * found via git grep "_MIN_KEY_SIZE" include/crypto/ | ||
| 231 | * @max_keysize: Maximum key size supported by the transformation. This is the | ||
| 232 | * largest key length supported by this transformation algorithm. | ||
| 233 | * This must be set to one of the pre-defined values as this is | ||
| 234 | * not hardware specific. Possible values for this field can be | ||
| 235 | * found via git grep "_MAX_KEY_SIZE" include/crypto/ | ||
| 236 | * @setkey: Set key for the transformation. This function is used to either | ||
| 237 | * program a supplied key into the hardware or store the key in the | ||
| 238 | * transformation context for programming it later. Note that this | ||
| 239 | * function does modify the transformation context. This function can | ||
| 240 | * be called multiple times during the existence of the transformation | ||
| 241 | * object, so one must make sure the key is properly reprogrammed into | ||
| 242 | * the hardware. This function is also responsible for checking the key | ||
| 243 | * length for validity. In case a software fallback was put in place in | ||
| 244 | * the @cra_init call, this function might need to use the fallback if | ||
| 245 | * the algorithm doesn't support all of the key sizes. | ||
| 246 | * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt | ||
| 247 | * the supplied scatterlist containing the blocks of data. The crypto | ||
| 248 | * API consumer is responsible for aligning the entries of the | ||
| 249 | * scatterlist properly and making sure the chunks are correctly | ||
| 250 | * sized. In case a software fallback was put in place in the | ||
| 251 | * @cra_init call, this function might need to use the fallback if | ||
| 252 | * the algorithm doesn't support all of the key sizes. In case the | ||
| 253 | * key was stored in transformation context, the key might need to be | ||
| 254 | * re-programmed into the hardware in this function. This function | ||
| 255 | * shall not modify the transformation context, as this function may | ||
| 256 | * be called in parallel with the same transformation object. | ||
| 257 | * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt | ||
| 258 | * and the conditions are exactly the same. | ||
| 259 | * @givencrypt: Update the IV for encryption. With this function, a cipher | ||
| 260 | * implementation may provide the function on how to update the IV | ||
| 261 | * for encryption. | ||
| 262 | * @givdecrypt: Update the IV for decryption. This is the reverse of | ||
| 263 | * @givencrypt . | ||
| 264 | * @geniv: The transformation implementation may use an "IV generator" provided | ||
| 265 | * by the kernel crypto API. Several use cases have a predefined | ||
| 266 | * approach how IVs are to be updated. For such use cases, the kernel | ||
| 267 | * crypto API provides ready-to-use implementations that can be | ||
| 268 | * referenced with this variable. | ||
| 269 | * @ivsize: IV size applicable for transformation. The consumer must provide an | ||
| 270 | * IV of exactly that size to perform the encrypt or decrypt operation. | ||
| 271 | * | ||
| 272 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
| 273 | * mandatory and must be filled. | ||
| 200 | */ | 274 | */ |
| 201 | struct ablkcipher_alg { | 275 | struct ablkcipher_alg { |
| 202 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, | 276 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, |
| @@ -213,6 +287,32 @@ struct ablkcipher_alg { | |||
| 213 | unsigned int ivsize; | 287 | unsigned int ivsize; |
| 214 | }; | 288 | }; |
| 215 | 289 | ||
| 290 | /** | ||
| 291 | * struct aead_alg - AEAD cipher definition | ||
| 292 | * @maxauthsize: Set the maximum authentication tag size supported by the | ||
| 293 | * transformation. A transformation may support smaller tag sizes. | ||
| 294 | * As the authentication tag is a message digest to ensure the | ||
| 295 | * integrity of the encrypted data, a consumer typically wants the | ||
| 296 | * largest authentication tag possible as defined by this | ||
| 297 | * variable. | ||
| 298 | * @setauthsize: Set authentication size for the AEAD transformation. This | ||
| 299 | * function is used to specify the consumer requested size of the | ||
| 300 | * authentication tag to be either generated by the transformation | ||
| 301 | * during encryption or the size of the authentication tag to be | ||
| 302 | * supplied during the decryption operation. This function is also | ||
| 303 | * responsible for checking the authentication tag size for | ||
| 304 | * validity. | ||
| 305 | * @setkey: see struct ablkcipher_alg | ||
| 306 | * @encrypt: see struct ablkcipher_alg | ||
| 307 | * @decrypt: see struct ablkcipher_alg | ||
| 308 | * @givencrypt: see struct ablkcipher_alg | ||
| 309 | * @givdecrypt: see struct ablkcipher_alg | ||
| 310 | * @geniv: see struct ablkcipher_alg | ||
| 311 | * @ivsize: see struct ablkcipher_alg | ||
| 312 | * | ||
| 313 | * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are | ||
| 314 | * mandatory and must be filled. | ||
| 315 | */ | ||
| 216 | struct aead_alg { | 316 | struct aead_alg { |
| 217 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | 317 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, |
| 218 | unsigned int keylen); | 318 | unsigned int keylen); |
| @@ -228,6 +328,18 @@ struct aead_alg { | |||
| 228 | unsigned int maxauthsize; | 328 | unsigned int maxauthsize; |
| 229 | }; | 329 | }; |
| 230 | 330 | ||
| 331 | /** | ||
| 332 | * struct blkcipher_alg - synchronous block cipher definition | ||
| 333 | * @min_keysize: see struct ablkcipher_alg | ||
| 334 | * @max_keysize: see struct ablkcipher_alg | ||
| 335 | * @setkey: see struct ablkcipher_alg | ||
| 336 | * @encrypt: see struct ablkcipher_alg | ||
| 337 | * @decrypt: see struct ablkcipher_alg | ||
| 338 | * @geniv: see struct ablkcipher_alg | ||
| 339 | * @ivsize: see struct ablkcipher_alg | ||
| 340 | * | ||
| 341 | * All fields except @geniv and @ivsize are mandatory and must be filled. | ||
| 342 | */ | ||
| 231 | struct blkcipher_alg { | 343 | struct blkcipher_alg { |
| 232 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | 344 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, |
| 233 | unsigned int keylen); | 345 | unsigned int keylen); |
| @@ -245,6 +357,53 @@ struct blkcipher_alg { | |||
| 245 | unsigned int ivsize; | 357 | unsigned int ivsize; |
| 246 | }; | 358 | }; |
| 247 | 359 | ||
| 360 | /** | ||
| 361 | * struct cipher_alg - single-block symmetric ciphers definition | ||
| 362 | * @cia_min_keysize: Minimum key size supported by the transformation. This is | ||
| 363 | * the smallest key length supported by this transformation | ||
| 364 | * algorithm. This must be set to one of the pre-defined | ||
| 365 | * values as this is not hardware specific. Possible values | ||
| 366 | * for this field can be found via git grep "_MIN_KEY_SIZE" | ||
| 367 | * include/crypto/ | ||
| 368 | * @cia_max_keysize: Maximum key size supported by the transformation. This is | ||
| 369 | * the largest key length supported by this transformation | ||
| 370 | * algorithm. This must be set to one of the pre-defined values | ||
| 371 | * as this is not hardware specific. Possible values for this | ||
| 372 | * field can be found via git grep "_MAX_KEY_SIZE" | ||
| 373 | * include/crypto/ | ||
| 374 | * @cia_setkey: Set key for the transformation. This function is used to either | ||
| 375 | * program a supplied key into the hardware or store the key in the | ||
| 376 | * transformation context for programming it later. Note that this | ||
| 377 | * function does modify the transformation context. This function | ||
| 378 | * can be called multiple times during the existence of the | ||
| 379 | * transformation object, so one must make sure the key is properly | ||
| 380 | * reprogrammed into the hardware. This function is also | ||
| 381 | * responsible for checking the key length for validity. | ||
| 382 | * @cia_encrypt: Encrypt a single block. This function is used to encrypt a | ||
| 383 | * single block of data, which must be @cra_blocksize big. This | ||
| 384 | * always operates on a full @cra_blocksize and it is not possible | ||
| 385 | * to encrypt a block of smaller size. The supplied buffers must | ||
| 386 | * therefore also be at least of @cra_blocksize size. Both the | ||
| 387 | * input and output buffers are always aligned to @cra_alignmask. | ||
| 388 | * In case either of the input or output buffer supplied by user | ||
| 389 | * of the crypto API is not aligned to @cra_alignmask, the crypto | ||
| 390 | * API will re-align the buffers. The re-alignment means that a | ||
| 391 | * new buffer will be allocated, the data will be copied into the | ||
| 392 | * new buffer, then the processing will happen on the new buffer, | ||
| 393 | * then the data will be copied back into the original buffer and | ||
| 394 | * finally the new buffer will be freed. In case a software | ||
| 395 | * fallback was put in place in the @cra_init call, this function | ||
| 396 | * might need to use the fallback if the algorithm doesn't support | ||
| 397 | * all of the key sizes. In case the key was stored in | ||
| 398 | * transformation context, the key might need to be re-programmed | ||
| 399 | * into the hardware in this function. This function shall not | ||
| 400 | * modify the transformation context, as this function may be | ||
| 401 | * called in parallel with the same transformation object. | ||
| 402 | * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to | ||
| 403 | * @cia_encrypt, and the conditions are exactly the same. | ||
| 404 | * | ||
| 405 | * All fields are mandatory and must be filled. | ||
| 406 | */ | ||
| 248 | struct cipher_alg { | 407 | struct cipher_alg { |
| 249 | unsigned int cia_min_keysize; | 408 | unsigned int cia_min_keysize; |
| 250 | unsigned int cia_max_keysize; | 409 | unsigned int cia_max_keysize; |
| @@ -261,6 +420,25 @@ struct compress_alg { | |||
| 261 | unsigned int slen, u8 *dst, unsigned int *dlen); | 420 | unsigned int slen, u8 *dst, unsigned int *dlen); |
| 262 | }; | 421 | }; |
| 263 | 422 | ||
| 423 | /** | ||
| 424 | * struct rng_alg - random number generator definition | ||
| 425 | * @rng_make_random: The function defined by this variable obtains a random | ||
| 426 | * number. The random number generator transform must generate | ||
| 427 | * the random number out of the context provided with this | ||
| 428 | * call. | ||
| 429 | * @rng_reset: Reset of the random number generator by clearing the entire state. | ||
| 430 | * With the invocation of this function call, the random number | ||
| 431 | * generator shall completely reinitialize its state. If the random | ||
| 432 | * number generator requires a seed for setting up a new state, | ||
| 433 | * the seed must be provided by the consumer while invoking this | ||
| 434 | * function. The required size of the seed is defined with | ||
| 435 | * @seedsize . | ||
| 436 | * @seedsize: The seed size required for a random number generator | ||
| 437 | * initialization defined with this variable. Some random number | ||
| 438 | * generators like the SP800-90A DRBG do not require a seed as the | ||
| 439 | * seeding is implemented internally without the need of support by | ||
| 440 | * the consumer. In this case, the seed size is set to zero. | ||
| 441 | */ | ||
| 264 | struct rng_alg { | 442 | struct rng_alg { |
| 265 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, | 443 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, |
| 266 | unsigned int dlen); | 444 | unsigned int dlen); |
| @@ -277,6 +455,81 @@ struct rng_alg { | |||
| 277 | #define cra_compress cra_u.compress | 455 | #define cra_compress cra_u.compress |
| 278 | #define cra_rng cra_u.rng | 456 | #define cra_rng cra_u.rng |
| 279 | 457 | ||
| 458 | /** | ||
| 459 | * struct crypto_alg - definition of a cryptographic cipher algorithm | ||
| 460 | * @cra_flags: Flags describing this transformation. See include/linux/crypto.h | ||
| 461 | * CRYPTO_ALG_* flags for the flags which go in here. Those are | ||
| 462 | * used for fine-tuning the description of the transformation | ||
| 463 | * algorithm. | ||
| 464 | * @cra_blocksize: Minimum block size of this transformation. The size in bytes | ||
| 465 | * of the smallest possible unit which can be transformed with | ||
| 466 | * this algorithm. The users must respect this value. | ||
| 467 | * In case of HASH transformation, it is possible for a smaller | ||
| 468 | * block than @cra_blocksize to be passed to the crypto API for | ||
| 469 | * transformation, in case of any other transformation type, an | ||
| 470 | * error will be returned upon any attempt to transform smaller | ||
| 471 | * than @cra_blocksize chunks. | ||
| 472 | * @cra_ctxsize: Size of the operational context of the transformation. This | ||
| 473 | * value informs the kernel crypto API about the memory size | ||
| 474 | * needed to be allocated for the transformation context. | ||
| 475 | * @cra_alignmask: Alignment mask for the input and output data buffer. The data | ||
| 476 | * buffer containing the input data for the algorithm must be | ||
| 477 | * aligned to this alignment mask. The data buffer for the | ||
| 478 | * output data must be aligned to this alignment mask. Note that | ||
| 479 | * the Crypto API will do the re-alignment in software, but | ||
| 480 | * only under special conditions and there is a performance hit. | ||
| 481 | * The re-alignment happens at these occasions for different | ||
| 482 | * @cra_u types: cipher -- For both input data and output data | ||
| 483 | * buffer; ahash -- For output hash destination buf; shash -- | ||
| 484 | * For output hash destination buf. | ||
| 485 | * This is needed on hardware which is flawed by design and | ||
| 486 | * cannot pick data from arbitrary addresses. | ||
| 487 | * @cra_priority: Priority of this transformation implementation. In case | ||
| 488 | * multiple transformations with same @cra_name are available to | ||
| 489 | * the Crypto API, the kernel will use the one with highest | ||
| 490 | * @cra_priority. | ||
| 491 | * @cra_name: Generic name (usable by multiple implementations) of the | ||
| 492 | * transformation algorithm. This is the name of the transformation | ||
| 493 | * itself. This field is used by the kernel when looking up the | ||
| 494 | * providers of a particular transformation. | ||
| 495 | * @cra_driver_name: Unique name of the transformation provider. This is the | ||
| 496 | * name of the provider of the transformation. This can be any | ||
| 497 | * arbitrary value, but in the usual case, this contains the | ||
| 498 | * name of the chip or provider and the name of the | ||
| 499 | * transformation algorithm. | ||
| 500 | * @cra_type: Type of the cryptographic transformation. This is a pointer to | ||
| 501 | * struct crypto_type, which implements callbacks common for all | ||
| 502 | * transformation types. There are multiple options: | ||
| 503 | * &crypto_blkcipher_type, &crypto_ablkcipher_type, | ||
| 504 | * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. | ||
| 505 | * This field might be empty. In that case, there are no common | ||
| 506 | * callbacks. This is the case for: cipher, compress, shash. | ||
| 507 | * @cra_u: Callbacks implementing the transformation. This is a union of | ||
| 508 | * multiple structures. Depending on the type of transformation selected | ||
| 509 | * by @cra_type and @cra_flags above, the associated structure must be | ||
| 510 | * filled with callbacks. This field might be empty. This is the case | ||
| 511 | * for ahash, shash. | ||
| 512 | * @cra_init: Initialize the cryptographic transformation object. This function | ||
| 513 | * is used to initialize the cryptographic transformation object. | ||
| 514 | * This function is called only once at the instantiation time, right | ||
| 515 | * after the transformation context was allocated. In case the | ||
| 516 | * cryptographic hardware has some special requirements which need to | ||
| 517 | * be handled by software, this function shall check for the precise | ||
| 518 | * requirement of the transformation and put any software fallbacks | ||
| 519 | * in place. | ||
| 520 | * @cra_exit: Deinitialize the cryptographic transformation object. This is a | ||
| 521 | * counterpart to @cra_init, used to remove various changes set in | ||
| 522 | * @cra_init. | ||
| 523 | * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE | ||
| 524 | * @cra_list: internally used | ||
| 525 | * @cra_users: internally used | ||
| 526 | * @cra_refcnt: internally used | ||
| 527 | * @cra_destroy: internally used | ||
| 528 | * | ||
| 529 | * The struct crypto_alg describes a generic Crypto API algorithm and is common | ||
| 530 | * for all of the transformations. Any variable not documented here shall not | ||
| 531 | * be used by a cipher implementation as it is internal to the Crypto API. | ||
| 532 | */ | ||
| 280 | struct crypto_alg { | 533 | struct crypto_alg { |
| 281 | struct list_head cra_list; | 534 | struct list_head cra_list; |
| 282 | struct list_head cra_users; | 535 | struct list_head cra_users; |
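To make the common fields concrete, a hypothetical registration sketch; all example* names, sizes and the priority are illustrative, and the cra_u union member for the chosen transformation type would still have to be filled in:

#include <linux/crypto.h>
#include <linux/module.h>

struct example_tfm_ctx {
	u8 key[32];			/* per-tfm state, sized via cra_ctxsize */
};

static int example_cra_init(struct crypto_tfm *tfm)
{
	/* Runs once right after the transformation context was allocated;
	 * a driver would probe hardware limits or set up a fallback here. */
	return 0;
}

static void example_cra_exit(struct crypto_tfm *tfm)
{
	/* Undo whatever example_cra_init() established. */
}

static struct crypto_alg example_alg = {
	.cra_name		= "example",		/* generic algorithm name */
	.cra_driver_name	= "example-generic",	/* unique provider name */
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_ctxsize		= sizeof(struct example_tfm_ctx),
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_init		= example_cra_init,
	.cra_exit		= example_cra_exit,
	/* .cra_u.cipher = { ... } supplies the actual callbacks for this type */
};

static int __init example_mod_init(void)
{
	return crypto_register_alg(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_alg(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");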
| @@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask) | |||
| 581 | return mask; | 834 | return mask; |
| 582 | } | 835 | } |
| 583 | 836 | ||
| 837 | /** | ||
| 838 | * DOC: Asynchronous Block Cipher API | ||
| 839 | * | ||
| 840 | * Asynchronous block cipher API is used with the ciphers of type | ||
| 841 | * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). | ||
| 842 | * | ||
| 843 | * Asynchronous cipher operations imply that the function invocation for a | ||
| 844 | * cipher request returns immediately before the completion of the operation. | ||
| 845 | * The cipher request is scheduled as a separate kernel thread and therefore | ||
| 846 | * load-balanced on the different CPUs via the process scheduler. To allow | ||
| 847 | * the kernel crypto API to inform the caller about the completion of a cipher | ||
| 848 | * request, the caller must provide a callback function. That function is | ||
| 849 | * invoked with the cipher handle when the request completes. | ||
| 850 | * | ||
| 851 | * To support the asynchronous operation, additional information beyond the | ||
| 852 | * cipher handle must be supplied to the kernel crypto API. That additional | ||
| 853 | * information is given by filling in the ablkcipher_request data structure. | ||
| 854 | * | ||
| 855 | * For the asynchronous block cipher API, the state is maintained with the tfm | ||
| 856 | * cipher handle. A single tfm can be used across multiple calls and in | ||
| 857 | * parallel. For asynchronous block cipher calls, context data supplied and | ||
| 858 | * only used by the caller can be referenced in the request data structure in | ||
| 859 | * addition to the IV used for the cipher request. The maintenance of such | ||
| 860 | * state information is important for a crypto driver implementer, because | ||
| 861 | * when the callback function is invoked upon completion of the cipher | ||
| 862 | * operation, that callback function may need some information about which | ||
| 863 | * operation just finished if multiple operations were invoked in parallel. | ||
| 864 | * This state information is unused by the kernel crypto API. | ||
| 865 | */ | ||
| 866 | |||
| 867 | /** | ||
| 868 | * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle | ||
| 869 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 870 | * ablkcipher cipher | ||
| 871 | * @type: specifies the type of the cipher | ||
| 872 | * @mask: specifies the mask for the cipher | ||
| 873 | * | ||
| 874 | * Allocate a cipher handle for an ablkcipher. The returned struct | ||
| 875 | * crypto_ablkcipher is the cipher handle that is required for any subsequent | ||
| 876 | * API invocation for that ablkcipher. | ||
| 877 | * | ||
| 878 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 879 | * of an error, PTR_ERR() returns the error code. | ||
| 880 | */ | ||
| 584 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, | 881 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, |
| 585 | u32 type, u32 mask); | 882 | u32 type, u32 mask); |
| 586 | 883 | ||
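A small allocation sketch, assuming a generic "cbc(aes)" implementation is available; the example_* name is hypothetical:

#include <linux/crypto.h>
#include <linux/err.h>

static int example_get_ablkcipher(void)
{
	struct crypto_ablkcipher *tfm;

	/* Ask for any provider of the generic "cbc(aes)" transformation. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set the key, build requests, run cipher operations ... */

	crypto_free_ablkcipher(tfm);
	return 0;
}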
| @@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm( | |||
| 590 | return &tfm->base; | 887 | return &tfm->base; |
| 591 | } | 888 | } |
| 592 | 889 | ||
| 890 | /** | ||
| 891 | * crypto_free_ablkcipher() - zeroize and free cipher handle | ||
| 892 | * @tfm: cipher handle to be freed | ||
| 893 | */ | ||
| 593 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) | 894 | static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) |
| 594 | { | 895 | { |
| 595 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); | 896 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); |
| 596 | } | 897 | } |
| 597 | 898 | ||
| 899 | /** | ||
| 900 | * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. | ||
| 901 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 902 | * ablkcipher | ||
| 903 | * @type: specifies the type of the cipher | ||
| 904 | * @mask: specifies the mask for the cipher | ||
| 905 | * | ||
| 906 | * Return: true when the ablkcipher is known to the kernel crypto API; false | ||
| 907 | * otherwise | ||
| 908 | */ | ||
| 598 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, | 909 | static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, |
| 599 | u32 mask) | 910 | u32 mask) |
| 600 | { | 911 | { |
| @@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( | |||
| 608 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; | 919 | return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; |
| 609 | } | 920 | } |
| 610 | 921 | ||
| 922 | /** | ||
| 923 | * crypto_ablkcipher_ivsize() - obtain IV size | ||
| 924 | * @tfm: cipher handle | ||
| 925 | * | ||
| 926 | * The size of the IV for the ablkcipher referenced by the cipher handle is | ||
| 927 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
| 928 | * | ||
| 929 | * Return: IV size in bytes | ||
| 930 | */ | ||
| 611 | static inline unsigned int crypto_ablkcipher_ivsize( | 931 | static inline unsigned int crypto_ablkcipher_ivsize( |
| 612 | struct crypto_ablkcipher *tfm) | 932 | struct crypto_ablkcipher *tfm) |
| 613 | { | 933 | { |
| 614 | return crypto_ablkcipher_crt(tfm)->ivsize; | 934 | return crypto_ablkcipher_crt(tfm)->ivsize; |
| 615 | } | 935 | } |
| 616 | 936 | ||
| 937 | /** | ||
| 938 | * crypto_ablkcipher_blocksize() - obtain block size of cipher | ||
| 939 | * @tfm: cipher handle | ||
| 940 | * | ||
| 941 | * The block size for the ablkcipher referenced with the cipher handle is | ||
| 942 | * returned. The caller may use that information to allocate appropriate | ||
| 943 | * memory for the data returned by the encryption or decryption operation | ||
| 944 | * | ||
| 945 | * Return: block size of cipher | ||
| 946 | */ | ||
| 617 | static inline unsigned int crypto_ablkcipher_blocksize( | 947 | static inline unsigned int crypto_ablkcipher_blocksize( |
| 618 | struct crypto_ablkcipher *tfm) | 948 | struct crypto_ablkcipher *tfm) |
| 619 | { | 949 | { |
| @@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, | |||
| 643 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); | 973 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); |
| 644 | } | 974 | } |
| 645 | 975 | ||
| 976 | /** | ||
| 977 | * crypto_ablkcipher_setkey() - set key for cipher | ||
| 978 | * @tfm: cipher handle | ||
| 979 | * @key: buffer holding the key | ||
| 980 | * @keylen: length of the key in bytes | ||
| 981 | * | ||
| 982 | * The caller provided key is set for the ablkcipher referenced by the cipher | ||
| 983 | * handle. | ||
| 984 | * | ||
| 985 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
| 986 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
| 987 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
| 988 | * is performed. | ||
| 989 | * | ||
| 990 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
| 991 | */ | ||
| 646 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | 992 | static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, |
| 647 | const u8 *key, unsigned int keylen) | 993 | const u8 *key, unsigned int keylen) |
| 648 | { | 994 | { |
| @@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |||
| 651 | return crt->setkey(crt->base, key, keylen); | 997 | return crt->setkey(crt->base, key, keylen); |
| 652 | } | 998 | } |
| 653 | 999 | ||
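As a sketch of the key-length note above, a hypothetical helper that selects AES-128 by supplying a 16 byte key (the zero-initialized key is a placeholder only):

#include <linux/crypto.h>
#include <linux/printk.h>

static int example_set_aes128_key(struct crypto_ablkcipher *tfm)
{
	static const u8 key[16];	/* 16 bytes selects AES-128 */
	int ret;

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		pr_err("setkey failed: %d (flags 0x%x)\n", ret,
		       crypto_ablkcipher_get_flags(tfm));
	return ret;
}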
| 1000 | /** | ||
| 1001 | * crypto_ablkcipher_reqtfm() - obtain cipher handle from request | ||
| 1002 | * @req: ablkcipher_request out of which the cipher handle is to be obtained | ||
| 1003 | * | ||
| 1004 | * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request | ||
| 1005 | * data structure. | ||
| 1006 | * | ||
| 1007 | * Return: crypto_ablkcipher handle | ||
| 1008 | */ | ||
| 654 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( | 1009 | static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( |
| 655 | struct ablkcipher_request *req) | 1010 | struct ablkcipher_request *req) |
| 656 | { | 1011 | { |
| 657 | return __crypto_ablkcipher_cast(req->base.tfm); | 1012 | return __crypto_ablkcipher_cast(req->base.tfm); |
| 658 | } | 1013 | } |
| 659 | 1014 | ||
| 1015 | /** | ||
| 1016 | * crypto_ablkcipher_encrypt() - encrypt plaintext | ||
| 1017 | * @req: reference to the ablkcipher_request handle that holds all information | ||
| 1018 | * needed to perform the cipher operation | ||
| 1019 | * | ||
| 1020 | * Encrypt plaintext data using the ablkcipher_request handle. That data | ||
| 1021 | * structure and how it is filled with data is discussed with the | ||
| 1022 | * ablkcipher_request_* functions. | ||
| 1023 | * | ||
| 1024 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1025 | */ | ||
| 660 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | 1026 | static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) |
| 661 | { | 1027 | { |
| 662 | struct ablkcipher_tfm *crt = | 1028 | struct ablkcipher_tfm *crt = |
| @@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
| 664 | return crt->encrypt(req); | 1030 | return crt->encrypt(req); |
| 665 | } | 1031 | } |
| 666 | 1032 | ||
| 1033 | /** | ||
| 1034 | * crypto_ablkcipher_decrypt() - decrypt ciphertext | ||
| 1035 | * @req: reference to the ablkcipher_request handle that holds all information | ||
| 1036 | * needed to perform the cipher operation | ||
| 1037 | * | ||
| 1038 | * Decrypt ciphertext data using the ablkcipher_request handle. That data | ||
| 1039 | * structure and how it is filled with data is discussed with the | ||
| 1040 | * ablkcipher_request_* functions. | ||
| 1041 | * | ||
| 1042 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1043 | */ | ||
| 667 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | 1044 | static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) |
| 668 | { | 1045 | { |
| 669 | struct ablkcipher_tfm *crt = | 1046 | struct ablkcipher_tfm *crt = |
| @@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
| 671 | return crt->decrypt(req); | 1048 | return crt->decrypt(req); |
| 672 | } | 1049 | } |
| 673 | 1050 | ||
| 1051 | /** | ||
| 1052 | * DOC: Asynchronous Cipher Request Handle | ||
| 1053 | * | ||
| 1054 | * The ablkcipher_request data structure contains all pointers to data | ||
| 1055 | * required for the asynchronous cipher operation. This includes the cipher | ||
| 1056 | * handle (which can be used by multiple ablkcipher_request instances), pointer | ||
| 1057 | * to plaintext and ciphertext, asynchronous callback function, etc. It acts | ||
| 1058 | * as a handle to the ablkcipher_request_* API calls in a similar way as | ||
| 1059 | * the ablkcipher handle to the crypto_ablkcipher_* API calls. | ||
| 1060 | */ | ||
| 1061 | |||
| 1062 | /** | ||
| 1063 | * crypto_ablkcipher_reqsize() - obtain size of the request data structure | ||
| 1064 | * @tfm: cipher handle | ||
| 1065 | * | ||
| 1066 | * Return: number of bytes | ||
| 1067 | */ | ||
| 674 | static inline unsigned int crypto_ablkcipher_reqsize( | 1068 | static inline unsigned int crypto_ablkcipher_reqsize( |
| 675 | struct crypto_ablkcipher *tfm) | 1069 | struct crypto_ablkcipher *tfm) |
| 676 | { | 1070 | { |
| 677 | return crypto_ablkcipher_crt(tfm)->reqsize; | 1071 | return crypto_ablkcipher_crt(tfm)->reqsize; |
| 678 | } | 1072 | } |
| 679 | 1073 | ||
| 1074 | /** | ||
| 1075 | * ablkcipher_request_set_tfm() - update cipher handle reference in request | ||
| 1076 | * @req: request handle to be modified | ||
| 1077 | * @tfm: cipher handle that shall be added to the request handle | ||
| 1078 | * | ||
| 1079 | * Allow the caller to replace the existing ablkcipher handle in the request | ||
| 1080 | * data structure with a different one. | ||
| 1081 | */ | ||
| 680 | static inline void ablkcipher_request_set_tfm( | 1082 | static inline void ablkcipher_request_set_tfm( |
| 681 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) | 1083 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) |
| 682 | { | 1084 | { |
| @@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( | |||
| 689 | return container_of(req, struct ablkcipher_request, base); | 1091 | return container_of(req, struct ablkcipher_request, base); |
| 690 | } | 1092 | } |
| 691 | 1093 | ||
| 1094 | /** | ||
| 1095 | * ablkcipher_request_alloc() - allocate request data structure | ||
| 1096 | * @tfm: cipher handle to be registered with the request | ||
| 1097 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
| 1098 | * | ||
| 1099 | * Allocate the request data structure that must be used with the ablkcipher | ||
| 1100 | * encrypt and decrypt API calls. During the allocation, the provided ablkcipher | ||
| 1101 | * handle is registered in the request data structure. | ||
| 1102 | * | ||
| 1103 | * Return: allocated request handle in case of success, or NULL in case of | ||
| 1104 | * an error (out of memory). | ||
| 1105 | */ | ||
| 692 | static inline struct ablkcipher_request *ablkcipher_request_alloc( | 1106 | static inline struct ablkcipher_request *ablkcipher_request_alloc( |
| 693 | struct crypto_ablkcipher *tfm, gfp_t gfp) | 1107 | struct crypto_ablkcipher *tfm, gfp_t gfp) |
| 694 | { | 1108 | { |
| @@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( | |||
| 703 | return req; | 1117 | return req; |
| 704 | } | 1118 | } |
| 705 | 1119 | ||
| 1120 | /** | ||
| 1121 | * ablkcipher_request_free() - zeroize and free request data structure | ||
| 1122 | * @req: request data structure cipher handle to be freed | ||
| 1123 | */ | ||
| 706 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) | 1124 | static inline void ablkcipher_request_free(struct ablkcipher_request *req) |
| 707 | { | 1125 | { |
| 708 | kzfree(req); | 1126 | kzfree(req); |
| 709 | } | 1127 | } |
| 710 | 1128 | ||
| 1129 | /** | ||
| 1130 | * ablkcipher_request_set_callback() - set asynchronous callback function | ||
| 1131 | * @req: request handle | ||
| 1132 | * @flags: specify zero or an ORing of the flags | ||
| 1133 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
| 1134 | * increase the wait queue beyond the initial maximum size; | ||
| 1135 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
| 1136 | * @compl: callback function pointer to be registered with the request handle | ||
| 1137 | * @data: The data pointer refers to memory that is not used by the kernel | ||
| 1138 | * crypto API, but provided to the callback function for it to use. Here, | ||
| 1139 | * the caller can provide a reference to memory the callback function can | ||
| 1140 | * operate on. As the callback function is invoked asynchronously to the | ||
| 1141 | * related functionality, it may need to access data structures of the | ||
| 1142 | * related functionality which can be referenced using this pointer. The | ||
| 1143 | * callback function can access the memory via the "data" field in the | ||
| 1144 | * crypto_async_request data structure provided to the callback function. | ||
| 1145 | * | ||
| 1146 | * This function allows setting the callback function that is triggered once the | ||
| 1147 | * cipher operation completes. | ||
| 1148 | * | ||
| 1149 | * The callback function is registered with the ablkcipher_request handle and | ||
| 1150 | * must comply with the following template: | ||
| 1151 | * | ||
| 1152 | * void callback_function(struct crypto_async_request *req, int error) | ||
| 1153 | */ | ||
| 711 | static inline void ablkcipher_request_set_callback( | 1154 | static inline void ablkcipher_request_set_callback( |
| 712 | struct ablkcipher_request *req, | 1155 | struct ablkcipher_request *req, |
| 713 | u32 flags, crypto_completion_t compl, void *data) | 1156 | u32 flags, crypto_completion_t compl, void *data) |
| @@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback( | |||
| 717 | req->base.flags = flags; | 1160 | req->base.flags = flags; |
| 718 | } | 1161 | } |
| 719 | 1162 | ||
| 1163 | /** | ||
| 1164 | * ablkcipher_request_set_crypt() - set data buffers | ||
| 1165 | * @req: request handle | ||
| 1166 | * @src: source scatter / gather list | ||
| 1167 | * @dst: destination scatter / gather list | ||
| 1168 | * @nbytes: number of bytes to process from @src | ||
| 1169 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
| 1170 | * by crypto_ablkcipher_ivsize | ||
| 1171 | * | ||
| 1172 | * This function allows setting of the source data and destination data | ||
| 1173 | * scatter / gather lists. | ||
| 1174 | * | ||
| 1175 | * For encryption, the source is treated as the plaintext and the | ||
| 1176 | * destination is the ciphertext. For a decryption operation, the use is | ||
| 1177 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
| 1178 | */ | ||
| 720 | static inline void ablkcipher_request_set_crypt( | 1179 | static inline void ablkcipher_request_set_crypt( |
| 721 | struct ablkcipher_request *req, | 1180 | struct ablkcipher_request *req, |
| 722 | struct scatterlist *src, struct scatterlist *dst, | 1181 | struct scatterlist *src, struct scatterlist *dst, |
| @@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt( | |||
| 728 | req->info = iv; | 1187 | req->info = iv; |
| 729 | } | 1188 | } |
| 730 | 1189 | ||
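Putting the ablkcipher calls documented above together, a minimal end-to-end sketch: it assumes "cbc(aes)" is available, buf holds a block-aligned number of bytes and is encrypted in place, and the example_* names are hypothetical.

#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_result {
	struct completion completion;
	int err;
};

static void example_cipher_done(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* request was only moved off the backlog */
	res->err = err;
	complete(&res->completion);
}

static int example_encrypt(u8 *buf, unsigned int len, const u8 *key,
			   unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cipher_done, &res);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_req;

	sg_init_one(&sg, buf, len);	/* in place: same list for src and dst */
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

out_free_req:
	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}

The completion-based wait turns the asynchronous interface into a synchronous call for the sketch; a real user may instead do useful work until the callback fires.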
| 1190 | /** | ||
| 1191 | * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API | ||
| 1192 | * | ||
| 1193 | * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD | ||
| 1194 | * (listed as type "aead" in /proc/crypto) | ||
| 1195 | * | ||
| 1196 | * The most prominent examples for this type of encryption are GCM and CCM. | ||
| 1197 | * However, the kernel supports other types of AEAD ciphers which are defined | ||
| 1198 | * with the following cipher string: | ||
| 1199 | * | ||
| 1200 | * authenc(keyed message digest, block cipher) | ||
| 1201 | * | ||
| 1202 | * For example: authenc(hmac(sha256), cbc(aes)) | ||
| 1203 | * | ||
| 1204 | * The example code provided for the asynchronous block cipher operation | ||
| 1205 | * applies here as well. Naturally all *ablkcipher* symbols must be exchanged | ||
| 1206 | * for the *aead* counterparts discussed in the following. In addition, for the AEAD | ||
| 1207 | * operation, the aead_request_set_assoc function must be used to set the | ||
| 1208 | * pointer to the associated data memory location before performing the | ||
| 1209 | * encryption or decryption operation. In case of an encryption, the associated | ||
| 1210 | * data memory is filled during the encryption operation. For decryption, the | ||
| 1211 | * associated data memory must contain data that is used to verify the integrity | ||
| 1212 | * of the decrypted data. Another deviation from the asynchronous block cipher | ||
| 1213 | * operation is that the caller should explicitly check for -EBADMSG of the | ||
| 1214 | * crypto_aead_decrypt. That error indicates an authentication error, i.e. | ||
| 1215 | * a breach in the integrity of the message. In essence, that -EBADMSG error | ||
| 1216 | * code is the key bonus an AEAD cipher has over "standard" block chaining | ||
| 1217 | * modes. | ||
| 1218 | */ | ||
| 1219 | |||
| 731 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) | 1220 | static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) |
| 732 | { | 1221 | { |
| 733 | return (struct crypto_aead *)tfm; | 1222 | return (struct crypto_aead *)tfm; |
| 734 | } | 1223 | } |
| 735 | 1224 | ||
| 1225 | /** | ||
| 1226 | * crypto_alloc_aead() - allocate AEAD cipher handle | ||
| 1227 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 1228 | * AEAD cipher | ||
| 1229 | * @type: specifies the type of the cipher | ||
| 1230 | * @mask: specifies the mask for the cipher | ||
| 1231 | * | ||
| 1232 | * Allocate a cipher handle for an AEAD. The returned struct | ||
| 1233 | * crypto_aead is the cipher handle that is required for any subsequent | ||
| 1234 | * API invocation for that AEAD. | ||
| 1235 | * | ||
| 1236 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 1237 | * of an error, PTR_ERR() returns the error code. | ||
| 1238 | */ | ||
| 736 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); | 1239 | struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); |
| 737 | 1240 | ||
| 738 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | 1241 | static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) |
| @@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) | |||
| 740 | return &tfm->base; | 1243 | return &tfm->base; |
| 741 | } | 1244 | } |
| 742 | 1245 | ||
| 1246 | /** | ||
| 1247 | * crypto_free_aead() - zeroize and free aead handle | ||
| 1248 | * @tfm: cipher handle to be freed | ||
| 1249 | */ | ||
| 743 | static inline void crypto_free_aead(struct crypto_aead *tfm) | 1250 | static inline void crypto_free_aead(struct crypto_aead *tfm) |
| 744 | { | 1251 | { |
| 745 | crypto_free_tfm(crypto_aead_tfm(tfm)); | 1252 | crypto_free_tfm(crypto_aead_tfm(tfm)); |
| @@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) | |||
| 750 | return &crypto_aead_tfm(tfm)->crt_aead; | 1257 | return &crypto_aead_tfm(tfm)->crt_aead; |
| 751 | } | 1258 | } |
| 752 | 1259 | ||
| 1260 | /** | ||
| 1261 | * crypto_aead_ivsize() - obtain IV size | ||
| 1262 | * @tfm: cipher handle | ||
| 1263 | * | ||
| 1264 | * The size of the IV for the aead referenced by the cipher handle is | ||
| 1265 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
| 1266 | * | ||
| 1267 | * Return: IV size in bytes | ||
| 1268 | */ | ||
| 753 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) | 1269 | static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) |
| 754 | { | 1270 | { |
| 755 | return crypto_aead_crt(tfm)->ivsize; | 1271 | return crypto_aead_crt(tfm)->ivsize; |
| 756 | } | 1272 | } |
| 757 | 1273 | ||
| 1274 | /** | ||
| 1275 | * crypto_aead_authsize() - obtain maximum authentication data size | ||
| 1276 | * @tfm: cipher handle | ||
| 1277 | * | ||
| 1278 | * The maximum size of the authentication data for the AEAD cipher referenced | ||
| 1279 | * by the AEAD cipher handle is returned. The authentication data size may be | ||
| 1280 | * zero if the cipher implements a hard-coded maximum. | ||
| 1281 | * | ||
| 1282 | * The authentication data may also be known as "tag value". | ||
| 1283 | * | ||
| 1284 | * Return: authentication data size / tag size in bytes | ||
| 1285 | */ | ||
| 758 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) | 1286 | static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) |
| 759 | { | 1287 | { |
| 760 | return crypto_aead_crt(tfm)->authsize; | 1288 | return crypto_aead_crt(tfm)->authsize; |
| 761 | } | 1289 | } |
| 762 | 1290 | ||
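A sketch of the buffer sizing implied above (the helper name is hypothetical): the output of an AEAD encryption is the ciphertext plus the tag reported by crypto_aead_authsize().

#include <linux/crypto.h>
#include <linux/slab.h>

/* Returns a buffer large enough for the ciphertext plus the appended tag. */
static void *example_alloc_ct_buffer(struct crypto_aead *tfm,
				     unsigned int ptlen, unsigned int *ctlen)
{
	*ctlen = ptlen + crypto_aead_authsize(tfm);
	return kmalloc(*ctlen, GFP_KERNEL);
}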
| 1291 | /** | ||
| 1292 | * crypto_aead_blocksize() - obtain block size of cipher | ||
| 1293 | * @tfm: cipher handle | ||
| 1294 | * | ||
| 1295 | * The block size for the AEAD referenced with the cipher handle is returned. | ||
| 1296 | * The caller may use that information to allocate appropriate memory for the | ||
| 1297 | * data returned by the encryption or decryption operation | ||
| 1298 | * | ||
| 1299 | * Return: block size of cipher | ||
| 1300 | */ | ||
| 763 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) | 1301 | static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) |
| 764 | { | 1302 | { |
| 765 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); | 1303 | return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); |
| @@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) | |||
| 785 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); | 1323 | crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); |
| 786 | } | 1324 | } |
| 787 | 1325 | ||
| 1326 | /** | ||
| 1327 | * crypto_aead_setkey() - set key for cipher | ||
| 1328 | * @tfm: cipher handle | ||
| 1329 | * @key: buffer holding the key | ||
| 1330 | * @keylen: length of the key in bytes | ||
| 1331 | * | ||
| 1332 | * The caller provided key is set for the AEAD referenced by the cipher | ||
| 1333 | * handle. | ||
| 1334 | * | ||
| 1335 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
| 1336 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
| 1337 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
| 1338 | * is performed. | ||
| 1339 | * | ||
| 1340 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
| 1341 | */ | ||
| 788 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | 1342 | static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
| 789 | unsigned int keylen) | 1343 | unsigned int keylen) |
| 790 | { | 1344 | { |
| @@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 793 | return crt->setkey(crt->base, key, keylen); | 1347 | return crt->setkey(crt->base, key, keylen); |
| 794 | } | 1348 | } |
| 795 | 1349 | ||
| 1350 | /** | ||
| 1351 | * crypto_aead_setauthsize() - set authentication data size | ||
| 1352 | * @tfm: cipher handle | ||
| 1353 | * @authsize: size of the authentication data / tag in bytes | ||
| 1354 | * | ||
| 1355 | * Set the authentication data size / tag size. AEAD requires an authentication | ||
| 1356 | * tag (or MAC) in addition to the associated data. | ||
| 1357 | * | ||
| 1358 | * Return: 0 if the setting of the authentication size was successful; < 0 if an error occurred | ||
| 1359 | */ | ||
| 796 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); | 1360 | int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); |
| 797 | 1361 | ||
| 798 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | 1362 | static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) |
| @@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
| 800 | return __crypto_aead_cast(req->base.tfm); | 1364 | return __crypto_aead_cast(req->base.tfm); |
| 801 | } | 1365 | } |
| 802 | 1366 | ||
| 1367 | /** | ||
| 1368 | * crypto_aead_encrypt() - encrypt plaintext | ||
| 1369 | * @req: reference to the aead_request handle that holds all information | ||
| 1370 | * needed to perform the cipher operation | ||
| 1371 | * | ||
| 1372 | * Encrypt plaintext data using the aead_request handle. That data structure | ||
| 1373 | * and how it is filled with data is discussed with the aead_request_* | ||
| 1374 | * functions. | ||
| 1375 | * | ||
| 1376 | * IMPORTANT NOTE The encryption operation creates the authentication data / | ||
| 1377 | * tag. That data is concatenated with the created ciphertext. | ||
| 1378 | * The ciphertext memory size is therefore the given number of | ||
| 1379 | * block cipher blocks + the size defined by the | ||
| 1380 | * crypto_aead_setauthsize invocation. The caller must ensure | ||
| 1381 | * that sufficient memory is available for the ciphertext and | ||
| 1382 | * the authentication tag. | ||
| 1383 | * | ||
| 1384 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1385 | */ | ||
| 803 | static inline int crypto_aead_encrypt(struct aead_request *req) | 1386 | static inline int crypto_aead_encrypt(struct aead_request *req) |
| 804 | { | 1387 | { |
| 805 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); | 1388 | return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); |
| 806 | } | 1389 | } |
| 807 | 1390 | ||
| 1391 | /** | ||
| 1392 | * crypto_aead_decrypt() - decrypt ciphertext | ||
| 1393 | * @req: reference to the aead_request handle that holds all information | ||
| 1394 | * needed to perform the cipher operation | ||
| 1395 | * | ||
| 1396 | * Decrypt ciphertext data using the aead_request handle. That data structure | ||
| 1397 | * and how it is filled with data is discussed with the aead_request_* | ||
| 1398 | * functions. | ||
| 1399 | * | ||
| 1400 | * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the | ||
| 1401 | * authentication data / tag. That authentication data / tag | ||
| 1402 | * must have the size defined by the crypto_aead_setauthsize | ||
| 1403 | * invocation. | ||
| 1404 | * | ||
| 1405 | * | ||
| 1406 | * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD | ||
| 1407 | * cipher operation performs the authentication of the data during the | ||
| 1408 | * decryption operation. Therefore, the function returns this error if | ||
| 1409 | * the authentication of the ciphertext was unsuccessful (i.e. the | ||
| 1410 | * integrity of the ciphertext or the associated data was violated); | ||
| 1411 | * < 0 if an error occurred. | ||
| 1412 | */ | ||
| 808 | static inline int crypto_aead_decrypt(struct aead_request *req) | 1413 | static inline int crypto_aead_decrypt(struct aead_request *req) |
| 809 | { | 1414 | { |
| 810 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); | 1415 | return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); |
| 811 | } | 1416 | } |
| 812 | 1417 | ||
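A sketch of the -EBADMSG convention for a synchronous provider; the example_* name is hypothetical and -EINPROGRESS/-EBUSY handling for asynchronous providers is omitted:

#include <linux/crypto.h>
#include <linux/printk.h>

static int example_aead_decrypt(struct aead_request *req)
{
	int ret = crypto_aead_decrypt(req);

	if (ret == -EBADMSG)	/* integrity violation, not a driver bug */
		pr_warn("AEAD authentication failed: ciphertext or associated data altered\n");
	return ret;
}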
| 1418 | /** | ||
| 1419 | * DOC: Asynchronous AEAD Request Handle | ||
| 1420 | * | ||
| 1421 | * The aead_request data structure contains all pointers to data required for | ||
| 1422 | * the AEAD cipher operation. This includes the cipher handle (which can be | ||
| 1423 | * used by multiple aead_request instances), pointer to plaintext and | ||
| 1424 | * ciphertext, asynchronous callback function, etc. It acts as a handle to the | ||
| 1425 | * aead_request_* API calls in a similar way as AEAD handle to the | ||
| 1426 | * aead_request_* API calls in a similar way as the AEAD handle to the | ||
| 1427 | */ | ||
| 1428 | |||
| 1429 | /** | ||
| 1430 | * crypto_aead_reqsize() - obtain size of the request data structure | ||
| 1431 | * @tfm: cipher handle | ||
| 1432 | * | ||
| 1433 | * Return: number of bytes | ||
| 1434 | */ | ||
| 813 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) | 1435 | static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) |
| 814 | { | 1436 | { |
| 815 | return crypto_aead_crt(tfm)->reqsize; | 1437 | return crypto_aead_crt(tfm)->reqsize; |
| 816 | } | 1438 | } |
| 817 | 1439 | ||
| 1440 | /** | ||
| 1441 | * aead_request_set_tfm() - update cipher handle reference in request | ||
| 1442 | * @req: request handle to be modified | ||
| 1443 | * @tfm: cipher handle that shall be added to the request handle | ||
| 1444 | * | ||
| 1445 | * Allow the caller to replace the existing aead handle in the request | ||
| 1446 | * data structure with a different one. | ||
| 1447 | */ | ||
| 818 | static inline void aead_request_set_tfm(struct aead_request *req, | 1448 | static inline void aead_request_set_tfm(struct aead_request *req, |
| 819 | struct crypto_aead *tfm) | 1449 | struct crypto_aead *tfm) |
| 820 | { | 1450 | { |
| 821 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); | 1451 | req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); |
| 822 | } | 1452 | } |
| 823 | 1453 | ||
| 1454 | /** | ||
| 1455 | * aead_request_alloc() - allocate request data structure | ||
| 1456 | * @tfm: cipher handle to be registered with the request | ||
| 1457 | * @gfp: memory allocation flag that is handed to kmalloc by the API call. | ||
| 1458 | * | ||
| 1459 | * Allocate the request data structure that must be used with the AEAD | ||
| 1460 | * encrypt and decrypt API calls. During the allocation, the provided aead | ||
| 1461 | * handle is registered in the request data structure. | ||
| 1462 | * | ||
| 1463 | * Return: allocated request handle in case of success, or NULL in case of | ||
| 1464 | * an error (out of memory). | ||
| 1465 | */ | ||
| 824 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | 1466 | static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, |
| 825 | gfp_t gfp) | 1467 | gfp_t gfp) |
| 826 | { | 1468 | { |
| @@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, | |||
| 834 | return req; | 1476 | return req; |
| 835 | } | 1477 | } |
| 836 | 1478 | ||
| 1479 | /** | ||
| 1480 | * aead_request_free() - zeroize and free request data structure | ||
| 1481 | * @req: request data structure cipher handle to be freed | ||
| 1482 | */ | ||
| 837 | static inline void aead_request_free(struct aead_request *req) | 1483 | static inline void aead_request_free(struct aead_request *req) |
| 838 | { | 1484 | { |
| 839 | kzfree(req); | 1485 | kzfree(req); |
| 840 | } | 1486 | } |
| 841 | 1487 | ||
| 1488 | /** | ||
| 1489 | * aead_request_set_callback() - set asynchronous callback function | ||
| 1490 | * @req: request handle | ||
| 1491 | * @flags: specify zero or an ORing of the flags | ||
| 1492 | * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and | ||
| 1493 | * increase the wait queue beyond the initial maximum size; | ||
| 1494 | * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep | ||
| 1495 | * @compl: callback function pointer to be registered with the request handle | ||
| 1496 | * @data: The data pointer refers to memory that is not used by the kernel | ||
| 1497 | * crypto API, but provided to the callback function for it to use. Here, | ||
| 1498 | * the caller can provide a reference to memory the callback function can | ||
| 1499 | * operate on. As the callback function is invoked asynchronously to the | ||
| 1500 | * related functionality, it may need to access data structures of the | ||
| 1501 | * related functionality which can be referenced using this pointer. The | ||
| 1502 | * callback function can access the memory via the "data" field in the | ||
| 1503 | * crypto_async_request data structure provided to the callback function. | ||
| 1504 | * | ||
| 1505 | * Setting the callback function that is triggered once the cipher operation | ||
| 1506 | * completes | ||
| 1507 | * | ||
| 1508 | * The callback function is registered with the aead_request handle and | ||
| 1509 | * must comply with the following template: | ||
| 1510 | * | ||
| 1511 | * void callback_function(struct crypto_async_request *req, int error) | ||
| 1512 | */ | ||
| 842 | static inline void aead_request_set_callback(struct aead_request *req, | 1513 | static inline void aead_request_set_callback(struct aead_request *req, |
| 843 | u32 flags, | 1514 | u32 flags, |
| 844 | crypto_completion_t compl, | 1515 | crypto_completion_t compl, |
| @@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req, | |||
| 849 | req->base.flags = flags; | 1520 | req->base.flags = flags; |
| 850 | } | 1521 | } |
| 851 | 1522 | ||
| 1523 | /** | ||
| 1524 | * aead_request_set_crypt() - set data buffers | ||
| 1525 | * @req: request handle | ||
| 1526 | * @src: source scatter / gather list | ||
| 1527 | * @dst: destination scatter / gather list | ||
| 1528 | * @cryptlen: number of bytes to process from @src | ||
| 1529 | * @iv: IV for the cipher operation which must comply with the IV size defined | ||
| 1530 | * by crypto_aead_ivsize() | ||
| 1531 | * | ||
| 1532 | * Setting the source data and destination data scatter / gather lists. | ||
| 1533 | * | ||
| 1534 | * For encryption, the source is treated as the plaintext and the | ||
| 1535 | * destination is the ciphertext. For a decryption operation, the use is | ||
| 1536 | * reversed: the source is the ciphertext and the destination is the plaintext. | ||
| 1537 | * | ||
| 1538 | * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, | ||
| 1539 | * the caller must concatenate the ciphertext followed by the | ||
| 1540 | * authentication tag and provide the entire data stream to the | ||
| 1541 | * decryption operation (i.e. the data length used for the | ||
| 1542 | * initialization of the scatterlist and the data length for the | ||
| 1543 | * decryption operation is identical). For encryption, however, | ||
| 1544 | * the authentication tag is created while encrypting the data. | ||
| 1545 | * The destination buffer must hold sufficient space for the | ||
| 1546 | * ciphertext and the authentication tag while the encryption | ||
| 1547 | * invocation must only point to the plaintext data size. The | ||
| 1548 | * following code snippet illustrates the memory usage | ||
| 1549 | * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL); | ||
| 1550 | * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); | ||
| 1551 | * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); | ||
| 1552 | */ | ||
| 852 | static inline void aead_request_set_crypt(struct aead_request *req, | 1553 | static inline void aead_request_set_crypt(struct aead_request *req, |
| 853 | struct scatterlist *src, | 1554 | struct scatterlist *src, |
| 854 | struct scatterlist *dst, | 1555 | struct scatterlist *dst, |
| @@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req, | |||
| 860 | req->iv = iv; | 1561 | req->iv = iv; |
| 861 | } | 1562 | } |
| 862 | 1563 | ||
| 1564 | /** | ||
| 1565 | * aead_request_set_assoc() - set the associated data scatter / gather list | ||
| 1566 | * @req: request handle | ||
| 1567 | * @assoc: associated data scatter / gather list | ||
| 1568 | * @assoclen: number of bytes to process from @assoc | ||
| 1569 | * | ||
| 1570 | * For encryption, the memory is filled with the associated data. For | ||
| 1571 | * decryption, the memory must point to the associated data. | ||
| 1572 | */ | ||
| 863 | static inline void aead_request_set_assoc(struct aead_request *req, | 1573 | static inline void aead_request_set_assoc(struct aead_request *req, |
| 864 | struct scatterlist *assoc, | 1574 | struct scatterlist *assoc, |
| 865 | unsigned int assoclen) | 1575 | unsigned int assoclen) |
| @@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req, | |||
| 868 | req->assoclen = assoclen; | 1578 | req->assoclen = assoclen; |
| 869 | } | 1579 | } |
| 870 | 1580 | ||
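A minimal AEAD sketch along the lines of the ablkcipher example above, reusing the hypothetical example_result/example_cipher_done helpers from that sketch; it assumes "gcm(aes)" is available and that sg_ct provides room for the plaintext plus the tag.

#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_aead_encrypt(struct scatterlist *sg_assoc,
				unsigned int assoclen,
				struct scatterlist *sg_ct, unsigned int ptlen,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct example_result res;	/* helper from the ablkcipher sketch */
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	ret = crypto_aead_setauthsize(tfm, 16);	/* request a full 128-bit tag */
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_cipher_done, &res);
	aead_request_set_assoc(req, sg_assoc, assoclen);
	/* sg_ct must cover ptlen bytes of plaintext plus 16 bytes of tag */
	aead_request_set_crypt(req, sg_ct, sg_ct, ptlen, iv);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}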
| 1581 | /** | ||
| 1582 | * DOC: Synchronous Block Cipher API | ||
| 1583 | * | ||
| 1584 | * The synchronous block cipher API is used with the ciphers of type | ||
| 1585 | * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) | ||
| 1586 | * | ||
| 1587 | * Synchronous calls have a context in the tfm. But since a single tfm can be | ||
| 1588 | * used in multiple calls and in parallel, this info should not be changeable | ||
| 1589 | * (unless a lock is used). This applies, for example, to the symmetric key. | ||
| 1590 | * However, the IV is changeable, so there is an iv field in the blkcipher_tfm | ||
| 1591 | * structure for the synchronous blkcipher API. So, it is the only state info | ||
| 1592 | * that can be kept for synchronous calls without using a big lock across a tfm. | ||
| 1593 | * | ||
| 1594 | * The block cipher API allows the use of a complete cipher, i.e. a cipher | ||
| 1595 | * consisting of a template (a block chaining mode) and a single block cipher | ||
| 1596 | * primitive (e.g. AES). | ||
| 1597 | * | ||
| 1598 | * The plaintext data buffer and the ciphertext data buffer are pointed to | ||
| 1599 | * by using scatter/gather lists. The cipher operation is performed | ||
| 1600 | * on all segments of the provided scatter/gather lists. | ||
| 1601 | * | ||
| 1602 | * The kernel crypto API supports a cipher operation "in-place" which means that | ||
| 1603 | * the caller may provide the same scatter/gather list for the plaintext and | ||
| 1604 | * cipher text. After the completion of the cipher operation, the plaintext | ||
| 1605 | * data is replaced with the ciphertext data in case of an encryption and vice | ||
| 1606 | * versa for a decryption. The caller must ensure that the scatter/gather lists | ||
| 1607 | * for the output data point to sufficiently large buffers, i.e. multiples of | ||
| 1608 | * the block size of the cipher. | ||
| 1609 | */ | ||
| 1610 | |||
| 871 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( | 1611 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( |
| 872 | struct crypto_tfm *tfm) | 1612 | struct crypto_tfm *tfm) |
| 873 | { | 1613 | { |
| @@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( | |||
| 881 | return __crypto_blkcipher_cast(tfm); | 1621 | return __crypto_blkcipher_cast(tfm); |
| 882 | } | 1622 | } |
| 883 | 1623 | ||
| 1624 | /** | ||
| 1625 | * crypto_alloc_blkcipher() - allocate synchronous block cipher handle | ||
| 1626 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 1627 | * blkcipher cipher | ||
| 1628 | * @type: specifies the type of the cipher | ||
| 1629 | * @mask: specifies the mask for the cipher | ||
| 1630 | * | ||
| 1631 | * Allocate a cipher handle for a block cipher. The returned struct | ||
| 1632 | * crypto_blkcipher is the cipher handle that is required for any subsequent | ||
| 1633 | * API invocation for that block cipher. | ||
| 1634 | * | ||
| 1635 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 1636 | * of an error, PTR_ERR() returns the error code. | ||
| 1637 | */ | ||
| 884 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( | 1638 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( |
| 885 | const char *alg_name, u32 type, u32 mask) | 1639 | const char *alg_name, u32 type, u32 mask) |
| 886 | { | 1640 | { |
| @@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm( | |||
| 897 | return &tfm->base; | 1651 | return &tfm->base; |
| 898 | } | 1652 | } |
| 899 | 1653 | ||
| 1654 | /** | ||
| 1655 | * crypto_free_blkcipher() - zeroize and free the block cipher handle | ||
| 1656 | * @tfm: cipher handle to be freed | ||
| 1657 | */ | ||
| 900 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) | 1658 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) |
| 901 | { | 1659 | { |
| 902 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); | 1660 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); |
| 903 | } | 1661 | } |
| 904 | 1662 | ||
| 1663 | /** | ||
| 1664 | * crypto_has_blkcipher() - Search for the availability of a block cipher | ||
| 1665 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 1666 | * block cipher | ||
| 1667 | * @type: specifies the type of the cipher | ||
| 1668 | * @mask: specifies the mask for the cipher | ||
| 1669 | * | ||
| 1670 | * Return: true when the block cipher is known to the kernel crypto API; false | ||
| 1671 | * otherwise | ||
| 1672 | */ | ||
| 905 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | 1673 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) |
| 906 | { | 1674 | { |
| 907 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1675 | type &= ~CRYPTO_ALG_TYPE_MASK; |
| @@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | |||
| 911 | return crypto_has_alg(alg_name, type, mask); | 1679 | return crypto_has_alg(alg_name, type, mask); |
| 912 | } | 1680 | } |
| 913 | 1681 | ||
| 1682 | /** | ||
| 1683 | * crypto_blkcipher_name() - return the name / cra_name from the cipher handle | ||
| 1684 | * @tfm: cipher handle | ||
| 1685 | * | ||
| 1686 | * Return: The character string holding the name of the cipher | ||
| 1687 | */ | ||
| 914 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) | 1688 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) |
| 915 | { | 1689 | { |
| 916 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); | 1690 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); |
| @@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg( | |||
| 928 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; | 1702 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; |
| 929 | } | 1703 | } |
| 930 | 1704 | ||
| 1705 | /** | ||
| 1706 | * crypto_blkcipher_ivsize() - obtain IV size | ||
| 1707 | * @tfm: cipher handle | ||
| 1708 | * | ||
| 1709 | * The size of the IV for the block cipher referenced by the cipher handle is | ||
| 1710 | * returned. This IV size may be zero if the cipher does not need an IV. | ||
| 1711 | * | ||
| 1712 | * Return: IV size in bytes | ||
| 1713 | */ | ||
| 931 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) | 1714 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) |
| 932 | { | 1715 | { |
| 933 | return crypto_blkcipher_alg(tfm)->ivsize; | 1716 | return crypto_blkcipher_alg(tfm)->ivsize; |
| 934 | } | 1717 | } |
| 935 | 1718 | ||
| 1719 | /** | ||
| 1720 | * crypto_blkcipher_blocksize() - obtain block size of cipher | ||
| 1721 | * @tfm: cipher handle | ||
| 1722 | * | ||
| 1723 | * The block size for the block cipher referenced with the cipher handle is | ||
| 1724 | * returned. The caller may use that information to allocate appropriate | ||
| 1725 | * memory for the data returned by the encryption or decryption operation. | ||
| 1726 | * | ||
| 1727 | * Return: block size of cipher | ||
| 1728 | */ | ||
| 936 | static inline unsigned int crypto_blkcipher_blocksize( | 1729 | static inline unsigned int crypto_blkcipher_blocksize( |
| 937 | struct crypto_blkcipher *tfm) | 1730 | struct crypto_blkcipher *tfm) |
| 938 | { | 1731 | { |
| @@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, | |||
| 962 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); | 1755 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); |
| 963 | } | 1756 | } |
| 964 | 1757 | ||
| 1758 | /** | ||
| 1759 | * crypto_blkcipher_setkey() - set key for cipher | ||
| 1760 | * @tfm: cipher handle | ||
| 1761 | * @key: buffer holding the key | ||
| 1762 | * @keylen: length of the key in bytes | ||
| 1763 | * | ||
| 1764 | * The caller provided key is set for the block cipher referenced by the cipher | ||
| 1765 | * handle. | ||
| 1766 | * | ||
| 1767 | * Note, the key length determines the cipher variant. Many block ciphers come | ||
| 1768 | * in different variants depending on the key size, such as AES-128 vs. AES-192 | ||
| 1769 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
| 1770 | * is performed. | ||
| 1771 | * | ||
| 1772 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
| 1773 | */ | ||
| 965 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | 1774 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, |
| 966 | const u8 *key, unsigned int keylen) | 1775 | const u8 *key, unsigned int keylen) |
| 967 | { | 1776 | { |
| @@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | |||
| 969 | key, keylen); | 1778 | key, keylen); |
| 970 | } | 1779 | } |
| 971 | 1780 | ||
| 1781 | /** | ||
| 1782 | * crypto_blkcipher_encrypt() - encrypt plaintext | ||
| 1783 | * @desc: reference to the block cipher handle with meta data | ||
| 1784 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
| 1785 | * ciphertext | ||
| 1786 | * @src: scatter/gather list that holds the plaintext | ||
| 1787 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
| 1788 | * | ||
| 1789 | * Encrypt plaintext data using the IV set by the caller with a preceding | ||
| 1790 | * call of crypto_blkcipher_set_iv. | ||
| 1791 | * | ||
| 1792 | * The blkcipher_desc data structure must be filled by the caller and can | ||
| 1793 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
| 1794 | * with the block cipher handle; desc.flags is filled with either | ||
| 1795 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
| 1796 | * | ||
| 1797 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1798 | */ | ||
| 972 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | 1799 | static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, |
| 973 | struct scatterlist *dst, | 1800 | struct scatterlist *dst, |
| 974 | struct scatterlist *src, | 1801 | struct scatterlist *src, |
| @@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | |||
| 978 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1805 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
| 979 | } | 1806 | } |
| 980 | 1807 | ||
| 1808 | /** | ||
| 1809 | * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV | ||
| 1810 | * @desc: reference to the block cipher handle with meta data | ||
| 1811 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
| 1812 | * ciphertext | ||
| 1813 | * @src: scatter/gather list that holds the plaintext | ||
| 1814 | * @nbytes: number of bytes of the plaintext to encrypt. | ||
| 1815 | * | ||
| 1816 | * Encrypt plaintext data with the use of an IV that is solely used for this | ||
| 1817 | * cipher operation. Any previously set IV is not used. | ||
| 1818 | * | ||
| 1819 | * The blkcipher_desc data structure must be filled by the caller and can | ||
| 1820 | * reside on the stack. The caller must fill desc as follows: desc.tfm is filled | ||
| 1821 | * with the block cipher handle; desc.info is filled with the IV to be used for | ||
| 1822 | * the current operation; desc.flags is filled with either | ||
| 1823 | * CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
| 1824 | * | ||
| 1825 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1826 | */ | ||
| 981 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | 1827 | static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, |
| 982 | struct scatterlist *dst, | 1828 | struct scatterlist *dst, |
| 983 | struct scatterlist *src, | 1829 | struct scatterlist *src, |
| @@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | |||
| 986 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | 1832 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); |
| 987 | } | 1833 | } |
| 988 | 1834 | ||
| 1835 | /** | ||
| 1836 | * crypto_blkcipher_decrypt() - decrypt ciphertext | ||
| 1837 | * @desc: reference to the block cipher handle with meta data | ||
| 1838 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
| 1839 | * plaintext | ||
| 1840 | * @src: scatter/gather list that holds the ciphertext | ||
| 1841 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
| 1842 | * | ||
| 1843 | * Decrypt ciphertext data using the IV set by the caller with a preceding | ||
| 1844 | * call of crypto_blkcipher_set_iv. | ||
| 1845 | * | ||
| 1846 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
| 1847 | * for the crypto_blkcipher_encrypt call above. | ||
| 1848 | * | ||
| 1849 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1850 | * | ||
| 1851 | */ | ||
| 989 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | 1852 | static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, |
| 990 | struct scatterlist *dst, | 1853 | struct scatterlist *dst, |
| 991 | struct scatterlist *src, | 1854 | struct scatterlist *src, |
| @@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | |||
| 995 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1858 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
| 996 | } | 1859 | } |
| 997 | 1860 | ||
| 1861 | /** | ||
| 1862 | * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV | ||
| 1863 | * @desc: reference to the block cipher handle with meta data | ||
| 1864 | * @dst: scatter/gather list that is filled by the cipher operation with the | ||
| 1865 | * plaintext | ||
| 1866 | * @src: scatter/gather list that holds the ciphertext | ||
| 1867 | * @nbytes: number of bytes of the ciphertext to decrypt. | ||
| 1868 | * | ||
| 1869 | * Decrypt ciphertext data with the use of an IV that is solely used for this | ||
| 1870 | * cipher operation. Any previously set IV is not used. | ||
| 1871 | * | ||
| 1872 | * The blkcipher_desc data structure must be filled by the caller as documented | ||
| 1873 | * for the crypto_blkcipher_encrypt_iv call above. | ||
| 1874 | * | ||
| 1875 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | ||
| 1876 | */ | ||
| 998 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | 1877 | static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, |
| 999 | struct scatterlist *dst, | 1878 | struct scatterlist *dst, |
| 1000 | struct scatterlist *src, | 1879 | struct scatterlist *src, |
| @@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | |||
| 1003 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | 1882 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); |
| 1004 | } | 1883 | } |
| 1005 | 1884 | ||
| 1885 | /** | ||
| 1886 | * crypto_blkcipher_set_iv() - set IV for cipher | ||
| 1887 | * @tfm: cipher handle | ||
| 1888 | * @src: buffer holding the IV | ||
| 1889 | * @len: length of the IV in bytes | ||
| 1890 | * | ||
| 1891 | * The caller provided IV is set for the block cipher referenced by the cipher | ||
| 1892 | * handle. | ||
| 1893 | */ | ||
| 1006 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, | 1894 | static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, |
| 1007 | const u8 *src, unsigned int len) | 1895 | const u8 *src, unsigned int len) |
| 1008 | { | 1896 | { |
| 1009 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); | 1897 | memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); |
| 1010 | } | 1898 | } |
| 1011 | 1899 | ||
| 1900 | /** | ||
| 1901 | * crypto_blkcipher_get_iv() - obtain IV from cipher | ||
| 1902 | * @tfm: cipher handle | ||
| 1903 | * @dst: buffer filled with the IV | ||
| 1904 | * @len: length of the buffer dst | ||
| 1905 | * | ||
| 1906 | * The caller can obtain the IV set for the block cipher referenced by the | ||
| 1907 | * cipher handle and store it into the user-provided buffer. If the buffer | ||
| 1908 | * has insufficient space, the IV is truncated to fit the buffer. | ||
| 1909 | */ | ||
| 1012 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, | 1910 | static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, |
| 1013 | u8 *dst, unsigned int len) | 1911 | u8 *dst, unsigned int len) |
| 1014 | { | 1912 | { |
| 1015 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); | 1913 | memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); |
| 1016 | } | 1914 | } |
| 1017 | 1915 | ||
| 1916 | /** | ||
| 1917 | * DOC: Single Block Cipher API | ||
| 1918 | * | ||
| 1919 | * The single block cipher API is used with the ciphers of type | ||
| 1920 | * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). | ||
| 1921 | * | ||
| 1922 | * Using the single block cipher API calls, operations with the basic cipher | ||
| 1923 | * primitive can be implemented. These cipher primitives exclude any block | ||
| 1924 | * chaining operations including IV handling. | ||
| 1925 | * | ||
| 1926 | * The purpose of this single block cipher API is to support the implementation | ||
| 1927 | * of templates or other concepts that only need to perform the cipher operation | ||
| 1928 | * on one block at a time. Templates invoke the underlying cipher primitive | ||
| 1929 | * block-wise and process either the input or the output data of these cipher | ||
| 1930 | * operations. | ||
| 1931 | */ | ||
| 1932 | |||
| 1018 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) | 1933 | static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) |
| 1019 | { | 1934 | { |
| 1020 | return (struct crypto_cipher *)tfm; | 1935 | return (struct crypto_cipher *)tfm; |
| @@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) | |||
| 1026 | return __crypto_cipher_cast(tfm); | 1941 | return __crypto_cipher_cast(tfm); |
| 1027 | } | 1942 | } |
| 1028 | 1943 | ||
| 1944 | /** | ||
| 1945 | * crypto_alloc_cipher() - allocate single block cipher handle | ||
| 1946 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 1947 | * single block cipher | ||
| 1948 | * @type: specifies the type of the cipher | ||
| 1949 | * @mask: specifies the mask for the cipher | ||
| 1950 | * | ||
| 1951 | * Allocate a cipher handle for a single block cipher. The returned struct | ||
| 1952 | * crypto_cipher is the cipher handle that is required for any subsequent API | ||
| 1953 | * invocation for that single block cipher. | ||
| 1954 | * | ||
| 1955 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 1956 | * of an error, PTR_ERR() returns the error code. | ||
| 1957 | */ | ||
| 1029 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, | 1958 | static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, |
| 1030 | u32 type, u32 mask) | 1959 | u32 type, u32 mask) |
| 1031 | { | 1960 | { |
| @@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) | |||
| 1041 | return &tfm->base; | 1970 | return &tfm->base; |
| 1042 | } | 1971 | } |
| 1043 | 1972 | ||
| 1973 | /** | ||
| 1974 | * crypto_free_cipher() - zeroize and free the single block cipher handle | ||
| 1975 | * @tfm: cipher handle to be freed | ||
| 1976 | */ | ||
| 1044 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) | 1977 | static inline void crypto_free_cipher(struct crypto_cipher *tfm) |
| 1045 | { | 1978 | { |
| 1046 | crypto_free_tfm(crypto_cipher_tfm(tfm)); | 1979 | crypto_free_tfm(crypto_cipher_tfm(tfm)); |
| 1047 | } | 1980 | } |
| 1048 | 1981 | ||
| 1982 | /** | ||
| 1983 | * crypto_has_cipher() - Search for the availability of a single block cipher | ||
| 1984 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 1985 | * single block cipher | ||
| 1986 | * @type: specifies the type of the cipher | ||
| 1987 | * @mask: specifies the mask for the cipher | ||
| 1988 | * | ||
| 1989 | * Return: true when the single block cipher is known to the kernel crypto API; | ||
| 1990 | * false otherwise | ||
| 1991 | */ | ||
| 1049 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) | 1992 | static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) |
| 1050 | { | 1993 | { |
| 1051 | type &= ~CRYPTO_ALG_TYPE_MASK; | 1994 | type &= ~CRYPTO_ALG_TYPE_MASK; |
| @@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) | |||
| 1060 | return &crypto_cipher_tfm(tfm)->crt_cipher; | 2003 | return &crypto_cipher_tfm(tfm)->crt_cipher; |
| 1061 | } | 2004 | } |
| 1062 | 2005 | ||
| 2006 | /** | ||
| 2007 | * crypto_cipher_blocksize() - obtain block size for cipher | ||
| 2008 | * @tfm: cipher handle | ||
| 2009 | * | ||
| 2010 | * The block size for the single block cipher referenced with the cipher handle | ||
| 2011 | * tfm is returned. The caller may use that information to allocate appropriate | ||
| 2012 | * memory for the data returned by the encryption or decryption operation. | ||
| 2013 | * | ||
| 2014 | * Return: block size of cipher | ||
| 2015 | */ | ||
| 1063 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) | 2016 | static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) |
| 1064 | { | 2017 | { |
| 1065 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); | 2018 | return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); |
| @@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, | |||
| 1087 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); | 2040 | crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); |
| 1088 | } | 2041 | } |
| 1089 | 2042 | ||
| 2043 | /** | ||
| 2044 | * crypto_cipher_setkey() - set key for cipher | ||
| 2045 | * @tfm: cipher handle | ||
| 2046 | * @key: buffer holding the key | ||
| 2047 | * @keylen: length of the key in bytes | ||
| 2048 | * | ||
| 2049 | * The caller provided key is set for the single block cipher referenced by the | ||
| 2050 | * cipher handle. | ||
| 2051 | * | ||
| 2052 | * Note: the key length determines the cipher variant. Many block ciphers | ||
| 2053 | * come in several key-size variants, such as AES-128 vs. AES-192 vs. | ||
| 2054 | * AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | ||
| 2055 | * is performed. | ||
| 2056 | * | ||
| 2057 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
| 2058 | */ | ||
| 1090 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | 2059 | static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, |
| 1091 | const u8 *key, unsigned int keylen) | 2060 | const u8 *key, unsigned int keylen) |
| 1092 | { | 2061 | { |
| @@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, | |||
| 1094 | key, keylen); | 2063 | key, keylen); |
| 1095 | } | 2064 | } |
| 1096 | 2065 | ||
| 2066 | /** | ||
| 2067 | * crypto_cipher_encrypt_one() - encrypt one block of plaintext | ||
| 2068 | * @tfm: cipher handle | ||
| 2069 | * @dst: points to the buffer that will be filled with the ciphertext | ||
| 2070 | * @src: buffer holding the plaintext to be encrypted | ||
| 2071 | * | ||
| 2072 | * Invoke the encryption operation of one block. The caller must ensure that | ||
| 2073 | * the plaintext and ciphertext buffers are at least one block in size. | ||
| 2074 | */ | ||
| 1097 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | 2075 | static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, |
| 1098 | u8 *dst, const u8 *src) | 2076 | u8 *dst, const u8 *src) |
| 1099 | { | 2077 | { |
| @@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | |||
| 1101 | dst, src); | 2079 | dst, src); |
| 1102 | } | 2080 | } |
| 1103 | 2081 | ||
| 2082 | /** | ||
| 2083 | * crypto_cipher_decrypt_one() - decrypt one block of ciphertext | ||
| 2084 | * @tfm: cipher handle | ||
| 2085 | * @dst: points to the buffer that will be filled with the plaintext | ||
| 2086 | * @src: buffer holding the ciphertext to be decrypted | ||
| 2087 | * | ||
| 2088 | * Invoke the decryption operation of one block. The caller must ensure that | ||
| 2089 | * the plaintext and ciphertext buffers are at least one block in size. | ||
| 2090 | */ | ||
| 1104 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | 2091 | static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, |
| 1105 | u8 *dst, const u8 *src) | 2092 | u8 *dst, const u8 *src) |
| 1106 | { | 2093 | { |
| @@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | |||
| 1108 | dst, src); | 2095 | dst, src); |
| 1109 | } | 2096 | } |
| 1110 | 2097 | ||
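A hedged sketch of the single block cipher calls documented in this hunk; the "aes" algorithm name and the fixed 16-byte buffers are assumptions:

    #include <linux/crypto.h>

    /* Encrypt exactly one 16-byte block with AES-128; minimal error handling. */
    static int example_cipher_one_block(const u8 key[16], const u8 in[16], u8 out[16])
    {
            struct crypto_cipher *tfm;
            int err;

            tfm = crypto_alloc_cipher("aes", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_cipher_setkey(tfm, key, 16);        /* 16 bytes -> AES-128 */
            if (!err)
                    crypto_cipher_encrypt_one(tfm, out, in); /* one block, no chaining/IV */

            crypto_free_cipher(tfm);
            return err;
    }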
| 2098 | /** | ||
| 2099 | * DOC: Synchronous Message Digest API | ||
| 2100 | * | ||
| 2101 | * The synchronous message digest API is used with the ciphers of type | ||
| 2102 | * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) | ||
| 2103 | */ | ||
| 2104 | |||
| 1111 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) | 2105 | static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) |
| 1112 | { | 2106 | { |
| 1113 | return (struct crypto_hash *)tfm; | 2107 | return (struct crypto_hash *)tfm; |
| @@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) | |||
| 1120 | return __crypto_hash_cast(tfm); | 2114 | return __crypto_hash_cast(tfm); |
| 1121 | } | 2115 | } |
| 1122 | 2116 | ||
| 2117 | /** | ||
| 2118 | * crypto_alloc_hash() - allocate synchronous message digest handle | ||
| 2119 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 2120 | * message digest cipher | ||
| 2121 | * @type: specifies the type of the cipher | ||
| 2122 | * @mask: specifies the mask for the cipher | ||
| 2123 | * | ||
| 2124 | * Allocate a cipher handle for a message digest. The returned struct | ||
| 2125 | * crypto_hash is the cipher handle that is required for any subsequent | ||
| 2126 | * API invocation for that message digest. | ||
| 2127 | * | ||
| 2128 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | ||
| 2129 | * of an error, PTR_ERR() returns the error code. | ||
| 2130 | */ | ||
| 1123 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, | 2131 | static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, |
| 1124 | u32 type, u32 mask) | 2132 | u32 type, u32 mask) |
| 1125 | { | 2133 | { |
| @@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) | |||
| 1136 | return &tfm->base; | 2144 | return &tfm->base; |
| 1137 | } | 2145 | } |
| 1138 | 2146 | ||
| 2147 | /** | ||
| 2148 | * crypto_free_hash() - zeroize and free message digest handle | ||
| 2149 | * @tfm: cipher handle to be freed | ||
| 2150 | */ | ||
| 1139 | static inline void crypto_free_hash(struct crypto_hash *tfm) | 2151 | static inline void crypto_free_hash(struct crypto_hash *tfm) |
| 1140 | { | 2152 | { |
| 1141 | crypto_free_tfm(crypto_hash_tfm(tfm)); | 2153 | crypto_free_tfm(crypto_hash_tfm(tfm)); |
| 1142 | } | 2154 | } |
| 1143 | 2155 | ||
| 2156 | /** | ||
| 2157 | * crypto_has_hash() - Search for the availability of a message digest | ||
| 2158 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | ||
| 2159 | * message digest cipher | ||
| 2160 | * @type: specifies the type of the cipher | ||
| 2161 | * @mask: specifies the mask for the cipher | ||
| 2162 | * | ||
| 2163 | * Return: true when the message digest cipher is known to the kernel crypto | ||
| 2164 | * API; false otherwise | ||
| 2165 | */ | ||
| 1144 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) | 2166 | static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) |
| 1145 | { | 2167 | { |
| 1146 | type &= ~CRYPTO_ALG_TYPE_MASK; | 2168 | type &= ~CRYPTO_ALG_TYPE_MASK; |
| @@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) | |||
| 1156 | return &crypto_hash_tfm(tfm)->crt_hash; | 2178 | return &crypto_hash_tfm(tfm)->crt_hash; |
| 1157 | } | 2179 | } |
| 1158 | 2180 | ||
| 2181 | /** | ||
| 2182 | * crypto_hash_blocksize() - obtain block size for message digest | ||
| 2183 | * @tfm: cipher handle | ||
| 2184 | * | ||
| 2185 | * The block size for the message digest cipher referenced with the cipher | ||
| 2186 | * handle is returned. | ||
| 2187 | * | ||
| 2188 | * Return: block size of cipher | ||
| 2189 | */ | ||
| 1159 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) | 2190 | static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) |
| 1160 | { | 2191 | { |
| 1161 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); | 2192 | return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); |
| @@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) | |||
| 1166 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); | 2197 | return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); |
| 1167 | } | 2198 | } |
| 1168 | 2199 | ||
| 2200 | /** | ||
| 2201 | * crypto_hash_digestsize() - obtain message digest size | ||
| 2202 | * @tfm: cipher handle | ||
| 2203 | * | ||
| 2204 | * The size for the message digest created by the message digest cipher | ||
| 2205 | * referenced with the cipher handle is returned. | ||
| 2206 | * | ||
| 2207 | * Return: message digest size | ||
| 2208 | */ | ||
| 1169 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) | 2209 | static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) |
| 1170 | { | 2210 | { |
| 1171 | return crypto_hash_crt(tfm)->digestsize; | 2211 | return crypto_hash_crt(tfm)->digestsize; |
| @@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) | |||
| 1186 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); | 2226 | crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); |
| 1187 | } | 2227 | } |
| 1188 | 2228 | ||
| 2229 | /** | ||
| 2230 | * crypto_hash_init() - (re)initialize message digest handle | ||
| 2231 | * @desc: cipher request handle to be filled by the caller -- | ||
| 2232 | * desc.tfm is filled with the hash cipher handle; | ||
| 2233 | * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. | ||
| 2234 | * | ||
| 2235 | * The call (re-)initializes the message digest referenced by the hash cipher | ||
| 2236 | * request handle. Any potentially existing state created by previous | ||
| 2237 | * operations is discarded. | ||
| 2238 | * | ||
| 2239 | * Return: 0 if the message digest initialization was successful; < 0 if an | ||
| 2240 | * error occurred | ||
| 2241 | */ | ||
| 1189 | static inline int crypto_hash_init(struct hash_desc *desc) | 2242 | static inline int crypto_hash_init(struct hash_desc *desc) |
| 1190 | { | 2243 | { |
| 1191 | return crypto_hash_crt(desc->tfm)->init(desc); | 2244 | return crypto_hash_crt(desc->tfm)->init(desc); |
| 1192 | } | 2245 | } |
| 1193 | 2246 | ||
| 2247 | /** | ||
| 2248 | * crypto_hash_update() - add data to message digest for processing | ||
| 2249 | * @desc: cipher request handle | ||
| 2250 | * @sg: scatter / gather list pointing to the data to be added to the message | ||
| 2251 | * digest | ||
| 2252 | * @nbytes: number of bytes to be processed from @sg | ||
| 2253 | * | ||
| 2254 | * Updates the message digest state of the cipher handle pointed to by the | ||
| 2255 | * hash cipher request handle with the input data pointed to by the | ||
| 2256 | * scatter/gather list. | ||
| 2257 | * | ||
| 2258 | * Return: 0 if the message digest update was successful; < 0 if an error | ||
| 2259 | * occurred | ||
| 2260 | */ | ||
| 1194 | static inline int crypto_hash_update(struct hash_desc *desc, | 2261 | static inline int crypto_hash_update(struct hash_desc *desc, |
| 1195 | struct scatterlist *sg, | 2262 | struct scatterlist *sg, |
| 1196 | unsigned int nbytes) | 2263 | unsigned int nbytes) |
| @@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc, | |||
| 1198 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); | 2265 | return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); |
| 1199 | } | 2266 | } |
| 1200 | 2267 | ||
| 2268 | /** | ||
| 2269 | * crypto_hash_final() - calculate message digest | ||
| 2270 | * @desc: cipher request handle | ||
| 2271 | * @out: message digest output buffer -- The caller must ensure that the out | ||
| 2272 | * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize | ||
| 2273 | * function). | ||
| 2274 | * | ||
| 2275 | * Finalize the message digest operation and create the message digest | ||
| 2276 | * based on all data added to the cipher handle. The message digest is placed | ||
| 2277 | * into the output buffer. | ||
| 2278 | * | ||
| 2279 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
| 2280 | * occurred | ||
| 2281 | */ | ||
| 1201 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) | 2282 | static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) |
| 1202 | { | 2283 | { |
| 1203 | return crypto_hash_crt(desc->tfm)->final(desc, out); | 2284 | return crypto_hash_crt(desc->tfm)->final(desc, out); |
| 1204 | } | 2285 | } |
| 1205 | 2286 | ||
| 2287 | /** | ||
| 2288 | * crypto_hash_digest() - calculate message digest for a buffer | ||
| 2289 | * @desc: see crypto_hash_final() | ||
| 2290 | * @sg: see crypto_hash_update() | ||
| 2291 | * @nbytes: see crypto_hash_update() | ||
| 2292 | * @out: see crypto_hash_final() | ||
| 2293 | * | ||
| 2294 | * This function is a "short-hand" for the function calls of crypto_hash_init, | ||
| 2295 | * crypto_hash_update and crypto_hash_final. The parameters have the same | ||
| 2296 | * meaning as discussed for those separate three functions. | ||
| 2297 | * | ||
| 2298 | * Return: 0 if the message digest creation was successful; < 0 if an error | ||
| 2299 | * occurred | ||
| 2300 | */ | ||
| 1206 | static inline int crypto_hash_digest(struct hash_desc *desc, | 2301 | static inline int crypto_hash_digest(struct hash_desc *desc, |
| 1207 | struct scatterlist *sg, | 2302 | struct scatterlist *sg, |
| 1208 | unsigned int nbytes, u8 *out) | 2303 | unsigned int nbytes, u8 *out) |
| @@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc, | |||
| 1210 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); | 2305 | return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); |
| 1211 | } | 2306 | } |
| 1212 | 2307 | ||
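The init/update/final sequence (or the crypto_hash_digest() shorthand) documented above might be used as in this sketch; the "sha1" algorithm name and the flat input buffer are assumptions:

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static int example_hash(const void *data, unsigned int len, u8 *out)
    {
            struct crypto_hash *tfm;
            struct hash_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
            sg_init_one(&sg, data, len);

            err = crypto_hash_init(&desc);
            if (!err)
                    err = crypto_hash_update(&desc, &sg, len);
            if (!err)
                    err = crypto_hash_final(&desc, out); /* out must hold crypto_hash_digestsize(tfm) bytes */
            /* equivalently: err = crypto_hash_digest(&desc, &sg, len, out); */

            crypto_free_hash(tfm);
            return err;
    }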
| 2308 | /** | ||
| 2309 | * crypto_hash_setkey() - set key for message digest | ||
| 2310 | * @hash: cipher handle | ||
| 2311 | * @key: buffer holding the key | ||
| 2312 | * @keylen: length of the key in bytes | ||
| 2313 | * | ||
| 2314 | * The caller provided key is set for the message digest cipher. The cipher | ||
| 2315 | * handle must point to a keyed hash in order for this function to succeed. | ||
| 2316 | * | ||
| 2317 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | ||
| 2318 | */ | ||
| 1213 | static inline int crypto_hash_setkey(struct crypto_hash *hash, | 2319 | static inline int crypto_hash_setkey(struct crypto_hash *hash, |
| 1214 | const u8 *key, unsigned int keylen) | 2320 | const u8 *key, unsigned int keylen) |
| 1215 | { | 2321 | { |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b2a2a08523bf..5a813988e6d4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -124,15 +124,15 @@ struct dentry { | |||
| 124 | void *d_fsdata; /* fs-specific data */ | 124 | void *d_fsdata; /* fs-specific data */ |
| 125 | 125 | ||
| 126 | struct list_head d_lru; /* LRU list */ | 126 | struct list_head d_lru; /* LRU list */ |
| 127 | struct list_head d_child; /* child of parent list */ | ||
| 128 | struct list_head d_subdirs; /* our children */ | ||
| 127 | /* | 129 | /* |
| 128 | * d_child and d_rcu can share memory | 130 | * d_alias and d_rcu can share memory |
| 129 | */ | 131 | */ |
| 130 | union { | 132 | union { |
| 131 | struct list_head d_child; /* child of parent list */ | 133 | struct hlist_node d_alias; /* inode alias list */ |
| 132 | struct rcu_head d_rcu; | 134 | struct rcu_head d_rcu; |
| 133 | } d_u; | 135 | } d_u; |
| 134 | struct list_head d_subdirs; /* our children */ | ||
| 135 | struct hlist_node d_alias; /* inode alias list */ | ||
| 136 | }; | 136 | }; |
| 137 | 137 | ||
| 138 | /* | 138 | /* |
| @@ -230,7 +230,6 @@ extern seqlock_t rename_lock; | |||
| 230 | */ | 230 | */ |
| 231 | extern void d_instantiate(struct dentry *, struct inode *); | 231 | extern void d_instantiate(struct dentry *, struct inode *); |
| 232 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); | 232 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); |
| 233 | extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); | ||
| 234 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); | 233 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); |
| 235 | extern void __d_drop(struct dentry *dentry); | 234 | extern void __d_drop(struct dentry *dentry); |
| 236 | extern void d_drop(struct dentry *dentry); | 235 | extern void d_drop(struct dentry *dentry); |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d0b4d1aa132..da4c4983adbe 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 22 | 22 | ||
| 23 | struct device; | ||
| 23 | struct file_operations; | 24 | struct file_operations; |
| 24 | 25 | ||
| 25 | struct debugfs_blob_wrapper { | 26 | struct debugfs_blob_wrapper { |
| @@ -92,20 +93,25 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, | |||
| 92 | struct dentry *parent, | 93 | struct dentry *parent, |
| 93 | struct debugfs_regset32 *regset); | 94 | struct debugfs_regset32 *regset); |
| 94 | 95 | ||
| 95 | int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, | 96 | void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, |
| 96 | int nregs, void __iomem *base, char *prefix); | 97 | int nregs, void __iomem *base, char *prefix); |
| 97 | 98 | ||
| 98 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, | 99 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, |
| 99 | struct dentry *parent, | 100 | struct dentry *parent, |
| 100 | u32 *array, u32 elements); | 101 | u32 *array, u32 elements); |
| 101 | 102 | ||
| 103 | struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, | ||
| 104 | struct dentry *parent, | ||
| 105 | int (*read_fn)(struct seq_file *s, | ||
| 106 | void *data)); | ||
| 107 | |||
| 102 | bool debugfs_initialized(void); | 108 | bool debugfs_initialized(void); |
| 103 | 109 | ||
| 104 | #else | 110 | #else |
| 105 | 111 | ||
| 106 | #include <linux/err.h> | 112 | #include <linux/err.h> |
| 107 | 113 | ||
| 108 | /* | 114 | /* |
| 109 | * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled | 115 | * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled |
| 110 | * so users have a chance to detect if there was a real error or not. We don't | 116 | * so users have a chance to detect if there was a real error or not. We don't |
| 111 | * want to duplicate the design decision mistakes of procfs and devfs again. | 117 | * want to duplicate the design decision mistakes of procfs and devfs again. |
| @@ -233,10 +239,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name, | |||
| 233 | return ERR_PTR(-ENODEV); | 239 | return ERR_PTR(-ENODEV); |
| 234 | } | 240 | } |
| 235 | 241 | ||
| 236 | static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, | 242 | static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, |
| 237 | int nregs, void __iomem *base, char *prefix) | 243 | int nregs, void __iomem *base, char *prefix) |
| 238 | { | 244 | { |
| 239 | return 0; | ||
| 240 | } | 245 | } |
| 241 | 246 | ||
| 242 | static inline bool debugfs_initialized(void) | 247 | static inline bool debugfs_initialized(void) |
| @@ -251,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t | |||
| 251 | return ERR_PTR(-ENODEV); | 256 | return ERR_PTR(-ENODEV); |
| 252 | } | 257 | } |
| 253 | 258 | ||
| 259 | static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, | ||
| 260 | const char *name, | ||
| 261 | struct dentry *parent, | ||
| 262 | int (*read_fn)(struct seq_file *s, | ||
| 263 | void *data)) | ||
| 264 | { | ||
| 265 | return ERR_PTR(-ENODEV); | ||
| 266 | } | ||
| 267 | |||
| 254 | #endif | 268 | #endif |
| 255 | 269 | ||
| 256 | #endif | 270 | #endif |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index f1863dcd83ea..ce447f0f1bad 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
| @@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev, | |||
| 188 | extern void devm_devfreq_remove_device(struct device *dev, | 188 | extern void devm_devfreq_remove_device(struct device *dev, |
| 189 | struct devfreq *devfreq); | 189 | struct devfreq *devfreq); |
| 190 | 190 | ||
| 191 | /* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */ | 191 | /* Supposed to be called by PM callbacks */ |
| 192 | extern int devfreq_suspend_device(struct devfreq *devfreq); | 192 | extern int devfreq_suspend_device(struct devfreq *devfreq); |
| 193 | extern int devfreq_resume_device(struct devfreq *devfreq); | 193 | extern int devfreq_resume_device(struct devfreq *devfreq); |
| 194 | 194 | ||
diff --git a/include/linux/device.h b/include/linux/device.h index ce1f21608b16..fb506738f7b7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev) | |||
| 911 | mutex_unlock(&dev->mutex); | 911 | mutex_unlock(&dev->mutex); |
| 912 | } | 912 | } |
| 913 | 913 | ||
| 914 | static inline void device_lock_assert(struct device *dev) | ||
| 915 | { | ||
| 916 | lockdep_assert_held(&dev->mutex); | ||
| 917 | } | ||
| 918 | |||
| 914 | void driver_init(void); | 919 | void driver_init(void); |
| 915 | 920 | ||
| 916 | /* | 921 | /* |
| @@ -1118,6 +1123,41 @@ do { \ | |||
| 1118 | }) | 1123 | }) |
| 1119 | #endif | 1124 | #endif |
| 1120 | 1125 | ||
| 1126 | #ifdef CONFIG_PRINTK | ||
| 1127 | #define dev_level_once(dev_level, dev, fmt, ...) \ | ||
| 1128 | do { \ | ||
| 1129 | static bool __print_once __read_mostly; \ | ||
| 1130 | \ | ||
| 1131 | if (!__print_once) { \ | ||
| 1132 | __print_once = true; \ | ||
| 1133 | dev_level(dev, fmt, ##__VA_ARGS__); \ | ||
| 1134 | } \ | ||
| 1135 | } while (0) | ||
| 1136 | #else | ||
| 1137 | #define dev_level_once(dev_level, dev, fmt, ...) \ | ||
| 1138 | do { \ | ||
| 1139 | if (0) \ | ||
| 1140 | dev_level(dev, fmt, ##__VA_ARGS__); \ | ||
| 1141 | } while (0) | ||
| 1142 | #endif | ||
| 1143 | |||
| 1144 | #define dev_emerg_once(dev, fmt, ...) \ | ||
| 1145 | dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) | ||
| 1146 | #define dev_alert_once(dev, fmt, ...) \ | ||
| 1147 | dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) | ||
| 1148 | #define dev_crit_once(dev, fmt, ...) \ | ||
| 1149 | dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) | ||
| 1150 | #define dev_err_once(dev, fmt, ...) \ | ||
| 1151 | dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) | ||
| 1152 | #define dev_warn_once(dev, fmt, ...) \ | ||
| 1153 | dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) | ||
| 1154 | #define dev_notice_once(dev, fmt, ...) \ | ||
| 1155 | dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) | ||
| 1156 | #define dev_info_once(dev, fmt, ...) \ | ||
| 1157 | dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) | ||
| 1158 | #define dev_dbg_once(dev, fmt, ...) \ | ||
| 1159 | dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) | ||
| 1160 | |||
| 1121 | #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ | 1161 | #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ |
| 1122 | do { \ | 1162 | do { \ |
| 1123 | static DEFINE_RATELIMIT_STATE(_rs, \ | 1163 | static DEFINE_RATELIMIT_STATE(_rs, \ |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index d5d388160f42..c3007cb4bfa6 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
| @@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) | |||
| 129 | 129 | ||
| 130 | extern u64 dma_get_required_mask(struct device *dev); | 130 | extern u64 dma_get_required_mask(struct device *dev); |
| 131 | 131 | ||
| 132 | #ifndef set_arch_dma_coherent_ops | 132 | #ifndef arch_setup_dma_ops |
| 133 | static inline int set_arch_dma_coherent_ops(struct device *dev) | 133 | static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, |
| 134 | { | 134 | u64 size, struct iommu_ops *iommu, |
| 135 | return 0; | 135 | bool coherent) { } |
| 136 | } | 136 | #endif |
| 137 | |||
| 138 | #ifndef arch_teardown_dma_ops | ||
| 139 | static inline void arch_teardown_dma_ops(struct device *dev) { } | ||
| 137 | #endif | 140 | #endif |
| 138 | 141 | ||
| 139 | static inline unsigned int dma_get_max_seg_size(struct device *dev) | 142 | static inline unsigned int dma_get_max_seg_size(struct device *dev) |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 653a1fd07ae8..40cd75e21ea2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -447,7 +447,8 @@ struct dmaengine_unmap_data { | |||
| 447 | * communicate status | 447 | * communicate status |
| 448 | * @phys: physical address of the descriptor | 448 | * @phys: physical address of the descriptor |
| 449 | * @chan: target channel for this operation | 449 | * @chan: target channel for this operation |
| 450 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine | 450 | * @tx_submit: accept the descriptor, assign ordered cookie and mark the |
| 451 | * descriptor pending. To be pushed on .issue_pending() call | ||
| 451 | * @callback: routine to call after this operation is complete | 452 | * @callback: routine to call after this operation is complete |
| 452 | * @callback_param: general parameter to pass to the callback routine | 453 | * @callback_param: general parameter to pass to the callback routine |
| 453 | * ---async_tx api specific fields--- | 454 | * ---async_tx api specific fields--- |
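The reworded @tx_submit description reflects the usual client sequence: prepare a descriptor, submit it to obtain a cookie, then start it with issue_pending. A sketch under the assumption of an already requested and configured slave channel and a DMA-mapped buffer:

    #include <linux/dmaengine.h>

    static int example_issue(struct dma_chan *chan, dma_addr_t buf_dma, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;

            tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!tx)
                    return -EBUSY;

            /* tx->callback / tx->callback_param may be set here before submission. */

            cookie = dmaengine_submit(tx);  /* calls tx->tx_submit(): cookie assigned,
                                             * descriptor marked pending */
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);  /* pending descriptors are pushed to hardware */
            return 0;
    }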
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 593fff99e6bf..30624954dec5 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
| @@ -30,6 +30,12 @@ | |||
| 30 | 30 | ||
| 31 | struct acpi_dmar_header; | 31 | struct acpi_dmar_header; |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_X86 | ||
| 34 | # define DMAR_UNITS_SUPPORTED MAX_IO_APICS | ||
| 35 | #else | ||
| 36 | # define DMAR_UNITS_SUPPORTED 64 | ||
| 37 | #endif | ||
| 38 | |||
| 33 | /* DMAR Flags */ | 39 | /* DMAR Flags */ |
| 34 | #define DMAR_INTR_REMAP 0x1 | 40 | #define DMAR_INTR_REMAP 0x1 |
| 35 | #define DMAR_X2APIC_OPT_OUT 0x2 | 41 | #define DMAR_X2APIC_OPT_OUT 0x2 |
| @@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, | |||
| 120 | /* Intel IOMMU detection */ | 126 | /* Intel IOMMU detection */ |
| 121 | extern int detect_intel_iommu(void); | 127 | extern int detect_intel_iommu(void); |
| 122 | extern int enable_drhd_fault_handling(void); | 128 | extern int enable_drhd_fault_handling(void); |
| 129 | extern int dmar_device_add(acpi_handle handle); | ||
| 130 | extern int dmar_device_remove(acpi_handle handle); | ||
| 131 | |||
| 132 | static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) | ||
| 133 | { | ||
| 134 | return 0; | ||
| 135 | } | ||
| 123 | 136 | ||
| 124 | #ifdef CONFIG_INTEL_IOMMU | 137 | #ifdef CONFIG_INTEL_IOMMU |
| 125 | extern int iommu_detected, no_iommu; | 138 | extern int iommu_detected, no_iommu; |
| 126 | extern int intel_iommu_init(void); | 139 | extern int intel_iommu_init(void); |
| 127 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); | 140 | extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); |
| 128 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); | 141 | extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); |
| 142 | extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
| 143 | extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); | ||
| 144 | extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
| 129 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); | 145 | extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); |
| 130 | #else /* !CONFIG_INTEL_IOMMU: */ | 146 | #else /* !CONFIG_INTEL_IOMMU: */ |
| 131 | static inline int intel_iommu_init(void) { return -ENODEV; } | 147 | static inline int intel_iommu_init(void) { return -ENODEV; } |
| 132 | static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) | 148 | |
| 149 | #define dmar_parse_one_rmrr dmar_res_noop | ||
| 150 | #define dmar_parse_one_atsr dmar_res_noop | ||
| 151 | #define dmar_check_one_atsr dmar_res_noop | ||
| 152 | #define dmar_release_one_atsr dmar_res_noop | ||
| 153 | |||
| 154 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | ||
| 133 | { | 155 | { |
| 134 | return 0; | 156 | return 0; |
| 135 | } | 157 | } |
| 136 | static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) | 158 | |
| 159 | static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
| 137 | { | 160 | { |
| 138 | return 0; | 161 | return 0; |
| 139 | } | 162 | } |
| 140 | static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) | 163 | #endif /* CONFIG_INTEL_IOMMU */ |
| 164 | |||
| 165 | #ifdef CONFIG_IRQ_REMAP | ||
| 166 | extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); | ||
| 167 | #else /* CONFIG_IRQ_REMAP */ | ||
| 168 | static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) | ||
| 169 | { return 0; } | ||
| 170 | #endif /* CONFIG_IRQ_REMAP */ | ||
| 171 | |||
| 172 | #else /* CONFIG_DMAR_TABLE */ | ||
| 173 | |||
| 174 | static inline int dmar_device_add(void *handle) | ||
| 175 | { | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | static inline int dmar_device_remove(void *handle) | ||
| 141 | { | 180 | { |
| 142 | return 0; | 181 | return 0; |
| 143 | } | 182 | } |
| 144 | #endif /* CONFIG_INTEL_IOMMU */ | ||
| 145 | 183 | ||
| 146 | #endif /* CONFIG_DMAR_TABLE */ | 184 | #endif /* CONFIG_DMAR_TABLE */ |
| 147 | 185 | ||
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index debb70d40547..8723f2a99e15 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
| @@ -172,7 +172,7 @@ enum drbd_ret_code { | |||
| 172 | ERR_RES_NOT_KNOWN = 158, | 172 | ERR_RES_NOT_KNOWN = 158, |
| 173 | ERR_RES_IN_USE = 159, | 173 | ERR_RES_IN_USE = 159, |
| 174 | ERR_MINOR_CONFIGURED = 160, | 174 | ERR_MINOR_CONFIGURED = 160, |
| 175 | ERR_MINOR_EXISTS = 161, | 175 | ERR_MINOR_OR_VOLUME_EXISTS = 161, |
| 176 | ERR_INVALID_REQUEST = 162, | 176 | ERR_INVALID_REQUEST = 162, |
| 177 | ERR_NEED_APV_100 = 163, | 177 | ERR_NEED_APV_100 = 163, |
| 178 | ERR_NEED_ALLOW_TWO_PRI = 164, | 178 | ERR_NEED_ALLOW_TWO_PRI = 164, |
diff --git a/include/linux/elf.h b/include/linux/elf.h index 67a5fa7830c4..20fa8d8ae313 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h | |||
| @@ -15,6 +15,11 @@ | |||
| 15 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) | 15 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) |
| 16 | #endif | 16 | #endif |
| 17 | 17 | ||
| 18 | #ifndef SET_PERSONALITY2 | ||
| 19 | #define SET_PERSONALITY2(ex, state) \ | ||
| 20 | SET_PERSONALITY(ex) | ||
| 21 | #endif | ||
| 22 | |||
| 18 | #if ELF_CLASS == ELFCLASS32 | 23 | #if ELF_CLASS == ELFCLASS32 |
| 19 | 24 | ||
| 20 | extern Elf32_Dyn _DYNAMIC []; | 25 | extern Elf32_Dyn _DYNAMIC []; |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e3..41c891d05f04 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
| @@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) | |||
| 392 | #endif | 392 | #endif |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | /** | ||
| 396 | * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame | ||
| 397 | * @skb: Buffer to pad | ||
| 398 | * | ||
| 399 | * An Ethernet frame should have a minimum size of 60 bytes. This function | ||
| 401 | * takes short frames and pads them with zeros up to the 60 byte minimum. | ||
| 401 | */ | ||
| 402 | static inline int eth_skb_pad(struct sk_buff *skb) | ||
| 403 | { | ||
| 404 | return skb_put_padto(skb, ETH_ZLEN); | ||
| 405 | } | ||
| 406 | |||
| 395 | #endif /* _LINUX_ETHERDEVICE_H */ | 407 | #endif /* _LINUX_ETHERDEVICE_H */ |
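A sketch of how a driver transmit path might use the new helper (the function name and surrounding driver logic are placeholders); note that on failure the underlying skb_put_padto() has already freed the skb:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* Zero-pad frames shorter than ETH_ZLEN (60 bytes) before handing them off. */
            if (eth_skb_pad(skb))
                    return NETDEV_TX_OK;    /* skb already freed on error */

            /* ... map the (now >= 60 byte) frame and hand it to the hardware ... */
            return NETDEV_TX_OK;
    }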
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb82..653dc9c4ebac 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
| @@ -59,6 +59,26 @@ enum ethtool_phys_id_state { | |||
| 59 | ETHTOOL_ID_OFF | 59 | ETHTOOL_ID_OFF |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| 62 | enum { | ||
| 63 | ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ | ||
| 64 | ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Add your fresh new hash function bits above and remember to update | ||
| 68 | * rss_hash_func_strings[] in ethtool.c | ||
| 69 | */ | ||
| 70 | ETH_RSS_HASH_FUNCS_COUNT | ||
| 71 | }; | ||
| 72 | |||
| 73 | #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) | ||
| 74 | #define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) | ||
| 75 | |||
| 76 | #define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) | ||
| 77 | #define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) | ||
| 78 | |||
| 79 | #define ETH_RSS_HASH_UNKNOWN 0 | ||
| 80 | #define ETH_RSS_HASH_NO_CHANGE 0 | ||
| 81 | |||
| 62 | struct net_device; | 82 | struct net_device; |
| 63 | 83 | ||
| 64 | /* Some generic methods drivers may use in their ethtool_ops */ | 84 | /* Some generic methods drivers may use in their ethtool_ops */ |
| @@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) | |||
| 158 | * Returns zero if not supported for this specific device. | 178 | * Returns zero if not supported for this specific device. |
| 159 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. | 179 | * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. |
| 160 | * Returns zero if not supported for this specific device. | 180 | * Returns zero if not supported for this specific device. |
| 161 | * @get_rxfh: Get the contents of the RX flow hash indirection table and hash | 181 | * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key |
| 162 | * key. | 182 | * and/or hash function. |
| 163 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
| 164 | * @get_rxfh_key_size are implemented and return non-zero. | ||
| 165 | * Returns a negative error code or zero. | ||
| 166 | * @set_rxfh: Set the contents of the RX flow hash indirection table and/or | ||
| 167 | * hash key. In case only the indirection table or hash key is to be | ||
| 168 | * changed, the other argument will be %NULL. | ||
| 169 | * Will only be called if one or both of @get_rxfh_indir_size and | ||
| 170 | * @get_rxfh_key_size are implemented and return non-zero. | ||
| 171 | * Returns a negative error code or zero. | 183 | * Returns a negative error code or zero. |
| 184 | * @set_rxfh: Set the contents of the RX flow hash indirection table, hash | ||
| 185 | * key, and/or hash function. Arguments which are set to %NULL or zero | ||
| 186 | * will remain unchanged. | ||
| 187 | * Returns a negative error code or zero. An error code must be returned | ||
| 188 | * if at least one unsupported change was requested. | ||
| 172 | * @get_channels: Get number of channels. | 189 | * @get_channels: Get number of channels. |
| 173 | * @set_channels: Set number of channels. Returns a negative error code or | 190 | * @set_channels: Set number of channels. Returns a negative error code or |
| 174 | * zero. | 191 | * zero. |
| @@ -241,9 +258,10 @@ struct ethtool_ops { | |||
| 241 | int (*reset)(struct net_device *, u32 *); | 258 | int (*reset)(struct net_device *, u32 *); |
| 242 | u32 (*get_rxfh_key_size)(struct net_device *); | 259 | u32 (*get_rxfh_key_size)(struct net_device *); |
| 243 | u32 (*get_rxfh_indir_size)(struct net_device *); | 260 | u32 (*get_rxfh_indir_size)(struct net_device *); |
| 244 | int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); | 261 | int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, |
| 262 | u8 *hfunc); | ||
| 245 | int (*set_rxfh)(struct net_device *, const u32 *indir, | 263 | int (*set_rxfh)(struct net_device *, const u32 *indir, |
| 246 | const u8 *key); | 264 | const u8 *key, const u8 hfunc); |
| 247 | void (*get_channels)(struct net_device *, struct ethtool_channels *); | 265 | void (*get_channels)(struct net_device *, struct ethtool_channels *); |
| 248 | int (*set_channels)(struct net_device *, struct ethtool_channels *); | 266 | int (*set_channels)(struct net_device *, struct ethtool_channels *); |
| 249 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); | 267 | int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); |
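Given the hfunc semantics spelled out above, a driver that only implements Toeplitz hashing might wire up the two callbacks as in this hedged sketch (the function names are placeholders):

    #include <linux/ethtool.h>

    static int example_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
    {
            if (hfunc)
                    *hfunc = ETH_RSS_HASH_TOP;      /* report Toeplitz */
            /* fill indir and key here when they are non-NULL */
            return 0;
    }

    static int example_set_rxfh(struct net_device *dev, const u32 *indir,
                                const u8 *key, const u8 hfunc)
    {
            /* Reject any request to switch away from Toeplitz. */
            if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                    return -EOPNOTSUPP;
            /* apply indir and key here when they are non-NULL */
            return 0;
    }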
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 860313a33a43..87f14e90e984 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
| @@ -33,7 +33,8 @@ | |||
| 33 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) | 33 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) |
| 34 | 34 | ||
| 35 | /* This flag is used by node and meta inodes, and by recovery */ | 35 | /* This flag is used by node and meta inodes, and by recovery */ |
| 36 | #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) | 36 | #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) |
| 37 | #define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) | ||
| 37 | 38 | ||
| 38 | /* | 39 | /* |
| 39 | * For further optimization on multi-head logs, on-disk layout supports maximum | 40 | * For further optimization on multi-head logs, on-disk layout supports maximum |
| @@ -170,14 +171,12 @@ struct f2fs_extent { | |||
| 170 | 171 | ||
| 171 | #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ | 172 | #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ |
| 172 | #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ | 173 | #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ |
| 174 | #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ | ||
| 175 | #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ | ||
| 173 | 176 | ||
| 174 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ | 177 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ |
| 175 | F2FS_INLINE_XATTR_ADDRS - 1)) | 178 | F2FS_INLINE_XATTR_ADDRS - 1)) |
| 176 | 179 | ||
| 177 | #define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ | ||
| 178 | sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ | ||
| 179 | DEF_NIDS_PER_INODE - 1)) | ||
| 180 | |||
| 181 | struct f2fs_inode { | 180 | struct f2fs_inode { |
| 182 | __le16 i_mode; /* file mode */ | 181 | __le16 i_mode; /* file mode */ |
| 183 | __u8 i_advise; /* file hints */ | 182 | __u8 i_advise; /* file hints */ |
| @@ -435,6 +434,24 @@ struct f2fs_dentry_block { | |||
| 435 | __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; | 434 | __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; |
| 436 | } __packed; | 435 | } __packed; |
| 437 | 436 | ||
| 437 | /* for inline dir */ | ||
| 438 | #define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ | ||
| 439 | ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ | ||
| 440 | BITS_PER_BYTE + 1)) | ||
| 441 | #define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ | ||
| 442 | BITS_PER_BYTE - 1) / BITS_PER_BYTE) | ||
| 443 | #define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ | ||
| 444 | ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ | ||
| 445 | NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) | ||
| 446 | |||
| 447 | /* inline directory entry structure */ | ||
| 448 | struct f2fs_inline_dentry { | ||
| 449 | __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; | ||
| 450 | __u8 reserved[INLINE_RESERVED_SIZE]; | ||
| 451 | struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; | ||
| 452 | __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; | ||
| 453 | } __packed; | ||
| 454 | |||
| 438 | /* file types used in inode_info->flags */ | 455 | /* file types used in inode_info->flags */ |
| 439 | enum { | 456 | enum { |
| 440 | F2FS_FT_UNKNOWN, | 457 | F2FS_FT_UNKNOWN, |
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c6f996f2abb6..798fad9e420d 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
| 7 | #include <linux/debugfs.h> | 7 | #include <linux/debugfs.h> |
| 8 | #include <linux/ratelimit.h> | ||
| 8 | #include <linux/atomic.h> | 9 | #include <linux/atomic.h> |
| 9 | 10 | ||
| 10 | /* | 11 | /* |
| @@ -25,14 +26,18 @@ struct fault_attr { | |||
| 25 | unsigned long reject_end; | 26 | unsigned long reject_end; |
| 26 | 27 | ||
| 27 | unsigned long count; | 28 | unsigned long count; |
| 29 | struct ratelimit_state ratelimit_state; | ||
| 30 | struct dentry *dname; | ||
| 28 | }; | 31 | }; |
| 29 | 32 | ||
| 30 | #define FAULT_ATTR_INITIALIZER { \ | 33 | #define FAULT_ATTR_INITIALIZER { \ |
| 31 | .interval = 1, \ | 34 | .interval = 1, \ |
| 32 | .times = ATOMIC_INIT(1), \ | 35 | .times = ATOMIC_INIT(1), \ |
| 33 | .require_end = ULONG_MAX, \ | 36 | .require_end = ULONG_MAX, \ |
| 34 | .stacktrace_depth = 32, \ | 37 | .stacktrace_depth = 32, \ |
| 35 | .verbose = 2, \ | 38 | .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ |
| 39 | .verbose = 2, \ | ||
| 40 | .dname = NULL, \ | ||
| 36 | } | 41 | } |
| 37 | 42 | ||
| 38 | #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER | 43 | #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER |
diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874b..39efee130d2b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
| @@ -128,8 +128,8 @@ struct fence_cb { | |||
| 128 | * from irq context, so normal spinlocks can be used. | 128 | * from irq context, so normal spinlocks can be used. |
| 129 | * | 129 | * |
| 130 | * A return value of false indicates the fence already passed, | 130 | * A return value of false indicates the fence already passed, |
| 131 | * or some failure occured that made it impossible to enable | 131 | * or some failure occurred that made it impossible to enable |
| 132 | * signaling. True indicates succesful enabling. | 132 | * signaling. True indicates successful enabling. |
| 133 | * | 133 | * |
| 134 | * fence->status may be set in enable_signaling, but only when false is | 134 | * fence->status may be set in enable_signaling, but only when false is |
| 135 | * returned. | 135 | * returned. |
diff --git a/include/linux/file.h b/include/linux/file.h index 4d69123377a2..f87d30882a24 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
| @@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag); | |||
| 66 | extern bool get_close_on_exec(unsigned int fd); | 66 | extern bool get_close_on_exec(unsigned int fd); |
| 67 | extern void put_filp(struct file *); | 67 | extern void put_filp(struct file *); |
| 68 | extern int get_unused_fd_flags(unsigned flags); | 68 | extern int get_unused_fd_flags(unsigned flags); |
| 69 | #define get_unused_fd() get_unused_fd_flags(0) | ||
| 70 | extern void put_unused_fd(unsigned int fd); | 69 | extern void put_unused_fd(unsigned int fd); |
| 71 | 70 | ||
| 72 | extern void fd_install(unsigned int fd, struct file *file); | 71 | extern void fd_install(unsigned int fd, struct file *file); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed1..caac2087a4d5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); | |||
| 381 | void bpf_prog_destroy(struct bpf_prog *fp); | 381 | void bpf_prog_destroy(struct bpf_prog *fp); |
| 382 | 382 | ||
| 383 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 383 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
| 384 | int sk_attach_bpf(u32 ufd, struct sock *sk); | ||
| 384 | int sk_detach_filter(struct sock *sk); | 385 | int sk_detach_filter(struct sock *sk); |
| 385 | 386 | ||
| 386 | int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); | 387 | int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ab779e8a63c..f90c0282c114 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/pid.h> | 18 | #include <linux/pid.h> |
| 19 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
| 20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
| 21 | #include <linux/rwsem.h> | ||
| 21 | #include <linux/capability.h> | 22 | #include <linux/capability.h> |
| 22 | #include <linux/semaphore.h> | 23 | #include <linux/semaphore.h> |
| 23 | #include <linux/fiemap.h> | 24 | #include <linux/fiemap.h> |
| @@ -401,7 +402,7 @@ struct address_space { | |||
| 401 | atomic_t i_mmap_writable;/* count VM_SHARED mappings */ | 402 | atomic_t i_mmap_writable;/* count VM_SHARED mappings */ |
| 402 | struct rb_root i_mmap; /* tree of private and shared mappings */ | 403 | struct rb_root i_mmap; /* tree of private and shared mappings */ |
| 403 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ | 404 | struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ |
| 404 | struct mutex i_mmap_mutex; /* protect tree, count, list */ | 405 | struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ |
| 405 | /* Protected by tree_lock together with the radix tree */ | 406 | /* Protected by tree_lock together with the radix tree */ |
| 406 | unsigned long nrpages; /* number of total pages */ | 407 | unsigned long nrpages; /* number of total pages */ |
| 407 | unsigned long nrshadows; /* number of shadow entries */ | 408 | unsigned long nrshadows; /* number of shadow entries */ |
| @@ -467,6 +468,26 @@ struct block_device { | |||
| 467 | 468 | ||
| 468 | int mapping_tagged(struct address_space *mapping, int tag); | 469 | int mapping_tagged(struct address_space *mapping, int tag); |
| 469 | 470 | ||
| 471 | static inline void i_mmap_lock_write(struct address_space *mapping) | ||
| 472 | { | ||
| 473 | down_write(&mapping->i_mmap_rwsem); | ||
| 474 | } | ||
| 475 | |||
| 476 | static inline void i_mmap_unlock_write(struct address_space *mapping) | ||
| 477 | { | ||
| 478 | up_write(&mapping->i_mmap_rwsem); | ||
| 479 | } | ||
| 480 | |||
| 481 | static inline void i_mmap_lock_read(struct address_space *mapping) | ||
| 482 | { | ||
| 483 | down_read(&mapping->i_mmap_rwsem); | ||
| 484 | } | ||
| 485 | |||
| 486 | static inline void i_mmap_unlock_read(struct address_space *mapping) | ||
| 487 | { | ||
| 488 | up_read(&mapping->i_mmap_rwsem); | ||
| 489 | } | ||
| 490 | |||
| 470 | /* | 491 | /* |
| 471 | * Might pages of this file be mapped into userspace? | 492 | * Might pages of this file be mapped into userspace? |
| 472 | */ | 493 | */ |
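The new wrappers keep i_mmap users independent of the mutex-to-rwsem switch; a read-side walk of the mapping's interval tree might look like the following sketch (vma_interval_tree_foreach() is the existing helper from linux/mm.h; the index bounds are placeholders):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void example_walk_mappers(struct address_space *mapping,
                                     pgoff_t first_index, pgoff_t last_index)
    {
            struct vm_area_struct *vma;

            i_mmap_lock_read(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index, last_index) {
                    /* inspect each VMA mapping the given page range */
            }
            i_mmap_unlock_read(mapping);
    }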
| @@ -606,9 +627,6 @@ struct inode { | |||
| 606 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 627 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
| 607 | struct file_lock *i_flock; | 628 | struct file_lock *i_flock; |
| 608 | struct address_space i_data; | 629 | struct address_space i_data; |
| 609 | #ifdef CONFIG_QUOTA | ||
| 610 | struct dquot *i_dquot[MAXQUOTAS]; | ||
| 611 | #endif | ||
| 612 | struct list_head i_devices; | 630 | struct list_head i_devices; |
| 613 | union { | 631 | union { |
| 614 | struct pipe_inode_info *i_pipe; | 632 | struct pipe_inode_info *i_pipe; |
| @@ -789,7 +807,6 @@ struct file { | |||
| 789 | struct rcu_head fu_rcuhead; | 807 | struct rcu_head fu_rcuhead; |
| 790 | } f_u; | 808 | } f_u; |
| 791 | struct path f_path; | 809 | struct path f_path; |
| 792 | #define f_dentry f_path.dentry | ||
| 793 | struct inode *f_inode; /* cached value */ | 810 | struct inode *f_inode; /* cached value */ |
| 794 | const struct file_operations *f_op; | 811 | const struct file_operations *f_op; |
| 795 | 812 | ||
| @@ -1224,6 +1241,7 @@ struct super_block { | |||
| 1224 | struct backing_dev_info *s_bdi; | 1241 | struct backing_dev_info *s_bdi; |
| 1225 | struct mtd_info *s_mtd; | 1242 | struct mtd_info *s_mtd; |
| 1226 | struct hlist_node s_instances; | 1243 | struct hlist_node s_instances; |
| 1244 | unsigned int s_quota_types; /* Bitmask of supported quota types */ | ||
| 1227 | struct quota_info s_dquot; /* Diskquota specific options */ | 1245 | struct quota_info s_dquot; /* Diskquota specific options */ |
| 1228 | 1246 | ||
| 1229 | struct sb_writers s_writers; | 1247 | struct sb_writers s_writers; |
| @@ -1467,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); | |||
| 1467 | * This allows the kernel to read directories into kernel space or | 1485 | * This allows the kernel to read directories into kernel space or |
| 1468 | * to have different dirent layouts depending on the binary type. | 1486 | * to have different dirent layouts depending on the binary type. |
| 1469 | */ | 1487 | */ |
| 1470 | typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); | 1488 | struct dir_context; |
| 1489 | typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, | ||
| 1490 | unsigned); | ||
| 1491 | |||
| 1471 | struct dir_context { | 1492 | struct dir_context { |
| 1472 | const filldir_t actor; | 1493 | const filldir_t actor; |
| 1473 | loff_t pos; | 1494 | loff_t pos; |
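With filldir_t now receiving the struct dir_context itself instead of an opaque void *, actors recover their private state with container_of(). A hedged sketch follows; my_getdents_ctx and my_fill are illustrative names, not kernel API.

#include <linux/fs.h>
#include <linux/kernel.h>

struct my_getdents_ctx {
	struct dir_context ctx;		/* embedded so the actor can find us */
	unsigned int count;
};

static int my_fill(struct dir_context *ctx, const char *name, int namelen,
		   loff_t offset, u64 ino, unsigned int d_type)
{
	struct my_getdents_ctx *buf =
		container_of(ctx, struct my_getdents_ctx, ctx);

	buf->count++;			/* this sketch only counts entries */
	return 0;			/* 0 = keep iterating, non-zero = stop */
}

/*
 * Caller side, given a struct file *f:
 *	struct my_getdents_ctx buf = { .ctx.actor = my_fill };
 *	iterate_dir(f, &buf.ctx);
 */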
| @@ -1497,6 +1518,7 @@ struct file_operations { | |||
| 1497 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1518 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
| 1498 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); | 1519 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); |
| 1499 | int (*mmap) (struct file *, struct vm_area_struct *); | 1520 | int (*mmap) (struct file *, struct vm_area_struct *); |
| 1521 | void (*mremap)(struct file *, struct vm_area_struct *); | ||
| 1500 | int (*open) (struct inode *, struct file *); | 1522 | int (*open) (struct inode *, struct file *); |
| 1501 | int (*flush) (struct file *, fl_owner_t id); | 1523 | int (*flush) (struct file *, fl_owner_t id); |
| 1502 | int (*release) (struct inode *, struct file *); | 1524 | int (*release) (struct inode *, struct file *); |
| @@ -1513,7 +1535,7 @@ struct file_operations { | |||
| 1513 | int (*setlease)(struct file *, long, struct file_lock **, void **); | 1535 | int (*setlease)(struct file *, long, struct file_lock **, void **); |
| 1514 | long (*fallocate)(struct file *file, int mode, loff_t offset, | 1536 | long (*fallocate)(struct file *file, int mode, loff_t offset, |
| 1515 | loff_t len); | 1537 | loff_t len); |
| 1516 | int (*show_fdinfo)(struct seq_file *m, struct file *f); | 1538 | void (*show_fdinfo)(struct seq_file *m, struct file *f); |
| 1517 | }; | 1539 | }; |
| 1518 | 1540 | ||
| 1519 | struct inode_operations { | 1541 | struct inode_operations { |
| @@ -1560,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | |||
| 1560 | struct iovec *fast_pointer, | 1582 | struct iovec *fast_pointer, |
| 1561 | struct iovec **ret_pointer); | 1583 | struct iovec **ret_pointer); |
| 1562 | 1584 | ||
| 1585 | extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); | ||
| 1563 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); | 1586 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); |
| 1564 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); | 1587 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); |
| 1565 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, | 1588 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, |
| @@ -1577,7 +1600,9 @@ struct super_operations { | |||
| 1577 | void (*evict_inode) (struct inode *); | 1600 | void (*evict_inode) (struct inode *); |
| 1578 | void (*put_super) (struct super_block *); | 1601 | void (*put_super) (struct super_block *); |
| 1579 | int (*sync_fs)(struct super_block *sb, int wait); | 1602 | int (*sync_fs)(struct super_block *sb, int wait); |
| 1603 | int (*freeze_super) (struct super_block *); | ||
| 1580 | int (*freeze_fs) (struct super_block *); | 1604 | int (*freeze_fs) (struct super_block *); |
| 1605 | int (*thaw_super) (struct super_block *); | ||
| 1581 | int (*unfreeze_fs) (struct super_block *); | 1606 | int (*unfreeze_fs) (struct super_block *); |
| 1582 | int (*statfs) (struct dentry *, struct kstatfs *); | 1607 | int (*statfs) (struct dentry *, struct kstatfs *); |
| 1583 | int (*remount_fs) (struct super_block *, int *, char *); | 1608 | int (*remount_fs) (struct super_block *, int *, char *); |
| @@ -1590,6 +1615,7 @@ struct super_operations { | |||
| 1590 | #ifdef CONFIG_QUOTA | 1615 | #ifdef CONFIG_QUOTA |
| 1591 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); | 1616 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); |
| 1592 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); | 1617 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); |
| 1618 | struct dquot **(*get_dquots)(struct inode *); | ||
| 1593 | #endif | 1619 | #endif |
| 1594 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); | 1620 | int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); |
| 1595 | long (*nr_cached_objects)(struct super_block *, int); | 1621 | long (*nr_cached_objects)(struct super_block *, int); |
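Together with the removal of i_dquot[] from struct inode earlier in this diff, the new ->get_dquots() hook lets each filesystem keep the dquot pointers in its own inode and hand them back to the quota code. A sketch under the assumption of a hypothetical "foofs" that wants generic quota support:

#include <linux/fs.h>
#include <linux/quota.h>

struct foofs_inode_info {
	struct dquot *i_dquot[MAXQUOTAS];	/* moved here from struct inode */
	struct inode vfs_inode;
};

static inline struct foofs_inode_info *FOOFS_I(struct inode *inode)
{
	return container_of(inode, struct foofs_inode_info, vfs_inode);
}

static struct dquot **foofs_get_dquots(struct inode *inode)
{
	return FOOFS_I(inode)->i_dquot;
}

static const struct super_operations foofs_sops = {
	/* ...the usual hooks... */
#ifdef CONFIG_QUOTA
	.get_dquots	= foofs_get_dquots,
#endif
};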
| @@ -2060,7 +2086,7 @@ struct filename { | |||
| 2060 | extern long vfs_truncate(struct path *, loff_t); | 2086 | extern long vfs_truncate(struct path *, loff_t); |
| 2061 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, | 2087 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, |
| 2062 | struct file *filp); | 2088 | struct file *filp); |
| 2063 | extern int do_fallocate(struct file *file, int mode, loff_t offset, | 2089 | extern int vfs_fallocate(struct file *file, int mode, loff_t offset, |
| 2064 | loff_t len); | 2090 | loff_t len); |
| 2065 | extern long do_sys_open(int dfd, const char __user *filename, int flags, | 2091 | extern long do_sys_open(int dfd, const char __user *filename, int flags, |
| 2066 | umode_t mode); | 2092 | umode_t mode); |
| @@ -2072,6 +2098,7 @@ extern int vfs_open(const struct path *, struct file *, const struct cred *); | |||
| 2072 | extern struct file * dentry_open(const struct path *, int, const struct cred *); | 2098 | extern struct file * dentry_open(const struct path *, int, const struct cred *); |
| 2073 | extern int filp_close(struct file *, fl_owner_t id); | 2099 | extern int filp_close(struct file *, fl_owner_t id); |
| 2074 | 2100 | ||
| 2101 | extern struct filename *getname_flags(const char __user *, int, int *); | ||
| 2075 | extern struct filename *getname(const char __user *); | 2102 | extern struct filename *getname(const char __user *); |
| 2076 | extern struct filename *getname_kernel(const char *); | 2103 | extern struct filename *getname_kernel(const char *); |
| 2077 | 2104 | ||
| @@ -2149,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) | |||
| 2149 | extern int sync_filesystem(struct super_block *); | 2176 | extern int sync_filesystem(struct super_block *); |
| 2150 | extern const struct file_operations def_blk_fops; | 2177 | extern const struct file_operations def_blk_fops; |
| 2151 | extern const struct file_operations def_chr_fops; | 2178 | extern const struct file_operations def_chr_fops; |
| 2152 | extern const struct file_operations bad_sock_fops; | ||
| 2153 | #ifdef CONFIG_BLOCK | 2179 | #ifdef CONFIG_BLOCK |
| 2154 | extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); | 2180 | extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); |
| 2155 | extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); | 2181 | extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); |
| @@ -2786,6 +2812,11 @@ static inline void inode_has_no_xattr(struct inode *inode) | |||
| 2786 | inode->i_flags |= S_NOSEC; | 2812 | inode->i_flags |= S_NOSEC; |
| 2787 | } | 2813 | } |
| 2788 | 2814 | ||
| 2815 | static inline bool is_root_inode(struct inode *inode) | ||
| 2816 | { | ||
| 2817 | return inode == inode->i_sb->s_root->d_inode; | ||
| 2818 | } | ||
| 2819 | |||
| 2789 | static inline bool dir_emit(struct dir_context *ctx, | 2820 | static inline bool dir_emit(struct dir_context *ctx, |
| 2790 | const char *name, int namelen, | 2821 | const char *name, int namelen, |
| 2791 | u64 ino, unsigned type) | 2822 | u64 ino, unsigned type) |
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 84d60cb841b1..bf0321eabbda 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h | |||
| @@ -29,7 +29,16 @@ | |||
| 29 | #include <linux/of_platform.h> | 29 | #include <linux/of_platform.h> |
| 30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
| 31 | 31 | ||
| 32 | #define FSL_IFC_BANK_COUNT 4 | 32 | /* |
| 33 | * The actual number of banks implemented depends on the IFC version | ||
| 34 | * - IFC version 1.0 implements 4 banks. | ||
| 35 | * - IFC version 1.1 onward implements 8 banks. | ||
| 36 | */ | ||
| 37 | #define FSL_IFC_BANK_COUNT 8 | ||
| 38 | |||
| 39 | #define FSL_IFC_VERSION_MASK 0x0F0F0000 | ||
| 40 | #define FSL_IFC_VERSION_1_0_0 0x01000000 | ||
| 41 | #define FSL_IFC_VERSION_1_1_0 0x01010000 | ||
| 33 | 42 | ||
| 34 | /* | 43 | /* |
| 35 | * CSPR - Chip Select Property Register | 44 | * CSPR - Chip Select Property Register |
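A probe-time sketch of how the new version macros and the version/banks fields of struct fsl_ifc_ctrl might be used. The ctrl->regs pointer and the ifc_rev register read are assumptions about the surrounding driver; only the macros and the two struct members come from this header.

#include <linux/fsl_ifc.h>
#include <linux/io.h>

static void fsl_ifc_detect_banks(struct fsl_ifc_ctrl *ctrl)
{
	/* ctrl->regs and the ifc_rev register are assumed driver details */
	u32 ver = ioread32be(&ctrl->regs->ifc_rev) & FSL_IFC_VERSION_MASK;

	ctrl->version = ver;
	/* IFC 1.0 implements 4 chip selects, 1.1 and later implement 8 */
	ctrl->banks = (ver == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
}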
| @@ -776,23 +785,23 @@ struct fsl_ifc_regs { | |||
| 776 | __be32 cspr; | 785 | __be32 cspr; |
| 777 | u32 res2; | 786 | u32 res2; |
| 778 | } cspr_cs[FSL_IFC_BANK_COUNT]; | 787 | } cspr_cs[FSL_IFC_BANK_COUNT]; |
| 779 | u32 res3[0x19]; | 788 | u32 res3[0xd]; |
| 780 | struct { | 789 | struct { |
| 781 | __be32 amask; | 790 | __be32 amask; |
| 782 | u32 res4[0x2]; | 791 | u32 res4[0x2]; |
| 783 | } amask_cs[FSL_IFC_BANK_COUNT]; | 792 | } amask_cs[FSL_IFC_BANK_COUNT]; |
| 784 | u32 res5[0x18]; | 793 | u32 res5[0xc]; |
| 785 | struct { | 794 | struct { |
| 786 | __be32 csor; | 795 | __be32 csor; |
| 787 | __be32 csor_ext; | 796 | __be32 csor_ext; |
| 788 | u32 res6; | 797 | u32 res6; |
| 789 | } csor_cs[FSL_IFC_BANK_COUNT]; | 798 | } csor_cs[FSL_IFC_BANK_COUNT]; |
| 790 | u32 res7[0x18]; | 799 | u32 res7[0xc]; |
| 791 | struct { | 800 | struct { |
| 792 | __be32 ftim[4]; | 801 | __be32 ftim[4]; |
| 793 | u32 res8[0x8]; | 802 | u32 res8[0x8]; |
| 794 | } ftim_cs[FSL_IFC_BANK_COUNT]; | 803 | } ftim_cs[FSL_IFC_BANK_COUNT]; |
| 795 | u32 res9[0x60]; | 804 | u32 res9[0x30]; |
| 796 | __be32 rb_stat; | 805 | __be32 rb_stat; |
| 797 | u32 res10[0x2]; | 806 | u32 res10[0x2]; |
| 798 | __be32 ifc_gcr; | 807 | __be32 ifc_gcr; |
| @@ -827,6 +836,8 @@ struct fsl_ifc_ctrl { | |||
| 827 | int nand_irq; | 836 | int nand_irq; |
| 828 | spinlock_t lock; | 837 | spinlock_t lock; |
| 829 | void *nand; | 838 | void *nand; |
| 839 | int version; | ||
| 840 | int banks; | ||
| 830 | 841 | ||
| 831 | u32 nand_stat; | 842 | u32 nand_stat; |
| 832 | wait_queue_head_t nand_wait; | 843 | wait_queue_head_t nand_wait; |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ca060d7c4fa6..0f313f93c586 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -197,24 +197,6 @@ struct fsnotify_group { | |||
| 197 | #define FSNOTIFY_EVENT_INODE 2 | 197 | #define FSNOTIFY_EVENT_INODE 2 |
| 198 | 198 | ||
| 199 | /* | 199 | /* |
| 200 | * Inode specific fields in an fsnotify_mark | ||
| 201 | */ | ||
| 202 | struct fsnotify_inode_mark { | ||
| 203 | struct inode *inode; /* inode this mark is associated with */ | ||
| 204 | struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ | ||
| 205 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | ||
| 206 | }; | ||
| 207 | |||
| 208 | /* | ||
| 209 | * Mount point specific fields in an fsnotify_mark | ||
| 210 | */ | ||
| 211 | struct fsnotify_vfsmount_mark { | ||
| 212 | struct vfsmount *mnt; /* vfsmount this mark is associated with */ | ||
| 213 | struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */ | ||
| 214 | struct list_head free_m_list; /* tmp list used when freeing this mark */ | ||
| 215 | }; | ||
| 216 | |||
| 217 | /* | ||
| 218 | * a mark is simply an object attached to an in core inode which allows an | 200 | * a mark is simply an object attached to an in core inode which allows an |
| 219 | * fsnotify listener to indicate they are either no longer interested in events | 201 | * fsnotify listener to indicate they are either no longer interested in events |
| 220 | * of a type matching mask or only interested in those events. | 202 | * of a type matching mask or only interested in those events. |
| @@ -230,11 +212,17 @@ struct fsnotify_mark { | |||
| 230 | * in kernel that found and may be using this mark. */ | 212 | * in kernel that found and may be using this mark. */ |
| 231 | atomic_t refcnt; /* active things looking at this mark */ | 213 | atomic_t refcnt; /* active things looking at this mark */ |
| 232 | struct fsnotify_group *group; /* group this mark is for */ | 214 | struct fsnotify_group *group; /* group this mark is for */ |
| 233 | struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ | 215 | struct list_head g_list; /* list of marks by group->i_fsnotify_marks |
| 216 | * Also reused for queueing mark into | ||
| 217 | * destroy_list when it's waiting for | ||
| 218 | * the end of SRCU period before it can | ||
| 219 | * be freed */ | ||
| 234 | spinlock_t lock; /* protect group and inode */ | 220 | spinlock_t lock; /* protect group and inode */ |
| 221 | struct hlist_node obj_list; /* list of marks for inode / vfsmount */ | ||
| 222 | struct list_head free_list; /* tmp list used when freeing this mark */ | ||
| 235 | union { | 223 | union { |
| 236 | struct fsnotify_inode_mark i; | 224 | struct inode *inode; /* inode this mark is associated with */ |
| 237 | struct fsnotify_vfsmount_mark m; | 225 | struct vfsmount *mnt; /* vfsmount this mark is associated with */ |
| 238 | }; | 226 | }; |
| 239 | __u32 ignored_mask; /* events types to ignore */ | 227 | __u32 ignored_mask; /* events types to ignore */ |
| 240 | #define FSNOTIFY_MARK_FLAG_INODE 0x01 | 228 | #define FSNOTIFY_MARK_FLAG_INODE 0x01 |
| @@ -243,7 +231,6 @@ struct fsnotify_mark { | |||
| 243 | #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 | 231 | #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 |
| 244 | #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 | 232 | #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 |
| 245 | unsigned int flags; /* vfsmount or inode mark? */ | 233 | unsigned int flags; /* vfsmount or inode mark? */ |
| 246 | struct list_head destroy_list; | ||
| 247 | void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ | 234 | void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ |
| 248 | }; | 235 | }; |
| 249 | 236 | ||
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd48..1da602982cf9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -39,6 +39,12 @@ | |||
| 39 | # define FTRACE_FORCE_LIST_FUNC 0 | 39 | # define FTRACE_FORCE_LIST_FUNC 0 |
| 40 | #endif | 40 | #endif |
| 41 | 41 | ||
| 42 | /* Main tracing buffer and events set up */ | ||
| 43 | #ifdef CONFIG_TRACING | ||
| 44 | void trace_init(void); | ||
| 45 | #else | ||
| 46 | static inline void trace_init(void) { } | ||
| 47 | #endif | ||
| 42 | 48 | ||
| 43 | struct module; | 49 | struct module; |
| 44 | struct ftrace_hash; | 50 | struct ftrace_hash; |
| @@ -61,6 +67,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
| 61 | /* | 67 | /* |
| 62 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are | 68 | * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are |
| 63 | * set in the flags member. | 69 | * set in the flags member. |
| 70 | * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and | ||
| 71 | * IPMODIFY are attribute flags which can be set only before | ||
| 72 | * registering the ftrace_ops, and cannot be modified while registered. | ||
| 73 | * Changing those attribute flags after registering ftrace_ops will | ||
| 74 | * cause unexpected results. | ||
| 64 | * | 75 | * |
| 65 | * ENABLED - set/unset when ftrace_ops is registered/unregistered | 76 | * ENABLED - set/unset when ftrace_ops is registered/unregistered |
| 66 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically | 77 | * DYNAMIC - set when ftrace_ops is registered to denote dynamically |
| @@ -94,6 +105,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
| 94 | * ADDING - The ops is in the process of being added. | 105 | * ADDING - The ops is in the process of being added. |
| 95 | * REMOVING - The ops is in the process of being removed. | 106 | * REMOVING - The ops is in the process of being removed. |
| 96 | * MODIFYING - The ops is in the process of changing its filter functions. | 107 | * MODIFYING - The ops is in the process of changing its filter functions. |
| 108 | * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. | ||
| 109 | * The arch specific code sets this flag when it allocated a | ||
| 110 | * trampoline. This lets the arch know that it can update the | ||
| 111 | * trampoline in case the callback function changes. | ||
| 112 | * The ftrace_ops trampoline can be set by the ftrace users, and | ||
| 113 | * in such cases the arch must not modify it. Only the arch ftrace | ||
| 114 | * core code should set this flag. | ||
| 115 | * IPMODIFY - The ops can modify the IP register. This can only be set with | ||
| 116 | * SAVE_REGS. If another ops with this flag set is already registered | ||
| 117 | * for any of the functions that this ops will be registered for, then | ||
| 118 | * this ops will fail to register or set_filter_ip. | ||
| 97 | */ | 119 | */ |
| 98 | enum { | 120 | enum { |
| 99 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 121 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
| @@ -108,6 +130,8 @@ enum { | |||
| 108 | FTRACE_OPS_FL_ADDING = 1 << 9, | 130 | FTRACE_OPS_FL_ADDING = 1 << 9, |
| 109 | FTRACE_OPS_FL_REMOVING = 1 << 10, | 131 | FTRACE_OPS_FL_REMOVING = 1 << 10, |
| 110 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | 132 | FTRACE_OPS_FL_MODIFYING = 1 << 11, |
| 133 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, | ||
| 134 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, | ||
| 111 | }; | 135 | }; |
| 112 | 136 | ||
| 113 | #ifdef CONFIG_DYNAMIC_FTRACE | 137 | #ifdef CONFIG_DYNAMIC_FTRACE |
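A minimal sketch of an ftrace_ops using the new IPMODIFY flag, in the style of live-patching users: the callback diverts a traced function by rewriting the saved instruction pointer. regs->ip is the x86 field name, my_target and my_replacement are illustrative symbols, and as documented above IPMODIFY must be paired with SAVE_REGS.

#include <linux/ftrace.h>

static void my_replacement(void)
{
	/* illustrative function that execution is diverted to */
}

static void notrace my_ipmodify_handler(unsigned long ip,
					unsigned long parent_ip,
					struct ftrace_ops *op,
					struct pt_regs *regs)
{
	/* run my_replacement instead of the traced function (x86 field name) */
	regs->ip = (unsigned long)my_replacement;
}

static struct ftrace_ops my_ops = {
	.func	= my_ipmodify_handler,
	/* IPMODIFY is only accepted together with SAVE_REGS */
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

/*
 * Setup (illustrative):
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)my_target, 0, 0);
 *	register_ftrace_function(&my_ops);
 */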
| @@ -142,6 +166,7 @@ struct ftrace_ops { | |||
| 142 | struct ftrace_ops_hash *func_hash; | 166 | struct ftrace_ops_hash *func_hash; |
| 143 | struct ftrace_ops_hash old_hash; | 167 | struct ftrace_ops_hash old_hash; |
| 144 | unsigned long trampoline; | 168 | unsigned long trampoline; |
| 169 | unsigned long trampoline_size; | ||
| 145 | #endif | 170 | #endif |
| 146 | }; | 171 | }; |
| 147 | 172 | ||
| @@ -255,7 +280,9 @@ struct ftrace_func_command { | |||
| 255 | int ftrace_arch_code_modify_prepare(void); | 280 | int ftrace_arch_code_modify_prepare(void); |
| 256 | int ftrace_arch_code_modify_post_process(void); | 281 | int ftrace_arch_code_modify_post_process(void); |
| 257 | 282 | ||
| 258 | void ftrace_bug(int err, unsigned long ip); | 283 | struct dyn_ftrace; |
| 284 | |||
| 285 | void ftrace_bug(int err, struct dyn_ftrace *rec); | ||
| 259 | 286 | ||
| 260 | struct seq_file; | 287 | struct seq_file; |
| 261 | 288 | ||
| @@ -287,6 +314,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); | |||
| 287 | 314 | ||
| 288 | extern int ftrace_nr_registered_ops(void); | 315 | extern int ftrace_nr_registered_ops(void); |
| 289 | 316 | ||
| 317 | bool is_ftrace_trampoline(unsigned long addr); | ||
| 318 | |||
| 290 | /* | 319 | /* |
| 291 | * The dyn_ftrace record's flags field is split into two parts. | 320 | * The dyn_ftrace record's flags field is split into two parts. |
| 292 | * the first part which is '0-FTRACE_REF_MAX' is a counter of | 321 | * the first part which is '0-FTRACE_REF_MAX' is a counter of |
| @@ -297,6 +326,7 @@ extern int ftrace_nr_registered_ops(void); | |||
| 297 | * ENABLED - the function is being traced | 326 | * ENABLED - the function is being traced |
| 298 | * REGS - the record wants the function to save regs | 327 | * REGS - the record wants the function to save regs |
| 299 | * REGS_EN - the function is set up to save regs. | 328 | * REGS_EN - the function is set up to save regs. |
| 329 | * IPMODIFY - the record allows for the IP address to be changed. | ||
| 300 | * | 330 | * |
| 301 | * When a new ftrace_ops is registered and wants a function to save | 331 | * When a new ftrace_ops is registered and wants a function to save |
| 302 | * pt_regs, the rec->flag REGS is set. When the function has been | 332 | * pt_regs, the rec->flag REGS is set. When the function has been |
| @@ -310,10 +340,11 @@ enum { | |||
| 310 | FTRACE_FL_REGS_EN = (1UL << 29), | 340 | FTRACE_FL_REGS_EN = (1UL << 29), |
| 311 | FTRACE_FL_TRAMP = (1UL << 28), | 341 | FTRACE_FL_TRAMP = (1UL << 28), |
| 312 | FTRACE_FL_TRAMP_EN = (1UL << 27), | 342 | FTRACE_FL_TRAMP_EN = (1UL << 27), |
| 343 | FTRACE_FL_IPMODIFY = (1UL << 26), | ||
| 313 | }; | 344 | }; |
| 314 | 345 | ||
| 315 | #define FTRACE_REF_MAX_SHIFT 27 | 346 | #define FTRACE_REF_MAX_SHIFT 26 |
| 316 | #define FTRACE_FL_BITS 5 | 347 | #define FTRACE_FL_BITS 6 |
| 317 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) | 348 | #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) |
| 318 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) | 349 | #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) |
| 319 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) | 350 | #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) |
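For reference, the new constants expand as follows (plain arithmetic from the definitions above; adding IPMODIFY costs one bit of the reference counter):

	FTRACE_FL_BITS        = 6                   (flag bits now occupy bits 31..26)
	FTRACE_REF_MAX_SHIFT  = 26
	FTRACE_FL_MASKED_BITS = (1 << 6) - 1  = 0x3f
	FTRACE_FL_MASK        = 0x3f << 26    = 0xfc000000
	FTRACE_REF_MAX        = (1 << 26) - 1 = 0x03ffffff  (about 67 million references)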
| @@ -586,6 +617,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user | |||
| 586 | size_t cnt, loff_t *ppos) { return -ENODEV; } | 617 | size_t cnt, loff_t *ppos) { return -ENODEV; } |
| 587 | static inline int | 618 | static inline int |
| 588 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | 619 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } |
| 620 | |||
| 621 | static inline bool is_ftrace_trampoline(unsigned long addr) | ||
| 622 | { | ||
| 623 | return false; | ||
| 624 | } | ||
| 589 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 625 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 590 | 626 | ||
| 591 | /* totally disable ftrace - can not re-enable after this */ | 627 | /* totally disable ftrace - can not re-enable after this */ |
| @@ -843,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
| 843 | enum ftrace_dump_mode; | 879 | enum ftrace_dump_mode; |
| 844 | 880 | ||
| 845 | extern enum ftrace_dump_mode ftrace_dump_on_oops; | 881 | extern enum ftrace_dump_mode ftrace_dump_on_oops; |
| 882 | extern int tracepoint_printk; | ||
| 846 | 883 | ||
| 847 | extern void disable_trace_on_warning(void); | 884 | extern void disable_trace_on_warning(void); |
| 848 | extern int __disable_trace_on_warning; | 885 | extern int __disable_trace_on_warning; |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e910..0bebb5c348b8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
| @@ -138,6 +138,17 @@ enum print_line_t { | |||
| 138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | 138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ |
| 139 | }; | 139 | }; |
| 140 | 140 | ||
| 141 | /* | ||
| 142 | * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq | ||
| 143 | * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function | ||
| 144 | * simplifies those functions and keeps them in sync. | ||
| 145 | */ | ||
| 146 | static inline enum print_line_t trace_handle_return(struct trace_seq *s) | ||
| 147 | { | ||
| 148 | return trace_seq_has_overflowed(s) ? | ||
| 149 | TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; | ||
| 150 | } | ||
| 151 | |||
| 141 | void tracing_generic_entry_update(struct trace_entry *entry, | 152 | void tracing_generic_entry_update(struct trace_entry *entry, |
| 142 | unsigned long flags, | 153 | unsigned long flags, |
| 143 | int pc); | 154 | int pc); |
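A hedged sketch of the intended call pattern for trace_handle_return(): an event output callback issues its trace_seq_printf() calls without checking each one and folds the overflow test into the return value. The event name and printed fields are illustrative.

#include <linux/ftrace_event.h>
#include <linux/trace_seq.h>

static enum print_line_t my_event_print(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "my_event: cpu=%d\n", iter->cpu);

	/* TRACE_TYPE_PARTIAL_LINE iff the seq buffer overflowed above */
	return trace_handle_return(s);
}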
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d041..b840e3b2770d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -110,11 +110,8 @@ struct vm_area_struct; | |||
| 110 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | 110 | #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
| 111 | __GFP_RECLAIMABLE) | 111 | __GFP_RECLAIMABLE) |
| 112 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) | 112 | #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
| 113 | #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ | 113 | #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) |
| 114 | __GFP_HIGHMEM) | 114 | #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
| 115 | #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ | ||
| 116 | __GFP_HARDWALL | __GFP_HIGHMEM | \ | ||
| 117 | __GFP_MOVABLE) | ||
| 118 | #define GFP_IOFS (__GFP_IO | __GFP_FS) | 115 | #define GFP_IOFS (__GFP_IO | __GFP_FS) |
| 119 | #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ | 116 | #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
| 120 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 117 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
| @@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); | |||
| 381 | 378 | ||
| 382 | void page_alloc_init(void); | 379 | void page_alloc_init(void); |
| 383 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); | 380 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); |
| 384 | void drain_all_pages(void); | 381 | void drain_all_pages(struct zone *zone); |
| 385 | void drain_local_pages(void *dummy); | 382 | void drain_local_pages(struct zone *zone); |
| 386 | 383 | ||
| 387 | /* | 384 | /* |
| 388 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what | 385 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 85aa5d0b9357..ab81339a8590 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
| @@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio) | |||
| 216 | return -EINVAL; | 216 | return -EINVAL; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | 219 | static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, |
| 220 | unsigned int offset) | ||
| 220 | { | 221 | { |
| 221 | WARN_ON(1); | 222 | WARN_ON(1); |
| 222 | return -EINVAL; | 223 | return -EINVAL; |
| 223 | } | 224 | } |
| 224 | 225 | ||
| 225 | static inline void gpio_unlock_as_irq(struct gpio_chip *chip, | 226 | static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, |
| 226 | unsigned int offset) | 227 | unsigned int offset) |
| 227 | { | 228 | { |
| 228 | WARN_ON(1); | 229 | WARN_ON(1); |
| 229 | } | 230 | } |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 12f146fa6604..fd85cb120ee0 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
| @@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, | |||
| 66 | unsigned int index, enum gpiod_flags flags); | 66 | unsigned int index, enum gpiod_flags flags); |
| 67 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); | 67 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); |
| 68 | 68 | ||
| 69 | int gpiod_get_direction(const struct gpio_desc *desc); | 69 | int gpiod_get_direction(struct gpio_desc *desc); |
| 70 | int gpiod_direction_input(struct gpio_desc *desc); | 70 | int gpiod_direction_input(struct gpio_desc *desc); |
| 71 | int gpiod_direction_output(struct gpio_desc *desc, int value); | 71 | int gpiod_direction_output(struct gpio_desc *desc, int value); |
| 72 | int gpiod_direction_output_raw(struct gpio_desc *desc, int value); | 72 | int gpiod_direction_output_raw(struct gpio_desc *desc, int value); |
| @@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value); | |||
| 74 | /* Value get/set from non-sleeping context */ | 74 | /* Value get/set from non-sleeping context */ |
| 75 | int gpiod_get_value(const struct gpio_desc *desc); | 75 | int gpiod_get_value(const struct gpio_desc *desc); |
| 76 | void gpiod_set_value(struct gpio_desc *desc, int value); | 76 | void gpiod_set_value(struct gpio_desc *desc, int value); |
| 77 | void gpiod_set_array(unsigned int array_size, | ||
| 78 | struct gpio_desc **desc_array, int *value_array); | ||
| 77 | int gpiod_get_raw_value(const struct gpio_desc *desc); | 79 | int gpiod_get_raw_value(const struct gpio_desc *desc); |
| 78 | void gpiod_set_raw_value(struct gpio_desc *desc, int value); | 80 | void gpiod_set_raw_value(struct gpio_desc *desc, int value); |
| 81 | void gpiod_set_raw_array(unsigned int array_size, | ||
| 82 | struct gpio_desc **desc_array, int *value_array); | ||
| 79 | 83 | ||
| 80 | /* Value get/set from sleeping context */ | 84 | /* Value get/set from sleeping context */ |
| 81 | int gpiod_get_value_cansleep(const struct gpio_desc *desc); | 85 | int gpiod_get_value_cansleep(const struct gpio_desc *desc); |
| 82 | void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); | 86 | void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); |
| 87 | void gpiod_set_array_cansleep(unsigned int array_size, | ||
| 88 | struct gpio_desc **desc_array, | ||
| 89 | int *value_array); | ||
| 83 | int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); | 90 | int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); |
| 84 | void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); | 91 | void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); |
| 92 | void gpiod_set_raw_array_cansleep(unsigned int array_size, | ||
| 93 | struct gpio_desc **desc_array, | ||
| 94 | int *value_array); | ||
| 85 | 95 | ||
| 86 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); | 96 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); |
| 87 | 97 | ||
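A usage sketch for the new array setters, assuming a consumer that already holds four descriptors forming a data nibble (names are illustrative; the descriptors could come from gpiod_get_index()). All four values are applied in one call, which lets gpiolib use the chip's set_multiple() hook when the driver provides one.

#include <linux/bitops.h>
#include <linux/gpio/consumer.h>

/* Drive a 4-bit data bus in one call. */
static void bus_write_nibble(struct gpio_desc *db[4], u8 val)
{
	int values[4] = {
		!!(val & BIT(0)),
		!!(val & BIT(1)),
		!!(val & BIT(2)),
		!!(val & BIT(3)),
	};

	gpiod_set_array(4, db, values);
}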
| @@ -94,6 +104,13 @@ int gpiod_to_irq(const struct gpio_desc *desc); | |||
| 94 | struct gpio_desc *gpio_to_desc(unsigned gpio); | 104 | struct gpio_desc *gpio_to_desc(unsigned gpio); |
| 95 | int desc_to_gpio(const struct gpio_desc *desc); | 105 | int desc_to_gpio(const struct gpio_desc *desc); |
| 96 | 106 | ||
| 107 | /* Child properties interface */ | ||
| 108 | struct fwnode_handle; | ||
| 109 | |||
| 110 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, | ||
| 111 | const char *propname); | ||
| 112 | struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, | ||
| 113 | struct fwnode_handle *child); | ||
| 97 | #else /* CONFIG_GPIOLIB */ | 114 | #else /* CONFIG_GPIOLIB */ |
| 98 | 115 | ||
| 99 | static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, | 116 | static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, |
| @@ -210,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value) | |||
| 210 | /* GPIO can never have been requested */ | 227 | /* GPIO can never have been requested */ |
| 211 | WARN_ON(1); | 228 | WARN_ON(1); |
| 212 | } | 229 | } |
| 230 | static inline void gpiod_set_array(unsigned int array_size, | ||
| 231 | struct gpio_desc **desc_array, | ||
| 232 | int *value_array) | ||
| 233 | { | ||
| 234 | /* GPIO can never have been requested */ | ||
| 235 | WARN_ON(1); | ||
| 236 | } | ||
| 213 | static inline int gpiod_get_raw_value(const struct gpio_desc *desc) | 237 | static inline int gpiod_get_raw_value(const struct gpio_desc *desc) |
| 214 | { | 238 | { |
| 215 | /* GPIO can never have been requested */ | 239 | /* GPIO can never have been requested */ |
| @@ -221,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) | |||
| 221 | /* GPIO can never have been requested */ | 245 | /* GPIO can never have been requested */ |
| 222 | WARN_ON(1); | 246 | WARN_ON(1); |
| 223 | } | 247 | } |
| 248 | static inline void gpiod_set_raw_array(unsigned int array_size, | ||
| 249 | struct gpio_desc **desc_array, | ||
| 250 | int *value_array) | ||
| 251 | { | ||
| 252 | /* GPIO can never have been requested */ | ||
| 253 | WARN_ON(1); | ||
| 254 | } | ||
| 224 | 255 | ||
| 225 | static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) | 256 | static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) |
| 226 | { | 257 | { |
| @@ -233,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) | |||
| 233 | /* GPIO can never have been requested */ | 264 | /* GPIO can never have been requested */ |
| 234 | WARN_ON(1); | 265 | WARN_ON(1); |
| 235 | } | 266 | } |
| 267 | static inline void gpiod_set_array_cansleep(unsigned int array_size, | ||
| 268 | struct gpio_desc **desc_array, | ||
| 269 | int *value_array) | ||
| 270 | { | ||
| 271 | /* GPIO can never have been requested */ | ||
| 272 | WARN_ON(1); | ||
| 273 | } | ||
| 236 | static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) | 274 | static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) |
| 237 | { | 275 | { |
| 238 | /* GPIO can never have been requested */ | 276 | /* GPIO can never have been requested */ |
| @@ -245,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, | |||
| 245 | /* GPIO can never have been requested */ | 283 | /* GPIO can never have been requested */ |
| 246 | WARN_ON(1); | 284 | WARN_ON(1); |
| 247 | } | 285 | } |
| 286 | static inline void gpiod_set_raw_array_cansleep(unsigned int array_size, | ||
| 287 | struct gpio_desc **desc_array, | ||
| 288 | int *value_array) | ||
| 289 | { | ||
| 290 | /* GPIO can never have been requested */ | ||
| 291 | WARN_ON(1); | ||
| 292 | } | ||
| 248 | 293 | ||
| 249 | static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) | 294 | static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) |
| 250 | { | 295 | { |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 249db3057e4d..c497c62889d1 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
| @@ -32,6 +32,7 @@ struct seq_file; | |||
| 32 | * @get: returns value for signal "offset"; for output signals this | 32 | * @get: returns value for signal "offset"; for output signals this |
| 33 | * returns either the value actually sensed, or zero | 33 | * returns either the value actually sensed, or zero |
| 34 | * @set: assigns output value for signal "offset" | 34 | * @set: assigns output value for signal "offset" |
| 35 | * @set_multiple: assigns output values for multiple signals defined by "mask" | ||
| 35 | * @set_debounce: optional hook for setting debounce time for specified gpio in | 36 | * @set_debounce: optional hook for setting debounce time for specified gpio in |
| 36 | * interrupt triggered gpio chips | 37 | * interrupt triggered gpio chips |
| 37 | * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; | 38 | * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; |
| @@ -89,6 +90,9 @@ struct gpio_chip { | |||
| 89 | unsigned offset); | 90 | unsigned offset); |
| 90 | void (*set)(struct gpio_chip *chip, | 91 | void (*set)(struct gpio_chip *chip, |
| 91 | unsigned offset, int value); | 92 | unsigned offset, int value); |
| 93 | void (*set_multiple)(struct gpio_chip *chip, | ||
| 94 | unsigned long *mask, | ||
| 95 | unsigned long *bits); | ||
| 92 | int (*set_debounce)(struct gpio_chip *chip, | 96 | int (*set_debounce)(struct gpio_chip *chip, |
| 93 | unsigned offset, | 97 | unsigned offset, |
| 94 | unsigned debounce); | 98 | unsigned debounce); |
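Driver-side sketch of the new ->set_multiple() hook for a hypothetical memory-mapped controller whose lines map onto a single 32-bit data register. my_chip, dat_reg and the locking scheme are assumptions; only the callback signature comes from this header.

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct my_chip {
	struct gpio_chip gc;
	void __iomem *dat_reg;		/* one bit per GPIO line */
	spinlock_t lock;
};

static void my_gpio_set_multiple(struct gpio_chip *gc,
				 unsigned long *mask, unsigned long *bits)
{
	struct my_chip *chip = container_of(gc, struct my_chip, gc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&chip->lock, flags);
	val = readl(chip->dat_reg);
	val &= ~*mask;			/* clear every line being updated... */
	val |= *bits & *mask;		/* ...then set the ones requested high */
	writel(val, chip->dat_reg);
	spin_unlock_irqrestore(&chip->lock, flags);
}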
| @@ -149,8 +153,8 @@ extern struct gpio_chip *gpiochip_find(void *data, | |||
| 149 | int (*match)(struct gpio_chip *chip, void *data)); | 153 | int (*match)(struct gpio_chip *chip, void *data)); |
| 150 | 154 | ||
| 151 | /* lock/unlock as IRQ */ | 155 | /* lock/unlock as IRQ */ |
| 152 | int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset); | 156 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); |
| 153 | void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); | 157 | void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); |
| 154 | 158 | ||
| 155 | struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); | 159 | struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); |
| 156 | 160 | ||
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 8b622468952c..ee2d8c6f9130 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _GPIO_KEYS_H | 2 | #define _GPIO_KEYS_H |
| 3 | 3 | ||
| 4 | struct device; | 4 | struct device; |
| 5 | struct gpio_desc; | ||
| 5 | 6 | ||
| 6 | /** | 7 | /** |
| 7 | * struct gpio_keys_button - configuration parameters | 8 | * struct gpio_keys_button - configuration parameters |
| @@ -17,6 +18,7 @@ struct device; | |||
| 17 | * disable button via sysfs | 18 | * disable button via sysfs |
| 18 | * @value: axis value for %EV_ABS | 19 | * @value: axis value for %EV_ABS |
| 19 | * @irq: Irq number in case of interrupt keys | 20 | * @irq: Irq number in case of interrupt keys |
| 21 | * @gpiod: GPIO descriptor | ||
| 20 | */ | 22 | */ |
| 21 | struct gpio_keys_button { | 23 | struct gpio_keys_button { |
| 22 | unsigned int code; | 24 | unsigned int code; |
| @@ -29,6 +31,7 @@ struct gpio_keys_button { | |||
| 29 | bool can_disable; | 31 | bool can_disable; |
| 30 | int value; | 32 | int value; |
| 31 | unsigned int irq; | 33 | unsigned int irq; |
| 34 | struct gpio_desc *gpiod; | ||
| 32 | }; | 35 | }; |
| 33 | 36 | ||
| 34 | /** | 37 | /** |
diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392..1afde47e1528 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <asm/types.h> | 17 | #include <asm/types.h> |
| 18 | #include <asm/hash.h> | ||
| 19 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
| 20 | 19 | ||
| 21 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | 20 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
| @@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) | |||
| 84 | return (u32)val; | 83 | return (u32)val; |
| 85 | } | 84 | } |
| 86 | 85 | ||
| 87 | struct fast_hash_ops { | ||
| 88 | u32 (*hash)(const void *data, u32 len, u32 seed); | ||
| 89 | u32 (*hash2)(const u32 *data, u32 len, u32 seed); | ||
| 90 | }; | ||
| 91 | |||
| 92 | /** | ||
| 93 | * arch_fast_hash - Caclulates a hash over a given buffer that can have | ||
| 94 | * arbitrary size. This function will eventually use an | ||
| 95 | * architecture-optimized hashing implementation if | ||
| 96 | * available, and trades off distribution for speed. | ||
| 97 | * | ||
| 98 | * @data: buffer to hash | ||
| 99 | * @len: length of buffer in bytes | ||
| 100 | * @seed: start seed | ||
| 101 | * | ||
| 102 | * Returns 32bit hash. | ||
| 103 | */ | ||
| 104 | extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); | ||
| 105 | |||
| 106 | /** | ||
| 107 | * arch_fast_hash2 - Caclulates a hash over a given buffer that has a | ||
| 108 | * size that is of a multiple of 32bit words. This | ||
| 109 | * function will eventually use an architecture- | ||
| 110 | * optimized hashing implementation if available, | ||
| 111 | * and trades off distribution for speed. | ||
| 112 | * | ||
| 113 | * @data: buffer to hash (must be 32bit padded) | ||
| 114 | * @len: number of 32bit words | ||
| 115 | * @seed: start seed | ||
| 116 | * | ||
| 117 | * Returns 32bit hash. | ||
| 118 | */ | ||
| 119 | extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); | ||
| 120 | |||
| 121 | #endif /* _LINUX_HASH_H */ | 86 | #endif /* _LINUX_HASH_H */ |
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 11c0182a153b..cbb5790a35cd 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h | |||
| @@ -1,9 +1,24 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2012 Avionic Design GmbH | 2 | * Copyright (C) 2012 Avionic Design GmbH |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * it under the terms of the GNU General Public License version 2 as | 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * published by the Free Software Foundation. | 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice (including the | ||
| 12 | * next paragraph) shall be included in all copies or substantial portions | ||
| 13 | * of the Software. | ||
| 14 | * | ||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 21 | * DEALINGS IN THE SOFTWARE. | ||
| 7 | */ | 22 | */ |
| 8 | 23 | ||
| 9 | #ifndef __LINUX_HDMI_H_ | 24 | #ifndef __LINUX_HDMI_H_ |
diff --git a/include/linux/hid.h b/include/linux/hid.h index 78ea9bf941cd..06c4607744f6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -234,6 +234,33 @@ struct hid_item { | |||
| 234 | #define HID_DG_BARRELSWITCH 0x000d0044 | 234 | #define HID_DG_BARRELSWITCH 0x000d0044 |
| 235 | #define HID_DG_ERASER 0x000d0045 | 235 | #define HID_DG_ERASER 0x000d0045 |
| 236 | #define HID_DG_TABLETPICK 0x000d0046 | 236 | #define HID_DG_TABLETPICK 0x000d0046 |
| 237 | |||
| 238 | #define HID_CP_CONSUMERCONTROL 0x000c0001 | ||
| 239 | #define HID_CP_NUMERICKEYPAD 0x000c0002 | ||
| 240 | #define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 | ||
| 241 | #define HID_CP_MICROPHONE 0x000c0004 | ||
| 242 | #define HID_CP_HEADPHONE 0x000c0005 | ||
| 243 | #define HID_CP_GRAPHICEQUALIZER 0x000c0006 | ||
| 244 | #define HID_CP_FUNCTIONBUTTONS 0x000c0036 | ||
| 245 | #define HID_CP_SELECTION 0x000c0080 | ||
| 246 | #define HID_CP_MEDIASELECTION 0x000c0087 | ||
| 247 | #define HID_CP_SELECTDISC 0x000c00ba | ||
| 248 | #define HID_CP_PLAYBACKSPEED 0x000c00f1 | ||
| 249 | #define HID_CP_PROXIMITY 0x000c0109 | ||
| 250 | #define HID_CP_SPEAKERSYSTEM 0x000c0160 | ||
| 251 | #define HID_CP_CHANNELLEFT 0x000c0161 | ||
| 252 | #define HID_CP_CHANNELRIGHT 0x000c0162 | ||
| 253 | #define HID_CP_CHANNELCENTER 0x000c0163 | ||
| 254 | #define HID_CP_CHANNELFRONT 0x000c0164 | ||
| 255 | #define HID_CP_CHANNELCENTERFRONT 0x000c0165 | ||
| 256 | #define HID_CP_CHANNELSIDE 0x000c0166 | ||
| 257 | #define HID_CP_CHANNELSURROUND 0x000c0167 | ||
| 258 | #define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 | ||
| 259 | #define HID_CP_CHANNELTOP 0x000c0169 | ||
| 260 | #define HID_CP_CHANNELUNKNOWN 0x000c016a | ||
| 261 | #define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 | ||
| 262 | #define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 | ||
| 263 | |||
| 237 | #define HID_DG_CONFIDENCE 0x000d0047 | 264 | #define HID_DG_CONFIDENCE 0x000d0047 |
| 238 | #define HID_DG_WIDTH 0x000d0048 | 265 | #define HID_DG_WIDTH 0x000d0048 |
| 239 | #define HID_DG_HEIGHT 0x000d0049 | 266 | #define HID_DG_HEIGHT 0x000d0049 |
| @@ -312,11 +339,8 @@ struct hid_item { | |||
| 312 | * Vendor specific HID device groups | 339 | * Vendor specific HID device groups |
| 313 | */ | 340 | */ |
| 314 | #define HID_GROUP_RMI 0x0100 | 341 | #define HID_GROUP_RMI 0x0100 |
| 315 | |||
| 316 | /* | ||
| 317 | * Vendor specific HID device groups | ||
| 318 | */ | ||
| 319 | #define HID_GROUP_WACOM 0x0101 | 342 | #define HID_GROUP_WACOM 0x0101 |
| 343 | #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 | ||
| 320 | 344 | ||
| 321 | /* | 345 | /* |
| 322 | * This is the global environment of the parser. This information is | 346 | * This is the global environment of the parser. This information is |
| @@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev) | |||
| 1063 | hdev->ll_driver->wait(hdev); | 1087 | hdev->ll_driver->wait(hdev); |
| 1064 | } | 1088 | } |
| 1065 | 1089 | ||
| 1090 | /** | ||
| 1091 | * hid_report_len - calculate the report length | ||
| 1092 | * | ||
| 1093 | * @report: the report we want to know the length | ||
| 1094 | */ | ||
| 1095 | static inline int hid_report_len(struct hid_report *report) | ||
| 1096 | { | ||
| 1097 | /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ | ||
| 1098 | return ((report->size - 1) >> 3) + 1 + (report->id > 0); | ||
| 1099 | } | ||
| 1100 | |||
| 1066 | int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, | 1101 | int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, |
| 1067 | int interrupt); | 1102 | int interrupt); |
| 1068 | 1103 | ||
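Worked example of hid_report_len(): for a report of size 20 bits with a non-zero report ID, ((20 - 1) >> 3) + 1 = 3 payload bytes, plus 1 byte for the ID, giving 4; for an 8-bit report with ID 0 it yields ((8 - 1) >> 3) + 1 = 1 byte and no ID byte, matching DIV_ROUND_UP(size, 8) + !!(id > 0).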
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6e6d338641fe..431b7fc605c9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | |||
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | #endif /* !CONFIG_HUGETLB_PAGE */ | 177 | #endif /* !CONFIG_HUGETLB_PAGE */ |
| 178 | /* | ||
| 179 | * hugepages at the page global directory. If the arch supports | ||
| 180 | * hugepages at the pgd level, it needs to define this. | ||
| 181 | */ | ||
| 182 | #ifndef pgd_huge | ||
| 183 | #define pgd_huge(x) 0 | ||
| 184 | #endif | ||
| 185 | |||
| 186 | #ifndef pgd_write | ||
| 187 | static inline int pgd_write(pgd_t pgd) | ||
| 188 | { | ||
| 189 | BUG(); | ||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | #endif | ||
| 193 | |||
| 194 | #ifndef pud_write | ||
| 195 | static inline int pud_write(pud_t pud) | ||
| 196 | { | ||
| 197 | BUG(); | ||
| 198 | return 0; | ||
| 199 | } | ||
| 200 | #endif | ||
| 201 | |||
| 202 | #ifndef is_hugepd | ||
| 203 | /* | ||
| 204 | * Some architectures require a hugepage directory format that is | ||
| 205 | * needed to support multiple hugepage sizes. For example, commit | ||
| 206 | * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" | ||
| 207 | * introduced the same on powerpc. This allows for a more flexible hugepage | ||
| 208 | * pagetable layout. | ||
| 209 | */ | ||
| 210 | typedef struct { unsigned long pd; } hugepd_t; | ||
| 211 | #define is_hugepd(hugepd) (0) | ||
| 212 | #define __hugepd(x) ((hugepd_t) { (x) }) | ||
| 213 | static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, | ||
| 214 | unsigned pdshift, unsigned long end, | ||
| 215 | int write, struct page **pages, int *nr) | ||
| 216 | { | ||
| 217 | return 0; | ||
| 218 | } | ||
| 219 | #else | ||
| 220 | extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, | ||
| 221 | unsigned pdshift, unsigned long end, | ||
| 222 | int write, struct page **pages, int *nr); | ||
| 223 | #endif | ||
| 178 | 224 | ||
| 179 | #define HUGETLB_ANON_FILE "anon_hugepage" | 225 | #define HUGETLB_ANON_FILE "anon_hugepage" |
| 180 | 226 | ||
| @@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log) | |||
| 311 | { | 357 | { |
| 312 | if (!page_size_log) | 358 | if (!page_size_log) |
| 313 | return &default_hstate; | 359 | return &default_hstate; |
| 314 | return size_to_hstate(1 << page_size_log); | 360 | |
| 361 | return size_to_hstate(1UL << page_size_log); | ||
| 315 | } | 362 | } |
| 316 | 363 | ||
| 317 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) | 364 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98d..bcc853eccc85 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #define _LINUX_HUGETLB_CGROUP_H | 16 | #define _LINUX_HUGETLB_CGROUP_H |
| 17 | 17 | ||
| 18 | #include <linux/mmdebug.h> | 18 | #include <linux/mmdebug.h> |
| 19 | #include <linux/res_counter.h> | ||
| 20 | 19 | ||
| 21 | struct hugetlb_cgroup; | 20 | struct hugetlb_cgroup; |
| 22 | /* | 21 | /* |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a072..476c685ca6f9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -650,6 +650,8 @@ struct vmbus_channel { | |||
| 650 | u8 monitor_grp; | 650 | u8 monitor_grp; |
| 651 | u8 monitor_bit; | 651 | u8 monitor_bit; |
| 652 | 652 | ||
| 653 | bool rescind; /* got rescind msg */ | ||
| 654 | |||
| 653 | u32 ringbuffer_gpadlhandle; | 655 | u32 ringbuffer_gpadlhandle; |
| 654 | 656 | ||
| 655 | /* Allocated memory for ring buffer */ | 657 | /* Allocated memory for ring buffer */ |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b556e0ab946f..e3a1721c8354 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -46,6 +46,8 @@ struct i2c_client; | |||
| 46 | struct i2c_driver; | 46 | struct i2c_driver; |
| 47 | union i2c_smbus_data; | 47 | union i2c_smbus_data; |
| 48 | struct i2c_board_info; | 48 | struct i2c_board_info; |
| 49 | enum i2c_slave_event; | ||
| 50 | typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); | ||
| 49 | 51 | ||
| 50 | struct module; | 52 | struct module; |
| 51 | 53 | ||
| @@ -209,6 +211,8 @@ struct i2c_driver { | |||
| 209 | * @irq: indicates the IRQ generated by this device (if any) | 211 | * @irq: indicates the IRQ generated by this device (if any) |
| 210 | * @detected: member of an i2c_driver.clients list or i2c-core's | 212 | * @detected: member of an i2c_driver.clients list or i2c-core's |
| 211 | * userspace_devices list | 213 | * userspace_devices list |
| 214 | * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter | ||
| 215 | * calls it to pass on slave events to the slave driver. | ||
| 212 | * | 216 | * |
| 213 | * An i2c_client identifies a single device (i.e. chip) connected to an | 217 | * An i2c_client identifies a single device (i.e. chip) connected to an |
| 214 | * i2c bus. The behaviour exposed to Linux is defined by the driver | 218 | * i2c bus. The behaviour exposed to Linux is defined by the driver |
| @@ -224,6 +228,7 @@ struct i2c_client { | |||
| 224 | struct device dev; /* the device structure */ | 228 | struct device dev; /* the device structure */ |
| 225 | int irq; /* irq issued by device */ | 229 | int irq; /* irq issued by device */ |
| 226 | struct list_head detected; | 230 | struct list_head detected; |
| 231 | i2c_slave_cb_t slave_cb; /* callback for slave mode */ | ||
| 227 | }; | 232 | }; |
| 228 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) | 233 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
| 229 | 234 | ||
| @@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
| 246 | dev_set_drvdata(&dev->dev, data); | 251 | dev_set_drvdata(&dev->dev, data); |
| 247 | } | 252 | } |
| 248 | 253 | ||
| 254 | /* I2C slave support */ | ||
| 255 | |||
| 256 | enum i2c_slave_event { | ||
| 257 | I2C_SLAVE_REQ_READ_START, | ||
| 258 | I2C_SLAVE_REQ_READ_END, | ||
| 259 | I2C_SLAVE_REQ_WRITE_START, | ||
| 260 | I2C_SLAVE_REQ_WRITE_END, | ||
| 261 | I2C_SLAVE_STOP, | ||
| 262 | }; | ||
| 263 | |||
| 264 | extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); | ||
| 265 | extern int i2c_slave_unregister(struct i2c_client *client); | ||
| 266 | |||
| 267 | static inline int i2c_slave_event(struct i2c_client *client, | ||
| 268 | enum i2c_slave_event event, u8 *val) | ||
| 269 | { | ||
| 270 | return client->slave_cb(client, event, val); | ||
| 271 | } | ||
| 272 | |||
| 249 | /** | 273 | /** |
| 250 | * struct i2c_board_info - template for device creation | 274 | * struct i2c_board_info - template for device creation |
| 251 | * @type: chip type, to initialize i2c_client.name | 275 | * @type: chip type, to initialize i2c_client.name |
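A minimal sketch of a slave backend built on the new hooks: the backend registers a callback with i2c_slave_register() and the adapter driver feeds it events via i2c_slave_event(). The one-byte "register" behaviour below is illustrative only.

#include <linux/i2c.h>

static u8 my_slave_reg;	/* single byte exposed to the remote master */

static int my_slave_cb(struct i2c_client *client,
		       enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_WRITE_END:
		my_slave_reg = *val;	/* byte received from the master */
		break;
	case I2C_SLAVE_REQ_READ_START:
		*val = my_slave_reg;	/* byte to send back to the master */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Backend probe():  i2c_slave_register(client, my_slave_cb);
 * Backend remove(): i2c_slave_unregister(client);
 */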
| @@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, | |||
| 352 | * into I2C transfers instead. | 376 | * into I2C transfers instead. |
| 353 | * @functionality: Return the flags that this algorithm/adapter pair supports | 377 | * @functionality: Return the flags that this algorithm/adapter pair supports |
| 354 | * from the I2C_FUNC_* flags. | 378 | * from the I2C_FUNC_* flags. |
| 379 | * @reg_slave: Register given client to I2C slave mode of this adapter | ||
| 380 | * @unreg_slave: Unregister given client from I2C slave mode of this adapter | ||
| 355 | * | 381 | * |
| 356 | * The following structs are for those who like to implement new bus drivers: | 382 | * The following structs are for those who like to implement new bus drivers: |
| 357 | * i2c_algorithm is the interface to a class of hardware solutions which can | 383 | * i2c_algorithm is the interface to a class of hardware solutions which can |
| @@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, | |||
| 359 | * to name two of the most common. | 385 | * to name two of the most common. |
| 360 | * | 386 | * |
| 361 | * The return codes from the @master_xfer field should indicate the type of | 387 | * The return codes from the @master_xfer field should indicate the type of |
| 362 | * error code that occured during the transfer, as documented in the kernel | 388 | * error code that occurred during the transfer, as documented in the kernel |
| 363 | * Documentation file Documentation/i2c/fault-codes. | 389 | * Documentation file Documentation/i2c/fault-codes. |
| 364 | */ | 390 | */ |
| 365 | struct i2c_algorithm { | 391 | struct i2c_algorithm { |
| @@ -377,6 +403,9 @@ struct i2c_algorithm { | |||
| 377 | 403 | ||
| 378 | /* To determine what the adapter supports */ | 404 | /* To determine what the adapter supports */ |
| 379 | u32 (*functionality) (struct i2c_adapter *); | 405 | u32 (*functionality) (struct i2c_adapter *); |
| 406 | |||
| 407 | int (*reg_slave)(struct i2c_client *client); | ||
| 408 | int (*unreg_slave)(struct i2c_client *client); | ||
| 380 | }; | 409 | }; |
| 381 | 410 | ||
| 382 | /** | 411 | /** |
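On the adapter side, a bus driver that supports slave mode wires the two new hooks into its i2c_algorithm and calls i2c_slave_event() from its interrupt handler when bytes arrive. Everything named my_adap_* below, and the hardware programming steps, are assumptions.

#include <linux/i2c.h>

static int my_adap_reg_slave(struct i2c_client *slave)
{
	/* program the controller with slave->addr and enable slave-mode IRQs */
	return 0;
}

static int my_adap_unreg_slave(struct i2c_client *slave)
{
	/* disable slave mode in the controller */
	return 0;
}

static const struct i2c_algorithm my_adap_algo = {
	/* .master_xfer and .functionality as in any bus driver */
	.reg_slave	= my_adap_reg_slave,
	.unreg_slave	= my_adap_unreg_slave,
};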
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 8cfb50f38529..0bc03f100d04 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #define __TWL_H_ | 26 | #define __TWL_H_ |
| 27 | 27 | ||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/phy/phy.h> | ||
| 30 | #include <linux/input/matrix_keypad.h> | 29 | #include <linux/input/matrix_keypad.h> |
| 31 | 30 | ||
| 32 | /* | 31 | /* |
| @@ -634,7 +633,6 @@ enum twl4030_usb_mode { | |||
| 634 | struct twl4030_usb_data { | 633 | struct twl4030_usb_data { |
| 635 | enum twl4030_usb_mode usb_mode; | 634 | enum twl4030_usb_mode usb_mode; |
| 636 | unsigned long features; | 635 | unsigned long features; |
| 637 | struct phy_init_data *init_data; | ||
| 638 | 636 | ||
| 639 | int (*phy_init)(struct device *dev); | 637 | int (*phy_init)(struct device *dev); |
| 640 | int (*phy_exit)(struct device *dev); | 638 | int (*phy_exit)(struct device *dev); |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c76931..4f4eea8a6288 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 20 | #include <linux/if_ether.h> | 20 | #include <linux/if_ether.h> |
| 21 | #include <asm/byteorder.h> | 21 | #include <asm/byteorder.h> |
| 22 | #include <asm/unaligned.h> | ||
| 22 | 23 | ||
| 23 | /* | 24 | /* |
| 24 | * DS bit usage | 25 | * DS bit usage |
| @@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { | |||
| 1066 | 1067 | ||
| 1067 | /* TDLS */ | 1068 | /* TDLS */ |
| 1068 | 1069 | ||
| 1070 | /* Channel switch timing */ | ||
| 1071 | struct ieee80211_ch_switch_timing { | ||
| 1072 | __le16 switch_time; | ||
| 1073 | __le16 switch_timeout; | ||
| 1074 | } __packed; | ||
| 1075 | |||
| 1069 | /* Link-id information element */ | 1076 | /* Link-id information element */ |
| 1070 | struct ieee80211_tdls_lnkie { | 1077 | struct ieee80211_tdls_lnkie { |
| 1071 | u8 ie_type; /* Link Identifier IE */ | 1078 | u8 ie_type; /* Link Identifier IE */ |
| @@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { | |||
| 1107 | u8 dialog_token; | 1114 | u8 dialog_token; |
| 1108 | u8 variable[0]; | 1115 | u8 variable[0]; |
| 1109 | } __packed discover_req; | 1116 | } __packed discover_req; |
| 1117 | struct { | ||
| 1118 | u8 target_channel; | ||
| 1119 | u8 oper_class; | ||
| 1120 | u8 variable[0]; | ||
| 1121 | } __packed chan_switch_req; | ||
| 1122 | struct { | ||
| 1123 | __le16 status_code; | ||
| 1124 | u8 variable[0]; | ||
| 1125 | } __packed chan_switch_resp; | ||
| 1110 | } u; | 1126 | } u; |
| 1111 | } __packed; | 1127 | } __packed; |
| 1112 | 1128 | ||
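The channel-switch timing element added above is just two little-endian 16-bit fields; a short sketch of filling it before it is appended to a TDLS channel-switch frame (the skb and the *_tu values are assumptions, not shown by the patch):

	struct ieee80211_ch_switch_timing *timing;

	timing = (void *)skb_put(skb, sizeof(*timing));
	timing->switch_time = cpu_to_le16(switch_time_tu);	/* both values in TU */
	timing->switch_timeout = cpu_to_le16(switch_timeout_tu);
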
| @@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { | |||
| 1274 | #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 | 1290 | #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 |
| 1275 | 1291 | ||
| 1276 | /* | 1292 | /* |
| 1277 | * Maximum length of AMPDU that the STA can receive. | 1293 | * Maximum length of AMPDU that the STA can receive in high-throughput (HT). |
| 1278 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) | 1294 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) |
| 1279 | */ | 1295 | */ |
| 1280 | enum ieee80211_max_ampdu_length_exp { | 1296 | enum ieee80211_max_ampdu_length_exp { |
| @@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { | |||
| 1284 | IEEE80211_HT_MAX_AMPDU_64K = 3 | 1300 | IEEE80211_HT_MAX_AMPDU_64K = 3 |
| 1285 | }; | 1301 | }; |
| 1286 | 1302 | ||
| 1303 | /* | ||
| 1304 | * Maximum length of AMPDU that the STA can receive in VHT. | ||
| 1305 | * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) | ||
| 1306 | */ | ||
| 1307 | enum ieee80211_vht_max_ampdu_length_exp { | ||
| 1308 | IEEE80211_VHT_MAX_AMPDU_8K = 0, | ||
| 1309 | IEEE80211_VHT_MAX_AMPDU_16K = 1, | ||
| 1310 | IEEE80211_VHT_MAX_AMPDU_32K = 2, | ||
| 1311 | IEEE80211_VHT_MAX_AMPDU_64K = 3, | ||
| 1312 | IEEE80211_VHT_MAX_AMPDU_128K = 4, | ||
| 1313 | IEEE80211_VHT_MAX_AMPDU_256K = 5, | ||
| 1314 | IEEE80211_VHT_MAX_AMPDU_512K = 6, | ||
| 1315 | IEEE80211_VHT_MAX_AMPDU_1024K = 7 | ||
| 1316 | }; | ||
| 1317 | |||
| 1287 | #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 | 1318 | #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 |
| 1288 | 1319 | ||
| 1289 | /* Minimum MPDU start spacing */ | 1320 | /* Minimum MPDU start spacing */ |
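The VHT exponent plugs into the same formula as the HT one: Length = 2^(13 + max_ampdu_length_exp) - 1. For example IEEE80211_VHT_MAX_AMPDU_1024K (exp = 7) gives 2^20 - 1 = 1,048,575 octets, while IEEE80211_VHT_MAX_AMPDU_8K (exp = 0) gives 2^13 - 1 = 8,191 octets. A tiny hypothetical helper, just to show the arithmetic:

	#include <linux/bitops.h>

	/* Hypothetical: decode the exponent into a maximum A-MPDU byte count. */
	static inline u32 vht_max_ampdu_len(enum ieee80211_vht_max_ampdu_length_exp exp)
	{
		return BIT(13 + exp) - 1;	/* exp = 7 -> 1048575 octets */
	}
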
| @@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { | |||
| 1998 | WLAN_TDLS_DISCOVERY_REQUEST = 10, | 2029 | WLAN_TDLS_DISCOVERY_REQUEST = 10, |
| 1999 | }; | 2030 | }; |
| 2000 | 2031 | ||
| 2032 | /* Extended Channel Switching capability to be set in the 1st byte of | ||
| 2033 | * the @WLAN_EID_EXT_CAPABILITY information element | ||
| 2034 | */ | ||
| 2035 | #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) | ||
| 2036 | |||
| 2037 | /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ | ||

| 2038 | #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) | ||
| 2039 | #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) | ||
| 2040 | #define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) | ||
| 2041 | |||
| 2001 | /* Interworking capabilities are set in 7th bit of 4th byte of the | 2042 | /* Interworking capabilities are set in 7th bit of 4th byte of the |
| 2002 | * @WLAN_EID_EXT_CAPABILITY information element | 2043 | * @WLAN_EID_EXT_CAPABILITY information element |
| 2003 | */ | 2044 | */ |
| @@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { | |||
| 2009 | */ | 2050 | */ |
| 2010 | #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) | 2051 | #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) |
| 2011 | #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) | 2052 | #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) |
| 2053 | #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) | ||
| 2012 | 2054 | ||
| 2013 | #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) | 2055 | #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) |
| 2014 | #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) | 2056 | #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) |
| @@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { | |||
| 2016 | /* TDLS specific payload type in the LLC/SNAP header */ | 2058 | /* TDLS specific payload type in the LLC/SNAP header */ |
| 2017 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 | 2059 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 |
| 2018 | 2060 | ||
| 2061 | /* BSS Coex IE information field bits */ | ||
| 2062 | #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) | ||
| 2063 | |||
| 2019 | /** | 2064 | /** |
| 2020 | * enum - mesh synchronization method identifier | 2065 | * enum - mesh synchronization method identifier |
| 2021 | * | 2066 | * |
| @@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, | |||
| 2398 | return !!(tim->virtual_map[index] & mask); | 2443 | return !!(tim->virtual_map[index] & mask); |
| 2399 | } | 2444 | } |
| 2400 | 2445 | ||
| 2446 | /** | ||
| 2447 | * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) | ||
| 2448 | * @skb: the skb containing the frame, length will not be checked | ||
| 2449 | * @hdr_size: the size of the ieee80211_hdr that starts at skb->data | ||
| 2450 | * | ||
| 2451 | * This function assumes the frame is a data frame, and that the network header | ||
| 2452 | * is in the correct place. | ||
| 2453 | */ | ||
| 2454 | static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) | ||
| 2455 | { | ||
| 2456 | if (!skb_is_nonlinear(skb) && | ||
| 2457 | skb->len > (skb_network_offset(skb) + 2)) { | ||
| 2458 | /* Point to where the indication of TDLS should start */ | ||
| 2459 | const u8 *tdls_data = skb_network_header(skb) - 2; | ||
| 2460 | |||
| 2461 | if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && | ||
| 2462 | tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && | ||
| 2463 | tdls_data[3] == WLAN_CATEGORY_TDLS) | ||
| 2464 | return tdls_data[4]; | ||
| 2465 | } | ||
| 2466 | |||
| 2467 | return -1; | ||
| 2468 | } | ||
| 2469 | |||
| 2401 | /* convert time units */ | 2470 | /* convert time units */ |
| 2402 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) | 2471 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) |
| 2403 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) | 2472 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) |
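A rough sketch of how a receive path could use the new helper to divert TDLS channel-switch frames; the surrounding function, the hdr_size value and the dispatch targets are assumptions, while the WLAN_TDLS_CHANNEL_SWITCH_* constants are the existing TDLS action codes:

	int action = ieee80211_get_tdls_action(skb, hdr_size);

	switch (action) {
	case WLAN_TDLS_CHANNEL_SWITCH_REQUEST:
	case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE:
		/* hand off to the TDLS channel-switch machinery */
		break;
	case -1:
		/* not a TDLS action frame; continue on the normal data path */
		break;
	}
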
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h new file mode 100644 index 000000000000..6e82d888287c --- /dev/null +++ b/include/linux/ieee802154.h | |||
| @@ -0,0 +1,242 @@ | |||
| 1 | /* | ||
| 2 | * IEEE802.15.4-2003 specification | ||
| 3 | * | ||
| 4 | * Copyright (C) 2007, 2008 Siemens AG | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 | ||
| 8 | * as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * Written by: | ||
| 16 | * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> | ||
| 17 | * Maxim Gorbachyov <maxim.gorbachev@siemens.com> | ||
| 18 | * Maxim Osipov <maxim.osipov@siemens.com> | ||
| 19 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
| 20 | * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> | ||
| 21 | */ | ||
| 22 | |||
| 23 | #ifndef LINUX_IEEE802154_H | ||
| 24 | #define LINUX_IEEE802154_H | ||
| 25 | |||
| 26 | #include <linux/types.h> | ||
| 27 | #include <linux/random.h> | ||
| 28 | #include <asm/byteorder.h> | ||
| 29 | |||
| 30 | #define IEEE802154_MTU 127 | ||
| 31 | #define IEEE802154_MIN_PSDU_LEN 5 | ||
| 32 | |||
| 33 | #define IEEE802154_PAN_ID_BROADCAST 0xffff | ||
| 34 | #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff | ||
| 35 | #define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe | ||
| 36 | |||
| 37 | #define IEEE802154_EXTENDED_ADDR_LEN 8 | ||
| 38 | |||
| 39 | #define IEEE802154_LIFS_PERIOD 40 | ||
| 40 | #define IEEE802154_SIFS_PERIOD 12 | ||
| 41 | |||
| 42 | #define IEEE802154_MAX_CHANNEL 26 | ||
| 43 | #define IEEE802154_MAX_PAGE 31 | ||
| 44 | |||
| 45 | #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ | ||
| 46 | #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ | ||
| 47 | #define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ | ||
| 48 | #define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ | ||
| 49 | |||
| 50 | #define IEEE802154_FC_TYPE_SHIFT 0 | ||
| 51 | #define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) | ||
| 52 | #define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) | ||
| 53 | #define IEEE802154_FC_SET_TYPE(v, x) do { \ | ||
| 54 | v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ | ||
| 55 | (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ | ||
| 56 | } while (0) | ||
| 57 | |||
| 58 | #define IEEE802154_FC_SECEN_SHIFT 3 | ||
| 59 | #define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) | ||
| 60 | #define IEEE802154_FC_FRPEND_SHIFT 4 | ||
| 61 | #define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) | ||
| 62 | #define IEEE802154_FC_ACK_REQ_SHIFT 5 | ||
| 63 | #define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) | ||
| 64 | #define IEEE802154_FC_INTRA_PAN_SHIFT 6 | ||
| 65 | #define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT) | ||
| 66 | |||
| 67 | #define IEEE802154_FC_SAMODE_SHIFT 14 | ||
| 68 | #define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) | ||
| 69 | #define IEEE802154_FC_DAMODE_SHIFT 10 | ||
| 70 | #define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) | ||
| 71 | |||
| 72 | #define IEEE802154_FC_VERSION_SHIFT 12 | ||
| 73 | #define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) | ||
| 74 | #define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) | ||
| 75 | |||
| 76 | #define IEEE802154_FC_SAMODE(x) \ | ||
| 77 | (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) | ||
| 78 | |||
| 79 | #define IEEE802154_FC_DAMODE(x) \ | ||
| 80 | (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) | ||
| 81 | |||
| 82 | #define IEEE802154_SCF_SECLEVEL_MASK 7 | ||
| 83 | #define IEEE802154_SCF_SECLEVEL_SHIFT 0 | ||
| 84 | #define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) | ||
| 85 | #define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 | ||
| 86 | #define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) | ||
| 87 | #define IEEE802154_SCF_KEY_ID_MODE(x) \ | ||
| 88 | ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) | ||
| 89 | |||
| 90 | #define IEEE802154_SCF_KEY_IMPLICIT 0 | ||
| 91 | #define IEEE802154_SCF_KEY_INDEX 1 | ||
| 92 | #define IEEE802154_SCF_KEY_SHORT_INDEX 2 | ||
| 93 | #define IEEE802154_SCF_KEY_HW_INDEX 3 | ||
| 94 | |||
| 95 | #define IEEE802154_SCF_SECLEVEL_NONE 0 | ||
| 96 | #define IEEE802154_SCF_SECLEVEL_MIC32 1 | ||
| 97 | #define IEEE802154_SCF_SECLEVEL_MIC64 2 | ||
| 98 | #define IEEE802154_SCF_SECLEVEL_MIC128 3 | ||
| 99 | #define IEEE802154_SCF_SECLEVEL_ENC 4 | ||
| 100 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 | ||
| 101 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 | ||
| 102 | #define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 | ||
| 103 | |||
| 104 | /* MAC footer size */ | ||
| 105 | #define IEEE802154_MFR_SIZE 2 /* 2 octets */ | ||
| 106 | |||
| 107 | /* MAC's Command Frames Identifiers */ | ||
| 108 | #define IEEE802154_CMD_ASSOCIATION_REQ 0x01 | ||
| 109 | #define IEEE802154_CMD_ASSOCIATION_RESP 0x02 | ||
| 110 | #define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 | ||
| 111 | #define IEEE802154_CMD_DATA_REQ 0x04 | ||
| 112 | #define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 | ||
| 113 | #define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 | ||
| 114 | #define IEEE802154_CMD_BEACON_REQ 0x07 | ||
| 115 | #define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08 | ||
| 116 | #define IEEE802154_CMD_GTS_REQ 0x09 | ||
| 117 | |||
| 118 | /* | ||
| 119 | * The return values of MAC operations | ||
| 120 | */ | ||
| 121 | enum { | ||
| 122 | /* | ||
| 123 | * The requested operation was completed successfully. | ||
| 124 | * For a transmission request, this value indicates | ||
| 125 | * a successful transmission. | ||
| 126 | */ | ||
| 127 | IEEE802154_SUCCESS = 0x0, | ||
| 128 | |||
| 129 | /* The beacon was lost following a synchronization request. */ | ||
| 130 | IEEE802154_BEACON_LOSS = 0xe0, | ||
| 131 | /* | ||
| 132 | * A transmission could not take place due to activity on the | ||
| 133 | * channel, i.e., the CSMA-CA mechanism has failed. | ||
| 134 | */ | ||
| 135 | IEEE802154_CHNL_ACCESS_FAIL = 0xe1, | ||
| 136 | /* The GTS request has been denied by the PAN coordinator. */ | ||
| 137 | IEEE802154_DENINED = 0xe2, | ||
| 138 | /* The attempt to disable the transceiver has failed. */ | ||
| 139 | IEEE802154_DISABLE_TRX_FAIL = 0xe3, | ||
| 140 | /* | ||
| 141 | * The received frame induces a failed security check according to | ||
| 142 | * the security suite. | ||
| 143 | */ | ||
| 144 | IEEE802154_FAILED_SECURITY_CHECK = 0xe4, | ||
| 145 | /* | ||
| 146 | * The frame resulting from secure processing has a length that is | ||
| 147 | * greater than aMACMaxFrameSize. | ||
| 148 | */ | ||
| 149 | IEEE802154_FRAME_TOO_LONG = 0xe5, | ||
| 150 | /* | ||
| 151 | * The requested GTS transmission failed because the specified GTS | ||
| 152 | * either did not have a transmit GTS direction or was not defined. | ||
| 153 | */ | ||
| 154 | IEEE802154_INVALID_GTS = 0xe6, | ||
| 155 | /* | ||
| 156 | * A request to purge an MSDU from the transaction queue was made using | ||
| 157 | * an MSDU handle that was not found in the transaction table. | ||
| 158 | */ | ||
| 159 | IEEE802154_INVALID_HANDLE = 0xe7, | ||
| 160 | /* A parameter in the primitive is out of the valid range.*/ | ||
| 161 | IEEE802154_INVALID_PARAMETER = 0xe8, | ||
| 162 | /* No acknowledgment was received after aMaxFrameRetries. */ | ||
| 163 | IEEE802154_NO_ACK = 0xe9, | ||
| 164 | /* A scan operation failed to find any network beacons.*/ | ||
| 165 | IEEE802154_NO_BEACON = 0xea, | ||
| 166 | /* No response data were available following a request. */ | ||
| 167 | IEEE802154_NO_DATA = 0xeb, | ||
| 168 | /* The operation failed because a short address was not allocated. */ | ||
| 169 | IEEE802154_NO_SHORT_ADDRESS = 0xec, | ||
| 170 | /* | ||
| 171 | * A receiver enable request was unsuccessful because it could not be | ||
| 172 | * completed within the CAP. | ||
| 173 | */ | ||
| 174 | IEEE802154_OUT_OF_CAP = 0xed, | ||
| 175 | /* | ||
| 176 | * A PAN identifier conflict has been detected and communicated to the | ||
| 177 | * PAN coordinator. | ||
| 178 | */ | ||
| 179 | IEEE802154_PANID_CONFLICT = 0xee, | ||
| 180 | /* A coordinator realignment command has been received. */ | ||
| 181 | IEEE802154_REALIGMENT = 0xef, | ||
| 182 | /* The transaction has expired and its information discarded. */ | ||
| 183 | IEEE802154_TRANSACTION_EXPIRED = 0xf0, | ||
| 184 | /* There is no capacity to store the transaction. */ | ||
| 185 | IEEE802154_TRANSACTION_OVERFLOW = 0xf1, | ||
| 186 | /* | ||
| 187 | * The transceiver was in the transmitter enabled state when the | ||
| 188 | * receiver was requested to be enabled. | ||
| 189 | */ | ||
| 190 | IEEE802154_TX_ACTIVE = 0xf2, | ||
| 191 | /* The appropriate key is not available in the ACL. */ | ||
| 192 | IEEE802154_UNAVAILABLE_KEY = 0xf3, | ||
| 193 | /* | ||
| 194 | * A SET/GET request was issued with the identifier of a PIB attribute | ||
| 195 | * that is not supported. | ||
| 196 | */ | ||
| 197 | IEEE802154_UNSUPPORTED_ATTR = 0xf4, | ||
| 198 | /* | ||
| 199 | * A request to perform a scan operation failed because the MLME was | ||
| 200 | * in the process of performing a previously initiated scan operation. | ||
| 201 | */ | ||
| 202 | IEEE802154_SCAN_IN_PROGRESS = 0xfc, | ||
| 203 | }; | ||
| 204 | |||
| 205 | /** | ||
| 206 | * ieee802154_is_valid_psdu_len - check if psdu len is valid | ||
| 207 | * @len: psdu len with (MHR + payload + MFR) | ||
| 208 | */ | ||
| 209 | static inline bool ieee802154_is_valid_psdu_len(const u8 len) | ||
| 210 | { | ||
| 211 | return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); | ||
| 212 | } | ||
| 213 | |||
| 214 | /** | ||
| 215 | * ieee802154_is_valid_extended_addr - check if extended addr is valid | ||
| 216 | * @addr: extended addr to check | ||
| 217 | */ | ||
| 218 | static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) | ||
| 219 | { | ||
| 220 | /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff | ||
| 221 | * is used internally as extended to short address broadcast mapping. | ||
| 222 | * This is currently a workaround because neighbor discovery can't | ||
| 223 | * deal with short addresses types right now. | ||
| 224 | */ | ||
| 225 | return ((addr != cpu_to_le64(0x0000000000000000ULL)) && | ||
| 226 | (addr != cpu_to_le64(0xffffffffffffffffULL))); | ||
| 227 | } | ||
| 228 | |||
| 229 | /** | ||
| 230 | * ieee802154_random_extended_addr - generates a random extended address | ||
| 231 | * @addr: extended addr pointer to place the random address | ||
| 232 | */ | ||
| 233 | static inline void ieee802154_random_extended_addr(__le64 *addr) | ||
| 234 | { | ||
| 235 | get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); | ||
| 236 | |||
| 237 | /* toggle some bit if we hit an invalid extended addr */ | ||
| 238 | if (!ieee802154_is_valid_extended_addr(*addr)) | ||
| 239 | ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; | ||
| 240 | } | ||
| 241 | |||
| 242 | #endif /* LINUX_IEEE802154_H */ | ||
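A small sketch of how a driver might lean on the new header when bringing up an interface and sanity-checking received frames; my_setup_hw_addr() and my_rx_looks_like_data() are hypothetical wrappers used only for illustration:

	#include <linux/ieee802154.h>

	static void my_setup_hw_addr(__le64 *hw_addr)
	{
		/* start from a random, locally generated EUI-64 */
		ieee802154_random_extended_addr(hw_addr);
	}

	static bool my_rx_looks_like_data(const u8 *buf, u8 len)
	{
		u16 fc;

		if (!ieee802154_is_valid_psdu_len(len))
			return false;			/* 5..127 octets, MFR included */

		fc = buf[0] | (buf[1] << 8);		/* frame control is little endian */
		return IEEE802154_FC_TYPE(fc) == IEEE802154_FC_TYPE_DATA;
	}
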
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04f..0a8ce762a47f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
| 17 | #include <uapi/linux/if_bridge.h> | 17 | #include <uapi/linux/if_bridge.h> |
| 18 | #include <linux/bitops.h> | ||
| 18 | 19 | ||
| 19 | struct br_ip { | 20 | struct br_ip { |
| 20 | union { | 21 | union { |
| @@ -32,11 +33,41 @@ struct br_ip_list { | |||
| 32 | struct br_ip addr; | 33 | struct br_ip addr; |
| 33 | }; | 34 | }; |
| 34 | 35 | ||
| 36 | #define BR_HAIRPIN_MODE BIT(0) | ||
| 37 | #define BR_BPDU_GUARD BIT(1) | ||
| 38 | #define BR_ROOT_BLOCK BIT(2) | ||
| 39 | #define BR_MULTICAST_FAST_LEAVE BIT(3) | ||
| 40 | #define BR_ADMIN_COST BIT(4) | ||
| 41 | #define BR_LEARNING BIT(5) | ||
| 42 | #define BR_FLOOD BIT(6) | ||
| 43 | #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) | ||
| 44 | #define BR_PROMISC BIT(7) | ||
| 45 | #define BR_PROXYARP BIT(8) | ||
| 46 | #define BR_LEARNING_SYNC BIT(9) | ||
| 47 | |||
| 35 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 48 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
| 36 | 49 | ||
| 37 | typedef int br_should_route_hook_t(struct sk_buff *skb); | 50 | typedef int br_should_route_hook_t(struct sk_buff *skb); |
| 38 | extern br_should_route_hook_t __rcu *br_should_route_hook; | 51 | extern br_should_route_hook_t __rcu *br_should_route_hook; |
| 39 | 52 | ||
| 53 | #if IS_ENABLED(CONFIG_BRIDGE) | ||
| 54 | int br_fdb_external_learn_add(struct net_device *dev, | ||
| 55 | const unsigned char *addr, u16 vid); | ||
| 56 | int br_fdb_external_learn_del(struct net_device *dev, | ||
| 57 | const unsigned char *addr, u16 vid); | ||
| 58 | #else | ||
| 59 | static inline int br_fdb_external_learn_add(struct net_device *dev, | ||
| 60 | const unsigned char *addr, u16 vid) | ||
| 61 | { | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | static inline int br_fdb_external_learn_del(struct net_device *dev, | ||
| 65 | const unsigned char *addr, u16 vid) | ||
| 66 | { | ||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | #endif | ||
| 70 | |||
| 40 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) | 71 | #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) |
| 41 | int br_multicast_list_adjacent(struct net_device *dev, | 72 | int br_multicast_list_adjacent(struct net_device *dev, |
| 42 | struct list_head *br_ip_list); | 73 | struct list_head *br_ip_list); |
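The intended call sites are switch drivers whose hardware learns and ages FDB entries on its own; a hedged sketch of notifying the bridge (the port_netdev/mac/vid variables and the warning path are assumptions):

	/* hardware reported a newly learned {MAC, VLAN} pair on this port */
	err = br_fdb_external_learn_add(port_netdev, mac, vid);
	if (err)
		netdev_warn(port_netdev, "failed to sync learned FDB entry\n");

	/* ... and later, when the hardware ages the entry out */
	br_fdb_external_learn_del(port_netdev, mac, vid);
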
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a319..515a35e2a48a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
| @@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, | |||
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | /** | 284 | /** |
| 285 | * vlan_insert_tag - regular VLAN tag inserting | 285 | * __vlan_insert_tag - regular VLAN tag inserting |
| 286 | * @skb: skbuff to tag | 286 | * @skb: skbuff to tag |
| 287 | * @vlan_proto: VLAN encapsulation protocol | 287 | * @vlan_proto: VLAN encapsulation protocol |
| 288 | * @vlan_tci: VLAN TCI to insert | 288 | * @vlan_tci: VLAN TCI to insert |
| 289 | * | 289 | * |
| 290 | * Inserts the VLAN tag into @skb as part of the payload | 290 | * Inserts the VLAN tag into @skb as part of the payload |
| 291 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | 291 | * Returns an error if skb_cow_head() fails. |
| 292 | * | ||
| 293 | * Following the skb_unshare() example, in case of error, the calling function | ||
| 294 | * doesn't have to worry about freeing the original skb. | ||
| 295 | * | 292 | * |
| 296 | * Does not change skb->protocol so this function can be used during receive. | 293 | * Does not change skb->protocol so this function can be used during receive. |
| 297 | */ | 294 | */ |
| 298 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | 295 | static inline int __vlan_insert_tag(struct sk_buff *skb, |
| 299 | __be16 vlan_proto, u16 vlan_tci) | 296 | __be16 vlan_proto, u16 vlan_tci) |
| 300 | { | 297 | { |
| 301 | struct vlan_ethhdr *veth; | 298 | struct vlan_ethhdr *veth; |
| 302 | 299 | ||
| 303 | if (skb_cow_head(skb, VLAN_HLEN) < 0) { | 300 | if (skb_cow_head(skb, VLAN_HLEN) < 0) |
| 304 | dev_kfree_skb_any(skb); | 301 | return -ENOMEM; |
| 305 | return NULL; | 302 | |
| 306 | } | ||
| 307 | veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); | 303 | veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); |
| 308 | 304 | ||
| 309 | /* Move the mac addresses to the beginning of the new header. */ | 305 | /* Move the mac addresses to the beginning of the new header. */ |
| @@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | |||
| 316 | /* now, the TCI */ | 312 | /* now, the TCI */ |
| 317 | veth->h_vlan_TCI = htons(vlan_tci); | 313 | veth->h_vlan_TCI = htons(vlan_tci); |
| 318 | 314 | ||
| 315 | return 0; | ||
| 316 | } | ||
| 317 | |||
| 318 | /** | ||
| 319 | * vlan_insert_tag - regular VLAN tag inserting | ||
| 320 | * @skb: skbuff to tag | ||
| 321 | * @vlan_proto: VLAN encapsulation protocol | ||
| 322 | * @vlan_tci: VLAN TCI to insert | ||
| 323 | * | ||
| 324 | * Inserts the VLAN tag into @skb as part of the payload | ||
| 325 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | ||
| 326 | * | ||
| 327 | * Following the skb_unshare() example, in case of error, the calling function | ||
| 328 | * doesn't have to worry about freeing the original skb. | ||
| 329 | * | ||
| 330 | * Does not change skb->protocol so this function can be used during receive. | ||
| 331 | */ | ||
| 332 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | ||
| 333 | __be16 vlan_proto, u16 vlan_tci) | ||
| 334 | { | ||
| 335 | int err; | ||
| 336 | |||
| 337 | err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); | ||
| 338 | if (err) { | ||
| 339 | dev_kfree_skb_any(skb); | ||
| 340 | return NULL; | ||
| 341 | } | ||
| 319 | return skb; | 342 | return skb; |
| 320 | } | 343 | } |
| 321 | 344 | ||
| 322 | /** | 345 | /** |
| 323 | * __vlan_put_tag - regular VLAN tag inserting | 346 | * vlan_insert_tag_set_proto - regular VLAN tag inserting |
| 324 | * @skb: skbuff to tag | 347 | * @skb: skbuff to tag |
| 348 | * @vlan_proto: VLAN encapsulation protocol | ||
| 325 | * @vlan_tci: VLAN TCI to insert | 349 | * @vlan_tci: VLAN TCI to insert |
| 326 | * | 350 | * |
| 327 | * Inserts the VLAN tag into @skb as part of the payload | 351 | * Inserts the VLAN tag into @skb as part of the payload |
| @@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | |||
| 330 | * Following the skb_unshare() example, in case of error, the calling function | 354 | * Following the skb_unshare() example, in case of error, the calling function |
| 331 | * doesn't have to worry about freeing the original skb. | 355 | * doesn't have to worry about freeing the original skb. |
| 332 | */ | 356 | */ |
| 333 | static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, | 357 | static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, |
| 334 | __be16 vlan_proto, u16 vlan_tci) | 358 | __be16 vlan_proto, |
| 359 | u16 vlan_tci) | ||
| 335 | { | 360 | { |
| 336 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); | 361 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); |
| 337 | if (skb) | 362 | if (skb) |
| @@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, | |||
| 339 | return skb; | 364 | return skb; |
| 340 | } | 365 | } |
| 341 | 366 | ||
| 342 | /** | 367 | /* |
| 343 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting | 368 | * __vlan_hwaccel_push_inside - pushes vlan tag to the payload |
| 344 | * @skb: skbuff to tag | 369 | * @skb: skbuff to tag |
| 345 | * @vlan_proto: VLAN encapsulation protocol | ||
| 346 | * @vlan_tci: VLAN TCI to insert | ||
| 347 | * | 370 | * |
| 348 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest | 371 | * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. |
| 372 | * | ||
| 373 | * Following the skb_unshare() example, in case of error, the calling function | ||
| 374 | * doesn't have to worry about freeing the original skb. | ||
| 349 | */ | 375 | */ |
| 350 | static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, | 376 | static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) |
| 351 | __be16 vlan_proto, | ||
| 352 | u16 vlan_tci) | ||
| 353 | { | 377 | { |
| 354 | skb->vlan_proto = vlan_proto; | 378 | skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, |
| 355 | skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; | 379 | vlan_tx_tag_get(skb)); |
| 380 | if (likely(skb)) | ||
| 381 | skb->vlan_tci = 0; | ||
| 382 | return skb; | ||
| 383 | } | ||
| 384 | /* | ||
| 385 | * vlan_hwaccel_push_inside - pushes vlan tag to the payload | ||
| 386 | * @skb: skbuff to tag | ||
| 387 | * | ||
| 388 | * Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the | ||
| 389 | * VLAN tag from @skb->vlan_tci inside to the payload. | ||
| 390 | * | ||
| 391 | * Following the skb_unshare() example, in case of error, the calling function | ||
| 392 | * doesn't have to worry about freeing the original skb. | ||
| 393 | */ | ||
| 394 | static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) | ||
| 395 | { | ||
| 396 | if (vlan_tx_tag_present(skb)) | ||
| 397 | skb = __vlan_hwaccel_push_inside(skb); | ||
| 356 | return skb; | 398 | return skb; |
| 357 | } | 399 | } |
| 358 | 400 | ||
| 359 | /** | 401 | /** |
| 360 | * vlan_put_tag - inserts VLAN tag according to device features | 402 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting |
| 361 | * @skb: skbuff to tag | 403 | * @skb: skbuff to tag |
| 404 | * @vlan_proto: VLAN encapsulation protocol | ||
| 362 | * @vlan_tci: VLAN TCI to insert | 405 | * @vlan_tci: VLAN TCI to insert |
| 363 | * | 406 | * |
| 364 | * Assumes skb->dev is the target that will xmit this frame. | 407 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest |
| 365 | * Returns a VLAN tagged skb. | ||
| 366 | */ | 408 | */ |
| 367 | static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, | 409 | static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, |
| 368 | __be16 vlan_proto, u16 vlan_tci) | 410 | __be16 vlan_proto, u16 vlan_tci) |
| 369 | { | 411 | { |
| 370 | if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { | 412 | skb->vlan_proto = vlan_proto; |
| 371 | return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); | 413 | skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; |
| 372 | } else { | ||
| 373 | return __vlan_put_tag(skb, vlan_proto, vlan_tci); | ||
| 374 | } | ||
| 375 | } | 414 | } |
| 376 | 415 | ||
| 377 | /** | 416 | /** |
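For drivers the net effect is: __vlan_put_tag() and vlan_put_tag() are gone, __vlan_hwaccel_put_tag() no longer returns the skb, and the new push-inside helpers cover the software-tagging fallback. A hedged before/after sketch of a transmit path, assuming vlan_proto and vlan_tci are already known:

	/* before: callers picked hw offload or software insertion by hand,
	 * e.g. skb = vlan_put_tag(skb, vlan_proto, vlan_tci);
	 */

	/* after: record the tag in the hwaccel fields ... */
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);

	/* ... and push it into the payload only when the device can't offload it */
	if (!vlan_hw_offload_capable(skb->dev->features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NETDEV_TX_OK;	/* original skb already freed */
	}
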
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index d8257ab60bac..2c476acb87d9 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h | |||
| @@ -164,7 +164,7 @@ struct st_sensor_transfer_function { | |||
| 164 | }; | 164 | }; |
| 165 | 165 | ||
| 166 | /** | 166 | /** |
| 167 | * struct st_sensors - ST sensors list | 167 | * struct st_sensor_settings - ST specific sensor settings |
| 168 | * @wai: Contents of WhoAmI register. | 168 | * @wai: Contents of WhoAmI register. |
| 169 | * @sensors_supported: List of supported sensors by struct itself. | 169 | * @sensors_supported: List of supported sensors by struct itself. |
| 170 | * @ch: IIO channels for the sensor. | 170 | * @ch: IIO channels for the sensor. |
| @@ -177,7 +177,7 @@ struct st_sensor_transfer_function { | |||
| 177 | * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. | 177 | * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. |
| 178 | * @bootime: samples to discard when the sensor passes from power-down to power-up. | 178 | * @bootime: samples to discard when the sensor passes from power-down to power-up. |
| 179 | */ | 179 | */ |
| 180 | struct st_sensors { | 180 | struct st_sensor_settings { |
| 181 | u8 wai; | 181 | u8 wai; |
| 182 | char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; | 182 | char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; |
| 183 | struct iio_chan_spec *ch; | 183 | struct iio_chan_spec *ch; |
| @@ -196,7 +196,7 @@ struct st_sensors { | |||
| 196 | * struct st_sensor_data - ST sensor device status | 196 | * struct st_sensor_data - ST sensor device status |
| 197 | * @dev: Pointer to instance of struct device (I2C or SPI). | 197 | * @dev: Pointer to instance of struct device (I2C or SPI). |
| 198 | * @trig: The trigger in use by the core driver. | 198 | * @trig: The trigger in use by the core driver. |
| 199 | * @sensor: Pointer to the current sensor struct in use. | 199 | * @sensor_settings: Pointer to the specific sensor settings in use. |
| 200 | * @current_fullscale: Maximum range of measure by the sensor. | 200 | * @current_fullscale: Maximum range of measure by the sensor. |
| 201 | * @vdd: Pointer to sensor's Vdd power supply | 201 | * @vdd: Pointer to sensor's Vdd power supply |
| 202 | * @vdd_io: Pointer to sensor's Vdd-IO power supply | 202 | * @vdd_io: Pointer to sensor's Vdd-IO power supply |
| @@ -213,7 +213,7 @@ struct st_sensors { | |||
| 213 | struct st_sensor_data { | 213 | struct st_sensor_data { |
| 214 | struct device *dev; | 214 | struct device *dev; |
| 215 | struct iio_trigger *trig; | 215 | struct iio_trigger *trig; |
| 216 | struct st_sensors *sensor; | 216 | struct st_sensor_settings *sensor_settings; |
| 217 | struct st_sensor_fullscale_avl *current_fullscale; | 217 | struct st_sensor_fullscale_avl *current_fullscale; |
| 218 | struct regulator *vdd; | 218 | struct regulator *vdd; |
| 219 | struct regulator *vdd_io; | 219 | struct regulator *vdd_io; |
| @@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, | |||
| 279 | struct iio_chan_spec const *ch, int *val); | 279 | struct iio_chan_spec const *ch, int *val); |
| 280 | 280 | ||
| 281 | int st_sensors_check_device_support(struct iio_dev *indio_dev, | 281 | int st_sensors_check_device_support(struct iio_dev *indio_dev, |
| 282 | int num_sensors_list, const struct st_sensors *sensors); | 282 | int num_sensors_list, const struct st_sensor_settings *sensor_settings); |
| 283 | 283 | ||
| 284 | ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, | 284 | ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, |
| 285 | struct device_attribute *attr, char *buf); | 285 | struct device_attribute *attr, char *buf); |
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 15dc6bc2bdd2..3642ce7ef512 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| 14 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
| 15 | #include <linux/iio/types.h> | 15 | #include <linux/iio/types.h> |
| 16 | #include <linux/of.h> | ||
| 16 | /* IIO TODO LIST */ | 17 | /* IIO TODO LIST */ |
| 17 | /* | 18 | /* |
| 18 | * Provide means of adjusting timer accuracy. | 19 | * Provide means of adjusting timer accuracy. |
| @@ -326,6 +327,11 @@ struct iio_dev; | |||
| 326 | * @update_scan_mode: function to configure device and scan buffer when | 327 | * @update_scan_mode: function to configure device and scan buffer when |
| 327 | * channels have changed | 328 | * channels have changed |
| 328 | * @debugfs_reg_access: function to read or write register value of device | 329 | * @debugfs_reg_access: function to read or write register value of device |
| 330 | * @of_xlate: function pointer to obtain channel specifier index. | ||
| 331 | * When #iio-cells is greater than '0', the driver could | ||
| 332 | * provide a custom of_xlate function that reads the | ||
| 333 | * *args* and returns the appropriate index in the registered | ||
| 334 | * IIO channels array. | ||
| 329 | **/ | 335 | **/ |
| 330 | struct iio_info { | 336 | struct iio_info { |
| 331 | struct module *driver_module; | 337 | struct module *driver_module; |
| @@ -385,6 +391,8 @@ struct iio_info { | |||
| 385 | int (*debugfs_reg_access)(struct iio_dev *indio_dev, | 391 | int (*debugfs_reg_access)(struct iio_dev *indio_dev, |
| 386 | unsigned reg, unsigned writeval, | 392 | unsigned reg, unsigned writeval, |
| 387 | unsigned *readval); | 393 | unsigned *readval); |
| 394 | int (*of_xlate)(struct iio_dev *indio_dev, | ||
| 395 | const struct of_phandle_args *iiospec); | ||
| 388 | }; | 396 | }; |
| 389 | 397 | ||
| 390 | /** | 398 | /** |
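A minimal sketch of a provider implementing the new of_xlate callback for a one-cell specifier, mapping the cell value onto the driver's channel array; the my_adc_* names are placeholders:

	static int my_adc_of_xlate(struct iio_dev *indio_dev,
				   const struct of_phandle_args *iiospec)
	{
		int i;

		/* the single cell selects a channel by its ->channel number */
		for (i = 0; i < indio_dev->num_channels; i++)
			if (indio_dev->channels[i].channel == iiospec->args[0])
				return i;

		return -EINVAL;
	}

	static const struct iio_info my_adc_info = {
		.driver_module	= THIS_MODULE,
		.of_xlate	= my_adc_of_xlate,
	};
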
diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 83222cebd47b..c2d6082a1a4c 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h | |||
| @@ -24,6 +24,7 @@ enum integrity_status { | |||
| 24 | #ifdef CONFIG_INTEGRITY | 24 | #ifdef CONFIG_INTEGRITY |
| 25 | extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); | 25 | extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); |
| 26 | extern void integrity_inode_free(struct inode *inode); | 26 | extern void integrity_inode_free(struct inode *inode); |
| 27 | extern void __init integrity_load_keys(void); | ||
| 27 | 28 | ||
| 28 | #else | 29 | #else |
| 29 | static inline struct integrity_iint_cache * | 30 | static inline struct integrity_iint_cache * |
| @@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode) | |||
| 36 | { | 37 | { |
| 37 | return; | 38 | return; |
| 38 | } | 39 | } |
| 40 | |||
| 41 | static inline void integrity_load_keys(void) | ||
| 42 | { | ||
| 43 | } | ||
| 39 | #endif /* CONFIG_INTEGRITY */ | 44 | #endif /* CONFIG_INTEGRITY */ |
| 45 | |||
| 40 | #endif /* _LINUX_INTEGRITY_H */ | 46 | #endif /* _LINUX_INTEGRITY_H */ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 69517a24bc50..d9b05b5bf8c7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) | |||
| 556 | atomic_dec(&t->count); | 556 | atomic_dec(&t->count); |
| 557 | } | 557 | } |
| 558 | 558 | ||
| 559 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | ||
| 560 | { | ||
| 561 | smp_mb__before_atomic(); | ||
| 562 | atomic_dec(&t->count); | ||
| 563 | } | ||
| 564 | |||
| 565 | extern void tasklet_kill(struct tasklet_struct *t); | 559 | extern void tasklet_kill(struct tasklet_struct *t); |
| 566 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | 560 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); |
| 567 | extern void tasklet_init(struct tasklet_struct *t, | 561 | extern void tasklet_init(struct tasklet_struct *t, |
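tasklet_hi_enable() was byte-for-byte identical to tasklet_enable() (the same smp_mb__before_atomic() plus atomic_dec() on ->count), so remaining callers simply switch helpers; a one-line sketch with a hypothetical tasklet:

	/* tasklet_hi_enable(&priv->rx_tasklet);    removed */
	tasklet_enable(&priv->rx_tasklet);	/* identical semantics */
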
diff --git a/include/linux/io.h b/include/linux/io.h index d5fc9b8d8b03..fa02e55e5a2e 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
| @@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) | |||
| 61 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | 61 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) |
| 62 | 62 | ||
| 63 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, | 63 | void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, |
| 64 | unsigned long size); | 64 | resource_size_t size); |
| 65 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, | 65 | void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, |
| 66 | unsigned long size); | 66 | resource_size_t size); |
| 67 | void devm_iounmap(struct device *dev, void __iomem *addr); | 67 | void devm_iounmap(struct device *dev, void __iomem *addr); |
| 68 | int check_signature(const volatile void __iomem *io_addr, | 68 | int check_signature(const volatile void __iomem *io_addr, |
| 69 | const unsigned char *signature, int length); | 69 | const unsigned char *signature, int length); |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b29a5982e1c3..38daa453f2e5 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
| 23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
| 24 | #include <linux/of.h> | ||
| 24 | #include <linux/types.h> | 25 | #include <linux/types.h> |
| 25 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
| 26 | #include <trace/events/iommu.h> | 27 | #include <trace/events/iommu.h> |
| @@ -28,7 +29,7 @@ | |||
| 28 | #define IOMMU_READ (1 << 0) | 29 | #define IOMMU_READ (1 << 0) |
| 29 | #define IOMMU_WRITE (1 << 1) | 30 | #define IOMMU_WRITE (1 << 1) |
| 30 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ | 31 | #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ |
| 31 | #define IOMMU_EXEC (1 << 3) | 32 | #define IOMMU_NOEXEC (1 << 3) |
| 32 | 33 | ||
| 33 | struct iommu_ops; | 34 | struct iommu_ops; |
| 34 | struct iommu_group; | 35 | struct iommu_group; |
| @@ -62,6 +63,7 @@ enum iommu_cap { | |||
| 62 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA | 63 | IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA |
| 63 | transactions */ | 64 | transactions */ |
| 64 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ | 65 | IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ |
| 66 | IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ | ||
| 65 | }; | 67 | }; |
| 66 | 68 | ||
| 67 | /* | 69 | /* |
| @@ -105,7 +107,9 @@ enum iommu_attr { | |||
| 105 | * @remove_device: remove device from iommu grouping | 107 | * @remove_device: remove device from iommu grouping |
| 106 | * @domain_get_attr: Query domain attributes | 108 | * @domain_get_attr: Query domain attributes |
| 107 | * @domain_set_attr: Change domain attributes | 109 | * @domain_set_attr: Change domain attributes |
| 110 | * @of_xlate: add OF master IDs to iommu grouping | ||
| 108 | * @pgsize_bitmap: bitmap of supported page sizes | 111 | * @pgsize_bitmap: bitmap of supported page sizes |
| 112 | * @priv: per-instance data private to the iommu driver | ||
| 109 | */ | 113 | */ |
| 110 | struct iommu_ops { | 114 | struct iommu_ops { |
| 111 | bool (*capable)(enum iommu_cap); | 115 | bool (*capable)(enum iommu_cap); |
| @@ -137,7 +141,12 @@ struct iommu_ops { | |||
| 137 | /* Get the number of windows per domain */ | 141 | /* Get the number of windows per domain */ |
| 138 | u32 (*domain_get_windows)(struct iommu_domain *domain); | 142 | u32 (*domain_get_windows)(struct iommu_domain *domain); |
| 139 | 143 | ||
| 144 | #ifdef CONFIG_OF_IOMMU | ||
| 145 | int (*of_xlate)(struct device *dev, struct of_phandle_args *args); | ||
| 146 | #endif | ||
| 147 | |||
| 140 | unsigned long pgsize_bitmap; | 148 | unsigned long pgsize_bitmap; |
| 149 | void *priv; | ||
| 141 | }; | 150 | }; |
| 142 | 151 | ||
| 143 | #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ | 152 | #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ |
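With IOMMU_EXEC inverted into IOMMU_NOEXEC and a matching capability bit added, callers can ask for no-exec mappings only where the hardware enforces them. A hedged sketch, assuming domain, iova, paddr and size are already set up:

	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_capable(dev->bus, IOMMU_CAP_NOEXEC))
		prot |= IOMMU_NOEXEC;

	ret = iommu_map(domain, iova, paddr, size, prot);
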
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 35e7eca4e33b..1eee6bcfcf76 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
| @@ -6,15 +6,7 @@ | |||
| 6 | #include <linux/rwsem.h> | 6 | #include <linux/rwsem.h> |
| 7 | #include <linux/notifier.h> | 7 | #include <linux/notifier.h> |
| 8 | #include <linux/nsproxy.h> | 8 | #include <linux/nsproxy.h> |
| 9 | 9 | #include <linux/ns_common.h> | |
| 10 | /* | ||
| 11 | * ipc namespace events | ||
| 12 | */ | ||
| 13 | #define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ | ||
| 14 | #define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ | ||
| 15 | #define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ | ||
| 16 | |||
| 17 | #define IPCNS_CALLBACK_PRI 0 | ||
| 18 | 10 | ||
| 19 | struct user_namespace; | 11 | struct user_namespace; |
| 20 | 12 | ||
| @@ -38,7 +30,6 @@ struct ipc_namespace { | |||
| 38 | unsigned int msg_ctlmni; | 30 | unsigned int msg_ctlmni; |
| 39 | atomic_t msg_bytes; | 31 | atomic_t msg_bytes; |
| 40 | atomic_t msg_hdrs; | 32 | atomic_t msg_hdrs; |
| 41 | int auto_msgmni; | ||
| 42 | 33 | ||
| 43 | size_t shm_ctlmax; | 34 | size_t shm_ctlmax; |
| 44 | size_t shm_ctlall; | 35 | size_t shm_ctlall; |
| @@ -68,7 +59,7 @@ struct ipc_namespace { | |||
| 68 | /* user_ns which owns the ipc ns */ | 59 | /* user_ns which owns the ipc ns */ |
| 69 | struct user_namespace *user_ns; | 60 | struct user_namespace *user_ns; |
| 70 | 61 | ||
| 71 | unsigned int proc_inum; | 62 | struct ns_common ns; |
| 72 | }; | 63 | }; |
| 73 | 64 | ||
| 74 | extern struct ipc_namespace init_ipc_ns; | 65 | extern struct ipc_namespace init_ipc_ns; |
| @@ -77,18 +68,8 @@ extern atomic_t nr_ipc_ns; | |||
| 77 | extern spinlock_t mq_lock; | 68 | extern spinlock_t mq_lock; |
| 78 | 69 | ||
| 79 | #ifdef CONFIG_SYSVIPC | 70 | #ifdef CONFIG_SYSVIPC |
| 80 | extern int register_ipcns_notifier(struct ipc_namespace *); | ||
| 81 | extern int cond_register_ipcns_notifier(struct ipc_namespace *); | ||
| 82 | extern void unregister_ipcns_notifier(struct ipc_namespace *); | ||
| 83 | extern int ipcns_notify(unsigned long); | ||
| 84 | extern void shm_destroy_orphaned(struct ipc_namespace *ns); | 71 | extern void shm_destroy_orphaned(struct ipc_namespace *ns); |
| 85 | #else /* CONFIG_SYSVIPC */ | 72 | #else /* CONFIG_SYSVIPC */ |
| 86 | static inline int register_ipcns_notifier(struct ipc_namespace *ns) | ||
| 87 | { return 0; } | ||
| 88 | static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) | ||
| 89 | { return 0; } | ||
| 90 | static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } | ||
| 91 | static inline int ipcns_notify(unsigned long l) { return 0; } | ||
| 92 | static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} | 73 | static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} |
| 93 | #endif /* CONFIG_SYSVIPC */ | 74 | #endif /* CONFIG_SYSVIPC */ |
| 94 | 75 | ||
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 76d2acbfa7c6..838dbfa3c331 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #include <linux/list.h> | 38 | #include <linux/list.h> |
| 39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
| 40 | #include <linux/acpi.h> /* For acpi_handle */ | ||
| 40 | 41 | ||
| 41 | struct module; | 42 | struct module; |
| 42 | struct device; | 43 | struct device; |
| @@ -278,15 +279,18 @@ enum ipmi_addr_src { | |||
| 278 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, | 279 | SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, |
| 279 | SI_PCI, SI_DEVICETREE, SI_DEFAULT | 280 | SI_PCI, SI_DEVICETREE, SI_DEFAULT |
| 280 | }; | 281 | }; |
| 282 | const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); | ||
| 281 | 283 | ||
| 282 | union ipmi_smi_info_union { | 284 | union ipmi_smi_info_union { |
| 285 | #ifdef CONFIG_ACPI | ||
| 283 | /* | 286 | /* |
| 284 | * the acpi_info element is defined for the SI_ACPI | 287 | * the acpi_info element is defined for the SI_ACPI |
| 285 | * address type | 288 | * address type |
| 286 | */ | 289 | */ |
| 287 | struct { | 290 | struct { |
| 288 | void *acpi_handle; | 291 | acpi_handle acpi_handle; |
| 289 | } acpi_info; | 292 | } acpi_info; |
| 293 | #endif | ||
| 290 | }; | 294 | }; |
| 291 | 295 | ||
| 292 | struct ipmi_smi_info { | 296 | struct ipmi_smi_info { |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index bd349240d50e..0b1e569f5ff5 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
| @@ -98,12 +98,11 @@ struct ipmi_smi_handlers { | |||
| 98 | operation is not allowed to fail. If an error occurs, it | 98 | operation is not allowed to fail. If an error occurs, it |
| 99 | should report back the error in a received message. It may | 99 | should report back the error in a received message. It may |
| 100 | do this in the current call context, since no write locks | 100 | do this in the current call context, since no write locks |
| 101 | are held when this is run. If the priority is > 0, the | 101 | are held when this is run. Messages are delivered one at |
| 102 | message will go into a high-priority queue and be sent | 102 | a time by the message handler; a new message will not be |
| 103 | first. Otherwise, it goes into a normal-priority queue. */ | 103 | delivered until the previous message is returned. */ |
| 104 | void (*sender)(void *send_info, | 104 | void (*sender)(void *send_info, |
| 105 | struct ipmi_smi_msg *msg, | 105 | struct ipmi_smi_msg *msg); |
| 106 | int priority); | ||
| 107 | 106 | ||
| 108 | /* Called by the upper layer to request that we try to get | 107 | /* Called by the upper layer to request that we try to get |
| 109 | events from the BMC we are attached to. */ | 108 | events from the BMC we are attached to. */ |
| @@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
| 212 | void *send_info, | 211 | void *send_info, |
| 213 | struct ipmi_device_id *device_id, | 212 | struct ipmi_device_id *device_id, |
| 214 | struct device *dev, | 213 | struct device *dev, |
| 215 | const char *sysfs_name, | ||
| 216 | unsigned char slave_addr); | 214 | unsigned char slave_addr); |
| 217 | 215 | ||
| 218 | /* | 216 | /* |
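With the priority argument dropped, an SMI driver's sender simply hands the one outstanding message to the hardware; a hedged sketch of the updated callback (the my_smi_* names are placeholders):

	static void my_smi_sender(void *send_info, struct ipmi_smi_msg *msg)
	{
		struct my_smi *smi = send_info;

		/* only one message is in flight; there is no priority queue to pick from */
		my_smi_start_transaction(smi, msg);	/* hypothetical helper */
	}
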
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61..c694e7baa621 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -42,6 +42,7 @@ struct ipv6_devconf { | |||
| 42 | __s32 accept_ra_from_local; | 42 | __s32 accept_ra_from_local; |
| 43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 43 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
| 44 | __s32 optimistic_dad; | 44 | __s32 optimistic_dad; |
| 45 | __s32 use_optimistic; | ||
| 45 | #endif | 46 | #endif |
| 46 | #ifdef CONFIG_IPV6_MROUTE | 47 | #ifdef CONFIG_IPV6_MROUTE |
| 47 | __s32 mc_forwarding; | 48 | __s32 mc_forwarding; |
| @@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) | |||
| 316 | #define tcp_twsk_ipv6only(__sk) 0 | 317 | #define tcp_twsk_ipv6only(__sk) 0 |
| 317 | #define inet_v6_ipv6only(__sk) 0 | 318 | #define inet_v6_ipv6only(__sk) 0 |
| 318 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | 319 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
| 319 | |||
| 320 | #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ | ||
| 321 | (((__sk)->sk_portpair == (__ports)) && \ | ||
| 322 | ((__sk)->sk_family == AF_INET6) && \ | ||
| 323 | ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ | ||
| 324 | ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ | ||
| 325 | (!(__sk)->sk_bound_dev_if || \ | ||
| 326 | ((__sk)->sk_bound_dev_if == (__dif))) && \ | ||
| 327 | net_eq(sock_net(__sk), (__net))) | ||
| 328 | |||
| 329 | #endif /* _IPV6_H */ | 320 | #endif /* _IPV6_H */ |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 03a4ea37ba86..1e8b0cf30792 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -49,6 +49,10 @@ | |||
| 49 | #define GICD_CTLR_ENABLE_G1A (1U << 1) | 49 | #define GICD_CTLR_ENABLE_G1A (1U << 1) |
| 50 | #define GICD_CTLR_ENABLE_G1 (1U << 0) | 50 | #define GICD_CTLR_ENABLE_G1 (1U << 0) |
| 51 | 51 | ||
| 52 | #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) | ||
| 53 | #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) | ||
| 54 | #define GICD_TYPER_LPIS (1U << 17) | ||
| 55 | |||
| 52 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) | 56 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) |
| 53 | #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) | 57 | #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) |
| 54 | 58 | ||
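The two GICD_TYPER helpers decode the IDbits and ITLinesNumber fields: GICD_TYPER_IRQS() yields (ITLinesNumber + 1) * 32 interrupt lines (a field value of 7 gives 256), and GICD_TYPER_ID_BITS() yields field + 1 ID bits (a field value of 15 gives 16). A short sketch, with gic_dist_base assumed to be the mapped distributor base and GICD_TYPER the type register offset from the same header:

	u32 typer = readl_relaxed(gic_dist_base + GICD_TYPER);
	u32 nr_irqs = GICD_TYPER_IRQS(typer);		/* ITLinesNumber 7 -> 256 */
	u32 id_bits = GICD_TYPER_ID_BITS(typer);	/* field value 15 -> 16 bits */
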
| @@ -76,9 +80,27 @@ | |||
| 76 | #define GICR_MOVALLR 0x0110 | 80 | #define GICR_MOVALLR 0x0110 |
| 77 | #define GICR_PIDR2 GICD_PIDR2 | 81 | #define GICR_PIDR2 GICD_PIDR2 |
| 78 | 82 | ||
| 83 | #define GICR_CTLR_ENABLE_LPIS (1UL << 0) | ||
| 84 | |||
| 85 | #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) | ||
| 86 | |||
| 79 | #define GICR_WAKER_ProcessorSleep (1U << 1) | 87 | #define GICR_WAKER_ProcessorSleep (1U << 1) |
| 80 | #define GICR_WAKER_ChildrenAsleep (1U << 2) | 88 | #define GICR_WAKER_ChildrenAsleep (1U << 2) |
| 81 | 89 | ||
| 90 | #define GICR_PROPBASER_NonShareable (0U << 10) | ||
| 91 | #define GICR_PROPBASER_InnerShareable (1U << 10) | ||
| 92 | #define GICR_PROPBASER_OuterShareable (2U << 10) | ||
| 93 | #define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) | ||
| 94 | #define GICR_PROPBASER_nCnB (0U << 7) | ||
| 95 | #define GICR_PROPBASER_nC (1U << 7) | ||
| 96 | #define GICR_PROPBASER_RaWt (2U << 7) | ||
| 97 | #define GICR_PROPBASER_RaWb (3U << 7) | ||
| 98 | #define GICR_PROPBASER_WaWt (4U << 7) | ||
| 99 | #define GICR_PROPBASER_WaWb (5U << 7) | ||
| 100 | #define GICR_PROPBASER_RaWaWt (6U << 7) | ||
| 101 | #define GICR_PROPBASER_RaWaWb (7U << 7) | ||
| 102 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) | ||
| 103 | |||
| 82 | /* | 104 | /* |
| 83 | * Re-Distributor registers, offsets from SGI_base | 105 | * Re-Distributor registers, offsets from SGI_base |
| 84 | */ | 106 | */ |
| @@ -91,9 +113,93 @@ | |||
| 91 | #define GICR_IPRIORITYR0 GICD_IPRIORITYR | 113 | #define GICR_IPRIORITYR0 GICD_IPRIORITYR |
| 92 | #define GICR_ICFGR0 GICD_ICFGR | 114 | #define GICR_ICFGR0 GICD_ICFGR |
| 93 | 115 | ||
| 116 | #define GICR_TYPER_PLPIS (1U << 0) | ||
| 94 | #define GICR_TYPER_VLPIS (1U << 1) | 117 | #define GICR_TYPER_VLPIS (1U << 1) |
| 95 | #define GICR_TYPER_LAST (1U << 4) | 118 | #define GICR_TYPER_LAST (1U << 4) |
| 96 | 119 | ||
| 120 | #define LPI_PROP_GROUP1 (1 << 1) | ||
| 121 | #define LPI_PROP_ENABLED (1 << 0) | ||
| 122 | |||
| 123 | /* | ||
| 124 | * ITS registers, offsets from ITS_base | ||
| 125 | */ | ||
| 126 | #define GITS_CTLR 0x0000 | ||
| 127 | #define GITS_IIDR 0x0004 | ||
| 128 | #define GITS_TYPER 0x0008 | ||
| 129 | #define GITS_CBASER 0x0080 | ||
| 130 | #define GITS_CWRITER 0x0088 | ||
| 131 | #define GITS_CREADR 0x0090 | ||
| 132 | #define GITS_BASER 0x0100 | ||
| 133 | #define GITS_PIDR2 GICR_PIDR2 | ||
| 134 | |||
| 135 | #define GITS_TRANSLATER 0x10040 | ||
| 136 | |||
| 137 | #define GITS_TYPER_PTA (1UL << 19) | ||
| 138 | |||
| 139 | #define GITS_CBASER_VALID (1UL << 63) | ||
| 140 | #define GITS_CBASER_nCnB (0UL << 59) | ||
| 141 | #define GITS_CBASER_nC (1UL << 59) | ||
| 142 | #define GITS_CBASER_RaWt (2UL << 59) | ||
| 143 | #define GITS_CBASER_RaWb (3UL << 59) | ||
| 144 | #define GITS_CBASER_WaWt (4UL << 59) | ||
| 145 | #define GITS_CBASER_WaWb (5UL << 59) | ||
| 146 | #define GITS_CBASER_RaWaWt (6UL << 59) | ||
| 147 | #define GITS_CBASER_RaWaWb (7UL << 59) | ||
| 148 | #define GITS_CBASER_NonShareable (0UL << 10) | ||
| 149 | #define GITS_CBASER_InnerShareable (1UL << 10) | ||
| 150 | #define GITS_CBASER_OuterShareable (2UL << 10) | ||
| 151 | #define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) | ||
| 152 | |||
| 153 | #define GITS_BASER_NR_REGS 8 | ||
| 154 | |||
| 155 | #define GITS_BASER_VALID (1UL << 63) | ||
| 156 | #define GITS_BASER_nCnB (0UL << 59) | ||
| 157 | #define GITS_BASER_nC (1UL << 59) | ||
| 158 | #define GITS_BASER_RaWt (2UL << 59) | ||
| 159 | #define GITS_BASER_RaWb (3UL << 59) | ||
| 160 | #define GITS_BASER_WaWt (4UL << 59) | ||
| 161 | #define GITS_BASER_WaWb (5UL << 59) | ||
| 162 | #define GITS_BASER_RaWaWt (6UL << 59) | ||
| 163 | #define GITS_BASER_RaWaWb (7UL << 59) | ||
| 164 | #define GITS_BASER_TYPE_SHIFT (56) | ||
| 165 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) | ||
| 166 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | ||
| 167 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) | ||
| 168 | #define GITS_BASER_NonShareable (0UL << 10) | ||
| 169 | #define GITS_BASER_InnerShareable (1UL << 10) | ||
| 170 | #define GITS_BASER_OuterShareable (2UL << 10) | ||
| 171 | #define GITS_BASER_SHAREABILITY_SHIFT (10) | ||
| 172 | #define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) | ||
| 173 | #define GITS_BASER_PAGE_SIZE_SHIFT (8) | ||
| 174 | #define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
| 175 | #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
| 176 | #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
| 177 | #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) | ||
| 178 | |||
| 179 | #define GITS_BASER_TYPE_NONE 0 | ||
| 180 | #define GITS_BASER_TYPE_DEVICE 1 | ||
| 181 | #define GITS_BASER_TYPE_VCPU 2 | ||
| 182 | #define GITS_BASER_TYPE_CPU 3 | ||
| 183 | #define GITS_BASER_TYPE_COLLECTION 4 | ||
| 184 | #define GITS_BASER_TYPE_RESERVED5 5 | ||
| 185 | #define GITS_BASER_TYPE_RESERVED6 6 | ||
| 186 | #define GITS_BASER_TYPE_RESERVED7 7 | ||
| 187 | |||
| 188 | /* | ||
| 189 | * ITS commands | ||
| 190 | */ | ||
| 191 | #define GITS_CMD_MAPD 0x08 | ||
| 192 | #define GITS_CMD_MAPC 0x09 | ||
| 193 | #define GITS_CMD_MAPVI 0x0a | ||
| 194 | #define GITS_CMD_MOVI 0x01 | ||
| 195 | #define GITS_CMD_DISCARD 0x0f | ||
| 196 | #define GITS_CMD_INV 0x0c | ||
| 197 | #define GITS_CMD_MOVALL 0x0e | ||
| 198 | #define GITS_CMD_INVALL 0x0d | ||
| 199 | #define GITS_CMD_INT 0x03 | ||
| 200 | #define GITS_CMD_CLEAR 0x04 | ||
| 201 | #define GITS_CMD_SYNC 0x05 | ||
| 202 | |||
| 97 | /* | 203 | /* |
| 98 | * CPU interface registers | 204 | * CPU interface registers |
| 99 | */ | 205 | */ |
| @@ -189,12 +295,34 @@ | |||
| 189 | 295 | ||
| 190 | #include <linux/stringify.h> | 296 | #include <linux/stringify.h> |
| 191 | 297 | ||
| 298 | /* | ||
| 299 | * We need a value to serve as an irq-type for LPIs. Choose one that will | ||
| 300 | * hopefully pique the interest of the reviewer. | ||
| 301 | */ | ||
| 302 | #define GIC_IRQ_TYPE_LPI 0xa110c8ed | ||
| 303 | |||
| 304 | struct rdists { | ||
| 305 | struct { | ||
| 306 | void __iomem *rd_base; | ||
| 307 | struct page *pend_page; | ||
| 308 | phys_addr_t phys_base; | ||
| 309 | } __percpu *rdist; | ||
| 310 | struct page *prop_page; | ||
| 311 | int id_bits; | ||
| 312 | u64 flags; | ||
| 313 | }; | ||
| 314 | |||
| 192 | static inline void gic_write_eoir(u64 irq) | 315 | static inline void gic_write_eoir(u64 irq) |
| 193 | { | 316 | { |
| 194 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); | 317 | asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); |
| 195 | isb(); | 318 | isb(); |
| 196 | } | 319 | } |
| 197 | 320 | ||
| 321 | struct irq_domain; | ||
| 322 | int its_cpu_init(void); | ||
| 323 | int its_init(struct device_node *node, struct rdists *rdists, | ||
| 324 | struct irq_domain *domain); | ||
| 325 | |||
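For context, here is a hedged sketch of how a GICv3 driver might call the two entry points declared above during probe. Only its_init() and its_cpu_init() come from this header; the surrounding names are invented, and per-CPU setup normally runs from CPU bring-up rather than inline.

```c
static struct rdists gic_rdists;	/* redistributor bases filled in elsewhere */

static int __init gic_its_probe(struct device_node *node,
				struct irq_domain *gic_domain)
{
	int err;

	/* Register the ITS underneath the existing GIC irq_domain. */
	err = its_init(node, &gic_rdists, gic_domain);
	if (err)
		return err;

	/* Per-CPU LPI/redistributor setup; usually run on each CPU as it comes up. */
	return its_cpu_init();
}
```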
| 198 | #endif | 326 | #endif |
| 199 | 327 | ||
| 200 | #endif | 328 | #endif |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 13eed92c7d24..71d706d5f169 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
| @@ -91,6 +91,8 @@ | |||
| 91 | 91 | ||
| 92 | #ifndef __ASSEMBLY__ | 92 | #ifndef __ASSEMBLY__ |
| 93 | 93 | ||
| 94 | #include <linux/irqdomain.h> | ||
| 95 | |||
| 94 | struct device_node; | 96 | struct device_node; |
| 95 | 97 | ||
| 96 | extern struct irq_chip gic_arch_extn; | 98 | extern struct irq_chip gic_arch_extn; |
| @@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start, | |||
| 106 | gic_init_bases(nr, start, dist, cpu, 0, NULL); | 108 | gic_init_bases(nr, start, dist, cpu, 0, NULL); |
| 107 | } | 109 | } |
| 108 | 110 | ||
| 111 | int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); | ||
| 112 | |||
| 109 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq); | 113 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq); |
| 110 | int gic_get_cpu_id(unsigned int cpu); | 114 | int gic_get_cpu_id(unsigned int cpu); |
| 111 | void gic_migrate_target(unsigned int new_cpu_id); | 115 | void gic_migrate_target(unsigned int new_cpu_id); |
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 000000000000..420f77b34d02 --- /dev/null +++ b/include/linux/irqchip/mips-gic.h | |||
| @@ -0,0 +1,249 @@ | |||
| 1 | /* | ||
| 2 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 3 | * License. See the file "COPYING" in the main directory of this archive | ||
| 4 | * for more details. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2000, 07 MIPS Technologies, Inc. | ||
| 7 | */ | ||
| 8 | #ifndef __LINUX_IRQCHIP_MIPS_GIC_H | ||
| 9 | #define __LINUX_IRQCHIP_MIPS_GIC_H | ||
| 10 | |||
| 11 | #include <linux/clocksource.h> | ||
| 12 | |||
| 13 | #define GIC_MAX_INTRS 256 | ||
| 14 | |||
| 15 | /* Constants */ | ||
| 16 | #define GIC_POL_POS 1 | ||
| 17 | #define GIC_POL_NEG 0 | ||
| 18 | #define GIC_TRIG_EDGE 1 | ||
| 19 | #define GIC_TRIG_LEVEL 0 | ||
| 20 | #define GIC_TRIG_DUAL_ENABLE 1 | ||
| 21 | #define GIC_TRIG_DUAL_DISABLE 0 | ||
| 22 | |||
| 23 | #define MSK(n) ((1 << (n)) - 1) | ||
| 24 | |||
| 25 | /* Accessors */ | ||
| 26 | #define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) | ||
| 27 | |||
| 28 | /* GIC Address Space */ | ||
| 29 | #define SHARED_SECTION_OFS 0x0000 | ||
| 30 | #define SHARED_SECTION_SIZE 0x8000 | ||
| 31 | #define VPE_LOCAL_SECTION_OFS 0x8000 | ||
| 32 | #define VPE_LOCAL_SECTION_SIZE 0x4000 | ||
| 33 | #define VPE_OTHER_SECTION_OFS 0xc000 | ||
| 34 | #define VPE_OTHER_SECTION_SIZE 0x4000 | ||
| 35 | #define USM_VISIBLE_SECTION_OFS 0x10000 | ||
| 36 | #define USM_VISIBLE_SECTION_SIZE 0x10000 | ||
| 37 | |||
| 38 | /* Register Map for Shared Section */ | ||
| 39 | |||
| 40 | #define GIC_SH_CONFIG_OFS 0x0000 | ||
| 41 | |||
| 42 | /* Shared Global Counter */ | ||
| 43 | #define GIC_SH_COUNTER_31_00_OFS 0x0010 | ||
| 44 | #define GIC_SH_COUNTER_63_32_OFS 0x0014 | ||
| 45 | #define GIC_SH_REVISIONID_OFS 0x0020 | ||
| 46 | |||
| 47 | /* Convert an interrupt number to a byte offset/bit for multi-word registers */ | ||
| 48 | #define GIC_INTR_OFS(intr) (((intr) / 32) * 4) | ||
| 49 | #define GIC_INTR_BIT(intr) ((intr) % 32) | ||
| 50 | |||
| 51 | /* Polarity : Reset Value is always 0 */ | ||
| 52 | #define GIC_SH_SET_POLARITY_OFS 0x0100 | ||
| 53 | |||
| 54 | /* Triggering : Reset Value is always 0 */ | ||
| 55 | #define GIC_SH_SET_TRIGGER_OFS 0x0180 | ||
| 56 | |||
| 57 | /* Dual edge triggering : Reset Value is always 0 */ | ||
| 58 | #define GIC_SH_SET_DUAL_OFS 0x0200 | ||
| 59 | |||
| 60 | /* Set/Clear corresponding bit in Edge Detect Register */ | ||
| 61 | #define GIC_SH_WEDGE_OFS 0x0280 | ||
| 62 | |||
| 63 | /* Mask manipulation */ | ||
| 64 | #define GIC_SH_RMASK_OFS 0x0300 | ||
| 65 | #define GIC_SH_SMASK_OFS 0x0380 | ||
| 66 | |||
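The GIC_REG() token-pasting accessor combines with GIC_INTR_OFS()/GIC_INTR_BIT() to address the multi-word mask banks. A hedged sketch (not the driver itself) of enabling a shared interrupt via the Set Mask registers, assuming gic_base is the ioremapped GIC base:

```c
static void gic_demo_unmask(void __iomem *gic_base, unsigned int intr)
{
	/* GIC_REG(SHARED, GIC_SH_SMASK) expands to SHARED_SECTION_OFS + GIC_SH_SMASK_OFS */
	unsigned int reg = GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr);

	writel(1 << GIC_INTR_BIT(intr), gic_base + reg);
}
```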
| 67 | /* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ | ||
| 68 | #define GIC_SH_MASK_OFS 0x0400 | ||
| 69 | |||
| 70 | /* Pending Global Interrupts (RO) */ | ||
| 71 | #define GIC_SH_PEND_OFS 0x0480 | ||
| 72 | |||
| 73 | /* Maps Interrupt X to a Pin */ | ||
| 74 | #define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 | ||
| 75 | #define GIC_SH_MAP_TO_PIN(intr) (4 * (intr)) | ||
| 76 | |||
| 77 | /* Maps Interrupt X to a VPE */ | ||
| 78 | #define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 | ||
| 79 | #define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ | ||
| 80 | ((32 * (intr)) + (((vpe) / 32) * 4)) | ||
| 81 | #define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) | ||
| 82 | |||
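Likewise for routing, a hedged sketch of pointing shared interrupt intr at VPE vpe with the offset/bit helpers above; the real driver may read-modify-write the bitmap rather than store it outright, and the function name is invented.

```c
static void gic_demo_map_to_vpe(void __iomem *gic_base, unsigned int intr,
				unsigned int vpe)
{
	unsigned int reg = GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
			   GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe);

	writel(GIC_SH_MAP_TO_VPE_REG_BIT(vpe), gic_base + reg);
}
```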
| 83 | /* Register Map for Local Section */ | ||
| 84 | #define GIC_VPE_CTL_OFS 0x0000 | ||
| 85 | #define GIC_VPE_PEND_OFS 0x0004 | ||
| 86 | #define GIC_VPE_MASK_OFS 0x0008 | ||
| 87 | #define GIC_VPE_RMASK_OFS 0x000c | ||
| 88 | #define GIC_VPE_SMASK_OFS 0x0010 | ||
| 89 | #define GIC_VPE_WD_MAP_OFS 0x0040 | ||
| 90 | #define GIC_VPE_COMPARE_MAP_OFS 0x0044 | ||
| 91 | #define GIC_VPE_TIMER_MAP_OFS 0x0048 | ||
| 92 | #define GIC_VPE_FDC_MAP_OFS 0x004c | ||
| 93 | #define GIC_VPE_PERFCTR_MAP_OFS 0x0050 | ||
| 94 | #define GIC_VPE_SWINT0_MAP_OFS 0x0054 | ||
| 95 | #define GIC_VPE_SWINT1_MAP_OFS 0x0058 | ||
| 96 | #define GIC_VPE_OTHER_ADDR_OFS 0x0080 | ||
| 97 | #define GIC_VPE_WD_CONFIG0_OFS 0x0090 | ||
| 98 | #define GIC_VPE_WD_COUNT0_OFS 0x0094 | ||
| 99 | #define GIC_VPE_WD_INITIAL0_OFS 0x0098 | ||
| 100 | #define GIC_VPE_COMPARE_LO_OFS 0x00a0 | ||
| 101 | #define GIC_VPE_COMPARE_HI_OFS 0x00a4 | ||
| 102 | |||
| 103 | #define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 | ||
| 104 | #define GIC_VPE_EIC_SS(intr) (4 * (intr)) | ||
| 105 | |||
| 106 | #define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 | ||
| 107 | #define GIC_VPE_EIC_VEC(intr) (4 * (intr)) | ||
| 108 | |||
| 109 | #define GIC_VPE_TENABLE_NMI_OFS 0x1000 | ||
| 110 | #define GIC_VPE_TENABLE_YQ_OFS 0x1004 | ||
| 111 | #define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 | ||
| 112 | #define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 | ||
| 113 | |||
| 114 | /* User Mode Visible Section Register Map */ | ||
| 115 | #define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 | ||
| 116 | #define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 | ||
| 117 | |||
| 118 | /* Masks */ | ||
| 119 | #define GIC_SH_CONFIG_COUNTSTOP_SHF 28 | ||
| 120 | #define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) | ||
| 121 | |||
| 122 | #define GIC_SH_CONFIG_COUNTBITS_SHF 24 | ||
| 123 | #define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) | ||
| 124 | |||
| 125 | #define GIC_SH_CONFIG_NUMINTRS_SHF 16 | ||
| 126 | #define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) | ||
| 127 | |||
| 128 | #define GIC_SH_CONFIG_NUMVPES_SHF 0 | ||
| 129 | #define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) | ||
| 130 | |||
| 131 | #define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) | ||
| 132 | #define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) | ||
| 133 | |||
| 134 | #define GIC_MAP_TO_PIN_SHF 31 | ||
| 135 | #define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) | ||
| 136 | #define GIC_MAP_TO_NMI_SHF 30 | ||
| 137 | #define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) | ||
| 138 | #define GIC_MAP_TO_YQ_SHF 29 | ||
| 139 | #define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) | ||
| 140 | #define GIC_MAP_SHF 0 | ||
| 141 | #define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) | ||
| 142 | |||
| 143 | /* GIC_VPE_CTL Masks */ | ||
| 144 | #define GIC_VPE_CTL_FDC_RTBL_SHF 4 | ||
| 145 | #define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) | ||
| 146 | #define GIC_VPE_CTL_SWINT_RTBL_SHF 3 | ||
| 147 | #define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) | ||
| 148 | #define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 | ||
| 149 | #define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) | ||
| 150 | #define GIC_VPE_CTL_TIMER_RTBL_SHF 1 | ||
| 151 | #define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) | ||
| 152 | #define GIC_VPE_CTL_EIC_MODE_SHF 0 | ||
| 153 | #define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) | ||
| 154 | |||
| 155 | /* GIC_VPE_PEND Masks */ | ||
| 156 | #define GIC_VPE_PEND_WD_SHF 0 | ||
| 157 | #define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) | ||
| 158 | #define GIC_VPE_PEND_CMP_SHF 1 | ||
| 159 | #define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) | ||
| 160 | #define GIC_VPE_PEND_TIMER_SHF 2 | ||
| 161 | #define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) | ||
| 162 | #define GIC_VPE_PEND_PERFCOUNT_SHF 3 | ||
| 163 | #define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) | ||
| 164 | #define GIC_VPE_PEND_SWINT0_SHF 4 | ||
| 165 | #define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) | ||
| 166 | #define GIC_VPE_PEND_SWINT1_SHF 5 | ||
| 167 | #define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) | ||
| 168 | |||
| 169 | /* GIC_VPE_RMASK Masks */ | ||
| 170 | #define GIC_VPE_RMASK_WD_SHF 0 | ||
| 171 | #define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) | ||
| 172 | #define GIC_VPE_RMASK_CMP_SHF 1 | ||
| 173 | #define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) | ||
| 174 | #define GIC_VPE_RMASK_TIMER_SHF 2 | ||
| 175 | #define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) | ||
| 176 | #define GIC_VPE_RMASK_PERFCNT_SHF 3 | ||
| 177 | #define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) | ||
| 178 | #define GIC_VPE_RMASK_SWINT0_SHF 4 | ||
| 179 | #define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) | ||
| 180 | #define GIC_VPE_RMASK_SWINT1_SHF 5 | ||
| 181 | #define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) | ||
| 182 | |||
| 183 | /* GIC_VPE_SMASK Masks */ | ||
| 184 | #define GIC_VPE_SMASK_WD_SHF 0 | ||
| 185 | #define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) | ||
| 186 | #define GIC_VPE_SMASK_CMP_SHF 1 | ||
| 187 | #define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) | ||
| 188 | #define GIC_VPE_SMASK_TIMER_SHF 2 | ||
| 189 | #define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) | ||
| 190 | #define GIC_VPE_SMASK_PERFCNT_SHF 3 | ||
| 191 | #define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) | ||
| 192 | #define GIC_VPE_SMASK_SWINT0_SHF 4 | ||
| 193 | #define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) | ||
| 194 | #define GIC_VPE_SMASK_SWINT1_SHF 5 | ||
| 195 | #define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) | ||
| 196 | |||
| 197 | /* GIC nomenclature for Core Interrupt Pins. */ | ||
| 198 | #define GIC_CPU_INT0 0 /* Core Interrupt 2 */ | ||
| 199 | #define GIC_CPU_INT1 1 /* . */ | ||
| 200 | #define GIC_CPU_INT2 2 /* . */ | ||
| 201 | #define GIC_CPU_INT3 3 /* . */ | ||
| 202 | #define GIC_CPU_INT4 4 /* . */ | ||
| 203 | #define GIC_CPU_INT5 5 /* Core Interrupt 7 */ | ||
| 204 | |||
| 205 | /* Add 2 to convert GIC CPU pin to core interrupt */ | ||
| 206 | #define GIC_CPU_PIN_OFFSET 2 | ||
| 207 | |||
| 208 | /* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ | ||
| 209 | #define GIC_CPU_TO_VEC_OFFSET 2 | ||
| 210 | |||
| 211 | /* An interrupt mapped to pin X makes the GIC generate vector (X+1). */ | ||
| 212 | #define GIC_PIN_TO_VEC_OFFSET 1 | ||
| 213 | |||
| 214 | /* Local GIC interrupts. */ | ||
| 215 | #define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ | ||
| 216 | #define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ | ||
| 217 | #define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ | ||
| 218 | #define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ | ||
| 219 | #define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ | ||
| 220 | #define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ | ||
| 221 | #define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ | ||
| 222 | #define GIC_NUM_LOCAL_INTRS 7 | ||
| 223 | |||
| 224 | /* Convert between local/shared IRQ number and GIC HW IRQ number. */ | ||
| 225 | #define GIC_LOCAL_HWIRQ_BASE 0 | ||
| 226 | #define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) | ||
| 227 | #define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) | ||
| 228 | #define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS | ||
| 229 | #define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) | ||
| 230 | #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) | ||
| 231 | |||
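The conversion macros above define a linear hwirq space with the seven local interrupts first and the shared interrupts offset past them. A small hedged sketch of the resulting numbering (helper names invented):

```c
static unsigned int demo_timer_hwirq(void)
{
	return GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER);		/* hwirq 2 */
}

static unsigned int demo_shared_hwirq(unsigned int shared_intr)
{
	return GIC_SHARED_TO_HWIRQ(shared_intr);	/* shared 3 -> hwirq 10 */
}
```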
| 232 | extern unsigned int gic_present; | ||
| 233 | |||
| 234 | extern void gic_init(unsigned long gic_base_addr, | ||
| 235 | unsigned long gic_addrspace_size, unsigned int cpu_vec, | ||
| 236 | unsigned int irqbase); | ||
| 237 | extern void gic_clocksource_init(unsigned int); | ||
| 238 | extern cycle_t gic_read_count(void); | ||
| 239 | extern unsigned int gic_get_count_width(void); | ||
| 240 | extern cycle_t gic_read_compare(void); | ||
| 241 | extern void gic_write_compare(cycle_t cnt); | ||
| 242 | extern void gic_write_cpu_compare(cycle_t cnt, int cpu); | ||
| 243 | extern void gic_send_ipi(unsigned int intr); | ||
| 244 | extern unsigned int plat_ipi_call_int_xlate(unsigned int); | ||
| 245 | extern unsigned int plat_ipi_resched_int_xlate(unsigned int); | ||
| 246 | extern unsigned int gic_get_timer_pending(void); | ||
| 247 | extern int gic_get_c0_compare_int(void); | ||
| 248 | extern int gic_get_c0_perfcount_int(void); | ||
| 249 | #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ | ||
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h deleted file mode 100644 index 2dcd1b3aafc8..000000000000 --- a/include/linux/kcmp.h +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | #ifndef _LINUX_KCMP_H | ||
| 2 | #define _LINUX_KCMP_H | ||
| 3 | |||
| 4 | /* Comparison type */ | ||
| 5 | enum kcmp_type { | ||
| 6 | KCMP_FILE, | ||
| 7 | KCMP_VM, | ||
| 8 | KCMP_FILES, | ||
| 9 | KCMP_FS, | ||
| 10 | KCMP_SIGHAND, | ||
| 11 | KCMP_IO, | ||
| 12 | KCMP_SYSVSEM, | ||
| 13 | |||
| 14 | KCMP_TYPES, | ||
| 15 | }; | ||
| 16 | |||
| 17 | #endif /* _LINUX_KCMP_H */ | ||
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index 866caaa9e2bb..c2ce155d83cc 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h | |||
| @@ -22,4 +22,17 @@ | |||
| 22 | */ | 22 | */ |
| 23 | #define KERN_CONT "" | 23 | #define KERN_CONT "" |
| 24 | 24 | ||
| 25 | /* integer equivalents of KERN_<LEVEL> */ | ||
| 26 | #define LOGLEVEL_SCHED -2 /* Deferred messages from sched code | ||
| 27 | * are set to this special level */ | ||
| 28 | #define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ | ||
| 29 | #define LOGLEVEL_EMERG 0 /* system is unusable */ | ||
| 30 | #define LOGLEVEL_ALERT 1 /* action must be taken immediately */ | ||
| 31 | #define LOGLEVEL_CRIT 2 /* critical conditions */ | ||
| 32 | #define LOGLEVEL_ERR 3 /* error conditions */ | ||
| 33 | #define LOGLEVEL_WARNING 4 /* warning conditions */ | ||
| 34 | #define LOGLEVEL_NOTICE 5 /* normal but significant condition */ | ||
| 35 | #define LOGLEVEL_INFO 6 /* informational */ | ||
| 36 | #define LOGLEVEL_DEBUG 7 /* debug-level messages */ | ||
| 37 | |||
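A hedged sketch of how a KERN_<LEVEL> prefix relates to the LOGLEVEL_* integers above; printk_get_level() from <linux/printk.h> returns the level character of a prefixed string, or 0 when no prefix is present. The helper name is invented.

```c
static int demo_loglevel(const char *msg)
{
	int c = printk_get_level(msg);

	/* KERN_WARNING "..." yields '4', i.e. LOGLEVEL_WARNING. */
	return c ? c - '0' : LOGLEVEL_DEFAULT;
}
```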
| 25 | #endif | 38 | #endif |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 446d76a87ba1..5449d2f4a1ef 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -416,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr); | |||
| 416 | extern int kernel_text_address(unsigned long addr); | 416 | extern int kernel_text_address(unsigned long addr); |
| 417 | extern int func_ptr_is_kernel_text(void *ptr); | 417 | extern int func_ptr_is_kernel_text(void *ptr); |
| 418 | 418 | ||
| 419 | struct pid; | ||
| 420 | extern struct pid *session_of_pgrp(struct pid *pgrp); | ||
| 421 | |||
| 422 | unsigned long int_sqrt(unsigned long); | 419 | unsigned long int_sqrt(unsigned long); |
| 423 | 420 | ||
| 424 | extern void bust_spinlocks(int yes); | 421 | extern void bust_spinlocks(int yes); |
| @@ -427,6 +424,7 @@ extern int panic_timeout; | |||
| 427 | extern int panic_on_oops; | 424 | extern int panic_on_oops; |
| 428 | extern int panic_on_unrecovered_nmi; | 425 | extern int panic_on_unrecovered_nmi; |
| 429 | extern int panic_on_io_nmi; | 426 | extern int panic_on_io_nmi; |
| 427 | extern int panic_on_warn; | ||
| 430 | extern int sysctl_panic_on_stackoverflow; | 428 | extern int sysctl_panic_on_stackoverflow; |
| 431 | /* | 429 | /* |
| 432 | * Only to be used by arch init code. If the user over-wrote the default | 430 | * Only to be used by arch init code. If the user over-wrote the default |
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30faf797c2c3..d4e01b358341 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
| @@ -179,6 +179,7 @@ struct kernfs_open_file { | |||
| 179 | struct mutex mutex; | 179 | struct mutex mutex; |
| 180 | int event; | 180 | int event; |
| 181 | struct list_head list; | 181 | struct list_head list; |
| 182 | char *prealloc_buf; | ||
| 182 | 183 | ||
| 183 | size_t atomic_write_len; | 184 | size_t atomic_write_len; |
| 184 | bool mmapped; | 185 | bool mmapped; |
| @@ -214,6 +215,13 @@ struct kernfs_ops { | |||
| 214 | * larger ones are rejected with -E2BIG. | 215 | * larger ones are rejected with -E2BIG. |
| 215 | */ | 216 | */ |
| 216 | size_t atomic_write_len; | 217 | size_t atomic_write_len; |
| 218 | /* | ||
| 219 | * "prealloc" causes a buffer to be allocated at open for | ||
| 220 | * all read/write requests. As ->seq_show uses seq_read() | ||
| 221 | * which does its own allocation, it is incompatible with | ||
| 222 | * ->prealloc. Provide ->read and ->write with ->prealloc. | ||
| 223 | */ | ||
| 224 | bool prealloc; | ||
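A hedged example of an attribute opting into the preallocated buffer; the ops and handler names are invented. With ->prealloc set, the buffer handed to ->write() is the per-open-file prealloc_buf, so the write path itself performs no allocation.

```c
static ssize_t demo_write(struct kernfs_open_file *of, char *buf,
			  size_t bytes, loff_t off)
{
	/* "buf" already holds the user data kernfs copied in for us. */
	return bytes;
}

static const struct kernfs_ops demo_ops = {
	.prealloc = true,
	.write	  = demo_write,
};
```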
| 217 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, | 225 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, |
| 218 | loff_t off); | 226 | loff_t off); |
| 219 | 227 | ||
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 057e95971014..e705467ddb47 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #ifndef __KMEMLEAK_H | 21 | #ifndef __KMEMLEAK_H |
| 22 | #define __KMEMLEAK_H | 22 | #define __KMEMLEAK_H |
| 23 | 23 | ||
| 24 | #include <linux/slab.h> | ||
| 25 | |||
| 24 | #ifdef CONFIG_DEBUG_KMEMLEAK | 26 | #ifdef CONFIG_DEBUG_KMEMLEAK |
| 25 | 27 | ||
| 26 | extern void kmemleak_init(void) __ref; | 28 | extern void kmemleak_init(void) __ref; |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f7296e57d614..5297f9fa0ef2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
| @@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | |||
| 335 | extern int arch_prepare_kprobe_ftrace(struct kprobe *p); | 335 | extern int arch_prepare_kprobe_ftrace(struct kprobe *p); |
| 336 | #endif | 336 | #endif |
| 337 | 337 | ||
| 338 | int arch_check_ftrace_location(struct kprobe *p); | ||
| 338 | 339 | ||
| 339 | /* Get the kprobe at this addr (if any) - called with preemption disabled */ | 340 | /* Get the kprobe at this addr (if any) - called with preemption disabled */ |
| 340 | struct kprobe *get_kprobe(void *addr); | 341 | struct kprobe *get_kprobe(void *addr); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a6059bdf7b03..26f106022c88 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | * include/linux/kvm_h. | 43 | * include/linux/kvm_h. |
| 44 | */ | 44 | */ |
| 45 | #define KVM_MEMSLOT_INVALID (1UL << 16) | 45 | #define KVM_MEMSLOT_INVALID (1UL << 16) |
| 46 | #define KVM_MEMSLOT_INCOHERENT (1UL << 17) | ||
| 46 | 47 | ||
| 47 | /* Two fragments for cross MMIO pages. */ | 48 | /* Two fragments for cross MMIO pages. */ |
| 48 | #define KVM_MAX_MMIO_FRAGMENTS 2 | 49 | #define KVM_MAX_MMIO_FRAGMENTS 2 |
| @@ -353,6 +354,8 @@ struct kvm_memslots { | |||
| 353 | struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; | 354 | struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; |
| 354 | /* The mapping table from slot id to the index in memslots[]. */ | 355 | /* The mapping table from slot id to the index in memslots[]. */ |
| 355 | short id_to_index[KVM_MEM_SLOTS_NUM]; | 356 | short id_to_index[KVM_MEM_SLOTS_NUM]; |
| 357 | atomic_t lru_slot; | ||
| 358 | int used_slots; | ||
| 356 | }; | 359 | }; |
| 357 | 360 | ||
| 358 | struct kvm { | 361 | struct kvm { |
| @@ -395,7 +398,6 @@ struct kvm { | |||
| 395 | * Update side is protected by irq_lock. | 398 | * Update side is protected by irq_lock. |
| 396 | */ | 399 | */ |
| 397 | struct kvm_irq_routing_table __rcu *irq_routing; | 400 | struct kvm_irq_routing_table __rcu *irq_routing; |
| 398 | struct hlist_head mask_notifier_list; | ||
| 399 | #endif | 401 | #endif |
| 400 | #ifdef CONFIG_HAVE_KVM_IRQFD | 402 | #ifdef CONFIG_HAVE_KVM_IRQFD |
| 401 | struct hlist_head irq_ack_notifier_list; | 403 | struct hlist_head irq_ack_notifier_list; |
| @@ -447,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | |||
| 447 | int __must_check vcpu_load(struct kvm_vcpu *vcpu); | 449 | int __must_check vcpu_load(struct kvm_vcpu *vcpu); |
| 448 | void vcpu_put(struct kvm_vcpu *vcpu); | 450 | void vcpu_put(struct kvm_vcpu *vcpu); |
| 449 | 451 | ||
| 452 | #ifdef __KVM_HAVE_IOAPIC | ||
| 453 | void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); | ||
| 454 | #else | ||
| 455 | static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) | ||
| 456 | { | ||
| 457 | } | ||
| 458 | #endif | ||
| 459 | |||
| 450 | #ifdef CONFIG_HAVE_KVM_IRQFD | 460 | #ifdef CONFIG_HAVE_KVM_IRQFD |
| 451 | int kvm_irqfd_init(void); | 461 | int kvm_irqfd_init(void); |
| 452 | void kvm_irqfd_exit(void); | 462 | void kvm_irqfd_exit(void); |
| @@ -711,44 +721,6 @@ struct kvm_irq_ack_notifier { | |||
| 711 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | 721 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
| 712 | }; | 722 | }; |
| 713 | 723 | ||
| 714 | struct kvm_assigned_dev_kernel { | ||
| 715 | struct kvm_irq_ack_notifier ack_notifier; | ||
| 716 | struct list_head list; | ||
| 717 | int assigned_dev_id; | ||
| 718 | int host_segnr; | ||
| 719 | int host_busnr; | ||
| 720 | int host_devfn; | ||
| 721 | unsigned int entries_nr; | ||
| 722 | int host_irq; | ||
| 723 | bool host_irq_disabled; | ||
| 724 | bool pci_2_3; | ||
| 725 | struct msix_entry *host_msix_entries; | ||
| 726 | int guest_irq; | ||
| 727 | struct msix_entry *guest_msix_entries; | ||
| 728 | unsigned long irq_requested_type; | ||
| 729 | int irq_source_id; | ||
| 730 | int flags; | ||
| 731 | struct pci_dev *dev; | ||
| 732 | struct kvm *kvm; | ||
| 733 | spinlock_t intx_lock; | ||
| 734 | spinlock_t intx_mask_lock; | ||
| 735 | char irq_name[32]; | ||
| 736 | struct pci_saved_state *pci_saved_state; | ||
| 737 | }; | ||
| 738 | |||
| 739 | struct kvm_irq_mask_notifier { | ||
| 740 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); | ||
| 741 | int irq; | ||
| 742 | struct hlist_node link; | ||
| 743 | }; | ||
| 744 | |||
| 745 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, | ||
| 746 | struct kvm_irq_mask_notifier *kimn); | ||
| 747 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | ||
| 748 | struct kvm_irq_mask_notifier *kimn); | ||
| 749 | void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, | ||
| 750 | bool mask); | ||
| 751 | |||
| 752 | int kvm_irq_map_gsi(struct kvm *kvm, | 724 | int kvm_irq_map_gsi(struct kvm *kvm, |
| 753 | struct kvm_kernel_irq_routing_entry *entries, int gsi); | 725 | struct kvm_kernel_irq_routing_entry *entries, int gsi); |
| 754 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); | 726 | int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); |
| @@ -770,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | |||
| 770 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT | 742 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT |
| 771 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); | 743 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); |
| 772 | void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); | 744 | void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); |
| 773 | int kvm_iommu_map_guest(struct kvm *kvm); | ||
| 774 | int kvm_iommu_unmap_guest(struct kvm *kvm); | ||
| 775 | int kvm_assign_device(struct kvm *kvm, | ||
| 776 | struct kvm_assigned_dev_kernel *assigned_dev); | ||
| 777 | int kvm_deassign_device(struct kvm *kvm, | ||
| 778 | struct kvm_assigned_dev_kernel *assigned_dev); | ||
| 779 | #else | 745 | #else |
| 780 | static inline int kvm_iommu_map_pages(struct kvm *kvm, | 746 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
| 781 | struct kvm_memory_slot *slot) | 747 | struct kvm_memory_slot *slot) |
| @@ -787,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, | |||
| 787 | struct kvm_memory_slot *slot) | 753 | struct kvm_memory_slot *slot) |
| 788 | { | 754 | { |
| 789 | } | 755 | } |
| 790 | |||
| 791 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) | ||
| 792 | { | ||
| 793 | return 0; | ||
| 794 | } | ||
| 795 | #endif | 756 | #endif |
| 796 | 757 | ||
| 797 | static inline void kvm_guest_enter(void) | 758 | static inline void kvm_guest_enter(void) |
| @@ -832,12 +793,28 @@ static inline void kvm_guest_exit(void) | |||
| 832 | static inline struct kvm_memory_slot * | 793 | static inline struct kvm_memory_slot * |
| 833 | search_memslots(struct kvm_memslots *slots, gfn_t gfn) | 794 | search_memslots(struct kvm_memslots *slots, gfn_t gfn) |
| 834 | { | 795 | { |
| 835 | struct kvm_memory_slot *memslot; | 796 | int start = 0, end = slots->used_slots; |
| 797 | int slot = atomic_read(&slots->lru_slot); | ||
| 798 | struct kvm_memory_slot *memslots = slots->memslots; | ||
| 799 | |||
| 800 | if (gfn >= memslots[slot].base_gfn && | ||
| 801 | gfn < memslots[slot].base_gfn + memslots[slot].npages) | ||
| 802 | return &memslots[slot]; | ||
| 836 | 803 | ||
| 837 | kvm_for_each_memslot(memslot, slots) | 804 | while (start < end) { |
| 838 | if (gfn >= memslot->base_gfn && | 805 | slot = start + (end - start) / 2; |
| 839 | gfn < memslot->base_gfn + memslot->npages) | 806 | |
| 840 | return memslot; | 807 | if (gfn >= memslots[slot].base_gfn) |
| 808 | end = slot; | ||
| 809 | else | ||
| 810 | start = slot + 1; | ||
| 811 | } | ||
| 812 | |||
| 813 | if (gfn >= memslots[start].base_gfn && | ||
| 814 | gfn < memslots[start].base_gfn + memslots[start].npages) { | ||
| 815 | atomic_set(&slots->lru_slot, start); | ||
| 816 | return &memslots[start]; | ||
| 817 | } | ||
| 841 | 818 | ||
| 842 | return NULL; | 819 | return NULL; |
| 843 | } | 820 | } |
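A worked trace of the new lookup, assuming (as the code above implies) that memslots[] is kept sorted by base_gfn in descending order and that used_slots counts the live entries; the numbers are invented.

```c
/*
 *   idx   base_gfn   npages
 *    0    0x80000    0x10000
 *    1    0x10000    0x10000
 *    2    0x00000    0x08000
 *
 * search_memslots(slots, 0x12345): the cached lru_slot is tried first;
 * on a miss the binary search narrows to idx 1 (the first slot whose
 * base_gfn <= gfn), the range check passes, and idx 1 is written back
 * to lru_slot for the next lookup.
 */
```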
| @@ -1011,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } | |||
| 1011 | 988 | ||
| 1012 | #endif | 989 | #endif |
| 1013 | 990 | ||
| 1014 | #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT | ||
| 1015 | |||
| 1016 | long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | ||
| 1017 | unsigned long arg); | ||
| 1018 | |||
| 1019 | void kvm_free_all_assigned_devices(struct kvm *kvm); | ||
| 1020 | |||
| 1021 | #else | ||
| 1022 | |||
| 1023 | static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, | ||
| 1024 | unsigned long arg) | ||
| 1025 | { | ||
| 1026 | return -ENOTTY; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {} | ||
| 1030 | |||
| 1031 | #endif | ||
| 1032 | |||
| 1033 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) | 991 | static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) |
| 1034 | { | 992 | { |
| 1035 | set_bit(req, &vcpu->requests); | 993 | set_bit(req, &vcpu->requests); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index b606bb689a3e..931da7e917cf 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
| @@ -54,33 +54,6 @@ typedef u64 hfn_t; | |||
| 54 | 54 | ||
| 55 | typedef hfn_t pfn_t; | 55 | typedef hfn_t pfn_t; |
| 56 | 56 | ||
| 57 | union kvm_ioapic_redirect_entry { | ||
| 58 | u64 bits; | ||
| 59 | struct { | ||
| 60 | u8 vector; | ||
| 61 | u8 delivery_mode:3; | ||
| 62 | u8 dest_mode:1; | ||
| 63 | u8 delivery_status:1; | ||
| 64 | u8 polarity:1; | ||
| 65 | u8 remote_irr:1; | ||
| 66 | u8 trig_mode:1; | ||
| 67 | u8 mask:1; | ||
| 68 | u8 reserve:7; | ||
| 69 | u8 reserved[4]; | ||
| 70 | u8 dest_id; | ||
| 71 | } fields; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct kvm_lapic_irq { | ||
| 75 | u32 vector; | ||
| 76 | u32 delivery_mode; | ||
| 77 | u32 dest_mode; | ||
| 78 | u32 level; | ||
| 79 | u32 trig_mode; | ||
| 80 | u32 shorthand; | ||
| 81 | u32 dest_id; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct gfn_to_hva_cache { | 57 | struct gfn_to_hva_cache { |
| 85 | u64 generation; | 58 | u64 generation; |
| 86 | gpa_t gpa; | 59 | gpa_t gpa; |
diff --git a/include/linux/leds.h b/include/linux/leds.h index a57611d0c94e..cfceef32c9b3 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #define __LINUX_LEDS_H_INCLUDED | 13 | #define __LINUX_LEDS_H_INCLUDED |
| 14 | 14 | ||
| 15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
| 16 | #include <linux/mutex.h> | ||
| 16 | #include <linux/rwsem.h> | 17 | #include <linux/rwsem.h> |
| 17 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
| 18 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
| @@ -42,11 +43,20 @@ struct led_classdev { | |||
| 42 | #define LED_BLINK_ONESHOT (1 << 17) | 43 | #define LED_BLINK_ONESHOT (1 << 17) |
| 43 | #define LED_BLINK_ONESHOT_STOP (1 << 18) | 44 | #define LED_BLINK_ONESHOT_STOP (1 << 18) |
| 44 | #define LED_BLINK_INVERT (1 << 19) | 45 | #define LED_BLINK_INVERT (1 << 19) |
| 46 | #define LED_SYSFS_DISABLE (1 << 20) | ||
| 47 | #define SET_BRIGHTNESS_ASYNC (1 << 21) | ||
| 48 | #define SET_BRIGHTNESS_SYNC (1 << 22) | ||
| 45 | 49 | ||
| 46 | /* Set LED brightness level */ | 50 | /* Set LED brightness level */ |
| 47 | /* Must not sleep, use a workqueue if needed */ | 51 | /* Must not sleep, use a workqueue if needed */ |
| 48 | void (*brightness_set)(struct led_classdev *led_cdev, | 52 | void (*brightness_set)(struct led_classdev *led_cdev, |
| 49 | enum led_brightness brightness); | 53 | enum led_brightness brightness); |
| 54 | /* | ||
| 55 | * Set LED brightness level immediately - it can block the caller for | ||
| 56 | * the time required for accessing a LED device register. | ||
| 57 | */ | ||
| 58 | int (*brightness_set_sync)(struct led_classdev *led_cdev, | ||
| 59 | enum led_brightness brightness); | ||
| 50 | /* Get LED brightness level */ | 60 | /* Get LED brightness level */ |
| 51 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); | 61 | enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); |
| 52 | 62 | ||
| @@ -85,6 +95,9 @@ struct led_classdev { | |||
| 85 | /* true if activated - deactivate routine uses it to do cleanup */ | 95 | /* true if activated - deactivate routine uses it to do cleanup */ |
| 86 | bool activated; | 96 | bool activated; |
| 87 | #endif | 97 | #endif |
| 98 | |||
| 99 | /* Ensures consistent access to the LED Flash Class device */ | ||
| 100 | struct mutex led_access; | ||
| 88 | }; | 101 | }; |
| 89 | 102 | ||
| 90 | extern int led_classdev_register(struct device *parent, | 103 | extern int led_classdev_register(struct device *parent, |
| @@ -151,6 +164,33 @@ extern void led_set_brightness(struct led_classdev *led_cdev, | |||
| 151 | */ | 164 | */ |
| 152 | extern int led_update_brightness(struct led_classdev *led_cdev); | 165 | extern int led_update_brightness(struct led_classdev *led_cdev); |
| 153 | 166 | ||
| 167 | /** | ||
| 168 | * led_sysfs_disable - disable LED sysfs interface | ||
| 169 | * @led_cdev: the LED to set | ||
| 170 | * | ||
| 171 | * Disable the led_cdev's sysfs interface. | ||
| 172 | */ | ||
| 173 | extern void led_sysfs_disable(struct led_classdev *led_cdev); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * led_sysfs_enable - enable LED sysfs interface | ||
| 177 | * @led_cdev: the LED to set | ||
| 178 | * | ||
| 179 | * Enable the led_cdev's sysfs interface. | ||
| 180 | */ | ||
| 181 | extern void led_sysfs_enable(struct led_classdev *led_cdev); | ||
| 182 | |||
| 183 | /** | ||
| 184 | * led_sysfs_is_disabled - check if LED sysfs interface is disabled | ||
| 185 | * @led_cdev: the LED to query | ||
| 186 | * | ||
| 187 | * Returns: true if the led_cdev's sysfs interface is disabled. | ||
| 188 | */ | ||
| 189 | static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev) | ||
| 190 | { | ||
| 191 | return led_cdev->flags & LED_SYSFS_DISABLE; | ||
| 192 | } | ||
| 193 | |||
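A hedged sketch of a driver taking exclusive control of an LED and shutting out the sysfs brightness interface while it does so. The locking with the new led_access mutex is assumed from the declarations above; function names are invented and error handling is elided.

```c
static void demo_led_take(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_disable(led_cdev);		/* sets LED_SYSFS_DISABLE */
	mutex_unlock(&led_cdev->led_access);
}

static void demo_led_release(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_enable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}
```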
| 154 | /* | 194 | /* |
| 155 | * LED Triggers | 195 | * LED Triggers |
| 156 | */ | 196 | */ |
| @@ -261,6 +301,7 @@ struct gpio_led { | |||
| 261 | unsigned retain_state_suspended : 1; | 301 | unsigned retain_state_suspended : 1; |
| 262 | unsigned default_state : 2; | 302 | unsigned default_state : 2; |
| 263 | /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ | 303 | /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ |
| 304 | struct gpio_desc *gpiod; | ||
| 264 | }; | 305 | }; |
| 265 | #define LEDS_GPIO_DEFSTATE_OFF 0 | 306 | #define LEDS_GPIO_DEFSTATE_OFF 0 |
| 266 | #define LEDS_GPIO_DEFSTATE_ON 1 | 307 | #define LEDS_GPIO_DEFSTATE_ON 1 |
| @@ -273,7 +314,7 @@ struct gpio_led_platform_data { | |||
| 273 | #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ | 314 | #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ |
| 274 | #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ | 315 | #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ |
| 275 | #define GPIO_LED_BLINK 2 /* Please, blink */ | 316 | #define GPIO_LED_BLINK 2 /* Please, blink */ |
| 276 | int (*gpio_blink_set)(unsigned gpio, int state, | 317 | int (*gpio_blink_set)(struct gpio_desc *desc, int state, |
| 277 | unsigned long *delay_on, | 318 | unsigned long *delay_on, |
| 278 | unsigned long *delay_off); | 319 | unsigned long *delay_off); |
| 279 | }; | 320 | }; |
diff --git a/include/linux/libata.h b/include/linux/libata.h index bfbc817c34ee..2d182413b1db 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -191,7 +191,8 @@ enum { | |||
| 191 | ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ | 191 | ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ |
| 192 | ATA_DEV_SEMB = 7, /* SEMB */ | 192 | ATA_DEV_SEMB = 7, /* SEMB */ |
| 193 | ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ | 193 | ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ |
| 194 | ATA_DEV_NONE = 9, /* no device */ | 194 | ATA_DEV_ZAC = 9, /* ZAC device */ |
| 195 | ATA_DEV_NONE = 10, /* no device */ | ||
| 195 | 196 | ||
| 196 | /* struct ata_link flags */ | 197 | /* struct ata_link flags */ |
| 197 | ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ | 198 | ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ |
| @@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag) | |||
| 1491 | static inline unsigned int ata_class_enabled(unsigned int class) | 1492 | static inline unsigned int ata_class_enabled(unsigned int class) |
| 1492 | { | 1493 | { |
| 1493 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || | 1494 | return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || |
| 1494 | class == ATA_DEV_PMP || class == ATA_DEV_SEMB; | 1495 | class == ATA_DEV_PMP || class == ATA_DEV_SEMB || |
| 1496 | class == ATA_DEV_ZAC; | ||
| 1495 | } | 1497 | } |
| 1496 | 1498 | ||
| 1497 | static inline unsigned int ata_class_disabled(unsigned int class) | 1499 | static inline unsigned int ata_class_disabled(unsigned int class) |
diff --git a/include/linux/list.h b/include/linux/list.h index f33f831eb3c8..feb773c76ee0 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 346 | * list_entry - get the struct for this entry | 346 | * list_entry - get the struct for this entry |
| 347 | * @ptr: the &struct list_head pointer. | 347 | * @ptr: the &struct list_head pointer. |
| 348 | * @type: the type of the struct this is embedded in. | 348 | * @type: the type of the struct this is embedded in. |
| 349 | * @member: the name of the list_struct within the struct. | 349 | * @member: the name of the list_head within the struct. |
| 350 | */ | 350 | */ |
| 351 | #define list_entry(ptr, type, member) \ | 351 | #define list_entry(ptr, type, member) \ |
| 352 | container_of(ptr, type, member) | 352 | container_of(ptr, type, member) |
| @@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 355 | * list_first_entry - get the first element from a list | 355 | * list_first_entry - get the first element from a list |
| 356 | * @ptr: the list head to take the element from. | 356 | * @ptr: the list head to take the element from. |
| 357 | * @type: the type of the struct this is embedded in. | 357 | * @type: the type of the struct this is embedded in. |
| 358 | * @member: the name of the list_struct within the struct. | 358 | * @member: the name of the list_head within the struct. |
| 359 | * | 359 | * |
| 360 | * Note, that list is expected to be not empty. | 360 | * Note, that list is expected to be not empty. |
| 361 | */ | 361 | */ |
| @@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 366 | * list_last_entry - get the last element from a list | 366 | * list_last_entry - get the last element from a list |
| 367 | * @ptr: the list head to take the element from. | 367 | * @ptr: the list head to take the element from. |
| 368 | * @type: the type of the struct this is embedded in. | 368 | * @type: the type of the struct this is embedded in. |
| 369 | * @member: the name of the list_struct within the struct. | 369 | * @member: the name of the list_head within the struct. |
| 370 | * | 370 | * |
| 371 | * Note, that list is expected to be not empty. | 371 | * Note, that list is expected to be not empty. |
| 372 | */ | 372 | */ |
| @@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 377 | * list_first_entry_or_null - get the first element from a list | 377 | * list_first_entry_or_null - get the first element from a list |
| 378 | * @ptr: the list head to take the element from. | 378 | * @ptr: the list head to take the element from. |
| 379 | * @type: the type of the struct this is embedded in. | 379 | * @type: the type of the struct this is embedded in. |
| 380 | * @member: the name of the list_struct within the struct. | 380 | * @member: the name of the list_head within the struct. |
| 381 | * | 381 | * |
| 382 | * Note that if the list is empty, it returns NULL. | 382 | * Note that if the list is empty, it returns NULL. |
| 383 | */ | 383 | */ |
| @@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 387 | /** | 387 | /** |
| 388 | * list_next_entry - get the next element in list | 388 | * list_next_entry - get the next element in list |
| 389 | * @pos: the type * to cursor | 389 | * @pos: the type * to cursor |
| 390 | * @member: the name of the list_struct within the struct. | 390 | * @member: the name of the list_head within the struct. |
| 391 | */ | 391 | */ |
| 392 | #define list_next_entry(pos, member) \ | 392 | #define list_next_entry(pos, member) \ |
| 393 | list_entry((pos)->member.next, typeof(*(pos)), member) | 393 | list_entry((pos)->member.next, typeof(*(pos)), member) |
| @@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 395 | /** | 395 | /** |
| 396 | * list_prev_entry - get the prev element in list | 396 | * list_prev_entry - get the prev element in list |
| 397 | * @pos: the type * to cursor | 397 | * @pos: the type * to cursor |
| 398 | * @member: the name of the list_struct within the struct. | 398 | * @member: the name of the list_head within the struct. |
| 399 | */ | 399 | */ |
| 400 | #define list_prev_entry(pos, member) \ | 400 | #define list_prev_entry(pos, member) \ |
| 401 | list_entry((pos)->member.prev, typeof(*(pos)), member) | 401 | list_entry((pos)->member.prev, typeof(*(pos)), member) |
| @@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 441 | * list_for_each_entry - iterate over list of given type | 441 | * list_for_each_entry - iterate over list of given type |
| 442 | * @pos: the type * to use as a loop cursor. | 442 | * @pos: the type * to use as a loop cursor. |
| 443 | * @head: the head for your list. | 443 | * @head: the head for your list. |
| 444 | * @member: the name of the list_struct within the struct. | 444 | * @member: the name of the list_head within the struct. |
| 445 | */ | 445 | */ |
| 446 | #define list_for_each_entry(pos, head, member) \ | 446 | #define list_for_each_entry(pos, head, member) \ |
| 447 | for (pos = list_first_entry(head, typeof(*pos), member); \ | 447 | for (pos = list_first_entry(head, typeof(*pos), member); \ |
| @@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 452 | * list_for_each_entry_reverse - iterate backwards over list of given type. | 452 | * list_for_each_entry_reverse - iterate backwards over list of given type. |
| 453 | * @pos: the type * to use as a loop cursor. | 453 | * @pos: the type * to use as a loop cursor. |
| 454 | * @head: the head for your list. | 454 | * @head: the head for your list. |
| 455 | * @member: the name of the list_struct within the struct. | 455 | * @member: the name of the list_head within the struct. |
| 456 | */ | 456 | */ |
| 457 | #define list_for_each_entry_reverse(pos, head, member) \ | 457 | #define list_for_each_entry_reverse(pos, head, member) \ |
| 458 | for (pos = list_last_entry(head, typeof(*pos), member); \ | 458 | for (pos = list_last_entry(head, typeof(*pos), member); \ |
| @@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 463 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() | 463 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
| 464 | * @pos: the type * to use as a start point | 464 | * @pos: the type * to use as a start point |
| 465 | * @head: the head of the list | 465 | * @head: the head of the list |
| 466 | * @member: the name of the list_struct within the struct. | 466 | * @member: the name of the list_head within the struct. |
| 467 | * | 467 | * |
| 468 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). | 468 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
| 469 | */ | 469 | */ |
| @@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 474 | * list_for_each_entry_continue - continue iteration over list of given type | 474 | * list_for_each_entry_continue - continue iteration over list of given type |
| 475 | * @pos: the type * to use as a loop cursor. | 475 | * @pos: the type * to use as a loop cursor. |
| 476 | * @head: the head for your list. | 476 | * @head: the head for your list. |
| 477 | * @member: the name of the list_struct within the struct. | 477 | * @member: the name of the list_head within the struct. |
| 478 | * | 478 | * |
| 479 | * Continue to iterate over list of given type, continuing after | 479 | * Continue to iterate over list of given type, continuing after |
| 480 | * the current position. | 480 | * the current position. |
| @@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 488 | * list_for_each_entry_continue_reverse - iterate backwards from the given point | 488 | * list_for_each_entry_continue_reverse - iterate backwards from the given point |
| 489 | * @pos: the type * to use as a loop cursor. | 489 | * @pos: the type * to use as a loop cursor. |
| 490 | * @head: the head for your list. | 490 | * @head: the head for your list. |
| 491 | * @member: the name of the list_struct within the struct. | 491 | * @member: the name of the list_head within the struct. |
| 492 | * | 492 | * |
| 493 | * Start to iterate over list of given type backwards, continuing after | 493 | * Start to iterate over list of given type backwards, continuing after |
| 494 | * the current position. | 494 | * the current position. |
| @@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 502 | * list_for_each_entry_from - iterate over list of given type from the current point | 502 | * list_for_each_entry_from - iterate over list of given type from the current point |
| 503 | * @pos: the type * to use as a loop cursor. | 503 | * @pos: the type * to use as a loop cursor. |
| 504 | * @head: the head for your list. | 504 | * @head: the head for your list. |
| 505 | * @member: the name of the list_struct within the struct. | 505 | * @member: the name of the list_head within the struct. |
| 506 | * | 506 | * |
| 507 | * Iterate over list of given type, continuing from current position. | 507 | * Iterate over list of given type, continuing from current position. |
| 508 | */ | 508 | */ |
| @@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 515 | * @pos: the type * to use as a loop cursor. | 515 | * @pos: the type * to use as a loop cursor. |
| 516 | * @n: another type * to use as temporary storage | 516 | * @n: another type * to use as temporary storage |
| 517 | * @head: the head for your list. | 517 | * @head: the head for your list. |
| 518 | * @member: the name of the list_struct within the struct. | 518 | * @member: the name of the list_head within the struct. |
| 519 | */ | 519 | */ |
| 520 | #define list_for_each_entry_safe(pos, n, head, member) \ | 520 | #define list_for_each_entry_safe(pos, n, head, member) \ |
| 521 | for (pos = list_first_entry(head, typeof(*pos), member), \ | 521 | for (pos = list_first_entry(head, typeof(*pos), member), \ |
| @@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 528 | * @pos: the type * to use as a loop cursor. | 528 | * @pos: the type * to use as a loop cursor. |
| 529 | * @n: another type * to use as temporary storage | 529 | * @n: another type * to use as temporary storage |
| 530 | * @head: the head for your list. | 530 | * @head: the head for your list. |
| 531 | * @member: the name of the list_struct within the struct. | 531 | * @member: the name of the list_head within the struct. |
| 532 | * | 532 | * |
| 533 | * Iterate over list of given type, continuing after current point, | 533 | * Iterate over list of given type, continuing after current point, |
| 534 | * safe against removal of list entry. | 534 | * safe against removal of list entry. |
| @@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 544 | * @pos: the type * to use as a loop cursor. | 544 | * @pos: the type * to use as a loop cursor. |
| 545 | * @n: another type * to use as temporary storage | 545 | * @n: another type * to use as temporary storage |
| 546 | * @head: the head for your list. | 546 | * @head: the head for your list. |
| 547 | * @member: the name of the list_struct within the struct. | 547 | * @member: the name of the list_head within the struct. |
| 548 | * | 548 | * |
| 549 | * Iterate over list of given type from current point, safe against | 549 | * Iterate over list of given type from current point, safe against |
| 550 | * removal of list entry. | 550 | * removal of list entry. |
| @@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 559 | * @pos: the type * to use as a loop cursor. | 559 | * @pos: the type * to use as a loop cursor. |
| 560 | * @n: another type * to use as temporary storage | 560 | * @n: another type * to use as temporary storage |
| 561 | * @head: the head for your list. | 561 | * @head: the head for your list. |
| 562 | * @member: the name of the list_struct within the struct. | 562 | * @member: the name of the list_head within the struct. |
| 563 | * | 563 | * |
| 564 | * Iterate backwards over list of given type, safe against removal | 564 | * Iterate backwards over list of given type, safe against removal |
| 565 | * of list entry. | 565 | * of list entry. |
| @@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 574 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop | 574 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
| 575 | * @pos: the loop cursor used in the list_for_each_entry_safe loop | 575 | * @pos: the loop cursor used in the list_for_each_entry_safe loop |
| 576 | * @n: temporary storage used in list_for_each_entry_safe | 576 | * @n: temporary storage used in list_for_each_entry_safe |
| 577 | * @member: the name of the list_struct within the struct. | 577 | * @member: the name of the list_head within the struct. |
| 578 | * | 578 | * |
| 579 | * list_safe_reset_next is not safe to use in general if the list may be | 579 | * list_safe_reset_next is not safe to use in general if the list may be |
| 580 | * modified concurrently (eg. the lock is dropped in the loop body). An | 580 | * modified concurrently (eg. the lock is dropped in the loop body). An |
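Since the kerneldoc above now consistently says "list_head", a minimal usage sketch tying the @member argument to the embedded struct list_head; the types and function are invented for illustration.

```c
struct foo {
	int value;
	struct list_head node;		/* this is the "member" argument */
};

static int sum_foos(struct list_head *head)
{
	struct foo *f;
	int sum = 0;

	list_for_each_entry(f, head, node)
		sum += f->value;

	return sum;
}
```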
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index 257d3779f2ab..0ca8109934e4 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h | |||
| @@ -17,12 +17,8 @@ | |||
| 17 | * Enable lockd debugging. | 17 | * Enable lockd debugging. |
| 18 | * Requires RPC_DEBUG. | 18 | * Requires RPC_DEBUG. |
| 19 | */ | 19 | */ |
| 20 | #ifdef RPC_DEBUG | ||
| 21 | # define LOCKD_DEBUG 1 | ||
| 22 | #endif | ||
| 23 | |||
| 24 | #undef ifdebug | 20 | #undef ifdebug |
| 25 | #if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) | 21 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 26 | # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) | 22 | # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) |
| 27 | #else | 23 | #else |
| 28 | # define ifdebug(flag) if (0) | 24 | # define ifdebug(flag) if (0) |
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 307d9cab2026..1726ccbd8009 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h | |||
| @@ -25,6 +25,8 @@ struct mbox_chan; | |||
| 25 | * if the client receives some ACK packet for transmission. | 25 | * if the client receives some ACK packet for transmission. |
| 26 | * Unused if the controller already has TX_Done/RTR IRQ. | 26 | * Unused if the controller already has TX_Done/RTR IRQ. |
| 27 | * @rx_callback: Atomic callback to provide client the data received | 27 | * @rx_callback: Atomic callback to provide client the data received |
| 28 | * @tx_prepare: Atomic callback to ask client to prepare the payload | ||
| 29 | * before initiating the transmission if required. | ||
| 28 | * @tx_done: Atomic callback to tell client of data transmission | 30 | * @tx_done: Atomic callback to tell client of data transmission |
| 29 | */ | 31 | */ |
| 30 | struct mbox_client { | 32 | struct mbox_client { |
| @@ -34,6 +36,7 @@ struct mbox_client { | |||
| 34 | bool knows_txdone; | 36 | bool knows_txdone; |
| 35 | 37 | ||
| 36 | void (*rx_callback)(struct mbox_client *cl, void *mssg); | 38 | void (*rx_callback)(struct mbox_client *cl, void *mssg); |
| 39 | void (*tx_prepare)(struct mbox_client *cl, void *mssg); | ||
| 37 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); | 40 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); |
| 38 | }; | 41 | }; |
| 39 | 42 | ||
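A hedged sketch of a client using the new callback: tx_prepare() runs just before the controller kicks off the transfer, so the payload can be staged in shared memory at the last possible moment. All names other than the struct fields are invented.

```c
static void demo_tx_prepare(struct mbox_client *cl, void *mssg)
{
	/* Stage or patch the outgoing message in the channel's shared buffer. */
}

static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	if (r)
		dev_warn(cl->dev, "mailbox send failed: %d\n", r);
}

static struct mbox_client demo_client = {
	.tx_prepare = demo_tx_prepare,
	.tx_done    = demo_tx_done,
};
```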
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d..e6982ac3200d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 | 16 | #define MARVELL_PHY_ID_88E1318S 0x01410e90 |
| 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 | 17 | #define MARVELL_PHY_ID_88E1116R 0x01410e40 |
| 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 | 18 | #define MARVELL_PHY_ID_88E1510 0x01410dd0 |
| 19 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | ||
| 19 | 20 | ||
| 20 | /* struct phy_device dev_flags definitions */ | 21 | /* struct phy_device dev_flags definitions */ |
| 21 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 | 22 | #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5ab..7c95af8d552c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <linux/jump_label.h> | 25 | #include <linux/jump_label.h> |
| 26 | 26 | ||
| 27 | struct mem_cgroup; | 27 | struct mem_cgroup; |
| 28 | struct page_cgroup; | ||
| 29 | struct page; | 28 | struct page; |
| 30 | struct mm_struct; | 29 | struct mm_struct; |
| 31 | struct kmem_cache; | 30 | struct kmem_cache; |
| @@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, | |||
| 68 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); | 67 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); |
| 69 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); | 68 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); |
| 70 | 69 | ||
| 71 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | 70 | bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, |
| 72 | struct mem_cgroup *memcg); | 71 | struct mem_cgroup *root); |
| 73 | bool task_in_mem_cgroup(struct task_struct *task, | 72 | bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); |
| 74 | const struct mem_cgroup *memcg); | ||
| 75 | 73 | ||
| 76 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | 74 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); |
| 77 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 75 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
| @@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | |||
| 79 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | 77 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); |
| 80 | extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); | 78 | extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); |
| 81 | 79 | ||
| 82 | static inline | 80 | static inline bool mm_match_cgroup(struct mm_struct *mm, |
| 83 | bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) | 81 | struct mem_cgroup *memcg) |
| 84 | { | 82 | { |
| 85 | struct mem_cgroup *task_memcg; | 83 | struct mem_cgroup *task_memcg; |
| 86 | bool match; | 84 | bool match = false; |
| 87 | 85 | ||
| 88 | rcu_read_lock(); | 86 | rcu_read_lock(); |
| 89 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 87 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
| 90 | match = __mem_cgroup_same_or_subtree(memcg, task_memcg); | 88 | if (task_memcg) |
| 89 | match = mem_cgroup_is_descendant(task_memcg, memcg); | ||
| 91 | rcu_read_unlock(); | 90 | rcu_read_unlock(); |
| 92 | return match; | 91 | return match; |
| 93 | } | 92 | } |
| @@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void) | |||
| 141 | 140 | ||
| 142 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, | 141 | struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, |
| 143 | unsigned long *flags); | 142 | unsigned long *flags); |
| 144 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, | 143 | void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, |
| 145 | unsigned long flags); | 144 | unsigned long *flags); |
| 146 | void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, | 145 | void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, |
| 147 | enum mem_cgroup_stat_index idx, int val); | 146 | enum mem_cgroup_stat_index idx, int val); |
| 148 | 147 | ||
| @@ -174,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, | |||
| 174 | void mem_cgroup_split_huge_fixup(struct page *head); | 173 | void mem_cgroup_split_huge_fixup(struct page *head); |
| 175 | #endif | 174 | #endif |
| 176 | 175 | ||
| 177 | #ifdef CONFIG_DEBUG_VM | ||
| 178 | bool mem_cgroup_bad_page_check(struct page *page); | ||
| 179 | void mem_cgroup_print_bad_page(struct page *page); | ||
| 180 | #endif | ||
| 181 | #else /* CONFIG_MEMCG */ | 176 | #else /* CONFIG_MEMCG */ |
| 182 | struct mem_cgroup; | 177 | struct mem_cgroup; |
| 183 | 178 | ||
| @@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, | |||
| 297 | } | 292 | } |
| 298 | 293 | ||
| 299 | static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, | 294 | static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, |
| 300 | bool locked, unsigned long flags) | 295 | bool *locked, unsigned long *flags) |
| 301 | { | 296 | { |
| 302 | } | 297 | } |
| 303 | 298 | ||
| @@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) | |||
| 347 | } | 342 | } |
| 348 | #endif /* CONFIG_MEMCG */ | 343 | #endif /* CONFIG_MEMCG */ |
| 349 | 344 | ||
| 350 | #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) | ||
| 351 | static inline bool | ||
| 352 | mem_cgroup_bad_page_check(struct page *page) | ||
| 353 | { | ||
| 354 | return false; | ||
| 355 | } | ||
| 356 | |||
| 357 | static inline void | ||
| 358 | mem_cgroup_print_bad_page(struct page *page) | ||
| 359 | { | ||
| 360 | } | ||
| 361 | #endif | ||
| 362 | |||
| 363 | enum { | 345 | enum { |
| 364 | UNDER_LIMIT, | 346 | UNDER_LIMIT, |
| 365 | SOFT_LIMIT, | 347 | SOFT_LIMIT, |
| @@ -418,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); | |||
| 418 | 400 | ||
| 419 | void memcg_update_array_size(int num_groups); | 401 | void memcg_update_array_size(int num_groups); |
| 420 | 402 | ||
| 421 | struct kmem_cache * | 403 | struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); |
| 422 | __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); | 404 | void __memcg_kmem_put_cache(struct kmem_cache *cachep); |
| 423 | 405 | ||
| 424 | int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); | 406 | int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); |
| 425 | void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); | 407 | void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); |
| @@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | |||
| 447 | /* | 429 | /* |
| 448 | * __GFP_NOFAIL allocations will move on even if charging is not | 430 | * __GFP_NOFAIL allocations will move on even if charging is not |
| 449 | * possible. Therefore we don't even try, and have this allocation | 431 | * possible. Therefore we don't even try, and have this allocation |
| 450 | * unaccounted. We could in theory charge it with | 432 | * unaccounted. We could in theory charge it forcibly, but we hope |
| 451 | * res_counter_charge_nofail, but we hope those allocations are rare, | 433 | * those allocations are rare, and won't be worth the trouble. |
| 452 | * and won't be worth the trouble. | ||
| 453 | */ | 434 | */ |
| 454 | if (gfp & __GFP_NOFAIL) | 435 | if (gfp & __GFP_NOFAIL) |
| 455 | return true; | 436 | return true; |
| @@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | |||
| 467 | * memcg_kmem_uncharge_pages: uncharge pages from memcg | 448 | * memcg_kmem_uncharge_pages: uncharge pages from memcg |
| 468 | * @page: pointer to struct page being freed | 449 | * @page: pointer to struct page being freed |
| 469 | * @order: allocation order. | 450 | * @order: allocation order. |
| 470 | * | ||
| 471 | * there is no need to specify memcg here, since it is embedded in page_cgroup | ||
| 472 | */ | 451 | */ |
| 473 | static inline void | 452 | static inline void |
| 474 | memcg_kmem_uncharge_pages(struct page *page, int order) | 453 | memcg_kmem_uncharge_pages(struct page *page, int order) |
| @@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) | |||
| 485 | * | 464 | * |
| 486 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or | 465 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or |
| 487 | * failure of the allocation. if @page is NULL, this function will revert the | 466 | * failure of the allocation. if @page is NULL, this function will revert the |
| 488 | * charges. Otherwise, it will commit the memcg given by @memcg to the | 467 | * charges. Otherwise, it will commit @page to @memcg. |
| 489 | * corresponding page_cgroup. | ||
| 490 | */ | 468 | */ |
| 491 | static inline void | 469 | static inline void |
| 492 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | 470 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) |
| @@ -514,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 514 | if (unlikely(fatal_signal_pending(current))) | 492 | if (unlikely(fatal_signal_pending(current))) |
| 515 | return cachep; | 493 | return cachep; |
| 516 | 494 | ||
| 517 | return __memcg_kmem_get_cache(cachep, gfp); | 495 | return __memcg_kmem_get_cache(cachep); |
| 496 | } | ||
| 497 | |||
| 498 | static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
| 499 | { | ||
| 500 | if (memcg_kmem_enabled()) | ||
| 501 | __memcg_kmem_put_cache(cachep); | ||
| 518 | } | 502 | } |
| 519 | #else | 503 | #else |
| 520 | #define for_each_memcg_cache_index(_idx) \ | 504 | #define for_each_memcg_cache_index(_idx) \ |
| @@ -550,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | |||
| 550 | { | 534 | { |
| 551 | return cachep; | 535 | return cachep; |
| 552 | } | 536 | } |
| 537 | |||
| 538 | static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) | ||
| 539 | { | ||
| 540 | } | ||
| 553 | #endif /* CONFIG_MEMCG_KMEM */ | 541 | #endif /* CONFIG_MEMCG_KMEM */ |
| 554 | #endif /* _LINUX_MEMCONTROL_H */ | 542 | #endif /* _LINUX_MEMCONTROL_H */ |
| 555 | 543 | ||
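The memcontrol.h change above pairs memcg_kmem_get_cache() with a new memcg_kmem_put_cache(). A minimal sketch of the expected calling pattern, assuming a kmalloc-style fast path; the helper name alloc_accounted_object() is illustrative and not from the patch:

static void *alloc_accounted_object(struct kmem_cache *cachep, gfp_t gfp)
{
        struct kmem_cache *s;
        void *obj;

        s = memcg_kmem_get_cache(cachep, gfp); /* may hand back a per-memcg cache */
        obj = kmem_cache_alloc(s, gfp);
        memcg_kmem_put_cache(s);               /* new: drop the reference taken above */
        return obj;
}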
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f34723f7663c..910e3aa1e965 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h | |||
| @@ -141,6 +141,7 @@ struct arizona { | |||
| 141 | 141 | ||
| 142 | uint16_t dac_comp_coeff; | 142 | uint16_t dac_comp_coeff; |
| 143 | uint8_t dac_comp_enabled; | 143 | uint8_t dac_comp_enabled; |
| 144 | struct mutex dac_comp_lock; | ||
| 144 | }; | 145 | }; |
| 145 | 146 | ||
| 146 | int arizona_clk32k_enable(struct arizona *arizona); | 147 | int arizona_clk32k_enable(struct arizona *arizona); |
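The new dac_comp_lock in struct arizona presumably serializes updates to the adjacent dac_comp_coeff/dac_comp_enabled fields. A hedged sketch of a writer; mutex_init(&arizona->dac_comp_lock) would happen once at probe time, and the helper name is hypothetical:

static void arizona_set_dac_comp(struct arizona *arizona, u16 coeff, bool enable)
{
        mutex_lock(&arizona->dac_comp_lock);
        arizona->dac_comp_coeff = coeff;
        arizona->dac_comp_enabled = enable;
        mutex_unlock(&arizona->dac_comp_lock);
}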
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index cb01496bfa49..8e1cdbef3dad 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h | |||
| @@ -99,12 +99,6 @@ struct davinci_vcif { | |||
| 99 | dma_addr_t dma_rx_addr; | 99 | dma_addr_t dma_rx_addr; |
| 100 | }; | 100 | }; |
| 101 | 101 | ||
| 102 | struct cq93vc { | ||
| 103 | struct platform_device *pdev; | ||
| 104 | struct snd_soc_codec *codec; | ||
| 105 | u32 sysclk; | ||
| 106 | }; | ||
| 107 | |||
| 108 | struct davinci_vc; | 102 | struct davinci_vc; |
| 109 | 103 | ||
| 110 | struct davinci_vc { | 104 | struct davinci_vc { |
| @@ -122,7 +116,6 @@ struct davinci_vc { | |||
| 122 | 116 | ||
| 123 | /* Client devices */ | 117 | /* Client devices */ |
| 124 | struct davinci_vcif davinci_vcif; | 118 | struct davinci_vcif davinci_vcif; |
| 125 | struct cq93vc cq93vc; | ||
| 126 | }; | 119 | }; |
| 127 | 120 | ||
| 128 | #endif | 121 | #endif |
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e1..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h | |||
| @@ -37,7 +37,6 @@ | |||
| 37 | 37 | ||
| 38 | /* struct phy_device dev_flags definitions */ | 38 | /* struct phy_device dev_flags definitions */ |
| 39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 | 39 | #define MICREL_PHY_50MHZ_CLK 0x00000001 |
| 40 | #define MICREL_PHY_25MHZ_CLK 0x00000002 | ||
| 41 | 40 | ||
| 42 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB | 41 | #define MICREL_KSZ9021_EXTREG_CTRL 0xB |
| 43 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC | 42 | #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 01aad3ed89ec..fab9b32ace8e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
| @@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, | |||
| 36 | 36 | ||
| 37 | extern int migrate_prep(void); | 37 | extern int migrate_prep(void); |
| 38 | extern int migrate_prep_local(void); | 38 | extern int migrate_prep_local(void); |
| 39 | extern int migrate_vmas(struct mm_struct *mm, | ||
| 40 | const nodemask_t *from, const nodemask_t *to, | ||
| 41 | unsigned long flags); | ||
| 42 | extern void migrate_page_copy(struct page *newpage, struct page *page); | 39 | extern void migrate_page_copy(struct page *newpage, struct page *page); |
| 43 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, | 40 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, |
| 44 | struct page *newpage, struct page *page); | 41 | struct page *newpage, struct page *page); |
| @@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, | |||
| 57 | static inline int migrate_prep(void) { return -ENOSYS; } | 54 | static inline int migrate_prep(void) { return -ENOSYS; } |
| 58 | static inline int migrate_prep_local(void) { return -ENOSYS; } | 55 | static inline int migrate_prep_local(void) { return -ENOSYS; } |
| 59 | 56 | ||
| 60 | static inline int migrate_vmas(struct mm_struct *mm, | ||
| 61 | const nodemask_t *from, const nodemask_t *to, | ||
| 62 | unsigned long flags) | ||
| 63 | { | ||
| 64 | return -ENOSYS; | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline void migrate_page_copy(struct page *newpage, | 57 | static inline void migrate_page_copy(struct page *newpage, |
| 68 | struct page *page) {} | 58 | struct page *page) {} |
| 69 | 59 | ||
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3..64d25941b329 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
| @@ -67,6 +67,8 @@ enum { | |||
| 67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, | 67 | MLX4_CMD_MAP_ICM_AUX = 0xffc, |
| 68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, | 68 | MLX4_CMD_UNMAP_ICM_AUX = 0xffb, |
| 69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, | 69 | MLX4_CMD_SET_ICM_SIZE = 0xffd, |
| 70 | MLX4_CMD_ACCESS_REG = 0x3b, | ||
| 71 | |||
| 70 | /*master notify fw on finish for slave's flr*/ | 72 | /*master notify fw on finish for slave's flr*/ |
| 71 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, | 73 | MLX4_CMD_INFORM_FLR_DONE = 0x5b, |
| 72 | MLX4_CMD_GET_OP_REQ = 0x59, | 74 | MLX4_CMD_GET_OP_REQ = 0x59, |
| @@ -197,6 +199,33 @@ enum { | |||
| 197 | MLX4_CMD_NATIVE | 199 | MLX4_CMD_NATIVE |
| 198 | }; | 200 | }; |
| 199 | 201 | ||
| 202 | /* | ||
| 203 | * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - | ||
| 204 | * The receive checksum value is reported in the CQE also for non-TCP/UDP packets. | ||
| 205 | * | ||
| 206 | * MLX4_RX_CSUM_MODE_L4 - | ||
| 207 | * L4_CSUM bit in CQE, which indicates whether or not L4 checksum | ||
| 208 | * was validated correctly, is supported. | ||
| 209 | * | ||
| 210 | * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - | ||
| 211 | * The IP_OK field in the CQE is also supported for non-TCP/UDP IP packets. | ||
| 212 | * | ||
| 213 | * MLX4_RX_CSUM_MODE_MULTI_VLAN - | ||
| 214 | * Receive Checksum offload is supported for packets with more than 2 vlan headers. | ||
| 215 | */ | ||
| 216 | enum mlx4_rx_csum_mode { | ||
| 217 | MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, | ||
| 218 | MLX4_RX_CSUM_MODE_L4 = 1UL << 1, | ||
| 219 | MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, | ||
| 220 | MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 | ||
| 221 | }; | ||
| 222 | |||
| 223 | struct mlx4_config_dev_params { | ||
| 224 | u16 vxlan_udp_dport; | ||
| 225 | u8 rx_csum_flags_port_1; | ||
| 226 | u8 rx_csum_flags_port_2; | ||
| 227 | }; | ||
| 228 | |||
| 200 | struct mlx4_dev; | 229 | struct mlx4_dev; |
| 201 | 230 | ||
| 202 | struct mlx4_cmd_mailbox { | 231 | struct mlx4_cmd_mailbox { |
| @@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); | |||
| 248 | int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); | 277 | int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); |
| 249 | int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); | 278 | int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); |
| 250 | int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); | 279 | int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); |
| 280 | int mlx4_config_dev_retrieval(struct mlx4_dev *dev, | ||
| 281 | struct mlx4_config_dev_params *params); | ||
| 251 | /* | 282 | /* |
| 252 | * mlx4_get_slave_default_vlan - | 283 | * mlx4_get_slave_default_vlan - |
| 253 | * return true if VST ( default vlan) | 284 | * return true if VST ( default vlan) |
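mlx4/cmd.h now exposes the CONFIG_DEV parameters through mlx4_config_dev_retrieval(). A sketch of a hypothetical caller dumping the reported values; it assumes the device advertises the corresponding capability:

static int dump_config_dev(struct mlx4_dev *dev)
{
        struct mlx4_config_dev_params params;
        int err;

        err = mlx4_config_dev_retrieval(dev, &params);
        if (err)
                return err;

        pr_info("vxlan dport %u, rx csum flags port1 0x%x port2 0x%x\n",
                params.vxlan_udp_dport,
                params.rx_csum_flags_port_1, params.rx_csum_flags_port_2);
        return 0;
}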
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227..25c791e295fd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -95,7 +95,7 @@ enum { | |||
| 95 | 95 | ||
| 96 | enum { | 96 | enum { |
| 97 | MLX4_MAX_NUM_PF = 16, | 97 | MLX4_MAX_NUM_PF = 16, |
| 98 | MLX4_MAX_NUM_VF = 64, | 98 | MLX4_MAX_NUM_VF = 126, |
| 99 | MLX4_MAX_NUM_VF_P_PORT = 64, | 99 | MLX4_MAX_NUM_VF_P_PORT = 64, |
| 100 | MLX4_MFUNC_MAX = 80, | 100 | MLX4_MFUNC_MAX = 80, |
| 101 | MLX4_MAX_EQ_NUM = 1024, | 101 | MLX4_MAX_EQ_NUM = 1024, |
| @@ -117,6 +117,14 @@ enum { | |||
| 117 | MLX4_STEERING_MODE_DEVICE_MANAGED | 117 | MLX4_STEERING_MODE_DEVICE_MANAGED |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
| 120 | enum { | ||
| 121 | MLX4_STEERING_DMFS_A0_DEFAULT, | ||
| 122 | MLX4_STEERING_DMFS_A0_DYNAMIC, | ||
| 123 | MLX4_STEERING_DMFS_A0_STATIC, | ||
| 124 | MLX4_STEERING_DMFS_A0_DISABLE, | ||
| 125 | MLX4_STEERING_DMFS_A0_NOT_SUPPORTED | ||
| 126 | }; | ||
| 127 | |||
| 120 | static inline const char *mlx4_steering_mode_str(int steering_mode) | 128 | static inline const char *mlx4_steering_mode_str(int steering_mode) |
| 121 | { | 129 | { |
| 122 | switch (steering_mode) { | 130 | switch (steering_mode) { |
| @@ -186,7 +194,31 @@ enum { | |||
| 186 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, | 194 | MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, |
| 187 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, | 195 | MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, |
| 188 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, | 196 | MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, |
| 189 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 | 197 | MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, |
| 198 | MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, | ||
| 199 | MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, | ||
| 200 | MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, | ||
| 201 | MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, | ||
| 202 | MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, | ||
| 203 | MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 | ||
| 204 | }; | ||
| 205 | |||
| 206 | enum { | ||
| 207 | MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, | ||
| 208 | MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 | ||
| 209 | }; | ||
| 210 | |||
| 211 | /* bit enums for an 8-bit flags field indicating special use | ||
| 212 | * QPs which require special handling in qp_reserve_range. | ||
| 213 | * Currently, this only includes QPs used by the ETH interface, | ||
| 214 | * where we expect to use blueflame. These QPs must not have | ||
| 215 | * bits 6 and 7 set in their qp number. | ||
| 216 | * | ||
| 217 | * This enum may use only bits 0..7. | ||
| 218 | */ | ||
| 219 | enum { | ||
| 220 | MLX4_RESERVE_A0_QP = 1 << 6, | ||
| 221 | MLX4_RESERVE_ETH_BF_QP = 1 << 7, | ||
| 190 | }; | 222 | }; |
| 191 | 223 | ||
| 192 | enum { | 224 | enum { |
| @@ -202,7 +234,8 @@ enum { | |||
| 202 | 234 | ||
| 203 | enum { | 235 | enum { |
| 204 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, | 236 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, |
| 205 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 | 237 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, |
| 238 | MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 | ||
| 206 | }; | 239 | }; |
| 207 | 240 | ||
| 208 | 241 | ||
| @@ -328,6 +361,8 @@ enum { | |||
| 328 | 361 | ||
| 329 | enum mlx4_qp_region { | 362 | enum mlx4_qp_region { |
| 330 | MLX4_QP_REGION_FW = 0, | 363 | MLX4_QP_REGION_FW = 0, |
| 364 | MLX4_QP_REGION_RSS_RAW_ETH, | ||
| 365 | MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, | ||
| 331 | MLX4_QP_REGION_ETH_ADDR, | 366 | MLX4_QP_REGION_ETH_ADDR, |
| 332 | MLX4_QP_REGION_FC_ADDR, | 367 | MLX4_QP_REGION_FC_ADDR, |
| 333 | MLX4_QP_REGION_FC_EXCH, | 368 | MLX4_QP_REGION_FC_EXCH, |
| @@ -379,6 +414,13 @@ enum { | |||
| 379 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ | 414 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ |
| 380 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) | 415 | MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) |
| 381 | 416 | ||
| 417 | enum mlx4_module_id { | ||
| 418 | MLX4_MODULE_ID_SFP = 0x3, | ||
| 419 | MLX4_MODULE_ID_QSFP = 0xC, | ||
| 420 | MLX4_MODULE_ID_QSFP_PLUS = 0xD, | ||
| 421 | MLX4_MODULE_ID_QSFP28 = 0x11, | ||
| 422 | }; | ||
| 423 | |||
| 382 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 424 | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) |
| 383 | { | 425 | { |
| 384 | return (major << 32) | (minor << 16) | subminor; | 426 | return (major << 32) | (minor << 16) | subminor; |
| @@ -433,6 +475,7 @@ struct mlx4_caps { | |||
| 433 | int num_cqs; | 475 | int num_cqs; |
| 434 | int max_cqes; | 476 | int max_cqes; |
| 435 | int reserved_cqs; | 477 | int reserved_cqs; |
| 478 | int num_sys_eqs; | ||
| 436 | int num_eqs; | 479 | int num_eqs; |
| 437 | int reserved_eqs; | 480 | int reserved_eqs; |
| 438 | int num_comp_vectors; | 481 | int num_comp_vectors; |
| @@ -449,6 +492,7 @@ struct mlx4_caps { | |||
| 449 | int reserved_mcgs; | 492 | int reserved_mcgs; |
| 450 | int num_qp_per_mgm; | 493 | int num_qp_per_mgm; |
| 451 | int steering_mode; | 494 | int steering_mode; |
| 495 | int dmfs_high_steer_mode; | ||
| 452 | int fs_log_max_ucast_qp_range_size; | 496 | int fs_log_max_ucast_qp_range_size; |
| 453 | int num_pds; | 497 | int num_pds; |
| 454 | int reserved_pds; | 498 | int reserved_pds; |
| @@ -487,6 +531,10 @@ struct mlx4_caps { | |||
| 487 | u16 hca_core_clock; | 531 | u16 hca_core_clock; |
| 488 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; | 532 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; |
| 489 | int tunnel_offload_mode; | 533 | int tunnel_offload_mode; |
| 534 | u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; | ||
| 535 | u8 alloc_res_qp_mask; | ||
| 536 | u32 dmfs_high_rate_qpn_base; | ||
| 537 | u32 dmfs_high_rate_qpn_range; | ||
| 490 | }; | 538 | }; |
| 491 | 539 | ||
| 492 | struct mlx4_buf_list { | 540 | struct mlx4_buf_list { |
| @@ -607,6 +655,11 @@ struct mlx4_cq { | |||
| 607 | 655 | ||
| 608 | atomic_t refcount; | 656 | atomic_t refcount; |
| 609 | struct completion free; | 657 | struct completion free; |
| 658 | struct { | ||
| 659 | struct list_head list; | ||
| 660 | void (*comp)(struct mlx4_cq *); | ||
| 661 | void *priv; | ||
| 662 | } tasklet_ctx; | ||
| 610 | }; | 663 | }; |
| 611 | 664 | ||
| 612 | struct mlx4_qp { | 665 | struct mlx4_qp { |
| @@ -799,6 +852,26 @@ struct mlx4_init_port_param { | |||
| 799 | u64 si_guid; | 852 | u64 si_guid; |
| 800 | }; | 853 | }; |
| 801 | 854 | ||
| 855 | #define MAD_IFC_DATA_SZ 192 | ||
| 856 | /* MAD IFC Mailbox */ | ||
| 857 | struct mlx4_mad_ifc { | ||
| 858 | u8 base_version; | ||
| 859 | u8 mgmt_class; | ||
| 860 | u8 class_version; | ||
| 861 | u8 method; | ||
| 862 | __be16 status; | ||
| 863 | __be16 class_specific; | ||
| 864 | __be64 tid; | ||
| 865 | __be16 attr_id; | ||
| 866 | __be16 resv; | ||
| 867 | __be32 attr_mod; | ||
| 868 | __be64 mkey; | ||
| 869 | __be16 dr_slid; | ||
| 870 | __be16 dr_dlid; | ||
| 871 | u8 reserved[28]; | ||
| 872 | u8 data[MAD_IFC_DATA_SZ]; | ||
| 873 | } __packed; | ||
| 874 | |||
| 802 | #define mlx4_foreach_port(port, dev, type) \ | 875 | #define mlx4_foreach_port(port, dev, type) \ |
| 803 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | 876 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ |
| 804 | if ((type) == (dev)->caps.port_mask[(port)]) | 877 | if ((type) == (dev)->caps.port_mask[(port)]) |
| @@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) | |||
| 835 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) | 908 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) |
| 836 | { | 909 | { |
| 837 | return (qpn < dev->phys_caps.base_sqpn + 8 + | 910 | return (qpn < dev->phys_caps.base_sqpn + 8 + |
| 838 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); | 911 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && |
| 912 | qpn >= dev->phys_caps.base_sqpn) || | ||
| 913 | (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); | ||
| 839 | } | 914 | } |
| 840 | 915 | ||
| 841 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) | 916 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) |
| @@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
| 911 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | 986 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, |
| 912 | unsigned vector, int collapsed, int timestamp_en); | 987 | unsigned vector, int collapsed, int timestamp_en); |
| 913 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 988 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
| 914 | 989 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, | |
| 915 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 990 | int *base, u8 flags); |
| 916 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 991 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
| 917 | 992 | ||
| 918 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, | 993 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, |
| @@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
| 1283 | u64 iova, u64 size, int npages, | 1358 | u64 iova, u64 size, int npages, |
| 1284 | int page_shift, struct mlx4_mpt_entry *mpt_entry); | 1359 | int page_shift, struct mlx4_mpt_entry *mpt_entry); |
| 1285 | 1360 | ||
| 1361 | int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, | ||
| 1362 | u16 offset, u16 size, u8 *data); | ||
| 1363 | |||
| 1286 | /* Returns true if running in low memory profile (kdump kernel) */ | 1364 | /* Returns true if running in low memory profile (kdump kernel) */ |
| 1287 | static inline bool mlx4_low_memory_profile(void) | 1365 | static inline bool mlx4_low_memory_profile(void) |
| 1288 | { | 1366 | { |
| 1289 | return is_kdump_kernel(); | 1367 | return is_kdump_kernel(); |
| 1290 | } | 1368 | } |
| 1291 | 1369 | ||
| 1370 | /* ACCESS REG commands */ | ||
| 1371 | enum mlx4_access_reg_method { | ||
| 1372 | MLX4_ACCESS_REG_QUERY = 0x1, | ||
| 1373 | MLX4_ACCESS_REG_WRITE = 0x2, | ||
| 1374 | }; | ||
| 1375 | |||
| 1376 | /* ACCESS PTYS Reg command */ | ||
| 1377 | enum mlx4_ptys_proto { | ||
| 1378 | MLX4_PTYS_IB = 1<<0, | ||
| 1379 | MLX4_PTYS_EN = 1<<2, | ||
| 1380 | }; | ||
| 1381 | |||
| 1382 | struct mlx4_ptys_reg { | ||
| 1383 | u8 resrvd1; | ||
| 1384 | u8 local_port; | ||
| 1385 | u8 resrvd2; | ||
| 1386 | u8 proto_mask; | ||
| 1387 | __be32 resrvd3[2]; | ||
| 1388 | __be32 eth_proto_cap; | ||
| 1389 | __be16 ib_width_cap; | ||
| 1390 | __be16 ib_speed_cap; | ||
| 1391 | __be32 resrvd4; | ||
| 1392 | __be32 eth_proto_admin; | ||
| 1393 | __be16 ib_width_admin; | ||
| 1394 | __be16 ib_speed_admin; | ||
| 1395 | __be32 resrvd5; | ||
| 1396 | __be32 eth_proto_oper; | ||
| 1397 | __be16 ib_width_oper; | ||
| 1398 | __be16 ib_speed_oper; | ||
| 1399 | __be32 resrvd6; | ||
| 1400 | __be32 eth_proto_lp_adv; | ||
| 1401 | } __packed; | ||
| 1402 | |||
| 1403 | int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, | ||
| 1404 | enum mlx4_access_reg_method method, | ||
| 1405 | struct mlx4_ptys_reg *ptys_reg); | ||
| 1406 | |||
| 1292 | #endif /* MLX4_DEVICE_H */ | 1407 | #endif /* MLX4_DEVICE_H */ |
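The MLX4_RESERVE_* bits above feed the flags byte added to mlx4_qp_reserve_range() later in this header. An illustrative caller requesting BlueFlame-capable Ethernet QPNs; counts and alignment are placeholders:

static int reserve_eth_tx_qpns(struct mlx4_dev *dev, int count, int *base_qpn)
{
        /* BF-capable QPNs must not have bits 6 and 7 set, hence the flag. */
        return mlx4_qp_reserve_range(dev, count, count, base_qpn,
                                     MLX4_RESERVE_ETH_BF_QP);
}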
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf0091..467ccdf94c98 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -120,13 +120,15 @@ enum { | |||
| 120 | MLX4_RSS_QPC_FLAG_OFFSET = 13, | 120 | MLX4_RSS_QPC_FLAG_OFFSET = 13, |
| 121 | }; | 121 | }; |
| 122 | 122 | ||
| 123 | #define MLX4_EN_RSS_KEY_SIZE 40 | ||
| 124 | |||
| 123 | struct mlx4_rss_context { | 125 | struct mlx4_rss_context { |
| 124 | __be32 base_qpn; | 126 | __be32 base_qpn; |
| 125 | __be32 default_qpn; | 127 | __be32 default_qpn; |
| 126 | u16 reserved; | 128 | u16 reserved; |
| 127 | u8 hash_fn; | 129 | u8 hash_fn; |
| 128 | u8 flags; | 130 | u8 flags; |
| 129 | __be32 rss_key[10]; | 131 | __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; |
| 130 | __be32 base_qpn_udp; | 132 | __be32 base_qpn_udp; |
| 131 | }; | 133 | }; |
| 132 | 134 | ||
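With the RSS key length now spelled MLX4_EN_RSS_KEY_SIZE, filling the context from a 40-byte Toeplitz key might look like the sketch below; the function name is illustrative:

static void fill_rss_key(struct mlx4_rss_context *ctx, const u8 *key)
{
        BUILD_BUG_ON(MLX4_EN_RSS_KEY_SIZE % sizeof(__be32));
        memcpy(ctx->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
}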
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71c..4e5bd813bb9a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -120,6 +120,15 @@ enum { | |||
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | enum { | 122 | enum { |
| 123 | MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 | ||
| 124 | }; | ||
| 125 | |||
| 126 | enum { | ||
| 127 | MLX5_PFAULT_SUBTYPE_WQE = 0, | ||
| 128 | MLX5_PFAULT_SUBTYPE_RDMA = 1, | ||
| 129 | }; | ||
| 130 | |||
| 131 | enum { | ||
| 123 | MLX5_PERM_LOCAL_READ = 1 << 2, | 132 | MLX5_PERM_LOCAL_READ = 1 << 2, |
| 124 | MLX5_PERM_LOCAL_WRITE = 1 << 3, | 133 | MLX5_PERM_LOCAL_WRITE = 1 << 3, |
| 125 | MLX5_PERM_REMOTE_READ = 1 << 4, | 134 | MLX5_PERM_REMOTE_READ = 1 << 4, |
| @@ -180,6 +189,19 @@ enum { | |||
| 180 | MLX5_MKEY_MASK_FREE = 1ull << 29, | 189 | MLX5_MKEY_MASK_FREE = 1ull << 29, |
| 181 | }; | 190 | }; |
| 182 | 191 | ||
| 192 | enum { | ||
| 193 | MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), | ||
| 194 | |||
| 195 | MLX5_UMR_CHECK_NOT_FREE = (1 << 5), | ||
| 196 | MLX5_UMR_CHECK_FREE = (2 << 5), | ||
| 197 | |||
| 198 | MLX5_UMR_INLINE = (1 << 7), | ||
| 199 | }; | ||
| 200 | |||
| 201 | #define MLX5_UMR_MTT_ALIGNMENT 0x40 | ||
| 202 | #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) | ||
| 203 | #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT | ||
| 204 | |||
| 183 | enum mlx5_event { | 205 | enum mlx5_event { |
| 184 | MLX5_EVENT_TYPE_COMP = 0x0, | 206 | MLX5_EVENT_TYPE_COMP = 0x0, |
| 185 | 207 | ||
| @@ -206,6 +228,8 @@ enum mlx5_event { | |||
| 206 | 228 | ||
| 207 | MLX5_EVENT_TYPE_CMD = 0x0a, | 229 | MLX5_EVENT_TYPE_CMD = 0x0a, |
| 208 | MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, | 230 | MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, |
| 231 | |||
| 232 | MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, | ||
| 209 | }; | 233 | }; |
| 210 | 234 | ||
| 211 | enum { | 235 | enum { |
| @@ -219,11 +243,7 @@ enum { | |||
| 219 | }; | 243 | }; |
| 220 | 244 | ||
| 221 | enum { | 245 | enum { |
| 222 | MLX5_DEV_CAP_FLAG_RC = 1LL << 0, | ||
| 223 | MLX5_DEV_CAP_FLAG_UC = 1LL << 1, | ||
| 224 | MLX5_DEV_CAP_FLAG_UD = 1LL << 2, | ||
| 225 | MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, | 246 | MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, |
| 226 | MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, | ||
| 227 | MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, | 247 | MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, |
| 228 | MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, | 248 | MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, |
| 229 | MLX5_DEV_CAP_FLAG_APM = 1LL << 17, | 249 | MLX5_DEV_CAP_FLAG_APM = 1LL << 17, |
| @@ -232,10 +252,7 @@ enum { | |||
| 232 | MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, | 252 | MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, |
| 233 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, | 253 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, |
| 234 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, | 254 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, |
| 235 | MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, | ||
| 236 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, | 255 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, |
| 237 | MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, | ||
| 238 | MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, | ||
| 239 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, | 256 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, |
| 240 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, | 257 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, |
| 241 | }; | 258 | }; |
| @@ -298,6 +315,8 @@ enum { | |||
| 298 | enum { | 315 | enum { |
| 299 | HCA_CAP_OPMOD_GET_MAX = 0, | 316 | HCA_CAP_OPMOD_GET_MAX = 0, |
| 300 | HCA_CAP_OPMOD_GET_CUR = 1, | 317 | HCA_CAP_OPMOD_GET_CUR = 1, |
| 318 | HCA_CAP_OPMOD_GET_ODP_MAX = 4, | ||
| 319 | HCA_CAP_OPMOD_GET_ODP_CUR = 5 | ||
| 301 | }; | 320 | }; |
| 302 | 321 | ||
| 303 | struct mlx5_inbox_hdr { | 322 | struct mlx5_inbox_hdr { |
| @@ -327,6 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out { | |||
| 327 | u8 vsd_psid[16]; | 346 | u8 vsd_psid[16]; |
| 328 | }; | 347 | }; |
| 329 | 348 | ||
| 349 | enum mlx5_odp_transport_cap_bits { | ||
| 350 | MLX5_ODP_SUPPORT_SEND = 1 << 31, | ||
| 351 | MLX5_ODP_SUPPORT_RECV = 1 << 30, | ||
| 352 | MLX5_ODP_SUPPORT_WRITE = 1 << 29, | ||
| 353 | MLX5_ODP_SUPPORT_READ = 1 << 28, | ||
| 354 | }; | ||
| 355 | |||
| 356 | struct mlx5_odp_caps { | ||
| 357 | char reserved[0x10]; | ||
| 358 | struct { | ||
| 359 | __be32 rc_odp_caps; | ||
| 360 | __be32 uc_odp_caps; | ||
| 361 | __be32 ud_odp_caps; | ||
| 362 | } per_transport_caps; | ||
| 363 | char reserved2[0xe4]; | ||
| 364 | }; | ||
| 365 | |||
| 330 | struct mlx5_cmd_init_hca_mbox_in { | 366 | struct mlx5_cmd_init_hca_mbox_in { |
| 331 | struct mlx5_inbox_hdr hdr; | 367 | struct mlx5_inbox_hdr hdr; |
| 332 | u8 rsvd0[2]; | 368 | u8 rsvd0[2]; |
| @@ -447,6 +483,27 @@ struct mlx5_eqe_page_req { | |||
| 447 | __be32 rsvd1[5]; | 483 | __be32 rsvd1[5]; |
| 448 | }; | 484 | }; |
| 449 | 485 | ||
| 486 | struct mlx5_eqe_page_fault { | ||
| 487 | __be32 bytes_committed; | ||
| 488 | union { | ||
| 489 | struct { | ||
| 490 | u16 reserved1; | ||
| 491 | __be16 wqe_index; | ||
| 492 | u16 reserved2; | ||
| 493 | __be16 packet_length; | ||
| 494 | u8 reserved3[12]; | ||
| 495 | } __packed wqe; | ||
| 496 | struct { | ||
| 497 | __be32 r_key; | ||
| 498 | u16 reserved1; | ||
| 499 | __be16 packet_length; | ||
| 500 | __be32 rdma_op_len; | ||
| 501 | __be64 rdma_va; | ||
| 502 | } __packed rdma; | ||
| 503 | } __packed; | ||
| 504 | __be32 flags_qpn; | ||
| 505 | } __packed; | ||
| 506 | |||
| 450 | union ev_data { | 507 | union ev_data { |
| 451 | __be32 raw[7]; | 508 | __be32 raw[7]; |
| 452 | struct mlx5_eqe_cmd cmd; | 509 | struct mlx5_eqe_cmd cmd; |
| @@ -458,6 +515,7 @@ union ev_data { | |||
| 458 | struct mlx5_eqe_congestion cong; | 515 | struct mlx5_eqe_congestion cong; |
| 459 | struct mlx5_eqe_stall_vl stall_vl; | 516 | struct mlx5_eqe_stall_vl stall_vl; |
| 460 | struct mlx5_eqe_page_req req_pages; | 517 | struct mlx5_eqe_page_req req_pages; |
| 518 | struct mlx5_eqe_page_fault page_fault; | ||
| 461 | } __packed; | 519 | } __packed; |
| 462 | 520 | ||
| 463 | struct mlx5_eqe { | 521 | struct mlx5_eqe { |
| @@ -784,6 +842,10 @@ struct mlx5_query_eq_mbox_out { | |||
| 784 | struct mlx5_eq_context ctx; | 842 | struct mlx5_eq_context ctx; |
| 785 | }; | 843 | }; |
| 786 | 844 | ||
| 845 | enum { | ||
| 846 | MLX5_MKEY_STATUS_FREE = 1 << 6, | ||
| 847 | }; | ||
| 848 | |||
| 787 | struct mlx5_mkey_seg { | 849 | struct mlx5_mkey_seg { |
| 788 | /* This is a two bit field occupying bits 31-30. | 850 | /* This is a two bit field occupying bits 31-30. |
| 789 | * bit 31 is always 0, | 851 | * bit 31 is always 0, |
| @@ -820,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out { | |||
| 820 | struct mlx5_create_mkey_mbox_in { | 882 | struct mlx5_create_mkey_mbox_in { |
| 821 | struct mlx5_inbox_hdr hdr; | 883 | struct mlx5_inbox_hdr hdr; |
| 822 | __be32 input_mkey_index; | 884 | __be32 input_mkey_index; |
| 823 | u8 rsvd0[4]; | 885 | __be32 flags; |
| 824 | struct mlx5_mkey_seg seg; | 886 | struct mlx5_mkey_seg seg; |
| 825 | u8 rsvd1[16]; | 887 | u8 rsvd1[16]; |
| 826 | __be32 xlat_oct_act_size; | 888 | __be32 xlat_oct_act_size; |
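The mlx5_odp_caps layout and the MLX5_ODP_SUPPORT_* bits are meant to be read together; a hedged helper testing one per-transport bit (the structure would be filled by mlx5_query_odp_caps(), declared in mlx5/driver.h further down):

static bool rc_supports_odp_write(const struct mlx5_odp_caps *caps)
{
        return be32_to_cpu(caps->per_transport_caps.rc_odp_caps) &
               MLX5_ODP_SUPPORT_WRITE;
}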
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8bef..166d9315fe4b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -113,6 +113,13 @@ enum { | |||
| 113 | MLX5_REG_HOST_ENDIANNESS = 0x7004, | 113 | MLX5_REG_HOST_ENDIANNESS = 0x7004, |
| 114 | }; | 114 | }; |
| 115 | 115 | ||
| 116 | enum mlx5_page_fault_resume_flags { | ||
| 117 | MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, | ||
| 118 | MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, | ||
| 119 | MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, | ||
| 120 | MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, | ||
| 121 | }; | ||
| 122 | |||
| 116 | enum dbg_rsc_type { | 123 | enum dbg_rsc_type { |
| 117 | MLX5_DBG_RSC_QP, | 124 | MLX5_DBG_RSC_QP, |
| 118 | MLX5_DBG_RSC_EQ, | 125 | MLX5_DBG_RSC_EQ, |
| @@ -467,7 +474,7 @@ struct mlx5_priv { | |||
| 467 | struct workqueue_struct *pg_wq; | 474 | struct workqueue_struct *pg_wq; |
| 468 | struct rb_root page_root; | 475 | struct rb_root page_root; |
| 469 | int fw_pages; | 476 | int fw_pages; |
| 470 | int reg_pages; | 477 | atomic_t reg_pages; |
| 471 | struct list_head free_list; | 478 | struct list_head free_list; |
| 472 | 479 | ||
| 473 | struct mlx5_core_health health; | 480 | struct mlx5_core_health health; |
| @@ -633,14 +640,6 @@ static inline void *mlx5_vzalloc(unsigned long size) | |||
| 633 | return rtn; | 640 | return rtn; |
| 634 | } | 641 | } |
| 635 | 642 | ||
| 636 | static inline void mlx5_vfree(const void *addr) | ||
| 637 | { | ||
| 638 | if (addr && is_vmalloc_addr(addr)) | ||
| 639 | vfree(addr); | ||
| 640 | else | ||
| 641 | kfree(addr); | ||
| 642 | } | ||
| 643 | |||
| 644 | static inline u32 mlx5_base_mkey(const u32 key) | 643 | static inline u32 mlx5_base_mkey(const u32 key) |
| 645 | { | 644 | { |
| 646 | return key & 0xffffff00u; | 645 | return key & 0xffffff00u; |
| @@ -711,6 +710,9 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev); | |||
| 711 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); | 710 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); |
| 712 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); | 711 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); |
| 713 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); | 712 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); |
| 713 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
| 714 | void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); | ||
| 715 | #endif | ||
| 714 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); | 716 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); |
| 715 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); | 717 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); |
| 716 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); | 718 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); |
| @@ -748,6 +750,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, | |||
| 748 | int npsvs, u32 *sig_index); | 750 | int npsvs, u32 *sig_index); |
| 749 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); | 751 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); |
| 750 | void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); | 752 | void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); |
| 753 | int mlx5_query_odp_caps(struct mlx5_core_dev *dev, | ||
| 754 | struct mlx5_odp_caps *odp_caps); | ||
| 751 | 755 | ||
| 752 | static inline u32 mlx5_mkey_to_idx(u32 mkey) | 756 | static inline u32 mlx5_mkey_to_idx(u32 mkey) |
| 753 | { | 757 | { |
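mlx5_vfree() is removed from driver.h; judging by the deleted body, callers are expected to switch to the generic kvfree(). A sketch of the resulting pattern, with the command handling elided:

static int run_vzalloc_cmd(struct mlx5_core_dev *dev, int inlen)
{
        void *in = mlx5_vzalloc(inlen);

        if (!in)
                return -ENOMEM;
        /* ... build and post the command mailbox here ... */
        kvfree(in);             /* replaces the removed mlx5_vfree() */
        return 0;
}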
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 3fa075daeb1d..61f7a342d1bf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -50,6 +50,9 @@ | |||
| 50 | #define MLX5_BSF_APPTAG_ESCAPE 0x1 | 50 | #define MLX5_BSF_APPTAG_ESCAPE 0x1 |
| 51 | #define MLX5_BSF_APPREF_ESCAPE 0x2 | 51 | #define MLX5_BSF_APPREF_ESCAPE 0x2 |
| 52 | 52 | ||
| 53 | #define MLX5_QPN_BITS 24 | ||
| 54 | #define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1) | ||
| 55 | |||
| 53 | enum mlx5_qp_optpar { | 56 | enum mlx5_qp_optpar { |
| 54 | MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, | 57 | MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, |
| 55 | MLX5_QP_OPTPAR_RRE = 1 << 1, | 58 | MLX5_QP_OPTPAR_RRE = 1 << 1, |
| @@ -189,6 +192,14 @@ struct mlx5_wqe_ctrl_seg { | |||
| 189 | __be32 imm; | 192 | __be32 imm; |
| 190 | }; | 193 | }; |
| 191 | 194 | ||
| 195 | #define MLX5_WQE_CTRL_DS_MASK 0x3f | ||
| 196 | #define MLX5_WQE_CTRL_QPN_MASK 0xffffff00 | ||
| 197 | #define MLX5_WQE_CTRL_QPN_SHIFT 8 | ||
| 198 | #define MLX5_WQE_DS_UNITS 16 | ||
| 199 | #define MLX5_WQE_CTRL_OPCODE_MASK 0xff | ||
| 200 | #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 | ||
| 201 | #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 | ||
| 202 | |||
| 192 | struct mlx5_wqe_xrc_seg { | 203 | struct mlx5_wqe_xrc_seg { |
| 193 | __be32 xrc_srqn; | 204 | __be32 xrc_srqn; |
| 194 | u8 rsvd[12]; | 205 | u8 rsvd[12]; |
| @@ -292,6 +303,8 @@ struct mlx5_wqe_signature_seg { | |||
| 292 | u8 rsvd1[11]; | 303 | u8 rsvd1[11]; |
| 293 | }; | 304 | }; |
| 294 | 305 | ||
| 306 | #define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff | ||
| 307 | |||
| 295 | struct mlx5_wqe_inline_seg { | 308 | struct mlx5_wqe_inline_seg { |
| 296 | __be32 byte_count; | 309 | __be32 byte_count; |
| 297 | }; | 310 | }; |
| @@ -360,9 +373,46 @@ struct mlx5_stride_block_ctrl_seg { | |||
| 360 | __be16 num_entries; | 373 | __be16 num_entries; |
| 361 | }; | 374 | }; |
| 362 | 375 | ||
| 376 | enum mlx5_pagefault_flags { | ||
| 377 | MLX5_PFAULT_REQUESTOR = 1 << 0, | ||
| 378 | MLX5_PFAULT_WRITE = 1 << 1, | ||
| 379 | MLX5_PFAULT_RDMA = 1 << 2, | ||
| 380 | }; | ||
| 381 | |||
| 382 | /* Contains the details of a pagefault. */ | ||
| 383 | struct mlx5_pagefault { | ||
| 384 | u32 bytes_committed; | ||
| 385 | u8 event_subtype; | ||
| 386 | enum mlx5_pagefault_flags flags; | ||
| 387 | union { | ||
| 388 | /* Initiator or send message responder pagefault details. */ | ||
| 389 | struct { | ||
| 390 | /* Received packet size, only valid for responders. */ | ||
| 391 | u32 packet_size; | ||
| 392 | /* | ||
| 393 | * WQE index. Refers to either the send queue or | ||
| 394 | * receive queue, according to event_subtype. | ||
| 395 | */ | ||
| 396 | u16 wqe_index; | ||
| 397 | } wqe; | ||
| 398 | /* RDMA responder pagefault details */ | ||
| 399 | struct { | ||
| 400 | u32 r_key; | ||
| 401 | /* | ||
| 402 | * Received packet size, minimal size page fault | ||
| 403 | * resolution required for forward progress. | ||
| 404 | */ | ||
| 405 | u32 packet_size; | ||
| 406 | u32 rdma_op_len; | ||
| 407 | u64 rdma_va; | ||
| 408 | } rdma; | ||
| 409 | }; | ||
| 410 | }; | ||
| 411 | |||
| 363 | struct mlx5_core_qp { | 412 | struct mlx5_core_qp { |
| 364 | struct mlx5_core_rsc_common common; /* must be first */ | 413 | struct mlx5_core_rsc_common common; /* must be first */ |
| 365 | void (*event) (struct mlx5_core_qp *, int); | 414 | void (*event) (struct mlx5_core_qp *, int); |
| 415 | void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *); | ||
| 366 | int qpn; | 416 | int qpn; |
| 367 | struct mlx5_rsc_debug *dbg; | 417 | struct mlx5_rsc_debug *dbg; |
| 368 | int pid; | 418 | int pid; |
| @@ -530,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u | |||
| 530 | return radix_tree_lookup(&dev->priv.mr_table.tree, key); | 580 | return radix_tree_lookup(&dev->priv.mr_table.tree, key); |
| 531 | } | 581 | } |
| 532 | 582 | ||
| 583 | struct mlx5_page_fault_resume_mbox_in { | ||
| 584 | struct mlx5_inbox_hdr hdr; | ||
| 585 | __be32 flags_qpn; | ||
| 586 | u8 reserved[4]; | ||
| 587 | }; | ||
| 588 | |||
| 589 | struct mlx5_page_fault_resume_mbox_out { | ||
| 590 | struct mlx5_outbox_hdr hdr; | ||
| 591 | u8 rsvd[8]; | ||
| 592 | }; | ||
| 593 | |||
| 533 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 594 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
| 534 | struct mlx5_core_qp *qp, | 595 | struct mlx5_core_qp *qp, |
| 535 | struct mlx5_create_qp_mbox_in *in, | 596 | struct mlx5_create_qp_mbox_in *in, |
| @@ -549,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev); | |||
| 549 | void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); | 610 | void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); |
| 550 | int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); | 611 | int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); |
| 551 | void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); | 612 | void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); |
| 613 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
| 614 | int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, | ||
| 615 | u8 context, int error); | ||
| 616 | #endif | ||
| 552 | 617 | ||
| 553 | static inline const char *mlx5_qp_type_str(int type) | 618 | static inline const char *mlx5_qp_type_str(int type) |
| 554 | { | 619 | { |
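The new pfault_handler member and mlx5_core_page_fault_resume() only exist when on-demand paging is configured. A hedged sketch of a resume wrapper a QP owner might use once the faulted pages are present; the wrapper name is hypothetical:

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void resume_faulted_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp,
                              struct mlx5_pagefault *pfault, int error)
{
        /* Tell the HCA to retry the operation described by *pfault. */
        mlx5_core_page_fault_resume(mdev, qp->qpn, pfault->flags, error);
}
#endif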
diff --git a/include/linux/mm.h b/include/linux/mm.h index f7606d3a0915..f80d0194c9bc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/bit_spinlock.h> | 19 | #include <linux/bit_spinlock.h> |
| 20 | #include <linux/shrinker.h> | 20 | #include <linux/shrinker.h> |
| 21 | #include <linux/resource.h> | 21 | #include <linux/resource.h> |
| 22 | #include <linux/page_ext.h> | ||
| 22 | 23 | ||
| 23 | struct mempolicy; | 24 | struct mempolicy; |
| 24 | struct anon_vma; | 25 | struct anon_vma; |
| @@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout; | |||
| 56 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 57 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
| 57 | #endif | 58 | #endif |
| 58 | 59 | ||
| 60 | /* | ||
| 61 | * To prevent common memory management code from establishing | ||
| 62 | * a zero page mapping on a read fault. | ||
| 63 | * This macro should be defined within <asm/pgtable.h>. | ||
| 64 | * s390 does this to prevent multiplexing of hardware bits | ||
| 65 | * related to the physical page in case of virtualization. | ||
| 66 | */ | ||
| 67 | #ifndef mm_forbids_zeropage | ||
| 68 | #define mm_forbids_zeropage(X) (0) | ||
| 69 | #endif | ||
| 70 | |||
| 59 | extern unsigned long sysctl_user_reserve_kbytes; | 71 | extern unsigned long sysctl_user_reserve_kbytes; |
| 60 | extern unsigned long sysctl_admin_reserve_kbytes; | 72 | extern unsigned long sysctl_admin_reserve_kbytes; |
| 61 | 73 | ||
| @@ -274,8 +286,6 @@ struct vm_operations_struct { | |||
| 274 | */ | 286 | */ |
| 275 | struct mempolicy *(*get_policy)(struct vm_area_struct *vma, | 287 | struct mempolicy *(*get_policy)(struct vm_area_struct *vma, |
| 276 | unsigned long addr); | 288 | unsigned long addr); |
| 277 | int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, | ||
| 278 | const nodemask_t *to, unsigned long flags); | ||
| 279 | #endif | 289 | #endif |
| 280 | /* called by sys_remap_file_pages() to populate non-linear mapping */ | 290 | /* called by sys_remap_file_pages() to populate non-linear mapping */ |
| 281 | int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, | 291 | int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, |
| @@ -2049,7 +2059,22 @@ static inline void vm_stat_account(struct mm_struct *mm, | |||
| 2049 | #endif /* CONFIG_PROC_FS */ | 2059 | #endif /* CONFIG_PROC_FS */ |
| 2050 | 2060 | ||
| 2051 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2061 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 2052 | extern void kernel_map_pages(struct page *page, int numpages, int enable); | 2062 | extern bool _debug_pagealloc_enabled; |
| 2063 | extern void __kernel_map_pages(struct page *page, int numpages, int enable); | ||
| 2064 | |||
| 2065 | static inline bool debug_pagealloc_enabled(void) | ||
| 2066 | { | ||
| 2067 | return _debug_pagealloc_enabled; | ||
| 2068 | } | ||
| 2069 | |||
| 2070 | static inline void | ||
| 2071 | kernel_map_pages(struct page *page, int numpages, int enable) | ||
| 2072 | { | ||
| 2073 | if (!debug_pagealloc_enabled()) | ||
| 2074 | return; | ||
| 2075 | |||
| 2076 | __kernel_map_pages(page, numpages, enable); | ||
| 2077 | } | ||
| 2053 | #ifdef CONFIG_HIBERNATION | 2078 | #ifdef CONFIG_HIBERNATION |
| 2054 | extern bool kernel_page_present(struct page *page); | 2079 | extern bool kernel_page_present(struct page *page); |
| 2055 | #endif /* CONFIG_HIBERNATION */ | 2080 | #endif /* CONFIG_HIBERNATION */ |
| @@ -2083,9 +2108,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, | |||
| 2083 | void __user *, size_t *, loff_t *); | 2108 | void __user *, size_t *, loff_t *); |
| 2084 | #endif | 2109 | #endif |
| 2085 | 2110 | ||
| 2086 | unsigned long shrink_slab(struct shrink_control *shrink, | 2111 | unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, |
| 2087 | unsigned long nr_pages_scanned, | 2112 | unsigned long nr_scanned, |
| 2088 | unsigned long lru_pages); | 2113 | unsigned long nr_eligible); |
| 2089 | 2114 | ||
| 2090 | #ifndef CONFIG_MMU | 2115 | #ifndef CONFIG_MMU |
| 2091 | #define randomize_va_space 0 | 2116 | #define randomize_va_space 0 |
| @@ -2144,20 +2169,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, | |||
| 2144 | unsigned int pages_per_huge_page); | 2169 | unsigned int pages_per_huge_page); |
| 2145 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ | 2170 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
| 2146 | 2171 | ||
| 2172 | extern struct page_ext_operations debug_guardpage_ops; | ||
| 2173 | extern struct page_ext_operations page_poisoning_ops; | ||
| 2174 | |||
| 2147 | #ifdef CONFIG_DEBUG_PAGEALLOC | 2175 | #ifdef CONFIG_DEBUG_PAGEALLOC |
| 2148 | extern unsigned int _debug_guardpage_minorder; | 2176 | extern unsigned int _debug_guardpage_minorder; |
| 2177 | extern bool _debug_guardpage_enabled; | ||
| 2149 | 2178 | ||
| 2150 | static inline unsigned int debug_guardpage_minorder(void) | 2179 | static inline unsigned int debug_guardpage_minorder(void) |
| 2151 | { | 2180 | { |
| 2152 | return _debug_guardpage_minorder; | 2181 | return _debug_guardpage_minorder; |
| 2153 | } | 2182 | } |
| 2154 | 2183 | ||
| 2184 | static inline bool debug_guardpage_enabled(void) | ||
| 2185 | { | ||
| 2186 | return _debug_guardpage_enabled; | ||
| 2187 | } | ||
| 2188 | |||
| 2155 | static inline bool page_is_guard(struct page *page) | 2189 | static inline bool page_is_guard(struct page *page) |
| 2156 | { | 2190 | { |
| 2157 | return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); | 2191 | struct page_ext *page_ext; |
| 2192 | |||
| 2193 | if (!debug_guardpage_enabled()) | ||
| 2194 | return false; | ||
| 2195 | |||
| 2196 | page_ext = lookup_page_ext(page); | ||
| 2197 | return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | ||
| 2158 | } | 2198 | } |
| 2159 | #else | 2199 | #else |
| 2160 | static inline unsigned int debug_guardpage_minorder(void) { return 0; } | 2200 | static inline unsigned int debug_guardpage_minorder(void) { return 0; } |
| 2201 | static inline bool debug_guardpage_enabled(void) { return false; } | ||
| 2161 | static inline bool page_is_guard(struct page *page) { return false; } | 2202 | static inline bool page_is_guard(struct page *page) { return false; } |
| 2162 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 2203 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
| 2163 | 2204 | ||
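kernel_map_pages() is now a static inline that short-circuits unless debug_pagealloc was enabled at boot, and only then calls into the arch hook __kernel_map_pages(). A sketch of the unchanged call pattern from a page-freeing path; the function name is illustrative:

static void unmap_freed_pages(struct page *page, unsigned int order)
{
        /* No-op unless booted with debug_pagealloc=on. */
        kernel_map_pages(page, 1 << order, 0);
}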
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 004e9d17b47e..6d34aa266a8c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <linux/rwsem.h> | 10 | #include <linux/rwsem.h> |
| 11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
| 12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
| 13 | #include <linux/page-debug-flags.h> | ||
| 14 | #include <linux/uprobes.h> | 13 | #include <linux/uprobes.h> |
| 15 | #include <linux/page-flags-layout.h> | 14 | #include <linux/page-flags-layout.h> |
| 16 | #include <asm/page.h> | 15 | #include <asm/page.h> |
| @@ -22,6 +21,7 @@ | |||
| 22 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) | 21 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) |
| 23 | 22 | ||
| 24 | struct address_space; | 23 | struct address_space; |
| 24 | struct mem_cgroup; | ||
| 25 | 25 | ||
| 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) | 26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) |
| 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ | 27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ |
| @@ -167,6 +167,10 @@ struct page { | |||
| 167 | struct page *first_page; /* Compound tail pages */ | 167 | struct page *first_page; /* Compound tail pages */ |
| 168 | }; | 168 | }; |
| 169 | 169 | ||
| 170 | #ifdef CONFIG_MEMCG | ||
| 171 | struct mem_cgroup *mem_cgroup; | ||
| 172 | #endif | ||
| 173 | |||
| 170 | /* | 174 | /* |
| 171 | * On machines where all RAM is mapped into kernel address space, | 175 | * On machines where all RAM is mapped into kernel address space, |
| 172 | * we can simply calculate the virtual address. On machines with | 176 | * we can simply calculate the virtual address. On machines with |
| @@ -181,9 +185,6 @@ struct page { | |||
| 181 | void *virtual; /* Kernel virtual address (NULL if | 185 | void *virtual; /* Kernel virtual address (NULL if |
| 182 | not kmapped, ie. highmem) */ | 186 | not kmapped, ie. highmem) */ |
| 183 | #endif /* WANT_PAGE_VIRTUAL */ | 187 | #endif /* WANT_PAGE_VIRTUAL */ |
| 184 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
| 185 | unsigned long debug_flags; /* Use atomic bitops on this */ | ||
| 186 | #endif | ||
| 187 | 188 | ||
| 188 | #ifdef CONFIG_KMEMCHECK | 189 | #ifdef CONFIG_KMEMCHECK |
| 189 | /* | 190 | /* |
| @@ -529,4 +530,12 @@ enum tlb_flush_reason { | |||
| 529 | NR_TLB_FLUSH_REASONS, | 530 | NR_TLB_FLUSH_REASONS, |
| 530 | }; | 531 | }; |
| 531 | 532 | ||
| 533 | /* | ||
| 534 | * A swap entry has to fit into a "unsigned long", as the entry is hidden | ||
| 535 | * in the "index" field of the swapper address space. | ||
| 536 | */ | ||
| 537 | typedef struct { | ||
| 538 | unsigned long val; | ||
| 539 | } swp_entry_t; | ||
| 540 | |||
| 532 | #endif /* _LINUX_MM_TYPES_H */ | 541 | #endif /* _LINUX_MM_TYPES_H */ |
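swp_entry_t moves into mm_types.h unchanged: it is still a bare wrapper around an unsigned long, normally built and unpacked with the swp_entry()/swp_type()/swp_offset() helpers from <linux/swapops.h>, e.g.:

static unsigned long swap_index_for(int type, pgoff_t offset)
{
        swp_entry_t entry = swp_entry(type, offset);

        return entry.val;       /* small enough for the page-cache index slot */
}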
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 88787bb4b3b9..95243d28a0ee 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
| @@ -98,11 +98,11 @@ struct mmu_notifier_ops { | |||
| 98 | /* | 98 | /* |
| 99 | * invalidate_range_start() and invalidate_range_end() must be | 99 | * invalidate_range_start() and invalidate_range_end() must be |
| 100 | * paired and are called only when the mmap_sem and/or the | 100 | * paired and are called only when the mmap_sem and/or the |
| 101 | * locks protecting the reverse maps are held. The subsystem | 101 | * locks protecting the reverse maps are held. If the subsystem |
| 102 | * must guarantee that no additional references are taken to | 102 | * can't guarantee that no additional references are taken to |
| 103 | * the pages in the range established between the call to | 103 | * the pages in the range, it has to implement the |
| 104 | * invalidate_range_start() and the matching call to | 104 | * invalidate_range() notifier to remove any references taken |
| 105 | * invalidate_range_end(). | 105 | * after invalidate_range_start(). |
| 106 | * | 106 | * |
| 107 | * Invalidation of multiple concurrent ranges may be | 107 | * Invalidation of multiple concurrent ranges may be |
| 108 | * optionally permitted by the driver. Either way the | 108 | * optionally permitted by the driver. Either way the |
| @@ -144,6 +144,29 @@ struct mmu_notifier_ops { | |||
| 144 | void (*invalidate_range_end)(struct mmu_notifier *mn, | 144 | void (*invalidate_range_end)(struct mmu_notifier *mn, |
| 145 | struct mm_struct *mm, | 145 | struct mm_struct *mm, |
| 146 | unsigned long start, unsigned long end); | 146 | unsigned long start, unsigned long end); |
| 147 | |||
| 148 | /* | ||
| 149 | * invalidate_range() is either called between | ||
| 150 | * invalidate_range_start() and invalidate_range_end() when the | ||
| 151 | * VM has to free pages that were unmapped, but before the | ||
| 152 | * pages are actually freed, or outside of _start()/_end() when | ||
| 153 | * a (remote) TLB is necessary. | ||
| 154 | * | ||
| 155 | * If invalidate_range() is used to manage a non-CPU TLB with | ||
| 156 | * shared page-tables, it is not necessary to implement the | ||
| 157 | * invalidate_range_start()/end() notifiers, as | ||
| 158 | * invalidate_range() already catches the points in time when an | ||
| 159 | * external TLB range needs to be flushed. | ||
| 160 | * | ||
| 161 | * The invalidate_range() function is called under the ptl | ||
| 162 | * spin-lock and not allowed to sleep. | ||
| 163 | * | ||
| 164 | * Note that this function might be called with just a sub-range | ||
| 165 | * of what was passed to invalidate_range_start()/end(), if | ||
| 166 | * called between those functions. | ||
| 167 | */ | ||
| 168 | void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, | ||
| 169 | unsigned long start, unsigned long end); | ||
| 147 | }; | 170 | }; |
| 148 | 171 | ||
| 149 | /* | 172 | /* |
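For a device whose IOMMU shares the CPU page tables, the new documentation in the hunk above suggests implementing only .invalidate_range. A hedged sketch; the flush helper and its body are placeholders, and the callback must not sleep since it runs under the ptl:

static void my_iommu_flush_range(struct mmu_notifier *mn,
                                 unsigned long start, unsigned long end)
{
        /* Placeholder: issue the device-specific TLB range invalidation. */
}

static void my_iommu_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        my_iommu_flush_range(mn, start, end);
}

static const struct mmu_notifier_ops my_iommu_mn_ops = {
        .invalidate_range = my_iommu_invalidate_range,
};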
| @@ -154,7 +177,7 @@ struct mmu_notifier_ops { | |||
| 154 | * Therefore notifier chains can only be traversed when either | 177 | * Therefore notifier chains can only be traversed when either |
| 155 | * | 178 | * |
| 156 | * 1. mmap_sem is held. | 179 | * 1. mmap_sem is held. |
| 157 | * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). | 180 | * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). |
| 158 | * 3. No other concurrent thread can access the list (release) | 181 | * 3. No other concurrent thread can access the list (release) |
| 159 | */ | 182 | */ |
| 160 | struct mmu_notifier { | 183 | struct mmu_notifier { |
| @@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, | |||
| 190 | unsigned long start, unsigned long end); | 213 | unsigned long start, unsigned long end); |
| 191 | extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | 214 | extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, |
| 192 | unsigned long start, unsigned long end); | 215 | unsigned long start, unsigned long end); |
| 216 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
| 217 | unsigned long start, unsigned long end); | ||
| 193 | 218 | ||
| 194 | static inline void mmu_notifier_release(struct mm_struct *mm) | 219 | static inline void mmu_notifier_release(struct mm_struct *mm) |
| 195 | { | 220 | { |
| @@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
| 242 | __mmu_notifier_invalidate_range_end(mm, start, end); | 267 | __mmu_notifier_invalidate_range_end(mm, start, end); |
| 243 | } | 268 | } |
| 244 | 269 | ||
| 270 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
| 271 | unsigned long start, unsigned long end) | ||
| 272 | { | ||
| 273 | if (mm_has_notifiers(mm)) | ||
| 274 | __mmu_notifier_invalidate_range(mm, start, end); | ||
| 275 | } | ||
| 276 | |||
| 245 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) | 277 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) |
| 246 | { | 278 | { |
| 247 | mm->mmu_notifier_mm = NULL; | 279 | mm->mmu_notifier_mm = NULL; |
| @@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 279 | __young; \ | 311 | __young; \ |
| 280 | }) | 312 | }) |
| 281 | 313 | ||
| 314 | #define ptep_clear_flush_notify(__vma, __address, __ptep) \ | ||
| 315 | ({ \ | ||
| 316 | unsigned long ___addr = __address & PAGE_MASK; \ | ||
| 317 | struct mm_struct *___mm = (__vma)->vm_mm; \ | ||
| 318 | pte_t ___pte; \ | ||
| 319 | \ | ||
| 320 | ___pte = ptep_clear_flush(__vma, __address, __ptep); \ | ||
| 321 | mmu_notifier_invalidate_range(___mm, ___addr, \ | ||
| 322 | ___addr + PAGE_SIZE); \ | ||
| 323 | \ | ||
| 324 | ___pte; \ | ||
| 325 | }) | ||
| 326 | |||
| 327 | #define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ | ||
| 328 | ({ \ | ||
| 329 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | ||
| 330 | struct mm_struct *___mm = (__vma)->vm_mm; \ | ||
| 331 | pmd_t ___pmd; \ | ||
| 332 | \ | ||
| 333 | ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ | ||
| 334 | mmu_notifier_invalidate_range(___mm, ___haddr, \ | ||
| 335 | ___haddr + HPAGE_PMD_SIZE); \ | ||
| 336 | \ | ||
| 337 | ___pmd; \ | ||
| 338 | }) | ||
| 339 | |||
| 340 | #define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ | ||
| 341 | ({ \ | ||
| 342 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | ||
| 343 | pmd_t ___pmd; \ | ||
| 344 | \ | ||
| 345 | ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ | ||
| 346 | mmu_notifier_invalidate_range(__mm, ___haddr, \ | ||
| 347 | ___haddr + HPAGE_PMD_SIZE); \ | ||
| 348 | \ | ||
| 349 | ___pmd; \ | ||
| 350 | }) | ||
| 351 | |||
| 282 | /* | 352 | /* |
| 283 | * set_pte_at_notify() sets the pte _after_ running the notifier. | 353 | * set_pte_at_notify() sets the pte _after_ running the notifier. |
| 284 | * This is safe to start by updating the secondary MMUs, because the primary MMU | 354 | * This is safe to start by updating the secondary MMUs, because the primary MMU |
| @@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
| 342 | { | 412 | { |
| 343 | } | 413 | } |
| 344 | 414 | ||
| 415 | static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | ||
| 416 | unsigned long start, unsigned long end) | ||
| 417 | { | ||
| 418 | } | ||
| 419 | |||
| 345 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) | 420 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) |
| 346 | { | 421 | { |
| 347 | } | 422 | } |
| @@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 352 | 427 | ||
| 353 | #define ptep_clear_flush_young_notify ptep_clear_flush_young | 428 | #define ptep_clear_flush_young_notify ptep_clear_flush_young |
| 354 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young | 429 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young |
| 430 | #define ptep_clear_flush_notify ptep_clear_flush | ||
| 431 | #define pmdp_clear_flush_notify pmdp_clear_flush | ||
| 432 | #define pmdp_get_and_clear_notify pmdp_get_and_clear | ||
| 355 | #define set_pte_at_notify set_pte_at | 433 | #define set_pte_at_notify set_pte_at |
| 356 | 434 | ||
| 357 | #endif /* CONFIG_MMU_NOTIFIER */ | 435 | #endif /* CONFIG_MMU_NOTIFIER */ |
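As an illustration of the invalidate_range() semantics documented in the hunks above, a minimal (hypothetical) user of the new callback might look like the sketch below; struct my_dev and my_tlb_flush_range() are invented names, only the mmu_notifier API itself comes from this header.

#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

struct my_dev {
        struct mmu_notifier mn;
};

/* Placeholder for the device-specific external-TLB shootdown (assumption). */
static void my_tlb_flush_range(struct my_dev *dev, unsigned long start,
                               unsigned long end)
{
}

static void my_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct my_dev *dev = container_of(mn, struct my_dev, mn);

        /* Runs under the ptl spinlock: must not sleep. */
        my_tlb_flush_range(dev, start, end);
}

static const struct mmu_notifier_ops my_mn_ops = {
        /* With shared page tables, _start()/_end() can be left unimplemented. */
        .invalidate_range = my_invalidate_range,
};

static int my_dev_bind_mm(struct my_dev *dev, struct mm_struct *mm)
{
        dev->mn.ops = &my_mn_ops;
        return mmu_notifier_register(&dev->mn, mm);
}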
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c04..2f0856d14b21 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -722,8 +722,8 @@ typedef struct pglist_data { | |||
| 722 | int nr_zones; | 722 | int nr_zones; |
| 723 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ | 723 | #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ |
| 724 | struct page *node_mem_map; | 724 | struct page *node_mem_map; |
| 725 | #ifdef CONFIG_MEMCG | 725 | #ifdef CONFIG_PAGE_EXTENSION |
| 726 | struct page_cgroup *node_page_cgroup; | 726 | struct page_ext *node_page_ext; |
| 727 | #endif | 727 | #endif |
| 728 | #endif | 728 | #endif |
| 729 | #ifndef CONFIG_NO_BOOTMEM | 729 | #ifndef CONFIG_NO_BOOTMEM |
| @@ -1078,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |||
| 1078 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | 1078 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
| 1079 | 1079 | ||
| 1080 | struct page; | 1080 | struct page; |
| 1081 | struct page_cgroup; | 1081 | struct page_ext; |
| 1082 | struct mem_section { | 1082 | struct mem_section { |
| 1083 | /* | 1083 | /* |
| 1084 | * This is, logically, a pointer to an array of struct | 1084 | * This is, logically, a pointer to an array of struct |
| @@ -1096,12 +1096,12 @@ struct mem_section { | |||
| 1096 | 1096 | ||
| 1097 | /* See declaration of similar field in struct zone */ | 1097 | /* See declaration of similar field in struct zone */ |
| 1098 | unsigned long *pageblock_flags; | 1098 | unsigned long *pageblock_flags; |
| 1099 | #ifdef CONFIG_MEMCG | 1099 | #ifdef CONFIG_PAGE_EXTENSION |
| 1100 | /* | 1100 | /* |
| 1101 | * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use | 1101 | * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use |
| 1102 | * section. (see memcontrol.h/page_cgroup.h about this.) | 1102 | * section. (see page_ext.h about this.) |
| 1103 | */ | 1103 | */ |
| 1104 | struct page_cgroup *page_cgroup; | 1104 | struct page_ext *page_ext; |
| 1105 | unsigned long pad; | 1105 | unsigned long pad; |
| 1106 | #endif | 1106 | #endif |
| 1107 | /* | 1107 | /* |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 44eeef0da186..745def862580 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -69,7 +69,7 @@ struct ieee1394_device_id { | |||
| 69 | * @bDeviceClass: Class of device; numbers are assigned | 69 | * @bDeviceClass: Class of device; numbers are assigned |
| 70 | * by the USB forum. Products may choose to implement classes, | 70 | * by the USB forum. Products may choose to implement classes, |
| 71 | * or be vendor-specific. Device classes specify behavior of all | 71 | * or be vendor-specific. Device classes specify behavior of all |
| 72 | * the interfaces on a devices. | 72 | * the interfaces on a device. |
| 73 | * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. | 73 | * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. |
| 74 | * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. | 74 | * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. |
| 75 | * @bInterfaceClass: Class of interface; numbers are assigned | 75 | * @bInterfaceClass: Class of interface; numbers are assigned |
diff --git a/include/linux/module.h b/include/linux/module.h index 71f282a4e307..ebfb0e153c6a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -210,20 +210,6 @@ enum module_state { | |||
| 210 | MODULE_STATE_UNFORMED, /* Still setting it up. */ | 210 | MODULE_STATE_UNFORMED, /* Still setting it up. */ |
| 211 | }; | 211 | }; |
| 212 | 212 | ||
| 213 | /** | ||
| 214 | * struct module_ref - per cpu module reference counts | ||
| 215 | * @incs: number of module get on this cpu | ||
| 216 | * @decs: number of module put on this cpu | ||
| 217 | * | ||
| 218 | * We force an alignment on 8 or 16 bytes, so that alloc_percpu() | ||
| 219 | * put @incs/@decs in same cache line, with no extra memory cost, | ||
| 220 | * since alloc_percpu() is fine grained. | ||
| 221 | */ | ||
| 222 | struct module_ref { | ||
| 223 | unsigned long incs; | ||
| 224 | unsigned long decs; | ||
| 225 | } __attribute((aligned(2 * sizeof(unsigned long)))); | ||
| 226 | |||
| 227 | struct module { | 213 | struct module { |
| 228 | enum module_state state; | 214 | enum module_state state; |
| 229 | 215 | ||
| @@ -367,7 +353,7 @@ struct module { | |||
| 367 | /* Destruction function. */ | 353 | /* Destruction function. */ |
| 368 | void (*exit)(void); | 354 | void (*exit)(void); |
| 369 | 355 | ||
| 370 | struct module_ref __percpu *refptr; | 356 | atomic_t refcnt; |
| 371 | #endif | 357 | #endif |
| 372 | 358 | ||
| 373 | #ifdef CONFIG_CONSTRUCTORS | 359 | #ifdef CONFIG_CONSTRUCTORS |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index e4d451e4600b..3d4ea7eb2b68 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -455,8 +455,21 @@ struct nand_hw_control { | |||
| 455 | * be provided if a hardware ECC is available | 455 | * be provided if a hardware ECC is available |
| 456 | * @calculate: function for ECC calculation or readback from ECC hardware | 456 | * @calculate: function for ECC calculation or readback from ECC hardware |
| 457 | * @correct: function for ECC correction, matching to ECC generator (sw/hw) | 457 | * @correct: function for ECC correction, matching to ECC generator (sw/hw) |
| 458 | * @read_page_raw: function to read a raw page without ECC | 458 | * @read_page_raw: function to read a raw page without ECC. This function |
| 459 | * @write_page_raw: function to write a raw page without ECC | 459 | * should hide the specific layout used by the ECC |
| 460 | * controller and always return contiguous in-band and | ||
| 461 | * out-of-band data even if they're not stored | ||
| 462 | * contiguously on the NAND chip (e.g. | ||
| 463 | * NAND_ECC_HW_SYNDROME interleaves in-band and | ||
| 464 | * out-of-band data). | ||
| 465 | * @write_page_raw: function to write a raw page without ECC. This function | ||
| 466 | * should hide the specific layout used by the ECC | ||
| 467 | * controller and consider the passed data as contiguous | ||
| 468 | * in-band and out-of-band data. The ECC controller is | ||
| 469 | * responsible for doing the appropriate transformations | ||
| 470 | * to adapt to its specific layout (e.g. | ||
| 471 | * NAND_ECC_HW_SYNDROME interleaves in-band and | ||
| 472 | * out-of-band data). | ||
| 460 | * @read_page: function to read a page according to the ECC generator | 473 | * @read_page: function to read a page according to the ECC generator |
| 461 | * requirements; returns maximum number of bitflips corrected in | 474 | * requirements; returns maximum number of bitflips corrected in |
| 462 | * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error | 475 | * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error |
| @@ -723,6 +736,7 @@ struct nand_chip { | |||
| 723 | #define NAND_MFR_EON 0x92 | 736 | #define NAND_MFR_EON 0x92 |
| 724 | #define NAND_MFR_SANDISK 0x45 | 737 | #define NAND_MFR_SANDISK 0x45 |
| 725 | #define NAND_MFR_INTEL 0x89 | 738 | #define NAND_MFR_INTEL 0x89 |
| 739 | #define NAND_MFR_ATO 0x9b | ||
| 726 | 740 | ||
| 727 | /* The maximum expected count of bytes in the NAND ID sequence */ | 741 | /* The maximum expected count of bytes in the NAND ID sequence */ |
| 728 | #define NAND_MAX_ID_LEN 8 | 742 | #define NAND_MAX_ID_LEN 8 |
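To make the read_page_raw contract spelled out above concrete: a syndrome-layout controller has to de-interleave what is stored on flash so the caller still sees contiguous in-band data in buf and contiguous out-of-band bytes in chip->oob_poi. A simplified, hypothetical sketch follows (real drivers also handle prepad/postpad and the trailing free OOB bytes):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int my_read_page_raw_syndrome(struct mtd_info *mtd,
                                     struct nand_chip *chip, uint8_t *buf,
                                     int oob_required, int page)
{
        int eccsize = chip->ecc.size;
        int eccbytes = chip->ecc.bytes;
        uint8_t *oob = chip->oob_poi;
        int step;

        for (step = 0; step < chip->ecc.steps; step++) {
                /* Data and ECC are interleaved on the flash... */
                chip->read_buf(mtd, buf, eccsize);
                buf += eccsize;
                /* ...but are handed back as contiguous in-band/OOB buffers. */
                chip->read_buf(mtd, oob, eccbytes);
                oob += eccbytes;
        }

        return 0;
}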
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 046a0a2e4c4e..63aeccf9ddc8 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
| @@ -116,6 +116,10 @@ enum spi_nor_ops { | |||
| 116 | SPI_NOR_OPS_UNLOCK, | 116 | SPI_NOR_OPS_UNLOCK, |
| 117 | }; | 117 | }; |
| 118 | 118 | ||
| 119 | enum spi_nor_option_flags { | ||
| 120 | SNOR_F_USE_FSR = BIT(0), | ||
| 121 | }; | ||
| 122 | |||
| 119 | /** | 123 | /** |
| 120 | * struct spi_nor - Structure for defining the SPI NOR layer | 124 | * struct spi_nor - Structure for defining the SPI NOR layer |
| 121 | * @mtd: point to a mtd_info structure | 125 | * @mtd: point to a mtd_info structure |
| @@ -129,6 +133,7 @@ enum spi_nor_ops { | |||
| 129 | * @program_opcode: the program opcode | 133 | * @program_opcode: the program opcode |
| 130 | * @flash_read: the mode of the read | 134 | * @flash_read: the mode of the read |
| 131 | * @sst_write_second: used by the SST write operation | 135 | * @sst_write_second: used by the SST write operation |
| 136 | * @flags: flag options for the current SPI-NOR (SNOR_F_*) | ||
| 132 | * @cfg: used by the read_xfer/write_xfer | 137 | * @cfg: used by the read_xfer/write_xfer |
| 133 | * @cmd_buf: used by the write_reg | 138 | * @cmd_buf: used by the write_reg |
| 134 | * @prepare: [OPTIONAL] do some preparations for the | 139 | * @prepare: [OPTIONAL] do some preparations for the |
| @@ -139,9 +144,6 @@ enum spi_nor_ops { | |||
| 139 | * @write_xfer: [OPTIONAL] the fundamental write primitive | 144 | * @write_xfer: [OPTIONAL] the fundamental write primitive |
| 140 | * @read_reg: [DRIVER-SPECIFIC] read out the register | 145 | * @read_reg: [DRIVER-SPECIFIC] read out the register |
| 141 | * @write_reg: [DRIVER-SPECIFIC] write data to the register | 146 | * @write_reg: [DRIVER-SPECIFIC] write data to the register |
| 142 | * @read_id: [REPLACEABLE] read out the ID data, and find | ||
| 143 | * the proper spi_device_id | ||
| 144 | * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready | ||
| 145 | * @read: [DRIVER-SPECIFIC] read data from the SPI NOR | 147 | * @read: [DRIVER-SPECIFIC] read data from the SPI NOR |
| 146 | * @write: [DRIVER-SPECIFIC] write data to the SPI NOR | 148 | * @write: [DRIVER-SPECIFIC] write data to the SPI NOR |
| 147 | * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR | 149 | * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR |
| @@ -160,6 +162,7 @@ struct spi_nor { | |||
| 160 | u8 program_opcode; | 162 | u8 program_opcode; |
| 161 | enum read_mode flash_read; | 163 | enum read_mode flash_read; |
| 162 | bool sst_write_second; | 164 | bool sst_write_second; |
| 165 | u32 flags; | ||
| 163 | struct spi_nor_xfer_cfg cfg; | 166 | struct spi_nor_xfer_cfg cfg; |
| 164 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; | 167 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; |
| 165 | 168 | ||
| @@ -172,8 +175,6 @@ struct spi_nor { | |||
| 172 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); | 175 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); |
| 173 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, | 176 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, |
| 174 | int write_enable); | 177 | int write_enable); |
| 175 | const struct spi_device_id *(*read_id)(struct spi_nor *nor); | ||
| 176 | int (*wait_till_ready)(struct spi_nor *nor); | ||
| 177 | 178 | ||
| 178 | int (*read)(struct spi_nor *nor, loff_t from, | 179 | int (*read)(struct spi_nor *nor, loff_t from, |
| 179 | size_t len, size_t *retlen, u_char *read_buf); | 180 | size_t len, size_t *retlen, u_char *read_buf); |
diff --git a/include/linux/namei.h b/include/linux/namei.h index 492de72560fa..c8990779f0c3 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -7,21 +7,10 @@ | |||
| 7 | #include <linux/path.h> | 7 | #include <linux/path.h> |
| 8 | 8 | ||
| 9 | struct vfsmount; | 9 | struct vfsmount; |
| 10 | struct nameidata; | ||
| 10 | 11 | ||
| 11 | enum { MAX_NESTED_LINKS = 8 }; | 12 | enum { MAX_NESTED_LINKS = 8 }; |
| 12 | 13 | ||
| 13 | struct nameidata { | ||
| 14 | struct path path; | ||
| 15 | struct qstr last; | ||
| 16 | struct path root; | ||
| 17 | struct inode *inode; /* path.dentry.d_inode */ | ||
| 18 | unsigned int flags; | ||
| 19 | unsigned seq, m_seq; | ||
| 20 | int last_type; | ||
| 21 | unsigned depth; | ||
| 22 | char *saved_names[MAX_NESTED_LINKS + 1]; | ||
| 23 | }; | ||
| 24 | |||
| 25 | /* | 14 | /* |
| 26 | * Type of the last component on LOOKUP_PARENT | 15 | * Type of the last component on LOOKUP_PARENT |
| 27 | */ | 16 | */ |
| @@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *); | |||
| 82 | extern void unlock_rename(struct dentry *, struct dentry *); | 71 | extern void unlock_rename(struct dentry *, struct dentry *); |
| 83 | 72 | ||
| 84 | extern void nd_jump_link(struct nameidata *nd, struct path *path); | 73 | extern void nd_jump_link(struct nameidata *nd, struct path *path); |
| 85 | 74 | extern void nd_set_link(struct nameidata *nd, char *path); | |
| 86 | static inline void nd_set_link(struct nameidata *nd, char *path) | 75 | extern char *nd_get_link(struct nameidata *nd); |
| 87 | { | ||
| 88 | nd->saved_names[nd->depth] = path; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline char *nd_get_link(struct nameidata *nd) | ||
| 92 | { | ||
| 93 | return nd->saved_names[nd->depth]; | ||
| 94 | } | ||
| 95 | 76 | ||
| 96 | static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) | 77 | static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) |
| 97 | { | 78 | { |
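With nd_set_link()/nd_get_link() moved out of line above, filesystems keep calling them exactly as before. A hypothetical follow_link/put_link pair of this era might look like the sketch below; the use of i_private to hold the target string is purely illustrative:

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/err.h>

static void *my_fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        char *link = kstrdup(dentry->d_inode->i_private, GFP_KERNEL);

        if (!link)
                return ERR_PTR(-ENOMEM);
        nd_set_link(nd, link);
        return link;            /* cookie handed back to ->put_link() */
}

static void my_fs_put_link(struct dentry *dentry, struct nameidata *nd,
                           void *cookie)
{
        kfree(cookie);
}

static const struct inode_operations my_fs_symlink_iops = {
        .readlink       = generic_readlink,
        .follow_link    = my_fs_follow_link,
        .put_link       = my_fs_put_link,
};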
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b7..8e30685affeb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
| @@ -47,9 +47,9 @@ enum { | |||
| 47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ | 47 | NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ |
| 48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ | 48 | NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ |
| 49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ | 49 | NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ |
| 50 | NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ | 50 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ |
| 51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ | 51 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ |
| 52 | NETIF_F_GSO_MPLS_BIT, | 52 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, |
| 53 | 53 | ||
| 54 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ | 54 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ |
| 55 | NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ | 55 | NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ |
| @@ -118,7 +118,7 @@ enum { | |||
| 118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) | 118 | #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) |
| 119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) | 119 | #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) |
| 120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) | 120 | #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) |
| 121 | #define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) | 121 | #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) |
| 122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) | 122 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) |
| 123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) | 123 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) |
| 124 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) | 124 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) |
| @@ -181,7 +181,6 @@ enum { | |||
| 181 | NETIF_F_GSO_IPIP | \ | 181 | NETIF_F_GSO_IPIP | \ |
| 182 | NETIF_F_GSO_SIT | \ | 182 | NETIF_F_GSO_SIT | \ |
| 183 | NETIF_F_GSO_UDP_TUNNEL | \ | 183 | NETIF_F_GSO_UDP_TUNNEL | \ |
| 184 | NETIF_F_GSO_UDP_TUNNEL_CSUM | \ | 184 | NETIF_F_GSO_UDP_TUNNEL_CSUM) |
| 185 | NETIF_F_GSO_MPLS) | ||
| 186 | 185 | ||
| 187 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 186 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a..c31f74d76ebd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -57,6 +57,8 @@ struct device; | |||
| 57 | struct phy_device; | 57 | struct phy_device; |
| 58 | /* 802.11 specific */ | 58 | /* 802.11 specific */ |
| 59 | struct wireless_dev; | 59 | struct wireless_dev; |
| 60 | /* 802.15.4 specific */ | ||
| 61 | struct wpan_dev; | ||
| 60 | 62 | ||
| 61 | void netdev_set_default_ethtool_ops(struct net_device *dev, | 63 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
| 62 | const struct ethtool_ops *ops); | 64 | const struct ethtool_ops *ops); |
| @@ -314,6 +316,7 @@ struct napi_struct { | |||
| 314 | struct net_device *dev; | 316 | struct net_device *dev; |
| 315 | struct sk_buff *gro_list; | 317 | struct sk_buff *gro_list; |
| 316 | struct sk_buff *skb; | 318 | struct sk_buff *skb; |
| 319 | struct hrtimer timer; | ||
| 317 | struct list_head dev_list; | 320 | struct list_head dev_list; |
| 318 | struct hlist_node napi_hash_node; | 321 | struct hlist_node napi_hash_node; |
| 319 | unsigned int napi_id; | 322 | unsigned int napi_id; |
| @@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; | |||
| 386 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | 389 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
| 387 | 390 | ||
| 388 | void __napi_schedule(struct napi_struct *n); | 391 | void __napi_schedule(struct napi_struct *n); |
| 392 | void __napi_schedule_irqoff(struct napi_struct *n); | ||
| 389 | 393 | ||
| 390 | static inline bool napi_disable_pending(struct napi_struct *n) | 394 | static inline bool napi_disable_pending(struct napi_struct *n) |
| 391 | { | 395 | { |
| @@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) | |||
| 420 | __napi_schedule(n); | 424 | __napi_schedule(n); |
| 421 | } | 425 | } |
| 422 | 426 | ||
| 427 | /** | ||
| 428 | * napi_schedule_irqoff - schedule NAPI poll | ||
| 429 | * @n: napi context | ||
| 430 | * | ||
| 431 | * Variant of napi_schedule(), assuming hard irqs are masked. | ||
| 432 | */ | ||
| 433 | static inline void napi_schedule_irqoff(struct napi_struct *n) | ||
| 434 | { | ||
| 435 | if (napi_schedule_prep(n)) | ||
| 436 | __napi_schedule_irqoff(n); | ||
| 437 | } | ||
| 438 | |||
| 423 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ | 439 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
| 424 | static inline bool napi_reschedule(struct napi_struct *napi) | 440 | static inline bool napi_reschedule(struct napi_struct *napi) |
| 425 | { | 441 | { |
| @@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) | |||
| 430 | return false; | 446 | return false; |
| 431 | } | 447 | } |
| 432 | 448 | ||
| 449 | void __napi_complete(struct napi_struct *n); | ||
| 450 | void napi_complete_done(struct napi_struct *n, int work_done); | ||
| 433 | /** | 451 | /** |
| 434 | * napi_complete - NAPI processing complete | 452 | * napi_complete - NAPI processing complete |
| 435 | * @n: napi context | 453 | * @n: napi context |
| 436 | * | 454 | * |
| 437 | * Mark NAPI processing as complete. | 455 | * Mark NAPI processing as complete. |
| 456 | * Consider using napi_complete_done() instead. | ||
| 438 | */ | 457 | */ |
| 439 | void __napi_complete(struct napi_struct *n); | 458 | static inline void napi_complete(struct napi_struct *n) |
| 440 | void napi_complete(struct napi_struct *n); | 459 | { |
| 460 | return napi_complete_done(n, 0); | ||
| 461 | } | ||
| 441 | 462 | ||
| 442 | /** | 463 | /** |
| 443 | * napi_by_id - lookup a NAPI by napi_id | 464 | * napi_by_id - lookup a NAPI by napi_id |
| @@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); | |||
| 472 | * Stop NAPI from being scheduled on this context. | 493 | * Stop NAPI from being scheduled on this context. |
| 473 | * Waits till any outstanding processing completes. | 494 | * Waits till any outstanding processing completes. |
| 474 | */ | 495 | */ |
| 475 | static inline void napi_disable(struct napi_struct *n) | 496 | void napi_disable(struct napi_struct *n); |
| 476 | { | ||
| 477 | might_sleep(); | ||
| 478 | set_bit(NAPI_STATE_DISABLE, &n->state); | ||
| 479 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | ||
| 480 | msleep(1); | ||
| 481 | clear_bit(NAPI_STATE_DISABLE, &n->state); | ||
| 482 | } | ||
| 483 | 497 | ||
| 484 | /** | 498 | /** |
| 485 | * napi_enable - enable NAPI scheduling | 499 | * napi_enable - enable NAPI scheduling |
| @@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { | |||
| 740 | }; | 754 | }; |
| 741 | #endif | 755 | #endif |
| 742 | 756 | ||
| 743 | #define MAX_PHYS_PORT_ID_LEN 32 | 757 | #define MAX_PHYS_ITEM_ID_LEN 32 |
| 744 | 758 | ||
| 745 | /* This structure holds a unique identifier to identify the | 759 | /* This structure holds a unique identifier to identify some |
| 746 | * physical port used by a netdevice. | 760 | * physical item (port for example) used by a netdevice. |
| 747 | */ | 761 | */ |
| 748 | struct netdev_phys_port_id { | 762 | struct netdev_phys_item_id { |
| 749 | unsigned char id[MAX_PHYS_PORT_ID_LEN]; | 763 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; |
| 750 | unsigned char id_len; | 764 | unsigned char id_len; |
| 751 | }; | 765 | }; |
| 752 | 766 | ||
| @@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
| 937 | * | 951 | * |
| 938 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], | 952 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
| 939 | * struct net_device *dev, | 953 | * struct net_device *dev, |
| 940 | * const unsigned char *addr, u16 flags) | 954 | * const unsigned char *addr, u16 vid, u16 flags) |
| 941 | * Adds an FDB entry to dev for addr. | 955 | * Adds an FDB entry to dev for addr. |
| 942 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], | 956 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
| 943 | * struct net_device *dev, | 957 | * struct net_device *dev, |
| 944 | * const unsigned char *addr) | 958 | * const unsigned char *addr, u16 vid) |
| 945 | * Deletes the FDB entry from dev corresponding to addr. | 959 | * Deletes the FDB entry from dev corresponding to addr. |
| 946 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | 960 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, |
| 947 | * struct net_device *dev, struct net_device *filter_dev, | 961 | * struct net_device *dev, struct net_device *filter_dev, |
| @@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
| 962 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | 976 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. |
| 963 | * | 977 | * |
| 964 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | 978 | * int (*ndo_get_phys_port_id)(struct net_device *dev, |
| 965 | * struct netdev_phys_port_id *ppid); | 979 | * struct netdev_phys_item_id *ppid); |
| 966 | * Called to get ID of physical port of this device. If driver does | 980 | * Called to get ID of physical port of this device. If driver does |
| 967 | * not implement this, it is assumed that the hw is not able to have | 981 | * not implement this, it is assumed that the hw is not able to have |
| 968 | * multiple net devices on single physical port. | 982 | * multiple net devices on single physical port. |
| @@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
| 1004 | * performing GSO on a packet. The device returns true if it is | 1018 | * performing GSO on a packet. The device returns true if it is |
| 1005 | * able to GSO the packet, false otherwise. If the return value is | 1019 | * able to GSO the packet, false otherwise. If the return value is |
| 1006 | * false the stack will do software GSO. | 1020 | * false the stack will do software GSO. |
| 1021 | * | ||
| 1022 | * int (*ndo_switch_parent_id_get)(struct net_device *dev, | ||
| 1023 | * struct netdev_phys_item_id *psid); | ||
| 1024 | * Called to get an ID of the switch chip this port is part of. | ||
| 1025 | * If a driver implements this, it indicates that it represents a port | ||
| 1026 | * of a switch chip. | ||
| 1027 | * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); | ||
| 1028 | * Called to notify switch device port of bridge port STP | ||
| 1029 | * state change. | ||
| 1007 | */ | 1030 | */ |
| 1008 | struct net_device_ops { | 1031 | struct net_device_ops { |
| 1009 | int (*ndo_init)(struct net_device *dev); | 1032 | int (*ndo_init)(struct net_device *dev); |
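The two CONFIG_NET_SWITCHDEV hooks described in the comment block above are meant to be filled in by switch-port drivers. A hypothetical implementation could look like this sketch; struct my_port and my_hw_set_stp_state() are assumptions, only the ndo names and netdev_phys_item_id come from this header:

#include <linux/netdevice.h>
#include <linux/string.h>

struct my_port {
        u64 switch_id;          /* same value on every port of one ASIC */
};

static int my_hw_set_stp_state(struct my_port *port, u8 state)
{
        return 0;               /* stub: program BR_STATE_* into the hardware */
}

#ifdef CONFIG_NET_SWITCHDEV
static int my_switch_parent_id_get(struct net_device *dev,
                                   struct netdev_phys_item_id *psid)
{
        struct my_port *port = netdev_priv(dev);

        psid->id_len = sizeof(port->switch_id);
        memcpy(psid->id, &port->switch_id, psid->id_len);
        return 0;
}

static int my_switch_port_stp_update(struct net_device *dev, u8 state)
{
        return my_hw_set_stp_state(netdev_priv(dev), state);
}
#endif

static const struct net_device_ops my_netdev_ops = {
        /* ...ndo_open, ndo_start_xmit, etc... */
#ifdef CONFIG_NET_SWITCHDEV
        .ndo_switch_parent_id_get       = my_switch_parent_id_get,
        .ndo_switch_port_stp_update     = my_switch_port_stp_update,
#endif
};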
| @@ -1114,11 +1137,13 @@ struct net_device_ops { | |||
| 1114 | struct nlattr *tb[], | 1137 | struct nlattr *tb[], |
| 1115 | struct net_device *dev, | 1138 | struct net_device *dev, |
| 1116 | const unsigned char *addr, | 1139 | const unsigned char *addr, |
| 1140 | u16 vid, | ||
| 1117 | u16 flags); | 1141 | u16 flags); |
| 1118 | int (*ndo_fdb_del)(struct ndmsg *ndm, | 1142 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
| 1119 | struct nlattr *tb[], | 1143 | struct nlattr *tb[], |
| 1120 | struct net_device *dev, | 1144 | struct net_device *dev, |
| 1121 | const unsigned char *addr); | 1145 | const unsigned char *addr, |
| 1146 | u16 vid); | ||
| 1122 | int (*ndo_fdb_dump)(struct sk_buff *skb, | 1147 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
| 1123 | struct netlink_callback *cb, | 1148 | struct netlink_callback *cb, |
| 1124 | struct net_device *dev, | 1149 | struct net_device *dev, |
| @@ -1136,7 +1161,7 @@ struct net_device_ops { | |||
| 1136 | int (*ndo_change_carrier)(struct net_device *dev, | 1161 | int (*ndo_change_carrier)(struct net_device *dev, |
| 1137 | bool new_carrier); | 1162 | bool new_carrier); |
| 1138 | int (*ndo_get_phys_port_id)(struct net_device *dev, | 1163 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
| 1139 | struct netdev_phys_port_id *ppid); | 1164 | struct netdev_phys_item_id *ppid); |
| 1140 | void (*ndo_add_vxlan_port)(struct net_device *dev, | 1165 | void (*ndo_add_vxlan_port)(struct net_device *dev, |
| 1141 | sa_family_t sa_family, | 1166 | sa_family_t sa_family, |
| 1142 | __be16 port); | 1167 | __be16 port); |
| @@ -1155,6 +1180,12 @@ struct net_device_ops { | |||
| 1155 | int (*ndo_get_lock_subclass)(struct net_device *dev); | 1180 | int (*ndo_get_lock_subclass)(struct net_device *dev); |
| 1156 | bool (*ndo_gso_check) (struct sk_buff *skb, | 1181 | bool (*ndo_gso_check) (struct sk_buff *skb, |
| 1157 | struct net_device *dev); | 1182 | struct net_device *dev); |
| 1183 | #ifdef CONFIG_NET_SWITCHDEV | ||
| 1184 | int (*ndo_switch_parent_id_get)(struct net_device *dev, | ||
| 1185 | struct netdev_phys_item_id *psid); | ||
| 1186 | int (*ndo_switch_port_stp_update)(struct net_device *dev, | ||
| 1187 | u8 state); | ||
| 1188 | #endif | ||
| 1158 | }; | 1189 | }; |
| 1159 | 1190 | ||
| 1160 | /** | 1191 | /** |
| @@ -1216,6 +1247,8 @@ enum netdev_priv_flags { | |||
| 1216 | IFF_LIVE_ADDR_CHANGE = 1<<20, | 1247 | IFF_LIVE_ADDR_CHANGE = 1<<20, |
| 1217 | IFF_MACVLAN = 1<<21, | 1248 | IFF_MACVLAN = 1<<21, |
| 1218 | IFF_XMIT_DST_RELEASE_PERM = 1<<22, | 1249 | IFF_XMIT_DST_RELEASE_PERM = 1<<22, |
| 1250 | IFF_IPVLAN_MASTER = 1<<23, | ||
| 1251 | IFF_IPVLAN_SLAVE = 1<<24, | ||
| 1219 | }; | 1252 | }; |
| 1220 | 1253 | ||
| 1221 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1254 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
| @@ -1241,6 +1274,8 @@ enum netdev_priv_flags { | |||
| 1241 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE | 1274 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE |
| 1242 | #define IFF_MACVLAN IFF_MACVLAN | 1275 | #define IFF_MACVLAN IFF_MACVLAN |
| 1243 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM | 1276 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
| 1277 | #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER | ||
| 1278 | #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE | ||
| 1244 | 1279 | ||
| 1245 | /** | 1280 | /** |
| 1246 | * struct net_device - The DEVICE structure. | 1281 | * struct net_device - The DEVICE structure. |
| @@ -1572,6 +1607,7 @@ struct net_device { | |||
| 1572 | struct inet6_dev __rcu *ip6_ptr; | 1607 | struct inet6_dev __rcu *ip6_ptr; |
| 1573 | void *ax25_ptr; | 1608 | void *ax25_ptr; |
| 1574 | struct wireless_dev *ieee80211_ptr; | 1609 | struct wireless_dev *ieee80211_ptr; |
| 1610 | struct wpan_dev *ieee802154_ptr; | ||
| 1575 | 1611 | ||
| 1576 | /* | 1612 | /* |
| 1577 | * Cache lines mostly used on receive path (including eth_type_trans()) | 1613 | * Cache lines mostly used on receive path (including eth_type_trans()) |
| @@ -1590,6 +1626,7 @@ struct net_device { | |||
| 1590 | 1626 | ||
| 1591 | #endif | 1627 | #endif |
| 1592 | 1628 | ||
| 1629 | unsigned long gro_flush_timeout; | ||
| 1593 | rx_handler_func_t __rcu *rx_handler; | 1630 | rx_handler_func_t __rcu *rx_handler; |
| 1594 | void __rcu *rx_handler_data; | 1631 | void __rcu *rx_handler_data; |
| 1595 | 1632 | ||
| @@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; | |||
| 2316 | * Incoming packets are placed on per-cpu queues | 2353 | * Incoming packets are placed on per-cpu queues |
| 2317 | */ | 2354 | */ |
| 2318 | struct softnet_data { | 2355 | struct softnet_data { |
| 2319 | struct Qdisc *output_queue; | ||
| 2320 | struct Qdisc **output_queue_tailp; | ||
| 2321 | struct list_head poll_list; | 2356 | struct list_head poll_list; |
| 2322 | struct sk_buff *completion_queue; | ||
| 2323 | struct sk_buff_head process_queue; | 2357 | struct sk_buff_head process_queue; |
| 2324 | 2358 | ||
| 2325 | /* stats */ | 2359 | /* stats */ |
| @@ -2327,10 +2361,17 @@ struct softnet_data { | |||
| 2327 | unsigned int time_squeeze; | 2361 | unsigned int time_squeeze; |
| 2328 | unsigned int cpu_collision; | 2362 | unsigned int cpu_collision; |
| 2329 | unsigned int received_rps; | 2363 | unsigned int received_rps; |
| 2330 | |||
| 2331 | #ifdef CONFIG_RPS | 2364 | #ifdef CONFIG_RPS |
| 2332 | struct softnet_data *rps_ipi_list; | 2365 | struct softnet_data *rps_ipi_list; |
| 2366 | #endif | ||
| 2367 | #ifdef CONFIG_NET_FLOW_LIMIT | ||
| 2368 | struct sd_flow_limit __rcu *flow_limit; | ||
| 2369 | #endif | ||
| 2370 | struct Qdisc *output_queue; | ||
| 2371 | struct Qdisc **output_queue_tailp; | ||
| 2372 | struct sk_buff *completion_queue; | ||
| 2333 | 2373 | ||
| 2374 | #ifdef CONFIG_RPS | ||
| 2334 | /* Elements below can be accessed between CPUs for RPS */ | 2375 | /* Elements below can be accessed between CPUs for RPS */ |
| 2335 | struct call_single_data csd ____cacheline_aligned_in_smp; | 2376 | struct call_single_data csd ____cacheline_aligned_in_smp; |
| 2336 | struct softnet_data *rps_ipi_next; | 2377 | struct softnet_data *rps_ipi_next; |
| @@ -2342,9 +2383,6 @@ struct softnet_data { | |||
| 2342 | struct sk_buff_head input_pkt_queue; | 2383 | struct sk_buff_head input_pkt_queue; |
| 2343 | struct napi_struct backlog; | 2384 | struct napi_struct backlog; |
| 2344 | 2385 | ||
| 2345 | #ifdef CONFIG_NET_FLOW_LIMIT | ||
| 2346 | struct sd_flow_limit __rcu *flow_limit; | ||
| 2347 | #endif | ||
| 2348 | }; | 2386 | }; |
| 2349 | 2387 | ||
| 2350 | static inline void input_queue_head_incr(struct softnet_data *sd) | 2388 | static inline void input_queue_head_incr(struct softnet_data *sd) |
| @@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |||
| 2748 | } | 2786 | } |
| 2749 | #endif | 2787 | #endif |
| 2750 | 2788 | ||
| 2751 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | ||
| 2752 | const struct net_device *from_dev) | ||
| 2753 | { | ||
| 2754 | int err; | ||
| 2755 | |||
| 2756 | err = netif_set_real_num_tx_queues(to_dev, | ||
| 2757 | from_dev->real_num_tx_queues); | ||
| 2758 | if (err) | ||
| 2759 | return err; | ||
| 2760 | #ifdef CONFIG_SYSFS | ||
| 2761 | return netif_set_real_num_rx_queues(to_dev, | ||
| 2762 | from_dev->real_num_rx_queues); | ||
| 2763 | #else | ||
| 2764 | return 0; | ||
| 2765 | #endif | ||
| 2766 | } | ||
| 2767 | |||
| 2768 | #ifdef CONFIG_SYSFS | 2789 | #ifdef CONFIG_SYSFS |
| 2769 | static inline unsigned int get_netdev_rx_queue_index( | 2790 | static inline unsigned int get_netdev_rx_queue_index( |
| 2770 | struct netdev_rx_queue *queue) | 2791 | struct netdev_rx_queue *queue) |
| @@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); | |||
| 2864 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | 2885 | int dev_set_mac_address(struct net_device *, struct sockaddr *); |
| 2865 | int dev_change_carrier(struct net_device *, bool new_carrier); | 2886 | int dev_change_carrier(struct net_device *, bool new_carrier); |
| 2866 | int dev_get_phys_port_id(struct net_device *dev, | 2887 | int dev_get_phys_port_id(struct net_device *dev, |
| 2867 | struct netdev_phys_port_id *ppid); | 2888 | struct netdev_phys_item_id *ppid); |
| 2868 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); | 2889 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); |
| 2869 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2890 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
| 2870 | struct netdev_queue *txq, int *ret); | 2891 | struct netdev_queue *txq, int *ret); |
| @@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, | |||
| 3425 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); | 3446 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
| 3426 | void *netdev_lower_dev_get_private(struct net_device *dev, | 3447 | void *netdev_lower_dev_get_private(struct net_device *dev, |
| 3427 | struct net_device *lower_dev); | 3448 | struct net_device *lower_dev); |
| 3449 | |||
| 3450 | /* RSS keys are 40 or 52 bytes long */ | ||
| 3451 | #define NETDEV_RSS_KEY_LEN 52 | ||
| 3452 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; | ||
| 3453 | void netdev_rss_key_fill(void *buffer, size_t len); | ||
| 3454 | |||
| 3428 | int dev_get_nest_level(struct net_device *dev, | 3455 | int dev_get_nest_level(struct net_device *dev, |
| 3429 | bool (*type_check)(struct net_device *dev)); | 3456 | bool (*type_check)(struct net_device *dev)); |
| 3430 | int skb_checksum_help(struct sk_buff *skb); | 3457 | int skb_checksum_help(struct sk_buff *skb); |
| @@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) | |||
| 3569 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); | 3596 | BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); |
| 3570 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); | 3597 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
| 3571 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | 3598 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); |
| 3572 | BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); | 3599 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
| 3573 | 3600 | ||
| 3574 | return (features & feature) == feature; | 3601 | return (features & feature) == feature; |
| 3575 | } | 3602 | } |
| @@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) | |||
| 3614 | return dev->priv_flags & IFF_MACVLAN; | 3641 | return dev->priv_flags & IFF_MACVLAN; |
| 3615 | } | 3642 | } |
| 3616 | 3643 | ||
| 3644 | static inline bool netif_is_macvlan_port(struct net_device *dev) | ||
| 3645 | { | ||
| 3646 | return dev->priv_flags & IFF_MACVLAN_PORT; | ||
| 3647 | } | ||
| 3648 | |||
| 3649 | static inline bool netif_is_ipvlan(struct net_device *dev) | ||
| 3650 | { | ||
| 3651 | return dev->priv_flags & IFF_IPVLAN_SLAVE; | ||
| 3652 | } | ||
| 3653 | |||
| 3654 | static inline bool netif_is_ipvlan_port(struct net_device *dev) | ||
| 3655 | { | ||
| 3656 | return dev->priv_flags & IFF_IPVLAN_MASTER; | ||
| 3657 | } | ||
| 3658 | |||
| 3617 | static inline bool netif_is_bond_master(struct net_device *dev) | 3659 | static inline bool netif_is_bond_master(struct net_device *dev) |
| 3618 | { | 3660 | { |
| 3619 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | 3661 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; |
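Putting the NAPI changes above together: a driver of this generation schedules from its hard-irq handler with napi_schedule_irqoff() and reports its work through napi_complete_done(). The sketch below uses invented my_* names and only the NAPI helpers from this header:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_priv {
        struct napi_struct napi;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_priv *priv = data;

        /* Hard irqs are already masked here, so the _irqoff variant saves
         * a redundant local_irq_save()/restore() pair. */
        napi_schedule_irqoff(&priv->napi);
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ...receive up to 'budget' packets, bumping work_done... */

        if (work_done < budget)
                /* Passing the real count lets the stack (and the new
                 * gro_flush_timeout) make better decisions than the old
                 * napi_complete() could. */
                napi_complete_done(napi, work_done);

        return work_done;
}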
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 356acc2846fd..022b761dbf0a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -490,6 +490,8 @@ enum { | |||
| 490 | 490 | ||
| 491 | /* nfs42 */ | 491 | /* nfs42 */ |
| 492 | NFSPROC4_CLNT_SEEK, | 492 | NFSPROC4_CLNT_SEEK, |
| 493 | NFSPROC4_CLNT_ALLOCATE, | ||
| 494 | NFSPROC4_CLNT_DEALLOCATE, | ||
| 493 | }; | 495 | }; |
| 494 | 496 | ||
| 495 | /* nfs41 types */ | 497 | /* nfs41 types */ |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c72d1ad41ad4..6d627b92df53 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -163,7 +163,7 @@ struct nfs_inode { | |||
| 163 | */ | 163 | */ |
| 164 | __be32 cookieverf[2]; | 164 | __be32 cookieverf[2]; |
| 165 | 165 | ||
| 166 | unsigned long npages; | 166 | unsigned long nrequests; |
| 167 | struct nfs_mds_commit_info commit_info; | 167 | struct nfs_mds_commit_info commit_info; |
| 168 | 168 | ||
| 169 | /* Open contexts for shared mmap writes */ | 169 | /* Open contexts for shared mmap writes */ |
| @@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); | |||
| 520 | static inline int | 520 | static inline int |
| 521 | nfs_have_writebacks(struct inode *inode) | 521 | nfs_have_writebacks(struct inode *inode) |
| 522 | { | 522 | { |
| 523 | return NFS_I(inode)->npages != 0; | 523 | return NFS_I(inode)->nrequests != 0; |
| 524 | } | 524 | } |
| 525 | 525 | ||
| 526 | /* | 526 | /* |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a32ba0d7a98f..1e37fbb78f7a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -231,5 +231,7 @@ struct nfs_server { | |||
| 231 | #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) | 231 | #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) |
| 232 | #define NFS_CAP_SECURITY_LABEL (1U << 18) | 232 | #define NFS_CAP_SECURITY_LABEL (1U << 18) |
| 233 | #define NFS_CAP_SEEK (1U << 19) | 233 | #define NFS_CAP_SEEK (1U << 19) |
| 234 | #define NFS_CAP_ALLOCATE (1U << 20) | ||
| 235 | #define NFS_CAP_DEALLOCATE (1U << 21) | ||
| 234 | 236 | ||
| 235 | #endif | 237 | #endif |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 47ebb4fafd87..467c84efb596 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -1243,6 +1243,20 @@ nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) | |||
| 1243 | #endif /* CONFIG_NFS_V4_1 */ | 1243 | #endif /* CONFIG_NFS_V4_1 */ |
| 1244 | 1244 | ||
| 1245 | #ifdef CONFIG_NFS_V4_2 | 1245 | #ifdef CONFIG_NFS_V4_2 |
| 1246 | struct nfs42_falloc_args { | ||
| 1247 | struct nfs4_sequence_args seq_args; | ||
| 1248 | |||
| 1249 | struct nfs_fh *falloc_fh; | ||
| 1250 | nfs4_stateid falloc_stateid; | ||
| 1251 | u64 falloc_offset; | ||
| 1252 | u64 falloc_length; | ||
| 1253 | }; | ||
| 1254 | |||
| 1255 | struct nfs42_falloc_res { | ||
| 1256 | struct nfs4_sequence_res seq_res; | ||
| 1257 | unsigned int status; | ||
| 1258 | }; | ||
| 1259 | |||
| 1246 | struct nfs42_seek_args { | 1260 | struct nfs42_seek_args { |
| 1247 | struct nfs4_sequence_args seq_args; | 1261 | struct nfs4_sequence_args seq_args; |
| 1248 | 1262 | ||
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae..167342c2ce6b 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h | |||
| @@ -12,10 +12,6 @@ | |||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
| 14 | * | 14 | * |
| 15 | * You should have received a copy of the GNU General Public License along | ||
| 16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | * | ||
| 19 | */ | 15 | */ |
| 20 | 16 | ||
| 21 | #ifndef NL802154_H | 17 | #ifndef NL802154_H |
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h new file mode 100644 index 000000000000..85a5c8c16be9 --- /dev/null +++ b/include/linux/ns_common.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | #ifndef _LINUX_NS_COMMON_H | ||
| 2 | #define _LINUX_NS_COMMON_H | ||
| 3 | |||
| 4 | struct proc_ns_operations; | ||
| 5 | |||
| 6 | struct ns_common { | ||
| 7 | atomic_long_t stashed; | ||
| 8 | const struct proc_ns_operations *ops; | ||
| 9 | unsigned int inum; | ||
| 10 | }; | ||
| 11 | |||
| 12 | #endif | ||
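The new ns_common is presumably meant to be embedded in each namespace type, with container_of() used to get back to the enclosing structure; a minimal sketch, with struct my_namespace as an illustrative stand-in:

#include <linux/ns_common.h>
#include <linux/kernel.h>

struct my_namespace {
        struct ns_common ns;
        /* namespace-private state follows */
};

static inline struct my_namespace *to_my_ns(struct ns_common *ns)
{
        return container_of(ns, struct my_namespace, ns);
}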
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 2bf403195c09..258945fcabf1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
| 20 | #include <linux/miscdevice.h> | 20 | #include <linux/miscdevice.h> |
| 21 | #include <linux/kref.h> | 21 | #include <linux/kref.h> |
| 22 | #include <linux/blk-mq.h> | ||
| 22 | 23 | ||
| 23 | struct nvme_bar { | 24 | struct nvme_bar { |
| 24 | __u64 cap; /* Controller Capabilities */ | 25 | __u64 cap; /* Controller Capabilities */ |
| @@ -38,6 +39,7 @@ struct nvme_bar { | |||
| 38 | #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) | 39 | #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) |
| 39 | #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) | 40 | #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) |
| 40 | #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) | 41 | #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) |
| 42 | #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) | ||
| 41 | 43 | ||
| 42 | enum { | 44 | enum { |
| 43 | NVME_CC_ENABLE = 1 << 0, | 45 | NVME_CC_ENABLE = 1 << 0, |
| @@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout; | |||
| 70 | */ | 72 | */ |
| 71 | struct nvme_dev { | 73 | struct nvme_dev { |
| 72 | struct list_head node; | 74 | struct list_head node; |
| 73 | struct nvme_queue __rcu **queues; | 75 | struct nvme_queue **queues; |
| 74 | unsigned short __percpu *io_queue; | 76 | struct request_queue *admin_q; |
| 77 | struct blk_mq_tag_set tagset; | ||
| 78 | struct blk_mq_tag_set admin_tagset; | ||
| 75 | u32 __iomem *dbs; | 79 | u32 __iomem *dbs; |
| 76 | struct pci_dev *pci_dev; | 80 | struct pci_dev *pci_dev; |
| 77 | struct dma_pool *prp_page_pool; | 81 | struct dma_pool *prp_page_pool; |
| @@ -90,15 +94,16 @@ struct nvme_dev { | |||
| 90 | struct miscdevice miscdev; | 94 | struct miscdevice miscdev; |
| 91 | work_func_t reset_workfn; | 95 | work_func_t reset_workfn; |
| 92 | struct work_struct reset_work; | 96 | struct work_struct reset_work; |
| 93 | struct work_struct cpu_work; | ||
| 94 | char name[12]; | 97 | char name[12]; |
| 95 | char serial[20]; | 98 | char serial[20]; |
| 96 | char model[40]; | 99 | char model[40]; |
| 97 | char firmware_rev[8]; | 100 | char firmware_rev[8]; |
| 98 | u32 max_hw_sectors; | 101 | u32 max_hw_sectors; |
| 99 | u32 stripe_size; | 102 | u32 stripe_size; |
| 103 | u32 page_size; | ||
| 100 | u16 oncs; | 104 | u16 oncs; |
| 101 | u16 abort_limit; | 105 | u16 abort_limit; |
| 106 | u8 event_limit; | ||
| 102 | u8 vwc; | 107 | u8 vwc; |
| 103 | u8 initialized; | 108 | u8 initialized; |
| 104 | }; | 109 | }; |
| @@ -132,7 +137,6 @@ struct nvme_iod { | |||
| 132 | int offset; /* Of PRP list */ | 137 | int offset; /* Of PRP list */ |
| 133 | int nents; /* Used in scatterlist */ | 138 | int nents; /* Used in scatterlist */ |
| 134 | int length; /* Of data, in bytes */ | 139 | int length; /* Of data, in bytes */ |
| 135 | unsigned long start_time; | ||
| 136 | dma_addr_t first_dma; | 140 | dma_addr_t first_dma; |
| 137 | struct list_head node; | 141 | struct list_head node; |
| 138 | struct scatterlist sg[0]; | 142 | struct scatterlist sg[0]; |
| @@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | |||
| 150 | */ | 154 | */ |
| 151 | void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); | 155 | void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); |
| 152 | 156 | ||
| 153 | int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t); | 157 | int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); |
| 154 | struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, | 158 | struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, |
| 155 | unsigned long addr, unsigned length); | 159 | unsigned long addr, unsigned length); |
| 156 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, | 160 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, |
| 157 | struct nvme_iod *iod); | 161 | struct nvme_iod *iod); |
| 158 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); | 162 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *, |
| 163 | struct nvme_command *, u32 *); | ||
| 164 | int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); | ||
| 159 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, | 165 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, |
| 160 | u32 *result); | 166 | u32 *result); |
| 161 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, | 167 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, |
diff --git a/include/linux/of.h b/include/linux/of.h index c55b50018ac4..dfde07e77a63 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/topology.h> | 24 | #include <linux/topology.h> |
| 25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
| 26 | #include <linux/property.h> | ||
| 27 | #include <linux/list.h> | ||
| 26 | 28 | ||
| 27 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
| 28 | #include <asm/errno.h> | 30 | #include <asm/errno.h> |
| @@ -49,14 +51,13 @@ struct device_node { | |||
| 49 | const char *type; | 51 | const char *type; |
| 50 | phandle phandle; | 52 | phandle phandle; |
| 51 | const char *full_name; | 53 | const char *full_name; |
| 54 | struct fwnode_handle fwnode; | ||
| 52 | 55 | ||
| 53 | struct property *properties; | 56 | struct property *properties; |
| 54 | struct property *deadprops; /* removed properties */ | 57 | struct property *deadprops; /* removed properties */ |
| 55 | struct device_node *parent; | 58 | struct device_node *parent; |
| 56 | struct device_node *child; | 59 | struct device_node *child; |
| 57 | struct device_node *sibling; | 60 | struct device_node *sibling; |
| 58 | struct device_node *next; /* next device of same type */ | ||
| 59 | struct device_node *allnext; /* next in list of all nodes */ | ||
| 60 | struct kobject kobj; | 61 | struct kobject kobj; |
| 61 | unsigned long _flags; | 62 | unsigned long _flags; |
| 62 | void *data; | 63 | void *data; |
| @@ -74,11 +75,18 @@ struct of_phandle_args { | |||
| 74 | uint32_t args[MAX_PHANDLE_ARGS]; | 75 | uint32_t args[MAX_PHANDLE_ARGS]; |
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| 78 | struct of_reconfig_data { | ||
| 79 | struct device_node *dn; | ||
| 80 | struct property *prop; | ||
| 81 | struct property *old_prop; | ||
| 82 | }; | ||
| 83 | |||
| 77 | /* initialize a node */ | 84 | /* initialize a node */ |
| 78 | extern struct kobj_type of_node_ktype; | 85 | extern struct kobj_type of_node_ktype; |
| 79 | static inline void of_node_init(struct device_node *node) | 86 | static inline void of_node_init(struct device_node *node) |
| 80 | { | 87 | { |
| 81 | kobject_init(&node->kobj, &of_node_ktype); | 88 | kobject_init(&node->kobj, &of_node_ktype); |
| 89 | node->fwnode.type = FWNODE_OF; | ||
| 82 | } | 90 | } |
| 83 | 91 | ||
| 84 | /* true when node is initialized */ | 92 | /* true when node is initialized */ |
| @@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node) | |||
| 105 | static inline void of_node_put(struct device_node *node) { } | 113 | static inline void of_node_put(struct device_node *node) { } |
| 106 | #endif /* !CONFIG_OF_DYNAMIC */ | 114 | #endif /* !CONFIG_OF_DYNAMIC */ |
| 107 | 115 | ||
| 108 | #ifdef CONFIG_OF | ||
| 109 | |||
| 110 | /* Pointer for first entry in chain of all nodes. */ | 116 | /* Pointer for first entry in chain of all nodes. */ |
| 111 | extern struct device_node *of_allnodes; | 117 | extern struct device_node *of_root; |
| 112 | extern struct device_node *of_chosen; | 118 | extern struct device_node *of_chosen; |
| 113 | extern struct device_node *of_aliases; | 119 | extern struct device_node *of_aliases; |
| 114 | extern struct device_node *of_stdout; | 120 | extern struct device_node *of_stdout; |
| 115 | extern raw_spinlock_t devtree_lock; | 121 | extern raw_spinlock_t devtree_lock; |
| 116 | 122 | ||
| 123 | #ifdef CONFIG_OF | ||
| 124 | static inline bool is_of_node(struct fwnode_handle *fwnode) | ||
| 125 | { | ||
| 126 | return fwnode && fwnode->type == FWNODE_OF; | ||
| 127 | } | ||
| 128 | |||
| 129 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | ||
| 130 | { | ||
| 131 | return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; | ||
| 132 | } | ||
| 133 | |||
| 117 | static inline bool of_have_populated_dt(void) | 134 | static inline bool of_have_populated_dt(void) |
| 118 | { | 135 | { |
| 119 | return of_allnodes != NULL; | 136 | return of_root != NULL; |
| 120 | } | 137 | } |
| 121 | 138 | ||
| 122 | static inline bool of_node_is_root(const struct device_node *node) | 139 | static inline bool of_node_is_root(const struct device_node *node) |
| @@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag | |||
| 160 | clear_bit(flag, &p->_flags); | 177 | clear_bit(flag, &p->_flags); |
| 161 | } | 178 | } |
| 162 | 179 | ||
| 180 | extern struct device_node *__of_find_all_nodes(struct device_node *prev); | ||
| 163 | extern struct device_node *of_find_all_nodes(struct device_node *prev); | 181 | extern struct device_node *of_find_all_nodes(struct device_node *prev); |
| 164 | 182 | ||
| 165 | /* | 183 | /* |
| @@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np) | |||
| 215 | return np ? np->full_name : "<no-node>"; | 233 | return np ? np->full_name : "<no-node>"; |
| 216 | } | 234 | } |
| 217 | 235 | ||
| 218 | #define for_each_of_allnodes(dn) \ | 236 | #define for_each_of_allnodes_from(from, dn) \ |
| 219 | for (dn = of_allnodes; dn; dn = dn->allnext) | 237 | for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) |
| 238 | #define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) | ||
| 220 | extern struct device_node *of_find_node_by_name(struct device_node *from, | 239 | extern struct device_node *of_find_node_by_name(struct device_node *from, |
| 221 | const char *name); | 240 | const char *name); |
| 222 | extern struct device_node *of_find_node_by_type(struct device_node *from, | 241 | extern struct device_node *of_find_node_by_type(struct device_node *from, |
| @@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match( | |||
| 228 | const struct of_device_id *matches, | 247 | const struct of_device_id *matches, |
| 229 | const struct of_device_id **match); | 248 | const struct of_device_id **match); |
| 230 | 249 | ||
| 231 | extern struct device_node *of_find_node_by_path(const char *path); | 250 | extern struct device_node *of_find_node_opts_by_path(const char *path, |
| 251 | const char **opts); | ||
| 252 | static inline struct device_node *of_find_node_by_path(const char *path) | ||
| 253 | { | ||
| 254 | return of_find_node_opts_by_path(path, NULL); | ||
| 255 | } | ||
| 256 | |||
| 232 | extern struct device_node *of_find_node_by_phandle(phandle handle); | 257 | extern struct device_node *of_find_node_by_phandle(phandle handle); |
| 233 | extern struct device_node *of_get_parent(const struct device_node *node); | 258 | extern struct device_node *of_get_parent(const struct device_node *node); |
| 234 | extern struct device_node *of_get_next_parent(struct device_node *node); | 259 | extern struct device_node *of_get_next_parent(struct device_node *node); |
| @@ -263,6 +288,10 @@ extern int of_property_read_u32_array(const struct device_node *np, | |||
| 263 | size_t sz); | 288 | size_t sz); |
| 264 | extern int of_property_read_u64(const struct device_node *np, | 289 | extern int of_property_read_u64(const struct device_node *np, |
| 265 | const char *propname, u64 *out_value); | 290 | const char *propname, u64 *out_value); |
| 291 | extern int of_property_read_u64_array(const struct device_node *np, | ||
| 292 | const char *propname, | ||
| 293 | u64 *out_values, | ||
| 294 | size_t sz); | ||
| 266 | 295 | ||
| 267 | extern int of_property_read_string(struct device_node *np, | 296 | extern int of_property_read_string(struct device_node *np, |
| 268 | const char *propname, | 297 | const char *propname, |
| @@ -275,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np, | |||
| 275 | const char **out_strs, size_t sz, int index); | 304 | const char **out_strs, size_t sz, int index); |
| 276 | extern int of_device_is_compatible(const struct device_node *device, | 305 | extern int of_device_is_compatible(const struct device_node *device, |
| 277 | const char *); | 306 | const char *); |
| 278 | extern int of_device_is_available(const struct device_node *device); | 307 | extern bool of_device_is_available(const struct device_node *device); |
| 279 | extern const void *of_get_property(const struct device_node *node, | 308 | extern const void *of_get_property(const struct device_node *node, |
| 280 | const char *name, | 309 | const char *name, |
| 281 | int *lenp); | 310 | int *lenp); |
| @@ -317,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop); | |||
| 317 | #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 | 346 | #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 |
| 318 | #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 | 347 | #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 |
| 319 | 348 | ||
| 320 | struct of_prop_reconfig { | ||
| 321 | struct device_node *dn; | ||
| 322 | struct property *prop; | ||
| 323 | struct property *old_prop; | ||
| 324 | }; | ||
| 325 | |||
| 326 | extern int of_reconfig_notifier_register(struct notifier_block *); | ||
| 327 | extern int of_reconfig_notifier_unregister(struct notifier_block *); | ||
| 328 | extern int of_reconfig_notify(unsigned long, void *); | ||
| 329 | |||
| 330 | extern int of_attach_node(struct device_node *); | 349 | extern int of_attach_node(struct device_node *); |
| 331 | extern int of_detach_node(struct device_node *); | 350 | extern int of_detach_node(struct device_node *); |
| 332 | 351 | ||
| @@ -355,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index); | |||
| 355 | 374 | ||
| 356 | #else /* CONFIG_OF */ | 375 | #else /* CONFIG_OF */ |
| 357 | 376 | ||
| 377 | static inline bool is_of_node(struct fwnode_handle *fwnode) | ||
| 378 | { | ||
| 379 | return false; | ||
| 380 | } | ||
| 381 | |||
| 382 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | ||
| 383 | { | ||
| 384 | return NULL; | ||
| 385 | } | ||
| 386 | |||
| 358 | static inline const char* of_node_full_name(const struct device_node *np) | 387 | static inline const char* of_node_full_name(const struct device_node *np) |
| 359 | { | 388 | { |
| 360 | return "<no-node>"; | 389 | return "<no-node>"; |
| @@ -385,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path) | |||
| 385 | return NULL; | 414 | return NULL; |
| 386 | } | 415 | } |
| 387 | 416 | ||
| 417 | static inline struct device_node *of_find_node_opts_by_path(const char *path, | ||
| 418 | const char **opts) | ||
| 419 | { | ||
| 420 | return NULL; | ||
| 421 | } | ||
| 422 | |||
| 388 | static inline struct device_node *of_get_parent(const struct device_node *node) | 423 | static inline struct device_node *of_get_parent(const struct device_node *node) |
| 389 | { | 424 | { |
| 390 | return NULL; | 425 | return NULL; |
| @@ -426,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device, | |||
| 426 | return 0; | 461 | return 0; |
| 427 | } | 462 | } |
| 428 | 463 | ||
| 429 | static inline int of_device_is_available(const struct device_node *device) | 464 | static inline bool of_device_is_available(const struct device_node *device) |
| 430 | { | 465 | { |
| 431 | return 0; | 466 | return false; |
| 432 | } | 467 | } |
| 433 | 468 | ||
| 434 | static inline struct property *of_find_property(const struct device_node *np, | 469 | static inline struct property *of_find_property(const struct device_node *np, |
| @@ -477,6 +512,13 @@ static inline int of_property_read_u32_array(const struct device_node *np, | |||
| 477 | return -ENOSYS; | 512 | return -ENOSYS; |
| 478 | } | 513 | } |
| 479 | 514 | ||
| 515 | static inline int of_property_read_u64_array(const struct device_node *np, | ||
| 516 | const char *propname, | ||
| 517 | u64 *out_values, size_t sz) | ||
| 518 | { | ||
| 519 | return -ENOSYS; | ||
| 520 | } | ||
| 521 | |||
| 480 | static inline int of_property_read_string(struct device_node *np, | 522 | static inline int of_property_read_string(struct device_node *np, |
| 481 | const char *propname, | 523 | const char *propname, |
| 482 | const char **out_string) | 524 | const char **out_string) |
| @@ -760,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np, | |||
| 760 | return of_property_read_u32_array(np, propname, out_value, 1); | 802 | return of_property_read_u32_array(np, propname, out_value, 1); |
| 761 | } | 803 | } |
| 762 | 804 | ||
| 805 | static inline int of_property_read_s32(const struct device_node *np, | ||
| 806 | const char *propname, | ||
| 807 | s32 *out_value) | ||
| 808 | { | ||
| 809 | return of_property_read_u32(np, propname, (u32*) out_value); | ||
| 810 | } | ||
| 811 | |||
| 763 | #define of_property_for_each_u32(np, propname, prop, p, u) \ | 812 | #define of_property_for_each_u32(np, propname, prop, p, u) \ |
| 764 | for (prop = of_find_property(np, propname, NULL), \ | 813 | for (prop = of_find_property(np, propname, NULL), \ |
| 765 | p = of_prop_next_u32(prop, NULL, &u); \ | 814 | p = of_prop_next_u32(prop, NULL, &u); \ |
| @@ -828,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np) | |||
| 828 | = { .compatible = compat, \ | 877 | = { .compatible = compat, \ |
| 829 | .data = (fn == (fn_type)NULL) ? fn : fn } | 878 | .data = (fn == (fn_type)NULL) ? fn : fn } |
| 830 | #else | 879 | #else |
| 831 | #define _OF_DECLARE(table, name, compat, fn, fn_type) \ | 880 | #define _OF_DECLARE(table, name, compat, fn, fn_type) \ |
| 832 | static const struct of_device_id __of_table_##name \ | 881 | static const struct of_device_id __of_table_##name \ |
| 833 | __attribute__((unused)) \ | 882 | __attribute__((unused)) \ |
| 834 | = { .compatible = compat, \ | 883 | = { .compatible = compat, \ |
| @@ -879,7 +928,19 @@ struct of_changeset { | |||
| 879 | struct list_head entries; | 928 | struct list_head entries; |
| 880 | }; | 929 | }; |
| 881 | 930 | ||
| 931 | enum of_reconfig_change { | ||
| 932 | OF_RECONFIG_NO_CHANGE = 0, | ||
| 933 | OF_RECONFIG_CHANGE_ADD, | ||
| 934 | OF_RECONFIG_CHANGE_REMOVE, | ||
| 935 | }; | ||
| 936 | |||
| 882 | #ifdef CONFIG_OF_DYNAMIC | 937 | #ifdef CONFIG_OF_DYNAMIC |
| 938 | extern int of_reconfig_notifier_register(struct notifier_block *); | ||
| 939 | extern int of_reconfig_notifier_unregister(struct notifier_block *); | ||
| 940 | extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); | ||
| 941 | extern int of_reconfig_get_state_change(unsigned long action, | ||
| 942 | struct of_reconfig_data *arg); | ||
| 943 | |||
| 883 | extern void of_changeset_init(struct of_changeset *ocs); | 944 | extern void of_changeset_init(struct of_changeset *ocs); |
| 884 | extern void of_changeset_destroy(struct of_changeset *ocs); | 945 | extern void of_changeset_destroy(struct of_changeset *ocs); |
| 885 | extern int of_changeset_apply(struct of_changeset *ocs); | 946 | extern int of_changeset_apply(struct of_changeset *ocs); |
| @@ -917,7 +978,26 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, | |||
| 917 | { | 978 | { |
| 918 | return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); | 979 | return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); |
| 919 | } | 980 | } |
| 920 | #endif | 981 | #else /* CONFIG_OF_DYNAMIC */ |
| 982 | static inline int of_reconfig_notifier_register(struct notifier_block *nb) | ||
| 983 | { | ||
| 984 | return -EINVAL; | ||
| 985 | } | ||
| 986 | static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) | ||
| 987 | { | ||
| 988 | return -EINVAL; | ||
| 989 | } | ||
| 990 | static inline int of_reconfig_notify(unsigned long action, | ||
| 991 | struct of_reconfig_data *arg) | ||
| 992 | { | ||
| 993 | return -EINVAL; | ||
| 994 | } | ||
| 995 | static inline int of_reconfig_get_state_change(unsigned long action, | ||
| 996 | struct of_reconfig_data *arg) | ||
| 997 | { | ||
| 998 | return -EINVAL; | ||
| 999 | } | ||
| 1000 | #endif /* CONFIG_OF_DYNAMIC */ | ||
| 921 | 1001 | ||
| 922 | /* CONFIG_OF_RESOLVE api */ | 1002 | /* CONFIG_OF_RESOLVE api */ |
| 923 | extern int of_resolve_phandles(struct device_node *tree); | 1003 | extern int of_resolve_phandles(struct device_node *tree); |
| @@ -933,4 +1013,34 @@ static inline bool of_device_is_system_power_controller(const struct device_node | |||
| 933 | return of_property_read_bool(np, "system-power-controller"); | 1013 | return of_property_read_bool(np, "system-power-controller"); |
| 934 | } | 1014 | } |
| 935 | 1015 | ||
| 1016 | /** | ||
| 1017 | * Overlay support | ||
| 1018 | */ | ||
| 1019 | |||
| 1020 | #ifdef CONFIG_OF_OVERLAY | ||
| 1021 | |||
| 1022 | /* ID based overlays; the API for external users */ | ||
| 1023 | int of_overlay_create(struct device_node *tree); | ||
| 1024 | int of_overlay_destroy(int id); | ||
| 1025 | int of_overlay_destroy_all(void); | ||
| 1026 | |||
| 1027 | #else | ||
| 1028 | |||
| 1029 | static inline int of_overlay_create(struct device_node *tree) | ||
| 1030 | { | ||
| 1031 | return -ENOTSUPP; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | static inline int of_overlay_destroy(int id) | ||
| 1035 | { | ||
| 1036 | return -ENOTSUPP; | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | static inline int of_overlay_destroy_all(void) | ||
| 1040 | { | ||
| 1041 | return -ENOTSUPP; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | #endif | ||
| 1045 | |||
| 936 | #endif /* _LINUX_OF_H */ | 1046 | #endif /* _LINUX_OF_H */ |
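As a rough illustration of the of.h additions above (of_find_node_opts_by_path() and of_property_read_u64_array()), the sketch below looks up a node via an aliased path with appended options and reads a 64-bit array property. The alias "serial0", the node path and the property name are made up for the example and are not part of the patch.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/printk.h>

static int __init of_example_init(void)
{
	struct device_node *np;
	const char *opts;
	u64 vals[2];
	int ret;

	/* "alias:options" lookup; everything after ':' is returned in opts */
	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
	if (np) {
		pr_info("console %s, options %s\n",
			of_node_full_name(np), opts ? opts : "<none>");
		of_node_put(np);
	}

	/* 64-bit counterpart of of_property_read_u32_array() */
	np = of_find_node_by_path("/example-node");
	if (!np)
		return -ENODEV;
	ret = of_property_read_u64_array(np, "example-range", vals,
					 ARRAY_SIZE(vals));
	of_node_put(np);
	return ret;
}

The overlay calls added at the bottom of the file follow the same external-user pattern: of_overlay_create() takes an unflattened overlay tree and returns an id that is later handed to of_overlay_destroy().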
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8cb14eb393d6..d88e81be6368 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h | |||
| @@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index, | |||
| 106 | struct resource *r); | 106 | struct resource *r); |
| 107 | void __iomem *of_iomap(struct device_node *node, int index); | 107 | void __iomem *of_iomap(struct device_node *node, int index); |
| 108 | void __iomem *of_io_request_and_map(struct device_node *device, | 108 | void __iomem *of_io_request_and_map(struct device_node *device, |
| 109 | int index, char *name); | 109 | int index, const char *name); |
| 110 | #else | 110 | #else |
| 111 | 111 | ||
| 112 | #include <linux/io.h> | 112 | #include <linux/io.h> |
| @@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | static inline void __iomem *of_io_request_and_map(struct device_node *device, | 125 | static inline void __iomem *of_io_request_and_map(struct device_node *device, |
| 126 | int index, char *name) | 126 | int index, const char *name) |
| 127 | { | 127 | { |
| 128 | return IOMEM_ERR_PTR(-EINVAL); | 128 | return IOMEM_ERR_PTR(-EINVAL); |
| 129 | } | 129 | } |
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 51a560f34bca..16c75547d725 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h | |||
| @@ -1,12 +1,19 @@ | |||
| 1 | #ifndef __OF_IOMMU_H | 1 | #ifndef __OF_IOMMU_H |
| 2 | #define __OF_IOMMU_H | 2 | #define __OF_IOMMU_H |
| 3 | 3 | ||
| 4 | #include <linux/device.h> | ||
| 5 | #include <linux/iommu.h> | ||
| 6 | #include <linux/of.h> | ||
| 7 | |||
| 4 | #ifdef CONFIG_OF_IOMMU | 8 | #ifdef CONFIG_OF_IOMMU |
| 5 | 9 | ||
| 6 | extern int of_get_dma_window(struct device_node *dn, const char *prefix, | 10 | extern int of_get_dma_window(struct device_node *dn, const char *prefix, |
| 7 | int index, unsigned long *busno, dma_addr_t *addr, | 11 | int index, unsigned long *busno, dma_addr_t *addr, |
| 8 | size_t *size); | 12 | size_t *size); |
| 9 | 13 | ||
| 14 | extern void of_iommu_init(void); | ||
| 15 | extern struct iommu_ops *of_iommu_configure(struct device *dev); | ||
| 16 | |||
| 10 | #else | 17 | #else |
| 11 | 18 | ||
| 12 | static inline int of_get_dma_window(struct device_node *dn, const char *prefix, | 19 | static inline int of_get_dma_window(struct device_node *dn, const char *prefix, |
| @@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, | |||
| 16 | return -EINVAL; | 23 | return -EINVAL; |
| 17 | } | 24 | } |
| 18 | 25 | ||
| 26 | static inline void of_iommu_init(void) { } | ||
| 27 | static inline struct iommu_ops *of_iommu_configure(struct device *dev) | ||
| 28 | { | ||
| 29 | return NULL; | ||
| 30 | } | ||
| 31 | |||
| 19 | #endif /* CONFIG_OF_IOMMU */ | 32 | #endif /* CONFIG_OF_IOMMU */ |
| 20 | 33 | ||
| 34 | void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); | ||
| 35 | struct iommu_ops *of_iommu_get_ops(struct device_node *np); | ||
| 36 | |||
| 37 | extern struct of_device_id __iommu_of_table; | ||
| 38 | |||
| 39 | typedef int (*of_iommu_init_fn)(struct device_node *); | ||
| 40 | |||
| 41 | #define IOMMU_OF_DECLARE(name, compat, fn) \ | ||
| 42 | _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) | ||
| 43 | |||
| 21 | #endif /* __OF_IOMMU_H */ | 44 | #endif /* __OF_IOMMU_H */ |
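A minimal sketch of how a driver might hook into the new IOMMU_OF_DECLARE() table; the compatible string and init function are placeholders, not a real driver.

#include <linux/of.h>
#include <linux/of_iommu.h>

/* Placeholder init hook matching the of_iommu_init_fn typedef. */
static int __init example_iommu_of_init(struct device_node *np)
{
	/*
	 * Probe the IOMMU described by @np and publish its iommu_ops,
	 * e.g. via of_iommu_set_ops(np, &example_iommu_ops).
	 */
	return 0;
}
IOMMU_OF_DECLARE(example_iommu, "vendor,example-iommu", example_iommu_of_init);

of_iommu_init() walks the __iommu_of_table at boot and invokes the init function for each matching node; of_iommu_configure(dev) later returns the iommu_ops bound to a master device.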
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index c65a18a0cfdf..7e09244bb679 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h | |||
| @@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); | |||
| 39 | /* for building the device tree */ | 39 | /* for building the device tree */ |
| 40 | extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); | 40 | extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); |
| 41 | 41 | ||
| 42 | extern void (*of_pdt_build_more)(struct device_node *dp, | 42 | extern void (*of_pdt_build_more)(struct device_node *dp); |
| 43 | struct device_node ***nextp); | ||
| 44 | 43 | ||
| 45 | #endif /* _LINUX_OF_PDT_H */ | 44 | #endif /* _LINUX_OF_PDT_H */ |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index c2b0627a2317..8a860f096c35 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
| @@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, | |||
| 84 | static inline void of_platform_depopulate(struct device *parent) { } | 84 | static inline void of_platform_depopulate(struct device *parent) { } |
| 85 | #endif | 85 | #endif |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_OF_DYNAMIC | ||
| 88 | extern void of_platform_register_reconfig_notifier(void); | ||
| 89 | #else | ||
| 90 | static inline void of_platform_register_reconfig_notifier(void) { } | ||
| 91 | #endif | ||
| 92 | |||
| 87 | #endif /* _LINUX_OF_PLATFORM_H */ | 93 | #endif /* _LINUX_OF_PLATFORM_H */ |
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index f8322d9cd235..587bbdd31f5a 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h | |||
| @@ -10,20 +10,20 @@ | |||
| 10 | #define OMAP_MAILBOX_H | 10 | #define OMAP_MAILBOX_H |
| 11 | 11 | ||
| 12 | typedef u32 mbox_msg_t; | 12 | typedef u32 mbox_msg_t; |
| 13 | struct omap_mbox; | ||
| 14 | 13 | ||
| 15 | typedef int __bitwise omap_mbox_irq_t; | 14 | typedef int __bitwise omap_mbox_irq_t; |
| 16 | #define IRQ_TX ((__force omap_mbox_irq_t) 1) | 15 | #define IRQ_TX ((__force omap_mbox_irq_t) 1) |
| 17 | #define IRQ_RX ((__force omap_mbox_irq_t) 2) | 16 | #define IRQ_RX ((__force omap_mbox_irq_t) 2) |
| 18 | 17 | ||
| 19 | int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); | 18 | struct mbox_chan; |
| 19 | struct mbox_client; | ||
| 20 | 20 | ||
| 21 | struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); | 21 | struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, |
| 22 | void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); | 22 | const char *chan_name); |
| 23 | 23 | ||
| 24 | void omap_mbox_save_ctx(struct omap_mbox *mbox); | 24 | void omap_mbox_save_ctx(struct mbox_chan *chan); |
| 25 | void omap_mbox_restore_ctx(struct omap_mbox *mbox); | 25 | void omap_mbox_restore_ctx(struct mbox_chan *chan); |
| 26 | void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); | 26 | void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); |
| 27 | void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); | 27 | void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); |
| 28 | 28 | ||
| 29 | #endif /* OMAP_MAILBOX_H */ | 29 | #endif /* OMAP_MAILBOX_H */ |
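Rough sketch of the consumer side after the switch to the common mailbox framework: a struct mbox_client is filled in and a channel is requested by name. The channel name "wkup_m3", the blocking parameters and the way the payload is packed into the message pointer are assumptions for illustration only.

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/omap-mailbox.h>

static struct mbox_chan *example_open_channel(struct device *dev)
{
	static struct mbox_client client;
	struct mbox_chan *chan;
	mbox_msg_t msg = 0x1234;

	client.dev = dev;
	client.tx_block = true;
	client.tx_tout = 500;		/* ms */

	chan = omap_mbox_request_channel(&client, "wkup_m3");
	if (IS_ERR(chan))
		return chan;

	omap_mbox_enable_irq(chan, IRQ_RX);
	/* assumption: the 32-bit payload travels in the message pointer itself */
	mbox_send_message(chan, (void *)(uintptr_t)msg);
	return chan;
}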
diff --git a/include/linux/oom.h b/include/linux/oom.h index e8d6e1058723..853698c721f7 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask) | |||
| 92 | 92 | ||
| 93 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); | 93 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); |
| 94 | 94 | ||
| 95 | static inline bool task_will_free_mem(struct task_struct *task) | ||
| 96 | { | ||
| 97 | /* | ||
| 98 | * A coredumping process may sleep for an extended period in exit_mm(), | ||
| 99 | * so the oom killer cannot assume that the process will promptly exit | ||
| 100 | * and release memory. | ||
| 101 | */ | ||
| 102 | return (task->flags & PF_EXITING) && | ||
| 103 | !(task->signal->flags & SIGNAL_GROUP_COREDUMP); | ||
| 104 | } | ||
| 105 | |||
| 95 | /* sysctls */ | 106 | /* sysctls */ |
| 96 | extern int sysctl_oom_dump_tasks; | 107 | extern int sysctl_oom_dump_tasks; |
| 97 | extern int sysctl_oom_kill_allocating_task; | 108 | extern int sysctl_oom_kill_allocating_task; |
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h deleted file mode 100644 index 22691f614043..000000000000 --- a/include/linux/page-debug-flags.h +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | #ifndef LINUX_PAGE_DEBUG_FLAGS_H | ||
| 2 | #define LINUX_PAGE_DEBUG_FLAGS_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * page->debug_flags bits: | ||
| 6 | * | ||
| 7 | * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to | ||
| 8 | * implement generic debug pagealloc feature. The pages are filled with | ||
| 9 | * poison patterns and set this flag after free_pages(). The poisoned | ||
| 10 | * pages are verified whether the patterns are not corrupted and clear | ||
| 11 | * the flag before alloc_pages(). | ||
| 12 | */ | ||
| 13 | |||
| 14 | enum page_debug_flags { | ||
| 15 | PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ | ||
| 16 | PAGE_DEBUG_FLAG_GUARD, | ||
| 17 | }; | ||
| 18 | |||
| 19 | /* | ||
| 20 | * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably | ||
| 21 | * gets turned off when no debug features are enabling it! | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
| 25 | #if !defined(CONFIG_PAGE_POISONING) && \ | ||
| 26 | !defined(CONFIG_PAGE_GUARD) \ | ||
| 27 | /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ | ||
| 28 | #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! | ||
| 29 | #endif | ||
| 30 | #endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ | ||
| 31 | |||
| 32 | #endif /* LINUX_PAGE_DEBUG_FLAGS_H */ | ||
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 5c831f1eca79..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null | |||
| @@ -1,105 +0,0 @@ | |||
| 1 | #ifndef __LINUX_PAGE_CGROUP_H | ||
| 2 | #define __LINUX_PAGE_CGROUP_H | ||
| 3 | |||
| 4 | enum { | ||
| 5 | /* flags for mem_cgroup */ | ||
| 6 | PCG_USED = 0x01, /* This page is charged to a memcg */ | ||
| 7 | PCG_MEM = 0x02, /* This page holds a memory charge */ | ||
| 8 | PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ | ||
| 9 | }; | ||
| 10 | |||
| 11 | struct pglist_data; | ||
| 12 | |||
| 13 | #ifdef CONFIG_MEMCG | ||
| 14 | struct mem_cgroup; | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Page Cgroup can be considered as an extended mem_map. | ||
| 18 | * A page_cgroup page is associated with every page descriptor. The | ||
| 19 | * page_cgroup helps us identify information about the cgroup | ||
| 20 | * All page cgroups are allocated at boot or memory hotplug event, | ||
| 21 | * then the page cgroup for pfn always exists. | ||
| 22 | */ | ||
| 23 | struct page_cgroup { | ||
| 24 | unsigned long flags; | ||
| 25 | struct mem_cgroup *mem_cgroup; | ||
| 26 | }; | ||
| 27 | |||
| 28 | extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); | ||
| 29 | |||
| 30 | #ifdef CONFIG_SPARSEMEM | ||
| 31 | static inline void page_cgroup_init_flatmem(void) | ||
| 32 | { | ||
| 33 | } | ||
| 34 | extern void page_cgroup_init(void); | ||
| 35 | #else | ||
| 36 | extern void page_cgroup_init_flatmem(void); | ||
| 37 | static inline void page_cgroup_init(void) | ||
| 38 | { | ||
| 39 | } | ||
| 40 | #endif | ||
| 41 | |||
| 42 | struct page_cgroup *lookup_page_cgroup(struct page *page); | ||
| 43 | |||
| 44 | static inline int PageCgroupUsed(struct page_cgroup *pc) | ||
| 45 | { | ||
| 46 | return !!(pc->flags & PCG_USED); | ||
| 47 | } | ||
| 48 | #else /* !CONFIG_MEMCG */ | ||
| 49 | struct page_cgroup; | ||
| 50 | |||
| 51 | static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) | ||
| 52 | { | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline struct page_cgroup *lookup_page_cgroup(struct page *page) | ||
| 56 | { | ||
| 57 | return NULL; | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline void page_cgroup_init(void) | ||
| 61 | { | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline void page_cgroup_init_flatmem(void) | ||
| 65 | { | ||
| 66 | } | ||
| 67 | #endif /* CONFIG_MEMCG */ | ||
| 68 | |||
| 69 | #include <linux/swap.h> | ||
| 70 | |||
| 71 | #ifdef CONFIG_MEMCG_SWAP | ||
| 72 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | ||
| 73 | unsigned short old, unsigned short new); | ||
| 74 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); | ||
| 75 | extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); | ||
| 76 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | ||
| 77 | extern void swap_cgroup_swapoff(int type); | ||
| 78 | #else | ||
| 79 | |||
| 80 | static inline | ||
| 81 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) | ||
| 82 | { | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline | ||
| 87 | unsigned short lookup_swap_cgroup_id(swp_entry_t ent) | ||
| 88 | { | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline int | ||
| 93 | swap_cgroup_swapon(int type, unsigned long max_pages) | ||
| 94 | { | ||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | |||
| 98 | static inline void swap_cgroup_swapoff(int type) | ||
| 99 | { | ||
| 100 | return; | ||
| 101 | } | ||
| 102 | |||
| 103 | #endif /* CONFIG_MEMCG_SWAP */ | ||
| 104 | |||
| 105 | #endif /* __LINUX_PAGE_CGROUP_H */ | ||
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 000000000000..955421575d16 --- /dev/null +++ b/include/linux/page_counter.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #ifndef _LINUX_PAGE_COUNTER_H | ||
| 2 | #define _LINUX_PAGE_COUNTER_H | ||
| 3 | |||
| 4 | #include <linux/atomic.h> | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | #include <asm/page.h> | ||
| 7 | |||
| 8 | struct page_counter { | ||
| 9 | atomic_long_t count; | ||
| 10 | unsigned long limit; | ||
| 11 | struct page_counter *parent; | ||
| 12 | |||
| 13 | /* legacy */ | ||
| 14 | unsigned long watermark; | ||
| 15 | unsigned long failcnt; | ||
| 16 | }; | ||
| 17 | |||
| 18 | #if BITS_PER_LONG == 32 | ||
| 19 | #define PAGE_COUNTER_MAX LONG_MAX | ||
| 20 | #else | ||
| 21 | #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) | ||
| 22 | #endif | ||
| 23 | |||
| 24 | static inline void page_counter_init(struct page_counter *counter, | ||
| 25 | struct page_counter *parent) | ||
| 26 | { | ||
| 27 | atomic_long_set(&counter->count, 0); | ||
| 28 | counter->limit = PAGE_COUNTER_MAX; | ||
| 29 | counter->parent = parent; | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline unsigned long page_counter_read(struct page_counter *counter) | ||
| 33 | { | ||
| 34 | return atomic_long_read(&counter->count); | ||
| 35 | } | ||
| 36 | |||
| 37 | void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); | ||
| 38 | void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); | ||
| 39 | int page_counter_try_charge(struct page_counter *counter, | ||
| 40 | unsigned long nr_pages, | ||
| 41 | struct page_counter **fail); | ||
| 42 | void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); | ||
| 43 | int page_counter_limit(struct page_counter *counter, unsigned long limit); | ||
| 44 | int page_counter_memparse(const char *buf, unsigned long *nr_pages); | ||
| 45 | |||
| 46 | static inline void page_counter_reset_watermark(struct page_counter *counter) | ||
| 47 | { | ||
| 48 | counter->watermark = page_counter_read(counter); | ||
| 49 | } | ||
| 50 | |||
| 51 | #endif /* _LINUX_PAGE_COUNTER_H */ | ||
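A minimal sketch of the page_counter API declared above, assuming a parent/child pair of counters; in this version page_counter_try_charge() returns 0 on success and fills @fail with the counter that hit its limit.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/page_counter.h>

static struct page_counter example_parent, example_child;

static int example_charge(unsigned long nr_pages)
{
	struct page_counter *fail;

	if (page_counter_try_charge(&example_child, nr_pages, &fail))
		return -ENOMEM;		/* @fail is the counter that was full */

	/* ... pages are now accounted against child and parent ... */

	page_counter_uncharge(&example_child, nr_pages);
	return 0;
}

static int __init example_counters_init(void)
{
	page_counter_init(&example_parent, NULL);
	page_counter_init(&example_child, &example_parent);
	page_counter_limit(&example_child, 1024);	/* cap at 1024 pages */
	return 0;
}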
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h new file mode 100644 index 000000000000..d2a2c84c72d0 --- /dev/null +++ b/include/linux/page_ext.h | |||
| @@ -0,0 +1,84 @@ | |||
| 1 | #ifndef __LINUX_PAGE_EXT_H | ||
| 2 | #define __LINUX_PAGE_EXT_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <linux/stacktrace.h> | ||
| 6 | |||
| 7 | struct pglist_data; | ||
| 8 | struct page_ext_operations { | ||
| 9 | bool (*need)(void); | ||
| 10 | void (*init)(void); | ||
| 11 | }; | ||
| 12 | |||
| 13 | #ifdef CONFIG_PAGE_EXTENSION | ||
| 14 | |||
| 15 | /* | ||
| 16 | * page_ext->flags bits: | ||
| 17 | * | ||
| 18 | * PAGE_EXT_DEBUG_POISON is set for poisoned pages. It is used to | ||
| 19 | * implement the generic debug pagealloc feature: pages are filled with | ||
| 20 | * a poison pattern and this flag is set after free_pages(). Before | ||
| 21 | * alloc_pages() the pattern is verified to be intact and the flag is | ||
| 22 | * cleared. | ||
| 23 | */ | ||
| 24 | |||
| 25 | enum page_ext_flags { | ||
| 26 | PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ | ||
| 27 | PAGE_EXT_DEBUG_GUARD, | ||
| 28 | PAGE_EXT_OWNER, | ||
| 29 | }; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Page extensions can be thought of as an extended mem_map: | ||
| 33 | * a page_ext entry is associated with every page descriptor and | ||
| 34 | * carries additional information about the page. | ||
| 35 | * All page_ext entries are allocated at boot or on memory hotplug, | ||
| 36 | * so the page_ext for a pfn always exists. | ||
| 37 | */ | ||
| 38 | struct page_ext { | ||
| 39 | unsigned long flags; | ||
| 40 | #ifdef CONFIG_PAGE_OWNER | ||
| 41 | unsigned int order; | ||
| 42 | gfp_t gfp_mask; | ||
| 43 | struct stack_trace trace; | ||
| 44 | unsigned long trace_entries[8]; | ||
| 45 | #endif | ||
| 46 | }; | ||
| 47 | |||
| 48 | extern void pgdat_page_ext_init(struct pglist_data *pgdat); | ||
| 49 | |||
| 50 | #ifdef CONFIG_SPARSEMEM | ||
| 51 | static inline void page_ext_init_flatmem(void) | ||
| 52 | { | ||
| 53 | } | ||
| 54 | extern void page_ext_init(void); | ||
| 55 | #else | ||
| 56 | extern void page_ext_init_flatmem(void); | ||
| 57 | static inline void page_ext_init(void) | ||
| 58 | { | ||
| 59 | } | ||
| 60 | #endif | ||
| 61 | |||
| 62 | struct page_ext *lookup_page_ext(struct page *page); | ||
| 63 | |||
| 64 | #else /* !CONFIG_PAGE_EXTENSION */ | ||
| 65 | struct page_ext; | ||
| 66 | |||
| 67 | static inline void pgdat_page_ext_init(struct pglist_data *pgdat) | ||
| 68 | { | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline struct page_ext *lookup_page_ext(struct page *page) | ||
| 72 | { | ||
| 73 | return NULL; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline void page_ext_init(void) | ||
| 77 | { | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void page_ext_init_flatmem(void) | ||
| 81 | { | ||
| 82 | } | ||
| 83 | #endif /* CONFIG_PAGE_EXTENSION */ | ||
| 84 | #endif /* __LINUX_PAGE_EXT_H */ | ||
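A sketch of what a page_ext client looks like: an operations entry that tells the core whether the extension is needed, plus a helper that tests a flag on a page's extension. The "example" names are placeholders; wiring the ops up still requires adding them to the static page_ext_ops[] array in mm/page_ext.c, there is no runtime registration call.

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/page_ext.h>

static bool example_need(void)
{
	return true;			/* e.g. depend on a boot parameter */
}

static void example_init(void)
{
	/* one-time setup once the page_ext areas are allocated */
}

struct page_ext_operations example_page_ext_ops = {
	.need = example_need,
	.init = example_init,
};

static bool example_page_is_poisoned(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext && test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}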
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h new file mode 100644 index 000000000000..b48c3471c254 --- /dev/null +++ b/include/linux/page_owner.h | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | #ifndef __LINUX_PAGE_OWNER_H | ||
| 2 | #define __LINUX_PAGE_OWNER_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_PAGE_OWNER | ||
| 5 | extern bool page_owner_inited; | ||
| 6 | extern struct page_ext_operations page_owner_ops; | ||
| 7 | |||
| 8 | extern void __reset_page_owner(struct page *page, unsigned int order); | ||
| 9 | extern void __set_page_owner(struct page *page, | ||
| 10 | unsigned int order, gfp_t gfp_mask); | ||
| 11 | |||
| 12 | static inline void reset_page_owner(struct page *page, unsigned int order) | ||
| 13 | { | ||
| 14 | if (likely(!page_owner_inited)) | ||
| 15 | return; | ||
| 16 | |||
| 17 | __reset_page_owner(page, order); | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline void set_page_owner(struct page *page, | ||
| 21 | unsigned int order, gfp_t gfp_mask) | ||
| 22 | { | ||
| 23 | if (likely(!page_owner_inited)) | ||
| 24 | return; | ||
| 25 | |||
| 26 | __set_page_owner(page, order, gfp_mask); | ||
| 27 | } | ||
| 28 | #else | ||
| 29 | static inline void reset_page_owner(struct page *page, unsigned int order) | ||
| 30 | { | ||
| 31 | } | ||
| 32 | static inline void set_page_owner(struct page *page, | ||
| 33 | unsigned int order, gfp_t gfp_mask) | ||
| 34 | { | ||
| 35 | } | ||
| 36 | |||
| 37 | #endif /* CONFIG_PAGE_OWNER */ | ||
| 38 | #endif /* __LINUX_PAGE_OWNER_H */ | ||
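For context, a sketch of where the two hooks above sit in an allocator path (the real call sites live in mm/page_alloc.c); both compile to near no-ops until page_owner_inited is set.

#include <linux/gfp.h>
#include <linux/page_owner.h>

static inline void example_post_alloc_hook(struct page *page,
					   unsigned int order, gfp_t gfp_mask)
{
	set_page_owner(page, order, gfp_mask);	/* record the allocation */
}

static inline void example_free_pages_hook(struct page *page,
					   unsigned int order)
{
	reset_page_owner(page, order);		/* forget the owner on free */
}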
diff --git a/include/linux/pci.h b/include/linux/pci.h index a523cee3abb5..360a966a97a5 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -349,6 +349,7 @@ struct pci_dev { | |||
| 349 | unsigned int __aer_firmware_first:1; | 349 | unsigned int __aer_firmware_first:1; |
| 350 | unsigned int broken_intx_masking:1; | 350 | unsigned int broken_intx_masking:1; |
| 351 | unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ | 351 | unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ |
| 352 | unsigned int irq_managed:1; | ||
| 352 | pci_dev_flags_t dev_flags; | 353 | pci_dev_flags_t dev_flags; |
| 353 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 354 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
| 354 | 355 | ||
| @@ -1004,6 +1005,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); | |||
| 1004 | int pci_save_state(struct pci_dev *dev); | 1005 | int pci_save_state(struct pci_dev *dev); |
| 1005 | void pci_restore_state(struct pci_dev *dev); | 1006 | void pci_restore_state(struct pci_dev *dev); |
| 1006 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); | 1007 | struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); |
| 1008 | int pci_load_saved_state(struct pci_dev *dev, | ||
| 1009 | struct pci_saved_state *state); | ||
| 1007 | int pci_load_and_free_saved_state(struct pci_dev *dev, | 1010 | int pci_load_and_free_saved_state(struct pci_dev *dev, |
| 1008 | struct pci_saved_state **state); | 1011 | struct pci_saved_state **state); |
| 1009 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); | 1012 | struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); |
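A brief sketch of the pattern the newly exported pci_load_saved_state() enables: capture a device's config space once, then replay it as many times as needed without giving up the captured copy. The surrounding function names are illustrative.

#include <linux/pci.h>

static struct pci_saved_state *example_capture(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	return pci_store_saved_state(pdev);	/* caller owns the returned copy */
}

static int example_replay(struct pci_dev *pdev, struct pci_saved_state *state)
{
	int ret = pci_load_saved_state(pdev, state);	/* @state is not freed */

	if (!ret)
		pci_restore_state(pdev);
	return ret;
}

pci_load_and_free_saved_state() remains for the one-shot case where the copy should be consumed.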
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2706ee9a4327..8c7895061121 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
| @@ -109,7 +109,6 @@ struct hotplug_slot { | |||
| 109 | struct list_head slot_list; | 109 | struct list_head slot_list; |
| 110 | struct pci_slot *pci_slot; | 110 | struct pci_slot *pci_slot; |
| 111 | }; | 111 | }; |
| 112 | #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) | ||
| 113 | 112 | ||
| 114 | static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) | 113 | static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) |
| 115 | { | 114 | { |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 97fb9f69aaed..e63c02a93f6b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -564,6 +564,7 @@ | |||
| 564 | #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 | 564 | #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 |
| 565 | #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 | 565 | #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 |
| 566 | #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 | 566 | #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 |
| 567 | #define PCI_DEVICE_ID_AMD_NL_USB 0x7912 | ||
| 567 | #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F | 568 | #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F |
| 568 | #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 | 569 | #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 |
| 569 | #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 | 570 | #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 420032d41d27..57f3a1c550dc 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
| @@ -254,8 +254,6 @@ do { \ | |||
| 254 | #endif /* CONFIG_SMP */ | 254 | #endif /* CONFIG_SMP */ |
| 255 | 255 | ||
| 256 | #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) | 256 | #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) |
| 257 | #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) | ||
| 258 | #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) | ||
| 259 | 257 | ||
| 260 | /* | 258 | /* |
| 261 | * Must be an lvalue. Since @var must be a simple identifier, | 259 | * Must be an lvalue. Since @var must be a simple identifier, |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..b4337646388b 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
| @@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) | |||
| 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, | 128 | static inline bool __ref_is_percpu(struct percpu_ref *ref, |
| 129 | unsigned long __percpu **percpu_countp) | 129 | unsigned long __percpu **percpu_countp) |
| 130 | { | 130 | { |
| 131 | unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); | ||
| 132 | |||
| 133 | /* paired with smp_store_release() in percpu_ref_reinit() */ | 131 | /* paired with smp_store_release() in percpu_ref_reinit() */ |
| 134 | smp_read_barrier_depends(); | 132 | unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); |
| 135 | 133 | ||
| 136 | /* | 134 | /* |
| 137 | * Theoretically, the following could test just ATOMIC; however, | 135 | * Theoretically, the following could test just ATOMIC; however, |
| @@ -147,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, | |||
| 147 | } | 145 | } |
| 148 | 146 | ||
| 149 | /** | 147 | /** |
| 150 | * percpu_ref_get - increment a percpu refcount | 148 | * percpu_ref_get_many - increment a percpu refcount |
| 151 | * @ref: percpu_ref to get | 149 | * @ref: percpu_ref to get |
| 150 | * @nr: number of references to get | ||
| 152 | * | 151 | * |
| 153 | * Analagous to atomic_long_inc(). | 152 | * Analogous to atomic_long_add(). |
| 154 | * | 153 | * |
| 155 | * This function is safe to call as long as @ref is between init and exit. | 154 | * This function is safe to call as long as @ref is between init and exit. |
| 156 | */ | 155 | */ |
| 157 | static inline void percpu_ref_get(struct percpu_ref *ref) | 156 | static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) |
| 158 | { | 157 | { |
| 159 | unsigned long __percpu *percpu_count; | 158 | unsigned long __percpu *percpu_count; |
| 160 | 159 | ||
| 161 | rcu_read_lock_sched(); | 160 | rcu_read_lock_sched(); |
| 162 | 161 | ||
| 163 | if (__ref_is_percpu(ref, &percpu_count)) | 162 | if (__ref_is_percpu(ref, &percpu_count)) |
| 164 | this_cpu_inc(*percpu_count); | 163 | this_cpu_add(*percpu_count, nr); |
| 165 | else | 164 | else |
| 166 | atomic_long_inc(&ref->count); | 165 | atomic_long_add(nr, &ref->count); |
| 167 | 166 | ||
| 168 | rcu_read_unlock_sched(); | 167 | rcu_read_unlock_sched(); |
| 169 | } | 168 | } |
| 170 | 169 | ||
| 171 | /** | 170 | /** |
| 171 | * percpu_ref_get - increment a percpu refcount | ||
| 172 | * @ref: percpu_ref to get | ||
| 173 | * | ||
| 174 | * Analogous to atomic_long_inc(). | ||
| 175 | * | ||
| 176 | * This function is safe to call as long as @ref is between init and exit. | ||
| 177 | */ | ||
| 178 | static inline void percpu_ref_get(struct percpu_ref *ref) | ||
| 179 | { | ||
| 180 | percpu_ref_get_many(ref, 1); | ||
| 181 | } | ||
| 182 | |||
| 183 | /** | ||
| 172 | * percpu_ref_tryget - try to increment a percpu refcount | 184 | * percpu_ref_tryget - try to increment a percpu refcount |
| 173 | * @ref: percpu_ref to try-get | 185 | * @ref: percpu_ref to try-get |
| 174 | * | 186 | * |
| @@ -231,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) | |||
| 231 | } | 243 | } |
| 232 | 244 | ||
| 233 | /** | 245 | /** |
| 234 | * percpu_ref_put - decrement a percpu refcount | 246 | * percpu_ref_put_many - decrement a percpu refcount |
| 235 | * @ref: percpu_ref to put | 247 | * @ref: percpu_ref to put |
| 248 | * @nr: number of references to put | ||
| 236 | * | 249 | * |
| 237 | * Decrement the refcount, and if 0, call the release function (which was passed | 250 | * Decrement the refcount, and if 0, call the release function (which was passed |
| 238 | * to percpu_ref_init()) | 251 | * to percpu_ref_init()) |
| 239 | * | 252 | * |
| 240 | * This function is safe to call as long as @ref is between init and exit. | 253 | * This function is safe to call as long as @ref is between init and exit. |
| 241 | */ | 254 | */ |
| 242 | static inline void percpu_ref_put(struct percpu_ref *ref) | 255 | static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) |
| 243 | { | 256 | { |
| 244 | unsigned long __percpu *percpu_count; | 257 | unsigned long __percpu *percpu_count; |
| 245 | 258 | ||
| 246 | rcu_read_lock_sched(); | 259 | rcu_read_lock_sched(); |
| 247 | 260 | ||
| 248 | if (__ref_is_percpu(ref, &percpu_count)) | 261 | if (__ref_is_percpu(ref, &percpu_count)) |
| 249 | this_cpu_dec(*percpu_count); | 262 | this_cpu_sub(*percpu_count, nr); |
| 250 | else if (unlikely(atomic_long_dec_and_test(&ref->count))) | 263 | else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) |
| 251 | ref->release(ref); | 264 | ref->release(ref); |
| 252 | 265 | ||
| 253 | rcu_read_unlock_sched(); | 266 | rcu_read_unlock_sched(); |
| 254 | } | 267 | } |
| 255 | 268 | ||
| 256 | /** | 269 | /** |
| 270 | * percpu_ref_put - decrement a percpu refcount | ||
| 271 | * @ref: percpu_ref to put | ||
| 272 | * | ||
| 273 | * Decrement the refcount, and if 0, call the release function (which was passed | ||
| 274 | * to percpu_ref_init()) | ||
| 275 | * | ||
| 276 | * This function is safe to call as long as @ref is between init and exit. | ||
| 277 | */ | ||
| 278 | static inline void percpu_ref_put(struct percpu_ref *ref) | ||
| 279 | { | ||
| 280 | percpu_ref_put_many(ref, 1); | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 257 | * percpu_ref_is_zero - test whether a percpu refcount reached zero | 284 | * percpu_ref_is_zero - test whether a percpu refcount reached zero |
| 258 | * @ref: percpu_ref to test | 285 | * @ref: percpu_ref to test |
| 259 | * | 286 | * |
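A small sketch of the batched helpers introduced above: take one reference per queued item in a single operation and drop them the same way. The queueing itself is elided.

#include <linux/percpu-refcount.h>

static void example_submit_batch(struct percpu_ref *ref, unsigned int nr_items)
{
	percpu_ref_get_many(ref, nr_items);	/* one ref per item, single add */
	/* ... queue nr_items work items ... */
}

static void example_complete_batch(struct percpu_ref *ref, unsigned int nr_items)
{
	/* drops all the references at once; the last put calls ->release() */
	percpu_ref_put_many(ref, nr_items);
}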
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index a3aa63e47637..caebf2a758dc 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/preempt.h> | 5 | #include <linux/preempt.h> |
| 6 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
| 7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
| 8 | #include <linux/printk.h> | ||
| 8 | #include <linux/pfn.h> | 9 | #include <linux/pfn.h> |
| 9 | #include <linux/init.h> | 10 | #include <linux/init.h> |
| 10 | 11 | ||
| @@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | |||
| 134 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ | 135 | (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ |
| 135 | __alignof__(type)) | 136 | __alignof__(type)) |
| 136 | 137 | ||
| 138 | /* Declared here rather than in printk.h to avoid include hell */ | ||
| 139 | DECLARE_PER_CPU(printk_func_t, printk_func); | ||
| 140 | |||
| 137 | #endif /* __LINUX_PERCPU_H */ | 141 | #endif /* __LINUX_PERCPU_H */ |
diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167..22af8f8f5802 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -433,6 +433,7 @@ struct phy_device { | |||
| 433 | * by this PHY | 433 | * by this PHY |
| 434 | * flags: A bitfield defining certain other features this PHY | 434 | * flags: A bitfield defining certain other features this PHY |
| 435 | * supports (like interrupts) | 435 | * supports (like interrupts) |
| 436 | * driver_data: static driver data | ||
| 436 | * | 437 | * |
| 437 | * The drivers must implement config_aneg and read_status. All | 438 | * The drivers must implement config_aneg and read_status. All |
| 438 | * other functions are optional. Note that none of these | 439 | * other functions are optional. Note that none of these |
| @@ -448,6 +449,7 @@ struct phy_driver { | |||
| 448 | unsigned int phy_id_mask; | 449 | unsigned int phy_id_mask; |
| 449 | u32 features; | 450 | u32 features; |
| 450 | u32 flags; | 451 | u32 flags; |
| 452 | const void *driver_data; | ||
| 451 | 453 | ||
| 452 | /* | 454 | /* |
| 453 | * Called to issue a PHY software reset | 455 | * Called to issue a PHY software reset |
| @@ -772,4 +774,28 @@ int __init mdio_bus_init(void); | |||
| 772 | void mdio_bus_exit(void); | 774 | void mdio_bus_exit(void); |
| 773 | 775 | ||
| 774 | extern struct bus_type mdio_bus_type; | 776 | extern struct bus_type mdio_bus_type; |
| 777 | |||
| 778 | /** | ||
| 779 | * module_phy_driver() - Helper macro for registering PHY drivers | ||
| 780 | * @__phy_drivers: array of PHY drivers to register | ||
| 781 | * | ||
| 782 | * Helper macro for PHY drivers which do not do anything special in module | ||
| 783 | * init/exit. Each module may only use this macro once, and calling it | ||
| 784 | * replaces module_init() and module_exit(). | ||
| 785 | */ | ||
| 786 | #define phy_module_driver(__phy_drivers, __count) \ | ||
| 787 | static int __init phy_module_init(void) \ | ||
| 788 | { \ | ||
| 789 | return phy_drivers_register(__phy_drivers, __count); \ | ||
| 790 | } \ | ||
| 791 | module_init(phy_module_init); \ | ||
| 792 | static void __exit phy_module_exit(void) \ | ||
| 793 | { \ | ||
| 794 | phy_drivers_unregister(__phy_drivers, __count); \ | ||
| 795 | } \ | ||
| 796 | module_exit(phy_module_exit) | ||
| 797 | |||
| 798 | #define module_phy_driver(__phy_drivers) \ | ||
| 799 | phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) | ||
| 800 | |||
| 775 | #endif /* __PHY_H */ | 801 | #endif /* __PHY_H */ |
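The module_phy_driver() helper documented above collapses the usual init/exit boilerplate into one line; a sketch of a trivial user follows, with a made-up PHY ID and the generic helpers standing in for real callbacks.

#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_drivers[] = {
	{
		.phy_id		= 0x01234560,
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example Gigabit PHY",
		.features	= PHY_GBIT_FEATURES,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
		.driver		= { .owner = THIS_MODULE },
	},
};

module_phy_driver(example_phy_drivers);

MODULE_LICENSE("GPL");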
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 8cb6f815475b..a0197fa1b116 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
| @@ -61,7 +61,6 @@ struct phy { | |||
| 61 | struct device dev; | 61 | struct device dev; |
| 62 | int id; | 62 | int id; |
| 63 | const struct phy_ops *ops; | 63 | const struct phy_ops *ops; |
| 64 | struct phy_init_data *init_data; | ||
| 65 | struct mutex mutex; | 64 | struct mutex mutex; |
| 66 | int init_count; | 65 | int init_count; |
| 67 | int power_count; | 66 | int power_count; |
| @@ -84,33 +83,14 @@ struct phy_provider { | |||
| 84 | struct of_phandle_args *args); | 83 | struct of_phandle_args *args); |
| 85 | }; | 84 | }; |
| 86 | 85 | ||
| 87 | /** | 86 | struct phy_lookup { |
| 88 | * struct phy_consumer - represents the phy consumer | 87 | struct list_head node; |
| 89 | * @dev_name: the device name of the controller that will use this PHY device | 88 | const char *dev_id; |
| 90 | * @port: name given to the consumer port | 89 | const char *con_id; |
| 91 | */ | 90 | struct phy *phy; |
| 92 | struct phy_consumer { | ||
| 93 | const char *dev_name; | ||
| 94 | const char *port; | ||
| 95 | }; | ||
| 96 | |||
| 97 | /** | ||
| 98 | * struct phy_init_data - contains the list of PHY consumers | ||
| 99 | * @num_consumers: number of consumers for this PHY device | ||
| 100 | * @consumers: list of PHY consumers | ||
| 101 | */ | ||
| 102 | struct phy_init_data { | ||
| 103 | unsigned int num_consumers; | ||
| 104 | struct phy_consumer *consumers; | ||
| 105 | }; | 91 | }; |
| 106 | 92 | ||
| 107 | #define PHY_CONSUMER(_dev_name, _port) \ | 93 | #define to_phy(a) (container_of((a), struct phy, dev)) |
| 108 | { \ | ||
| 109 | .dev_name = _dev_name, \ | ||
| 110 | .port = _port, \ | ||
| 111 | } | ||
| 112 | |||
| 113 | #define to_phy(dev) (container_of((dev), struct phy, dev)) | ||
| 114 | 94 | ||
| 115 | #define of_phy_provider_register(dev, xlate) \ | 95 | #define of_phy_provider_register(dev, xlate) \ |
| 116 | __of_phy_provider_register((dev), THIS_MODULE, (xlate)) | 96 | __of_phy_provider_register((dev), THIS_MODULE, (xlate)) |
| @@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id); | |||
| 159 | struct phy *of_phy_simple_xlate(struct device *dev, | 139 | struct phy *of_phy_simple_xlate(struct device *dev, |
| 160 | struct of_phandle_args *args); | 140 | struct of_phandle_args *args); |
| 161 | struct phy *phy_create(struct device *dev, struct device_node *node, | 141 | struct phy *phy_create(struct device *dev, struct device_node *node, |
| 162 | const struct phy_ops *ops, | 142 | const struct phy_ops *ops); |
| 163 | struct phy_init_data *init_data); | ||
| 164 | struct phy *devm_phy_create(struct device *dev, struct device_node *node, | 143 | struct phy *devm_phy_create(struct device *dev, struct device_node *node, |
| 165 | const struct phy_ops *ops, struct phy_init_data *init_data); | 144 | const struct phy_ops *ops); |
| 166 | void phy_destroy(struct phy *phy); | 145 | void phy_destroy(struct phy *phy); |
| 167 | void devm_phy_destroy(struct device *dev, struct phy *phy); | 146 | void devm_phy_destroy(struct device *dev, struct phy *phy); |
| 168 | struct phy_provider *__of_phy_provider_register(struct device *dev, | 147 | struct phy_provider *__of_phy_provider_register(struct device *dev, |
| @@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev, | |||
| 174 | void of_phy_provider_unregister(struct phy_provider *phy_provider); | 153 | void of_phy_provider_unregister(struct phy_provider *phy_provider); |
| 175 | void devm_of_phy_provider_unregister(struct device *dev, | 154 | void devm_of_phy_provider_unregister(struct device *dev, |
| 176 | struct phy_provider *phy_provider); | 155 | struct phy_provider *phy_provider); |
| 156 | int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id); | ||
| 157 | void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id); | ||
| 177 | #else | 158 | #else |
| 178 | static inline int phy_pm_runtime_get(struct phy *phy) | 159 | static inline int phy_pm_runtime_get(struct phy *phy) |
| 179 | { | 160 | { |
| @@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev, | |||
| 301 | 282 | ||
| 302 | static inline struct phy *phy_create(struct device *dev, | 283 | static inline struct phy *phy_create(struct device *dev, |
| 303 | struct device_node *node, | 284 | struct device_node *node, |
| 304 | const struct phy_ops *ops, | 285 | const struct phy_ops *ops) |
| 305 | struct phy_init_data *init_data) | ||
| 306 | { | 286 | { |
| 307 | return ERR_PTR(-ENOSYS); | 287 | return ERR_PTR(-ENOSYS); |
| 308 | } | 288 | } |
| 309 | 289 | ||
| 310 | static inline struct phy *devm_phy_create(struct device *dev, | 290 | static inline struct phy *devm_phy_create(struct device *dev, |
| 311 | struct device_node *node, | 291 | struct device_node *node, |
| 312 | const struct phy_ops *ops, | 292 | const struct phy_ops *ops) |
| 313 | struct phy_init_data *init_data) | ||
| 314 | { | 293 | { |
| 315 | return ERR_PTR(-ENOSYS); | 294 | return ERR_PTR(-ENOSYS); |
| 316 | } | 295 | } |
| @@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct device *dev, | |||
| 345 | struct phy_provider *phy_provider) | 324 | struct phy_provider *phy_provider) |
| 346 | { | 325 | { |
| 347 | } | 326 | } |
| 327 | static inline int | ||
| 328 | phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id) | ||
| 329 | { | ||
| 330 | return 0; | ||
| 331 | } | ||
| 332 | static inline void phy_remove_lookup(struct phy *phy, const char *con_id, | ||
| 333 | const char *dev_id) { } | ||
| 348 | #endif | 334 | #endif |
| 349 | 335 | ||
| 350 | #endif /* __DRIVERS_PHY_H */ | 336 | #endif /* __DRIVERS_PHY_H */ |
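With phy_init_data gone, a generic-PHY provider now creates the PHY with just its ops and, for non-DT consumers, registers a lookup instead; a sketch follows, where the ops, con_id and dev_id strings are placeholders.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static const struct phy_ops example_phy_ops = {
	.owner = THIS_MODULE,
};

static int example_phy_probe(struct platform_device *pdev)
{
	struct phy *phy;

	phy = devm_phy_create(&pdev->dev, NULL, &example_phy_ops);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* bind the PHY to a consumer the way phy_init_data used to */
	return phy_create_lookup(phy, "usb2-phy", "example-ehci.0");
}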
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index f2ca1b459377..7e75bfe37cc7 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h | |||
| @@ -11,7 +11,7 @@ struct fixed_phy_status { | |||
| 11 | 11 | ||
| 12 | struct device_node; | 12 | struct device_node; |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_FIXED_PHY | 14 | #if IS_ENABLED(CONFIG_FIXED_PHY) |
| 15 | extern int fixed_phy_add(unsigned int irq, int phy_id, | 15 | extern int fixed_phy_add(unsigned int irq, int phy_id, |
| 16 | struct fixed_phy_status *status); | 16 | struct fixed_phy_status *status); |
| 17 | extern struct phy_device *fixed_phy_register(unsigned int irq, | 17 | extern struct phy_device *fixed_phy_register(unsigned int irq, |
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 1997ffc295a7..b9cf6c51b181 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/threads.h> | 8 | #include <linux/threads.h> |
| 9 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
| 10 | #include <linux/kref.h> | 10 | #include <linux/kref.h> |
| 11 | #include <linux/ns_common.h> | ||
| 11 | 12 | ||
| 12 | struct pidmap { | 13 | struct pidmap { |
| 13 | atomic_t nr_free; | 14 | atomic_t nr_free; |
| @@ -43,7 +44,7 @@ struct pid_namespace { | |||
| 43 | kgid_t pid_gid; | 44 | kgid_t pid_gid; |
| 44 | int hide_pid; | 45 | int hide_pid; |
| 45 | int reboot; /* group exit code if this pidns was rebooted */ | 46 | int reboot; /* group exit code if this pidns was rebooted */ |
| 46 | unsigned int proc_inum; | 47 | struct ns_common ns; |
| 47 | }; | 48 | }; |
| 48 | 49 | ||
| 49 | extern struct pid_namespace init_pid_ns; | 50 | extern struct pid_namespace init_pid_ns; |
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index a6591c693ebb..5e0bc779e6c5 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h | |||
| @@ -27,6 +27,7 @@ struct samsung_i2s { | |||
| 27 | #define QUIRK_NO_MUXPSR (1 << 2) | 27 | #define QUIRK_NO_MUXPSR (1 << 2) |
| 28 | #define QUIRK_NEED_RSTCLR (1 << 3) | 28 | #define QUIRK_NEED_RSTCLR (1 << 3) |
| 29 | #define QUIRK_SUPPORTS_TDM (1 << 4) | 29 | #define QUIRK_SUPPORTS_TDM (1 << 4) |
| 30 | #define QUIRK_SUPPORTS_IDMA (1 << 5) | ||
| 30 | /* Quirks of the I2S controller */ | 31 | /* Quirks of the I2S controller */ |
| 31 | u32 quirks; | 32 | u32 quirks; |
| 32 | dma_addr_t idma_addr; | 33 | dma_addr_t idma_addr; |
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 000000000000..26af54321958 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | #ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ | ||
| 2 | #define __LINUX_PLATFORM_DATA_BCMGENET_H__ | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <linux/if_ether.h> | ||
| 6 | #include <linux/phy.h> | ||
| 7 | |||
| 8 | struct bcmgenet_platform_data { | ||
| 9 | bool mdio_enabled; | ||
| 10 | phy_interface_t phy_interface; | ||
| 11 | int phy_address; | ||
| 12 | int phy_speed; | ||
| 13 | int phy_duplex; | ||
| 14 | u8 mac_address[ETH_ALEN]; | ||
| 15 | int genet_version; | ||
| 16 | }; | ||
| 17 | |||
| 18 | #endif | ||
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 6a1357d31871..7d964e787299 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
| @@ -41,6 +41,7 @@ enum sdma_peripheral_type { | |||
| 41 | IMX_DMATYPE_ESAI, /* ESAI */ | 41 | IMX_DMATYPE_ESAI, /* ESAI */ |
| 42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ | 42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ |
| 43 | IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ | 43 | IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ |
| 44 | IMX_DMATYPE_SAI, /* SAI */ | ||
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 46 | enum imx_dma_prio { | 47 | enum imx_dma_prio { |
diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h deleted file mode 100644 index 5eb7da9b3772..000000000000 --- a/include/linux/platform_data/dwc3-exynos.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | /** | ||
| 2 | * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
| 5 | * http://www.samsung.com | ||
| 6 | * | ||
| 7 | * Author: Anton Tikhomirov <av.tikhomirov@samsung.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _DWC3_EXYNOS_H_ | ||
| 16 | #define _DWC3_EXYNOS_H_ | ||
| 17 | |||
| 18 | struct dwc3_exynos_data { | ||
| 19 | int phy_type; | ||
| 20 | int (*phy_init)(struct platform_device *pdev, int type); | ||
| 21 | int (*phy_exit)(struct platform_device *pdev, int type); | ||
| 22 | }; | ||
| 23 | |||
| 24 | #endif /* _DWC3_EXYNOS_H_ */ | ||
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index 1b2ba24e4e03..9c7fd1efe495 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h | |||
| @@ -136,6 +136,7 @@ struct lp855x_rom_data { | |||
| 136 | Only valid when mode is PWM_BASED. | 136 | Only valid when mode is PWM_BASED. |
| 137 | * @size_program : total size of lp855x_rom_data | 137 | * @size_program : total size of lp855x_rom_data |
| 138 | * @rom_data : list of new eeprom/eprom registers | 138 | * @rom_data : list of new eeprom/eprom registers |
| 139 | * @supply : regulator that supplies 3V input | ||
| 139 | */ | 140 | */ |
| 140 | struct lp855x_platform_data { | 141 | struct lp855x_platform_data { |
| 141 | const char *name; | 142 | const char *name; |
| @@ -144,6 +145,7 @@ struct lp855x_platform_data { | |||
| 144 | unsigned int period_ns; | 145 | unsigned int period_ns; |
| 145 | int size_program; | 146 | int size_program; |
| 146 | struct lp855x_rom_data *rom_data; | 147 | struct lp855x_rom_data *rom_data; |
| 148 | struct regulator *supply; | ||
| 147 | }; | 149 | }; |
| 148 | 150 | ||
| 149 | #endif | 151 | #endif |
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h deleted file mode 100644 index a5f045e1d8fe..000000000000 --- a/include/linux/platform_data/rcar-du.h +++ /dev/null | |||
| @@ -1,74 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * rcar_du.h -- R-Car Display Unit DRM driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Renesas Corporation | ||
| 5 | * | ||
| 6 | * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef __RCAR_DU_H__ | ||
| 15 | #define __RCAR_DU_H__ | ||
| 16 | |||
| 17 | #include <video/videomode.h> | ||
| 18 | |||
| 19 | enum rcar_du_output { | ||
| 20 | RCAR_DU_OUTPUT_DPAD0, | ||
| 21 | RCAR_DU_OUTPUT_DPAD1, | ||
| 22 | RCAR_DU_OUTPUT_LVDS0, | ||
| 23 | RCAR_DU_OUTPUT_LVDS1, | ||
| 24 | RCAR_DU_OUTPUT_TCON, | ||
| 25 | RCAR_DU_OUTPUT_MAX, | ||
| 26 | }; | ||
| 27 | |||
| 28 | enum rcar_du_encoder_type { | ||
| 29 | RCAR_DU_ENCODER_UNUSED = 0, | ||
| 30 | RCAR_DU_ENCODER_NONE, | ||
| 31 | RCAR_DU_ENCODER_VGA, | ||
| 32 | RCAR_DU_ENCODER_LVDS, | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct rcar_du_panel_data { | ||
| 36 | unsigned int width_mm; /* Panel width in mm */ | ||
| 37 | unsigned int height_mm; /* Panel height in mm */ | ||
| 38 | struct videomode mode; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct rcar_du_connector_lvds_data { | ||
| 42 | struct rcar_du_panel_data panel; | ||
| 43 | }; | ||
| 44 | |||
| 45 | struct rcar_du_connector_vga_data { | ||
| 46 | /* TODO: Add DDC information for EDID retrieval */ | ||
| 47 | }; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * struct rcar_du_encoder_data - Encoder platform data | ||
| 51 | * @type: the encoder type (RCAR_DU_ENCODER_*) | ||
| 52 | * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*) | ||
| 53 | * @connector.lvds: platform data for LVDS connectors | ||
| 54 | * @connector.vga: platform data for VGA connectors | ||
| 55 | * | ||
| 56 | * Encoder platform data describes an on-board encoder, its associated DU SoC | ||
| 57 | * output, and the connector. | ||
| 58 | */ | ||
| 59 | struct rcar_du_encoder_data { | ||
| 60 | enum rcar_du_encoder_type type; | ||
| 61 | enum rcar_du_output output; | ||
| 62 | |||
| 63 | union { | ||
| 64 | struct rcar_du_connector_lvds_data lvds; | ||
| 65 | struct rcar_du_connector_vga_data vga; | ||
| 66 | } connector; | ||
| 67 | }; | ||
| 68 | |||
| 69 | struct rcar_du_platform_data { | ||
| 70 | struct rcar_du_encoder_data *encoders; | ||
| 71 | unsigned int num_encoders; | ||
| 72 | }; | ||
| 73 | |||
| 74 | #endif /* __RCAR_DU_H__ */ | ||
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398ff..5087fff96d86 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" | 24 | #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" |
| 25 | 25 | ||
| 26 | struct st21nfca_nfc_platform_data { | 26 | struct st21nfca_nfc_platform_data { |
| 27 | unsigned int gpio_irq; | ||
| 28 | unsigned int gpio_ena; | 27 | unsigned int gpio_ena; |
| 29 | unsigned int irq_polarity; | 28 | unsigned int irq_polarity; |
| 30 | }; | 29 | }; |
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efab..c3b432f5b63e 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" | 24 | #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" |
| 25 | 25 | ||
| 26 | struct st21nfcb_nfc_platform_data { | 26 | struct st21nfcb_nfc_platform_data { |
| 27 | unsigned int gpio_irq; | ||
| 28 | unsigned int gpio_reset; | 27 | unsigned int gpio_reset; |
| 29 | unsigned int irq_polarity; | 28 | unsigned int irq_polarity; |
| 30 | }; | 29 | }; |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 153d303af7eb..ae4882ca4a64 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *); | |||
| 197 | /* non-hotpluggable platform devices may use this so that probe() and | 197 | /* non-hotpluggable platform devices may use this so that probe() and |
| 198 | * its support may live in __init sections, conserving runtime memory. | 198 | * its support may live in __init sections, conserving runtime memory. |
| 199 | */ | 199 | */ |
| 200 | extern int platform_driver_probe(struct platform_driver *driver, | 200 | #define platform_driver_probe(drv, probe) \ |
| 201 | int (*probe)(struct platform_device *)); | 201 | __platform_driver_probe(drv, probe, THIS_MODULE) |
| 202 | extern int __platform_driver_probe(struct platform_driver *driver, | ||
| 203 | int (*probe)(struct platform_device *), struct module *module); | ||
| 202 | 204 | ||
| 203 | static inline void *platform_get_drvdata(const struct platform_device *pdev) | 205 | static inline void *platform_get_drvdata(const struct platform_device *pdev) |
| 204 | { | 206 | { |
| @@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \ | |||
| 238 | } \ | 240 | } \ |
| 239 | module_exit(__platform_driver##_exit); | 241 | module_exit(__platform_driver##_exit); |
| 240 | 242 | ||
| 241 | extern struct platform_device *platform_create_bundle( | 243 | #define platform_create_bundle(driver, probe, res, n_res, data, size) \ |
| 244 | __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) | ||
| 245 | extern struct platform_device *__platform_create_bundle( | ||
| 242 | struct platform_driver *driver, int (*probe)(struct platform_device *), | 246 | struct platform_driver *driver, int (*probe)(struct platform_device *), |
| 243 | struct resource *res, unsigned int n_res, | 247 | struct resource *res, unsigned int n_res, |
| 244 | const void *data, size_t size); | 248 | const void *data, size_t size, struct module *module); |
| 245 | 249 | ||
| 246 | /* early platform driver interface */ | 250 | /* early platform driver interface */ |
| 247 | struct early_platform_driver { | 251 | struct early_platform_driver { |
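Note: the conversion of platform_driver_probe() and platform_create_bundle() into macros is source-compatible; THIS_MODULE is now passed behind the scenes. A minimal sketch of an unchanged caller (driver and probe names are hypothetical):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init foo_probe(struct platform_device *pdev)
    {
            /* one-shot probe for a non-hotpluggable device; may live in __init */
            return 0;
    }

    static struct platform_driver foo_driver = {
            .driver = { .name = "foo" },
    };

    static int __init foo_init(void)
    {
            /* expands to __platform_driver_probe(&foo_driver, foo_probe, THIS_MODULE) */
            return platform_driver_probe(&foo_driver, foo_probe);
    }
    module_init(foo_init);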
diff --git a/include/linux/plist.h b/include/linux/plist.h index 8b6c970cff6c..97883604a3c5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h | |||
| @@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
| 176 | * plist_for_each_entry - iterate over list of given type | 176 | * plist_for_each_entry - iterate over list of given type |
| 177 | * @pos: the type * to use as a loop counter | 177 | * @pos: the type * to use as a loop counter |
| 178 | * @head: the head for your list | 178 | * @head: the head for your list |
| 179 | * @mem: the name of the list_struct within the struct | 179 | * @mem: the name of the list_head within the struct |
| 180 | */ | 180 | */ |
| 181 | #define plist_for_each_entry(pos, head, mem) \ | 181 | #define plist_for_each_entry(pos, head, mem) \ |
| 182 | list_for_each_entry(pos, &(head)->node_list, mem.node_list) | 182 | list_for_each_entry(pos, &(head)->node_list, mem.node_list) |
| @@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
| 185 | * plist_for_each_entry_continue - continue iteration over list of given type | 185 | * plist_for_each_entry_continue - continue iteration over list of given type |
| 186 | * @pos: the type * to use as a loop cursor | 186 | * @pos: the type * to use as a loop cursor |
| 187 | * @head: the head for your list | 187 | * @head: the head for your list |
| 188 | * @m: the name of the list_struct within the struct | 188 | * @m: the name of the list_head within the struct |
| 189 | * | 189 | * |
| 190 | * Continue to iterate over list of given type, continuing after | 190 | * Continue to iterate over list of given type, continuing after |
| 191 | * the current position. | 191 | * the current position. |
| @@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); | |||
| 198 | * @pos: the type * to use as a loop counter | 198 | * @pos: the type * to use as a loop counter |
| 199 | * @n: another type * to use as temporary storage | 199 | * @n: another type * to use as temporary storage |
| 200 | * @head: the head for your list | 200 | * @head: the head for your list |
| 201 | * @m: the name of the list_struct within the struct | 201 | * @m: the name of the list_head within the struct |
| 202 | * | 202 | * |
| 203 | * Iterate over list of given type, safe against removal of list entry. | 203 | * Iterate over list of given type, safe against removal of list entry. |
| 204 | */ | 204 | */ |
| @@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
| 229 | * plist_first_entry - get the struct for the first entry | 229 | * plist_first_entry - get the struct for the first entry |
| 230 | * @head: the &struct plist_head pointer | 230 | * @head: the &struct plist_head pointer |
| 231 | * @type: the type of the struct this is embedded in | 231 | * @type: the type of the struct this is embedded in |
| 232 | * @member: the name of the list_struct within the struct | 232 | * @member: the name of the list_head within the struct |
| 233 | */ | 233 | */ |
| 234 | #ifdef CONFIG_DEBUG_PI_LIST | 234 | #ifdef CONFIG_DEBUG_PI_LIST |
| 235 | # define plist_first_entry(head, type, member) \ | 235 | # define plist_first_entry(head, type, member) \ |
| @@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node) | |||
| 246 | * plist_last_entry - get the struct for the last entry | 246 | * plist_last_entry - get the struct for the last entry |
| 247 | * @head: the &struct plist_head pointer | 247 | * @head: the &struct plist_head pointer |
| 248 | * @type: the type of the struct this is embedded in | 248 | * @type: the type of the struct this is embedded in |
| 249 | * @member: the name of the list_struct within the struct | 249 | * @member: the name of the list_head within the struct |
| 250 | */ | 250 | */ |
| 251 | #ifdef CONFIG_DEBUG_PI_LIST | 251 | #ifdef CONFIG_DEBUG_PI_LIST |
| 252 | # define plist_last_entry(head, type, member) \ | 252 | # define plist_last_entry(head, type, member) \ |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 383fd68aaee1..8b5976364619 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -342,7 +342,7 @@ struct dev_pm_ops { | |||
| 342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) | 342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) |
| 343 | #endif | 343 | #endif |
| 344 | 344 | ||
| 345 | #ifdef CONFIG_PM_RUNTIME | 345 | #ifdef CONFIG_PM |
| 346 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ | 346 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
| 347 | .runtime_suspend = suspend_fn, \ | 347 | .runtime_suspend = suspend_fn, \ |
| 348 | .runtime_resume = resume_fn, \ | 348 | .runtime_resume = resume_fn, \ |
| @@ -351,15 +351,6 @@ struct dev_pm_ops { | |||
| 351 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) | 351 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) |
| 352 | #endif | 352 | #endif |
| 353 | 353 | ||
| 354 | #ifdef CONFIG_PM | ||
| 355 | #define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ | ||
| 356 | .runtime_suspend = suspend_fn, \ | ||
| 357 | .runtime_resume = resume_fn, \ | ||
| 358 | .runtime_idle = idle_fn, | ||
| 359 | #else | ||
| 360 | #define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) | ||
| 361 | #endif | ||
| 362 | |||
| 363 | /* | 354 | /* |
| 364 | * Use this if you want to use the same suspend and resume callbacks for suspend | 355 | * Use this if you want to use the same suspend and resume callbacks for suspend |
| 365 | * to RAM and hibernation. | 356 | * to RAM and hibernation. |
| @@ -538,11 +529,7 @@ enum rpm_request { | |||
| 538 | }; | 529 | }; |
| 539 | 530 | ||
| 540 | struct wakeup_source; | 531 | struct wakeup_source; |
| 541 | 532 | struct pm_domain_data; | |
| 542 | struct pm_domain_data { | ||
| 543 | struct list_head list_node; | ||
| 544 | struct device *dev; | ||
| 545 | }; | ||
| 546 | 533 | ||
| 547 | struct pm_subsys_data { | 534 | struct pm_subsys_data { |
| 548 | spinlock_t lock; | 535 | spinlock_t lock; |
| @@ -576,7 +563,7 @@ struct dev_pm_info { | |||
| 576 | #else | 563 | #else |
| 577 | unsigned int should_wakeup:1; | 564 | unsigned int should_wakeup:1; |
| 578 | #endif | 565 | #endif |
| 579 | #ifdef CONFIG_PM_RUNTIME | 566 | #ifdef CONFIG_PM |
| 580 | struct timer_list suspend_timer; | 567 | struct timer_list suspend_timer; |
| 581 | unsigned long timer_expires; | 568 | unsigned long timer_expires; |
| 582 | struct work_struct work; | 569 | struct work_struct work; |
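Note: with SET_PM_RUNTIME_PM_OPS removed and SET_RUNTIME_PM_OPS now guarded by CONFIG_PM, drivers use a single macro for runtime callbacks. A sketch of a dev_pm_ops table; the foo_* callbacks are hypothetical:

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
            /* previously SET_PM_RUNTIME_PM_OPS(); the two macros are now one */
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };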
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8348866e7b05..0b0039634410 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h | |||
| @@ -18,6 +18,8 @@ struct pm_clk_notifier_block { | |||
| 18 | char *con_ids[]; | 18 | char *con_ids[]; |
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | struct clk; | ||
| 22 | |||
| 21 | #ifdef CONFIG_PM_CLK | 23 | #ifdef CONFIG_PM_CLK |
| 22 | static inline bool pm_clk_no_clocks(struct device *dev) | 24 | static inline bool pm_clk_no_clocks(struct device *dev) |
| 23 | { | 25 | { |
| @@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev); | |||
| 29 | extern int pm_clk_create(struct device *dev); | 31 | extern int pm_clk_create(struct device *dev); |
| 30 | extern void pm_clk_destroy(struct device *dev); | 32 | extern void pm_clk_destroy(struct device *dev); |
| 31 | extern int pm_clk_add(struct device *dev, const char *con_id); | 33 | extern int pm_clk_add(struct device *dev, const char *con_id); |
| 34 | extern int pm_clk_add_clk(struct device *dev, struct clk *clk); | ||
| 32 | extern void pm_clk_remove(struct device *dev, const char *con_id); | 35 | extern void pm_clk_remove(struct device *dev, const char *con_id); |
| 33 | extern int pm_clk_suspend(struct device *dev); | 36 | extern int pm_clk_suspend(struct device *dev); |
| 34 | extern int pm_clk_resume(struct device *dev); | 37 | extern int pm_clk_resume(struct device *dev); |
| @@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id) | |||
| 51 | { | 54 | { |
| 52 | return -EINVAL; | 55 | return -EINVAL; |
| 53 | } | 56 | } |
| 57 | |||
| 58 | static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) | ||
| 59 | { | ||
| 60 | return -EINVAL; | ||
| 61 | } | ||
| 54 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | 62 | static inline void pm_clk_remove(struct device *dev, const char *con_id) |
| 55 | { | 63 | { |
| 56 | } | 64 | } |
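Note: pm_clk_add_clk() takes an already-obtained struct clk * instead of a connection id. A sketch of an attach path, assuming the clock was looked up from DT beforehand:

    struct clk *clk = of_clk_get(dev->of_node, 0);
    int ret;

    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = pm_clk_create(dev);
    if (!ret) {
            ret = pm_clk_add_clk(dev, clk);   /* clk pointer, no con_id lookup */
            if (ret)
                    pm_clk_destroy(dev);
    }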
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2e0e06daf8c0..6cd20d5e651b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
| @@ -17,6 +17,9 @@ | |||
| 17 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
| 18 | #include <linux/cpuidle.h> | 18 | #include <linux/cpuidle.h> |
| 19 | 19 | ||
| 20 | /* Defines used for the flags field in the struct generic_pm_domain */ | ||
| 21 | #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ | ||
| 22 | |||
| 20 | enum gpd_status { | 23 | enum gpd_status { |
| 21 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 24 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
| 22 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ | 25 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ |
| @@ -76,6 +79,7 @@ struct generic_pm_domain { | |||
| 76 | struct device *dev); | 79 | struct device *dev); |
| 77 | void (*detach_dev)(struct generic_pm_domain *domain, | 80 | void (*detach_dev)(struct generic_pm_domain *domain, |
| 78 | struct device *dev); | 81 | struct device *dev); |
| 82 | unsigned int flags; /* Bit field of configs for genpd */ | ||
| 79 | }; | 83 | }; |
| 80 | 84 | ||
| 81 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | 85 | static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) |
| @@ -100,6 +104,11 @@ struct gpd_timing_data { | |||
| 100 | bool cached_stop_ok; | 104 | bool cached_stop_ok; |
| 101 | }; | 105 | }; |
| 102 | 106 | ||
| 107 | struct pm_domain_data { | ||
| 108 | struct list_head list_node; | ||
| 109 | struct device *dev; | ||
| 110 | }; | ||
| 111 | |||
| 103 | struct generic_pm_domain_data { | 112 | struct generic_pm_domain_data { |
| 104 | struct pm_domain_data base; | 113 | struct pm_domain_data base; |
| 105 | struct gpd_timing_data td; | 114 | struct gpd_timing_data td; |
| @@ -147,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, | |||
| 147 | 156 | ||
| 148 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); | 157 | extern int pm_genpd_poweron(struct generic_pm_domain *genpd); |
| 149 | extern int pm_genpd_name_poweron(const char *domain_name); | 158 | extern int pm_genpd_name_poweron(const char *domain_name); |
| 159 | extern void pm_genpd_poweroff_unused(void); | ||
| 150 | 160 | ||
| 151 | extern struct dev_power_governor simple_qos_governor; | 161 | extern struct dev_power_governor simple_qos_governor; |
| 152 | extern struct dev_power_governor pm_domain_always_on_gov; | 162 | extern struct dev_power_governor pm_domain_always_on_gov; |
| @@ -221,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name) | |||
| 221 | { | 231 | { |
| 222 | return -ENOSYS; | 232 | return -ENOSYS; |
| 223 | } | 233 | } |
| 234 | static inline void pm_genpd_poweroff_unused(void) {} | ||
| 224 | #define simple_qos_governor NULL | 235 | #define simple_qos_governor NULL |
| 225 | #define pm_domain_always_on_gov NULL | 236 | #define pm_domain_always_on_gov NULL |
| 226 | #endif | 237 | #endif |
| @@ -237,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name, | |||
| 237 | return __pm_genpd_name_add_device(domain_name, dev, NULL); | 248 | return __pm_genpd_name_add_device(domain_name, dev, NULL); |
| 238 | } | 249 | } |
| 239 | 250 | ||
| 240 | #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME | ||
| 241 | extern void pm_genpd_poweroff_unused(void); | ||
| 242 | #else | ||
| 243 | static inline void pm_genpd_poweroff_unused(void) {} | ||
| 244 | #endif | ||
| 245 | |||
| 246 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP | 251 | #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP |
| 247 | extern void pm_genpd_syscore_poweroff(struct device *dev); | 252 | extern void pm_genpd_syscore_poweroff(struct device *dev); |
| 248 | extern void pm_genpd_syscore_poweron(struct device *dev); | 253 | extern void pm_genpd_syscore_poweron(struct device *dev); |
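Note: the new flags field lets a genpd provider ask the core to manage device clocks through the PM clock framework. A sketch of a domain opting in; the domain name and governor choice are illustrative:

    static struct generic_pm_domain foo_pd = {
            .name  = "foo-power-domain",
            .flags = GENPD_FLAG_PM_CLK,   /* core handles per-device PM clocks */
    };

    pm_genpd_init(&foo_pd, &simple_qos_governor, false /* starts powered on */);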
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0330217abfad..cec2d4540914 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
| @@ -21,7 +21,7 @@ struct dev_pm_opp; | |||
| 21 | struct device; | 21 | struct device; |
| 22 | 22 | ||
| 23 | enum dev_pm_opp_event { | 23 | enum dev_pm_opp_event { |
| 24 | OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, | 24 | OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
| 27 | #if defined(CONFIG_PM_OPP) | 27 | #if defined(CONFIG_PM_OPP) |
| @@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | |||
| 44 | 44 | ||
| 45 | int dev_pm_opp_add(struct device *dev, unsigned long freq, | 45 | int dev_pm_opp_add(struct device *dev, unsigned long freq, |
| 46 | unsigned long u_volt); | 46 | unsigned long u_volt); |
| 47 | void dev_pm_opp_remove(struct device *dev, unsigned long freq); | ||
| 47 | 48 | ||
| 48 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); | 49 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); |
| 49 | 50 | ||
| @@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, | |||
| 90 | return -EINVAL; | 91 | return -EINVAL; |
| 91 | } | 92 | } |
| 92 | 93 | ||
| 94 | static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) | ||
| 95 | { | ||
| 96 | } | ||
| 97 | |||
| 93 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 98 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
| 94 | { | 99 | { |
| 95 | return 0; | 100 | return 0; |
| @@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( | |||
| 109 | 114 | ||
| 110 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) | 115 | #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) |
| 111 | int of_init_opp_table(struct device *dev); | 116 | int of_init_opp_table(struct device *dev); |
| 117 | void of_free_opp_table(struct device *dev); | ||
| 112 | #else | 118 | #else |
| 113 | static inline int of_init_opp_table(struct device *dev) | 119 | static inline int of_init_opp_table(struct device *dev) |
| 114 | { | 120 | { |
| 115 | return -EINVAL; | 121 | return -EINVAL; |
| 116 | } | 122 | } |
| 123 | |||
| 124 | static inline void of_free_opp_table(struct device *dev) | ||
| 125 | { | ||
| 126 | } | ||
| 117 | #endif | 127 | #endif |
| 118 | 128 | ||
| 119 | #endif /* __LINUX_OPP_H__ */ | 129 | #endif /* __LINUX_OPP_H__ */ |
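Note: dev_pm_opp_remove() and of_free_opp_table() give the add/init paths symmetric teardown. A sketch of the pairing; the frequency and voltage values are made up:

    /* dynamic OPP: register at probe, drop again on remove or error */
    ret = dev_pm_opp_add(dev, 1000000000, 1100000);   /* 1 GHz at 1100000 uV */
    ...
    dev_pm_opp_remove(dev, 1000000000);

    /* DT-sourced table: free what of_init_opp_table() created */
    ret = of_init_opp_table(dev);
    ...
    of_free_opp_table(dev);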
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 636e82834506..7b3ae0cffc05 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
| @@ -154,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev); | |||
| 154 | int dev_pm_qos_add_ancestor_request(struct device *dev, | 154 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
| 155 | struct dev_pm_qos_request *req, | 155 | struct dev_pm_qos_request *req, |
| 156 | enum dev_pm_qos_req_type type, s32 value); | 156 | enum dev_pm_qos_req_type type, s32 value); |
| 157 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | ||
| 158 | void dev_pm_qos_hide_latency_limit(struct device *dev); | ||
| 159 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
| 160 | void dev_pm_qos_hide_flags(struct device *dev); | ||
| 161 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
| 162 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); | ||
| 163 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); | ||
| 164 | |||
| 165 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) | ||
| 166 | { | ||
| 167 | return dev->power.qos->resume_latency_req->data.pnode.prio; | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
| 171 | { | ||
| 172 | return dev->power.qos->flags_req->data.flr.flags; | ||
| 173 | } | ||
| 157 | #else | 174 | #else |
| 158 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, | 175 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, |
| 159 | s32 mask) | 176 | s32 mask) |
| @@ -200,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, | |||
| 200 | enum dev_pm_qos_req_type type, | 217 | enum dev_pm_qos_req_type type, |
| 201 | s32 value) | 218 | s32 value) |
| 202 | { return 0; } | 219 | { return 0; } |
| 203 | #endif | ||
| 204 | |||
| 205 | #ifdef CONFIG_PM_RUNTIME | ||
| 206 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); | ||
| 207 | void dev_pm_qos_hide_latency_limit(struct device *dev); | ||
| 208 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | ||
| 209 | void dev_pm_qos_hide_flags(struct device *dev); | ||
| 210 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | ||
| 211 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); | ||
| 212 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); | ||
| 213 | |||
| 214 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) | ||
| 215 | { | ||
| 216 | return dev->power.qos->resume_latency_req->data.pnode.prio; | ||
| 217 | } | ||
| 218 | |||
| 219 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | ||
| 220 | { | ||
| 221 | return dev->power.qos->flags_req->data.flr.flags; | ||
| 222 | } | ||
| 223 | #else | ||
| 224 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | 220 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
| 225 | { return 0; } | 221 | { return 0; } |
| 226 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | 222 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} |
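Note: the expose/hide helpers now come and go with CONFIG_PM rather than CONFIG_PM_RUNTIME; callers keep the same pattern. A sketch, with an illustrative initial limit value:

    /* make the per-device resume-latency limit user-visible at probe time */
    ret = dev_pm_qos_expose_latency_limit(dev, 100);
    ...
    dev_pm_qos_hide_latency_limit(dev);   /* remove the sysfs knob on teardown */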
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 367f49b9a1c9..30e84d48bfea 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
| @@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev); | |||
| 35 | extern int pm_generic_runtime_resume(struct device *dev); | 35 | extern int pm_generic_runtime_resume(struct device *dev); |
| 36 | extern int pm_runtime_force_suspend(struct device *dev); | 36 | extern int pm_runtime_force_suspend(struct device *dev); |
| 37 | extern int pm_runtime_force_resume(struct device *dev); | 37 | extern int pm_runtime_force_resume(struct device *dev); |
| 38 | #else | ||
| 39 | static inline bool queue_pm_work(struct work_struct *work) { return false; } | ||
| 40 | |||
| 41 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | ||
| 42 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | ||
| 43 | static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } | ||
| 44 | static inline int pm_runtime_force_resume(struct device *dev) { return 0; } | ||
| 45 | #endif | ||
| 46 | |||
| 47 | #ifdef CONFIG_PM_RUNTIME | ||
| 48 | 38 | ||
| 49 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); | 39 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
| 50 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); | 40 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
| @@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev) | |||
| 128 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | 118 | ACCESS_ONCE(dev->power.last_busy) = jiffies; |
| 129 | } | 119 | } |
| 130 | 120 | ||
| 131 | #else /* !CONFIG_PM_RUNTIME */ | 121 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
| 122 | { | ||
| 123 | return dev->power.irq_safe; | ||
| 124 | } | ||
| 125 | |||
| 126 | #else /* !CONFIG_PM */ | ||
| 127 | |||
| 128 | static inline bool queue_pm_work(struct work_struct *work) { return false; } | ||
| 129 | |||
| 130 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | ||
| 131 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | ||
| 132 | static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } | ||
| 133 | static inline int pm_runtime_force_resume(struct device *dev) { return 0; } | ||
| 132 | 134 | ||
| 133 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) | 135 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) |
| 134 | { | 136 | { |
| @@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; } | |||
| 167 | 169 | ||
| 168 | static inline void pm_runtime_no_callbacks(struct device *dev) {} | 170 | static inline void pm_runtime_no_callbacks(struct device *dev) {} |
| 169 | static inline void pm_runtime_irq_safe(struct device *dev) {} | 171 | static inline void pm_runtime_irq_safe(struct device *dev) {} |
| 172 | static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } | ||
| 170 | 173 | ||
| 171 | static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } | 174 | static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } |
| 172 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} | 175 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} |
| @@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration( | |||
| 179 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, | 182 | static inline void pm_runtime_set_memalloc_noio(struct device *dev, |
| 180 | bool enable){} | 183 | bool enable){} |
| 181 | 184 | ||
| 182 | #endif /* !CONFIG_PM_RUNTIME */ | 185 | #endif /* !CONFIG_PM */ |
| 183 | 186 | ||
| 184 | static inline int pm_runtime_idle(struct device *dev) | 187 | static inline int pm_runtime_idle(struct device *dev) |
| 185 | { | 188 | { |
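Note: pm_runtime_is_irq_safe() lets shared code check whether a device was marked with pm_runtime_irq_safe(). A sketch of a caller that must avoid sleeping; the surrounding context is hypothetical:

    /* e.g. a DMA engine helper that may run in atomic context */
    if (pm_runtime_is_irq_safe(dev))
            pm_runtime_get_sync(dev);   /* allowed when the device is irq safe */
    else
            pm_runtime_get(dev);        /* fall back to an asynchronous get */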
diff --git a/include/linux/printk.h b/include/linux/printk.h index d78125f73ac4..c8f170324e64 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...) | |||
| 118 | #ifdef CONFIG_EARLY_PRINTK | 118 | #ifdef CONFIG_EARLY_PRINTK |
| 119 | extern asmlinkage __printf(1, 2) | 119 | extern asmlinkage __printf(1, 2) |
| 120 | void early_printk(const char *fmt, ...); | 120 | void early_printk(const char *fmt, ...); |
| 121 | void early_vprintk(const char *fmt, va_list ap); | ||
| 122 | #else | 121 | #else |
| 123 | static inline __printf(1, 2) __cold | 122 | static inline __printf(1, 2) __cold |
| 124 | void early_printk(const char *s, ...) { } | 123 | void early_printk(const char *s, ...) { } |
| 125 | #endif | 124 | #endif |
| 126 | 125 | ||
| 126 | typedef int(*printk_func_t)(const char *fmt, va_list args); | ||
| 127 | |||
| 127 | #ifdef CONFIG_PRINTK | 128 | #ifdef CONFIG_PRINTK |
| 128 | asmlinkage __printf(5, 0) | 129 | asmlinkage __printf(5, 0) |
| 129 | int vprintk_emit(int facility, int level, | 130 | int vprintk_emit(int facility, int level, |
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 34a1e105bef4..42dfc615dbf8 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h | |||
| @@ -4,21 +4,18 @@ | |||
| 4 | #ifndef _LINUX_PROC_NS_H | 4 | #ifndef _LINUX_PROC_NS_H |
| 5 | #define _LINUX_PROC_NS_H | 5 | #define _LINUX_PROC_NS_H |
| 6 | 6 | ||
| 7 | #include <linux/ns_common.h> | ||
| 8 | |||
| 7 | struct pid_namespace; | 9 | struct pid_namespace; |
| 8 | struct nsproxy; | 10 | struct nsproxy; |
| 11 | struct path; | ||
| 9 | 12 | ||
| 10 | struct proc_ns_operations { | 13 | struct proc_ns_operations { |
| 11 | const char *name; | 14 | const char *name; |
| 12 | int type; | 15 | int type; |
| 13 | void *(*get)(struct task_struct *task); | 16 | struct ns_common *(*get)(struct task_struct *task); |
| 14 | void (*put)(void *ns); | 17 | void (*put)(struct ns_common *ns); |
| 15 | int (*install)(struct nsproxy *nsproxy, void *ns); | 18 | int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); |
| 16 | unsigned int (*inum)(void *ns); | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct proc_ns { | ||
| 20 | void *ns; | ||
| 21 | const struct proc_ns_operations *ns_ops; | ||
| 22 | }; | 19 | }; |
| 23 | 20 | ||
| 24 | extern const struct proc_ns_operations netns_operations; | 21 | extern const struct proc_ns_operations netns_operations; |
| @@ -43,32 +40,38 @@ enum { | |||
| 43 | 40 | ||
| 44 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); | 41 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); |
| 45 | extern void pid_ns_release_proc(struct pid_namespace *ns); | 42 | extern void pid_ns_release_proc(struct pid_namespace *ns); |
| 46 | extern struct file *proc_ns_fget(int fd); | ||
| 47 | extern struct proc_ns *get_proc_ns(struct inode *); | ||
| 48 | extern int proc_alloc_inum(unsigned int *pino); | 43 | extern int proc_alloc_inum(unsigned int *pino); |
| 49 | extern void proc_free_inum(unsigned int inum); | 44 | extern void proc_free_inum(unsigned int inum); |
| 50 | extern bool proc_ns_inode(struct inode *inode); | ||
| 51 | 45 | ||
| 52 | #else /* CONFIG_PROC_FS */ | 46 | #else /* CONFIG_PROC_FS */ |
| 53 | 47 | ||
| 54 | static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } | 48 | static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } |
| 55 | static inline void pid_ns_release_proc(struct pid_namespace *ns) {} | 49 | static inline void pid_ns_release_proc(struct pid_namespace *ns) {} |
| 56 | 50 | ||
| 57 | static inline struct file *proc_ns_fget(int fd) | ||
| 58 | { | ||
| 59 | return ERR_PTR(-EINVAL); | ||
| 60 | } | ||
| 61 | |||
| 62 | static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; } | ||
| 63 | |||
| 64 | static inline int proc_alloc_inum(unsigned int *inum) | 51 | static inline int proc_alloc_inum(unsigned int *inum) |
| 65 | { | 52 | { |
| 66 | *inum = 1; | 53 | *inum = 1; |
| 67 | return 0; | 54 | return 0; |
| 68 | } | 55 | } |
| 69 | static inline void proc_free_inum(unsigned int inum) {} | 56 | static inline void proc_free_inum(unsigned int inum) {} |
| 70 | static inline bool proc_ns_inode(struct inode *inode) { return false; } | ||
| 71 | 57 | ||
| 72 | #endif /* CONFIG_PROC_FS */ | 58 | #endif /* CONFIG_PROC_FS */ |
| 73 | 59 | ||
| 60 | static inline int ns_alloc_inum(struct ns_common *ns) | ||
| 61 | { | ||
| 62 | atomic_long_set(&ns->stashed, 0); | ||
| 63 | return proc_alloc_inum(&ns->inum); | ||
| 64 | } | ||
| 65 | |||
| 66 | #define ns_free_inum(ns) proc_free_inum((ns)->inum) | ||
| 67 | |||
| 68 | extern struct file *proc_ns_fget(int fd); | ||
| 69 | #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) | ||
| 70 | extern void *ns_get_path(struct path *path, struct task_struct *task, | ||
| 71 | const struct proc_ns_operations *ns_ops); | ||
| 72 | |||
| 73 | extern int ns_get_name(char *buf, size_t size, struct task_struct *task, | ||
| 74 | const struct proc_ns_operations *ns_ops); | ||
| 75 | extern void nsfs_init(void); | ||
| 76 | |||
| 74 | #endif /* _LINUX_PROC_NS_H */ | 77 | #endif /* _LINUX_PROC_NS_H */ |
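Note: namespace code now embeds a struct ns_common and uses the new helpers for proc inode numbers. A rough sketch of a constructor/destructor pairing, assuming the (hypothetical) namespace structure embeds the common part as ->ns:

    err = ns_alloc_inum(&new_ns->ns);   /* clears ->stashed and allocates a proc inum */
    if (err) {
            kfree(new_ns);
            return ERR_PTR(err);
    }
    ...
    ns_free_inum(&new_ns->ns);          /* on teardown */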
diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 000000000000..a6a3d98bd7e9 --- /dev/null +++ b/include/linux/property.h | |||
| @@ -0,0 +1,143 @@ | |||
| 1 | /* | ||
| 2 | * property.h - Unified device property interface. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014, Intel Corporation | ||
| 5 | * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
| 6 | * Mika Westerberg <mika.westerberg@linux.intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef _LINUX_PROPERTY_H_ | ||
| 14 | #define _LINUX_PROPERTY_H_ | ||
| 15 | |||
| 16 | #include <linux/types.h> | ||
| 17 | |||
| 18 | struct device; | ||
| 19 | |||
| 20 | enum dev_prop_type { | ||
| 21 | DEV_PROP_U8, | ||
| 22 | DEV_PROP_U16, | ||
| 23 | DEV_PROP_U32, | ||
| 24 | DEV_PROP_U64, | ||
| 25 | DEV_PROP_STRING, | ||
| 26 | DEV_PROP_MAX, | ||
| 27 | }; | ||
| 28 | |||
| 29 | bool device_property_present(struct device *dev, const char *propname); | ||
| 30 | int device_property_read_u8_array(struct device *dev, const char *propname, | ||
| 31 | u8 *val, size_t nval); | ||
| 32 | int device_property_read_u16_array(struct device *dev, const char *propname, | ||
| 33 | u16 *val, size_t nval); | ||
| 34 | int device_property_read_u32_array(struct device *dev, const char *propname, | ||
| 35 | u32 *val, size_t nval); | ||
| 36 | int device_property_read_u64_array(struct device *dev, const char *propname, | ||
| 37 | u64 *val, size_t nval); | ||
| 38 | int device_property_read_string_array(struct device *dev, const char *propname, | ||
| 39 | const char **val, size_t nval); | ||
| 40 | int device_property_read_string(struct device *dev, const char *propname, | ||
| 41 | const char **val); | ||
| 42 | |||
| 43 | enum fwnode_type { | ||
| 44 | FWNODE_INVALID = 0, | ||
| 45 | FWNODE_OF, | ||
| 46 | FWNODE_ACPI, | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct fwnode_handle { | ||
| 50 | enum fwnode_type type; | ||
| 51 | }; | ||
| 52 | |||
| 53 | bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); | ||
| 54 | int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, | ||
| 55 | const char *propname, u8 *val, | ||
| 56 | size_t nval); | ||
| 57 | int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, | ||
| 58 | const char *propname, u16 *val, | ||
| 59 | size_t nval); | ||
| 60 | int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, | ||
| 61 | const char *propname, u32 *val, | ||
| 62 | size_t nval); | ||
| 63 | int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, | ||
| 64 | const char *propname, u64 *val, | ||
| 65 | size_t nval); | ||
| 66 | int fwnode_property_read_string_array(struct fwnode_handle *fwnode, | ||
| 67 | const char *propname, const char **val, | ||
| 68 | size_t nval); | ||
| 69 | int fwnode_property_read_string(struct fwnode_handle *fwnode, | ||
| 70 | const char *propname, const char **val); | ||
| 71 | |||
| 72 | struct fwnode_handle *device_get_next_child_node(struct device *dev, | ||
| 73 | struct fwnode_handle *child); | ||
| 74 | |||
| 75 | #define device_for_each_child_node(dev, child) \ | ||
| 76 | for (child = device_get_next_child_node(dev, NULL); child; \ | ||
| 77 | child = device_get_next_child_node(dev, child)) | ||
| 78 | |||
| 79 | void fwnode_handle_put(struct fwnode_handle *fwnode); | ||
| 80 | |||
| 81 | unsigned int device_get_child_node_count(struct device *dev); | ||
| 82 | |||
| 83 | static inline bool device_property_read_bool(struct device *dev, | ||
| 84 | const char *propname) | ||
| 85 | { | ||
| 86 | return device_property_present(dev, propname); | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline int device_property_read_u8(struct device *dev, | ||
| 90 | const char *propname, u8 *val) | ||
| 91 | { | ||
| 92 | return device_property_read_u8_array(dev, propname, val, 1); | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline int device_property_read_u16(struct device *dev, | ||
| 96 | const char *propname, u16 *val) | ||
| 97 | { | ||
| 98 | return device_property_read_u16_array(dev, propname, val, 1); | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline int device_property_read_u32(struct device *dev, | ||
| 102 | const char *propname, u32 *val) | ||
| 103 | { | ||
| 104 | return device_property_read_u32_array(dev, propname, val, 1); | ||
| 105 | } | ||
| 106 | |||
| 107 | static inline int device_property_read_u64(struct device *dev, | ||
| 108 | const char *propname, u64 *val) | ||
| 109 | { | ||
| 110 | return device_property_read_u64_array(dev, propname, val, 1); | ||
| 111 | } | ||
| 112 | |||
| 113 | static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, | ||
| 114 | const char *propname) | ||
| 115 | { | ||
| 116 | return fwnode_property_present(fwnode, propname); | ||
| 117 | } | ||
| 118 | |||
| 119 | static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, | ||
| 120 | const char *propname, u8 *val) | ||
| 121 | { | ||
| 122 | return fwnode_property_read_u8_array(fwnode, propname, val, 1); | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, | ||
| 126 | const char *propname, u16 *val) | ||
| 127 | { | ||
| 128 | return fwnode_property_read_u16_array(fwnode, propname, val, 1); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, | ||
| 132 | const char *propname, u32 *val) | ||
| 133 | { | ||
| 134 | return fwnode_property_read_u32_array(fwnode, propname, val, 1); | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, | ||
| 138 | const char *propname, u64 *val) | ||
| 139 | { | ||
| 140 | return fwnode_property_read_u64_array(fwnode, propname, val, 1); | ||
| 141 | } | ||
| 142 | |||
| 143 | #endif /* _LINUX_PROPERTY_H_ */ | ||
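Note: the new header gives drivers one call site for properties regardless of whether they come from DT or ACPI _DSD. A minimal consumer sketch; the property names and fallback values are made up:

    #include <linux/device.h>
    #include <linux/property.h>

    static int foo_parse_properties(struct device *dev)
    {
            struct fwnode_handle *child;
            const char *label;
            u32 max_speed;

            if (device_property_read_u32(dev, "max-speed", &max_speed))
                    max_speed = 100000;               /* property absent: use a default */

            if (device_property_read_string(dev, "label", &label))
                    label = dev_name(dev);

            device_for_each_child_node(dev, child) {
                    u32 reg;

                    /* per-child lookups go through the fwnode_* variants */
                    if (fwnode_property_read_u32(child, "reg", &reg))
                            continue;
            }
            return 0;
    }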
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9974975d40db..4af3fdc85b01 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h | |||
| @@ -53,7 +53,8 @@ struct persistent_ram_zone { | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, | 55 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
| 56 | u32 sig, struct persistent_ram_ecc_info *ecc_info); | 56 | u32 sig, struct persistent_ram_ecc_info *ecc_info, |
| 57 | unsigned int memtype); | ||
| 57 | void persistent_ram_free(struct persistent_ram_zone *prz); | 58 | void persistent_ram_free(struct persistent_ram_zone *prz); |
| 58 | void persistent_ram_zap(struct persistent_ram_zone *prz); | 59 | void persistent_ram_zap(struct persistent_ram_zone *prz); |
| 59 | 60 | ||
| @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, | |||
| 76 | struct ramoops_platform_data { | 77 | struct ramoops_platform_data { |
| 77 | unsigned long mem_size; | 78 | unsigned long mem_size; |
| 78 | unsigned long mem_address; | 79 | unsigned long mem_address; |
| 80 | unsigned int mem_type; | ||
| 79 | unsigned long record_size; | 81 | unsigned long record_size; |
| 80 | unsigned long console_size; | 82 | unsigned long console_size; |
| 81 | unsigned long ftrace_size; | 83 | unsigned long ftrace_size; |
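Note: ramoops platform data grows a mem_type field that is forwarded to persistent_ram_new(). A sketch of board data; the address is made up and the meaning of 0 (keep the previous mapping type) is an assumption:

    static struct ramoops_platform_data foo_ramoops_data = {
            .mem_address  = 0x8f000000,   /* reserved RAM, illustrative */
            .mem_size     = SZ_1M,
            .mem_type     = 0,            /* assumed: 0 keeps the old mapping behaviour */
            .record_size  = SZ_16K,
            .console_size = SZ_16K,
    };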
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index cc79eff4a1ad..987a73a40ef8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code); | |||
| 52 | extern void __ptrace_link(struct task_struct *child, | 52 | extern void __ptrace_link(struct task_struct *child, |
| 53 | struct task_struct *new_parent); | 53 | struct task_struct *new_parent); |
| 54 | extern void __ptrace_unlink(struct task_struct *child); | 54 | extern void __ptrace_unlink(struct task_struct *child); |
| 55 | extern void exit_ptrace(struct task_struct *tracer); | 55 | extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); |
| 56 | #define PTRACE_MODE_READ 0x01 | 56 | #define PTRACE_MODE_READ 0x01 |
| 57 | #define PTRACE_MODE_ATTACH 0x02 | 57 | #define PTRACE_MODE_ATTACH 0x02 |
| 58 | #define PTRACE_MODE_NOAUDIT 0x04 | 58 | #define PTRACE_MODE_NOAUDIT 0x04 |
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606..e1ab6e86cdb3 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h | |||
| @@ -4,6 +4,8 @@ | |||
| 4 | #ifndef __LINUX_PXA168_ETH_H | 4 | #ifndef __LINUX_PXA168_ETH_H |
| 5 | #define __LINUX_PXA168_ETH_H | 5 | #define __LINUX_PXA168_ETH_H |
| 6 | 6 | ||
| 7 | #include <linux/phy.h> | ||
| 8 | |||
| 7 | struct pxa168_eth_platform_data { | 9 | struct pxa168_eth_platform_data { |
| 8 | int port_number; | 10 | int port_number; |
| 9 | int phy_addr; | 11 | int phy_addr; |
| @@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { | |||
| 13 | */ | 15 | */ |
| 14 | int speed; /* 0, SPEED_10, SPEED_100 */ | 16 | int speed; /* 0, SPEED_10, SPEED_100 */ |
| 15 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | 17 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ |
| 18 | phy_interface_t intf; | ||
| 16 | 19 | ||
| 17 | /* | 20 | /* |
| 18 | * Override default RX/TX queue sizes if nonzero. | 21 | * Override default RX/TX queue sizes if nonzero. |
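Note: with phy_interface_t pulled in via linux/phy.h, board code can name the MAC-to-PHY interface directly. A sketch of platform data; the values are illustrative:

    static struct pxa168_eth_platform_data foo_eth_pdata = {
            .port_number = 0,
            .phy_addr    = 0,
            .speed       = 0,                        /* 0: use default/autonegotiation */
            .intf        = PHY_INTERFACE_MODE_RMII,  /* new field */
    };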
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index f2b405116166..77aed9ea1d26 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
| @@ -108,6 +108,25 @@ | |||
| 108 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ | 108 | #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ |
| 109 | #endif | 109 | #endif |
| 110 | 110 | ||
| 111 | /* QUARK_X1000 SSCR0 bit definition */ | ||
| 112 | #define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ | ||
| 113 | #define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ | ||
| 114 | #define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ | ||
| 115 | #define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ | ||
| 116 | |||
| 117 | #define RX_THRESH_QUARK_X1000_DFLT 1 | ||
| 118 | #define TX_THRESH_QUARK_X1000_DFLT 16 | ||
| 119 | |||
| 120 | #define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ | ||
| 121 | #define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ | ||
| 122 | |||
| 123 | #define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ | ||
| 124 | #define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ | ||
| 125 | #define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ | ||
| 126 | #define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ | ||
| 127 | #define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ | ||
| 128 | #define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ | ||
| 129 | |||
| 111 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ | 130 | /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ |
| 112 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ | 131 | #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ |
| 113 | #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ | 132 | #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ |
| @@ -175,6 +194,7 @@ enum pxa_ssp_type { | |||
| 175 | PXA910_SSP, | 194 | PXA910_SSP, |
| 176 | CE4100_SSP, | 195 | CE4100_SSP, |
| 177 | LPSS_SSP, | 196 | LPSS_SSP, |
| 197 | QUARK_X1000_SSP, | ||
| 178 | }; | 198 | }; |
| 179 | 199 | ||
| 180 | struct ssp_device { | 200 | struct ssp_device { |
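Note: the Quark X1000 variants widen the FIFO threshold fields and the data-size range. A sketch of register values built from the new macros for an 8-bit Motorola SPI frame with the default thresholds:

    u32 cr0 = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
    u32 cr1 = QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT) |
              QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT);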
diff --git a/include/linux/quota.h b/include/linux/quota.h index 80d345a3524c..50978b781a19 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
| @@ -56,6 +56,11 @@ enum quota_type { | |||
| 56 | PRJQUOTA = 2, /* element used for project quotas */ | 56 | PRJQUOTA = 2, /* element used for project quotas */ |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | /* Masks for quota types when used as a bitmask */ | ||
| 60 | #define QTYPE_MASK_USR (1 << USRQUOTA) | ||
| 61 | #define QTYPE_MASK_GRP (1 << GRPQUOTA) | ||
| 62 | #define QTYPE_MASK_PRJ (1 << PRJQUOTA) | ||
| 63 | |||
| 59 | typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ | 64 | typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ |
| 60 | typedef long long qsize_t; /* Type in which we store sizes */ | 65 | typedef long long qsize_t; /* Type in which we store sizes */ |
| 61 | 66 | ||
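Note: the QTYPE_MASK_* defines give callers a bitmask form of the quota type enum. A trivial sketch:

    unsigned int enabled = QTYPE_MASK_USR | QTYPE_MASK_GRP;

    if (enabled & QTYPE_MASK_PRJ)
            pr_info("project quotas enabled\n");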
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 1d3eee594cd6..f23538a6e411 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
| @@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot); | |||
| 64 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); | 64 | int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); |
| 65 | void __dquot_free_space(struct inode *inode, qsize_t number, int flags); | 65 | void __dquot_free_space(struct inode *inode, qsize_t number, int flags); |
| 66 | 66 | ||
| 67 | int dquot_alloc_inode(const struct inode *inode); | 67 | int dquot_alloc_inode(struct inode *inode); |
| 68 | 68 | ||
| 69 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); | 69 | int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); |
| 70 | void dquot_free_inode(const struct inode *inode); | 70 | void dquot_free_inode(struct inode *inode); |
| 71 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); | 71 | void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); |
| 72 | 72 | ||
| 73 | int dquot_disable(struct super_block *sb, int type, unsigned int flags); | 73 | int dquot_disable(struct super_block *sb, int type, unsigned int flags); |
| @@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode) | |||
| 213 | { | 213 | { |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | static inline int dquot_alloc_inode(const struct inode *inode) | 216 | static inline int dquot_alloc_inode(struct inode *inode) |
| 217 | { | 217 | { |
| 218 | return 0; | 218 | return 0; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static inline void dquot_free_inode(const struct inode *inode) | 221 | static inline void dquot_free_inode(struct inode *inode) |
| 222 | { | 222 | { |
| 223 | } | 223 | } |
| 224 | 224 | ||
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 0a260d8a18bf..18102529254e 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
| @@ -17,14 +17,20 @@ struct ratelimit_state { | |||
| 17 | unsigned long begin; | 17 | unsigned long begin; |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ | 20 | #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ |
| 21 | \ | ||
| 22 | struct ratelimit_state name = { \ | ||
| 23 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ | 21 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
| 24 | .interval = interval_init, \ | 22 | .interval = interval_init, \ |
| 25 | .burst = burst_init, \ | 23 | .burst = burst_init, \ |
| 26 | } | 24 | } |
| 27 | 25 | ||
| 26 | #define RATELIMIT_STATE_INIT_DISABLED \ | ||
| 27 | RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) | ||
| 28 | |||
| 29 | #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ | ||
| 30 | \ | ||
| 31 | struct ratelimit_state name = \ | ||
| 32 | RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ | ||
| 33 | |||
| 28 | static inline void ratelimit_state_init(struct ratelimit_state *rs, | 34 | static inline void ratelimit_state_init(struct ratelimit_state *rs, |
| 29 | int interval, int burst) | 35 | int interval, int burst) |
| 30 | { | 36 | { |
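Note: RATELIMIT_STATE_INIT makes it possible to initialize a ratelimit_state embedded in another object instead of defining a standalone one. A sketch; the containing structure is hypothetical:

    struct foo_dev {
            struct ratelimit_state rs;
    };

    static struct foo_dev foo = {
            .rs = RATELIMIT_STATE_INIT(foo.rs, 5 * HZ, 10),
    };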
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb8..529bc946f450 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 241 | * list_entry_rcu - get the struct for this entry | 241 | * list_entry_rcu - get the struct for this entry |
| 242 | * @ptr: the &struct list_head pointer. | 242 | * @ptr: the &struct list_head pointer. |
| 243 | * @type: the type of the struct this is embedded in. | 243 | * @type: the type of the struct this is embedded in. |
| 244 | * @member: the name of the list_struct within the struct. | 244 | * @member: the name of the list_head within the struct. |
| 245 | * | 245 | * |
| 246 | * This primitive may safely run concurrently with the _rcu list-mutation | 246 | * This primitive may safely run concurrently with the _rcu list-mutation |
| 247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 247 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
| @@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 278 | * list_first_or_null_rcu - get the first element from a list | 278 | * list_first_or_null_rcu - get the first element from a list |
| 279 | * @ptr: the list head to take the element from. | 279 | * @ptr: the list head to take the element from. |
| 280 | * @type: the type of the struct this is embedded in. | 280 | * @type: the type of the struct this is embedded in. |
| 281 | * @member: the name of the list_struct within the struct. | 281 | * @member: the name of the list_head within the struct. |
| 282 | * | 282 | * |
| 283 | * Note that if the list is empty, it returns NULL. | 283 | * Note that if the list is empty, it returns NULL. |
| 284 | * | 284 | * |
| @@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 296 | * list_for_each_entry_rcu - iterate over rcu list of given type | 296 | * list_for_each_entry_rcu - iterate over rcu list of given type |
| 297 | * @pos: the type * to use as a loop cursor. | 297 | * @pos: the type * to use as a loop cursor. |
| 298 | * @head: the head for your list. | 298 | * @head: the head for your list. |
| 299 | * @member: the name of the list_struct within the struct. | 299 | * @member: the name of the list_head within the struct. |
| 300 | * | 300 | * |
| 301 | * This list-traversal primitive may safely run concurrently with | 301 | * This list-traversal primitive may safely run concurrently with |
| 302 | * the _rcu list-mutation primitives such as list_add_rcu() | 302 | * the _rcu list-mutation primitives such as list_add_rcu() |
| @@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 311 | * list_for_each_entry_continue_rcu - continue iteration over list of given type | 311 | * list_for_each_entry_continue_rcu - continue iteration over list of given type |
| 312 | * @pos: the type * to use as a loop cursor. | 312 | * @pos: the type * to use as a loop cursor. |
| 313 | * @head: the head for your list. | 313 | * @head: the head for your list. |
| 314 | * @member: the name of the list_struct within the struct. | 314 | * @member: the name of the list_head within the struct. |
| 315 | * | 315 | * |
| 316 | * Continue to iterate over list of given type, continuing after | 316 | * Continue to iterate over list of given type, continuing after |
| 317 | * the current position. | 317 | * the current position. |
| @@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, | |||
| 542 | pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ | 542 | pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ |
| 543 | typeof(*(pos)), member)) | 543 | typeof(*(pos)), member)) |
| 544 | 544 | ||
| 545 | /** | ||
| 546 | * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point | ||
| 547 | * @pos: the type * to use as a loop cursor. | ||
| 548 | * @member: the name of the hlist_node within the struct. | ||
| 549 | */ | ||
| 550 | #define hlist_for_each_entry_from_rcu(pos, member) \ | ||
| 551 | for (; pos; \ | ||
| 552 | pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ | ||
| 553 | typeof(*(pos)), member)) | ||
| 545 | 554 | ||
| 546 | #endif /* __KERNEL__ */ | 555 | #endif /* __KERNEL__ */ |
| 547 | #endif | 556 | #endif |
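Note: hlist_for_each_entry_from_rcu() continues a traversal from an entry found earlier rather than from the head. A sketch under rcu_read_lock(), where 'node' is the hlist_node member of the (hypothetical) struct foo:

    struct foo *pos;

    rcu_read_lock();
    pos = find_starting_point();          /* hypothetical lookup done under RCU */
    hlist_for_each_entry_from_rcu(pos, node)
            process(pos);
    rcu_read_unlock();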
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h deleted file mode 100644 index 56b7bc32db4f..000000000000 --- a/include/linux/res_counter.h +++ /dev/null | |||
| @@ -1,223 +0,0 @@ | |||
| 1 | #ifndef __RES_COUNTER_H__ | ||
| 2 | #define __RES_COUNTER_H__ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Resource Counters | ||
| 6 | * Contain common data types and routines for resource accounting | ||
| 7 | * | ||
| 8 | * Copyright 2007 OpenVZ SWsoft Inc | ||
| 9 | * | ||
| 10 | * Author: Pavel Emelianov <xemul@openvz.org> | ||
| 11 | * | ||
| 12 | * See Documentation/cgroups/resource_counter.txt for more | ||
| 13 | * info about what this counter is. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/spinlock.h> | ||
| 17 | #include <linux/errno.h> | ||
| 18 | |||
| 19 | /* | ||
| 20 | * The core object. the cgroup that wishes to account for some | ||
| 21 | * resource may include this counter into its structures and use | ||
| 22 | * the helpers described beyond | ||
| 23 | */ | ||
| 24 | |||
| 25 | struct res_counter { | ||
| 26 | /* | ||
| 27 | * the current resource consumption level | ||
| 28 | */ | ||
| 29 | unsigned long long usage; | ||
| 30 | /* | ||
| 31 | * the maximal value of the usage from the counter creation | ||
| 32 | */ | ||
| 33 | unsigned long long max_usage; | ||
| 34 | /* | ||
| 35 | * the limit that usage cannot exceed | ||
| 36 | */ | ||
| 37 | unsigned long long limit; | ||
| 38 | /* | ||
| 39 | * the limit that usage can be exceed | ||
| 40 | */ | ||
| 41 | unsigned long long soft_limit; | ||
| 42 | /* | ||
| 43 | * the number of unsuccessful attempts to consume the resource | ||
| 44 | */ | ||
| 45 | unsigned long long failcnt; | ||
| 46 | /* | ||
| 47 | * the lock to protect all of the above. | ||
| 48 | * the routines below consider this to be IRQ-safe | ||
| 49 | */ | ||
| 50 | spinlock_t lock; | ||
| 51 | /* | ||
| 52 | * Parent counter, used for hierarchial resource accounting | ||
| 53 | */ | ||
| 54 | struct res_counter *parent; | ||
| 55 | }; | ||
| 56 | |||
| 57 | #define RES_COUNTER_MAX ULLONG_MAX | ||
| 58 | |||
| 59 | /** | ||
| 60 | * Helpers to interact with userspace | ||
| 61 | * res_counter_read_u64() - returns the value of the specified member. | ||
| 62 | * res_counter_read/_write - put/get the specified fields from the | ||
| 63 | * res_counter struct to/from the user | ||
| 64 | * | ||
| 65 | * @counter: the counter in question | ||
| 66 | * @member: the field to work with (see RES_xxx below) | ||
| 67 | * @buf: the buffer to opeate on,... | ||
| 68 | * @nbytes: its size... | ||
| 69 | * @pos: and the offset. | ||
| 70 | */ | ||
| 71 | |||
| 72 | u64 res_counter_read_u64(struct res_counter *counter, int member); | ||
| 73 | |||
| 74 | ssize_t res_counter_read(struct res_counter *counter, int member, | ||
| 75 | const char __user *buf, size_t nbytes, loff_t *pos, | ||
| 76 | int (*read_strategy)(unsigned long long val, char *s)); | ||
| 77 | |||
| 78 | int res_counter_memparse_write_strategy(const char *buf, | ||
| 79 | unsigned long long *res); | ||
| 80 | |||
| 81 | /* | ||
| 82 | * the field descriptors. one for each member of res_counter | ||
| 83 | */ | ||
| 84 | |||
| 85 | enum { | ||
| 86 | RES_USAGE, | ||
| 87 | RES_MAX_USAGE, | ||
| 88 | RES_LIMIT, | ||
| 89 | RES_FAILCNT, | ||
| 90 | RES_SOFT_LIMIT, | ||
| 91 | }; | ||
| 92 | |||
| 93 | /* | ||
| 94 | * helpers for accounting | ||
| 95 | */ | ||
| 96 | |||
| 97 | void res_counter_init(struct res_counter *counter, struct res_counter *parent); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * charge - try to consume more resource. | ||
| 101 | * | ||
| 102 | * @counter: the counter | ||
| 103 | * @val: the amount of the resource. each controller defines its own | ||
| 104 | * units, e.g. numbers, bytes, Kbytes, etc | ||
| 105 | * | ||
| 106 | * returns 0 on success and <0 if the counter->usage will exceed the | ||
| 107 | * counter->limit | ||
| 108 | * | ||
| 109 | * charge_nofail works the same, except that it charges the resource | ||
| 110 | * counter unconditionally, and returns < 0 if the after the current | ||
| 111 | * charge we are over limit. | ||
| 112 | */ | ||
| 113 | |||
| 114 | int __must_check res_counter_charge(struct res_counter *counter, | ||
| 115 | unsigned long val, struct res_counter **limit_fail_at); | ||
| 116 | int res_counter_charge_nofail(struct res_counter *counter, | ||
| 117 | unsigned long val, struct res_counter **limit_fail_at); | ||
| 118 | |||
| 119 | /* | ||
| 120 | * uncharge - tell that some portion of the resource is released | ||
| 121 | * | ||
| 122 | * @counter: the counter | ||
| 123 | * @val: the amount of the resource | ||
| 124 | * | ||
| 125 | * these calls check for usage underflow and show a warning on the console | ||
| 126 | * | ||
| 127 | * returns the total charges still present in @counter. | ||
| 128 | */ | ||
| 129 | |||
| 130 | u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); | ||
| 131 | |||
| 132 | u64 res_counter_uncharge_until(struct res_counter *counter, | ||
| 133 | struct res_counter *top, | ||
| 134 | unsigned long val); | ||
| 135 | /** | ||
| 136 | * res_counter_margin - calculate chargeable space of a counter | ||
| 137 | * @cnt: the counter | ||
| 138 | * | ||
| 139 | * Returns the difference between the hard limit and the current usage | ||
| 140 | * of resource counter @cnt. | ||
| 141 | */ | ||
| 142 | static inline unsigned long long res_counter_margin(struct res_counter *cnt) | ||
| 143 | { | ||
| 144 | unsigned long long margin; | ||
| 145 | unsigned long flags; | ||
| 146 | |||
| 147 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 148 | if (cnt->limit > cnt->usage) | ||
| 149 | margin = cnt->limit - cnt->usage; | ||
| 150 | else | ||
| 151 | margin = 0; | ||
| 152 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 153 | return margin; | ||
| 154 | } | ||
| 155 | |||
| 156 | /** | ||
| 157 | * Get the difference between the usage and the soft limit | ||
| 158 | * @cnt: The counter | ||
| 159 | * | ||
| 160 | * Returns 0 if usage is less than or equal to soft limit | ||
| 161 | * The difference between usage and soft limit, otherwise. | ||
| 162 | */ | ||
| 163 | static inline unsigned long long | ||
| 164 | res_counter_soft_limit_excess(struct res_counter *cnt) | ||
| 165 | { | ||
| 166 | unsigned long long excess; | ||
| 167 | unsigned long flags; | ||
| 168 | |||
| 169 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 170 | if (cnt->usage <= cnt->soft_limit) | ||
| 171 | excess = 0; | ||
| 172 | else | ||
| 173 | excess = cnt->usage - cnt->soft_limit; | ||
| 174 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 175 | return excess; | ||
| 176 | } | ||
| 177 | |||
| 178 | static inline void res_counter_reset_max(struct res_counter *cnt) | ||
| 179 | { | ||
| 180 | unsigned long flags; | ||
| 181 | |||
| 182 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 183 | cnt->max_usage = cnt->usage; | ||
| 184 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 185 | } | ||
| 186 | |||
| 187 | static inline void res_counter_reset_failcnt(struct res_counter *cnt) | ||
| 188 | { | ||
| 189 | unsigned long flags; | ||
| 190 | |||
| 191 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 192 | cnt->failcnt = 0; | ||
| 193 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline int res_counter_set_limit(struct res_counter *cnt, | ||
| 197 | unsigned long long limit) | ||
| 198 | { | ||
| 199 | unsigned long flags; | ||
| 200 | int ret = -EBUSY; | ||
| 201 | |||
| 202 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 203 | if (cnt->usage <= limit) { | ||
| 204 | cnt->limit = limit; | ||
| 205 | ret = 0; | ||
| 206 | } | ||
| 207 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline int | ||
| 212 | res_counter_set_soft_limit(struct res_counter *cnt, | ||
| 213 | unsigned long long soft_limit) | ||
| 214 | { | ||
| 215 | unsigned long flags; | ||
| 216 | |||
| 217 | spin_lock_irqsave(&cnt->lock, flags); | ||
| 218 | cnt->soft_limit = soft_limit; | ||
| 219 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | #endif | ||
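
For readers tracking the removal: the charge/uncharge pattern this header used to provide looked roughly like the sketch below (all names hypothetical); the memory controller now does this bookkeeping with its own page_counter instead.

    static struct res_counter rc;

    static void counter_setup(void)
    {
            res_counter_init(&rc, NULL);            /* no parent */
            res_counter_set_limit(&rc, 1 << 20);    /* 1 MiB hard limit */
    }

    static int charge_one_page(void)
    {
            struct res_counter *fail;

            if (res_counter_charge(&rc, PAGE_SIZE, &fail))
                    return -ENOMEM;         /* fail names the counter over its limit */
            return 0;
    }

    static void uncharge_one_page(void)
    {
            res_counter_uncharge(&rc, PAGE_SIZE);
    }
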
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a..b93fd89b2e5e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
| @@ -65,7 +65,10 @@ struct rhashtable_params { | |||
| 65 | size_t new_size); | 65 | size_t new_size); |
| 66 | bool (*shrink_decision)(const struct rhashtable *ht, | 66 | bool (*shrink_decision)(const struct rhashtable *ht, |
| 67 | size_t new_size); | 67 | size_t new_size); |
| 68 | int (*mutex_is_held)(void); | 68 | #ifdef CONFIG_PROVE_LOCKING |
| 69 | int (*mutex_is_held)(void *parent); | ||
| 70 | void *parent; | ||
| 71 | #endif | ||
| 69 | }; | 72 | }; |
| 70 | 73 | ||
| 71 | /** | 74 | /** |
| @@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); | |||
| 96 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); | 99 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); |
| 97 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); | 100 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); |
| 98 | 101 | ||
| 99 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 102 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); |
| 100 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 103 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); |
| 101 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | 104 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, |
| 102 | struct rhash_head __rcu **pprev, gfp_t flags); | 105 | struct rhash_head __rcu **pprev); |
| 103 | 106 | ||
| 104 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); | 107 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); |
| 105 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); | 108 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); |
| 106 | 109 | ||
| 107 | int rhashtable_expand(struct rhashtable *ht, gfp_t flags); | 110 | int rhashtable_expand(struct rhashtable *ht); |
| 108 | int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); | 111 | int rhashtable_shrink(struct rhashtable *ht); |
| 109 | 112 | ||
| 110 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key); | 113 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key); |
| 111 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, | 114 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, |
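
Note the signature changes above: rhashtable_insert/remove/expand/shrink no longer take a gfp_t, and mutex_is_held is only compiled in (and now receives a parent cookie) under CONFIG_PROVE_LOCKING. A rough sketch of a params block and an insertion under the new API; the fields not visible in this hunk (key_offset, key_len, head_offset, hashfn) are assumed from the rest of the header, and my_obj/my_table_mutex are hypothetical:

    struct my_obj {
            u32 key;
            struct rhash_head node;
    };

    static DEFINE_MUTEX(my_table_mutex);
    static struct rhashtable my_ht;

    #ifdef CONFIG_PROVE_LOCKING
    static int my_mutex_is_held(void *parent)
    {
            return lockdep_is_held(&my_table_mutex);
    }
    #endif

    static struct rhashtable_params my_params = {
            .head_offset    = offsetof(struct my_obj, node),
            .key_offset     = offsetof(struct my_obj, key),
            .key_len        = sizeof(u32),
            .hashfn         = jhash,
    #ifdef CONFIG_PROVE_LOCKING
            .mutex_is_held  = my_mutex_is_held,
            .parent         = &my_table_mutex,
    #endif
    };

    /* rhashtable_init(&my_ht, &my_params) is assumed to run once at setup */
    static void my_table_add(struct my_obj *obj)
    {
            mutex_lock(&my_table_mutex);
            rhashtable_insert(&my_ht, &obj->node);  /* no gfp_t argument any more */
            mutex_unlock(&my_table_mutex);
    }
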
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06c..5db76a32fcab 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
| @@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, | |||
| 17 | u32 id, long expires, u32 error); | 17 | u32 id, long expires, u32 error); |
| 18 | 18 | ||
| 19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); | 19 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); |
| 20 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, | ||
| 21 | unsigned change, gfp_t flags); | ||
| 22 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, | ||
| 23 | gfp_t flags); | ||
| 24 | |||
| 20 | 25 | ||
| 21 | /* RTNL is used as a global lock for all changes to network configuration */ | 26 | /* RTNL is used as a global lock for all changes to network configuration */ |
| 22 | extern void rtnl_lock(void); | 27 | extern void rtnl_lock(void); |
| @@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, | |||
| 94 | struct nlattr *tb[], | 99 | struct nlattr *tb[], |
| 95 | struct net_device *dev, | 100 | struct net_device *dev, |
| 96 | const unsigned char *addr, | 101 | const unsigned char *addr, |
| 97 | u16 flags); | 102 | u16 vid, |
| 103 | u16 flags); | ||
| 98 | extern int ndo_dflt_fdb_del(struct ndmsg *ndm, | 104 | extern int ndo_dflt_fdb_del(struct ndmsg *ndm, |
| 99 | struct nlattr *tb[], | 105 | struct nlattr *tb[], |
| 100 | struct net_device *dev, | 106 | struct net_device *dev, |
| 101 | const unsigned char *addr); | 107 | const unsigned char *addr, |
| 108 | u16 vid); | ||
| 102 | 109 | ||
| 103 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 110 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 104 | struct net_device *dev, u16 mode); | 111 | struct net_device *dev, u16 mode, |
| 112 | u32 flags, u32 mask); | ||
| 105 | #endif /* __LINUX_RTNETLINK_H */ | 113 | #endif /* __LINUX_RTNETLINK_H */ |
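
The rtmsg_ifinfo() split above lets a caller build the RTM_NEWLINK notification up front and only send it once the underlying change has actually succeeded. A hedged sketch (the changed-flags value and do_the_actual_change() are assumptions):

    static int change_and_notify(struct net_device *dev)
    {
            struct sk_buff *skb;
            int err;

            skb = rtmsg_ifinfo_build_skb(RTM_NEWLINK, dev, IFF_UP, GFP_KERNEL);

            err = do_the_actual_change(dev);
            if (!err && skb)
                    rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
            else
                    kfree_skb(skb);         /* kfree_skb(NULL) is a no-op */
            return err;
    }
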
diff --git a/include/linux/sched.h b/include/linux/sched.h index 55f5ee7cc3d3..8db31ef98d2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1364,6 +1364,10 @@ struct task_struct { | |||
| 1364 | unsigned sched_reset_on_fork:1; | 1364 | unsigned sched_reset_on_fork:1; |
| 1365 | unsigned sched_contributes_to_load:1; | 1365 | unsigned sched_contributes_to_load:1; |
| 1366 | 1366 | ||
| 1367 | #ifdef CONFIG_MEMCG_KMEM | ||
| 1368 | unsigned memcg_kmem_skip_account:1; | ||
| 1369 | #endif | ||
| 1370 | |||
| 1367 | unsigned long atomic_flags; /* Flags needing atomic access. */ | 1371 | unsigned long atomic_flags; /* Flags needing atomic access. */ |
| 1368 | 1372 | ||
| 1369 | pid_t pid; | 1373 | pid_t pid; |
| @@ -1679,8 +1683,7 @@ struct task_struct { | |||
| 1679 | /* bitmask and counter of trace recursion */ | 1683 | /* bitmask and counter of trace recursion */ |
| 1680 | unsigned long trace_recursion; | 1684 | unsigned long trace_recursion; |
| 1681 | #endif /* CONFIG_TRACING */ | 1685 | #endif /* CONFIG_TRACING */ |
| 1682 | #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ | 1686 | #ifdef CONFIG_MEMCG |
| 1683 | unsigned int memcg_kmem_skip_account; | ||
| 1684 | struct memcg_oom_info { | 1687 | struct memcg_oom_info { |
| 1685 | struct mem_cgroup *memcg; | 1688 | struct mem_cgroup *memcg; |
| 1686 | gfp_t gfp_mask; | 1689 | gfp_t gfp_mask; |
| @@ -2482,6 +2485,10 @@ extern void do_group_exit(int); | |||
| 2482 | extern int do_execve(struct filename *, | 2485 | extern int do_execve(struct filename *, |
| 2483 | const char __user * const __user *, | 2486 | const char __user * const __user *, |
| 2484 | const char __user * const __user *); | 2487 | const char __user * const __user *); |
| 2488 | extern int do_execveat(int, struct filename *, | ||
| 2489 | const char __user * const __user *, | ||
| 2490 | const char __user * const __user *, | ||
| 2491 | int); | ||
| 2485 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); | 2492 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); |
| 2486 | struct task_struct *fork_idle(int); | 2493 | struct task_struct *fork_idle(int); |
| 2487 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 2494 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 000000000000..9aafe0e24c68 --- /dev/null +++ b/include/linux/seq_buf.h | |||
| @@ -0,0 +1,136 @@ | |||
| 1 | #ifndef _LINUX_SEQ_BUF_H | ||
| 2 | #define _LINUX_SEQ_BUF_H | ||
| 3 | |||
| 4 | #include <linux/fs.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Trace sequences are used to allow a function to call several other functions | ||
| 8 | * to create a string of data to use. | ||
| 9 | */ | ||
| 10 | |||
| 11 | /** | ||
| 12 | * seq_buf - seq buffer structure | ||
| 13 | * @buffer: pointer to the buffer | ||
| 14 | * @size: size of the buffer | ||
| 15 | * @len: the amount of data inside the buffer | ||
| 16 | * @readpos: The next position to read in the buffer. | ||
| 17 | */ | ||
| 18 | struct seq_buf { | ||
| 19 | char *buffer; | ||
| 20 | size_t size; | ||
| 21 | size_t len; | ||
| 22 | loff_t readpos; | ||
| 23 | }; | ||
| 24 | |||
| 25 | static inline void seq_buf_clear(struct seq_buf *s) | ||
| 26 | { | ||
| 27 | s->len = 0; | ||
| 28 | s->readpos = 0; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void | ||
| 32 | seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) | ||
| 33 | { | ||
| 34 | s->buffer = buf; | ||
| 35 | s->size = size; | ||
| 36 | seq_buf_clear(s); | ||
| 37 | } | ||
| 38 | |||
| 39 | /* | ||
| 40 | * A seq_buf has a buffer that might overflow. When this happens, | ||
| 41 | * len is set past size (see seq_buf_set_overflow() below). | ||
| 42 | */ | ||
| 43 | static inline bool | ||
| 44 | seq_buf_has_overflowed(struct seq_buf *s) | ||
| 45 | { | ||
| 46 | return s->len > s->size; | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void | ||
| 50 | seq_buf_set_overflow(struct seq_buf *s) | ||
| 51 | { | ||
| 52 | s->len = s->size + 1; | ||
| 53 | } | ||
| 54 | |||
| 55 | /* | ||
| 56 | * How much buffer is left on the seq_buf? | ||
| 57 | */ | ||
| 58 | static inline unsigned int | ||
| 59 | seq_buf_buffer_left(struct seq_buf *s) | ||
| 60 | { | ||
| 61 | if (seq_buf_has_overflowed(s)) | ||
| 62 | return 0; | ||
| 63 | |||
| 64 | return s->size - s->len; | ||
| 65 | } | ||
| 66 | |||
| 67 | /* How much buffer was written? */ | ||
| 68 | static inline unsigned int seq_buf_used(struct seq_buf *s) | ||
| 69 | { | ||
| 70 | return min(s->len, s->size); | ||
| 71 | } | ||
| 72 | |||
| 73 | /** | ||
| 74 | * seq_buf_get_buf - get buffer to write arbitrary data to | ||
| 75 | * @s: the seq_buf handle | ||
| 76 | * @bufp: the beginning of the buffer is stored here | ||
| 77 | * | ||
| 78 | * Return the number of bytes available in the buffer, or zero if | ||
| 79 | * there's no space. | ||
| 80 | */ | ||
| 81 | static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) | ||
| 82 | { | ||
| 83 | WARN_ON(s->len > s->size + 1); | ||
| 84 | |||
| 85 | if (s->len < s->size) { | ||
| 86 | *bufp = s->buffer + s->len; | ||
| 87 | return s->size - s->len; | ||
| 88 | } | ||
| 89 | |||
| 90 | *bufp = NULL; | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | /** | ||
| 95 | * seq_buf_commit - commit data to the buffer | ||
| 96 | * @s: the seq_buf handle | ||
| 97 | * @num: the number of bytes to commit | ||
| 98 | * | ||
| 99 | * Commit @num bytes of data written to a buffer previously acquired | ||
| 100 | * by seq_buf_get. To signal an error condition, or that the data | ||
| 101 | * didn't fit in the available space, pass a negative @num value. | ||
| 102 | */ | ||
| 103 | static inline void seq_buf_commit(struct seq_buf *s, int num) | ||
| 104 | { | ||
| 105 | if (num < 0) { | ||
| 106 | seq_buf_set_overflow(s); | ||
| 107 | } else { | ||
| 108 | /* num must be negative on overflow */ | ||
| 109 | BUG_ON(s->len + num > s->size); | ||
| 110 | s->len += num; | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | extern __printf(2, 3) | ||
| 115 | int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); | ||
| 116 | extern __printf(2, 0) | ||
| 117 | int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); | ||
| 118 | extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); | ||
| 119 | extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, | ||
| 120 | int cnt); | ||
| 121 | extern int seq_buf_puts(struct seq_buf *s, const char *str); | ||
| 122 | extern int seq_buf_putc(struct seq_buf *s, unsigned char c); | ||
| 123 | extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); | ||
| 124 | extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, | ||
| 125 | unsigned int len); | ||
| 126 | extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); | ||
| 127 | |||
| 128 | extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, | ||
| 129 | int nmaskbits); | ||
| 130 | |||
| 131 | #ifdef CONFIG_BINARY_PRINTF | ||
| 132 | extern int | ||
| 133 | seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); | ||
| 134 | #endif | ||
| 135 | |||
| 136 | #endif /* _LINUX_SEQ_BUF_H */ | ||
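
A minimal usage sketch for the new descriptor: point it at a caller-owned buffer, write through the printf-style helpers, and check for truncation afterwards (buffer contents and size here are arbitrary):

    static void fill_status(unsigned char *out, unsigned int len)
    {
            struct seq_buf s;

            seq_buf_init(&s, out, len);
            seq_buf_printf(&s, "cpu %d: ", 0);
            seq_buf_puts(&s, "online\n");

            if (seq_buf_has_overflowed(&s))
                    pr_warn("status truncated to %u bytes\n", seq_buf_used(&s));
    }
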
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 52e0097f61f0..cf6a9daaaf6d 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
| @@ -43,6 +43,21 @@ struct seq_operations { | |||
| 43 | #define SEQ_SKIP 1 | 43 | #define SEQ_SKIP 1 |
| 44 | 44 | ||
| 45 | /** | 45 | /** |
| 46 | * seq_has_overflowed - check if the buffer has overflowed | ||
| 47 | * @m: the seq_file handle | ||
| 48 | * | ||
| 49 | * seq_files have a buffer which may overflow. When this happens a larger | ||
| 50 | * buffer is reallocated and all the data will be printed again. | ||
| 51 | * The overflow state is true when m->count == m->size. | ||
| 52 | * | ||
| 53 | * Returns true if the buffer received more than it can hold. | ||
| 54 | */ | ||
| 55 | static inline bool seq_has_overflowed(struct seq_file *m) | ||
| 56 | { | ||
| 57 | return m->count == m->size; | ||
| 58 | } | ||
| 59 | |||
| 60 | /** | ||
| 46 | * seq_get_buf - get buffer to write arbitrary data to | 61 | * seq_get_buf - get buffer to write arbitrary data to |
| 47 | * @m: the seq_file handle | 62 | * @m: the seq_file handle |
| 48 | * @bufp: the beginning of the buffer is stored here | 63 | * @bufp: the beginning of the buffer is stored here |
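
seq_has_overflowed() is meant to replace open-coded m->count == m->size checks in ->show() helpers; the seq_file core still grows the buffer and retries when the output did not fit. A hedged sketch (foo_dev and its fields are assumptions):

    static void foo_show_regs(struct seq_file *m, struct foo_dev *dev)
    {
            int i;

            for (i = 0; i < dev->nr_regs; i++) {
                    seq_printf(m, "reg%02d: %08x\n", i, dev->regs[i]);
                    if (seq_has_overflowed(m))
                            break;          /* no point formatting the rest */
            }
    }
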
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3df10d5f154b..e02acf0a0ec9 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
| @@ -97,13 +97,10 @@ struct uart_8250_port { | |||
| 97 | unsigned char msr_saved_flags; | 97 | unsigned char msr_saved_flags; |
| 98 | 98 | ||
| 99 | struct uart_8250_dma *dma; | 99 | struct uart_8250_dma *dma; |
| 100 | struct serial_rs485 rs485; | ||
| 101 | 100 | ||
| 102 | /* 8250 specific callbacks */ | 101 | /* 8250 specific callbacks */ |
| 103 | int (*dl_read)(struct uart_8250_port *); | 102 | int (*dl_read)(struct uart_8250_port *); |
| 104 | void (*dl_write)(struct uart_8250_port *, int); | 103 | void (*dl_write)(struct uart_8250_port *, int); |
| 105 | int (*rs485_config)(struct uart_8250_port *, | ||
| 106 | struct serial_rs485 *rs485); | ||
| 107 | }; | 104 | }; |
| 108 | 105 | ||
| 109 | static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) | 106 | static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) |
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h index a80aa1a5bee2..570e964dc899 100644 --- a/include/linux/serial_bcm63xx.h +++ b/include/linux/serial_bcm63xx.h | |||
| @@ -116,6 +116,4 @@ | |||
| 116 | UART_FIFO_PARERR_MASK | \ | 116 | UART_FIFO_PARERR_MASK | \ |
| 117 | UART_FIFO_BRKDET_MASK) | 117 | UART_FIFO_BRKDET_MASK) |
| 118 | 118 | ||
| 119 | #define UART_REG_SIZE 24 | ||
| 120 | |||
| 121 | #endif /* _LINUX_SERIAL_BCM63XX_H */ | 119 | #endif /* _LINUX_SERIAL_BCM63XX_H */ |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 21c2e05c1bc3..057038cf2788 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -63,7 +63,7 @@ struct uart_ops { | |||
| 63 | void (*flush_buffer)(struct uart_port *); | 63 | void (*flush_buffer)(struct uart_port *); |
| 64 | void (*set_termios)(struct uart_port *, struct ktermios *new, | 64 | void (*set_termios)(struct uart_port *, struct ktermios *new, |
| 65 | struct ktermios *old); | 65 | struct ktermios *old); |
| 66 | void (*set_ldisc)(struct uart_port *, int new); | 66 | void (*set_ldisc)(struct uart_port *, struct ktermios *); |
| 67 | void (*pm)(struct uart_port *, unsigned int state, | 67 | void (*pm)(struct uart_port *, unsigned int state, |
| 68 | unsigned int oldstate); | 68 | unsigned int oldstate); |
| 69 | 69 | ||
| @@ -131,6 +131,8 @@ struct uart_port { | |||
| 131 | void (*pm)(struct uart_port *, unsigned int state, | 131 | void (*pm)(struct uart_port *, unsigned int state, |
| 132 | unsigned int old); | 132 | unsigned int old); |
| 133 | void (*handle_break)(struct uart_port *); | 133 | void (*handle_break)(struct uart_port *); |
| 134 | int (*rs485_config)(struct uart_port *, | ||
| 135 | struct serial_rs485 *rs485); | ||
| 134 | unsigned int irq; /* irq number */ | 136 | unsigned int irq; /* irq number */ |
| 135 | unsigned long irqflags; /* irq flags */ | 137 | unsigned long irqflags; /* irq flags */ |
| 136 | unsigned int uartclk; /* base uart clock */ | 138 | unsigned int uartclk; /* base uart clock */ |
| @@ -140,12 +142,13 @@ struct uart_port { | |||
| 140 | unsigned char iotype; /* io access style */ | 142 | unsigned char iotype; /* io access style */ |
| 141 | unsigned char unused1; | 143 | unsigned char unused1; |
| 142 | 144 | ||
| 143 | #define UPIO_PORT (0) | 145 | #define UPIO_PORT (0) /* 8b I/O port access */ |
| 144 | #define UPIO_HUB6 (1) | 146 | #define UPIO_HUB6 (1) /* Hub6 ISA card */ |
| 145 | #define UPIO_MEM (2) | 147 | #define UPIO_MEM (2) /* 8b MMIO access */ |
| 146 | #define UPIO_MEM32 (3) | 148 | #define UPIO_MEM32 (3) /* 32b little endian */ |
| 147 | #define UPIO_AU (4) /* Au1x00 and RT288x type IO */ | 149 | #define UPIO_MEM32BE (4) /* 32b big endian */ |
| 148 | #define UPIO_TSI (5) /* Tsi108/109 type IO */ | 150 | #define UPIO_AU (5) /* Au1x00 and RT288x type IO */ |
| 151 | #define UPIO_TSI (6) /* Tsi108/109 type IO */ | ||
| 149 | 152 | ||
| 150 | unsigned int read_status_mask; /* driver specific */ | 153 | unsigned int read_status_mask; /* driver specific */ |
| 151 | unsigned int ignore_status_mask; /* driver specific */ | 154 | unsigned int ignore_status_mask; /* driver specific */ |
| @@ -160,21 +163,33 @@ struct uart_port { | |||
| 160 | /* flags must be updated while holding port mutex */ | 163 | /* flags must be updated while holding port mutex */ |
| 161 | upf_t flags; | 164 | upf_t flags; |
| 162 | 165 | ||
| 163 | #define UPF_FOURPORT ((__force upf_t) (1 << 1)) | 166 | /* |
| 164 | #define UPF_SAK ((__force upf_t) (1 << 2)) | 167 | * These flags must be equivalent to the flags defined in |
| 165 | #define UPF_SPD_MASK ((__force upf_t) (0x1030)) | 168 | * include/uapi/linux/tty_flags.h which are the userspace definitions |
| 166 | #define UPF_SPD_HI ((__force upf_t) (0x0010)) | 169 | * assigned from the serial_struct flags in uart_set_info() |
| 167 | #define UPF_SPD_VHI ((__force upf_t) (0x0020)) | 170 | * [for bit definitions in the UPF_CHANGE_MASK] |
| 168 | #define UPF_SPD_CUST ((__force upf_t) (0x0030)) | 171 | * |
| 169 | #define UPF_SPD_SHI ((__force upf_t) (0x1000)) | 172 | * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable |
| 170 | #define UPF_SPD_WARP ((__force upf_t) (0x1010)) | 173 | * except bit 15 (UPF_NO_TXEN_TEST) which is masked off. |
| 171 | #define UPF_SKIP_TEST ((__force upf_t) (1 << 6)) | 174 | * The remaining bits are serial-core specific and not modifiable by |
| 172 | #define UPF_AUTO_IRQ ((__force upf_t) (1 << 7)) | 175 | * userspace. |
| 173 | #define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) | 176 | */ |
| 174 | #define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) | 177 | #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) |
| 175 | #define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) | 178 | #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) |
| 179 | #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) | ||
| 180 | #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) | ||
| 181 | #define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) | ||
| 182 | #define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) | ||
| 183 | #define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) | ||
| 184 | #define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) | ||
| 185 | #define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) | ||
| 186 | #define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) | ||
| 187 | #define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) | ||
| 188 | #define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) | ||
| 189 | #define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) | ||
| 176 | #define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) | 190 | #define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) |
| 177 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) | 191 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) |
| 192 | |||
| 178 | /* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ | 193 | /* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ |
| 179 | #define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) | 194 | #define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) |
| 180 | /* Port has hardware-assisted s/w flow control */ | 195 | /* Port has hardware-assisted s/w flow control */ |
| @@ -190,9 +205,14 @@ struct uart_port { | |||
| 190 | #define UPF_DEAD ((__force upf_t) (1 << 30)) | 205 | #define UPF_DEAD ((__force upf_t) (1 << 30)) |
| 191 | #define UPF_IOREMAP ((__force upf_t) (1 << 31)) | 206 | #define UPF_IOREMAP ((__force upf_t) (1 << 31)) |
| 192 | 207 | ||
| 193 | #define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) | 208 | #define __UPF_CHANGE_MASK 0x17fff |
| 209 | #define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) | ||
| 194 | #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) | 210 | #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) |
| 195 | 211 | ||
| 212 | #if __UPF_CHANGE_MASK > ASYNC_FLAGS | ||
| 213 | #error Change mask not equivalent to userspace-visible bit defines | ||
| 214 | #endif | ||
| 215 | |||
| 196 | /* status must be updated while holding port lock */ | 216 | /* status must be updated while holding port lock */ |
| 197 | upstat_t status; | 217 | upstat_t status; |
| 198 | 218 | ||
| @@ -214,6 +234,7 @@ struct uart_port { | |||
| 214 | unsigned char unused[2]; | 234 | unsigned char unused[2]; |
| 215 | struct attribute_group *attr_group; /* port specific attributes */ | 235 | struct attribute_group *attr_group; /* port specific attributes */ |
| 216 | const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ | 236 | const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ |
| 237 | struct serial_rs485 rs485; | ||
| 217 | void *private_data; /* generic platform data pointer */ | 238 | void *private_data; /* generic platform data pointer */ |
| 218 | }; | 239 | }; |
| 219 | 240 | ||
| @@ -367,7 +388,7 @@ static inline int uart_tx_stopped(struct uart_port *port) | |||
| 367 | 388 | ||
| 368 | static inline bool uart_cts_enabled(struct uart_port *uport) | 389 | static inline bool uart_cts_enabled(struct uart_port *uport) |
| 369 | { | 390 | { |
| 370 | return uport->status & UPSTAT_CTS_ENABLE; | 391 | return !!(uport->status & UPSTAT_CTS_ENABLE); |
| 371 | } | 392 | } |
| 372 | 393 | ||
| 373 | /* | 394 | /* |
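
With rs485_config and the serial_rs485 state moved into struct uart_port, any UART driver (not just 8250) can back the TIOCSRS485/TIOCGRS485 ioctls by supplying the hook. A rough sketch, with the foo_* helpers hypothetical:

    static int foo_rs485_config(struct uart_port *port,
                                struct serial_rs485 *rs485)
    {
            if (rs485->flags & SER_RS485_ENABLED)
                    foo_hw_enable_rs485(port, rs485);
            else
                    foo_hw_disable_rs485(port);

            port->rs485 = *rs485;           /* what TIOCGRS485 reports back */
            return 0;
    }

    /* wired up once at probe time: */
    port->rs485_config = foo_rs485_config;
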
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 68c097077ef0..f4aee75f00b1 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h | |||
| @@ -18,8 +18,6 @@ struct shrink_control { | |||
| 18 | */ | 18 | */ |
| 19 | unsigned long nr_to_scan; | 19 | unsigned long nr_to_scan; |
| 20 | 20 | ||
| 21 | /* shrink from these nodes */ | ||
| 22 | nodemask_t nodes_to_scan; | ||
| 23 | /* current node being shrunk (for NUMA aware shrinkers) */ | 21 | /* current node being shrunk (for NUMA aware shrinkers) */ |
| 24 | int nid; | 22 | int nid; |
| 25 | }; | 23 | }; |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e76..85ab7d72b54c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/time.h> | 20 | #include <linux/time.h> |
| 21 | #include <linux/bug.h> | 21 | #include <linux/bug.h> |
| 22 | #include <linux/cache.h> | 22 | #include <linux/cache.h> |
| 23 | #include <linux/rbtree.h> | ||
| 24 | #include <linux/socket.h> | ||
| 23 | 25 | ||
| 24 | #include <linux/atomic.h> | 26 | #include <linux/atomic.h> |
| 25 | #include <asm/types.h> | 27 | #include <asm/types.h> |
| @@ -148,6 +150,8 @@ | |||
| 148 | struct net_device; | 150 | struct net_device; |
| 149 | struct scatterlist; | 151 | struct scatterlist; |
| 150 | struct pipe_inode_info; | 152 | struct pipe_inode_info; |
| 153 | struct iov_iter; | ||
| 154 | struct napi_struct; | ||
| 151 | 155 | ||
| 152 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 156 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
| 153 | struct nf_conntrack { | 157 | struct nf_conntrack { |
| @@ -341,7 +345,6 @@ enum { | |||
| 341 | SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ | 345 | SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ |
| 342 | SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ | 346 | SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ |
| 343 | SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ | 347 | SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ |
| 344 | SKB_FCLONE_FREE, /* this companion fclone skb is available */ | ||
| 345 | }; | 348 | }; |
| 346 | 349 | ||
| 347 | enum { | 350 | enum { |
| @@ -370,8 +373,7 @@ enum { | |||
| 370 | 373 | ||
| 371 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, | 374 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, |
| 372 | 375 | ||
| 373 | SKB_GSO_MPLS = 1 << 12, | 376 | SKB_GSO_TUNNEL_REMCSUM = 1 << 12, |
| 374 | |||
| 375 | }; | 377 | }; |
| 376 | 378 | ||
| 377 | #if BITS_PER_LONG > 32 | 379 | #if BITS_PER_LONG > 32 |
| @@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
| 440 | * @next: Next buffer in list | 442 | * @next: Next buffer in list |
| 441 | * @prev: Previous buffer in list | 443 | * @prev: Previous buffer in list |
| 442 | * @tstamp: Time we arrived/left | 444 | * @tstamp: Time we arrived/left |
| 445 | * @rbnode: RB tree node, alternative to next/prev for netem/tcp | ||
| 443 | * @sk: Socket we are owned by | 446 | * @sk: Socket we are owned by |
| 444 | * @dev: Device we arrived on/are leaving by | 447 | * @dev: Device we arrived on/are leaving by |
| 445 | * @cb: Control buffer. Free for use by every layer. Put private vars here | 448 | * @cb: Control buffer. Free for use by every layer. Put private vars here |
| @@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
| 504 | */ | 507 | */ |
| 505 | 508 | ||
| 506 | struct sk_buff { | 509 | struct sk_buff { |
| 507 | /* These two members must be first. */ | ||
| 508 | struct sk_buff *next; | ||
| 509 | struct sk_buff *prev; | ||
| 510 | |||
| 511 | union { | 510 | union { |
| 512 | ktime_t tstamp; | 511 | struct { |
| 513 | struct skb_mstamp skb_mstamp; | 512 | /* These two members must be first. */ |
| 513 | struct sk_buff *next; | ||
| 514 | struct sk_buff *prev; | ||
| 515 | |||
| 516 | union { | ||
| 517 | ktime_t tstamp; | ||
| 518 | struct skb_mstamp skb_mstamp; | ||
| 519 | }; | ||
| 520 | }; | ||
| 521 | struct rb_node rbnode; /* used in netem & tcp stack */ | ||
| 514 | }; | 522 | }; |
| 515 | |||
| 516 | struct sock *sk; | 523 | struct sock *sk; |
| 517 | struct net_device *dev; | 524 | struct net_device *dev; |
| 518 | 525 | ||
| @@ -597,7 +604,8 @@ struct sk_buff { | |||
| 597 | #endif | 604 | #endif |
| 598 | __u8 ipvs_property:1; | 605 | __u8 ipvs_property:1; |
| 599 | __u8 inner_protocol_type:1; | 606 | __u8 inner_protocol_type:1; |
| 600 | /* 4 or 6 bit hole */ | 607 | __u8 remcsum_offload:1; |
| 608 | /* 3 or 5 bit hole */ | ||
| 601 | 609 | ||
| 602 | #ifdef CONFIG_NET_SCHED | 610 | #ifdef CONFIG_NET_SCHED |
| 603 | __u16 tc_index; /* traffic control index */ | 611 | __u16 tc_index; /* traffic control index */ |
| @@ -666,6 +674,7 @@ struct sk_buff { | |||
| 666 | 674 | ||
| 667 | #define SKB_ALLOC_FCLONE 0x01 | 675 | #define SKB_ALLOC_FCLONE 0x01 |
| 668 | #define SKB_ALLOC_RX 0x02 | 676 | #define SKB_ALLOC_RX 0x02 |
| 677 | #define SKB_ALLOC_NAPI 0x04 | ||
| 669 | 678 | ||
| 670 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ | 679 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ |
| 671 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) | 680 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) |
| @@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) | |||
| 710 | skb->_skb_refdst = (unsigned long)dst; | 719 | skb->_skb_refdst = (unsigned long)dst; |
| 711 | } | 720 | } |
| 712 | 721 | ||
| 713 | void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, | ||
| 714 | bool force); | ||
| 715 | |||
| 716 | /** | 722 | /** |
| 717 | * skb_dst_set_noref - sets skb dst, hopefully, without taking reference | 723 | * skb_dst_set_noref - sets skb dst, hopefully, without taking reference |
| 718 | * @skb: buffer | 724 | * @skb: buffer |
| @@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, | |||
| 725 | */ | 731 | */ |
| 726 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) | 732 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) |
| 727 | { | 733 | { |
| 728 | __skb_dst_set_noref(skb, dst, false); | 734 | WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); |
| 729 | } | 735 | skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; |
| 730 | |||
| 731 | /** | ||
| 732 | * skb_dst_set_noref_force - sets skb dst, without taking reference | ||
| 733 | * @skb: buffer | ||
| 734 | * @dst: dst entry | ||
| 735 | * | ||
| 736 | * Sets skb dst, assuming a reference was not taken on dst. | ||
| 737 | * No reference is taken and no dst_release will be called. While for | ||
| 738 | * cached dsts deferred reclaim is a basic feature, for entries that are | ||
| 739 | * not cached it is caller's job to guarantee that last dst_release for | ||
| 740 | * provided dst happens when nobody uses it, eg. after a RCU grace period. | ||
| 741 | */ | ||
| 742 | static inline void skb_dst_set_noref_force(struct sk_buff *skb, | ||
| 743 | struct dst_entry *dst) | ||
| 744 | { | ||
| 745 | __skb_dst_set_noref(skb, dst, true); | ||
| 746 | } | 736 | } |
| 747 | 737 | ||
| 748 | /** | 738 | /** |
| @@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, | |||
| 810 | fclones = container_of(skb, struct sk_buff_fclones, skb1); | 800 | fclones = container_of(skb, struct sk_buff_fclones, skb1); |
| 811 | 801 | ||
| 812 | return skb->fclone == SKB_FCLONE_ORIG && | 802 | return skb->fclone == SKB_FCLONE_ORIG && |
| 813 | fclones->skb2.fclone == SKB_FCLONE_CLONE && | 803 | atomic_read(&fclones->fclone_ref) > 1 && |
| 814 | fclones->skb2.sk == sk; | 804 | fclones->skb2.sk == sk; |
| 815 | } | 805 | } |
| 816 | 806 | ||
| @@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
| 2176 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); | 2166 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); |
| 2177 | } | 2167 | } |
| 2178 | 2168 | ||
| 2169 | void *napi_alloc_frag(unsigned int fragsz); | ||
| 2170 | struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, | ||
| 2171 | unsigned int length, gfp_t gfp_mask); | ||
| 2172 | static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, | ||
| 2173 | unsigned int length) | ||
| 2174 | { | ||
| 2175 | return __napi_alloc_skb(napi, length, GFP_ATOMIC); | ||
| 2176 | } | ||
| 2177 | |||
| 2179 | /** | 2178 | /** |
| 2180 | * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data | 2179 | * __dev_alloc_pages - allocate page for network Rx |
| 2181 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | 2180 | * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx |
| 2182 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | 2181 | * @order: size of the allocation |
| 2183 | * @order: size of the allocation | ||
| 2184 | * | 2182 | * |
| 2185 | * Allocate a new page. | 2183 | * Allocate a new page. |
| 2186 | * | 2184 | * |
| 2187 | * %NULL is returned if there is no free memory. | 2185 | * %NULL is returned if there is no free memory. |
| 2188 | */ | 2186 | */ |
| 2189 | static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, | 2187 | static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, |
| 2190 | struct sk_buff *skb, | 2188 | unsigned int order) |
| 2191 | unsigned int order) | 2189 | { |
| 2192 | { | 2190 | /* This piece of code contains several assumptions. |
| 2193 | struct page *page; | 2191 | * 1. This is for device Rx, therefore a cold page is preferred. |
| 2194 | 2192 | * 2. The expectation is the user wants a compound page. | |
| 2195 | gfp_mask |= __GFP_COLD; | 2193 | * 3. If requesting an order 0 page it will not be compound |
| 2196 | 2194 | * due to the check to see if order has a value in prep_new_page | |
| 2197 | if (!(gfp_mask & __GFP_NOMEMALLOC)) | 2195 | * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to |
| 2198 | gfp_mask |= __GFP_MEMALLOC; | 2196 | * code in gfp_to_alloc_flags that should be enforcing this. |
| 2197 | */ | ||
| 2198 | gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; | ||
| 2199 | 2199 | ||
| 2200 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); | 2200 | return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); |
| 2201 | if (skb && page && page->pfmemalloc) | 2201 | } |
| 2202 | skb->pfmemalloc = true; | ||
| 2203 | 2202 | ||
| 2204 | return page; | 2203 | static inline struct page *dev_alloc_pages(unsigned int order) |
| 2204 | { | ||
| 2205 | return __dev_alloc_pages(GFP_ATOMIC, order); | ||
| 2205 | } | 2206 | } |
| 2206 | 2207 | ||
| 2207 | /** | 2208 | /** |
| 2208 | * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data | 2209 | * __dev_alloc_page - allocate a page for network Rx |
| 2209 | * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX | 2210 | * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx |
| 2210 | * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used | ||
| 2211 | * | 2211 | * |
| 2212 | * Allocate a new page. | 2212 | * Allocate a new page. |
| 2213 | * | 2213 | * |
| 2214 | * %NULL is returned if there is no free memory. | 2214 | * %NULL is returned if there is no free memory. |
| 2215 | */ | 2215 | */ |
| 2216 | static inline struct page *__skb_alloc_page(gfp_t gfp_mask, | 2216 | static inline struct page *__dev_alloc_page(gfp_t gfp_mask) |
| 2217 | struct sk_buff *skb) | 2217 | { |
| 2218 | return __dev_alloc_pages(gfp_mask, 0); | ||
| 2219 | } | ||
| 2220 | |||
| 2221 | static inline struct page *dev_alloc_page(void) | ||
| 2218 | { | 2222 | { |
| 2219 | return __skb_alloc_pages(gfp_mask, skb, 0); | 2223 | return __dev_alloc_page(GFP_ATOMIC); |
| 2220 | } | 2224 | } |
| 2221 | 2225 | ||
| 2222 | /** | 2226 | /** |
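
The skb-aware page allocators above are gone; the new helpers hand back a page without touching any skb, so a driver propagates the pfmemalloc marker itself once it builds the skb over that page. A hedged Rx-refill fragment:

    struct page *page = dev_alloc_page();   /* order-0, GFP_ATOMIC underneath */

    if (!page)
            return -ENOMEM;

    /* ... DMA-map the page and post it to the Rx ring ... */

    /* later, when an skb is constructed over this page: */
    if (page->pfmemalloc)
            skb->pfmemalloc = true;
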
| @@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) | |||
| 2448 | * is untouched. Otherwise it is extended. Returns zero on | 2452 | * is untouched. Otherwise it is extended. Returns zero on |
| 2449 | * success. The skb is freed on error. | 2453 | * success. The skb is freed on error. |
| 2450 | */ | 2454 | */ |
| 2451 | |||
| 2452 | static inline int skb_padto(struct sk_buff *skb, unsigned int len) | 2455 | static inline int skb_padto(struct sk_buff *skb, unsigned int len) |
| 2453 | { | 2456 | { |
| 2454 | unsigned int size = skb->len; | 2457 | unsigned int size = skb->len; |
| @@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) | |||
| 2457 | return skb_pad(skb, len - size); | 2460 | return skb_pad(skb, len - size); |
| 2458 | } | 2461 | } |
| 2459 | 2462 | ||
| 2463 | /** | ||
| 2464 | * skb_put_padto - increase size and pad an skbuff up to a minimal size | ||
| 2465 | * @skb: buffer to pad | ||
| 2466 | * @len: minimal length | ||
| 2467 | * | ||
| 2468 | * Pads up a buffer to ensure the trailing bytes exist and are | ||
| 2469 | * blanked. If the buffer already contains sufficient data it | ||
| 2470 | * is untouched. Otherwise it is extended. Returns zero on | ||
| 2471 | * success. The skb is freed on error. | ||
| 2472 | */ | ||
| 2473 | static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) | ||
| 2474 | { | ||
| 2475 | unsigned int size = skb->len; | ||
| 2476 | |||
| 2477 | if (unlikely(size < len)) { | ||
| 2478 | len -= size; | ||
| 2479 | if (skb_pad(skb, len)) | ||
| 2480 | return -ENOMEM; | ||
| 2481 | __skb_put(skb, len); | ||
| 2482 | } | ||
| 2483 | return 0; | ||
| 2484 | } | ||
| 2485 | |||
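
Unlike skb_padto() above, the new skb_put_padto() also advances skb->len over the padding, which is what a transmit path usually wants before handing a minimum-length frame to hardware. Fragment of a driver xmit routine (ETH_ZLEN chosen as an arbitrary minimum):

    /* pad runt frames; the helper frees the skb if padding fails */
    if (skb_put_padto(skb, ETH_ZLEN))
            return NETDEV_TX_OK;            /* skb already consumed */
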
| 2460 | static inline int skb_add_data(struct sk_buff *skb, | 2486 | static inline int skb_add_data(struct sk_buff *skb, |
| 2461 | char __user *from, int copy) | 2487 | char __user *from, int copy) |
| 2462 | { | 2488 | { |
| @@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, | |||
| 2629 | int *err); | 2655 | int *err); |
| 2630 | unsigned int datagram_poll(struct file *file, struct socket *sock, | 2656 | unsigned int datagram_poll(struct file *file, struct socket *sock, |
| 2631 | struct poll_table_struct *wait); | 2657 | struct poll_table_struct *wait); |
| 2632 | int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, | 2658 | int skb_copy_datagram_iter(const struct sk_buff *from, int offset, |
| 2633 | struct iovec *to, int size); | 2659 | struct iov_iter *to, int size); |
| 2634 | int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, | 2660 | static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, |
| 2635 | struct iovec *iov); | 2661 | struct msghdr *msg, int size) |
| 2636 | int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, | 2662 | { |
| 2637 | const struct iovec *from, int from_offset, | 2663 | return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); |
| 2638 | int len); | 2664 | } |
| 2639 | int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, | 2665 | int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, |
| 2640 | int offset, size_t count); | 2666 | struct msghdr *msg); |
| 2641 | int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, | 2667 | int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, |
| 2642 | const struct iovec *to, int to_offset, | 2668 | struct iov_iter *from, int len); |
| 2643 | int size); | 2669 | int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); |
| 2644 | void skb_free_datagram(struct sock *sk, struct sk_buff *skb); | 2670 | void skb_free_datagram(struct sock *sk, struct sk_buff *skb); |
| 2645 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); | 2671 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); |
| 2646 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); | 2672 | int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); |
| @@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); | |||
| 2661 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); | 2687 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); |
| 2662 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); | 2688 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
| 2663 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); | 2689 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
| 2690 | int skb_ensure_writable(struct sk_buff *skb, int write_len); | ||
| 2691 | int skb_vlan_pop(struct sk_buff *skb); | ||
| 2692 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); | ||
| 2693 | |||
| 2694 | static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) | ||
| 2695 | { | ||
| 2696 | /* XXX: stripping const */ | ||
| 2697 | return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); | ||
| 2698 | } | ||
| 2699 | |||
| 2700 | static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) | ||
| 2701 | { | ||
| 2702 | return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; | ||
| 2703 | } | ||
| 2664 | 2704 | ||
| 2665 | struct skb_checksum_ops { | 2705 | struct skb_checksum_ops { |
| 2666 | __wsum (*update)(const void *mem, int len, __wsum wsum); | 2706 | __wsum (*update)(const void *mem, int len, __wsum wsum); |
diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..9a139b637069 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 493 | * @memcg: pointer to the memcg this cache belongs to | 493 | * @memcg: pointer to the memcg this cache belongs to |
| 494 | * @list: list_head for the list of all caches in this memcg | 494 | * @list: list_head for the list of all caches in this memcg |
| 495 | * @root_cache: pointer to the global, root cache, this cache was derived from | 495 | * @root_cache: pointer to the global, root cache, this cache was derived from |
| 496 | * @nr_pages: number of pages that belongs to this cache. | ||
| 497 | */ | 496 | */ |
| 498 | struct memcg_cache_params { | 497 | struct memcg_cache_params { |
| 499 | bool is_root_cache; | 498 | bool is_root_cache; |
| @@ -506,17 +505,12 @@ struct memcg_cache_params { | |||
| 506 | struct mem_cgroup *memcg; | 505 | struct mem_cgroup *memcg; |
| 507 | struct list_head list; | 506 | struct list_head list; |
| 508 | struct kmem_cache *root_cache; | 507 | struct kmem_cache *root_cache; |
| 509 | atomic_t nr_pages; | ||
| 510 | }; | 508 | }; |
| 511 | }; | 509 | }; |
| 512 | }; | 510 | }; |
| 513 | 511 | ||
| 514 | int memcg_update_all_caches(int num_memcgs); | 512 | int memcg_update_all_caches(int num_memcgs); |
| 515 | 513 | ||
| 516 | struct seq_file; | ||
| 517 | int cache_show(struct kmem_cache *s, struct seq_file *m); | ||
| 518 | void print_slabinfo_header(struct seq_file *m); | ||
| 519 | |||
| 520 | /** | 514 | /** |
| 521 | * kmalloc_array - allocate memory for an array. | 515 | * kmalloc_array - allocate memory for an array. |
| 522 | * @n: number of elements. | 516 | * @n: number of elements. |
diff --git a/include/linux/socket.h b/include/linux/socket.h index bb9b83640070..6e49a14365dc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
| @@ -47,16 +47,25 @@ struct linger { | |||
| 47 | struct msghdr { | 47 | struct msghdr { |
| 48 | void *msg_name; /* ptr to socket address structure */ | 48 | void *msg_name; /* ptr to socket address structure */ |
| 49 | int msg_namelen; /* size of socket address structure */ | 49 | int msg_namelen; /* size of socket address structure */ |
| 50 | struct iovec *msg_iov; /* scatter/gather array */ | 50 | struct iov_iter msg_iter; /* data */ |
| 51 | __kernel_size_t msg_iovlen; /* # elements in msg_iov */ | ||
| 52 | void *msg_control; /* ancillary data */ | 51 | void *msg_control; /* ancillary data */ |
| 53 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ | 52 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ |
| 54 | unsigned int msg_flags; /* flags on received message */ | 53 | unsigned int msg_flags; /* flags on received message */ |
| 55 | }; | 54 | }; |
| 55 | |||
| 56 | struct user_msghdr { | ||
| 57 | void __user *msg_name; /* ptr to socket address structure */ | ||
| 58 | int msg_namelen; /* size of socket address structure */ | ||
| 59 | struct iovec __user *msg_iov; /* scatter/gather array */ | ||
| 60 | __kernel_size_t msg_iovlen; /* # elements in msg_iov */ | ||
| 61 | void __user *msg_control; /* ancillary data */ | ||
| 62 | __kernel_size_t msg_controllen; /* ancillary data buffer length */ | ||
| 63 | unsigned int msg_flags; /* flags on received message */ | ||
| 64 | }; | ||
| 56 | 65 | ||
| 57 | /* For recvmmsg/sendmmsg */ | 66 | /* For recvmmsg/sendmmsg */ |
| 58 | struct mmsghdr { | 67 | struct mmsghdr { |
| 59 | struct msghdr msg_hdr; | 68 | struct user_msghdr msg_hdr; |
| 60 | unsigned int msg_len; | 69 | unsigned int msg_len; |
| 61 | }; | 70 | }; |
| 62 | 71 | ||
| @@ -94,6 +103,10 @@ struct cmsghdr { | |||
| 94 | (cmsg)->cmsg_len <= (unsigned long) \ | 103 | (cmsg)->cmsg_len <= (unsigned long) \ |
| 95 | ((mhdr)->msg_controllen - \ | 104 | ((mhdr)->msg_controllen - \ |
| 96 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) | 105 | ((char *)(cmsg) - (char *)(mhdr)->msg_control))) |
| 106 | #define for_each_cmsghdr(cmsg, msg) \ | ||
| 107 | for (cmsg = CMSG_FIRSTHDR(msg); \ | ||
| 108 | cmsg; \ | ||
| 109 | cmsg = CMSG_NXTHDR(msg, cmsg)) | ||
| 97 | 110 | ||
| 98 | /* | 111 | /* |
| 99 | * Get the next cmsg header | 112 | * Get the next cmsg header |
| @@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, | |||
| 312 | extern unsigned long iov_pages(const struct iovec *iov, int offset, | 325 | extern unsigned long iov_pages(const struct iovec *iov, int offset, |
| 313 | unsigned long nr_segs); | 326 | unsigned long nr_segs); |
| 314 | 327 | ||
| 315 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); | ||
| 316 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); | 328 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); |
| 317 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 329 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
| 318 | 330 | ||
| 319 | struct timespec; | 331 | struct timespec; |
| 320 | 332 | ||
| 321 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ | 333 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ |
| 322 | extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); | 334 | extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
| 323 | extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); | 335 | extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
| 324 | extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | 336 | extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
| 325 | unsigned int flags, struct timespec *timeout); | 337 | unsigned int flags, struct timespec *timeout); |
| 326 | extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, | 338 | extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, |
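
The kernel-internal msghdr now carries an iov_iter while the old layout lives on as user_msghdr at the syscall boundary, and for_each_cmsghdr() tidies the usual control-message walk. A minimal sketch over a kernel-side struct msghdr:

    static int scan_cmsgs(struct msghdr *msg)
    {
            struct cmsghdr *cmsg;

            for_each_cmsghdr(cmsg, msg) {
                    if (!CMSG_OK(msg, cmsg))
                            return -EINVAL;
                    if (cmsg->cmsg_level == SOL_SOCKET &&
                        cmsg->cmsg_type == SCM_RIGHTS)
                            ;               /* e.g. handle fd passing here */
            }
            return 0;
    }
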
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 46d188a9947c..a6ef2a8e6de4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi) | |||
| 1049 | extern const struct spi_device_id * | 1049 | extern const struct spi_device_id * |
| 1050 | spi_get_device_id(const struct spi_device *sdev); | 1050 | spi_get_device_id(const struct spi_device *sdev); |
| 1051 | 1051 | ||
| 1052 | static inline bool | ||
| 1053 | spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) | ||
| 1054 | { | ||
| 1055 | return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); | ||
| 1056 | } | ||
| 1057 | |||
| 1052 | #endif /* __LINUX_SPI_H */ | 1058 | #endif /* __LINUX_SPI_H */ |
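
spi_transfer_is_last() tells a controller driver whether the transfer in hand is the final one of master->cur_msg, e.g. to decide when chip select can be dropped. A hedged sketch inside a transfer_one() callback (the foo_* helpers are hypothetical):

    static int foo_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
            foo_start_transfer(master, xfer);

            if (spi_transfer_is_last(master, xfer) && !xfer->cs_change)
                    foo_drop_cs(spi);

            return 0;
    }
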
diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 91f5eab9e428..f84212cd3b7d 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h | |||
| @@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl); | |||
| 134 | * this structure. | 134 | * this structure. |
| 135 | * @probe: binds this driver to a SPMI device. | 135 | * @probe: binds this driver to a SPMI device. |
| 136 | * @remove: unbinds this driver from the SPMI device. | 136 | * @remove: unbinds this driver from the SPMI device. |
| 137 | * @shutdown: standard shutdown callback used during powerdown/halt. | ||
| 138 | * @suspend: standard suspend callback used during system suspend. | ||
| 139 | * @resume: standard resume callback used during system resume. | ||
| 140 | * | 137 | * |
| 141 | * If PM runtime support is desired for a slave, a device driver can call | 138 | * If PM runtime support is desired for a slave, a device driver can call |
| 142 | * pm_runtime_put() from their probe() routine (and a balancing | 139 | * pm_runtime_put() from their probe() routine (and a balancing |
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 115b570e3bff..669045ab73f3 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef __LINUX_STACKTRACE_H | 1 | #ifndef __LINUX_STACKTRACE_H |
| 2 | #define __LINUX_STACKTRACE_H | 2 | #define __LINUX_STACKTRACE_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 4 | struct task_struct; | 6 | struct task_struct; |
| 5 | struct pt_regs; | 7 | struct pt_regs; |
| 6 | 8 | ||
| @@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, | |||
| 20 | struct stack_trace *trace); | 22 | struct stack_trace *trace); |
| 21 | 23 | ||
| 22 | extern void print_stack_trace(struct stack_trace *trace, int spaces); | 24 | extern void print_stack_trace(struct stack_trace *trace, int spaces); |
| 25 | extern int snprint_stack_trace(char *buf, size_t size, | ||
| 26 | struct stack_trace *trace, int spaces); | ||
| 23 | 27 | ||
| 24 | #ifdef CONFIG_USER_STACKTRACE_SUPPORT | 28 | #ifdef CONFIG_USER_STACKTRACE_SUPPORT |
| 25 | extern void save_stack_trace_user(struct stack_trace *trace); | 29 | extern void save_stack_trace_user(struct stack_trace *trace); |
| @@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace); | |||
| 32 | # define save_stack_trace_tsk(tsk, trace) do { } while (0) | 36 | # define save_stack_trace_tsk(tsk, trace) do { } while (0) |
| 33 | # define save_stack_trace_user(trace) do { } while (0) | 37 | # define save_stack_trace_user(trace) do { } while (0) |
| 34 | # define print_stack_trace(trace, spaces) do { } while (0) | 38 | # define print_stack_trace(trace, spaces) do { } while (0) |
| 39 | # define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) | ||
| 35 | #endif | 40 | #endif |
| 36 | 41 | ||
| 37 | #endif | 42 | #endif |
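
snprint_stack_trace() is the buffer-writing counterpart of print_stack_trace(), for callers that need the trace as text rather than in the log. Sketch:

    static void format_current_stack(char *buf, size_t size)
    {
            unsigned long entries[16];
            struct stack_trace trace = {
                    .entries        = entries,
                    .max_entries    = ARRAY_SIZE(entries),
            };

            save_stack_trace(&trace);
            snprint_stack_trace(buf, size, &trace, 1);      /* indent by one space */
    }
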
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 8e030075fe79..a7cbb570cc5c 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
| @@ -53,7 +53,7 @@ struct rpc_cred { | |||
| 53 | struct rcu_head cr_rcu; | 53 | struct rcu_head cr_rcu; |
| 54 | struct rpc_auth * cr_auth; | 54 | struct rpc_auth * cr_auth; |
| 55 | const struct rpc_credops *cr_ops; | 55 | const struct rpc_credops *cr_ops; |
| 56 | #ifdef RPC_DEBUG | 56 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 57 | unsigned long cr_magic; /* 0x0f4aa4f0 */ | 57 | unsigned long cr_magic; /* 0x0f4aa4f0 */ |
| 58 | #endif | 58 | #endif |
| 59 | unsigned long cr_expire; /* when to gc */ | 59 | unsigned long cr_expire; /* when to gc */ |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 70736b98c721..d86acc63b25f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -63,6 +63,9 @@ struct rpc_clnt { | |||
| 63 | struct rpc_rtt cl_rtt_default; | 63 | struct rpc_rtt cl_rtt_default; |
| 64 | struct rpc_timeout cl_timeout_default; | 64 | struct rpc_timeout cl_timeout_default; |
| 65 | const struct rpc_program *cl_program; | 65 | const struct rpc_program *cl_program; |
| 66 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
| 67 | struct dentry *cl_debugfs; /* debugfs directory */ | ||
| 68 | #endif | ||
| 66 | }; | 69 | }; |
| 67 | 70 | ||
| 68 | /* | 71 | /* |
| @@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); | |||
| 176 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); | 179 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); |
| 177 | int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); | 180 | int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); |
| 178 | 181 | ||
| 182 | const char *rpc_proc_name(const struct rpc_task *task); | ||
| 179 | #endif /* __KERNEL__ */ | 183 | #endif /* __KERNEL__ */ |
| 180 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 184 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 9385bd74c860..c57d8ea0716c 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
| @@ -10,22 +10,10 @@ | |||
| 10 | 10 | ||
| 11 | #include <uapi/linux/sunrpc/debug.h> | 11 | #include <uapi/linux/sunrpc/debug.h> |
| 12 | 12 | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Enable RPC debugging/profiling. | ||
| 16 | */ | ||
| 17 | #ifdef CONFIG_SUNRPC_DEBUG | ||
| 18 | #define RPC_DEBUG | ||
| 19 | #endif | ||
| 20 | #ifdef CONFIG_TRACEPOINTS | ||
| 21 | #define RPC_TRACEPOINTS | ||
| 22 | #endif | ||
| 23 | /* #define RPC_PROFILE */ | ||
| 24 | |||
| 25 | /* | 13 | /* |
| 26 | * Debugging macros etc | 14 | * Debugging macros etc |
| 27 | */ | 15 | */ |
| 28 | #ifdef RPC_DEBUG | 16 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 29 | extern unsigned int rpc_debug; | 17 | extern unsigned int rpc_debug; |
| 30 | extern unsigned int nfs_debug; | 18 | extern unsigned int nfs_debug; |
| 31 | extern unsigned int nfsd_debug; | 19 | extern unsigned int nfsd_debug; |
| @@ -36,7 +24,7 @@ extern unsigned int nlm_debug; | |||
| 36 | #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) | 24 | #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) |
| 37 | 25 | ||
| 38 | #undef ifdebug | 26 | #undef ifdebug |
| 39 | #ifdef RPC_DEBUG | 27 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 40 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) | 28 | # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) |
| 41 | 29 | ||
| 42 | # define dfprintk(fac, args...) \ | 30 | # define dfprintk(fac, args...) \ |
| @@ -65,9 +53,55 @@ extern unsigned int nlm_debug; | |||
| 65 | /* | 53 | /* |
| 66 | * Sysctl interface for RPC debugging | 54 | * Sysctl interface for RPC debugging |
| 67 | */ | 55 | */ |
| 68 | #ifdef RPC_DEBUG | 56 | |
| 57 | struct rpc_clnt; | ||
| 58 | struct rpc_xprt; | ||
| 59 | |||
| 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
| 69 | void rpc_register_sysctl(void); | 61 | void rpc_register_sysctl(void); |
| 70 | void rpc_unregister_sysctl(void); | 62 | void rpc_unregister_sysctl(void); |
| 63 | int sunrpc_debugfs_init(void); | ||
| 64 | void sunrpc_debugfs_exit(void); | ||
| 65 | int rpc_clnt_debugfs_register(struct rpc_clnt *); | ||
| 66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); | ||
| 67 | int rpc_xprt_debugfs_register(struct rpc_xprt *); | ||
| 68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); | ||
| 69 | #else | ||
| 70 | static inline int | ||
| 71 | sunrpc_debugfs_init(void) | ||
| 72 | { | ||
| 73 | return 0; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline void | ||
| 77 | sunrpc_debugfs_exit(void) | ||
| 78 | { | ||
| 79 | return; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline int | ||
| 83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | ||
| 84 | { | ||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline void | ||
| 89 | rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) | ||
| 90 | { | ||
| 91 | return; | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline int | ||
| 95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | ||
| 96 | { | ||
| 97 | return 0; | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline void | ||
| 101 | rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) | ||
| 102 | { | ||
| 103 | return; | ||
| 104 | } | ||
| 71 | #endif | 105 | #endif |
| 72 | 106 | ||
| 73 | #endif /* _LINUX_SUNRPC_DEBUG_H_ */ | 107 | #endif /* _LINUX_SUNRPC_DEBUG_H_ */ |
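The stub variants above let callers invoke the new debugfs hooks unconditionally; with CONFIG_SUNRPC_DEBUG off they compile to no-ops returning 0. A hedged sketch of the expected call pattern (the init function below is illustrative):

    #include <linux/sunrpc/debug.h>

    static int __init example_sunrpc_module_init(void)
    {
            int err;

            /* Safe in all configurations; the !SUNRPC_DEBUG stub returns 0. */
            err = sunrpc_debugfs_init();
            if (err)
                    return err;

            /* ... set up clients/transports; each may then call
             * rpc_clnt_debugfs_register() / rpc_xprt_debugfs_register(). */
            return 0;
    }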
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1565bbe86d51..eecb5a71e6c0 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h | |||
| @@ -27,10 +27,13 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
| 29 | #include <linux/ktime.h> | 29 | #include <linux/ktime.h> |
| 30 | #include <linux/spinlock.h> | ||
| 30 | 31 | ||
| 31 | #define RPC_IOSTATS_VERS "1.0" | 32 | #define RPC_IOSTATS_VERS "1.0" |
| 32 | 33 | ||
| 33 | struct rpc_iostats { | 34 | struct rpc_iostats { |
| 35 | spinlock_t om_lock; | ||
| 36 | |||
| 34 | /* | 37 | /* |
| 35 | * These counters give an idea about how many request | 38 | * These counters give an idea about how many request |
| 36 | * transmissions are required, on average, to complete that | 39 | * transmissions are required, on average, to complete that |
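With om_lock embedded in struct rpc_iostats, updates to the per-op counters are expected to be serialized on that lock. A minimal sketch of the idiom (the helper function is illustrative):

    #include <linux/sunrpc/metrics.h>

    /* Illustrative: bump one counter under the new per-op spinlock. */
    static void example_count_transmission(struct rpc_iostats *op_metrics)
    {
            spin_lock(&op_metrics->om_lock);
            op_metrics->om_ntrans++;
            spin_unlock(&op_metrics->om_lock);
    }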
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1a8959944c5f..5f1e6bd4c316 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -79,7 +79,7 @@ struct rpc_task { | |||
| 79 | unsigned short tk_flags; /* misc flags */ | 79 | unsigned short tk_flags; /* misc flags */ |
| 80 | unsigned short tk_timeouts; /* maj timeouts */ | 80 | unsigned short tk_timeouts; /* maj timeouts */ |
| 81 | 81 | ||
| 82 | #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) | 82 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
| 83 | unsigned short tk_pid; /* debugging aid */ | 83 | unsigned short tk_pid; /* debugging aid */ |
| 84 | #endif | 84 | #endif |
| 85 | unsigned char tk_priority : 2,/* Task priority */ | 85 | unsigned char tk_priority : 2,/* Task priority */ |
| @@ -187,7 +187,7 @@ struct rpc_wait_queue { | |||
| 187 | unsigned char nr; /* # tasks remaining for cookie */ | 187 | unsigned char nr; /* # tasks remaining for cookie */ |
| 188 | unsigned short qlen; /* total # tasks waiting in queue */ | 188 | unsigned short qlen; /* total # tasks waiting in queue */ |
| 189 | struct rpc_timer timer_list; | 189 | struct rpc_timer timer_list; |
| 190 | #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) | 190 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
| 191 | const char * name; | 191 | const char * name; |
| 192 | #endif | 192 | #endif |
| 193 | }; | 193 | }; |
| @@ -237,7 +237,7 @@ void rpc_free(void *); | |||
| 237 | int rpciod_up(void); | 237 | int rpciod_up(void); |
| 238 | void rpciod_down(void); | 238 | void rpciod_down(void); |
| 239 | int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); | 239 | int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); |
| 240 | #ifdef RPC_DEBUG | 240 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 241 | struct net; | 241 | struct net; |
| 242 | void rpc_show_tasks(struct net *); | 242 | void rpc_show_tasks(struct net *); |
| 243 | #endif | 243 | #endif |
| @@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) | |||
| 251 | return __rpc_wait_for_completion_task(task, NULL); | 251 | return __rpc_wait_for_completion_task(task, NULL); |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | #if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) | 254 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) |
| 255 | static inline const char * rpc_qname(const struct rpc_wait_queue *q) | 255 | static inline const char * rpc_qname(const struct rpc_wait_queue *q) |
| 256 | { | 256 | { |
| 257 | return ((q && q->name) ? q->name : "unknown"); | 257 | return ((q && q->name) ? q->name : "unknown"); |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 21678464883a..6f22cfeef5e3 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
| @@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *); | |||
| 26 | 26 | ||
| 27 | /* statistics for svc_pool structures */ | 27 | /* statistics for svc_pool structures */ |
| 28 | struct svc_pool_stats { | 28 | struct svc_pool_stats { |
| 29 | unsigned long packets; | 29 | atomic_long_t packets; |
| 30 | unsigned long sockets_queued; | 30 | unsigned long sockets_queued; |
| 31 | unsigned long threads_woken; | 31 | atomic_long_t threads_woken; |
| 32 | unsigned long threads_timedout; | 32 | atomic_long_t threads_timedout; |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | /* | 35 | /* |
| @@ -45,12 +45,13 @@ struct svc_pool_stats { | |||
| 45 | struct svc_pool { | 45 | struct svc_pool { |
| 46 | unsigned int sp_id; /* pool id; also node id on NUMA */ | 46 | unsigned int sp_id; /* pool id; also node id on NUMA */ |
| 47 | spinlock_t sp_lock; /* protects all fields */ | 47 | spinlock_t sp_lock; /* protects all fields */ |
| 48 | struct list_head sp_threads; /* idle server threads */ | ||
| 49 | struct list_head sp_sockets; /* pending sockets */ | 48 | struct list_head sp_sockets; /* pending sockets */ |
| 50 | unsigned int sp_nrthreads; /* # of threads in pool */ | 49 | unsigned int sp_nrthreads; /* # of threads in pool */ |
| 51 | struct list_head sp_all_threads; /* all server threads */ | 50 | struct list_head sp_all_threads; /* all server threads */ |
| 52 | struct svc_pool_stats sp_stats; /* statistics on pool operation */ | 51 | struct svc_pool_stats sp_stats; /* statistics on pool operation */ |
| 53 | int sp_task_pending;/* has pending task */ | 52 | #define SP_TASK_PENDING (0) /* still work to do even if no |
| 53 | * xprt is queued. */ | ||
| 54 | unsigned long sp_flags; | ||
| 54 | } ____cacheline_aligned_in_smp; | 55 | } ____cacheline_aligned_in_smp; |
| 55 | 56 | ||
| 56 | /* | 57 | /* |
| @@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val) | |||
| 219 | * processed. | 220 | * processed. |
| 220 | */ | 221 | */ |
| 221 | struct svc_rqst { | 222 | struct svc_rqst { |
| 222 | struct list_head rq_list; /* idle list */ | ||
| 223 | struct list_head rq_all; /* all threads list */ | 223 | struct list_head rq_all; /* all threads list */ |
| 224 | struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ | ||
| 224 | struct svc_xprt * rq_xprt; /* transport ptr */ | 225 | struct svc_xprt * rq_xprt; /* transport ptr */ |
| 225 | 226 | ||
| 226 | struct sockaddr_storage rq_addr; /* peer address */ | 227 | struct sockaddr_storage rq_addr; /* peer address */ |
| @@ -236,7 +237,6 @@ struct svc_rqst { | |||
| 236 | struct svc_cred rq_cred; /* auth info */ | 237 | struct svc_cred rq_cred; /* auth info */ |
| 237 | void * rq_xprt_ctxt; /* transport specific context ptr */ | 238 | void * rq_xprt_ctxt; /* transport specific context ptr */ |
| 238 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ | 239 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ |
| 239 | bool rq_usedeferral; /* use deferral */ | ||
| 240 | 240 | ||
| 241 | size_t rq_xprt_hlen; /* xprt header len */ | 241 | size_t rq_xprt_hlen; /* xprt header len */ |
| 242 | struct xdr_buf rq_arg; | 242 | struct xdr_buf rq_arg; |
| @@ -253,9 +253,17 @@ struct svc_rqst { | |||
| 253 | u32 rq_vers; /* program version */ | 253 | u32 rq_vers; /* program version */ |
| 254 | u32 rq_proc; /* procedure number */ | 254 | u32 rq_proc; /* procedure number */ |
| 255 | u32 rq_prot; /* IP protocol */ | 255 | u32 rq_prot; /* IP protocol */ |
| 256 | unsigned short | 256 | int rq_cachetype; /* catering to nfsd */ |
| 257 | rq_secure : 1; /* secure port */ | 257 | #define RQ_SECURE (0) /* secure port */ |
| 258 | unsigned short rq_local : 1; /* local request */ | 258 | #define RQ_LOCAL (1) /* local request */ |
| 259 | #define RQ_USEDEFERRAL (2) /* use deferral */ | ||
| 260 | #define RQ_DROPME (3) /* drop current reply */ | ||
| 261 | #define RQ_SPLICE_OK (4) /* turned off in gss privacy | ||
| 262 | * to prevent encrypting page | ||
| 263 | * cache pages */ | ||
| 264 | #define RQ_VICTIM (5) /* about to be shut down */ | ||
| 265 | #define RQ_BUSY (6) /* request is busy */ | ||
| 266 | unsigned long rq_flags; /* flags field */ | ||
| 259 | 267 | ||
| 260 | void * rq_argp; /* decoded arguments */ | 268 | void * rq_argp; /* decoded arguments */ |
| 261 | void * rq_resp; /* xdr'd results */ | 269 | void * rq_resp; /* xdr'd results */ |
| @@ -271,16 +279,12 @@ struct svc_rqst { | |||
| 271 | struct cache_req rq_chandle; /* handle passed to caches for | 279 | struct cache_req rq_chandle; /* handle passed to caches for |
| 272 | * request delaying | 280 | * request delaying |
| 273 | */ | 281 | */ |
| 274 | bool rq_dropme; | ||
| 275 | /* Catering to nfsd */ | 282 | /* Catering to nfsd */ |
| 276 | struct auth_domain * rq_client; /* RPC peer info */ | 283 | struct auth_domain * rq_client; /* RPC peer info */ |
| 277 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ | 284 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ |
| 278 | int rq_cachetype; | ||
| 279 | struct svc_cacherep * rq_cacherep; /* cache info */ | 285 | struct svc_cacherep * rq_cacherep; /* cache info */ |
| 280 | bool rq_splice_ok; /* turned off in gss privacy | ||
| 281 | * to prevent encrypting page | ||
| 282 | * cache pages */ | ||
| 283 | struct task_struct *rq_task; /* service thread */ | 286 | struct task_struct *rq_task; /* service thread */ |
| 287 | spinlock_t rq_lock; /* per-request lock */ | ||
| 284 | }; | 288 | }; |
| 285 | 289 | ||
| 286 | #define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) | 290 | #define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) |
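The former per-request booleans (rq_secure, rq_usedeferral, rq_dropme, rq_splice_ok, ...) become bits in the new rq_flags word, so call sites switch from plain assignments to atomic bitops. A hedged sketch of the translation (the wrapper function is illustrative):

    #include <linux/bitops.h>
    #include <linux/sunrpc/svc.h>

    /* Illustrative: was "rqstp->rq_splice_ok = false;" before this change. */
    static void example_disable_splice(struct svc_rqst *rqstp)
    {
            clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

            if (test_bit(RQ_SECURE, &rqstp->rq_flags))
                    ;       /* request arrived on a privileged port */
    }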
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ce6e4182a5b2..79f6f8f3dc0a 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
| @@ -63,10 +63,9 @@ struct svc_xprt { | |||
| 63 | #define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ | 63 | #define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ |
| 64 | #define XPT_DEFERRED 8 /* deferred request pending */ | 64 | #define XPT_DEFERRED 8 /* deferred request pending */ |
| 65 | #define XPT_OLD 9 /* used for xprt aging mark+sweep */ | 65 | #define XPT_OLD 9 /* used for xprt aging mark+sweep */ |
| 66 | #define XPT_DETACHED 10 /* detached from tempsocks list */ | 66 | #define XPT_LISTENER 10 /* listening endpoint */ |
| 67 | #define XPT_LISTENER 11 /* listening endpoint */ | 67 | #define XPT_CACHE_AUTH 11 /* cache auth info */ |
| 68 | #define XPT_CACHE_AUTH 12 /* cache auth info */ | 68 | #define XPT_LOCAL 12 /* connection from loopback interface */ |
| 69 | #define XPT_LOCAL 13 /* connection from loopback interface */ | ||
| 70 | 69 | ||
| 71 | struct svc_serv *xpt_server; /* service for transport */ | 70 | struct svc_serv *xpt_server; /* service for transport */ |
| 72 | atomic_t xpt_reserved; /* space on outq that is rsvd */ | 71 | atomic_t xpt_reserved; /* space on outq that is rsvd */ |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index cf391eef2e6d..9d27ac45b909 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -239,6 +239,9 @@ struct rpc_xprt { | |||
| 239 | struct net *xprt_net; | 239 | struct net *xprt_net; |
| 240 | const char *servername; | 240 | const char *servername; |
| 241 | const char *address_strings[RPC_DISPLAY_MAX]; | 241 | const char *address_strings[RPC_DISPLAY_MAX]; |
| 242 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
| 243 | struct dentry *debugfs; /* debugfs directory */ | ||
| 244 | #endif | ||
| 242 | }; | 245 | }; |
| 243 | 246 | ||
| 244 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | 247 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 1ad36cc25b2e..7591788e9fbf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
| @@ -17,6 +17,65 @@ void cleanup_socket_xprt(void); | |||
| 17 | #define RPC_DEF_MIN_RESVPORT (665U) | 17 | #define RPC_DEF_MIN_RESVPORT (665U) |
| 18 | #define RPC_DEF_MAX_RESVPORT (1023U) | 18 | #define RPC_DEF_MAX_RESVPORT (1023U) |
| 19 | 19 | ||
| 20 | struct sock_xprt { | ||
| 21 | struct rpc_xprt xprt; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Network layer | ||
| 25 | */ | ||
| 26 | struct socket * sock; | ||
| 27 | struct sock * inet; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * State of TCP reply receive | ||
| 31 | */ | ||
| 32 | __be32 tcp_fraghdr, | ||
| 33 | tcp_xid, | ||
| 34 | tcp_calldir; | ||
| 35 | |||
| 36 | u32 tcp_offset, | ||
| 37 | tcp_reclen; | ||
| 38 | |||
| 39 | unsigned long tcp_copied, | ||
| 40 | tcp_flags; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Connection of transports | ||
| 44 | */ | ||
| 45 | struct delayed_work connect_worker; | ||
| 46 | struct sockaddr_storage srcaddr; | ||
| 47 | unsigned short srcport; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * UDP socket buffer size parameters | ||
| 51 | */ | ||
| 52 | size_t rcvsize, | ||
| 53 | sndsize; | ||
| 54 | |||
| 55 | /* | ||
| 56 | * Saved socket callback addresses | ||
| 57 | */ | ||
| 58 | void (*old_data_ready)(struct sock *); | ||
| 59 | void (*old_state_change)(struct sock *); | ||
| 60 | void (*old_write_space)(struct sock *); | ||
| 61 | void (*old_error_report)(struct sock *); | ||
| 62 | }; | ||
| 63 | |||
| 64 | /* | ||
| 65 | * TCP receive state flags | ||
| 66 | */ | ||
| 67 | #define TCP_RCV_LAST_FRAG (1UL << 0) | ||
| 68 | #define TCP_RCV_COPY_FRAGHDR (1UL << 1) | ||
| 69 | #define TCP_RCV_COPY_XID (1UL << 2) | ||
| 70 | #define TCP_RCV_COPY_DATA (1UL << 3) | ||
| 71 | #define TCP_RCV_READ_CALLDIR (1UL << 4) | ||
| 72 | #define TCP_RCV_COPY_CALLDIR (1UL << 5) | ||
| 73 | |||
| 74 | /* | ||
| 75 | * TCP RPC flags | ||
| 76 | */ | ||
| 77 | #define TCP_RPC_REPLY (1UL << 6) | ||
| 78 | |||
| 20 | #endif /* __KERNEL__ */ | 79 | #endif /* __KERNEL__ */ |
| 21 | 80 | ||
| 22 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ | 81 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 37a585beef5c..34e8b60ab973 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -102,14 +102,6 @@ union swap_header { | |||
| 102 | } info; | 102 | } info; |
| 103 | }; | 103 | }; |
| 104 | 104 | ||
| 105 | /* A swap entry has to fit into a "unsigned long", as | ||
| 106 | * the entry is hidden in the "index" field of the | ||
| 107 | * swapper address space. | ||
| 108 | */ | ||
| 109 | typedef struct { | ||
| 110 | unsigned long val; | ||
| 111 | } swp_entry_t; | ||
| 112 | |||
| 113 | /* | 105 | /* |
| 114 | * current->reclaim_state points to one of these when a task is running | 106 | * current->reclaim_state points to one of these when a task is running |
| 115 | * memory reclaim | 107 | * memory reclaim |
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | #ifndef __LINUX_SWAP_CGROUP_H | ||
| 2 | #define __LINUX_SWAP_CGROUP_H | ||
| 3 | |||
| 4 | #include <linux/swap.h> | ||
| 5 | |||
| 6 | #ifdef CONFIG_MEMCG_SWAP | ||
| 7 | |||
| 8 | extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | ||
| 9 | unsigned short old, unsigned short new); | ||
| 10 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); | ||
| 11 | extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); | ||
| 12 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | ||
| 13 | extern void swap_cgroup_swapoff(int type); | ||
| 14 | |||
| 15 | #else | ||
| 16 | |||
| 17 | static inline | ||
| 18 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) | ||
| 19 | { | ||
| 20 | return 0; | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline | ||
| 24 | unsigned short lookup_swap_cgroup_id(swp_entry_t ent) | ||
| 25 | { | ||
| 26 | return 0; | ||
| 27 | } | ||
| 28 | |||
| 29 | static inline int | ||
| 30 | swap_cgroup_swapon(int type, unsigned long max_pages) | ||
| 31 | { | ||
| 32 | return 0; | ||
| 33 | } | ||
| 34 | |||
| 35 | static inline void swap_cgroup_swapoff(int type) | ||
| 36 | { | ||
| 37 | return; | ||
| 38 | } | ||
| 39 | |||
| 40 | #endif /* CONFIG_MEMCG_SWAP */ | ||
| 41 | |||
| 42 | #endif /* __LINUX_SWAP_CGROUP_H */ | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357cc..85893d744901 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -25,7 +25,7 @@ struct linux_dirent64; | |||
| 25 | struct list_head; | 25 | struct list_head; |
| 26 | struct mmap_arg_struct; | 26 | struct mmap_arg_struct; |
| 27 | struct msgbuf; | 27 | struct msgbuf; |
| 28 | struct msghdr; | 28 | struct user_msghdr; |
| 29 | struct mmsghdr; | 29 | struct mmsghdr; |
| 30 | struct msqid_ds; | 30 | struct msqid_ds; |
| 31 | struct new_utsname; | 31 | struct new_utsname; |
| @@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); | |||
| 601 | asmlinkage long sys_send(int, void __user *, size_t, unsigned); | 601 | asmlinkage long sys_send(int, void __user *, size_t, unsigned); |
| 602 | asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, | 602 | asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, |
| 603 | struct sockaddr __user *, int); | 603 | struct sockaddr __user *, int); |
| 604 | asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); | 604 | asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
| 605 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, | 605 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, |
| 606 | unsigned int vlen, unsigned flags); | 606 | unsigned int vlen, unsigned flags); |
| 607 | asmlinkage long sys_recv(int, void __user *, size_t, unsigned); | 607 | asmlinkage long sys_recv(int, void __user *, size_t, unsigned); |
| 608 | asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, | 608 | asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, |
| 609 | struct sockaddr __user *, int __user *); | 609 | struct sockaddr __user *, int __user *); |
| 610 | asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); | 610 | asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); |
| 611 | asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, | 611 | asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, |
| 612 | unsigned int vlen, unsigned flags, | 612 | unsigned int vlen, unsigned flags, |
| 613 | struct timespec __user *timeout); | 613 | struct timespec __user *timeout); |
| @@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, | |||
| 877 | asmlinkage long sys_getrandom(char __user *buf, size_t count, | 877 | asmlinkage long sys_getrandom(char __user *buf, size_t count, |
| 878 | unsigned int flags); | 878 | unsigned int flags); |
| 879 | asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); | 879 | asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); |
| 880 | |||
| 881 | asmlinkage long sys_execveat(int dfd, const char __user *filename, | ||
| 882 | const char __user *const __user *argv, | ||
| 883 | const char __user *const __user *envp, int flags); | ||
| 884 | |||
| 880 | #endif | 885 | #endif |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index f97d0dbb59fa..ddad16148bd6 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
| @@ -70,6 +70,8 @@ struct attribute_group { | |||
| 70 | * for examples.. | 70 | * for examples.. |
| 71 | */ | 71 | */ |
| 72 | 72 | ||
| 73 | #define SYSFS_PREALLOC 010000 | ||
| 74 | |||
| 73 | #define __ATTR(_name, _mode, _show, _store) { \ | 75 | #define __ATTR(_name, _mode, _show, _store) { \ |
| 74 | .attr = {.name = __stringify(_name), \ | 76 | .attr = {.name = __stringify(_name), \ |
| 75 | .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ | 77 | .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ |
| @@ -77,6 +79,13 @@ struct attribute_group { | |||
| 77 | .store = _store, \ | 79 | .store = _store, \ |
| 78 | } | 80 | } |
| 79 | 81 | ||
| 82 | #define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ | ||
| 83 | .attr = {.name = __stringify(_name), \ | ||
| 84 | .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ | ||
| 85 | .show = _show, \ | ||
| 86 | .store = _store, \ | ||
| 87 | } | ||
| 88 | |||
| 80 | #define __ATTR_RO(_name) { \ | 89 | #define __ATTR_RO(_name) { \ |
| 81 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ | 90 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ |
| 82 | .show = _name##_show, \ | 91 | .show = _name##_show, \ |
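SYSFS_PREALLOC is OR'ed into attr.mode so that sysfs can set up the attribute's transfer buffer ahead of the read/write path rather than allocating it on demand. A hedged sketch of declaring such an attribute with the new macro, reusing the usual device-attribute pattern (names and values are illustrative):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t state_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "ok\n");        /* illustrative value */
    }

    /* Same layout as __ATTR, but with SYSFS_PREALLOC folded into the mode. */
    static struct device_attribute dev_attr_state =
            __ATTR_PREALLOC(state, S_IRUGO, state_show, NULL);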
diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 98a3153c0f96..4b7b875a7ce1 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h | |||
| @@ -49,4 +49,13 @@ | |||
| 49 | 49 | ||
| 50 | int do_syslog(int type, char __user *buf, int count, bool from_file); | 50 | int do_syslog(int type, char __user *buf, int count, bool from_file); |
| 51 | 51 | ||
| 52 | #ifdef CONFIG_PRINTK | ||
| 53 | int check_syslog_permissions(int type, bool from_file); | ||
| 54 | #else | ||
| 55 | static inline int check_syslog_permissions(int type, bool from_file) | ||
| 56 | { | ||
| 57 | return 0; | ||
| 58 | } | ||
| 59 | #endif | ||
| 60 | |||
| 52 | #endif /* _LINUX_SYSLOG_H */ | 61 | #endif /* _LINUX_SYSLOG_H */ |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa8..67309ece0772 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -130,7 +130,7 @@ struct tcp_sock { | |||
| 130 | /* inet_connection_sock has to be the first member of tcp_sock */ | 130 | /* inet_connection_sock has to be the first member of tcp_sock */ |
| 131 | struct inet_connection_sock inet_conn; | 131 | struct inet_connection_sock inet_conn; |
| 132 | u16 tcp_header_len; /* Bytes of tcp header to send */ | 132 | u16 tcp_header_len; /* Bytes of tcp header to send */ |
| 133 | u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ | 133 | u16 gso_segs; /* Max number of segs per GSO packet */ |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * Header prediction flags | 136 | * Header prediction flags |
| @@ -162,7 +162,7 @@ struct tcp_sock { | |||
| 162 | struct { | 162 | struct { |
| 163 | struct sk_buff_head prequeue; | 163 | struct sk_buff_head prequeue; |
| 164 | struct task_struct *task; | 164 | struct task_struct *task; |
| 165 | struct iovec *iov; | 165 | struct msghdr *msg; |
| 166 | int memory; | 166 | int memory; |
| 167 | int len; | 167 | int len; |
| 168 | } ucopy; | 168 | } ucopy; |
| @@ -204,10 +204,10 @@ struct tcp_sock { | |||
| 204 | 204 | ||
| 205 | u16 urg_data; /* Saved octet of OOB data and control flags */ | 205 | u16 urg_data; /* Saved octet of OOB data and control flags */ |
| 206 | u8 ecn_flags; /* ECN status bits. */ | 206 | u8 ecn_flags; /* ECN status bits. */ |
| 207 | u8 reordering; /* Packet reordering metric. */ | 207 | u8 keepalive_probes; /* num of allowed keep alive probes */ |
| 208 | u32 reordering; /* Packet reordering metric. */ | ||
| 208 | u32 snd_up; /* Urgent pointer */ | 209 | u32 snd_up; /* Urgent pointer */ |
| 209 | 210 | ||
| 210 | u8 keepalive_probes; /* num of allowed keep alive probes */ | ||
| 211 | /* | 211 | /* |
| 212 | * Options received (usually on last packet, some only on SYN packets). | 212 | * Options received (usually on last packet, some only on SYN packets). |
| 213 | */ | 213 | */ |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index ef90838b36a0..c611a02fbc51 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -29,10 +29,10 @@ | |||
| 29 | #include <linux/idr.h> | 29 | #include <linux/idr.h> |
| 30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
| 31 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
| 32 | #include <uapi/linux/thermal.h> | ||
| 32 | 33 | ||
| 33 | #define THERMAL_TRIPS_NONE -1 | 34 | #define THERMAL_TRIPS_NONE -1 |
| 34 | #define THERMAL_MAX_TRIPS 12 | 35 | #define THERMAL_MAX_TRIPS 12 |
| 35 | #define THERMAL_NAME_LENGTH 20 | ||
| 36 | 36 | ||
| 37 | /* invalid cooling state */ | 37 | /* invalid cooling state */ |
| 38 | #define THERMAL_CSTATE_INVALID -1UL | 38 | #define THERMAL_CSTATE_INVALID -1UL |
| @@ -49,11 +49,6 @@ | |||
| 49 | #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) | 49 | #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) |
| 50 | #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) | 50 | #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) |
| 51 | 51 | ||
| 52 | /* Adding event notification support elements */ | ||
| 53 | #define THERMAL_GENL_FAMILY_NAME "thermal_event" | ||
| 54 | #define THERMAL_GENL_VERSION 0x01 | ||
| 55 | #define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" | ||
| 56 | |||
| 57 | /* Default Thermal Governor */ | 52 | /* Default Thermal Governor */ |
| 58 | #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) | 53 | #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) |
| 59 | #define DEFAULT_THERMAL_GOVERNOR "step_wise" | 54 | #define DEFAULT_THERMAL_GOVERNOR "step_wise" |
| @@ -86,30 +81,6 @@ enum thermal_trend { | |||
| 86 | THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ | 81 | THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ |
| 87 | }; | 82 | }; |
| 88 | 83 | ||
| 89 | /* Events supported by Thermal Netlink */ | ||
| 90 | enum events { | ||
| 91 | THERMAL_AUX0, | ||
| 92 | THERMAL_AUX1, | ||
| 93 | THERMAL_CRITICAL, | ||
| 94 | THERMAL_DEV_FAULT, | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* attributes of thermal_genl_family */ | ||
| 98 | enum { | ||
| 99 | THERMAL_GENL_ATTR_UNSPEC, | ||
| 100 | THERMAL_GENL_ATTR_EVENT, | ||
| 101 | __THERMAL_GENL_ATTR_MAX, | ||
| 102 | }; | ||
| 103 | #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) | ||
| 104 | |||
| 105 | /* commands supported by the thermal_genl_family */ | ||
| 106 | enum { | ||
| 107 | THERMAL_GENL_CMD_UNSPEC, | ||
| 108 | THERMAL_GENL_CMD_EVENT, | ||
| 109 | __THERMAL_GENL_CMD_MAX, | ||
| 110 | }; | ||
| 111 | #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) | ||
| 112 | |||
| 113 | struct thermal_zone_device_ops { | 84 | struct thermal_zone_device_ops { |
| 114 | int (*bind) (struct thermal_zone_device *, | 85 | int (*bind) (struct thermal_zone_device *, |
| 115 | struct thermal_cooling_device *); | 86 | struct thermal_cooling_device *); |
| @@ -289,19 +260,49 @@ struct thermal_genl_event { | |||
| 289 | enum events event; | 260 | enum events event; |
| 290 | }; | 261 | }; |
| 291 | 262 | ||
| 263 | /** | ||
| 264 | * struct thermal_zone_of_device_ops - callbacks for handling DT-based zones | ||
| 265 | * | ||
| 266 | * Mandatory: | ||
| 267 | * @get_temp: a pointer to a function that reads the sensor temperature. | ||
| 268 | * | ||
| 269 | * Optional: | ||
| 270 | * @get_trend: a pointer to a function that reads the sensor temperature trend. | ||
| 271 | * @set_emul_temp: a pointer to a function that sets sensor emulated | ||
| 272 | * temperature. | ||
| 273 | */ | ||
| 274 | struct thermal_zone_of_device_ops { | ||
| 275 | int (*get_temp)(void *, long *); | ||
| 276 | int (*get_trend)(void *, long *); | ||
| 277 | int (*set_emul_temp)(void *, unsigned long); | ||
| 278 | }; | ||
| 279 | |||
| 280 | /** | ||
| 281 | * struct thermal_trip - representation of a point in temperature domain | ||
| 282 | * @np: pointer to struct device_node that this trip point was created from | ||
| 283 | * @temperature: temperature value in millicelsius | ||
| 284 | * @hysteresis: relative hysteresis in millicelsius | ||
| 285 | * @type: trip point type | ||
| 286 | */ | ||
| 287 | |||
| 288 | struct thermal_trip { | ||
| 289 | struct device_node *np; | ||
| 290 | unsigned long int temperature; | ||
| 291 | unsigned long int hysteresis; | ||
| 292 | enum thermal_trip_type type; | ||
| 293 | }; | ||
| 294 | |||
| 292 | /* Function declarations */ | 295 | /* Function declarations */ |
| 293 | #ifdef CONFIG_THERMAL_OF | 296 | #ifdef CONFIG_THERMAL_OF |
| 294 | struct thermal_zone_device * | 297 | struct thermal_zone_device * |
| 295 | thermal_zone_of_sensor_register(struct device *dev, int id, | 298 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, |
| 296 | void *data, int (*get_temp)(void *, long *), | 299 | const struct thermal_zone_of_device_ops *ops); |
| 297 | int (*get_trend)(void *, long *)); | ||
| 298 | void thermal_zone_of_sensor_unregister(struct device *dev, | 300 | void thermal_zone_of_sensor_unregister(struct device *dev, |
| 299 | struct thermal_zone_device *tz); | 301 | struct thermal_zone_device *tz); |
| 300 | #else | 302 | #else |
| 301 | static inline struct thermal_zone_device * | 303 | static inline struct thermal_zone_device * |
| 302 | thermal_zone_of_sensor_register(struct device *dev, int id, | 304 | thermal_zone_of_sensor_register(struct device *dev, int id, void *data, |
| 303 | void *data, int (*get_temp)(void *, long *), | 305 | const struct thermal_zone_of_device_ops *ops) |
| 304 | int (*get_trend)(void *, long *)) | ||
| 305 | { | 306 | { |
| 306 | return NULL; | 307 | return NULL; |
| 307 | } | 308 | } |
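thermal_zone_of_sensor_register() now takes a thermal_zone_of_device_ops instead of individual callback pointers. A hedged sketch of how a DT sensor driver adapts (the driver-private data and temperature value are illustrative):

    #include <linux/thermal.h>

    static int example_get_temp(void *data, long *temp)
    {
            *temp = 42000;          /* millicelsius, from the (fictional) sensor */
            return 0;
    }

    static const struct thermal_zone_of_device_ops example_ops = {
            .get_temp = example_get_temp,
            /* .get_trend and .set_emul_temp remain optional */
    };

    /* In probe():
     *      tz = thermal_zone_of_sensor_register(dev, 0, priv, &example_ops);
     * and check the returned pointer for errors before use.
     */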
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e3..cfaf5a1d4bad 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #ifndef _LINUX_TRACE_SEQ_H | 1 | #ifndef _LINUX_TRACE_SEQ_H |
| 2 | #define _LINUX_TRACE_SEQ_H | 2 | #define _LINUX_TRACE_SEQ_H |
| 3 | 3 | ||
| 4 | #include <linux/fs.h> | 4 | #include <linux/seq_buf.h> |
| 5 | 5 | ||
| 6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
| 7 | 7 | ||
| @@ -12,20 +12,36 @@ | |||
| 12 | 12 | ||
| 13 | struct trace_seq { | 13 | struct trace_seq { |
| 14 | unsigned char buffer[PAGE_SIZE]; | 14 | unsigned char buffer[PAGE_SIZE]; |
| 15 | unsigned int len; | 15 | struct seq_buf seq; |
| 16 | unsigned int readpos; | ||
| 17 | int full; | 16 | int full; |
| 18 | }; | 17 | }; |
| 19 | 18 | ||
| 20 | static inline void | 19 | static inline void |
| 21 | trace_seq_init(struct trace_seq *s) | 20 | trace_seq_init(struct trace_seq *s) |
| 22 | { | 21 | { |
| 23 | s->len = 0; | 22 | seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); |
| 24 | s->readpos = 0; | ||
| 25 | s->full = 0; | 23 | s->full = 0; |
| 26 | } | 24 | } |
| 27 | 25 | ||
| 28 | /** | 26 | /** |
| 27 | * trace_seq_used - amount of actual data written to buffer | ||
| 28 | * @s: trace sequence descriptor | ||
| 29 | * | ||
| 30 | * Returns the amount of data written to the buffer. | ||
| 31 | * | ||
| 32 | * IMPORTANT! | ||
| 33 | * | ||
| 34 | * Use this instead of @s->seq.len if you need to pass the amount | ||
| 35 | * of data from the buffer to another buffer (userspace, or what not). | ||
| 36 | * The @s->seq.len on overflow is bigger than the buffer size and | ||
| 37 | * using it can cause access to undefined memory. | ||
| 38 | */ | ||
| 39 | static inline int trace_seq_used(struct trace_seq *s) | ||
| 40 | { | ||
| 41 | return seq_buf_used(&s->seq); | ||
| 42 | } | ||
| 43 | |||
| 44 | /** | ||
| 29 | * trace_seq_buffer_ptr - return pointer to next location in buffer | 45 | * trace_seq_buffer_ptr - return pointer to next location in buffer |
| 30 | * @s: trace sequence descriptor | 46 | * @s: trace sequence descriptor |
| 31 | * | 47 | * |
| @@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s) | |||
| 37 | static inline unsigned char * | 53 | static inline unsigned char * |
| 38 | trace_seq_buffer_ptr(struct trace_seq *s) | 54 | trace_seq_buffer_ptr(struct trace_seq *s) |
| 39 | { | 55 | { |
| 40 | return s->buffer + s->len; | 56 | return s->buffer + seq_buf_used(&s->seq); |
| 57 | } | ||
| 58 | |||
| 59 | /** | ||
| 60 | * trace_seq_has_overflowed - return true if the trace_seq took too much | ||
| 61 | * @s: trace sequence descriptor | ||
| 62 | * | ||
| 63 | * Returns true if too much data was added to the trace_seq and it is | ||
| 64 | * now full and will not take anymore. | ||
| 65 | */ | ||
| 66 | static inline bool trace_seq_has_overflowed(struct trace_seq *s) | ||
| 67 | { | ||
| 68 | return s->full || seq_buf_has_overflowed(&s->seq); | ||
| 41 | } | 69 | } |
| 42 | 70 | ||
| 43 | /* | 71 | /* |
| @@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s) | |||
| 45 | */ | 73 | */ |
| 46 | #ifdef CONFIG_TRACING | 74 | #ifdef CONFIG_TRACING |
| 47 | extern __printf(2, 3) | 75 | extern __printf(2, 3) |
| 48 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 76 | void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
| 49 | extern __printf(2, 0) | 77 | extern __printf(2, 0) |
| 50 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); | 78 | void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); |
| 51 | extern int | 79 | extern void |
| 52 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | 80 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); |
| 53 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); | 81 | extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); |
| 54 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 82 | extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
| 55 | int cnt); | 83 | int cnt); |
| 56 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | 84 | extern void trace_seq_puts(struct trace_seq *s, const char *str); |
| 57 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | 85 | extern void trace_seq_putc(struct trace_seq *s, unsigned char c); |
| 58 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); | 86 | extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); |
| 59 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 87 | extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
| 60 | unsigned int len); | 88 | unsigned int len); |
| 61 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); | 89 | extern int trace_seq_path(struct trace_seq *s, const struct path *path); |
| 62 | 90 | ||
| 63 | extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 91 | extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
| 64 | int nmaskbits); | 92 | int nmaskbits); |
| 65 | 93 | ||
| 66 | #else /* CONFIG_TRACING */ | 94 | #else /* CONFIG_TRACING */ |
| 67 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 95 | static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
| 68 | { | 96 | { |
| 69 | return 0; | ||
| 70 | } | 97 | } |
| 71 | static inline int | 98 | static inline void |
| 72 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | 99 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) |
| 73 | { | 100 | { |
| 74 | return 0; | ||
| 75 | } | 101 | } |
| 76 | 102 | ||
| 77 | static inline int | 103 | static inline void |
| 78 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 104 | trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
| 79 | int nmaskbits) | 105 | int nmaskbits) |
| 80 | { | 106 | { |
| 81 | return 0; | ||
| 82 | } | 107 | } |
| 83 | 108 | ||
| 84 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) | 109 | static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
| @@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | |||
| 90 | { | 115 | { |
| 91 | return 0; | 116 | return 0; |
| 92 | } | 117 | } |
| 93 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | 118 | static inline void trace_seq_puts(struct trace_seq *s, const char *str) |
| 94 | { | 119 | { |
| 95 | return 0; | ||
| 96 | } | 120 | } |
| 97 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | 121 | static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) |
| 98 | { | 122 | { |
| 99 | return 0; | ||
| 100 | } | 123 | } |
| 101 | static inline int | 124 | static inline void |
| 102 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | 125 | trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
| 103 | { | 126 | { |
| 104 | return 0; | ||
| 105 | } | 127 | } |
| 106 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 128 | static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
| 107 | unsigned int len) | 129 | unsigned int len) |
| 108 | { | 130 | { |
| 109 | return 0; | ||
| 110 | } | 131 | } |
| 111 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) | 132 | static inline int trace_seq_path(struct trace_seq *s, const struct path *path) |
| 112 | { | 133 | { |
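Because the trace_seq write helpers now return void, callers no longer check each call individually; the new idiom is to write everything and test trace_seq_has_overflowed() once. A hedged sketch of an event output handler (the handler itself is illustrative; print_line_t comes from the trace event headers):

    #include <linux/ftrace_event.h>
    #include <linux/trace_seq.h>

    /* Illustrative print handler using the new void-returning API. */
    static enum print_line_t example_print(struct trace_seq *s, u64 value)
    {
            trace_seq_printf(s, "value=%llu", (unsigned long long)value);
            trace_seq_putc(s, '\n');

            /* One overflow check replaces per-call return-value tests. */
            return trace_seq_has_overflowed(s) ?
                    TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
    }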
diff --git a/include/linux/tty.h b/include/linux/tty.h index 5171ef8f7b85..7d66ae508e5c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -284,7 +284,7 @@ struct tty_struct { | |||
| 284 | 284 | ||
| 285 | #define N_TTY_BUF_SIZE 4096 | 285 | #define N_TTY_BUF_SIZE 4096 |
| 286 | 286 | ||
| 287 | unsigned char closing:1; | 287 | int closing; |
| 288 | unsigned char *write_buf; | 288 | unsigned char *write_buf; |
| 289 | int write_cnt; | 289 | int write_cnt; |
| 290 | /* If the tty has a pending do_SAK, queue it here - akpm */ | 290 | /* If the tty has a pending do_SAK, queue it here - akpm */ |
| @@ -316,12 +316,10 @@ struct tty_file_private { | |||
| 316 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ | 316 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ |
| 317 | #define TTY_DEBUG 4 /* Debugging */ | 317 | #define TTY_DEBUG 4 /* Debugging */ |
| 318 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ | 318 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ |
| 319 | #define TTY_CLOSING 7 /* ->close() in progress */ | ||
| 320 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | 319 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ |
| 321 | #define TTY_PTY_LOCK 16 /* pty private */ | 320 | #define TTY_PTY_LOCK 16 /* pty private */ |
| 322 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ | 321 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ |
| 323 | #define TTY_HUPPED 18 /* Post driver->hangup() */ | 322 | #define TTY_HUPPED 18 /* Post driver->hangup() */ |
| 324 | #define TTY_HUPPING 21 /* ->hangup() in progress */ | ||
| 325 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ | 323 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ |
| 326 | 324 | ||
| 327 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) | 325 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) |
| @@ -437,14 +435,13 @@ extern int is_ignored(int sig); | |||
| 437 | extern int tty_signal(int sig, struct tty_struct *tty); | 435 | extern int tty_signal(int sig, struct tty_struct *tty); |
| 438 | extern void tty_hangup(struct tty_struct *tty); | 436 | extern void tty_hangup(struct tty_struct *tty); |
| 439 | extern void tty_vhangup(struct tty_struct *tty); | 437 | extern void tty_vhangup(struct tty_struct *tty); |
| 440 | extern void tty_unhangup(struct file *filp); | ||
| 441 | extern int tty_hung_up_p(struct file *filp); | 438 | extern int tty_hung_up_p(struct file *filp); |
| 442 | extern void do_SAK(struct tty_struct *tty); | 439 | extern void do_SAK(struct tty_struct *tty); |
| 443 | extern void __do_SAK(struct tty_struct *tty); | 440 | extern void __do_SAK(struct tty_struct *tty); |
| 444 | extern void no_tty(void); | 441 | extern void no_tty(void); |
| 445 | extern void tty_flush_to_ldisc(struct tty_struct *tty); | 442 | extern void tty_flush_to_ldisc(struct tty_struct *tty); |
| 446 | extern void tty_buffer_free_all(struct tty_port *port); | 443 | extern void tty_buffer_free_all(struct tty_port *port); |
| 447 | extern void tty_buffer_flush(struct tty_struct *tty); | 444 | extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); |
| 448 | extern void tty_buffer_init(struct tty_port *port); | 445 | extern void tty_buffer_init(struct tty_port *port); |
| 449 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); | 446 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); |
| 450 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); | 447 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); |
| @@ -498,9 +495,6 @@ extern int tty_init_termios(struct tty_struct *tty); | |||
| 498 | extern int tty_standard_install(struct tty_driver *driver, | 495 | extern int tty_standard_install(struct tty_driver *driver, |
| 499 | struct tty_struct *tty); | 496 | struct tty_struct *tty); |
| 500 | 497 | ||
| 501 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | ||
| 502 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | ||
| 503 | |||
| 504 | extern struct mutex tty_mutex; | 498 | extern struct mutex tty_mutex; |
| 505 | extern spinlock_t tty_files_lock; | 499 | extern spinlock_t tty_files_lock; |
| 506 | 500 | ||
| @@ -562,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); | |||
| 562 | extern int tty_unregister_ldisc(int disc); | 556 | extern int tty_unregister_ldisc(int disc); |
| 563 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); | 557 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); |
| 564 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); | 558 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); |
| 565 | extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); | 559 | extern void tty_ldisc_release(struct tty_struct *tty); |
| 566 | extern void tty_ldisc_init(struct tty_struct *tty); | 560 | extern void tty_ldisc_init(struct tty_struct *tty); |
| 567 | extern void tty_ldisc_deinit(struct tty_struct *tty); | 561 | extern void tty_ldisc_deinit(struct tty_struct *tty); |
| 568 | extern void tty_ldisc_begin(void); | 562 | extern void tty_ldisc_begin(void); |
| @@ -623,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | |||
| 623 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, | 617 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, |
| 624 | unsigned int cmd, unsigned long arg); | 618 | unsigned int cmd, unsigned long arg); |
| 625 | 619 | ||
| 626 | /* serial.c */ | ||
| 627 | |||
| 628 | extern void serial_console_init(void); | ||
| 629 | |||
| 630 | /* pcxx.c */ | ||
| 631 | |||
| 632 | extern int pcxe_open(struct tty_struct *tty, struct file *filp); | ||
| 633 | |||
| 634 | /* vt.c */ | 620 | /* vt.c */ |
| 635 | 621 | ||
| 636 | extern int vt_ioctl(struct tty_struct *tty, | 622 | extern int vt_ioctl(struct tty_struct *tty, |
| @@ -643,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty, | |||
| 643 | /* functions for preparation of BKL removal */ | 629 | /* functions for preparation of BKL removal */ |
| 644 | extern void __lockfunc tty_lock(struct tty_struct *tty); | 630 | extern void __lockfunc tty_lock(struct tty_struct *tty); |
| 645 | extern void __lockfunc tty_unlock(struct tty_struct *tty); | 631 | extern void __lockfunc tty_unlock(struct tty_struct *tty); |
| 646 | extern void __lockfunc tty_lock_pair(struct tty_struct *tty, | 632 | extern void __lockfunc tty_lock_slave(struct tty_struct *tty); |
| 647 | struct tty_struct *tty2); | 633 | extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); |
| 648 | extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, | 634 | extern void tty_set_lock_subclass(struct tty_struct *tty); |
| 649 | struct tty_struct *tty2); | ||
| 650 | |||
| 651 | /* | 635 | /* |
| 652 | * this shall be called only from where BTM is held (like close) | 636 | * this shall be called only from where BTM is held (like close) |
| 653 | * | 637 | * |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 9b1581414cd4..1c5e453f7ea9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -31,6 +31,7 @@ struct iov_iter { | |||
| 31 | size_t count; | 31 | size_t count; |
| 32 | union { | 32 | union { |
| 33 | const struct iovec *iov; | 33 | const struct iovec *iov; |
| 34 | const struct kvec *kvec; | ||
| 34 | const struct bio_vec *bvec; | 35 | const struct bio_vec *bvec; |
| 35 | }; | 36 | }; |
| 36 | unsigned long nr_segs; | 37 | unsigned long nr_segs; |
| @@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |||
| 82 | struct iov_iter *i); | 83 | struct iov_iter *i); |
| 83 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); | 84 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); |
| 84 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); | 85 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); |
| 86 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); | ||
| 85 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); | 87 | size_t iov_iter_zero(size_t bytes, struct iov_iter *); |
| 86 | unsigned long iov_iter_alignment(const struct iov_iter *i); | 88 | unsigned long iov_iter_alignment(const struct iov_iter *i); |
| 87 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, | 89 | void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, |
| 88 | unsigned long nr_segs, size_t count); | 90 | unsigned long nr_segs, size_t count); |
| 91 | void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, | ||
| 92 | unsigned long nr_segs, size_t count); | ||
| 89 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, | 93 | ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, |
| 90 | size_t maxsize, unsigned maxpages, size_t *start); | 94 | size_t maxsize, unsigned maxpages, size_t *start); |
| 91 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | 95 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, |
| @@ -97,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i) | |||
| 97 | return i->count; | 101 | return i->count; |
| 98 | } | 102 | } |
| 99 | 103 | ||
| 104 | static inline bool iter_is_iovec(struct iov_iter *i) | ||
| 105 | { | ||
| 106 | return !(i->type & (ITER_BVEC | ITER_KVEC)); | ||
| 107 | } | ||
| 108 | |||
| 100 | /* | 109 | /* |
| 101 | * Cap the iov_iter by given limit; note that the second argument is | 110 | * Cap the iov_iter by given limit; note that the second argument is |
| 102 | * *not* the new size - it's upper limit for such. Passing it a value | 111 | * *not* the new size - it's upper limit for such. Passing it a value |
| @@ -123,9 +132,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) | |||
| 123 | { | 132 | { |
| 124 | i->count = count; | 133 | i->count = count; |
| 125 | } | 134 | } |
| 135 | size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | ||
| 136 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); | ||
| 126 | 137 | ||
| 127 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); | 138 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); |
| 128 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); | ||
| 129 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | 139 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
| 130 | int offset, int len); | 140 | int offset, int len); |
| 131 | int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | 141 | int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, |
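iov_iter_kvec() lets kernel callers build an iterator over kernel memory (struct kvec) rather than user iovecs, and iter_is_iovec() distinguishes the two at copy time. A hedged sketch, assuming the READ/ITER_KVEC direction-flag convention used by this series (the wrapper function is illustrative):

    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Illustrative: copy len bytes from src into a kernel buffer via iov_iter. */
    static size_t example_copy_into_kbuf(void *src, void *dst, size_t len)
    {
            struct kvec kv = { .iov_base = dst, .iov_len = len };
            struct iov_iter iter;

            /* ITER_KVEC marks the iterator as kernel memory; READ means the
             * data flows into the iterator's buffers. */
            iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
            return copy_to_iter(src, len, &iter);
    }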
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index baa81718d985..32c0e83d6239 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h | |||
| @@ -35,7 +35,7 @@ struct uio_map; | |||
| 35 | struct uio_mem { | 35 | struct uio_mem { |
| 36 | const char *name; | 36 | const char *name; |
| 37 | phys_addr_t addr; | 37 | phys_addr_t addr; |
| 38 | unsigned long size; | 38 | resource_size_t size; |
| 39 | int memtype; | 39 | int memtype; |
| 40 | void __iomem *internal_addr; | 40 | void __iomem *internal_addr; |
| 41 | struct uio_map *map; | 41 | struct uio_map *map; |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 447a7e2fc19b..f89c24a03bd9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) | |||
| 637 | #endif | 637 | #endif |
| 638 | 638 | ||
| 639 | /* USB autosuspend and autoresume */ | 639 | /* USB autosuspend and autoresume */ |
| 640 | #ifdef CONFIG_PM_RUNTIME | 640 | #ifdef CONFIG_PM |
| 641 | extern void usb_enable_autosuspend(struct usb_device *udev); | 641 | extern void usb_enable_autosuspend(struct usb_device *udev); |
| 642 | extern void usb_disable_autosuspend(struct usb_device *udev); | 642 | extern void usb_disable_autosuspend(struct usb_device *udev); |
| 643 | 643 | ||
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index e14c09a45c5a..535997a6681b 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h | |||
| @@ -13,11 +13,12 @@ struct ci_hdrc_platform_data { | |||
| 13 | /* offset of the capability registers */ | 13 | /* offset of the capability registers */ |
| 14 | uintptr_t capoffset; | 14 | uintptr_t capoffset; |
| 15 | unsigned power_budget; | 15 | unsigned power_budget; |
| 16 | struct usb_phy *phy; | 16 | struct phy *phy; |
| 17 | /* old usb_phy interface */ | ||
| 18 | struct usb_phy *usb_phy; | ||
| 17 | enum usb_phy_interface phy_mode; | 19 | enum usb_phy_interface phy_mode; |
| 18 | unsigned long flags; | 20 | unsigned long flags; |
| 19 | #define CI_HDRC_REGS_SHARED BIT(0) | 21 | #define CI_HDRC_REGS_SHARED BIT(0) |
| 20 | #define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1) | ||
| 21 | #define CI_HDRC_DISABLE_STREAMING BIT(3) | 22 | #define CI_HDRC_DISABLE_STREAMING BIT(3) |
| 22 | /* | 23 | /* |
| 23 | * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, | 24 | * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, |
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index c330f5ef42cf..3d87defcc527 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
| @@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver( | |||
| 427 | * @b_vendor_code: bMS_VendorCode part of the OS string | 427 | * @b_vendor_code: bMS_VendorCode part of the OS string |
| 428 | * @use_os_string: false by default, interested gadgets set it | 428 | * @use_os_string: false by default, interested gadgets set it |
| 429 | * @os_desc_config: the configuration to be used with OS descriptors | 429 | * @os_desc_config: the configuration to be used with OS descriptors |
| 430 | * @setup_pending: true when setup request is queued but not completed | ||
| 431 | * @os_desc_pending: true when os_desc request is queued but not completed | ||
| 430 | * | 432 | * |
| 431 | * One of these devices is allocated and initialized before the | 433 | * One of these devices is allocated and initialized before the |
| 432 | * associated device driver's bind() is called. | 434 | * associated device driver's bind() is called. |
| @@ -488,6 +490,9 @@ struct usb_composite_dev { | |||
| 488 | 490 | ||
| 489 | /* protects deactivations and delayed_status counts*/ | 491 | /* protects deactivations and delayed_status counts*/ |
| 490 | spinlock_t lock; | 492 | spinlock_t lock; |
| 493 | |||
| 494 | unsigned setup_pending:1; | ||
| 495 | unsigned os_desc_pending:1; | ||
| 491 | }; | 496 | }; |
| 492 | 497 | ||
| 493 | extern int usb_string_id(struct usb_composite_dev *c); | 498 | extern int usb_string_id(struct usb_composite_dev *c); |
| @@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); | |||
| 501 | extern void composite_disconnect(struct usb_gadget *gadget); | 506 | extern void composite_disconnect(struct usb_gadget *gadget); |
| 502 | extern int composite_setup(struct usb_gadget *gadget, | 507 | extern int composite_setup(struct usb_gadget *gadget, |
| 503 | const struct usb_ctrlrequest *ctrl); | 508 | const struct usb_ctrlrequest *ctrl); |
| 509 | extern void composite_suspend(struct usb_gadget *gadget); | ||
| 510 | extern void composite_resume(struct usb_gadget *gadget); | ||
| 504 | 511 | ||
| 505 | /* | 512 | /* |
| 506 | * Some systems will need runtime overrides for the product identifiers | 513 | * Some systems will need runtime overrides for the product identifiers |
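composite_suspend() and composite_resume() are now exported next to composite_setup()/composite_disconnect(). A short sketch, assuming a hypothetical wrapper gadget driver that forwards bus suspend/resume to the composite core instead of open-coding the per-function walk:

#include <linux/usb/composite.h>

static void example_gadget_suspend(struct usb_gadget *gadget)
{
	composite_suspend(gadget);
}

static void example_gadget_resume(struct usb_gadget *gadget)
{
	composite_resume(gadget);
}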
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h new file mode 100644 index 000000000000..7344d9e591cc --- /dev/null +++ b/include/linux/usb/ehci-dbgp.h | |||
| @@ -0,0 +1,83 @@ | |||
| 1 | /* | ||
| 2 | * Standalone EHCI usb debug driver | ||
| 3 | * | ||
| 4 | * Originally written by: | ||
| 5 | * Eric W. Biederman" <ebiederm@xmission.com> and | ||
| 6 | * Yinghai Lu <yhlu.kernel@gmail.com> | ||
| 7 | * | ||
| 8 | * Changes for early/late printk and HW errata: | ||
| 9 | * Jason Wessel <jason.wessel@windriver.com> | ||
| 10 | * Copyright (C) 2009 Wind River Systems, Inc. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef __LINUX_USB_EHCI_DBGP_H | ||
| 15 | #define __LINUX_USB_EHCI_DBGP_H | ||
| 16 | |||
| 17 | #include <linux/console.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | |||
| 20 | /* Appendix C, Debug port ... intended for use with special "debug devices" | ||
| 21 | * that can help if there's no serial console. (nonstandard enumeration.) | ||
| 22 | */ | ||
| 23 | struct ehci_dbg_port { | ||
| 24 | u32 control; | ||
| 25 | #define DBGP_OWNER (1<<30) | ||
| 26 | #define DBGP_ENABLED (1<<28) | ||
| 27 | #define DBGP_DONE (1<<16) | ||
| 28 | #define DBGP_INUSE (1<<10) | ||
| 29 | #define DBGP_ERRCODE(x) (((x)>>7)&0x07) | ||
| 30 | # define DBGP_ERR_BAD 1 | ||
| 31 | # define DBGP_ERR_SIGNAL 2 | ||
| 32 | #define DBGP_ERROR (1<<6) | ||
| 33 | #define DBGP_GO (1<<5) | ||
| 34 | #define DBGP_OUT (1<<4) | ||
| 35 | #define DBGP_LEN(x) (((x)>>0)&0x0f) | ||
| 36 | u32 pids; | ||
| 37 | #define DBGP_PID_GET(x) (((x)>>16)&0xff) | ||
| 38 | #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) | ||
| 39 | u32 data03; | ||
| 40 | u32 data47; | ||
| 41 | u32 address; | ||
| 42 | #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) | ||
| 43 | }; | ||
| 44 | |||
| 45 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
| 46 | extern int early_dbgp_init(char *s); | ||
| 47 | extern struct console early_dbgp_console; | ||
| 48 | #endif /* CONFIG_EARLY_PRINTK_DBGP */ | ||
| 49 | |||
| 50 | struct usb_hcd; | ||
| 51 | |||
| 52 | #ifdef CONFIG_XEN_DOM0 | ||
| 53 | extern int xen_dbgp_reset_prep(struct usb_hcd *); | ||
| 54 | extern int xen_dbgp_external_startup(struct usb_hcd *); | ||
| 55 | #else | ||
| 56 | static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) | ||
| 57 | { | ||
| 58 | return 1; /* Shouldn't this be 0? */ | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) | ||
| 62 | { | ||
| 63 | return -1; | ||
| 64 | } | ||
| 65 | #endif | ||
| 66 | |||
| 67 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
| 68 | /* Call backs from ehci host driver to ehci debug driver */ | ||
| 69 | extern int dbgp_external_startup(struct usb_hcd *); | ||
| 70 | extern int dbgp_reset_prep(struct usb_hcd *); | ||
| 71 | #else | ||
| 72 | static inline int dbgp_reset_prep(struct usb_hcd *hcd) | ||
| 73 | { | ||
| 74 | return xen_dbgp_reset_prep(hcd); | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int dbgp_external_startup(struct usb_hcd *hcd) | ||
| 78 | { | ||
| 79 | return xen_dbgp_external_startup(hcd); | ||
| 80 | } | ||
| 81 | #endif | ||
| 82 | |||
| 83 | #endif /* __LINUX_USB_EHCI_DBGP_H */ | ||
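The debug-port structures and hooks move into this dedicated header, so code that only needs the EHCI debug port no longer has to pull in all of ehci_def.h. A hedged sketch of the host-side call sites; the function name is illustrative:

#include <linux/usb/ehci-dbgp.h>

/* Hypothetical EHCI reset path: let the debug-port driver quiesce before
 * the controller reset and reinitialize it afterwards. With
 * CONFIG_EARLY_PRINTK_DBGP disabled, the inline fallbacks above route to
 * the Xen stubs, so no #ifdef is needed at the call site. */
static void example_ehci_reset(struct usb_hcd *hcd)
{
	dbgp_reset_prep(hcd);
	/* ... reset the controller ... */
	dbgp_external_startup(hcd);
}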
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index daec99af5d54..966889a20ea3 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h | |||
| @@ -19,6 +19,8 @@ | |||
| 19 | #ifndef __LINUX_USB_EHCI_DEF_H | 19 | #ifndef __LINUX_USB_EHCI_DEF_H |
| 20 | #define __LINUX_USB_EHCI_DEF_H | 20 | #define __LINUX_USB_EHCI_DEF_H |
| 21 | 21 | ||
| 22 | #include <linux/usb/ehci-dbgp.h> | ||
| 23 | |||
| 22 | /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ | 24 | /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ |
| 23 | 25 | ||
| 24 | /* Section 2.2 Host Controller Capability Registers */ | 26 | /* Section 2.2 Host Controller Capability Registers */ |
| @@ -190,67 +192,4 @@ struct ehci_regs { | |||
| 190 | #define USBMODE_EX_HC (3<<0) /* host controller mode */ | 192 | #define USBMODE_EX_HC (3<<0) /* host controller mode */ |
| 191 | }; | 193 | }; |
| 192 | 194 | ||
| 193 | /* Appendix C, Debug port ... intended for use with special "debug devices" | ||
| 194 | * that can help if there's no serial console. (nonstandard enumeration.) | ||
| 195 | */ | ||
| 196 | struct ehci_dbg_port { | ||
| 197 | u32 control; | ||
| 198 | #define DBGP_OWNER (1<<30) | ||
| 199 | #define DBGP_ENABLED (1<<28) | ||
| 200 | #define DBGP_DONE (1<<16) | ||
| 201 | #define DBGP_INUSE (1<<10) | ||
| 202 | #define DBGP_ERRCODE(x) (((x)>>7)&0x07) | ||
| 203 | # define DBGP_ERR_BAD 1 | ||
| 204 | # define DBGP_ERR_SIGNAL 2 | ||
| 205 | #define DBGP_ERROR (1<<6) | ||
| 206 | #define DBGP_GO (1<<5) | ||
| 207 | #define DBGP_OUT (1<<4) | ||
| 208 | #define DBGP_LEN(x) (((x)>>0)&0x0f) | ||
| 209 | u32 pids; | ||
| 210 | #define DBGP_PID_GET(x) (((x)>>16)&0xff) | ||
| 211 | #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) | ||
| 212 | u32 data03; | ||
| 213 | u32 data47; | ||
| 214 | u32 address; | ||
| 215 | #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) | ||
| 216 | }; | ||
| 217 | |||
| 218 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
| 219 | #include <linux/init.h> | ||
| 220 | extern int __init early_dbgp_init(char *s); | ||
| 221 | extern struct console early_dbgp_console; | ||
| 222 | #endif /* CONFIG_EARLY_PRINTK_DBGP */ | ||
| 223 | |||
| 224 | struct usb_hcd; | ||
| 225 | |||
| 226 | #ifdef CONFIG_XEN_DOM0 | ||
| 227 | extern int xen_dbgp_reset_prep(struct usb_hcd *); | ||
| 228 | extern int xen_dbgp_external_startup(struct usb_hcd *); | ||
| 229 | #else | ||
| 230 | static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) | ||
| 231 | { | ||
| 232 | return 1; /* Shouldn't this be 0? */ | ||
| 233 | } | ||
| 234 | |||
| 235 | static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) | ||
| 236 | { | ||
| 237 | return -1; | ||
| 238 | } | ||
| 239 | #endif | ||
| 240 | |||
| 241 | #ifdef CONFIG_EARLY_PRINTK_DBGP | ||
| 242 | /* Call backs from ehci host driver to ehci debug driver */ | ||
| 243 | extern int dbgp_external_startup(struct usb_hcd *); | ||
| 244 | extern int dbgp_reset_prep(struct usb_hcd *hcd); | ||
| 245 | #else | ||
| 246 | static inline int dbgp_reset_prep(struct usb_hcd *hcd) | ||
| 247 | { | ||
| 248 | return xen_dbgp_reset_prep(hcd); | ||
| 249 | } | ||
| 250 | static inline int dbgp_external_startup(struct usb_hcd *hcd) | ||
| 251 | { | ||
| 252 | return xen_dbgp_external_startup(hcd); | ||
| 253 | } | ||
| 254 | #endif | ||
| 255 | |||
| 256 | #endif /* __LINUX_USB_EHCI_DEF_H */ | 195 | #endif /* __LINUX_USB_EHCI_DEF_H */ |
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 522cafe26790..70ddb3943b62 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
| @@ -490,8 +490,7 @@ struct usb_gadget_ops { | |||
| 490 | void (*get_config_params)(struct usb_dcd_config_params *); | 490 | void (*get_config_params)(struct usb_dcd_config_params *); |
| 491 | int (*udc_start)(struct usb_gadget *, | 491 | int (*udc_start)(struct usb_gadget *, |
| 492 | struct usb_gadget_driver *); | 492 | struct usb_gadget_driver *); |
| 493 | int (*udc_stop)(struct usb_gadget *, | 493 | int (*udc_stop)(struct usb_gadget *); |
| 494 | struct usb_gadget_driver *); | ||
| 495 | }; | 494 | }; |
| 496 | 495 | ||
| 497 | /** | 496 | /** |
| @@ -925,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent, | |||
| 925 | struct usb_gadget *gadget, void (*release)(struct device *dev)); | 924 | struct usb_gadget *gadget, void (*release)(struct device *dev)); |
| 926 | extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); | 925 | extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); |
| 927 | extern void usb_del_gadget_udc(struct usb_gadget *gadget); | 926 | extern void usb_del_gadget_udc(struct usb_gadget *gadget); |
| 928 | extern int udc_attach_driver(const char *name, | 927 | extern int usb_udc_attach_driver(const char *name, |
| 929 | struct usb_gadget_driver *driver); | 928 | struct usb_gadget_driver *driver); |
| 930 | 929 | ||
| 931 | /*-------------------------------------------------------------------------*/ | 930 | /*-------------------------------------------------------------------------*/ |
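Two gadget-API changes are visible here: ->udc_stop() loses its usb_gadget_driver argument, and udc_attach_driver() is renamed usb_udc_attach_driver(). A sketch of a UDC driver's ops under the new prototypes; the names are illustrative:

#include <linux/usb/gadget.h>

static int example_udc_start(struct usb_gadget *gadget,
			     struct usb_gadget_driver *driver)
{
	/* stash "driver" locally if the controller needs it later */
	return 0;
}

static int example_udc_stop(struct usb_gadget *gadget)
{
	/* no driver argument anymore; use the copy saved in udc_start */
	return 0;
}

static const struct usb_gadget_ops example_udc_ops = {
	.udc_start	= example_udc_start,
	.udc_stop	= example_udc_stop,
};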
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd96a2bc3388..086bf13307e6 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -93,7 +93,7 @@ struct usb_hcd { | |||
| 93 | 93 | ||
| 94 | struct timer_list rh_timer; /* drives root-hub polling */ | 94 | struct timer_list rh_timer; /* drives root-hub polling */ |
| 95 | struct urb *status_urb; /* the current status urb */ | 95 | struct urb *status_urb; /* the current status urb */ |
| 96 | #ifdef CONFIG_PM_RUNTIME | 96 | #ifdef CONFIG_PM |
| 97 | struct work_struct wakeup_work; /* for remote wakeup */ | 97 | struct work_struct wakeup_work; /* for remote wakeup */ |
| 98 | #endif | 98 | #endif |
| 99 | 99 | ||
| @@ -379,6 +379,9 @@ struct hc_driver { | |||
| 379 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, | 379 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, |
| 380 | struct usb_device *, enum usb3_link_state state); | 380 | struct usb_device *, enum usb3_link_state state); |
| 381 | int (*find_raw_port_number)(struct usb_hcd *, int); | 381 | int (*find_raw_port_number)(struct usb_hcd *, int); |
| 382 | /* Call for power on/off the port if necessary */ | ||
| 383 | int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); | ||
| 384 | |||
| 382 | }; | 385 | }; |
| 383 | 386 | ||
| 384 | static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) | 387 | static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) |
| @@ -625,16 +628,13 @@ extern int usb_find_interface_driver(struct usb_device *dev, | |||
| 625 | extern void usb_root_hub_lost_power(struct usb_device *rhdev); | 628 | extern void usb_root_hub_lost_power(struct usb_device *rhdev); |
| 626 | extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); | 629 | extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); |
| 627 | extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); | 630 | extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); |
| 628 | #endif /* CONFIG_PM */ | ||
| 629 | |||
| 630 | #ifdef CONFIG_PM_RUNTIME | ||
| 631 | extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); | 631 | extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); |
| 632 | #else | 632 | #else |
| 633 | static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) | 633 | static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) |
| 634 | { | 634 | { |
| 635 | return; | 635 | return; |
| 636 | } | 636 | } |
| 637 | #endif /* CONFIG_PM_RUNTIME */ | 637 | #endif /* CONFIG_PM */ |
| 638 | 638 | ||
| 639 | /*-------------------------------------------------------------------------*/ | 639 | /*-------------------------------------------------------------------------*/ |
| 640 | 640 | ||
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 154332b7c8c0..52661c5da690 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
| @@ -9,15 +9,20 @@ | |||
| 9 | #ifndef __LINUX_USB_OTG_H | 9 | #ifndef __LINUX_USB_OTG_H |
| 10 | #define __LINUX_USB_OTG_H | 10 | #define __LINUX_USB_OTG_H |
| 11 | 11 | ||
| 12 | #include <linux/phy/phy.h> | ||
| 12 | #include <linux/usb/phy.h> | 13 | #include <linux/usb/phy.h> |
| 13 | 14 | ||
| 14 | struct usb_otg { | 15 | struct usb_otg { |
| 15 | u8 default_a; | 16 | u8 default_a; |
| 16 | 17 | ||
| 17 | struct usb_phy *phy; | 18 | struct phy *phy; |
| 19 | /* old usb_phy interface */ | ||
| 20 | struct usb_phy *usb_phy; | ||
| 18 | struct usb_bus *host; | 21 | struct usb_bus *host; |
| 19 | struct usb_gadget *gadget; | 22 | struct usb_gadget *gadget; |
| 20 | 23 | ||
| 24 | enum usb_otg_state state; | ||
| 25 | |||
| 21 | /* bind/unbind the host controller */ | 26 | /* bind/unbind the host controller */ |
| 22 | int (*set_host)(struct usb_otg *otg, struct usb_bus *host); | 27 | int (*set_host)(struct usb_otg *otg, struct usb_bus *host); |
| 23 | 28 | ||
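The OTG state now lives in struct usb_otg (a plain enum usb_otg_state field) rather than in struct usb_phy, and a generic struct phy joins the legacy usb_phy pointer. A minimal sketch of state-machine code under the new layout:

#include <linux/usb/otg.h>

/* Hypothetical OTG state-machine step: update otg->state instead of the
 * phy->state field removed from struct usb_phy below. */
static void example_enter_peripheral(struct usb_otg *otg)
{
	otg->state = OTG_STATE_B_PERIPHERAL;
}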
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index 353053a33f21..f499c23e6342 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h | |||
| @@ -77,7 +77,6 @@ struct usb_phy { | |||
| 77 | unsigned int flags; | 77 | unsigned int flags; |
| 78 | 78 | ||
| 79 | enum usb_phy_type type; | 79 | enum usb_phy_type type; |
| 80 | enum usb_otg_state state; | ||
| 81 | enum usb_phy_events last_event; | 80 | enum usb_phy_events last_event; |
| 82 | 81 | ||
| 83 | struct usb_otg *otg; | 82 | struct usb_otg *otg; |
| @@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *); | |||
| 210 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); | 209 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); |
| 211 | extern int usb_bind_phy(const char *dev_name, u8 index, | 210 | extern int usb_bind_phy(const char *dev_name, u8 index, |
| 212 | const char *phy_dev_name); | 211 | const char *phy_dev_name); |
| 212 | extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); | ||
| 213 | #else | 213 | #else |
| 214 | static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) | 214 | static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) |
| 215 | { | 215 | { |
| @@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index, | |||
| 251 | { | 251 | { |
| 252 | return -EOPNOTSUPP; | 252 | return -EOPNOTSUPP; |
| 253 | } | 253 | } |
| 254 | |||
| 255 | static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) | ||
| 256 | { | ||
| 257 | } | ||
| 254 | #endif | 258 | #endif |
| 255 | 259 | ||
| 256 | static inline int | 260 | static inline int |
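usb_phy_set_event() gains an empty stub for the !CONFIG_USB_PHY case, so callers need no guard. A hedged sketch of a PHY driver reporting a VBUS event through the helper rather than writing the phy's last_event field directly:

#include <linux/usb/phy.h>

static void example_report_vbus(struct usb_phy *phy)
{
	usb_phy_set_event(phy, USB_EVENT_VBUS);
}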
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d5952bb66752..9fd9e481ea98 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
| @@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param { | |||
| 145 | int d0_rx_id; | 145 | int d0_rx_id; |
| 146 | int d1_tx_id; | 146 | int d1_tx_id; |
| 147 | int d1_rx_id; | 147 | int d1_rx_id; |
| 148 | int d2_tx_id; | ||
| 149 | int d2_rx_id; | ||
| 150 | int d3_tx_id; | ||
| 151 | int d3_rx_id; | ||
| 148 | 152 | ||
| 149 | /* | 153 | /* |
| 150 | * option: | 154 | * option: |
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index e95372654f09..8297e5b341d8 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/kref.h> | 4 | #include <linux/kref.h> |
| 5 | #include <linux/nsproxy.h> | 5 | #include <linux/nsproxy.h> |
| 6 | #include <linux/ns_common.h> | ||
| 6 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
| 7 | #include <linux/err.h> | 8 | #include <linux/err.h> |
| 8 | 9 | ||
| @@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */ | |||
| 17 | } extent[UID_GID_MAP_MAX_EXTENTS]; | 18 | } extent[UID_GID_MAP_MAX_EXTENTS]; |
| 18 | }; | 19 | }; |
| 19 | 20 | ||
| 21 | #define USERNS_SETGROUPS_ALLOWED 1UL | ||
| 22 | |||
| 23 | #define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED | ||
| 24 | |||
| 20 | struct user_namespace { | 25 | struct user_namespace { |
| 21 | struct uid_gid_map uid_map; | 26 | struct uid_gid_map uid_map; |
| 22 | struct uid_gid_map gid_map; | 27 | struct uid_gid_map gid_map; |
| @@ -26,7 +31,8 @@ struct user_namespace { | |||
| 26 | int level; | 31 | int level; |
| 27 | kuid_t owner; | 32 | kuid_t owner; |
| 28 | kgid_t group; | 33 | kgid_t group; |
| 29 | unsigned int proc_inum; | 34 | struct ns_common ns; |
| 35 | unsigned long flags; | ||
| 30 | 36 | ||
| 31 | /* Register of per-UID persistent keyrings for this namespace */ | 37 | /* Register of per-UID persistent keyrings for this namespace */ |
| 32 | #ifdef CONFIG_PERSISTENT_KEYRINGS | 38 | #ifdef CONFIG_PERSISTENT_KEYRINGS |
| @@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations; | |||
| 63 | extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); | 69 | extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); |
| 64 | extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); | 70 | extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); |
| 65 | extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); | 71 | extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); |
| 72 | extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); | ||
| 73 | extern int proc_setgroups_show(struct seq_file *m, void *v); | ||
| 74 | extern bool userns_may_setgroups(const struct user_namespace *ns); | ||
| 66 | #else | 75 | #else |
| 67 | 76 | ||
| 68 | static inline struct user_namespace *get_user_ns(struct user_namespace *ns) | 77 | static inline struct user_namespace *get_user_ns(struct user_namespace *ns) |
| @@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns) | |||
| 87 | { | 96 | { |
| 88 | } | 97 | } |
| 89 | 98 | ||
| 99 | static inline bool userns_may_setgroups(const struct user_namespace *ns) | ||
| 100 | { | ||
| 101 | return true; | ||
| 102 | } | ||
| 90 | #endif | 103 | #endif |
| 91 | 104 | ||
| 92 | #endif /* _LINUX_USER_H */ | 105 | #endif /* _LINUX_USER_H */ |
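The new setgroups plumbing adds a per-namespace USERNS_SETGROUPS_ALLOWED flag plus a userns_may_setgroups() predicate, with an always-true stub when user namespaces are disabled. A sketch of the expected check on a setgroups()-style path, assuming the usual current_user_ns() accessor from <linux/cred.h>:

#include <linux/cred.h>
#include <linux/user_namespace.h>

static int example_check_setgroups(void)
{
	if (!userns_may_setgroups(current_user_ns()))
		return -EPERM;	/* /proc/<pid>/setgroups was set to "deny" */
	return 0;
}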
diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 239e27733d6c..5093f58ae192 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/sched.h> | 5 | #include <linux/sched.h> |
| 6 | #include <linux/kref.h> | 6 | #include <linux/kref.h> |
| 7 | #include <linux/nsproxy.h> | 7 | #include <linux/nsproxy.h> |
| 8 | #include <linux/ns_common.h> | ||
| 8 | #include <linux/err.h> | 9 | #include <linux/err.h> |
| 9 | #include <uapi/linux/utsname.h> | 10 | #include <uapi/linux/utsname.h> |
| 10 | 11 | ||
| @@ -23,7 +24,7 @@ struct uts_namespace { | |||
| 23 | struct kref kref; | 24 | struct kref kref; |
| 24 | struct new_utsname name; | 25 | struct new_utsname name; |
| 25 | struct user_namespace *user_ns; | 26 | struct user_namespace *user_ns; |
| 26 | unsigned int proc_inum; | 27 | struct ns_common ns; |
| 27 | }; | 28 | }; |
| 28 | extern struct uts_namespace init_uts_ns; | 29 | extern struct uts_namespace init_uts_ns; |
| 29 | 30 | ||
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 65261a7244fc..28f0e65b9a11 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
| @@ -75,10 +75,13 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq); | |||
| 75 | 75 | ||
| 76 | bool virtqueue_is_broken(struct virtqueue *vq); | 76 | bool virtqueue_is_broken(struct virtqueue *vq); |
| 77 | 77 | ||
| 78 | void *virtqueue_get_avail(struct virtqueue *vq); | ||
| 79 | void *virtqueue_get_used(struct virtqueue *vq); | ||
| 80 | |||
| 78 | /** | 81 | /** |
| 79 | * virtio_device - representation of a device using virtio | 82 | * virtio_device - representation of a device using virtio |
| 80 | * @index: unique position on the virtio bus | 83 | * @index: unique position on the virtio bus |
| 81 | * @failed: saved value for CONFIG_S_FAILED bit (for restore) | 84 | * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore) |
| 82 | * @config_enabled: configuration change reporting enabled | 85 | * @config_enabled: configuration change reporting enabled |
| 83 | * @config_change_pending: configuration change reported while disabled | 86 | * @config_change_pending: configuration change reported while disabled |
| 84 | * @config_lock: protects configuration change reporting | 87 | * @config_lock: protects configuration change reporting |
| @@ -101,11 +104,12 @@ struct virtio_device { | |||
| 101 | const struct virtio_config_ops *config; | 104 | const struct virtio_config_ops *config; |
| 102 | const struct vringh_config_ops *vringh_config; | 105 | const struct vringh_config_ops *vringh_config; |
| 103 | struct list_head vqs; | 106 | struct list_head vqs; |
| 104 | /* Note that this is a Linux set_bit-style bitmap. */ | 107 | u64 features; |
| 105 | unsigned long features[1]; | ||
| 106 | void *priv; | 108 | void *priv; |
| 107 | }; | 109 | }; |
| 108 | 110 | ||
| 111 | bool virtio_device_is_legacy_only(struct virtio_device_id id); | ||
| 112 | |||
| 109 | static inline struct virtio_device *dev_to_virtio(struct device *_dev) | 113 | static inline struct virtio_device *dev_to_virtio(struct device *_dev) |
| 110 | { | 114 | { |
| 111 | return container_of(_dev, struct virtio_device, dev); | 115 | return container_of(_dev, struct virtio_device, dev); |
| @@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev); | |||
| 128 | * @id_table: the ids serviced by this driver. | 132 | * @id_table: the ids serviced by this driver. |
| 129 | * @feature_table: an array of feature numbers supported by this driver. | 133 | * @feature_table: an array of feature numbers supported by this driver. |
| 130 | * @feature_table_size: number of entries in the feature table array. | 134 | * @feature_table_size: number of entries in the feature table array. |
| 135 | * @feature_table_legacy: same as feature_table but when working in legacy mode. | ||
| 136 | * @feature_table_size_legacy: number of entries in feature table legacy array. | ||
| 131 | * @probe: the function to call when a device is found. Returns 0 or -errno. | 137 | * @probe: the function to call when a device is found. Returns 0 or -errno. |
| 132 | * @remove: the function to call when a device is removed. | 138 | * @remove: the function to call when a device is removed. |
| 133 | * @config_changed: optional function to call when the device configuration | 139 | * @config_changed: optional function to call when the device configuration |
| @@ -138,6 +144,8 @@ struct virtio_driver { | |||
| 138 | const struct virtio_device_id *id_table; | 144 | const struct virtio_device_id *id_table; |
| 139 | const unsigned int *feature_table; | 145 | const unsigned int *feature_table; |
| 140 | unsigned int feature_table_size; | 146 | unsigned int feature_table_size; |
| 147 | const unsigned int *feature_table_legacy; | ||
| 148 | unsigned int feature_table_size_legacy; | ||
| 141 | int (*probe)(struct virtio_device *dev); | 149 | int (*probe)(struct virtio_device *dev); |
| 142 | void (*scan)(struct virtio_device *dev); | 150 | void (*scan)(struct virtio_device *dev); |
| 143 | void (*remove)(struct virtio_device *dev); | 151 | void (*remove)(struct virtio_device *dev); |
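struct virtio_device's feature bitmap becomes a plain u64, and drivers can now publish a separate feature list for legacy (pre-1.0) devices. A hedged sketch of a driver declaration using the new fields; the feature bit is hypothetical:

#include <linux/kernel.h>
#include <linux/virtio.h>

#define EXAMPLE_F_FLUSH	9	/* hypothetical device feature bit */

static const unsigned int example_features[] = {
	EXAMPLE_F_FLUSH,
};

static struct virtio_driver example_virtio_driver = {
	.feature_table			= example_features,
	.feature_table_size		= ARRAY_SIZE(example_features),
	.feature_table_legacy		= example_features,
	.feature_table_size_legacy	= ARRAY_SIZE(example_features),
	/* .driver.name, .id_table, .probe, .remove, ... */
};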
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h new file mode 100644 index 000000000000..51865d05b267 --- /dev/null +++ b/include/linux/virtio_byteorder.h | |||
| @@ -0,0 +1,59 @@ | |||
| 1 | #ifndef _LINUX_VIRTIO_BYTEORDER_H | ||
| 2 | #define _LINUX_VIRTIO_BYTEORDER_H | ||
| 3 | #include <linux/types.h> | ||
| 4 | #include <uapi/linux/virtio_types.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Low-level memory accessors for handling virtio in modern little endian and in | ||
| 8 | * compatibility native endian format. | ||
| 9 | */ | ||
| 10 | |||
| 11 | static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) | ||
| 12 | { | ||
| 13 | if (little_endian) | ||
| 14 | return le16_to_cpu((__force __le16)val); | ||
| 15 | else | ||
| 16 | return (__force u16)val; | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) | ||
| 20 | { | ||
| 21 | if (little_endian) | ||
| 22 | return (__force __virtio16)cpu_to_le16(val); | ||
| 23 | else | ||
| 24 | return (__force __virtio16)val; | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) | ||
| 28 | { | ||
| 29 | if (little_endian) | ||
| 30 | return le32_to_cpu((__force __le32)val); | ||
| 31 | else | ||
| 32 | return (__force u32)val; | ||
| 33 | } | ||
| 34 | |||
| 35 | static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) | ||
| 36 | { | ||
| 37 | if (little_endian) | ||
| 38 | return (__force __virtio32)cpu_to_le32(val); | ||
| 39 | else | ||
| 40 | return (__force __virtio32)val; | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) | ||
| 44 | { | ||
| 45 | if (little_endian) | ||
| 46 | return le64_to_cpu((__force __le64)val); | ||
| 47 | else | ||
| 48 | return (__force u64)val; | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) | ||
| 52 | { | ||
| 53 | if (little_endian) | ||
| 54 | return (__force __virtio64)cpu_to_le64(val); | ||
| 55 | else | ||
| 56 | return (__force __virtio64)val; | ||
| 57 | } | ||
| 58 | |||
| 59 | #endif /* _LINUX_VIRTIO_BYTEORDER */ | ||
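This new header centralizes the virtio endianness rule: legacy devices use guest-native byte order, while virtio 1.0 devices are always little-endian. A minimal usage sketch; the names are illustrative:

#include <linux/virtio_byteorder.h>

/* Convert an on-the-wire 16-bit field; "little_endian" is true when the
 * modern (VIRTIO_F_VERSION_1) interface was negotiated. */
static u16 example_read_idx(bool little_endian, __virtio16 wire_val)
{
	return __virtio16_to_cpu(little_endian, wire_val);
}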
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7f4ef66873ef..ca3ed78e5ec7 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
| 5 | #include <linux/bug.h> | 5 | #include <linux/bug.h> |
| 6 | #include <linux/virtio.h> | 6 | #include <linux/virtio.h> |
| 7 | #include <linux/virtio_byteorder.h> | ||
| 7 | #include <uapi/linux/virtio_config.h> | 8 | #include <uapi/linux/virtio_config.h> |
| 8 | 9 | ||
| 9 | /** | 10 | /** |
| @@ -18,6 +19,9 @@ | |||
| 18 | * offset: the offset of the configuration field | 19 | * offset: the offset of the configuration field |
| 19 | * buf: the buffer to read the field value from. | 20 | * buf: the buffer to read the field value from. |
| 20 | * len: the length of the buffer | 21 | * len: the length of the buffer |
| 22 | * @generation: config generation counter | ||
| 23 | * vdev: the virtio_device | ||
| 24 | * Returns the config generation counter | ||
| 21 | * @get_status: read the status byte | 25 | * @get_status: read the status byte |
| 22 | * vdev: the virtio_device | 26 | * vdev: the virtio_device |
| 23 | * Returns the status byte | 27 | * Returns the status byte |
| @@ -46,6 +50,7 @@ | |||
| 46 | * vdev: the virtio_device | 50 | * vdev: the virtio_device |
| 47 | * This gives the final feature bits for the device: it can change | 51 | * This gives the final feature bits for the device: it can change |
| 48 | * the dev->feature bits if it wants. | 52 | * the dev->feature bits if it wants. |
| 53 | * Returns 0 on success or error status | ||
| 49 | * @bus_name: return the bus name associated with the device | 54 | * @bus_name: return the bus name associated with the device |
| 50 | * vdev: the virtio_device | 55 | * vdev: the virtio_device |
| 51 | * This returns a pointer to the bus name a la pci_name from which | 56 | * This returns a pointer to the bus name a la pci_name from which |
| @@ -58,6 +63,7 @@ struct virtio_config_ops { | |||
| 58 | void *buf, unsigned len); | 63 | void *buf, unsigned len); |
| 59 | void (*set)(struct virtio_device *vdev, unsigned offset, | 64 | void (*set)(struct virtio_device *vdev, unsigned offset, |
| 60 | const void *buf, unsigned len); | 65 | const void *buf, unsigned len); |
| 66 | u32 (*generation)(struct virtio_device *vdev); | ||
| 61 | u8 (*get_status)(struct virtio_device *vdev); | 67 | u8 (*get_status)(struct virtio_device *vdev); |
| 62 | void (*set_status)(struct virtio_device *vdev, u8 status); | 68 | void (*set_status)(struct virtio_device *vdev, u8 status); |
| 63 | void (*reset)(struct virtio_device *vdev); | 69 | void (*reset)(struct virtio_device *vdev); |
| @@ -66,8 +72,8 @@ struct virtio_config_ops { | |||
| 66 | vq_callback_t *callbacks[], | 72 | vq_callback_t *callbacks[], |
| 67 | const char *names[]); | 73 | const char *names[]); |
| 68 | void (*del_vqs)(struct virtio_device *); | 74 | void (*del_vqs)(struct virtio_device *); |
| 69 | u32 (*get_features)(struct virtio_device *vdev); | 75 | u64 (*get_features)(struct virtio_device *vdev); |
| 70 | void (*finalize_features)(struct virtio_device *vdev); | 76 | int (*finalize_features)(struct virtio_device *vdev); |
| 71 | const char *(*bus_name)(struct virtio_device *vdev); | 77 | const char *(*bus_name)(struct virtio_device *vdev); |
| 72 | int (*set_vq_affinity)(struct virtqueue *vq, int cpu); | 78 | int (*set_vq_affinity)(struct virtqueue *vq, int cpu); |
| 73 | }; | 79 | }; |
| @@ -77,23 +83,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, | |||
| 77 | unsigned int fbit); | 83 | unsigned int fbit); |
| 78 | 84 | ||
| 79 | /** | 85 | /** |
| 80 | * virtio_has_feature - helper to determine if this device has this feature. | 86 | * __virtio_test_bit - helper to test feature bits. For use by transports. |
| 87 | * Devices should normally use virtio_has_feature, | ||
| 88 | * which includes more checks. | ||
| 81 | * @vdev: the device | 89 | * @vdev: the device |
| 82 | * @fbit: the feature bit | 90 | * @fbit: the feature bit |
| 83 | */ | 91 | */ |
| 84 | static inline bool virtio_has_feature(const struct virtio_device *vdev, | 92 | static inline bool __virtio_test_bit(const struct virtio_device *vdev, |
| 93 | unsigned int fbit) | ||
| 94 | { | ||
| 95 | /* Did you forget to fix assumptions on max features? */ | ||
| 96 | if (__builtin_constant_p(fbit)) | ||
| 97 | BUILD_BUG_ON(fbit >= 64); | ||
| 98 | else | ||
| 99 | BUG_ON(fbit >= 64); | ||
| 100 | |||
| 101 | return vdev->features & BIT_ULL(fbit); | ||
| 102 | } | ||
| 103 | |||
| 104 | /** | ||
| 105 | * __virtio_set_bit - helper to set feature bits. For use by transports. | ||
| 106 | * @vdev: the device | ||
| 107 | * @fbit: the feature bit | ||
| 108 | */ | ||
| 109 | static inline void __virtio_set_bit(struct virtio_device *vdev, | ||
| 110 | unsigned int fbit) | ||
| 111 | { | ||
| 112 | /* Did you forget to fix assumptions on max features? */ | ||
| 113 | if (__builtin_constant_p(fbit)) | ||
| 114 | BUILD_BUG_ON(fbit >= 64); | ||
| 115 | else | ||
| 116 | BUG_ON(fbit >= 64); | ||
| 117 | |||
| 118 | vdev->features |= BIT_ULL(fbit); | ||
| 119 | } | ||
| 120 | |||
| 121 | /** | ||
| 122 | * __virtio_clear_bit - helper to clear feature bits. For use by transports. | ||
| 123 | * @vdev: the device | ||
| 124 | * @fbit: the feature bit | ||
| 125 | */ | ||
| 126 | static inline void __virtio_clear_bit(struct virtio_device *vdev, | ||
| 85 | unsigned int fbit) | 127 | unsigned int fbit) |
| 86 | { | 128 | { |
| 87 | /* Did you forget to fix assumptions on max features? */ | 129 | /* Did you forget to fix assumptions on max features? */ |
| 88 | if (__builtin_constant_p(fbit)) | 130 | if (__builtin_constant_p(fbit)) |
| 89 | BUILD_BUG_ON(fbit >= 32); | 131 | BUILD_BUG_ON(fbit >= 64); |
| 90 | else | 132 | else |
| 91 | BUG_ON(fbit >= 32); | 133 | BUG_ON(fbit >= 64); |
| 134 | |||
| 135 | vdev->features &= ~BIT_ULL(fbit); | ||
| 136 | } | ||
| 92 | 137 | ||
| 138 | /** | ||
| 139 | * virtio_has_feature - helper to determine if this device has this feature. | ||
| 140 | * @vdev: the device | ||
| 141 | * @fbit: the feature bit | ||
| 142 | */ | ||
| 143 | static inline bool virtio_has_feature(const struct virtio_device *vdev, | ||
| 144 | unsigned int fbit) | ||
| 145 | { | ||
| 93 | if (fbit < VIRTIO_TRANSPORT_F_START) | 146 | if (fbit < VIRTIO_TRANSPORT_F_START) |
| 94 | virtio_check_driver_offered_feature(vdev, fbit); | 147 | virtio_check_driver_offered_feature(vdev, fbit); |
| 95 | 148 | ||
| 96 | return test_bit(fbit, vdev->features); | 149 | return __virtio_test_bit(vdev, fbit); |
| 97 | } | 150 | } |
| 98 | 151 | ||
| 99 | static inline | 152 | static inline |
| @@ -152,6 +205,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) | |||
| 152 | return 0; | 205 | return 0; |
| 153 | } | 206 | } |
| 154 | 207 | ||
| 208 | /* Memory accessors */ | ||
| 209 | static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) | ||
| 210 | { | ||
| 211 | return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 212 | } | ||
| 213 | |||
| 214 | static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) | ||
| 215 | { | ||
| 216 | return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 217 | } | ||
| 218 | |||
| 219 | static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) | ||
| 220 | { | ||
| 221 | return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 222 | } | ||
| 223 | |||
| 224 | static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) | ||
| 225 | { | ||
| 226 | return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 227 | } | ||
| 228 | |||
| 229 | static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) | ||
| 230 | { | ||
| 231 | return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 232 | } | ||
| 233 | |||
| 234 | static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | ||
| 235 | { | ||
| 236 | return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | ||
| 237 | } | ||
| 238 | |||
| 155 | /* Config space accessors. */ | 239 | /* Config space accessors. */ |
| 156 | #define virtio_cread(vdev, structname, member, ptr) \ | 240 | #define virtio_cread(vdev, structname, member, ptr) \ |
| 157 | do { \ | 241 | do { \ |
| @@ -221,11 +305,33 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) | |||
| 221 | return ret; | 305 | return ret; |
| 222 | } | 306 | } |
| 223 | 307 | ||
| 308 | /* Read @count fields, @bytes each. */ | ||
| 309 | static inline void __virtio_cread_many(struct virtio_device *vdev, | ||
| 310 | unsigned int offset, | ||
| 311 | void *buf, size_t count, size_t bytes) | ||
| 312 | { | ||
| 313 | u32 old, gen = vdev->config->generation ? | ||
| 314 | vdev->config->generation(vdev) : 0; | ||
| 315 | int i; | ||
| 316 | |||
| 317 | do { | ||
| 318 | old = gen; | ||
| 319 | |||
| 320 | for (i = 0; i < count; i++) | ||
| 321 | vdev->config->get(vdev, offset + bytes * i, | ||
| 322 | buf + i * bytes, bytes); | ||
| 323 | |||
| 324 | gen = vdev->config->generation ? | ||
| 325 | vdev->config->generation(vdev) : 0; | ||
| 326 | } while (gen != old); | ||
| 327 | } | ||
| 328 | |||
| 329 | |||
| 224 | static inline void virtio_cread_bytes(struct virtio_device *vdev, | 330 | static inline void virtio_cread_bytes(struct virtio_device *vdev, |
| 225 | unsigned int offset, | 331 | unsigned int offset, |
| 226 | void *buf, size_t len) | 332 | void *buf, size_t len) |
| 227 | { | 333 | { |
| 228 | vdev->config->get(vdev, offset, buf, len); | 334 | __virtio_cread_many(vdev, offset, buf, len, 1); |
| 229 | } | 335 | } |
| 230 | 336 | ||
| 231 | static inline void virtio_cwrite8(struct virtio_device *vdev, | 337 | static inline void virtio_cwrite8(struct virtio_device *vdev, |
| @@ -239,12 +345,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, | |||
| 239 | { | 345 | { |
| 240 | u16 ret; | 346 | u16 ret; |
| 241 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 347 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 242 | return ret; | 348 | return virtio16_to_cpu(vdev, (__force __virtio16)ret); |
| 243 | } | 349 | } |
| 244 | 350 | ||
| 245 | static inline void virtio_cwrite16(struct virtio_device *vdev, | 351 | static inline void virtio_cwrite16(struct virtio_device *vdev, |
| 246 | unsigned int offset, u16 val) | 352 | unsigned int offset, u16 val) |
| 247 | { | 353 | { |
| 354 | val = (__force u16)cpu_to_virtio16(vdev, val); | ||
| 248 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 355 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 249 | } | 356 | } |
| 250 | 357 | ||
| @@ -253,12 +360,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, | |||
| 253 | { | 360 | { |
| 254 | u32 ret; | 361 | u32 ret; |
| 255 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 362 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 256 | return ret; | 363 | return virtio32_to_cpu(vdev, (__force __virtio32)ret); |
| 257 | } | 364 | } |
| 258 | 365 | ||
| 259 | static inline void virtio_cwrite32(struct virtio_device *vdev, | 366 | static inline void virtio_cwrite32(struct virtio_device *vdev, |
| 260 | unsigned int offset, u32 val) | 367 | unsigned int offset, u32 val) |
| 261 | { | 368 | { |
| 369 | val = (__force u32)cpu_to_virtio32(vdev, val); | ||
| 262 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 370 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 263 | } | 371 | } |
| 264 | 372 | ||
| @@ -267,12 +375,14 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, | |||
| 267 | { | 375 | { |
| 268 | u64 ret; | 376 | u64 ret; |
| 269 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 377 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 270 | return ret; | 378 | __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); |
| 379 | return virtio64_to_cpu(vdev, (__force __virtio64)ret); | ||
| 271 | } | 380 | } |
| 272 | 381 | ||
| 273 | static inline void virtio_cwrite64(struct virtio_device *vdev, | 382 | static inline void virtio_cwrite64(struct virtio_device *vdev, |
| 274 | unsigned int offset, u64 val) | 383 | unsigned int offset, u64 val) |
| 275 | { | 384 | { |
| 385 | val = (__force u64)cpu_to_virtio64(vdev, val); | ||
| 276 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 386 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 277 | } | 387 | } |
| 278 | 388 | ||
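The config-space plumbing above does three related things: feature bits move to __virtio_test_bit()/__virtio_set_bit()/__virtio_clear_bit() on the 64-bit features word, fixed-size config reads and writes become endian-aware through the virtioXX_to_cpu()/cpu_to_virtioXX() wrappers, and multi-byte reads are retried under the optional generation counter. A hedged sketch of a transport-side finalize_features(), which now returns int; it assumes the VIRTIO_F_VERSION_1 bit defined elsewhere in this series:

#include <linux/virtio_config.h>

/* Hypothetical legacy-only transport: strip the virtio 1.0 feature bit
 * during negotiation and accept the rest. */
static int example_finalize_features(struct virtio_device *vdev)
{
	if (__virtio_test_bit(vdev, VIRTIO_F_VERSION_1))
		__virtio_clear_bit(vdev, VIRTIO_F_VERSION_1);
	return 0;
}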
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h deleted file mode 100644 index de429d1f4357..000000000000 --- a/include/linux/virtio_scsi.h +++ /dev/null | |||
| @@ -1,162 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * This header is BSD licensed so anyone can use the definitions to implement | ||
| 3 | * compatible drivers/servers. | ||
| 4 | * | ||
| 5 | * Redistribution and use in source and binary forms, with or without | ||
| 6 | * modification, are permitted provided that the following conditions | ||
| 7 | * are met: | ||
| 8 | * 1. Redistributions of source code must retain the above copyright | ||
| 9 | * notice, this list of conditions and the following disclaimer. | ||
| 10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
| 11 | * notice, this list of conditions and the following disclaimer in the | ||
| 12 | * documentation and/or other materials provided with the distribution. | ||
| 13 | * | ||
| 14 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND | ||
| 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
| 17 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE | ||
| 18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
| 20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
| 21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
| 23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
| 24 | * SUCH DAMAGE. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #ifndef _LINUX_VIRTIO_SCSI_H | ||
| 28 | #define _LINUX_VIRTIO_SCSI_H | ||
| 29 | |||
| 30 | #define VIRTIO_SCSI_CDB_SIZE 32 | ||
| 31 | #define VIRTIO_SCSI_SENSE_SIZE 96 | ||
| 32 | |||
| 33 | /* SCSI command request, followed by data-out */ | ||
| 34 | struct virtio_scsi_cmd_req { | ||
| 35 | u8 lun[8]; /* Logical Unit Number */ | ||
| 36 | u64 tag; /* Command identifier */ | ||
| 37 | u8 task_attr; /* Task attribute */ | ||
| 38 | u8 prio; /* SAM command priority field */ | ||
| 39 | u8 crn; | ||
| 40 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | ||
| 41 | } __packed; | ||
| 42 | |||
| 43 | /* SCSI command request, followed by protection information */ | ||
| 44 | struct virtio_scsi_cmd_req_pi { | ||
| 45 | u8 lun[8]; /* Logical Unit Number */ | ||
| 46 | u64 tag; /* Command identifier */ | ||
| 47 | u8 task_attr; /* Task attribute */ | ||
| 48 | u8 prio; /* SAM command priority field */ | ||
| 49 | u8 crn; | ||
| 50 | u32 pi_bytesout; /* DataOUT PI Number of bytes */ | ||
| 51 | u32 pi_bytesin; /* DataIN PI Number of bytes */ | ||
| 52 | u8 cdb[VIRTIO_SCSI_CDB_SIZE]; | ||
| 53 | } __packed; | ||
| 54 | |||
| 55 | /* Response, followed by sense data and data-in */ | ||
| 56 | struct virtio_scsi_cmd_resp { | ||
| 57 | u32 sense_len; /* Sense data length */ | ||
| 58 | u32 resid; /* Residual bytes in data buffer */ | ||
| 59 | u16 status_qualifier; /* Status qualifier */ | ||
| 60 | u8 status; /* Command completion status */ | ||
| 61 | u8 response; /* Response values */ | ||
| 62 | u8 sense[VIRTIO_SCSI_SENSE_SIZE]; | ||
| 63 | } __packed; | ||
| 64 | |||
| 65 | /* Task Management Request */ | ||
| 66 | struct virtio_scsi_ctrl_tmf_req { | ||
| 67 | u32 type; | ||
| 68 | u32 subtype; | ||
| 69 | u8 lun[8]; | ||
| 70 | u64 tag; | ||
| 71 | } __packed; | ||
| 72 | |||
| 73 | struct virtio_scsi_ctrl_tmf_resp { | ||
| 74 | u8 response; | ||
| 75 | } __packed; | ||
| 76 | |||
| 77 | /* Asynchronous notification query/subscription */ | ||
| 78 | struct virtio_scsi_ctrl_an_req { | ||
| 79 | u32 type; | ||
| 80 | u8 lun[8]; | ||
| 81 | u32 event_requested; | ||
| 82 | } __packed; | ||
| 83 | |||
| 84 | struct virtio_scsi_ctrl_an_resp { | ||
| 85 | u32 event_actual; | ||
| 86 | u8 response; | ||
| 87 | } __packed; | ||
| 88 | |||
| 89 | struct virtio_scsi_event { | ||
| 90 | u32 event; | ||
| 91 | u8 lun[8]; | ||
| 92 | u32 reason; | ||
| 93 | } __packed; | ||
| 94 | |||
| 95 | struct virtio_scsi_config { | ||
| 96 | u32 num_queues; | ||
| 97 | u32 seg_max; | ||
| 98 | u32 max_sectors; | ||
| 99 | u32 cmd_per_lun; | ||
| 100 | u32 event_info_size; | ||
| 101 | u32 sense_size; | ||
| 102 | u32 cdb_size; | ||
| 103 | u16 max_channel; | ||
| 104 | u16 max_target; | ||
| 105 | u32 max_lun; | ||
| 106 | } __packed; | ||
| 107 | |||
| 108 | /* Feature Bits */ | ||
| 109 | #define VIRTIO_SCSI_F_INOUT 0 | ||
| 110 | #define VIRTIO_SCSI_F_HOTPLUG 1 | ||
| 111 | #define VIRTIO_SCSI_F_CHANGE 2 | ||
| 112 | #define VIRTIO_SCSI_F_T10_PI 3 | ||
| 113 | |||
| 114 | /* Response codes */ | ||
| 115 | #define VIRTIO_SCSI_S_OK 0 | ||
| 116 | #define VIRTIO_SCSI_S_OVERRUN 1 | ||
| 117 | #define VIRTIO_SCSI_S_ABORTED 2 | ||
| 118 | #define VIRTIO_SCSI_S_BAD_TARGET 3 | ||
| 119 | #define VIRTIO_SCSI_S_RESET 4 | ||
| 120 | #define VIRTIO_SCSI_S_BUSY 5 | ||
| 121 | #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6 | ||
| 122 | #define VIRTIO_SCSI_S_TARGET_FAILURE 7 | ||
| 123 | #define VIRTIO_SCSI_S_NEXUS_FAILURE 8 | ||
| 124 | #define VIRTIO_SCSI_S_FAILURE 9 | ||
| 125 | #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10 | ||
| 126 | #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11 | ||
| 127 | #define VIRTIO_SCSI_S_INCORRECT_LUN 12 | ||
| 128 | |||
| 129 | /* Controlq type codes. */ | ||
| 130 | #define VIRTIO_SCSI_T_TMF 0 | ||
| 131 | #define VIRTIO_SCSI_T_AN_QUERY 1 | ||
| 132 | #define VIRTIO_SCSI_T_AN_SUBSCRIBE 2 | ||
| 133 | |||
| 134 | /* Valid TMF subtypes. */ | ||
| 135 | #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0 | ||
| 136 | #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1 | ||
| 137 | #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2 | ||
| 138 | #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3 | ||
| 139 | #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4 | ||
| 140 | #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5 | ||
| 141 | #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6 | ||
| 142 | #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7 | ||
| 143 | |||
| 144 | /* Events. */ | ||
| 145 | #define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000 | ||
| 146 | #define VIRTIO_SCSI_T_NO_EVENT 0 | ||
| 147 | #define VIRTIO_SCSI_T_TRANSPORT_RESET 1 | ||
| 148 | #define VIRTIO_SCSI_T_ASYNC_NOTIFY 2 | ||
| 149 | #define VIRTIO_SCSI_T_PARAM_CHANGE 3 | ||
| 150 | |||
| 151 | /* Reasons of transport reset event */ | ||
| 152 | #define VIRTIO_SCSI_EVT_RESET_HARD 0 | ||
| 153 | #define VIRTIO_SCSI_EVT_RESET_RESCAN 1 | ||
| 154 | #define VIRTIO_SCSI_EVT_RESET_REMOVED 2 | ||
| 155 | |||
| 156 | #define VIRTIO_SCSI_S_SIMPLE 0 | ||
| 157 | #define VIRTIO_SCSI_S_ORDERED 1 | ||
| 158 | #define VIRTIO_SCSI_S_HEAD 2 | ||
| 159 | #define VIRTIO_SCSI_S_ACA 3 | ||
| 160 | |||
| 161 | |||
| 162 | #endif /* _LINUX_VIRTIO_SCSI_H */ | ||
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 730334cdf037..9246d32dc973 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h | |||
| @@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 90 | #ifdef CONFIG_DEBUG_VM_VMACACHE | 90 | #ifdef CONFIG_DEBUG_VM_VMACACHE |
| 91 | VMACACHE_FIND_CALLS, | 91 | VMACACHE_FIND_CALLS, |
| 92 | VMACACHE_FIND_HITS, | 92 | VMACACHE_FIND_HITS, |
| 93 | VMACACHE_FULL_FLUSHES, | ||
| 93 | #endif | 94 | #endif |
| 94 | NR_VM_EVENT_ITEMS | 95 | NR_VM_EVENT_ITEMS |
| 95 | }; | 96 | }; |
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 023430e265fe..5691f752ce8f 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define VMCI_KERNEL_API_VERSION_2 2 | 24 | #define VMCI_KERNEL_API_VERSION_2 2 |
| 25 | #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 | 25 | #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 |
| 26 | 26 | ||
| 27 | struct msghdr; | ||
| 27 | typedef void (vmci_device_shutdown_fn) (void *device_registration, | 28 | typedef void (vmci_device_shutdown_fn) (void *device_registration, |
| 28 | void *user_data); | 29 | void *user_data); |
| 29 | 30 | ||
| @@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, | |||
| 75 | ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, | 76 | ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, |
| 76 | void *iov, size_t iov_size, int mode); | 77 | void *iov, size_t iov_size, int mode); |
| 77 | ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, | 78 | ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, |
| 78 | void *iov, size_t iov_size, int mode); | 79 | struct msghdr *msg, size_t iov_size, int mode); |
| 79 | ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, | 80 | ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, |
| 80 | int mode); | 81 | int mode); |
| 81 | 82 | ||
| 82 | #endif /* !__VMW_VMCI_API_H__ */ | 83 | #endif /* !__VMW_VMCI_API_H__ */ |
diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 749cde28728b..a3fa537e717a 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h | |||
| @@ -24,12 +24,16 @@ | |||
| 24 | #ifndef _LINUX_VRINGH_H | 24 | #ifndef _LINUX_VRINGH_H |
| 25 | #define _LINUX_VRINGH_H | 25 | #define _LINUX_VRINGH_H |
| 26 | #include <uapi/linux/virtio_ring.h> | 26 | #include <uapi/linux/virtio_ring.h> |
| 27 | #include <linux/virtio_byteorder.h> | ||
| 27 | #include <linux/uio.h> | 28 | #include <linux/uio.h> |
| 28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 29 | #include <asm/barrier.h> | 30 | #include <asm/barrier.h> |
| 30 | 31 | ||
| 31 | /* virtio_ring with information needed for host access. */ | 32 | /* virtio_ring with information needed for host access. */ |
| 32 | struct vringh { | 33 | struct vringh { |
| 34 | /* Everything is little endian */ | ||
| 35 | bool little_endian; | ||
| 36 | |||
| 33 | /* Guest publishes used event idx (note: we always do). */ | 37 | /* Guest publishes used event idx (note: we always do). */ |
| 34 | bool event_indices; | 38 | bool event_indices; |
| 35 | 39 | ||
| @@ -105,7 +109,7 @@ struct vringh_kiov { | |||
| 105 | #define VRINGH_IOV_ALLOCATED 0x8000000 | 109 | #define VRINGH_IOV_ALLOCATED 0x8000000 |
| 106 | 110 | ||
| 107 | /* Helpers for userspace vrings. */ | 111 | /* Helpers for userspace vrings. */ |
| 108 | int vringh_init_user(struct vringh *vrh, u32 features, | 112 | int vringh_init_user(struct vringh *vrh, u64 features, |
| 109 | unsigned int num, bool weak_barriers, | 113 | unsigned int num, bool weak_barriers, |
| 110 | struct vring_desc __user *desc, | 114 | struct vring_desc __user *desc, |
| 111 | struct vring_avail __user *avail, | 115 | struct vring_avail __user *avail, |
| @@ -167,7 +171,7 @@ bool vringh_notify_enable_user(struct vringh *vrh); | |||
| 167 | void vringh_notify_disable_user(struct vringh *vrh); | 171 | void vringh_notify_disable_user(struct vringh *vrh); |
| 168 | 172 | ||
| 169 | /* Helpers for kernelspace vrings. */ | 173 | /* Helpers for kernelspace vrings. */ |
| 170 | int vringh_init_kern(struct vringh *vrh, u32 features, | 174 | int vringh_init_kern(struct vringh *vrh, u64 features, |
| 171 | unsigned int num, bool weak_barriers, | 175 | unsigned int num, bool weak_barriers, |
| 172 | struct vring_desc *desc, | 176 | struct vring_desc *desc, |
| 173 | struct vring_avail *avail, | 177 | struct vring_avail *avail, |
| @@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh) | |||
| 222 | vrh->notify(vrh); | 226 | vrh->notify(vrh); |
| 223 | } | 227 | } |
| 224 | 228 | ||
| 229 | static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) | ||
| 230 | { | ||
| 231 | return __virtio16_to_cpu(vrh->little_endian, val); | ||
| 232 | } | ||
| 233 | |||
| 234 | static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) | ||
| 235 | { | ||
| 236 | return __cpu_to_virtio16(vrh->little_endian, val); | ||
| 237 | } | ||
| 238 | |||
| 239 | static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) | ||
| 240 | { | ||
| 241 | return __virtio32_to_cpu(vrh->little_endian, val); | ||
| 242 | } | ||
| 243 | |||
| 244 | static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) | ||
| 245 | { | ||
| 246 | return __cpu_to_virtio32(vrh->little_endian, val); | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) | ||
| 250 | { | ||
| 251 | return __virtio64_to_cpu(vrh->little_endian, val); | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) | ||
| 255 | { | ||
| 256 | return __cpu_to_virtio64(vrh->little_endian, val); | ||
| 257 | } | ||
| 225 | #endif /* _LINUX_VRINGH_H */ | 258 | #endif /* _LINUX_VRINGH_H */ |
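struct vringh now records the negotiated endianness, so host-side ring code can use the vringhXX_to_cpu()/cpu_to_vringhXX() wrappers for every descriptor field. A short sketch, assuming the uapi vring_desc fields use the __virtio32/__virtio16 types introduced by this series:

#include <linux/vringh.h>

/* Read a descriptor length in CPU byte order, honouring vrh->little_endian. */
static u32 example_desc_len(const struct vringh *vrh,
			    const struct vring_desc *desc)
{
	return vringh32_to_cpu(vrh, desc->len);
}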
