Diffstat (limited to 'include/linux')
293 files changed, 18315 insertions, 4829 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index f57c440642cd..d2445fa9999f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -53,11 +53,24 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | |||
| 53 | return adev ? adev->handle : NULL; | 53 | return adev ? adev->handle : NULL; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | #define ACPI_COMPANION(dev) acpi_node((dev)->fwnode) | 56 | #define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode) |
| 57 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ | 57 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ |
| 58 | acpi_fwnode_handle(adev) : NULL) | 58 | acpi_fwnode_handle(adev) : NULL) |
| 59 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) | 59 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
| 60 | 60 | ||
| 61 | /** | ||
| 62 | * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with | ||
| 63 | * the PCI-defined class-code information | ||
| 64 | * | ||
| 65 | * @_cls : the class, subclass, prog-if triple for this device | ||
| 66 | * @_msk : the class mask for this device | ||
| 67 | * | ||
| 68 | * This macro is used to create a struct acpi_device_id that matches a | ||
| 69 | * specific PCI class. The .id and .driver_data fields will be left | ||
| 70 | * initialized with the default value. | ||
| 71 | */ | ||
| 72 | #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk), | ||
| 73 | |||
| 61 | static inline bool has_acpi_companion(struct device *dev) | 74 | static inline bool has_acpi_companion(struct device *dev) |
| 62 | { | 75 | { |
| 63 | return is_acpi_node(dev->fwnode); | 76 | return is_acpi_node(dev->fwnode); |
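A hedged illustration of the new ACPI_DEVICE_CLASS() helper added above (the table name is hypothetical; PCI_CLASS_STORAGE_SATA_AHCI comes from <linux/pci_ids.h>, and <linux/acpi.h>/<linux/module.h> are assumed to be included):

	/* Sketch: an acpi_device_id table entry that matches on a PCI class
	 * code rather than a _HID.  ACPI_DEVICE_CLASS() only fills in
	 * .cls/.cls_msk; .id and .driver_data keep their default (zero) values.
	 */
	static const struct acpi_device_id example_acpi_match[] = {
		{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, example_acpi_match);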
| @@ -158,6 +171,16 @@ typedef u32 phys_cpuid_t; | |||
| 158 | #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) | 171 | #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) |
| 159 | #endif | 172 | #endif |
| 160 | 173 | ||
| 174 | static inline bool invalid_logical_cpuid(u32 cpuid) | ||
| 175 | { | ||
| 176 | return (int)cpuid < 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) | ||
| 180 | { | ||
| 181 | return phys_id == PHYS_CPUID_INVALID; | ||
| 182 | } | ||
| 183 | |||
| 161 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 184 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
| 162 | /* Arch dependent functions for cpu hotplug support */ | 185 | /* Arch dependent functions for cpu hotplug support */ |
| 163 | int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); | 186 | int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); |
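A hedged sketch of how the new invalid_logical_cpuid()/invalid_phys_cpuid() helpers pair with the hotplug hook declared above (example_add_cpu is a hypothetical caller and assumes CONFIG_ACPI_HOTPLUG_CPU so that acpi_map_cpu() is available):

	/* Reject an unmapped physical ID up front, then validate the logical
	 * CPU number that acpi_map_cpu() hands back.
	 */
	static int example_add_cpu(acpi_handle handle, phys_cpuid_t phys_id)
	{
		int cpu;

		if (invalid_phys_cpuid(phys_id))
			return -ENODEV;

		if (acpi_map_cpu(handle, phys_id, &cpu))
			return -EIO;

		return invalid_logical_cpuid(cpu) ? -ENODEV : cpu;
	}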
| @@ -243,54 +266,21 @@ extern bool wmi_has_guid(const char *guid); | |||
| 243 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 | 266 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 |
| 244 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 | 267 | #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 |
| 245 | 268 | ||
| 246 | #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) | 269 | extern char acpi_video_backlight_string[]; |
| 247 | |||
| 248 | extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle); | ||
| 249 | extern long acpi_is_video_device(acpi_handle handle); | 270 | extern long acpi_is_video_device(acpi_handle handle); |
| 250 | extern void acpi_video_dmi_promote_vendor(void); | ||
| 251 | extern void acpi_video_dmi_demote_vendor(void); | ||
| 252 | extern int acpi_video_backlight_support(void); | ||
| 253 | extern int acpi_video_display_switch_support(void); | ||
| 254 | |||
| 255 | #else | ||
| 256 | |||
| 257 | static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle) | ||
| 258 | { | ||
| 259 | return 0; | ||
| 260 | } | ||
| 261 | |||
| 262 | static inline long acpi_is_video_device(acpi_handle handle) | ||
| 263 | { | ||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | |||
| 267 | static inline void acpi_video_dmi_promote_vendor(void) | ||
| 268 | { | ||
| 269 | } | ||
| 270 | |||
| 271 | static inline void acpi_video_dmi_demote_vendor(void) | ||
| 272 | { | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline int acpi_video_backlight_support(void) | ||
| 276 | { | ||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | |||
| 280 | static inline int acpi_video_display_switch_support(void) | ||
| 281 | { | ||
| 282 | return 0; | ||
| 283 | } | ||
| 284 | |||
| 285 | #endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */ | ||
| 286 | |||
| 287 | extern int acpi_blacklisted(void); | 271 | extern int acpi_blacklisted(void); |
| 288 | extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); | 272 | extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); |
| 289 | extern void acpi_osi_setup(char *str); | 273 | extern void acpi_osi_setup(char *str); |
| 274 | extern bool acpi_osi_is_win8(void); | ||
| 290 | 275 | ||
| 291 | #ifdef CONFIG_ACPI_NUMA | 276 | #ifdef CONFIG_ACPI_NUMA |
| 277 | int acpi_map_pxm_to_online_node(int pxm); | ||
| 292 | int acpi_get_node(acpi_handle handle); | 278 | int acpi_get_node(acpi_handle handle); |
| 293 | #else | 279 | #else |
| 280 | static inline int acpi_map_pxm_to_online_node(int pxm) | ||
| 281 | { | ||
| 282 | return 0; | ||
| 283 | } | ||
| 294 | static inline int acpi_get_node(acpi_handle handle) | 284 | static inline int acpi_get_node(acpi_handle handle) |
| 295 | { | 285 | { |
| 296 | return 0; | 286 | return 0; |
| @@ -440,6 +430,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | |||
| 440 | #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 | 430 | #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 |
| 441 | 431 | ||
| 442 | extern void acpi_early_init(void); | 432 | extern void acpi_early_init(void); |
| 433 | extern void acpi_subsystem_init(void); | ||
| 443 | 434 | ||
| 444 | extern int acpi_nvs_register(__u64 start, __u64 size); | 435 | extern int acpi_nvs_register(__u64 start, __u64 size); |
| 445 | 436 | ||
| @@ -465,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); | |||
| 465 | #define ACPI_COMPANION(dev) (NULL) | 456 | #define ACPI_COMPANION(dev) (NULL) |
| 466 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) | 457 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
| 467 | #define ACPI_HANDLE(dev) (NULL) | 458 | #define ACPI_HANDLE(dev) (NULL) |
| 459 | #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), | ||
| 468 | 460 | ||
| 469 | struct fwnode_handle; | 461 | struct fwnode_handle; |
| 470 | 462 | ||
| @@ -473,7 +465,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) | |||
| 473 | return false; | 465 | return false; |
| 474 | } | 466 | } |
| 475 | 467 | ||
| 476 | static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) | 468 | static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) |
| 477 | { | 469 | { |
| 478 | return NULL; | 470 | return NULL; |
| 479 | } | 471 | } |
| @@ -494,6 +486,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) | |||
| 494 | } | 486 | } |
| 495 | 487 | ||
| 496 | static inline void acpi_early_init(void) { } | 488 | static inline void acpi_early_init(void) { } |
| 489 | static inline void acpi_subsystem_init(void) { } | ||
| 497 | 490 | ||
| 498 | static inline int early_acpi_boot_init(void) | 491 | static inline int early_acpi_boot_init(void) |
| 499 | { | 492 | { |
| @@ -569,6 +562,11 @@ static inline int acpi_device_modalias(struct device *dev, | |||
| 569 | return -ENODEV; | 562 | return -ENODEV; |
| 570 | } | 563 | } |
| 571 | 564 | ||
| 565 | static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) | ||
| 566 | { | ||
| 567 | return false; | ||
| 568 | } | ||
| 569 | |||
| 572 | #define ACPI_PTR(_ptr) (NULL) | 570 | #define ACPI_PTR(_ptr) (NULL) |
| 573 | 571 | ||
| 574 | #endif /* !CONFIG_ACPI */ | 572 | #endif /* !CONFIG_ACPI */ |
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h index c7df89f99115..58fe9e8b6fd7 100644 --- a/include/linux/amba/sp810.h +++ b/include/linux/amba/sp810.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * ARM PrimeXsys System Controller SP810 header file | 2 | * ARM PrimeXsys System Controller SP810 header file |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 ST Microelectronics | 4 | * Copyright (C) 2009 ST Microelectronics |
| 5 | * Viresh Kumar <viresh.linux@gmail.com> | 5 | * Viresh Kumar <vireshk@kernel.org> |
| 6 | * | 6 | * |
| 7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
| 8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/include/linux/ata.h b/include/linux/ata.h index b666b773e111..6c78956aa470 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
| @@ -45,6 +45,7 @@ enum { | |||
| 45 | ATA_SECT_SIZE = 512, | 45 | ATA_SECT_SIZE = 512, |
| 46 | ATA_MAX_SECTORS_128 = 128, | 46 | ATA_MAX_SECTORS_128 = 128, |
| 47 | ATA_MAX_SECTORS = 256, | 47 | ATA_MAX_SECTORS = 256, |
| 48 | ATA_MAX_SECTORS_1024 = 1024, | ||
| 48 | ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ | 49 | ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ |
| 49 | ATA_MAX_SECTORS_TAPE = 65535, | 50 | ATA_MAX_SECTORS_TAPE = 65535, |
| 50 | 51 | ||
| @@ -704,9 +705,19 @@ static inline bool ata_id_wcache_enabled(const u16 *id) | |||
| 704 | 705 | ||
| 705 | static inline bool ata_id_has_read_log_dma_ext(const u16 *id) | 706 | static inline bool ata_id_has_read_log_dma_ext(const u16 *id) |
| 706 | { | 707 | { |
| 708 | /* Word 86 must have bit 15 set */ | ||
| 707 | if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) | 709 | if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) |
| 708 | return false; | 710 | return false; |
| 709 | return id[ATA_ID_COMMAND_SET_3] & (1 << 3); | 711 | |
| 712 | /* READ LOG DMA EXT support can be signaled either from word 119 | ||
| 713 | * or from word 120. The format is the same for both words: Bit | ||
| 714 | * 15 must be cleared, bit 14 set and bit 3 set. | ||
| 715 | */ | ||
| 716 | if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 || | ||
| 717 | (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008) | ||
| 718 | return true; | ||
| 719 | |||
| 720 | return false; | ||
| 710 | } | 721 | } |
| 711 | 722 | ||
| 712 | static inline bool ata_id_has_sense_reporting(const u16 *id) | 723 | static inline bool ata_id_has_sense_reporting(const u16 *id) |
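For readers decoding the mask test added to ata_id_has_read_log_dma_ext(): 0xC008 selects bits 15, 14 and 3, and 0x4008 is the pattern with bit 15 clear, bit 14 set and bit 3 set. A standalone sketch of the same check on a single identify-data word:

	/* Returns true when the feature word is valid (bit 15 clear, bit 14
	 * set) and advertises READ LOG DMA EXT (bit 3 set).
	 */
	static inline bool word_has_read_log_dma_ext(u16 word)
	{
		return (word & 0xC008) == 0x4008;
	}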
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h new file mode 100644 index 000000000000..a23209b43842 --- /dev/null +++ b/include/linux/backing-dev-defs.h | |||
| @@ -0,0 +1,256 @@ | |||
| 1 | #ifndef __LINUX_BACKING_DEV_DEFS_H | ||
| 2 | #define __LINUX_BACKING_DEV_DEFS_H | ||
| 3 | |||
| 4 | #include <linux/list.h> | ||
| 5 | #include <linux/radix-tree.h> | ||
| 6 | #include <linux/rbtree.h> | ||
| 7 | #include <linux/spinlock.h> | ||
| 8 | #include <linux/percpu_counter.h> | ||
| 9 | #include <linux/percpu-refcount.h> | ||
| 10 | #include <linux/flex_proportions.h> | ||
| 11 | #include <linux/timer.h> | ||
| 12 | #include <linux/workqueue.h> | ||
| 13 | |||
| 14 | struct page; | ||
| 15 | struct device; | ||
| 16 | struct dentry; | ||
| 17 | |||
| 18 | /* | ||
| 19 | * Bits in bdi_writeback.state | ||
| 20 | */ | ||
| 21 | enum wb_state { | ||
| 22 | WB_registered, /* bdi_register() was done */ | ||
| 23 | WB_writeback_running, /* Writeback is in progress */ | ||
| 24 | WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ | ||
| 25 | }; | ||
| 26 | |||
| 27 | enum wb_congested_state { | ||
| 28 | WB_async_congested, /* The async (write) queue is getting full */ | ||
| 29 | WB_sync_congested, /* The sync queue is getting full */ | ||
| 30 | }; | ||
| 31 | |||
| 32 | typedef int (congested_fn)(void *, int); | ||
| 33 | |||
| 34 | enum wb_stat_item { | ||
| 35 | WB_RECLAIMABLE, | ||
| 36 | WB_WRITEBACK, | ||
| 37 | WB_DIRTIED, | ||
| 38 | WB_WRITTEN, | ||
| 39 | NR_WB_STAT_ITEMS | ||
| 40 | }; | ||
| 41 | |||
| 42 | #define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) | ||
| 43 | |||
| 44 | /* | ||
| 45 | * For cgroup writeback, multiple wb's may map to the same blkcg. Those | ||
| 46 | * wb's can operate mostly independently but should share the congested | ||
| 47 | * state. To facilitate such sharing, the congested state is tracked using | ||
| 48 | * the following struct which is created on demand, indexed by blkcg ID on | ||
| 49 | * its bdi, and refcounted. | ||
| 50 | */ | ||
| 51 | struct bdi_writeback_congested { | ||
| 52 | unsigned long state; /* WB_[a]sync_congested flags */ | ||
| 53 | atomic_t refcnt; /* nr of attached wb's and blkg */ | ||
| 54 | |||
| 55 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 56 | struct backing_dev_info *bdi; /* the associated bdi */ | ||
| 57 | int blkcg_id; /* ID of the associated blkcg */ | ||
| 58 | struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ | ||
| 59 | #endif | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Each wb (bdi_writeback) can perform writeback operations, is measured | ||
| 64 | * and throttled, independently. Without cgroup writeback, each bdi | ||
| 65 | (backing_dev_info) is served by its embedded bdi->wb. | ||
| 66 | * | ||
| 67 | * On the default hierarchy, blkcg implicitly enables memcg. This allows | ||
| 68 | * using memcg's page ownership for attributing writeback IOs, and every | ||
| 69 | * memcg - blkcg combination can be served by its own wb by assigning a | ||
| 70 | * dedicated wb to each memcg, which enables isolation across different | ||
| 71 | * cgroups and propagation of IO back pressure down from the IO layer up to | ||
| 72 | * the tasks which are generating the dirty pages to be written back. | ||
| 73 | * | ||
| 74 | * A cgroup wb is indexed on its bdi by the ID of the associated memcg, | ||
| 75 | * refcounted with the number of inodes attached to it, and pins the memcg | ||
| 76 | * and the corresponding blkcg. As the corresponding blkcg for a memcg may | ||
| 77 | * change as blkcg is disabled and enabled higher up in the hierarchy, a wb | ||
| 78 | * is tested for blkcg after lookup and removed from index on mismatch so | ||
| 79 | * that a new wb for the combination can be created. | ||
| 80 | */ | ||
| 81 | struct bdi_writeback { | ||
| 82 | struct backing_dev_info *bdi; /* our parent bdi */ | ||
| 83 | |||
| 84 | unsigned long state; /* Always use atomic bitops on this */ | ||
| 85 | unsigned long last_old_flush; /* last old data flush */ | ||
| 86 | |||
| 87 | struct list_head b_dirty; /* dirty inodes */ | ||
| 88 | struct list_head b_io; /* parked for writeback */ | ||
| 89 | struct list_head b_more_io; /* parked for more writeback */ | ||
| 90 | struct list_head b_dirty_time; /* time stamps are dirty */ | ||
| 91 | spinlock_t list_lock; /* protects the b_* lists */ | ||
| 92 | |||
| 93 | struct percpu_counter stat[NR_WB_STAT_ITEMS]; | ||
| 94 | |||
| 95 | struct bdi_writeback_congested *congested; | ||
| 96 | |||
| 97 | unsigned long bw_time_stamp; /* last time write bw is updated */ | ||
| 98 | unsigned long dirtied_stamp; | ||
| 99 | unsigned long written_stamp; /* pages written at bw_time_stamp */ | ||
| 100 | unsigned long write_bandwidth; /* the estimated write bandwidth */ | ||
| 101 | unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */ | ||
| 102 | |||
| 103 | /* | ||
| 104 | * The base dirty throttle rate, re-calculated on every 200ms. | ||
| 105 | * All the bdi tasks' dirty rate will be curbed under it. | ||
| 106 | * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit | ||
| 107 | * in small steps and is much more smooth/stable than the latter. | ||
| 108 | */ | ||
| 109 | unsigned long dirty_ratelimit; | ||
| 110 | unsigned long balanced_dirty_ratelimit; | ||
| 111 | |||
| 112 | struct fprop_local_percpu completions; | ||
| 113 | int dirty_exceeded; | ||
| 114 | |||
| 115 | spinlock_t work_lock; /* protects work_list & dwork scheduling */ | ||
| 116 | struct list_head work_list; | ||
| 117 | struct delayed_work dwork; /* work item used for writeback */ | ||
| 118 | |||
| 119 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 120 | struct percpu_ref refcnt; /* used only for !root wb's */ | ||
| 121 | struct fprop_local_percpu memcg_completions; | ||
| 122 | struct cgroup_subsys_state *memcg_css; /* the associated memcg */ | ||
| 123 | struct cgroup_subsys_state *blkcg_css; /* and blkcg */ | ||
| 124 | struct list_head memcg_node; /* anchored at memcg->cgwb_list */ | ||
| 125 | struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */ | ||
| 126 | |||
| 127 | union { | ||
| 128 | struct work_struct release_work; | ||
| 129 | struct rcu_head rcu; | ||
| 130 | }; | ||
| 131 | #endif | ||
| 132 | }; | ||
| 133 | |||
| 134 | struct backing_dev_info { | ||
| 135 | struct list_head bdi_list; | ||
| 136 | unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ | ||
| 137 | unsigned int capabilities; /* Device capabilities */ | ||
| 138 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ | ||
| 139 | void *congested_data; /* Pointer to aux data for congested func */ | ||
| 140 | |||
| 141 | char *name; | ||
| 142 | |||
| 143 | unsigned int min_ratio; | ||
| 144 | unsigned int max_ratio, max_prop_frac; | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are | ||
| 148 | * any dirty wbs, which is depended upon by bdi_has_dirty(). | ||
| 149 | */ | ||
| 150 | atomic_long_t tot_write_bandwidth; | ||
| 151 | |||
| 152 | struct bdi_writeback wb; /* the root writeback info for this bdi */ | ||
| 153 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 154 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | ||
| 155 | struct rb_root cgwb_congested_tree; /* their congested states */ | ||
| 156 | atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */ | ||
| 157 | #else | ||
| 158 | struct bdi_writeback_congested *wb_congested; | ||
| 159 | #endif | ||
| 160 | wait_queue_head_t wb_waitq; | ||
| 161 | |||
| 162 | struct device *dev; | ||
| 163 | |||
| 164 | struct timer_list laptop_mode_wb_timer; | ||
| 165 | |||
| 166 | #ifdef CONFIG_DEBUG_FS | ||
| 167 | struct dentry *debug_dir; | ||
| 168 | struct dentry *debug_stats; | ||
| 169 | #endif | ||
| 170 | }; | ||
| 171 | |||
| 172 | enum { | ||
| 173 | BLK_RW_ASYNC = 0, | ||
| 174 | BLK_RW_SYNC = 1, | ||
| 175 | }; | ||
| 176 | |||
| 177 | void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); | ||
| 178 | void set_wb_congested(struct bdi_writeback_congested *congested, int sync); | ||
| 179 | |||
| 180 | static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) | ||
| 181 | { | ||
| 182 | clear_wb_congested(bdi->wb.congested, sync); | ||
| 183 | } | ||
| 184 | |||
| 185 | static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) | ||
| 186 | { | ||
| 187 | set_wb_congested(bdi->wb.congested, sync); | ||
| 188 | } | ||
| 189 | |||
| 190 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 191 | |||
| 192 | /** | ||
| 193 | * wb_tryget - try to increment a wb's refcount | ||
| 194 | * @wb: bdi_writeback to get | ||
| 195 | */ | ||
| 196 | static inline bool wb_tryget(struct bdi_writeback *wb) | ||
| 197 | { | ||
| 198 | if (wb != &wb->bdi->wb) | ||
| 199 | return percpu_ref_tryget(&wb->refcnt); | ||
| 200 | return true; | ||
| 201 | } | ||
| 202 | |||
| 203 | /** | ||
| 204 | * wb_get - increment a wb's refcount | ||
| 205 | * @wb: bdi_writeback to get | ||
| 206 | */ | ||
| 207 | static inline void wb_get(struct bdi_writeback *wb) | ||
| 208 | { | ||
| 209 | if (wb != &wb->bdi->wb) | ||
| 210 | percpu_ref_get(&wb->refcnt); | ||
| 211 | } | ||
| 212 | |||
| 213 | /** | ||
| 214 | * wb_put - decrement a wb's refcount | ||
| 215 | * @wb: bdi_writeback to put | ||
| 216 | */ | ||
| 217 | static inline void wb_put(struct bdi_writeback *wb) | ||
| 218 | { | ||
| 219 | if (wb != &wb->bdi->wb) | ||
| 220 | percpu_ref_put(&wb->refcnt); | ||
| 221 | } | ||
| 222 | |||
| 223 | /** | ||
| 224 | * wb_dying - is a wb dying? | ||
| 225 | * @wb: bdi_writeback of interest | ||
| 226 | * | ||
| 227 | * Returns whether @wb is unlinked and being drained. | ||
| 228 | */ | ||
| 229 | static inline bool wb_dying(struct bdi_writeback *wb) | ||
| 230 | { | ||
| 231 | return percpu_ref_is_dying(&wb->refcnt); | ||
| 232 | } | ||
| 233 | |||
| 234 | #else /* CONFIG_CGROUP_WRITEBACK */ | ||
| 235 | |||
| 236 | static inline bool wb_tryget(struct bdi_writeback *wb) | ||
| 237 | { | ||
| 238 | return true; | ||
| 239 | } | ||
| 240 | |||
| 241 | static inline void wb_get(struct bdi_writeback *wb) | ||
| 242 | { | ||
| 243 | } | ||
| 244 | |||
| 245 | static inline void wb_put(struct bdi_writeback *wb) | ||
| 246 | { | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline bool wb_dying(struct bdi_writeback *wb) | ||
| 250 | { | ||
| 251 | return false; | ||
| 252 | } | ||
| 253 | |||
| 254 | #endif /* CONFIG_CGROUP_WRITEBACK */ | ||
| 255 | |||
| 256 | #endif /* __LINUX_BACKING_DEV_DEFS_H */ | ||
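A hedged usage sketch for the wb_tryget()/wb_put()/wb_dying() helpers introduced above (the callers below are hypothetical): a non-root cgroup wb is pinned through its percpu_ref, while the root bdi->wb is implicitly long-lived, which is why the helpers test wb != &wb->bdi->wb internally.

	/* Pin a wb for asynchronous use; returns NULL if the wb is already
	 * dying and the caller should look up (or create) a fresh one.
	 */
	static struct bdi_writeback *example_pin_wb(struct bdi_writeback *wb)
	{
		if (wb_dying(wb) || !wb_tryget(wb))
			return NULL;
		return wb;
	}

	/* Drop the reference taken above once the async work is done. */
	static void example_unpin_wb(struct bdi_writeback *wb)
	{
		wb_put(wb);
	}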
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index d87d8eced064..0fe9df983ab7 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -8,106 +8,14 @@ | |||
| 8 | #ifndef _LINUX_BACKING_DEV_H | 8 | #ifndef _LINUX_BACKING_DEV_H |
| 9 | #define _LINUX_BACKING_DEV_H | 9 | #define _LINUX_BACKING_DEV_H |
| 10 | 10 | ||
| 11 | #include <linux/percpu_counter.h> | ||
| 12 | #include <linux/log2.h> | ||
| 13 | #include <linux/flex_proportions.h> | ||
| 14 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
| 15 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| 16 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
| 17 | #include <linux/timer.h> | 14 | #include <linux/blkdev.h> |
| 18 | #include <linux/writeback.h> | 15 | #include <linux/writeback.h> |
| 19 | #include <linux/atomic.h> | 16 | #include <linux/blk-cgroup.h> |
| 20 | #include <linux/sysctl.h> | 17 | #include <linux/backing-dev-defs.h> |
| 21 | #include <linux/workqueue.h> | 18 | #include <linux/slab.h> |
| 22 | |||
| 23 | struct page; | ||
| 24 | struct device; | ||
| 25 | struct dentry; | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Bits in backing_dev_info.state | ||
| 29 | */ | ||
| 30 | enum bdi_state { | ||
| 31 | BDI_async_congested, /* The async (write) queue is getting full */ | ||
| 32 | BDI_sync_congested, /* The sync queue is getting full */ | ||
| 33 | BDI_registered, /* bdi_register() was done */ | ||
| 34 | BDI_writeback_running, /* Writeback is in progress */ | ||
| 35 | }; | ||
| 36 | |||
| 37 | typedef int (congested_fn)(void *, int); | ||
| 38 | |||
| 39 | enum bdi_stat_item { | ||
| 40 | BDI_RECLAIMABLE, | ||
| 41 | BDI_WRITEBACK, | ||
| 42 | BDI_DIRTIED, | ||
| 43 | BDI_WRITTEN, | ||
| 44 | NR_BDI_STAT_ITEMS | ||
| 45 | }; | ||
| 46 | |||
| 47 | #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) | ||
| 48 | |||
| 49 | struct bdi_writeback { | ||
| 50 | struct backing_dev_info *bdi; /* our parent bdi */ | ||
| 51 | |||
| 52 | unsigned long last_old_flush; /* last old data flush */ | ||
| 53 | |||
| 54 | struct delayed_work dwork; /* work item used for writeback */ | ||
| 55 | struct list_head b_dirty; /* dirty inodes */ | ||
| 56 | struct list_head b_io; /* parked for writeback */ | ||
| 57 | struct list_head b_more_io; /* parked for more writeback */ | ||
| 58 | struct list_head b_dirty_time; /* time stamps are dirty */ | ||
| 59 | spinlock_t list_lock; /* protects the b_* lists */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct backing_dev_info { | ||
| 63 | struct list_head bdi_list; | ||
| 64 | unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ | ||
| 65 | unsigned long state; /* Always use atomic bitops on this */ | ||
| 66 | unsigned int capabilities; /* Device capabilities */ | ||
| 67 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ | ||
| 68 | void *congested_data; /* Pointer to aux data for congested func */ | ||
| 69 | |||
| 70 | char *name; | ||
| 71 | |||
| 72 | struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; | ||
| 73 | |||
| 74 | unsigned long bw_time_stamp; /* last time write bw is updated */ | ||
| 75 | unsigned long dirtied_stamp; | ||
| 76 | unsigned long written_stamp; /* pages written at bw_time_stamp */ | ||
| 77 | unsigned long write_bandwidth; /* the estimated write bandwidth */ | ||
| 78 | unsigned long avg_write_bandwidth; /* further smoothed write bw */ | ||
| 79 | |||
| 80 | /* | ||
| 81 | * The base dirty throttle rate, re-calculated on every 200ms. | ||
| 82 | * All the bdi tasks' dirty rate will be curbed under it. | ||
| 83 | * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit | ||
| 84 | * in small steps and is much more smooth/stable than the latter. | ||
| 85 | */ | ||
| 86 | unsigned long dirty_ratelimit; | ||
| 87 | unsigned long balanced_dirty_ratelimit; | ||
| 88 | |||
| 89 | struct fprop_local_percpu completions; | ||
| 90 | int dirty_exceeded; | ||
| 91 | |||
| 92 | unsigned int min_ratio; | ||
| 93 | unsigned int max_ratio, max_prop_frac; | ||
| 94 | |||
| 95 | struct bdi_writeback wb; /* default writeback info for this bdi */ | ||
| 96 | spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */ | ||
| 97 | |||
| 98 | struct list_head work_list; | ||
| 99 | |||
| 100 | struct device *dev; | ||
| 101 | |||
| 102 | struct timer_list laptop_mode_wb_timer; | ||
| 103 | |||
| 104 | #ifdef CONFIG_DEBUG_FS | ||
| 105 | struct dentry *debug_dir; | ||
| 106 | struct dentry *debug_stats; | ||
| 107 | #endif | ||
| 108 | }; | ||
| 109 | |||
| 110 | struct backing_dev_info *inode_to_bdi(struct inode *inode); | ||
| 111 | 19 | ||
| 112 | int __must_check bdi_init(struct backing_dev_info *bdi); | 20 | int __must_check bdi_init(struct backing_dev_info *bdi); |
| 113 | void bdi_destroy(struct backing_dev_info *bdi); | 21 | void bdi_destroy(struct backing_dev_info *bdi); |
| @@ -117,97 +25,99 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, | |||
| 117 | const char *fmt, ...); | 25 | const char *fmt, ...); |
| 118 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); | 26 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
| 119 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); | 27 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); |
| 120 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, | 28 | void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, |
| 121 | enum wb_reason reason); | 29 | bool range_cyclic, enum wb_reason reason); |
| 122 | void bdi_start_background_writeback(struct backing_dev_info *bdi); | 30 | void wb_start_background_writeback(struct bdi_writeback *wb); |
| 123 | void bdi_writeback_workfn(struct work_struct *work); | 31 | void wb_workfn(struct work_struct *work); |
| 124 | int bdi_has_dirty_io(struct backing_dev_info *bdi); | 32 | void wb_wakeup_delayed(struct bdi_writeback *wb); |
| 125 | void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); | ||
| 126 | 33 | ||
| 127 | extern spinlock_t bdi_lock; | 34 | extern spinlock_t bdi_lock; |
| 128 | extern struct list_head bdi_list; | 35 | extern struct list_head bdi_list; |
| 129 | 36 | ||
| 130 | extern struct workqueue_struct *bdi_wq; | 37 | extern struct workqueue_struct *bdi_wq; |
| 131 | 38 | ||
| 132 | static inline int wb_has_dirty_io(struct bdi_writeback *wb) | 39 | static inline bool wb_has_dirty_io(struct bdi_writeback *wb) |
| 133 | { | 40 | { |
| 134 | return !list_empty(&wb->b_dirty) || | 41 | return test_bit(WB_has_dirty_io, &wb->state); |
| 135 | !list_empty(&wb->b_io) || | 42 | } |
| 136 | !list_empty(&wb->b_more_io); | 43 | |
| 44 | static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) | ||
| 45 | { | ||
| 46 | /* | ||
| 47 | * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are | ||
| 48 | * any dirty wbs. See wb_update_write_bandwidth(). | ||
| 49 | */ | ||
| 50 | return atomic_long_read(&bdi->tot_write_bandwidth); | ||
| 137 | } | 51 | } |
| 138 | 52 | ||
| 139 | static inline void __add_bdi_stat(struct backing_dev_info *bdi, | 53 | static inline void __add_wb_stat(struct bdi_writeback *wb, |
| 140 | enum bdi_stat_item item, s64 amount) | 54 | enum wb_stat_item item, s64 amount) |
| 141 | { | 55 | { |
| 142 | __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH); | 56 | __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH); |
| 143 | } | 57 | } |
| 144 | 58 | ||
| 145 | static inline void __inc_bdi_stat(struct backing_dev_info *bdi, | 59 | static inline void __inc_wb_stat(struct bdi_writeback *wb, |
| 146 | enum bdi_stat_item item) | 60 | enum wb_stat_item item) |
| 147 | { | 61 | { |
| 148 | __add_bdi_stat(bdi, item, 1); | 62 | __add_wb_stat(wb, item, 1); |
| 149 | } | 63 | } |
| 150 | 64 | ||
| 151 | static inline void inc_bdi_stat(struct backing_dev_info *bdi, | 65 | static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) |
| 152 | enum bdi_stat_item item) | ||
| 153 | { | 66 | { |
| 154 | unsigned long flags; | 67 | unsigned long flags; |
| 155 | 68 | ||
| 156 | local_irq_save(flags); | 69 | local_irq_save(flags); |
| 157 | __inc_bdi_stat(bdi, item); | 70 | __inc_wb_stat(wb, item); |
| 158 | local_irq_restore(flags); | 71 | local_irq_restore(flags); |
| 159 | } | 72 | } |
| 160 | 73 | ||
| 161 | static inline void __dec_bdi_stat(struct backing_dev_info *bdi, | 74 | static inline void __dec_wb_stat(struct bdi_writeback *wb, |
| 162 | enum bdi_stat_item item) | 75 | enum wb_stat_item item) |
| 163 | { | 76 | { |
| 164 | __add_bdi_stat(bdi, item, -1); | 77 | __add_wb_stat(wb, item, -1); |
| 165 | } | 78 | } |
| 166 | 79 | ||
| 167 | static inline void dec_bdi_stat(struct backing_dev_info *bdi, | 80 | static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) |
| 168 | enum bdi_stat_item item) | ||
| 169 | { | 81 | { |
| 170 | unsigned long flags; | 82 | unsigned long flags; |
| 171 | 83 | ||
| 172 | local_irq_save(flags); | 84 | local_irq_save(flags); |
| 173 | __dec_bdi_stat(bdi, item); | 85 | __dec_wb_stat(wb, item); |
| 174 | local_irq_restore(flags); | 86 | local_irq_restore(flags); |
| 175 | } | 87 | } |
| 176 | 88 | ||
| 177 | static inline s64 bdi_stat(struct backing_dev_info *bdi, | 89 | static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) |
| 178 | enum bdi_stat_item item) | ||
| 179 | { | 90 | { |
| 180 | return percpu_counter_read_positive(&bdi->bdi_stat[item]); | 91 | return percpu_counter_read_positive(&wb->stat[item]); |
| 181 | } | 92 | } |
| 182 | 93 | ||
| 183 | static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi, | 94 | static inline s64 __wb_stat_sum(struct bdi_writeback *wb, |
| 184 | enum bdi_stat_item item) | 95 | enum wb_stat_item item) |
| 185 | { | 96 | { |
| 186 | return percpu_counter_sum_positive(&bdi->bdi_stat[item]); | 97 | return percpu_counter_sum_positive(&wb->stat[item]); |
| 187 | } | 98 | } |
| 188 | 99 | ||
| 189 | static inline s64 bdi_stat_sum(struct backing_dev_info *bdi, | 100 | static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) |
| 190 | enum bdi_stat_item item) | ||
| 191 | { | 101 | { |
| 192 | s64 sum; | 102 | s64 sum; |
| 193 | unsigned long flags; | 103 | unsigned long flags; |
| 194 | 104 | ||
| 195 | local_irq_save(flags); | 105 | local_irq_save(flags); |
| 196 | sum = __bdi_stat_sum(bdi, item); | 106 | sum = __wb_stat_sum(wb, item); |
| 197 | local_irq_restore(flags); | 107 | local_irq_restore(flags); |
| 198 | 108 | ||
| 199 | return sum; | 109 | return sum; |
| 200 | } | 110 | } |
| 201 | 111 | ||
| 202 | extern void bdi_writeout_inc(struct backing_dev_info *bdi); | 112 | extern void wb_writeout_inc(struct bdi_writeback *wb); |
| 203 | 113 | ||
| 204 | /* | 114 | /* |
| 205 | * maximal error of a stat counter. | 115 | * maximal error of a stat counter. |
| 206 | */ | 116 | */ |
| 207 | static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi) | 117 | static inline unsigned long wb_stat_error(struct bdi_writeback *wb) |
| 208 | { | 118 | { |
| 209 | #ifdef CONFIG_SMP | 119 | #ifdef CONFIG_SMP |
| 210 | return nr_cpu_ids * BDI_STAT_BATCH; | 120 | return nr_cpu_ids * WB_STAT_BATCH; |
| 211 | #else | 121 | #else |
| 212 | return 1; | 122 | return 1; |
| 213 | #endif | 123 | #endif |
| @@ -231,50 +141,57 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); | |||
| 231 | * BDI_CAP_NO_WRITEBACK: Don't write pages back | 141 | * BDI_CAP_NO_WRITEBACK: Don't write pages back |
| 232 | * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages | 142 | * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages |
| 233 | * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. | 143 | * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. |
| 144 | * | ||
| 145 | * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback. | ||
| 234 | */ | 146 | */ |
| 235 | #define BDI_CAP_NO_ACCT_DIRTY 0x00000001 | 147 | #define BDI_CAP_NO_ACCT_DIRTY 0x00000001 |
| 236 | #define BDI_CAP_NO_WRITEBACK 0x00000002 | 148 | #define BDI_CAP_NO_WRITEBACK 0x00000002 |
| 237 | #define BDI_CAP_NO_ACCT_WB 0x00000004 | 149 | #define BDI_CAP_NO_ACCT_WB 0x00000004 |
| 238 | #define BDI_CAP_STABLE_WRITES 0x00000008 | 150 | #define BDI_CAP_STABLE_WRITES 0x00000008 |
| 239 | #define BDI_CAP_STRICTLIMIT 0x00000010 | 151 | #define BDI_CAP_STRICTLIMIT 0x00000010 |
| 152 | #define BDI_CAP_CGROUP_WRITEBACK 0x00000020 | ||
| 240 | 153 | ||
| 241 | #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ | 154 | #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ |
| 242 | (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) | 155 | (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) |
| 243 | 156 | ||
| 244 | extern struct backing_dev_info noop_backing_dev_info; | 157 | extern struct backing_dev_info noop_backing_dev_info; |
| 245 | 158 | ||
| 246 | int writeback_in_progress(struct backing_dev_info *bdi); | 159 | /** |
| 247 | 160 | * writeback_in_progress - determine whether there is writeback in progress | |
| 248 | static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) | 161 | * @wb: bdi_writeback of interest |
| 162 | * | ||
| 163 | * Determine whether there is writeback waiting to be handled against a | ||
| 164 | * bdi_writeback. | ||
| 165 | */ | ||
| 166 | static inline bool writeback_in_progress(struct bdi_writeback *wb) | ||
| 249 | { | 167 | { |
| 250 | if (bdi->congested_fn) | 168 | return test_bit(WB_writeback_running, &wb->state); |
| 251 | return bdi->congested_fn(bdi->congested_data, bdi_bits); | ||
| 252 | return (bdi->state & bdi_bits); | ||
| 253 | } | 169 | } |
| 254 | 170 | ||
| 255 | static inline int bdi_read_congested(struct backing_dev_info *bdi) | 171 | static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) |
| 256 | { | 172 | { |
| 257 | return bdi_congested(bdi, 1 << BDI_sync_congested); | 173 | struct super_block *sb; |
| 258 | } | ||
| 259 | 174 | ||
| 260 | static inline int bdi_write_congested(struct backing_dev_info *bdi) | 175 | if (!inode) |
| 261 | { | 176 | return &noop_backing_dev_info; |
| 262 | return bdi_congested(bdi, 1 << BDI_async_congested); | 177 | |
| 178 | sb = inode->i_sb; | ||
| 179 | #ifdef CONFIG_BLOCK | ||
| 180 | if (sb_is_blkdev_sb(sb)) | ||
| 181 | return blk_get_backing_dev_info(I_BDEV(inode)); | ||
| 182 | #endif | ||
| 183 | return sb->s_bdi; | ||
| 263 | } | 184 | } |
| 264 | 185 | ||
| 265 | static inline int bdi_rw_congested(struct backing_dev_info *bdi) | 186 | static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) |
| 266 | { | 187 | { |
| 267 | return bdi_congested(bdi, (1 << BDI_sync_congested) | | 188 | struct backing_dev_info *bdi = wb->bdi; |
| 268 | (1 << BDI_async_congested)); | ||
| 269 | } | ||
| 270 | 189 | ||
| 271 | enum { | 190 | if (bdi->congested_fn) |
| 272 | BLK_RW_ASYNC = 0, | 191 | return bdi->congested_fn(bdi->congested_data, cong_bits); |
| 273 | BLK_RW_SYNC = 1, | 192 | return wb->congested->state & cong_bits; |
| 274 | }; | 193 | } |
| 275 | 194 | ||
| 276 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync); | ||
| 277 | void set_bdi_congested(struct backing_dev_info *bdi, int sync); | ||
| 278 | long congestion_wait(int sync, long timeout); | 195 | long congestion_wait(int sync, long timeout); |
| 279 | long wait_iff_congested(struct zone *zone, int sync, long timeout); | 196 | long wait_iff_congested(struct zone *zone, int sync, long timeout); |
| 280 | int pdflush_proc_obsolete(struct ctl_table *table, int write, | 197 | int pdflush_proc_obsolete(struct ctl_table *table, int write, |
| @@ -318,4 +235,336 @@ static inline int bdi_sched_wait(void *word) | |||
| 318 | return 0; | 235 | return 0; |
| 319 | } | 236 | } |
| 320 | 237 | ||
| 321 | #endif /* _LINUX_BACKING_DEV_H */ | 238 | #ifdef CONFIG_CGROUP_WRITEBACK |
| 239 | |||
| 240 | struct bdi_writeback_congested * | ||
| 241 | wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); | ||
| 242 | void wb_congested_put(struct bdi_writeback_congested *congested); | ||
| 243 | struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, | ||
| 244 | struct cgroup_subsys_state *memcg_css, | ||
| 245 | gfp_t gfp); | ||
| 246 | void wb_memcg_offline(struct mem_cgroup *memcg); | ||
| 247 | void wb_blkcg_offline(struct blkcg *blkcg); | ||
| 248 | int inode_congested(struct inode *inode, int cong_bits); | ||
| 249 | |||
| 250 | /** | ||
| 251 | * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode | ||
| 252 | * @inode: inode of interest | ||
| 253 | * | ||
| 254 | * cgroup writeback requires support from both the bdi and filesystem. | ||
| 255 | * Test whether @inode has both. | ||
| 256 | */ | ||
| 257 | static inline bool inode_cgwb_enabled(struct inode *inode) | ||
| 258 | { | ||
| 259 | struct backing_dev_info *bdi = inode_to_bdi(inode); | ||
| 260 | |||
| 261 | return bdi_cap_account_dirty(bdi) && | ||
| 262 | (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && | ||
| 263 | (inode->i_sb->s_iflags & SB_I_CGROUPWB); | ||
| 264 | } | ||
| 265 | |||
| 266 | /** | ||
| 267 | * wb_find_current - find wb for %current on a bdi | ||
| 268 | * @bdi: bdi of interest | ||
| 269 | * | ||
| 270 | * Find the wb of @bdi which matches both the memcg and blkcg of %current. | ||
| 271 | * Must be called under rcu_read_lock() which protects the returned wb. | ||
| 272 | * NULL if not found. | ||
| 273 | */ | ||
| 274 | static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) | ||
| 275 | { | ||
| 276 | struct cgroup_subsys_state *memcg_css; | ||
| 277 | struct bdi_writeback *wb; | ||
| 278 | |||
| 279 | memcg_css = task_css(current, memory_cgrp_id); | ||
| 280 | if (!memcg_css->parent) | ||
| 281 | return &bdi->wb; | ||
| 282 | |||
| 283 | wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); | ||
| 284 | |||
| 285 | /* | ||
| 286 | * %current's blkcg equals the effective blkcg of its memcg. No | ||
| 287 | * need to use the relatively expensive cgroup_get_e_css(). | ||
| 288 | */ | ||
| 289 | if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id))) | ||
| 290 | return wb; | ||
| 291 | return NULL; | ||
| 292 | } | ||
| 293 | |||
| 294 | /** | ||
| 295 | * wb_get_create_current - get or create wb for %current on a bdi | ||
| 296 | * @bdi: bdi of interest | ||
| 297 | * @gfp: allocation mask | ||
| 298 | * | ||
| 299 | * Equivalent to wb_get_create() on %current's memcg. This function is | ||
| 300 | * called from a relatively hot path and optimizes the common cases using | ||
| 301 | * wb_find_current(). | ||
| 302 | */ | ||
| 303 | static inline struct bdi_writeback * | ||
| 304 | wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) | ||
| 305 | { | ||
| 306 | struct bdi_writeback *wb; | ||
| 307 | |||
| 308 | rcu_read_lock(); | ||
| 309 | wb = wb_find_current(bdi); | ||
| 310 | if (wb && unlikely(!wb_tryget(wb))) | ||
| 311 | wb = NULL; | ||
| 312 | rcu_read_unlock(); | ||
| 313 | |||
| 314 | if (unlikely(!wb)) { | ||
| 315 | struct cgroup_subsys_state *memcg_css; | ||
| 316 | |||
| 317 | memcg_css = task_get_css(current, memory_cgrp_id); | ||
| 318 | wb = wb_get_create(bdi, memcg_css, gfp); | ||
| 319 | css_put(memcg_css); | ||
| 320 | } | ||
| 321 | return wb; | ||
| 322 | } | ||
| 323 | |||
| 324 | /** | ||
| 325 | * inode_to_wb_is_valid - test whether an inode has a wb associated | ||
| 326 | * @inode: inode of interest | ||
| 327 | * | ||
| 328 | * Returns %true if @inode has a wb associated. May be called without any | ||
| 329 | * locking. | ||
| 330 | */ | ||
| 331 | static inline bool inode_to_wb_is_valid(struct inode *inode) | ||
| 332 | { | ||
| 333 | return inode->i_wb; | ||
| 334 | } | ||
| 335 | |||
| 336 | /** | ||
| 337 | * inode_to_wb - determine the wb of an inode | ||
| 338 | * @inode: inode of interest | ||
| 339 | * | ||
| 340 | * Returns the wb @inode is currently associated with. The caller must be | ||
| 341 | * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the | ||
| 342 | * associated wb's list_lock. | ||
| 343 | */ | ||
| 344 | static inline struct bdi_writeback *inode_to_wb(struct inode *inode) | ||
| 345 | { | ||
| 346 | #ifdef CONFIG_LOCKDEP | ||
| 347 | WARN_ON_ONCE(debug_locks && | ||
| 348 | (!lockdep_is_held(&inode->i_lock) && | ||
| 349 | !lockdep_is_held(&inode->i_mapping->tree_lock) && | ||
| 350 | !lockdep_is_held(&inode->i_wb->list_lock))); | ||
| 351 | #endif | ||
| 352 | return inode->i_wb; | ||
| 353 | } | ||
| 354 | |||
| 355 | /** | ||
| 356 | * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction | ||
| 357 | * @inode: target inode | ||
| 358 | * @lockedp: temp bool output param, to be passed to the end function | ||
| 359 | * | ||
| 360 | * The caller wants to access the wb associated with @inode but isn't | ||
| 361 | * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This | ||
| 362 | * function determines the wb associated with @inode and ensures that the | ||
| 363 | * association doesn't change until the transaction is finished with | ||
| 364 | * unlocked_inode_to_wb_end(). | ||
| 365 | * | ||
| 366 | * The caller must call unlocked_inode_to_wb_end() with *@lockedp | ||
| 367 | * afterwards and can't sleep during transaction. IRQ may or may not be | ||
| 368 | * disabled on return. | ||
| 369 | */ | ||
| 370 | static inline struct bdi_writeback * | ||
| 371 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | ||
| 372 | { | ||
| 373 | rcu_read_lock(); | ||
| 374 | |||
| 375 | /* | ||
| 376 | * Paired with store_release in inode_switch_wb_work_fn() and | ||
| 377 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. | ||
| 378 | */ | ||
| 379 | *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; | ||
| 380 | |||
| 381 | if (unlikely(*lockedp)) | ||
| 382 | spin_lock_irq(&inode->i_mapping->tree_lock); | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. | ||
| 386 | * inode_to_wb() will bark. Deref directly. | ||
| 387 | */ | ||
| 388 | return inode->i_wb; | ||
| 389 | } | ||
| 390 | |||
| 391 | /** | ||
| 392 | * unlocked_inode_to_wb_end - end inode wb access transaction | ||
| 393 | * @inode: target inode | ||
| 394 | * @locked: *@lockedp from unlocked_inode_to_wb_begin() | ||
| 395 | */ | ||
| 396 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | ||
| 397 | { | ||
| 398 | if (unlikely(locked)) | ||
| 399 | spin_unlock_irq(&inode->i_mapping->tree_lock); | ||
| 400 | |||
| 401 | rcu_read_unlock(); | ||
| 402 | } | ||
| 403 | |||
| 404 | struct wb_iter { | ||
| 405 | int start_blkcg_id; | ||
| 406 | struct radix_tree_iter tree_iter; | ||
| 407 | void **slot; | ||
| 408 | }; | ||
| 409 | |||
| 410 | static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, | ||
| 411 | struct backing_dev_info *bdi) | ||
| 412 | { | ||
| 413 | struct radix_tree_iter *titer = &iter->tree_iter; | ||
| 414 | |||
| 415 | WARN_ON_ONCE(!rcu_read_lock_held()); | ||
| 416 | |||
| 417 | if (iter->start_blkcg_id >= 0) { | ||
| 418 | iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id); | ||
| 419 | iter->start_blkcg_id = -1; | ||
| 420 | } else { | ||
| 421 | iter->slot = radix_tree_next_slot(iter->slot, titer, 0); | ||
| 422 | } | ||
| 423 | |||
| 424 | if (!iter->slot) | ||
| 425 | iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); | ||
| 426 | if (iter->slot) | ||
| 427 | return *iter->slot; | ||
| 428 | return NULL; | ||
| 429 | } | ||
| 430 | |||
| 431 | static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, | ||
| 432 | struct backing_dev_info *bdi, | ||
| 433 | int start_blkcg_id) | ||
| 434 | { | ||
| 435 | iter->start_blkcg_id = start_blkcg_id; | ||
| 436 | |||
| 437 | if (start_blkcg_id) | ||
| 438 | return __wb_iter_next(iter, bdi); | ||
| 439 | else | ||
| 440 | return &bdi->wb; | ||
| 441 | } | ||
| 442 | |||
| 443 | /** | ||
| 444 | * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order | ||
| 445 | * @wb_cur: cursor struct bdi_writeback pointer | ||
| 446 | * @bdi: bdi to walk wb's of | ||
| 447 | * @iter: pointer to struct wb_iter to be used as iteration buffer | ||
| 448 | * @start_blkcg_id: blkcg ID to start iteration from | ||
| 449 | * | ||
| 450 | * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending | ||
| 451 | * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter | ||
| 452 | * to be used as temp storage during iteration. rcu_read_lock() must be | ||
| 453 | * held throughout iteration. | ||
| 454 | */ | ||
| 455 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ | ||
| 456 | for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \ | ||
| 457 | (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) | ||
| 458 | |||
| 459 | #else /* CONFIG_CGROUP_WRITEBACK */ | ||
| 460 | |||
| 461 | static inline bool inode_cgwb_enabled(struct inode *inode) | ||
| 462 | { | ||
| 463 | return false; | ||
| 464 | } | ||
| 465 | |||
| 466 | static inline struct bdi_writeback_congested * | ||
| 467 | wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) | ||
| 468 | { | ||
| 469 | atomic_inc(&bdi->wb_congested->refcnt); | ||
| 470 | return bdi->wb_congested; | ||
| 471 | } | ||
| 472 | |||
| 473 | static inline void wb_congested_put(struct bdi_writeback_congested *congested) | ||
| 474 | { | ||
| 475 | if (atomic_dec_and_test(&congested->refcnt)) | ||
| 476 | kfree(congested); | ||
| 477 | } | ||
| 478 | |||
| 479 | static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) | ||
| 480 | { | ||
| 481 | return &bdi->wb; | ||
| 482 | } | ||
| 483 | |||
| 484 | static inline struct bdi_writeback * | ||
| 485 | wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) | ||
| 486 | { | ||
| 487 | return &bdi->wb; | ||
| 488 | } | ||
| 489 | |||
| 490 | static inline bool inode_to_wb_is_valid(struct inode *inode) | ||
| 491 | { | ||
| 492 | return true; | ||
| 493 | } | ||
| 494 | |||
| 495 | static inline struct bdi_writeback *inode_to_wb(struct inode *inode) | ||
| 496 | { | ||
| 497 | return &inode_to_bdi(inode)->wb; | ||
| 498 | } | ||
| 499 | |||
| 500 | static inline struct bdi_writeback * | ||
| 501 | unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) | ||
| 502 | { | ||
| 503 | return inode_to_wb(inode); | ||
| 504 | } | ||
| 505 | |||
| 506 | static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) | ||
| 507 | { | ||
| 508 | } | ||
| 509 | |||
| 510 | static inline void wb_memcg_offline(struct mem_cgroup *memcg) | ||
| 511 | { | ||
| 512 | } | ||
| 513 | |||
| 514 | static inline void wb_blkcg_offline(struct blkcg *blkcg) | ||
| 515 | { | ||
| 516 | } | ||
| 517 | |||
| 518 | struct wb_iter { | ||
| 519 | int next_id; | ||
| 520 | }; | ||
| 521 | |||
| 522 | #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ | ||
| 523 | for ((iter)->next_id = (start_blkcg_id); \ | ||
| 524 | ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); ) | ||
| 525 | |||
| 526 | static inline int inode_congested(struct inode *inode, int cong_bits) | ||
| 527 | { | ||
| 528 | return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); | ||
| 529 | } | ||
| 530 | |||
| 531 | #endif /* CONFIG_CGROUP_WRITEBACK */ | ||
| 532 | |||
| 533 | static inline int inode_read_congested(struct inode *inode) | ||
| 534 | { | ||
| 535 | return inode_congested(inode, 1 << WB_sync_congested); | ||
| 536 | } | ||
| 537 | |||
| 538 | static inline int inode_write_congested(struct inode *inode) | ||
| 539 | { | ||
| 540 | return inode_congested(inode, 1 << WB_async_congested); | ||
| 541 | } | ||
| 542 | |||
| 543 | static inline int inode_rw_congested(struct inode *inode) | ||
| 544 | { | ||
| 545 | return inode_congested(inode, (1 << WB_sync_congested) | | ||
| 546 | (1 << WB_async_congested)); | ||
| 547 | } | ||
| 548 | |||
| 549 | static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) | ||
| 550 | { | ||
| 551 | return wb_congested(&bdi->wb, cong_bits); | ||
| 552 | } | ||
| 553 | |||
| 554 | static inline int bdi_read_congested(struct backing_dev_info *bdi) | ||
| 555 | { | ||
| 556 | return bdi_congested(bdi, 1 << WB_sync_congested); | ||
| 557 | } | ||
| 558 | |||
| 559 | static inline int bdi_write_congested(struct backing_dev_info *bdi) | ||
| 560 | { | ||
| 561 | return bdi_congested(bdi, 1 << WB_async_congested); | ||
| 562 | } | ||
| 563 | |||
| 564 | static inline int bdi_rw_congested(struct backing_dev_info *bdi) | ||
| 565 | { | ||
| 566 | return bdi_congested(bdi, (1 << WB_sync_congested) | | ||
| 567 | (1 << WB_async_congested)); | ||
| 568 | } | ||
| 569 | |||
| 570 | #endif /* _LINUX_BACKING_DEV_H */ | ||
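A hedged sketch of the iteration pattern the new bdi_for_each_wb() macro expects (hypothetical caller; rcu_read_lock() held across the walk, a struct wb_iter used as temporary storage, as the kerneldoc above requires):

	/* Walk every wb of a bdi and sum the pages currently under writeback. */
	static long example_count_writeback_pages(struct backing_dev_info *bdi)
	{
		struct bdi_writeback *wb;
		struct wb_iter iter;
		long nr = 0;

		rcu_read_lock();
		bdi_for_each_wb(wb, bdi, &iter, 0)
			nr += wb_stat(wb, WB_WRITEBACK);
		rcu_read_unlock();

		return nr;
	}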
diff --git a/include/linux/backlight.h b/include/linux/backlight.h index adb14a8616df..1e7a69adbe6f 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h | |||
| @@ -117,12 +117,16 @@ struct backlight_device { | |||
| 117 | int use_count; | 117 | int use_count; |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
| 120 | static inline void backlight_update_status(struct backlight_device *bd) | 120 | static inline int backlight_update_status(struct backlight_device *bd) |
| 121 | { | 121 | { |
| 122 | int ret = -ENOENT; | ||
| 123 | |||
| 122 | mutex_lock(&bd->update_lock); | 124 | mutex_lock(&bd->update_lock); |
| 123 | if (bd->ops && bd->ops->update_status) | 125 | if (bd->ops && bd->ops->update_status) |
| 124 | bd->ops->update_status(bd); | 126 | ret = bd->ops->update_status(bd); |
| 125 | mutex_unlock(&bd->update_lock); | 127 | mutex_unlock(&bd->update_lock); |
| 128 | |||
| 129 | return ret; | ||
| 126 | } | 130 | } |
| 127 | 131 | ||
| 128 | extern struct backlight_device *backlight_device_register(const char *name, | 132 | extern struct backlight_device *backlight_device_register(const char *name, |
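Since backlight_update_status() is now non-void (returning -ENOENT when no update_status op is set, or the op's result otherwise), callers can propagate the outcome; a minimal hypothetical sketch:

	/* Set a new brightness level and report whether the hardware update
	 * actually succeeded.
	 */
	static int example_set_brightness(struct backlight_device *bd, int level)
	{
		bd->props.brightness = level;
		return backlight_update_status(bd);
	}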
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h index b12b07e75929..2793652fbf66 100644 --- a/include/linux/bcm47xx_nvram.h +++ b/include/linux/bcm47xx_nvram.h | |||
| @@ -10,11 +10,17 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
| 12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| 13 | #include <linux/vmalloc.h> | ||
| 13 | 14 | ||
| 14 | #ifdef CONFIG_BCM47XX | 15 | #ifdef CONFIG_BCM47XX_NVRAM |
| 15 | int bcm47xx_nvram_init_from_mem(u32 base, u32 lim); | 16 | int bcm47xx_nvram_init_from_mem(u32 base, u32 lim); |
| 16 | int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len); | 17 | int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len); |
| 17 | int bcm47xx_nvram_gpio_pin(const char *name); | 18 | int bcm47xx_nvram_gpio_pin(const char *name); |
| 19 | char *bcm47xx_nvram_get_contents(size_t *val_len); | ||
| 20 | static inline void bcm47xx_nvram_release_contents(char *nvram) | ||
| 21 | { | ||
| 22 | vfree(nvram); | ||
| 23 | }; | ||
| 18 | #else | 24 | #else |
| 19 | static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) | 25 | static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) |
| 20 | { | 26 | { |
| @@ -29,6 +35,15 @@ static inline int bcm47xx_nvram_gpio_pin(const char *name) | |||
| 29 | { | 35 | { |
| 30 | return -ENOTSUPP; | 36 | return -ENOTSUPP; |
| 31 | }; | 37 | }; |
| 38 | |||
| 39 | static inline char *bcm47xx_nvram_get_contents(size_t *val_len) | ||
| 40 | { | ||
| 41 | return NULL; | ||
| 42 | }; | ||
| 43 | |||
| 44 | static inline void bcm47xx_nvram_release_contents(char *nvram) | ||
| 45 | { | ||
| 46 | }; | ||
| 32 | #endif | 47 | #endif |
| 33 | 48 | ||
| 34 | #endif /* __BCM47XX_NVRAM_H */ | 49 | #endif /* __BCM47XX_NVRAM_H */ |
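A hedged sketch pairing the new bcm47xx_nvram_get_contents()/bcm47xx_nvram_release_contents() helpers (the release helper simply vfree()s the returned buffer; the caller below is hypothetical):

	static void example_dump_nvram_size(void)
	{
		size_t len;
		char *nvram = bcm47xx_nvram_get_contents(&len);

		if (!nvram)	/* NULL when CONFIG_BCM47XX_NVRAM is disabled */
			return;

		pr_info("NVRAM image: %zu bytes\n", len);
		bcm47xx_nvram_release_contents(nvram);
	}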
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index e34f906647d3..2ff4a9961e1d 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h | |||
| @@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); | |||
| 305 | 305 | ||
| 306 | extern void bcma_driver_unregister(struct bcma_driver *drv); | 306 | extern void bcma_driver_unregister(struct bcma_driver *drv); |
| 307 | 307 | ||
| 308 | /* module_bcma_driver() - Helper macro for drivers that don't do | ||
| 309 | * anything special in module init/exit. This eliminates a lot of | ||
| 310 | * boilerplate. Each module may only use this macro once, and | ||
| 311 | * calling it replaces module_init() and module_exit() | ||
| 312 | */ | ||
| 313 | #define module_bcma_driver(__bcma_driver) \ | ||
| 314 | module_driver(__bcma_driver, bcma_driver_register, \ | ||
| 315 | bcma_driver_unregister) | ||
| 316 | |||
| 308 | /* Set a fallback SPROM. | 317 | /* Set a fallback SPROM. |
| 309 | * See kdoc at the function definition for complete documentation. */ | 318 | * See kdoc at the function definition for complete documentation. */ |
| 310 | extern int bcma_arch_register_fallback_sprom( | 319 | extern int bcma_arch_register_fallback_sprom( |
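A hedged sketch of the boilerplate the new module_bcma_driver() removes (all names below are hypothetical placeholders):

	static const struct bcma_device_id example_bcma_tbl[] = {
		{},	/* normally lists the cores this driver binds to */
	};

	static int example_probe(struct bcma_device *core)
	{
		return 0;
	}

	static void example_remove(struct bcma_device *core)
	{
	}

	static struct bcma_driver example_driver = {
		.name		= KBUILD_MODNAME,
		.id_table	= example_bcma_tbl,
		.probe		= example_probe,
		.remove		= example_remove,
	};

	/* Replaces the usual module_init()/module_exit() registration pair. */
	module_bcma_driver(example_driver);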
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 5ba6918ca20b..9657f11d48a7 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h | |||
| @@ -246,7 +246,18 @@ static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) | |||
| 246 | } | 246 | } |
| 247 | #endif | 247 | #endif |
| 248 | 248 | ||
| 249 | #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE | ||
| 249 | extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); | 250 | extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); |
| 250 | extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); | 251 | extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); |
| 252 | #else | ||
| 253 | static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev) | ||
| 254 | { | ||
| 255 | return -ENOTSUPP; | ||
| 256 | } | ||
| 257 | static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev) | ||
| 258 | { | ||
| 259 | return -ENOTSUPP; | ||
| 260 | } | ||
| 261 | #endif | ||
| 251 | 262 | ||
| 252 | #endif /* LINUX_BCMA_DRIVER_PCI_H_ */ | 263 | #endif /* LINUX_BCMA_DRIVER_PCI_H_ */ |
diff --git a/include/linux/bio.h b/include/linux/bio.h index da3a127c9958..5e963a6d7c14 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -290,7 +290,21 @@ static inline unsigned bio_segments(struct bio *bio) | |||
| 290 | * returns. and then bio would be freed memory when if (bio->bi_flags ...) | 290 | * returns. and then bio would be freed memory when if (bio->bi_flags ...) |
| 291 | * runs | 291 | * runs |
| 292 | */ | 292 | */ |
| 293 | #define bio_get(bio) atomic_inc(&(bio)->bi_cnt) | 293 | static inline void bio_get(struct bio *bio) |
| 294 | { | ||
| 295 | bio->bi_flags |= (1 << BIO_REFFED); | ||
| 296 | smp_mb__before_atomic(); | ||
| 297 | atomic_inc(&bio->__bi_cnt); | ||
| 298 | } | ||
| 299 | |||
| 300 | static inline void bio_cnt_set(struct bio *bio, unsigned int count) | ||
| 301 | { | ||
| 302 | if (count != 1) { | ||
| 303 | bio->bi_flags |= (1 << BIO_REFFED); | ||
| 304 | smp_mb__before_atomic(); | ||
| 305 | } | ||
| 306 | atomic_set(&bio->__bi_cnt, count); | ||
| 307 | } | ||
| 294 | 308 | ||
| 295 | enum bip_flags { | 309 | enum bip_flags { |
| 296 | BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ | 310 | BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ |
| @@ -413,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) | |||
| 413 | } | 427 | } |
| 414 | 428 | ||
| 415 | extern void bio_endio(struct bio *, int); | 429 | extern void bio_endio(struct bio *, int); |
| 416 | extern void bio_endio_nodec(struct bio *, int); | ||
| 417 | struct request_queue; | 430 | struct request_queue; |
| 418 | extern int bio_phys_segments(struct request_queue *, struct bio *); | 431 | extern int bio_phys_segments(struct request_queue *, struct bio *); |
| 419 | 432 | ||
| @@ -469,9 +482,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); | |||
| 469 | extern unsigned int bvec_nr_vecs(unsigned short idx); | 482 | extern unsigned int bvec_nr_vecs(unsigned short idx); |
| 470 | 483 | ||
| 471 | #ifdef CONFIG_BLK_CGROUP | 484 | #ifdef CONFIG_BLK_CGROUP |
| 485 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); | ||
| 472 | int bio_associate_current(struct bio *bio); | 486 | int bio_associate_current(struct bio *bio); |
| 473 | void bio_disassociate_task(struct bio *bio); | 487 | void bio_disassociate_task(struct bio *bio); |
| 474 | #else /* CONFIG_BLK_CGROUP */ | 488 | #else /* CONFIG_BLK_CGROUP */ |
| 489 | static inline int bio_associate_blkcg(struct bio *bio, | ||
| 490 | struct cgroup_subsys_state *blkcg_css) { return 0; } | ||
| 475 | static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } | 491 | static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } |
| 476 | static inline void bio_disassociate_task(struct bio *bio) { } | 492 | static inline void bio_disassociate_task(struct bio *bio) { } |
| 477 | #endif /* CONFIG_BLK_CGROUP */ | 493 | #endif /* CONFIG_BLK_CGROUP */ |
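bio_get() is now an inline that marks the bio BIO_REFFED before bumping __bi_cnt, but the usage pattern described in the comment above is unchanged. A small sketch, assuming the submit_bio(rw, bio) signature of this kernel version; foo_* is illustrative:

static void foo_read_and_inspect(struct bio *bio)
{
	bio_get(bio);			/* keep the bio alive past completion */
	submit_bio(READ, bio);
	/* ... safe to look at the bio here even if it already completed ... */
	bio_put(bio);			/* drop our extra reference */
}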
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h new file mode 100644 index 000000000000..1b62d768c7df --- /dev/null +++ b/include/linux/blk-cgroup.h | |||
| @@ -0,0 +1,650 @@ | |||
| 1 | #ifndef _BLK_CGROUP_H | ||
| 2 | #define _BLK_CGROUP_H | ||
| 3 | /* | ||
| 4 | * Common Block IO controller cgroup interface | ||
| 5 | * | ||
| 6 | * Based on ideas and code from CFQ, CFS and BFQ: | ||
| 7 | * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> | ||
| 8 | * | ||
| 9 | * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> | ||
| 10 | * Paolo Valente <paolo.valente@unimore.it> | ||
| 11 | * | ||
| 12 | * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> | ||
| 13 | * Nauman Rafique <nauman@google.com> | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/cgroup.h> | ||
| 17 | #include <linux/u64_stats_sync.h> | ||
| 18 | #include <linux/seq_file.h> | ||
| 19 | #include <linux/radix-tree.h> | ||
| 20 | #include <linux/blkdev.h> | ||
| 21 | #include <linux/atomic.h> | ||
| 22 | |||
| 23 | /* Max limits for throttle policy */ | ||
| 24 | #define THROTL_IOPS_MAX UINT_MAX | ||
| 25 | |||
| 26 | #ifdef CONFIG_BLK_CGROUP | ||
| 27 | |||
| 28 | enum blkg_rwstat_type { | ||
| 29 | BLKG_RWSTAT_READ, | ||
| 30 | BLKG_RWSTAT_WRITE, | ||
| 31 | BLKG_RWSTAT_SYNC, | ||
| 32 | BLKG_RWSTAT_ASYNC, | ||
| 33 | |||
| 34 | BLKG_RWSTAT_NR, | ||
| 35 | BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, | ||
| 36 | }; | ||
| 37 | |||
| 38 | struct blkcg_gq; | ||
| 39 | |||
| 40 | struct blkcg { | ||
| 41 | struct cgroup_subsys_state css; | ||
| 42 | spinlock_t lock; | ||
| 43 | |||
| 44 | struct radix_tree_root blkg_tree; | ||
| 45 | struct blkcg_gq *blkg_hint; | ||
| 46 | struct hlist_head blkg_list; | ||
| 47 | |||
| 48 | struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; | ||
| 49 | |||
| 50 | struct list_head all_blkcgs_node; | ||
| 51 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 52 | struct list_head cgwb_list; | ||
| 53 | #endif | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct blkg_stat { | ||
| 57 | struct u64_stats_sync syncp; | ||
| 58 | uint64_t cnt; | ||
| 59 | }; | ||
| 60 | |||
| 61 | struct blkg_rwstat { | ||
| 62 | struct u64_stats_sync syncp; | ||
| 63 | uint64_t cnt[BLKG_RWSTAT_NR]; | ||
| 64 | }; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a | ||
| 68 | * request_queue (q). This is used by blkcg policies which need to track | ||
| 69 | * information per blkcg - q pair. | ||
| 70 | * | ||
| 71 | * There can be multiple active blkcg policies and each has its private | ||
| 72 | * data on each blkg, the size of which is determined by | ||
| 73 | * blkcg_policy->pd_size. blkcg core allocates and frees such areas | ||
| 74 | * together with blkg and invokes pd_init/exit_fn() methods. | ||
| 75 | * | ||
| 76 | * Such private data must embed struct blkg_policy_data (pd) at the | ||
| 77 | * beginning and pd_size can't be smaller than pd. | ||
| 78 | */ | ||
| 79 | struct blkg_policy_data { | ||
| 80 | /* the blkg and policy id this per-policy data belongs to */ | ||
| 81 | struct blkcg_gq *blkg; | ||
| 82 | int plid; | ||
| 83 | |||
| 84 | /* used during policy activation */ | ||
| 85 | struct list_head alloc_node; | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Policies that need to keep per-blkcg data which is independent | ||
| 90 | * from any request_queue associated to it must specify its size | ||
| 91 | * with the cpd_size field of the blkcg_policy structure and | ||
| 92 | * embed a blkcg_policy_data in it. cpd_init() is invoked to let | ||
| 93 | * each policy handle per-blkcg data. | ||
| 94 | */ | ||
| 95 | struct blkcg_policy_data { | ||
| 96 | /* the policy id this per-policy data belongs to */ | ||
| 97 | int plid; | ||
| 98 | }; | ||
| 99 | |||
| 100 | /* association between a blk cgroup and a request queue */ | ||
| 101 | struct blkcg_gq { | ||
| 102 | /* Pointer to the associated request_queue */ | ||
| 103 | struct request_queue *q; | ||
| 104 | struct list_head q_node; | ||
| 105 | struct hlist_node blkcg_node; | ||
| 106 | struct blkcg *blkcg; | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Each blkg gets congested separately and the congestion state is | ||
| 110 | * propagated to the matching bdi_writeback_congested. | ||
| 111 | */ | ||
| 112 | struct bdi_writeback_congested *wb_congested; | ||
| 113 | |||
| 114 | /* all non-root blkcg_gq's are guaranteed to have access to parent */ | ||
| 115 | struct blkcg_gq *parent; | ||
| 116 | |||
| 117 | /* request allocation list for this blkcg-q pair */ | ||
| 118 | struct request_list rl; | ||
| 119 | |||
| 120 | /* reference count */ | ||
| 121 | atomic_t refcnt; | ||
| 122 | |||
| 123 | /* is this blkg online? protected by both blkcg and q locks */ | ||
| 124 | bool online; | ||
| 125 | |||
| 126 | struct blkg_policy_data *pd[BLKCG_MAX_POLS]; | ||
| 127 | |||
| 128 | struct rcu_head rcu_head; | ||
| 129 | }; | ||
| 130 | |||
| 131 | typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg); | ||
| 132 | typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg); | ||
| 133 | typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg); | ||
| 134 | typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg); | ||
| 135 | typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg); | ||
| 136 | typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg); | ||
| 137 | |||
| 138 | struct blkcg_policy { | ||
| 139 | int plid; | ||
| 140 | /* policy specific private data size */ | ||
| 141 | size_t pd_size; | ||
| 142 | /* policy specific per-blkcg data size */ | ||
| 143 | size_t cpd_size; | ||
| 144 | /* cgroup files for the policy */ | ||
| 145 | struct cftype *cftypes; | ||
| 146 | |||
| 147 | /* operations */ | ||
| 148 | blkcg_pol_init_cpd_fn *cpd_init_fn; | ||
| 149 | blkcg_pol_init_pd_fn *pd_init_fn; | ||
| 150 | blkcg_pol_online_pd_fn *pd_online_fn; | ||
| 151 | blkcg_pol_offline_pd_fn *pd_offline_fn; | ||
| 152 | blkcg_pol_exit_pd_fn *pd_exit_fn; | ||
| 153 | blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; | ||
| 154 | }; | ||
| 155 | |||
| 156 | extern struct blkcg blkcg_root; | ||
| 157 | extern struct cgroup_subsys_state * const blkcg_root_css; | ||
| 158 | |||
| 159 | struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); | ||
| 160 | struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, | ||
| 161 | struct request_queue *q); | ||
| 162 | int blkcg_init_queue(struct request_queue *q); | ||
| 163 | void blkcg_drain_queue(struct request_queue *q); | ||
| 164 | void blkcg_exit_queue(struct request_queue *q); | ||
| 165 | |||
| 166 | /* Blkio controller policy registration */ | ||
| 167 | int blkcg_policy_register(struct blkcg_policy *pol); | ||
| 168 | void blkcg_policy_unregister(struct blkcg_policy *pol); | ||
| 169 | int blkcg_activate_policy(struct request_queue *q, | ||
| 170 | const struct blkcg_policy *pol); | ||
| 171 | void blkcg_deactivate_policy(struct request_queue *q, | ||
| 172 | const struct blkcg_policy *pol); | ||
| 173 | |||
| 174 | void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, | ||
| 175 | u64 (*prfill)(struct seq_file *, | ||
| 176 | struct blkg_policy_data *, int), | ||
| 177 | const struct blkcg_policy *pol, int data, | ||
| 178 | bool show_total); | ||
| 179 | u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); | ||
| 180 | u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | ||
| 181 | const struct blkg_rwstat *rwstat); | ||
| 182 | u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); | ||
| 183 | u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, | ||
| 184 | int off); | ||
| 185 | |||
| 186 | u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off); | ||
| 187 | struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, | ||
| 188 | int off); | ||
| 189 | |||
| 190 | struct blkg_conf_ctx { | ||
| 191 | struct gendisk *disk; | ||
| 192 | struct blkcg_gq *blkg; | ||
| 193 | u64 v; | ||
| 194 | }; | ||
| 195 | |||
| 196 | int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | ||
| 197 | const char *input, struct blkg_conf_ctx *ctx); | ||
| 198 | void blkg_conf_finish(struct blkg_conf_ctx *ctx); | ||
| 199 | |||
| 200 | |||
| 201 | static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) | ||
| 202 | { | ||
| 203 | return css ? container_of(css, struct blkcg, css) : NULL; | ||
| 204 | } | ||
| 205 | |||
| 206 | static inline struct blkcg *task_blkcg(struct task_struct *tsk) | ||
| 207 | { | ||
| 208 | return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline struct blkcg *bio_blkcg(struct bio *bio) | ||
| 212 | { | ||
| 213 | if (bio && bio->bi_css) | ||
| 214 | return css_to_blkcg(bio->bi_css); | ||
| 215 | return task_blkcg(current); | ||
| 216 | } | ||
| 217 | |||
| 218 | static inline struct cgroup_subsys_state * | ||
| 219 | task_get_blkcg_css(struct task_struct *task) | ||
| 220 | { | ||
| 221 | return task_get_css(task, blkio_cgrp_id); | ||
| 222 | } | ||
| 223 | |||
| 224 | /** | ||
| 225 | * blkcg_parent - get the parent of a blkcg | ||
| 226 | * @blkcg: blkcg of interest | ||
| 227 | * | ||
| 228 | * Return the parent blkcg of @blkcg. Can be called anytime. | ||
| 229 | */ | ||
| 230 | static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) | ||
| 231 | { | ||
| 232 | return css_to_blkcg(blkcg->css.parent); | ||
| 233 | } | ||
| 234 | |||
| 235 | /** | ||
| 236 | * blkg_to_pd - get policy private data | ||
| 237 | * @blkg: blkg of interest | ||
| 238 | * @pol: policy of interest | ||
| 239 | * | ||
| 240 | * Return pointer to private data associated with the @blkg-@pol pair. | ||
| 241 | */ | ||
| 242 | static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, | ||
| 243 | struct blkcg_policy *pol) | ||
| 244 | { | ||
| 245 | return blkg ? blkg->pd[pol->plid] : NULL; | ||
| 246 | } | ||
| 247 | |||
| 248 | static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, | ||
| 249 | struct blkcg_policy *pol) | ||
| 250 | { | ||
| 251 | return blkcg ? blkcg->pd[pol->plid] : NULL; | ||
| 252 | } | ||
| 253 | |||
| 254 | /** | ||
| 255 | * pd_to_blkg - get blkg associated with policy private data | ||
| 256 | * @pd: policy private data of interest | ||
| 257 | * | ||
| 258 | * @pd is policy private data. Determine the blkg it's associated with. | ||
| 259 | */ | ||
| 260 | static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) | ||
| 261 | { | ||
| 262 | return pd ? pd->blkg : NULL; | ||
| 263 | } | ||
| 264 | |||
| 265 | /** | ||
| 266 | * blkg_path - format cgroup path of blkg | ||
| 267 | * @blkg: blkg of interest | ||
| 268 | * @buf: target buffer | ||
| 269 | * @buflen: target buffer length | ||
| 270 | * | ||
| 271 | * Format the path of the cgroup of @blkg into @buf. | ||
| 272 | */ | ||
| 273 | static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) | ||
| 274 | { | ||
| 275 | char *p; | ||
| 276 | |||
| 277 | p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); | ||
| 278 | if (!p) { | ||
| 279 | strncpy(buf, "<unavailable>", buflen); | ||
| 280 | return -ENAMETOOLONG; | ||
| 281 | } | ||
| 282 | |||
| 283 | memmove(buf, p, buf + buflen - p); | ||
| 284 | return 0; | ||
| 285 | } | ||
| 286 | |||
| 287 | /** | ||
| 288 | * blkg_get - get a blkg reference | ||
| 289 | * @blkg: blkg to get | ||
| 290 | * | ||
| 291 | * The caller should be holding an existing reference. | ||
| 292 | */ | ||
| 293 | static inline void blkg_get(struct blkcg_gq *blkg) | ||
| 294 | { | ||
| 295 | WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); | ||
| 296 | atomic_inc(&blkg->refcnt); | ||
| 297 | } | ||
| 298 | |||
| 299 | void __blkg_release_rcu(struct rcu_head *rcu); | ||
| 300 | |||
| 301 | /** | ||
| 302 | * blkg_put - put a blkg reference | ||
| 303 | * @blkg: blkg to put | ||
| 304 | */ | ||
| 305 | static inline void blkg_put(struct blkcg_gq *blkg) | ||
| 306 | { | ||
| 307 | WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); | ||
| 308 | if (atomic_dec_and_test(&blkg->refcnt)) | ||
| 309 | call_rcu(&blkg->rcu_head, __blkg_release_rcu); | ||
| 310 | } | ||
| 311 | |||
| 312 | struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, | ||
| 313 | bool update_hint); | ||
| 314 | |||
| 315 | /** | ||
| 316 | * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants | ||
| 317 | * @d_blkg: loop cursor pointing to the current descendant | ||
| 318 | * @pos_css: used for iteration | ||
| 319 | * @p_blkg: target blkg to walk descendants of | ||
| 320 | * | ||
| 321 | * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU | ||
| 322 | * read locked. If called under either blkcg or queue lock, the iteration | ||
| 323 | * is guaranteed to include all and only online blkgs. The caller may | ||
| 324 | * update @pos_css by calling css_rightmost_descendant() to skip subtree. | ||
| 325 | * @p_blkg is included in the iteration and the first node to be visited. | ||
| 326 | */ | ||
| 327 | #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \ | ||
| 328 | css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \ | ||
| 329 | if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ | ||
| 330 | (p_blkg)->q, false))) | ||
| 331 | |||
| 332 | /** | ||
| 333 | * blkg_for_each_descendant_post - post-order walk of a blkg's descendants | ||
| 334 | * @d_blkg: loop cursor pointing to the current descendant | ||
| 335 | * @pos_css: used for iteration | ||
| 336 | * @p_blkg: target blkg to walk descendants of | ||
| 337 | * | ||
| 338 | * Similar to blkg_for_each_descendant_pre() but performs post-order | ||
| 339 | * traversal instead. Synchronization rules are the same. @p_blkg is | ||
| 340 | * included in the iteration and the last node to be visited. | ||
| 341 | */ | ||
| 342 | #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \ | ||
| 343 | css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \ | ||
| 344 | if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ | ||
| 345 | (p_blkg)->q, false))) | ||
| 346 | |||
| 347 | /** | ||
| 348 | * blk_get_rl - get request_list to use | ||
| 349 | * @q: request_queue of interest | ||
| 350 | * @bio: bio which will be attached to the allocated request (may be %NULL) | ||
| 351 | * | ||
| 352 | * The caller wants to allocate a request from @q to use for @bio. Find | ||
| 353 | * the request_list to use and obtain a reference on it. Should be called | ||
| 354 | * under queue_lock. This function is guaranteed to return non-%NULL | ||
| 355 | * request_list. | ||
| 356 | */ | ||
| 357 | static inline struct request_list *blk_get_rl(struct request_queue *q, | ||
| 358 | struct bio *bio) | ||
| 359 | { | ||
| 360 | struct blkcg *blkcg; | ||
| 361 | struct blkcg_gq *blkg; | ||
| 362 | |||
| 363 | rcu_read_lock(); | ||
| 364 | |||
| 365 | blkcg = bio_blkcg(bio); | ||
| 366 | |||
| 367 | /* bypass blkg lookup and use @q->root_rl directly for root */ | ||
| 368 | if (blkcg == &blkcg_root) | ||
| 369 | goto root_rl; | ||
| 370 | |||
| 371 | /* | ||
| 372 | * Try to use blkg->rl. blkg lookup may fail under memory pressure | ||
| 373 | * or if either the blkcg or queue is going away. Fall back to | ||
| 374 | * root_rl in such cases. | ||
| 375 | */ | ||
| 376 | blkg = blkg_lookup_create(blkcg, q); | ||
| 377 | if (unlikely(IS_ERR(blkg))) | ||
| 378 | goto root_rl; | ||
| 379 | |||
| 380 | blkg_get(blkg); | ||
| 381 | rcu_read_unlock(); | ||
| 382 | return &blkg->rl; | ||
| 383 | root_rl: | ||
| 384 | rcu_read_unlock(); | ||
| 385 | return &q->root_rl; | ||
| 386 | } | ||
| 387 | |||
| 388 | /** | ||
| 389 | * blk_put_rl - put request_list | ||
| 390 | * @rl: request_list to put | ||
| 391 | * | ||
| 392 | * Put the reference acquired by blk_get_rl(). Should be called under | ||
| 393 | * queue_lock. | ||
| 394 | */ | ||
| 395 | static inline void blk_put_rl(struct request_list *rl) | ||
| 396 | { | ||
| 397 | /* root_rl may not have blkg set */ | ||
| 398 | if (rl->blkg && rl->blkg->blkcg != &blkcg_root) | ||
| 399 | blkg_put(rl->blkg); | ||
| 400 | } | ||
| 401 | |||
| 402 | /** | ||
| 403 | * blk_rq_set_rl - associate a request with a request_list | ||
| 404 | * @rq: request of interest | ||
| 405 | * @rl: target request_list | ||
| 406 | * | ||
| 407 | * Associate @rq with @rl so that accounting and freeing can know the | ||
| 408 | * request_list @rq came from. | ||
| 409 | */ | ||
| 410 | static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) | ||
| 411 | { | ||
| 412 | rq->rl = rl; | ||
| 413 | } | ||
| 414 | |||
| 415 | /** | ||
| 416 | * blk_rq_rl - return the request_list a request came from | ||
| 417 | * @rq: request of interest | ||
| 418 | * | ||
| 419 | * Return the request_list @rq is allocated from. | ||
| 420 | */ | ||
| 421 | static inline struct request_list *blk_rq_rl(struct request *rq) | ||
| 422 | { | ||
| 423 | return rq->rl; | ||
| 424 | } | ||
| 425 | |||
| 426 | struct request_list *__blk_queue_next_rl(struct request_list *rl, | ||
| 427 | struct request_queue *q); | ||
| 428 | /** | ||
| 429 | * blk_queue_for_each_rl - iterate through all request_lists of a request_queue | ||
| 430 | * | ||
| 431 | * Should be used under queue_lock. | ||
| 432 | */ | ||
| 433 | #define blk_queue_for_each_rl(rl, q) \ | ||
| 434 | for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) | ||
| 435 | |||
| 436 | static inline void blkg_stat_init(struct blkg_stat *stat) | ||
| 437 | { | ||
| 438 | u64_stats_init(&stat->syncp); | ||
| 439 | } | ||
| 440 | |||
| 441 | /** | ||
| 442 | * blkg_stat_add - add a value to a blkg_stat | ||
| 443 | * @stat: target blkg_stat | ||
| 444 | * @val: value to add | ||
| 445 | * | ||
| 446 | * Add @val to @stat. The caller is responsible for synchronizing calls to | ||
| 447 | * this function. | ||
| 448 | */ | ||
| 449 | static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) | ||
| 450 | { | ||
| 451 | u64_stats_update_begin(&stat->syncp); | ||
| 452 | stat->cnt += val; | ||
| 453 | u64_stats_update_end(&stat->syncp); | ||
| 454 | } | ||
| 455 | |||
| 456 | /** | ||
| 457 | * blkg_stat_read - read the current value of a blkg_stat | ||
| 458 | * @stat: blkg_stat to read | ||
| 459 | * | ||
| 460 | * Read the current value of @stat. This function can be called without | ||
| 461 | * synchronization and takes care of u64 atomicity. | ||
| 462 | */ | ||
| 463 | static inline uint64_t blkg_stat_read(struct blkg_stat *stat) | ||
| 464 | { | ||
| 465 | unsigned int start; | ||
| 466 | uint64_t v; | ||
| 467 | |||
| 468 | do { | ||
| 469 | start = u64_stats_fetch_begin_irq(&stat->syncp); | ||
| 470 | v = stat->cnt; | ||
| 471 | } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); | ||
| 472 | |||
| 473 | return v; | ||
| 474 | } | ||
| 475 | |||
| 476 | /** | ||
| 477 | * blkg_stat_reset - reset a blkg_stat | ||
| 478 | * @stat: blkg_stat to reset | ||
| 479 | */ | ||
| 480 | static inline void blkg_stat_reset(struct blkg_stat *stat) | ||
| 481 | { | ||
| 482 | stat->cnt = 0; | ||
| 483 | } | ||
| 484 | |||
| 485 | /** | ||
| 486 | * blkg_stat_merge - merge a blkg_stat into another | ||
| 487 | * @to: the destination blkg_stat | ||
| 488 | * @from: the source | ||
| 489 | * | ||
| 490 | * Add @from's count to @to. | ||
| 491 | */ | ||
| 492 | static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) | ||
| 493 | { | ||
| 494 | blkg_stat_add(to, blkg_stat_read(from)); | ||
| 495 | } | ||
| 496 | |||
| 497 | static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) | ||
| 498 | { | ||
| 499 | u64_stats_init(&rwstat->syncp); | ||
| 500 | } | ||
| 501 | |||
| 502 | /** | ||
| 503 | * blkg_rwstat_add - add a value to a blkg_rwstat | ||
| 504 | * @rwstat: target blkg_rwstat | ||
| 505 | * @rw: mask of REQ_{WRITE|SYNC} | ||
| 506 | * @val: value to add | ||
| 507 | * | ||
| 508 | * Add @val to @rwstat. The counters are chosen according to @rw. The | ||
| 509 | * caller is responsible for synchronizing calls to this function. | ||
| 510 | */ | ||
| 511 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, | ||
| 512 | int rw, uint64_t val) | ||
| 513 | { | ||
| 514 | u64_stats_update_begin(&rwstat->syncp); | ||
| 515 | |||
| 516 | if (rw & REQ_WRITE) | ||
| 517 | rwstat->cnt[BLKG_RWSTAT_WRITE] += val; | ||
| 518 | else | ||
| 519 | rwstat->cnt[BLKG_RWSTAT_READ] += val; | ||
| 520 | if (rw & REQ_SYNC) | ||
| 521 | rwstat->cnt[BLKG_RWSTAT_SYNC] += val; | ||
| 522 | else | ||
| 523 | rwstat->cnt[BLKG_RWSTAT_ASYNC] += val; | ||
| 524 | |||
| 525 | u64_stats_update_end(&rwstat->syncp); | ||
| 526 | } | ||
| 527 | |||
| 528 | /** | ||
| 529 | * blkg_rwstat_read - read the current values of a blkg_rwstat | ||
| 530 | * @rwstat: blkg_rwstat to read | ||
| 531 | * | ||
| 532 | * Read the current snapshot of @rwstat and return it as the return value. | ||
| 533 | * This function can be called without synchronization and takes care of | ||
| 534 | * u64 atomicity. | ||
| 535 | */ | ||
| 536 | static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) | ||
| 537 | { | ||
| 538 | unsigned int start; | ||
| 539 | struct blkg_rwstat tmp; | ||
| 540 | |||
| 541 | do { | ||
| 542 | start = u64_stats_fetch_begin_irq(&rwstat->syncp); | ||
| 543 | tmp = *rwstat; | ||
| 544 | } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); | ||
| 545 | |||
| 546 | return tmp; | ||
| 547 | } | ||
| 548 | |||
| 549 | /** | ||
| 550 | * blkg_rwstat_total - read the total count of a blkg_rwstat | ||
| 551 | * @rwstat: blkg_rwstat to read | ||
| 552 | * | ||
| 553 | * Return the total count of @rwstat regardless of the IO direction. This | ||
| 554 | * function can be called without synchronization and takes care of u64 | ||
| 555 | * atomicity. | ||
| 556 | */ | ||
| 557 | static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) | ||
| 558 | { | ||
| 559 | struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); | ||
| 560 | |||
| 561 | return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; | ||
| 562 | } | ||
| 563 | |||
| 564 | /** | ||
| 565 | * blkg_rwstat_reset - reset a blkg_rwstat | ||
| 566 | * @rwstat: blkg_rwstat to reset | ||
| 567 | */ | ||
| 568 | static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) | ||
| 569 | { | ||
| 570 | memset(rwstat->cnt, 0, sizeof(rwstat->cnt)); | ||
| 571 | } | ||
| 572 | |||
| 573 | /** | ||
| 574 | * blkg_rwstat_merge - merge a blkg_rwstat into another | ||
| 575 | * @to: the destination blkg_rwstat | ||
| 576 | * @from: the source | ||
| 577 | * | ||
| 578 | * Add @from's counts to @to. | ||
| 579 | */ | ||
| 580 | static inline void blkg_rwstat_merge(struct blkg_rwstat *to, | ||
| 581 | struct blkg_rwstat *from) | ||
| 582 | { | ||
| 583 | struct blkg_rwstat v = blkg_rwstat_read(from); | ||
| 584 | int i; | ||
| 585 | |||
| 586 | u64_stats_update_begin(&to->syncp); | ||
| 587 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | ||
| 588 | to->cnt[i] += v.cnt[i]; | ||
| 589 | u64_stats_update_end(&to->syncp); | ||
| 590 | } | ||
| 591 | |||
| 592 | #else /* CONFIG_BLK_CGROUP */ | ||
| 593 | |||
| 594 | struct blkcg { | ||
| 595 | }; | ||
| 596 | |||
| 597 | struct blkg_policy_data { | ||
| 598 | }; | ||
| 599 | |||
| 600 | struct blkcg_policy_data { | ||
| 601 | }; | ||
| 602 | |||
| 603 | struct blkcg_gq { | ||
| 604 | }; | ||
| 605 | |||
| 606 | struct blkcg_policy { | ||
| 607 | }; | ||
| 608 | |||
| 609 | #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) | ||
| 610 | |||
| 611 | static inline struct cgroup_subsys_state * | ||
| 612 | task_get_blkcg_css(struct task_struct *task) | ||
| 613 | { | ||
| 614 | return NULL; | ||
| 615 | } | ||
| 616 | |||
| 617 | #ifdef CONFIG_BLOCK | ||
| 618 | |||
| 619 | static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } | ||
| 620 | static inline int blkcg_init_queue(struct request_queue *q) { return 0; } | ||
| 621 | static inline void blkcg_drain_queue(struct request_queue *q) { } | ||
| 622 | static inline void blkcg_exit_queue(struct request_queue *q) { } | ||
| 623 | static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; } | ||
| 624 | static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } | ||
| 625 | static inline int blkcg_activate_policy(struct request_queue *q, | ||
| 626 | const struct blkcg_policy *pol) { return 0; } | ||
| 627 | static inline void blkcg_deactivate_policy(struct request_queue *q, | ||
| 628 | const struct blkcg_policy *pol) { } | ||
| 629 | |||
| 630 | static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } | ||
| 631 | |||
| 632 | static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, | ||
| 633 | struct blkcg_policy *pol) { return NULL; } | ||
| 634 | static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } | ||
| 635 | static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } | ||
| 636 | static inline void blkg_get(struct blkcg_gq *blkg) { } | ||
| 637 | static inline void blkg_put(struct blkcg_gq *blkg) { } | ||
| 638 | |||
| 639 | static inline struct request_list *blk_get_rl(struct request_queue *q, | ||
| 640 | struct bio *bio) { return &q->root_rl; } | ||
| 641 | static inline void blk_put_rl(struct request_list *rl) { } | ||
| 642 | static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } | ||
| 643 | static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } | ||
| 644 | |||
| 645 | #define blk_queue_for_each_rl(rl, q) \ | ||
| 646 | for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) | ||
| 647 | |||
| 648 | #endif /* CONFIG_BLOCK */ | ||
| 649 | #endif /* CONFIG_BLK_CGROUP */ | ||
| 650 | #endif /* _BLK_CGROUP_H */ | ||
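As a rough illustration of the per-policy plumbing declared above: a policy embeds struct blkg_policy_data at the start of its private area (hence the pd_size rule) and updates its counters with the blkg_stat/blkg_rwstat helpers. The foo_* names below are hypothetical:

struct foo_pd {
	struct blkg_policy_data pd;	/* must come first; see pd_size */
	struct blkg_rwstat bytes;
	struct blkg_stat ios;
};

static inline struct foo_pd *pd_to_foo(struct blkg_policy_data *pd)
{
	return container_of(pd, struct foo_pd, pd);
}

static void foo_account_bio(struct blkg_policy_data *pd, struct bio *bio)
{
	struct foo_pd *fpd = pd_to_foo(pd);

	/* updates must be serialized by the caller, e.g. under queue_lock */
	blkg_rwstat_add(&fpd->bytes, bio->bi_rw, bio->bi_iter.bi_size);
	blkg_stat_add(&fpd->ios, 1);
}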
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2056a99b92f8..37d1602c4f7a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
| @@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int, | |||
| 96 | 96 | ||
| 97 | typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, | 97 | typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, |
| 98 | bool); | 98 | bool); |
| 99 | typedef void (busy_tag_iter_fn)(struct request *, void *, bool); | ||
| 99 | 100 | ||
| 100 | struct blk_mq_ops { | 101 | struct blk_mq_ops { |
| 101 | /* | 102 | /* |
| @@ -182,6 +183,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | |||
| 182 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | 183 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, |
| 183 | gfp_t gfp, bool reserved); | 184 | gfp_t gfp, bool reserved); |
| 184 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); | 185 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); |
| 186 | struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags); | ||
| 185 | 187 | ||
| 186 | enum { | 188 | enum { |
| 187 | BLK_MQ_UNIQUE_TAG_BITS = 16, | 189 | BLK_MQ_UNIQUE_TAG_BITS = 16, |
| @@ -224,6 +226,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async); | |||
| 224 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 226 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
| 225 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | 227 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, |
| 226 | void *priv); | 228 | void *priv); |
| 229 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | ||
| 230 | void *priv); | ||
| 227 | void blk_mq_freeze_queue(struct request_queue *q); | 231 | void blk_mq_freeze_queue(struct request_queue *q); |
| 228 | void blk_mq_unfreeze_queue(struct request_queue *q); | 232 | void blk_mq_unfreeze_queue(struct request_queue *q); |
| 229 | void blk_mq_freeze_queue_start(struct request_queue *q); | 233 | void blk_mq_freeze_queue_start(struct request_queue *q); |
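blk_mq_all_tag_busy_iter() with a busy_tag_iter_fn lets a driver walk every in-flight request on a tag set, e.g. to cancel them during a controller reset. A hedged sketch; whether to complete, requeue or abort each request is driver specific:

static void foo_cancel_rq(struct request *rq, void *data, bool reserved)
{
	/* mark the command as failed and finish it */
	blk_mq_complete_request(rq);
}

static void foo_cancel_all(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		blk_mq_all_tag_busy_iter(set->tags[i], foo_cancel_rq, NULL);
}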
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index b7299febc4b4..7303b3405520 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -65,7 +65,7 @@ struct bio { | |||
| 65 | unsigned int bi_seg_front_size; | 65 | unsigned int bi_seg_front_size; |
| 66 | unsigned int bi_seg_back_size; | 66 | unsigned int bi_seg_back_size; |
| 67 | 67 | ||
| 68 | atomic_t bi_remaining; | 68 | atomic_t __bi_remaining; |
| 69 | 69 | ||
| 70 | bio_end_io_t *bi_end_io; | 70 | bio_end_io_t *bi_end_io; |
| 71 | 71 | ||
| @@ -92,7 +92,7 @@ struct bio { | |||
| 92 | 92 | ||
| 93 | unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ | 93 | unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ |
| 94 | 94 | ||
| 95 | atomic_t bi_cnt; /* pin count */ | 95 | atomic_t __bi_cnt; /* pin count */ |
| 96 | 96 | ||
| 97 | struct bio_vec *bi_io_vec; /* the actual vec list */ | 97 | struct bio_vec *bi_io_vec; /* the actual vec list */ |
| 98 | 98 | ||
| @@ -112,16 +112,15 @@ struct bio { | |||
| 112 | * bio flags | 112 | * bio flags |
| 113 | */ | 113 | */ |
| 114 | #define BIO_UPTODATE 0 /* ok after I/O completion */ | 114 | #define BIO_UPTODATE 0 /* ok after I/O completion */ |
| 115 | #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ | 115 | #define BIO_SEG_VALID 1 /* bi_phys_segments valid */ |
| 116 | #define BIO_EOF 2 /* out-of-bounds error */ | 116 | #define BIO_CLONED 2 /* doesn't own data */ |
| 117 | #define BIO_SEG_VALID 3 /* bi_phys_segments valid */ | 117 | #define BIO_BOUNCED 3 /* bio is a bounce bio */ |
| 118 | #define BIO_CLONED 4 /* doesn't own data */ | 118 | #define BIO_USER_MAPPED 4 /* contains user pages */ |
| 119 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ | 119 | #define BIO_NULL_MAPPED 5 /* contains invalid user pages */ |
| 120 | #define BIO_USER_MAPPED 6 /* contains user pages */ | 120 | #define BIO_QUIET 6 /* Make BIO Quiet */ |
| 121 | #define BIO_EOPNOTSUPP 7 /* not supported */ | 121 | #define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */ |
| 122 | #define BIO_NULL_MAPPED 8 /* contains invalid user pages */ | 122 | #define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */ |
| 123 | #define BIO_QUIET 9 /* Make BIO Quiet */ | 123 | #define BIO_REFFED 9 /* bio has elevated ->bi_cnt */ |
| 124 | #define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */ | ||
| 125 | 124 | ||
| 126 | /* | 125 | /* |
| 127 | * Flags starting here get preserved by bio_reset() - this includes | 126 | * Flags starting here get preserved by bio_reset() - this includes |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5d93a6645e88..d4068c17d0df 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
| 13 | #include <linux/workqueue.h> | 13 | #include <linux/workqueue.h> |
| 14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
| 15 | #include <linux/backing-dev.h> | 15 | #include <linux/backing-dev-defs.h> |
| 16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
| 17 | #include <linux/mempool.h> | 17 | #include <linux/mempool.h> |
| 18 | #include <linux/bio.h> | 18 | #include <linux/bio.h> |
| @@ -22,15 +22,13 @@ | |||
| 22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
| 23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
| 24 | #include <linux/percpu-refcount.h> | 24 | #include <linux/percpu-refcount.h> |
| 25 | 25 | #include <linux/scatterlist.h> | |
| 26 | #include <asm/scatterlist.h> | ||
| 27 | 26 | ||
| 28 | struct module; | 27 | struct module; |
| 29 | struct scsi_ioctl_command; | 28 | struct scsi_ioctl_command; |
| 30 | 29 | ||
| 31 | struct request_queue; | 30 | struct request_queue; |
| 32 | struct elevator_queue; | 31 | struct elevator_queue; |
| 33 | struct request_pm_state; | ||
| 34 | struct blk_trace; | 32 | struct blk_trace; |
| 35 | struct request; | 33 | struct request; |
| 36 | struct sg_io_hdr; | 34 | struct sg_io_hdr; |
| @@ -75,18 +73,7 @@ struct request_list { | |||
| 75 | enum rq_cmd_type_bits { | 73 | enum rq_cmd_type_bits { |
| 76 | REQ_TYPE_FS = 1, /* fs request */ | 74 | REQ_TYPE_FS = 1, /* fs request */ |
| 77 | REQ_TYPE_BLOCK_PC, /* scsi command */ | 75 | REQ_TYPE_BLOCK_PC, /* scsi command */ |
| 78 | REQ_TYPE_SENSE, /* sense request */ | 76 | REQ_TYPE_DRV_PRIV, /* driver defined types from here */ |
| 79 | REQ_TYPE_PM_SUSPEND, /* suspend request */ | ||
| 80 | REQ_TYPE_PM_RESUME, /* resume request */ | ||
| 81 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ | ||
| 82 | REQ_TYPE_SPECIAL, /* driver defined type */ | ||
| 83 | /* | ||
| 84 | * for ATA/ATAPI devices. this really doesn't belong here, ide should | ||
| 85 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver | ||
| 86 | * private REQ_LB opcodes to differentiate what type of request this is | ||
| 87 | */ | ||
| 88 | REQ_TYPE_ATA_TASKFILE, | ||
| 89 | REQ_TYPE_ATA_PC, | ||
| 90 | }; | 77 | }; |
| 91 | 78 | ||
| 92 | #define BLK_MAX_CDB 16 | 79 | #define BLK_MAX_CDB 16 |
| @@ -108,7 +95,7 @@ struct request { | |||
| 108 | struct blk_mq_ctx *mq_ctx; | 95 | struct blk_mq_ctx *mq_ctx; |
| 109 | 96 | ||
| 110 | u64 cmd_flags; | 97 | u64 cmd_flags; |
| 111 | enum rq_cmd_type_bits cmd_type; | 98 | unsigned cmd_type; |
| 112 | unsigned long atomic_flags; | 99 | unsigned long atomic_flags; |
| 113 | 100 | ||
| 114 | int cpu; | 101 | int cpu; |
| @@ -216,19 +203,6 @@ static inline unsigned short req_get_ioprio(struct request *req) | |||
| 216 | return req->ioprio; | 203 | return req->ioprio; |
| 217 | } | 204 | } |
| 218 | 205 | ||
| 219 | /* | ||
| 220 | * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME | ||
| 221 | * requests. Some step values could eventually be made generic. | ||
| 222 | */ | ||
| 223 | struct request_pm_state | ||
| 224 | { | ||
| 225 | /* PM state machine step value, currently driver specific */ | ||
| 226 | int pm_step; | ||
| 227 | /* requested PM state value (S1, S2, S3, S4, ...) */ | ||
| 228 | u32 pm_state; | ||
| 229 | void* data; /* for driver use */ | ||
| 230 | }; | ||
| 231 | |||
| 232 | #include <linux/elevator.h> | 206 | #include <linux/elevator.h> |
| 233 | 207 | ||
| 234 | struct blk_queue_ctx; | 208 | struct blk_queue_ctx; |
| @@ -469,7 +443,7 @@ struct request_queue { | |||
| 469 | struct mutex sysfs_lock; | 443 | struct mutex sysfs_lock; |
| 470 | 444 | ||
| 471 | int bypass_depth; | 445 | int bypass_depth; |
| 472 | int mq_freeze_depth; | 446 | atomic_t mq_freeze_depth; |
| 473 | 447 | ||
| 474 | #if defined(CONFIG_BLK_DEV_BSG) | 448 | #if defined(CONFIG_BLK_DEV_BSG) |
| 475 | bsg_job_fn *bsg_job_fn; | 449 | bsg_job_fn *bsg_job_fn; |
| @@ -610,10 +584,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
| 610 | (((rq)->cmd_flags & REQ_STARTED) && \ | 584 | (((rq)->cmd_flags & REQ_STARTED) && \ |
| 611 | ((rq)->cmd_type == REQ_TYPE_FS)) | 585 | ((rq)->cmd_type == REQ_TYPE_FS)) |
| 612 | 586 | ||
| 613 | #define blk_pm_request(rq) \ | ||
| 614 | ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ | ||
| 615 | (rq)->cmd_type == REQ_TYPE_PM_RESUME) | ||
| 616 | |||
| 617 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) | 587 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) |
| 618 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) | 588 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) |
| 619 | /* rq->queuelist of dequeued request must be list_empty() */ | 589 | /* rq->queuelist of dequeued request must be list_empty() */ |
| @@ -821,30 +791,12 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
| 821 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 791 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 822 | struct scsi_ioctl_command __user *); | 792 | struct scsi_ioctl_command __user *); |
| 823 | 793 | ||
| 824 | /* | ||
| 825 | * A queue has just exited congestion. Note this in the global counter of | ||
| 826 | * congested queues, and wake up anyone who was waiting for requests to be | ||
| 827 | * put back. | ||
| 828 | */ | ||
| 829 | static inline void blk_clear_queue_congested(struct request_queue *q, int sync) | ||
| 830 | { | ||
| 831 | clear_bdi_congested(&q->backing_dev_info, sync); | ||
| 832 | } | ||
| 833 | |||
| 834 | /* | ||
| 835 | * A queue has just entered congestion. Flag that in the queue's VM-visible | ||
| 836 | * state flags and increment the global counter of congested queues. | ||
| 837 | */ | ||
| 838 | static inline void blk_set_queue_congested(struct request_queue *q, int sync) | ||
| 839 | { | ||
| 840 | set_bdi_congested(&q->backing_dev_info, sync); | ||
| 841 | } | ||
| 842 | |||
| 843 | extern void blk_start_queue(struct request_queue *q); | 794 | extern void blk_start_queue(struct request_queue *q); |
| 844 | extern void blk_stop_queue(struct request_queue *q); | 795 | extern void blk_stop_queue(struct request_queue *q); |
| 845 | extern void blk_sync_queue(struct request_queue *q); | 796 | extern void blk_sync_queue(struct request_queue *q); |
| 846 | extern void __blk_stop_queue(struct request_queue *q); | 797 | extern void __blk_stop_queue(struct request_queue *q); |
| 847 | extern void __blk_run_queue(struct request_queue *q); | 798 | extern void __blk_run_queue(struct request_queue *q); |
| 799 | extern void __blk_run_queue_uncond(struct request_queue *q); | ||
| 848 | extern void blk_run_queue(struct request_queue *); | 800 | extern void blk_run_queue(struct request_queue *); |
| 849 | extern void blk_run_queue_async(struct request_queue *q); | 801 | extern void blk_run_queue_async(struct request_queue *q); |
| 850 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 802 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| @@ -933,7 +885,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | |||
| 933 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) | 885 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) |
| 934 | return q->limits.max_hw_sectors; | 886 | return q->limits.max_hw_sectors; |
| 935 | 887 | ||
| 936 | if (!q->limits.chunk_sectors) | 888 | if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) |
| 937 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | 889 | return blk_queue_get_max_sectors(q, rq->cmd_flags); |
| 938 | 890 | ||
| 939 | return min(blk_max_size_offset(q, blk_rq_pos(rq)), | 891 | return min(blk_max_size_offset(q, blk_rq_pos(rq)), |
| @@ -1054,6 +1006,7 @@ bool __must_check blk_get_queue(struct request_queue *); | |||
| 1054 | struct request_queue *blk_alloc_queue(gfp_t); | 1006 | struct request_queue *blk_alloc_queue(gfp_t); |
| 1055 | struct request_queue *blk_alloc_queue_node(gfp_t, int); | 1007 | struct request_queue *blk_alloc_queue_node(gfp_t, int); |
| 1056 | extern void blk_put_queue(struct request_queue *); | 1008 | extern void blk_put_queue(struct request_queue *); |
| 1009 | extern void blk_set_queue_dying(struct request_queue *); | ||
| 1057 | 1010 | ||
| 1058 | /* | 1011 | /* |
| 1059 | * block layer runtime pm functions | 1012 | * block layer runtime pm functions |
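With REQ_TYPE_SENSE, the PM types and REQ_TYPE_SPECIAL removed, driver-internal commands are tagged REQ_TYPE_DRV_PRIV. A hedged sketch of issuing such a request; names are illustrative and error handling is trimmed:

static int foo_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_DRV_PRIV;	/* was REQ_TYPE_SPECIAL */
	blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return 0;
}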
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 0995c2de8162..f589222bfa87 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
| @@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename, | |||
| 357 | /* Only NUMA needs hash distribution. 64bit NUMA architectures have | 357 | /* Only NUMA needs hash distribution. 64bit NUMA architectures have |
| 358 | * sufficient vmalloc space. | 358 | * sufficient vmalloc space. |
| 359 | */ | 359 | */ |
| 360 | #if defined(CONFIG_NUMA) && defined(CONFIG_64BIT) | 360 | #ifdef CONFIG_NUMA |
| 361 | #define HASHDIST_DEFAULT 1 | 361 | #define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) |
| 362 | extern int hashdist; /* Distribute hashes across NUMA nodes? */ | ||
| 362 | #else | 363 | #else |
| 363 | #define HASHDIST_DEFAULT 0 | 364 | #define hashdist (0) |
| 364 | #endif | 365 | #endif |
| 365 | extern int hashdist; /* Distribute hashes across NUMA nodes? */ | ||
| 366 | 366 | ||
| 367 | 367 | ||
| 368 | #endif /* _LINUX_BOOTMEM_H */ | 368 | #endif /* _LINUX_BOOTMEM_H */ |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d5cda067115a..4383476a0d48 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -105,7 +105,8 @@ struct bpf_verifier_ops { | |||
| 105 | */ | 105 | */ |
| 106 | bool (*is_valid_access)(int off, int size, enum bpf_access_type type); | 106 | bool (*is_valid_access)(int off, int size, enum bpf_access_type type); |
| 107 | 107 | ||
| 108 | u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, | 108 | u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, |
| 109 | int src_reg, int ctx_off, | ||
| 109 | struct bpf_insn *insn); | 110 | struct bpf_insn *insn); |
| 110 | }; | 111 | }; |
| 111 | 112 | ||
| @@ -123,15 +124,41 @@ struct bpf_prog_aux { | |||
| 123 | const struct bpf_verifier_ops *ops; | 124 | const struct bpf_verifier_ops *ops; |
| 124 | struct bpf_map **used_maps; | 125 | struct bpf_map **used_maps; |
| 125 | struct bpf_prog *prog; | 126 | struct bpf_prog *prog; |
| 126 | struct work_struct work; | 127 | union { |
| 128 | struct work_struct work; | ||
| 129 | struct rcu_head rcu; | ||
| 130 | }; | ||
| 127 | }; | 131 | }; |
| 128 | 132 | ||
| 133 | struct bpf_array { | ||
| 134 | struct bpf_map map; | ||
| 135 | u32 elem_size; | ||
| 136 | /* 'ownership' of prog_array is claimed by the first program that | ||
| 137 | * is going to use this map or by the first program whose FD is stored | ||
| 138 | * in the map to make sure that all callers and callees have the same | ||
| 139 | * prog_type and JITed flag | ||
| 140 | */ | ||
| 141 | enum bpf_prog_type owner_prog_type; | ||
| 142 | bool owner_jited; | ||
| 143 | union { | ||
| 144 | char value[0] __aligned(8); | ||
| 145 | struct bpf_prog *prog[0] __aligned(8); | ||
| 146 | }; | ||
| 147 | }; | ||
| 148 | #define MAX_TAIL_CALL_CNT 32 | ||
| 149 | |||
| 150 | u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); | ||
| 151 | void bpf_prog_array_map_clear(struct bpf_map *map); | ||
| 152 | bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); | ||
| 153 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); | ||
| 154 | |||
| 129 | #ifdef CONFIG_BPF_SYSCALL | 155 | #ifdef CONFIG_BPF_SYSCALL |
| 130 | void bpf_register_prog_type(struct bpf_prog_type_list *tl); | 156 | void bpf_register_prog_type(struct bpf_prog_type_list *tl); |
| 131 | void bpf_register_map_type(struct bpf_map_type_list *tl); | 157 | void bpf_register_map_type(struct bpf_map_type_list *tl); |
| 132 | 158 | ||
| 133 | struct bpf_prog *bpf_prog_get(u32 ufd); | 159 | struct bpf_prog *bpf_prog_get(u32 ufd); |
| 134 | void bpf_prog_put(struct bpf_prog *prog); | 160 | void bpf_prog_put(struct bpf_prog *prog); |
| 161 | void bpf_prog_put_rcu(struct bpf_prog *prog); | ||
| 135 | 162 | ||
| 136 | struct bpf_map *bpf_map_get(struct fd f); | 163 | struct bpf_map *bpf_map_get(struct fd f); |
| 137 | void bpf_map_put(struct bpf_map *map); | 164 | void bpf_map_put(struct bpf_map *map); |
| @@ -160,5 +187,10 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; | |||
| 160 | 187 | ||
| 161 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; | 188 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
| 162 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; | 189 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
| 190 | extern const struct bpf_func_proto bpf_tail_call_proto; | ||
| 191 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; | ||
| 192 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; | ||
| 193 | extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; | ||
| 194 | extern const struct bpf_func_proto bpf_get_current_comm_proto; | ||
| 163 | 195 | ||
| 164 | #endif /* _LINUX_BPF_H */ | 196 | #endif /* _LINUX_BPF_H */ |
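The new prog-array map and bpf_tail_call() helper let one BPF program jump into another, with nesting bounded by MAX_TAIL_CALL_CNT. A sketch of the program side, written against the samples/bpf conventions of the time; the section and map names are illustrative:

struct bpf_map_def SEC("maps") jmp_table = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u32),
	.max_entries	= 8,
};

SEC("kprobe/foo_func")
int entry_prog(struct pt_regs *ctx)
{
	/* jump to the program stored in slot 1, if any */
	bpf_tail_call(ctx, &jmp_table, 1);

	/* falls through here when slot 1 is empty */
	return 0;
}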
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 656da2a12ffe..697ca7795bd9 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -1,6 +1,13 @@ | |||
| 1 | #ifndef _LINUX_BRCMPHY_H | 1 | #ifndef _LINUX_BRCMPHY_H |
| 2 | #define _LINUX_BRCMPHY_H | 2 | #define _LINUX_BRCMPHY_H |
| 3 | 3 | ||
| 4 | #include <linux/phy.h> | ||
| 5 | |||
| 6 | /* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used | ||
| 7 | * to configure the switch internal registers via MDIO accesses. | ||
| 8 | */ | ||
| 9 | #define BRCM_PSEUDO_PHY_ADDR 30 | ||
| 10 | |||
| 4 | #define PHY_ID_BCM50610 0x0143bd60 | 11 | #define PHY_ID_BCM50610 0x0143bd60 |
| 5 | #define PHY_ID_BCM50610M 0x0143bd70 | 12 | #define PHY_ID_BCM50610M 0x0143bd70 |
| 6 | #define PHY_ID_BCM5241 0x0143bc30 | 13 | #define PHY_ID_BCM5241 0x0143bc30 |
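BRCM_PSEUDO_PHY_ADDR gives switch drivers a named constant for the fixed address 30 used for indirect register access, instead of a magic number. A minimal sketch; the bus pointer and register number are illustrative:

#include <linux/brcmphy.h>
#include <linux/phy.h>

static int foo_read_pseudo_phy(struct mii_bus *bus, int regnum)
{
	/* talk to the switch through its pseudo-PHY */
	return mdiobus_read(bus, BRCM_PSEUDO_PHY_ADDR, regnum);
}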
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 73b45225a7ca..e6797ded700e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block) | |||
| 317 | return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); | 317 | return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | |||
| 321 | static inline struct buffer_head * | ||
| 322 | sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) | ||
| 323 | { | ||
| 324 | return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); | ||
| 325 | } | ||
| 326 | |||
| 320 | static inline struct buffer_head * | 327 | static inline struct buffer_head * |
| 321 | sb_find_get_block(struct super_block *sb, sector_t block) | 328 | sb_find_get_block(struct super_block *sb, sector_t block) |
| 322 | { | 329 | { |
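sb_getblk_gfp() lets a filesystem choose the allocation flags instead of the __GFP_MOVABLE default hard-coded in sb_getblk(). A hedged example of the kind of caller this enables; the flags are chosen for illustration only:

static struct buffer_head *foo_getblk_nofail(struct super_block *sb,
					     sector_t block)
{
	/* retry-forever semantics for a must-have metadata block */
	return sb_getblk_gfp(sb, block, __GFP_MOVABLE | __GFP_NOFAIL);
}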
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 3daf5ed392c9..2189935075b4 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h | |||
| @@ -19,7 +19,7 @@ enum cache_type { | |||
| 19 | /** | 19 | /** |
| 20 | * struct cacheinfo - represent a cache leaf node | 20 | * struct cacheinfo - represent a cache leaf node |
| 21 | * @type: type of the cache - data, inst or unified | 21 | * @type: type of the cache - data, inst or unified |
| 22 | * @level: represents the hierarcy in the multi-level cache | 22 | * @level: represents the hierarchy in the multi-level cache |
| 23 | * @coherency_line_size: size of each cache line usually representing | 23 | * @coherency_line_size: size of each cache line usually representing |
| 24 | * the minimum amount of data that gets transferred from memory | 24 | * the minimum amount of data that gets transferred from memory |
| 25 | * @number_of_sets: total number of sets, a set is a collection of cache | 25 | * @number_of_sets: total number of sets, a set is a collection of cache |
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index b6a52a4b457a..51bb6532785c 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h | |||
| @@ -27,10 +27,12 @@ | |||
| 27 | /** | 27 | /** |
| 28 | * struct can_skb_priv - private additional data inside CAN sk_buffs | 28 | * struct can_skb_priv - private additional data inside CAN sk_buffs |
| 29 | * @ifindex: ifindex of the first interface the CAN frame appeared on | 29 | * @ifindex: ifindex of the first interface the CAN frame appeared on |
| 30 | * @skbcnt: atomic counter to have an unique id together with skb pointer | ||
| 30 | * @cf: align to the following CAN frame at skb->data | 31 | * @cf: align to the following CAN frame at skb->data |
| 31 | */ | 32 | */ |
| 32 | struct can_skb_priv { | 33 | struct can_skb_priv { |
| 33 | int ifindex; | 34 | int ifindex; |
| 35 | int skbcnt; | ||
| 34 | struct can_frame cf[0]; | 36 | struct can_frame cf[0]; |
| 35 | }; | 37 | }; |
| 36 | 38 | ||
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 30f92cefaa72..9ebee53d3bf5 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
| @@ -43,9 +43,9 @@ struct ceph_options { | |||
| 43 | int flags; | 43 | int flags; |
| 44 | struct ceph_fsid fsid; | 44 | struct ceph_fsid fsid; |
| 45 | struct ceph_entity_addr my_addr; | 45 | struct ceph_entity_addr my_addr; |
| 46 | int mount_timeout; | 46 | unsigned long mount_timeout; /* jiffies */ |
| 47 | int osd_idle_ttl; | 47 | unsigned long osd_idle_ttl; /* jiffies */ |
| 48 | int osd_keepalive_timeout; | 48 | unsigned long osd_keepalive_timeout; /* jiffies */ |
| 49 | 49 | ||
| 50 | /* | 50 | /* |
| 51 | * any type that can't be simply compared or doesn't need | 51 | * any type that can't be simply compared or doesn't need |
| @@ -63,9 +63,9 @@ struct ceph_options { | |||
| 63 | /* | 63 | /* |
| 64 | * defaults | 64 | * defaults |
| 65 | */ | 65 | */ |
| 66 | #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 | 66 | #define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) |
| 67 | #define CEPH_OSD_KEEPALIVE_DEFAULT 5 | 67 | #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) |
| 68 | #define CEPH_OSD_IDLE_TTL_DEFAULT 60 | 68 | #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) |
| 69 | 69 | ||
| 70 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | 70 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) |
| 71 | #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) | 71 | #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) |
| @@ -93,13 +93,9 @@ enum { | |||
| 93 | CEPH_MOUNT_SHUTDOWN, | 93 | CEPH_MOUNT_SHUTDOWN, |
| 94 | }; | 94 | }; |
| 95 | 95 | ||
| 96 | /* | 96 | static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) |
| 97 | * subtract jiffies | ||
| 98 | */ | ||
| 99 | static inline unsigned long time_sub(unsigned long a, unsigned long b) | ||
| 100 | { | 97 | { |
| 101 | BUG_ON(time_after(b, a)); | 98 | return timeout ?: MAX_SCHEDULE_TIMEOUT; |
| 102 | return (long)a - (long)b; | ||
| 103 | } | 99 | } |
| 104 | 100 | ||
| 105 | struct ceph_mds_client; | 101 | struct ceph_mds_client; |
| @@ -178,6 +174,7 @@ static inline int calc_pages_for(u64 off, u64 len) | |||
| 178 | 174 | ||
| 179 | extern struct kmem_cache *ceph_inode_cachep; | 175 | extern struct kmem_cache *ceph_inode_cachep; |
| 180 | extern struct kmem_cache *ceph_cap_cachep; | 176 | extern struct kmem_cache *ceph_cap_cachep; |
| 177 | extern struct kmem_cache *ceph_cap_flush_cachep; | ||
| 181 | extern struct kmem_cache *ceph_dentry_cachep; | 178 | extern struct kmem_cache *ceph_dentry_cachep; |
| 182 | extern struct kmem_cache *ceph_file_cachep; | 179 | extern struct kmem_cache *ceph_file_cachep; |
| 183 | 180 | ||
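ceph_timeout_jiffies() folds the "0 means wait forever" convention into one place now that the option fields are stored in jiffies. A sketch of a typical wait-side caller, modeled loosely on the existing completion waits; the function and field choice are illustrative:

static int foo_wait_request(struct completion *done, struct ceph_options *opt)
{
	long left;

	left = wait_for_completion_killable_timeout(done,
			ceph_timeout_jiffies(opt->mount_timeout));
	if (left < 0)
		return left;		/* interrupted */
	if (!left)
		return -ETIMEDOUT;	/* only possible with a finite timeout */
	return 0;
}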
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index e15499422fdc..37753278987a 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/radix-tree.h> | 8 | #include <linux/radix-tree.h> |
| 9 | #include <linux/uio.h> | 9 | #include <linux/uio.h> |
| 10 | #include <linux/workqueue.h> | 10 | #include <linux/workqueue.h> |
| 11 | #include <net/net_namespace.h> | ||
| 11 | 12 | ||
| 12 | #include <linux/ceph/types.h> | 13 | #include <linux/ceph/types.h> |
| 13 | #include <linux/ceph/buffer.h> | 14 | #include <linux/ceph/buffer.h> |
| @@ -56,6 +57,7 @@ struct ceph_messenger { | |||
| 56 | struct ceph_entity_addr my_enc_addr; | 57 | struct ceph_entity_addr my_enc_addr; |
| 57 | 58 | ||
| 58 | atomic_t stopping; | 59 | atomic_t stopping; |
| 60 | possible_net_t net; | ||
| 59 | bool nocrc; | 61 | bool nocrc; |
| 60 | bool tcp_nodelay; | 62 | bool tcp_nodelay; |
| 61 | 63 | ||
| @@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr, | |||
| 267 | u64 required_features, | 269 | u64 required_features, |
| 268 | bool nocrc, | 270 | bool nocrc, |
| 269 | bool tcp_nodelay); | 271 | bool tcp_nodelay); |
| 272 | extern void ceph_messenger_fini(struct ceph_messenger *msgr); | ||
| 270 | 273 | ||
| 271 | extern void ceph_con_init(struct ceph_connection *con, void *private, | 274 | extern void ceph_con_init(struct ceph_connection *con, void *private, |
| 272 | const struct ceph_connection_operations *ops, | 275 | const struct ceph_connection_operations *ops, |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 61b19c46bdb3..7506b485bb6d 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, | |||
| 249 | struct ceph_msg *msg); | 249 | struct ceph_msg *msg); |
| 250 | 250 | ||
| 251 | extern void osd_req_op_init(struct ceph_osd_request *osd_req, | 251 | extern void osd_req_op_init(struct ceph_osd_request *osd_req, |
| 252 | unsigned int which, u16 opcode); | 252 | unsigned int which, u16 opcode, u32 flags); |
| 253 | 253 | ||
| 254 | extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, | 254 | extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, |
| 255 | unsigned int which, | 255 | unsigned int which, |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h new file mode 100644 index 000000000000..93755a629299 --- /dev/null +++ b/include/linux/cgroup-defs.h | |||
| @@ -0,0 +1,501 @@ | |||
| 1 | /* | ||
| 2 | * linux/cgroup-defs.h - basic definitions for cgroup | ||
| 3 | * | ||
| 4 | * This file provides basic type and interface. Include this file directly | ||
| 5 | * only if necessary to avoid cyclic dependencies. | ||
| 6 | */ | ||
| 7 | #ifndef _LINUX_CGROUP_DEFS_H | ||
| 8 | #define _LINUX_CGROUP_DEFS_H | ||
| 9 | |||
| 10 | #include <linux/limits.h> | ||
| 11 | #include <linux/list.h> | ||
| 12 | #include <linux/idr.h> | ||
| 13 | #include <linux/wait.h> | ||
| 14 | #include <linux/mutex.h> | ||
| 15 | #include <linux/rcupdate.h> | ||
| 16 | #include <linux/percpu-refcount.h> | ||
| 17 | #include <linux/percpu-rwsem.h> | ||
| 18 | #include <linux/workqueue.h> | ||
| 19 | |||
| 20 | #ifdef CONFIG_CGROUPS | ||
| 21 | |||
| 22 | struct cgroup; | ||
| 23 | struct cgroup_root; | ||
| 24 | struct cgroup_subsys; | ||
| 25 | struct cgroup_taskset; | ||
| 26 | struct kernfs_node; | ||
| 27 | struct kernfs_ops; | ||
| 28 | struct kernfs_open_file; | ||
| 29 | struct seq_file; | ||
| 30 | |||
| 31 | #define MAX_CGROUP_TYPE_NAMELEN 32 | ||
| 32 | #define MAX_CGROUP_ROOT_NAMELEN 64 | ||
| 33 | #define MAX_CFTYPE_NAME 64 | ||
| 34 | |||
| 35 | /* define the enumeration of all cgroup subsystems */ | ||
| 36 | #define SUBSYS(_x) _x ## _cgrp_id, | ||
| 37 | enum cgroup_subsys_id { | ||
| 38 | #include <linux/cgroup_subsys.h> | ||
| 39 | CGROUP_SUBSYS_COUNT, | ||
| 40 | }; | ||
| 41 | #undef SUBSYS | ||
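For readers unfamiliar with the SUBSYS() x-macro pattern used here: cgroup_subsys.h contains one SUBSYS(name) line per controller built into the kernel, so including it between the #define and the #undef expands into one enumerator per controller. A hedged illustration of the expansion, assuming only the cpu and memory controllers are configured:

	/* illustrative expansion only; the real list depends on Kconfig */
	enum cgroup_subsys_id {
		cpu_cgrp_id,
		memory_cgrp_id,
		CGROUP_SUBSYS_COUNT,
	};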
| 42 | |||
| 43 | /* bits in struct cgroup_subsys_state flags field */ | ||
| 44 | enum { | ||
| 45 | CSS_NO_REF = (1 << 0), /* no reference counting for this css */ | ||
| 46 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ | ||
| 47 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* bits in struct cgroup flags field */ | ||
| 51 | enum { | ||
| 52 | /* Control Group requires release notifications to userspace */ | ||
| 53 | CGRP_NOTIFY_ON_RELEASE, | ||
| 54 | /* | ||
| 55 | * Clone the parent's configuration when creating a new child | ||
| 56 | * cpuset cgroup. For historical reasons, this option can be | ||
| 57 | * specified at mount time and thus is implemented here. | ||
| 58 | */ | ||
| 59 | CGRP_CPUSET_CLONE_CHILDREN, | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* cgroup_root->flags */ | ||
| 63 | enum { | ||
| 64 | CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ | ||
| 65 | CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ | ||
| 66 | CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ | ||
| 67 | }; | ||
| 68 | |||
| 69 | /* cftype->flags */ | ||
| 70 | enum { | ||
| 71 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ | ||
| 72 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ | ||
| 73 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ | ||
| 74 | |||
| 75 | /* internal flags, do not use outside cgroup core proper */ | ||
| 76 | __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ | ||
| 77 | __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ | ||
| 78 | }; | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Per-subsystem/per-cgroup state maintained by the system. This is the | ||
| 82 | * fundamental structural building block that controllers deal with. | ||
| 83 | * | ||
| 84 | * Fields marked with "PI:" are public and immutable and may be accessed | ||
| 85 | * directly without synchronization. | ||
| 86 | */ | ||
| 87 | struct cgroup_subsys_state { | ||
| 88 | /* PI: the cgroup that this css is attached to */ | ||
| 89 | struct cgroup *cgroup; | ||
| 90 | |||
| 91 | /* PI: the cgroup subsystem that this css is attached to */ | ||
| 92 | struct cgroup_subsys *ss; | ||
| 93 | |||
| 94 | /* reference count - access via css_[try]get() and css_put() */ | ||
| 95 | struct percpu_ref refcnt; | ||
| 96 | |||
| 97 | /* PI: the parent css */ | ||
| 98 | struct cgroup_subsys_state *parent; | ||
| 99 | |||
| 100 | /* siblings list anchored at the parent's ->children */ | ||
| 101 | struct list_head sibling; | ||
| 102 | struct list_head children; | ||
| 103 | |||
| 104 | /* | ||
| 105 | * PI: Subsys-unique ID. 0 is unused and root is always 1. The | ||
| 106 | * matching css can be looked up using css_from_id(). | ||
| 107 | */ | ||
| 108 | int id; | ||
| 109 | |||
| 110 | unsigned int flags; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Monotonically increasing unique serial number which defines a | ||
| 114 | * uniform order among all csses. It's guaranteed that all | ||
| 115 | * ->children lists are in the ascending order of ->serial_nr and | ||
| 116 | * used to allow interrupting and resuming iterations. | ||
| 117 | */ | ||
| 118 | u64 serial_nr; | ||
| 119 | |||
| 120 | /* percpu_ref killing and RCU release */ | ||
| 121 | struct rcu_head rcu_head; | ||
| 122 | struct work_struct destroy_work; | ||
| 123 | }; | ||
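As the ->id comment above notes, a css can be looked up again from its subsystem-unique ID via css_from_id() (declared in cgroup.h). A minimal sketch of such a lookup, assuming the memory controller is built in and that "stored_id" is a previously saved ID (hypothetical caller):

	/* hypothetical lookup of a previously stored css ID */
	rcu_read_lock();
	css = css_from_id(stored_id, &memory_cgrp_subsys);
	if (css && css_tryget_online(css)) {
		/* ... use css ... */
		css_put(css);
	}
	rcu_read_unlock();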
| 124 | |||
| 125 | /* | ||
| 126 | * A css_set is a structure holding pointers to a set of | ||
| 127 | * cgroup_subsys_state objects. This saves space in the task struct | ||
| 128 | * object and speeds up fork()/exit(), since a single inc/dec and a | ||
| 129 | * list_add()/del() can bump the reference count on the entire cgroup | ||
| 130 | * set for a task. | ||
| 131 | */ | ||
| 132 | struct css_set { | ||
| 133 | /* Reference count */ | ||
| 134 | atomic_t refcount; | ||
| 135 | |||
| 136 | /* | ||
| 137 | * List running through all cgroup groups in the same hash | ||
| 138 | * slot. Protected by css_set_lock | ||
| 139 | */ | ||
| 140 | struct hlist_node hlist; | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Lists running through all tasks using this cgroup group. | ||
| 144 | * mg_tasks lists tasks which belong to this cset but are in the | ||
| 145 | * process of being migrated out or in. Protected by | ||
| 146 | * css_set_rwsem, but, during migration, once tasks are moved to | ||
| 147 | * mg_tasks, it can be read safely while holding cgroup_mutex. | ||
| 148 | */ | ||
| 149 | struct list_head tasks; | ||
| 150 | struct list_head mg_tasks; | ||
| 151 | |||
| 152 | /* | ||
| 153 | * List of cgrp_cset_links pointing at cgroups referenced from this | ||
| 154 | * css_set. Protected by css_set_lock. | ||
| 155 | */ | ||
| 156 | struct list_head cgrp_links; | ||
| 157 | |||
| 158 | /* the default cgroup associated with this css_set */ | ||
| 159 | struct cgroup *dfl_cgrp; | ||
| 160 | |||
| 161 | /* | ||
| 162 | * Set of subsystem states, one for each subsystem. This array is | ||
| 163 | * immutable after creation apart from the init_css_set during | ||
| 164 | * subsystem registration (at boot time). | ||
| 165 | */ | ||
| 166 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | ||
| 167 | |||
| 168 | /* | ||
| 169 | * List of csets participating in the on-going migration either as | ||
| 170 | * source or destination. Protected by cgroup_mutex. | ||
| 171 | */ | ||
| 172 | struct list_head mg_preload_node; | ||
| 173 | struct list_head mg_node; | ||
| 174 | |||
| 175 | /* | ||
| 176 | * If this cset is acting as the source of migration the following | ||
| 177 | * two fields are set. mg_src_cgrp is the source cgroup of the | ||
| 178 | * on-going migration and mg_dst_cset is the destination cset the | ||
| 179 | * target tasks on this cset should be migrated to. Protected by | ||
| 180 | * cgroup_mutex. | ||
| 181 | */ | ||
| 182 | struct cgroup *mg_src_cgrp; | ||
| 183 | struct css_set *mg_dst_cset; | ||
| 184 | |||
| 185 | /* | ||
| 186 | * On the default hierarchy, ->subsys[ssid] may point to a css | ||
| 187 | * attached to an ancestor instead of the cgroup this css_set is | ||
| 188 | * associated with. The following node is anchored at | ||
| 189 | * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to | ||
| 190 | * iterate through all css's attached to a given cgroup. | ||
| 191 | */ | ||
| 192 | struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; | ||
| 193 | |||
| 194 | /* For RCU-protected deletion */ | ||
| 195 | struct rcu_head rcu_head; | ||
| 196 | }; | ||
| 197 | |||
| 198 | struct cgroup { | ||
| 199 | /* self css with NULL ->ss, points back to this cgroup */ | ||
| 200 | struct cgroup_subsys_state self; | ||
| 201 | |||
| 202 | unsigned long flags; /* "unsigned long" so bitops work */ | ||
| 203 | |||
| 204 | /* | ||
| 205 | * idr allocated in-hierarchy ID. | ||
| 206 | * | ||
| 207 | * ID 0 is not used, the ID of the root cgroup is always 1, and a | ||
| 208 | * new cgroup will be assigned with a smallest available ID. | ||
| 209 | * | ||
| 210 | * Allocating/Removing ID must be protected by cgroup_mutex. | ||
| 211 | */ | ||
| 212 | int id; | ||
| 213 | |||
| 214 | /* | ||
| 215 | * If this cgroup contains any tasks, it contributes one to | ||
| 216 | * populated_cnt. All children with non-zero populated_cnt of | ||
| 217 | * their own contribute one. The count is zero iff there's no task | ||
| 218 | * in this cgroup or its subtree. | ||
| 219 | */ | ||
| 220 | int populated_cnt; | ||
| 221 | |||
| 222 | struct kernfs_node *kn; /* cgroup kernfs entry */ | ||
| 223 | struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */ | ||
| 224 | struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ | ||
| 225 | |||
| 226 | /* | ||
| 227 | * The bitmask of subsystems enabled on the child cgroups. | ||
| 228 | * ->subtree_control is the one configured through | ||
| 229 | * "cgroup.subtree_control" while ->child_subsys_mask is the | ||
| 230 | * effective one which may have more subsystems enabled. | ||
| 231 | * Controller knobs are made available iff it's enabled in | ||
| 232 | * ->subtree_control. | ||
| 233 | */ | ||
| 234 | unsigned int subtree_control; | ||
| 235 | unsigned int child_subsys_mask; | ||
| 236 | |||
| 237 | /* Private pointers for each registered subsystem */ | ||
| 238 | struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; | ||
| 239 | |||
| 240 | struct cgroup_root *root; | ||
| 241 | |||
| 242 | /* | ||
| 243 | * List of cgrp_cset_links pointing at css_sets with tasks in this | ||
| 244 | * cgroup. Protected by css_set_lock. | ||
| 245 | */ | ||
| 246 | struct list_head cset_links; | ||
| 247 | |||
| 248 | /* | ||
| 249 | * On the default hierarchy, a css_set for a cgroup with some | ||
| 250 | * subsys disabled will point to css's which are associated with | ||
| 251 | * the closest ancestor which has the subsys enabled. The | ||
| 252 | * following lists all css_sets which point to this cgroup's css | ||
| 253 | * for the given subsystem. | ||
| 254 | */ | ||
| 255 | struct list_head e_csets[CGROUP_SUBSYS_COUNT]; | ||
| 256 | |||
| 257 | /* | ||
| 258 | * list of pidlists, up to two for each namespace (one for procs, one | ||
| 259 | * for tasks); created on demand. | ||
| 260 | */ | ||
| 261 | struct list_head pidlists; | ||
| 262 | struct mutex pidlist_mutex; | ||
| 263 | |||
| 264 | /* used to wait for offlining of csses */ | ||
| 265 | wait_queue_head_t offline_waitq; | ||
| 266 | |||
| 267 | /* used to schedule release agent */ | ||
| 268 | struct work_struct release_agent_work; | ||
| 269 | }; | ||
| 270 | |||
| 271 | /* | ||
| 272 | * A cgroup_root represents the root of a cgroup hierarchy, and may be | ||
| 273 | * associated with a kernfs_root to form an active hierarchy. This is | ||
| 274 | * internal to cgroup core. Don't access directly from controllers. | ||
| 275 | */ | ||
| 276 | struct cgroup_root { | ||
| 277 | struct kernfs_root *kf_root; | ||
| 278 | |||
| 279 | /* The bitmask of subsystems attached to this hierarchy */ | ||
| 280 | unsigned int subsys_mask; | ||
| 281 | |||
| 282 | /* Unique id for this hierarchy. */ | ||
| 283 | int hierarchy_id; | ||
| 284 | |||
| 285 | /* The root cgroup. Root is destroyed on its release. */ | ||
| 286 | struct cgroup cgrp; | ||
| 287 | |||
| 288 | /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ | ||
| 289 | atomic_t nr_cgrps; | ||
| 290 | |||
| 291 | /* A list running through the active hierarchies */ | ||
| 292 | struct list_head root_list; | ||
| 293 | |||
| 294 | /* Hierarchy-specific flags */ | ||
| 295 | unsigned int flags; | ||
| 296 | |||
| 297 | /* IDs for cgroups in this hierarchy */ | ||
| 298 | struct idr cgroup_idr; | ||
| 299 | |||
| 300 | /* The path to use for release notifications. */ | ||
| 301 | char release_agent_path[PATH_MAX]; | ||
| 302 | |||
| 303 | /* The name for this hierarchy - may be empty */ | ||
| 304 | char name[MAX_CGROUP_ROOT_NAMELEN]; | ||
| 305 | }; | ||
| 306 | |||
| 307 | /* | ||
| 308 | * struct cftype: handler definitions for cgroup control files | ||
| 309 | * | ||
| 310 | * When reading/writing to a file: | ||
| 311 | * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata | ||
| 312 | * - the 'cftype' of the file is file->f_path.dentry->d_fsdata | ||
| 313 | */ | ||
| 314 | struct cftype { | ||
| 315 | /* | ||
| 316 | * By convention, the name should begin with the name of the | ||
| 317 | * subsystem, followed by a period. Zero length string indicates | ||
| 318 | * end of cftype array. | ||
| 319 | */ | ||
| 320 | char name[MAX_CFTYPE_NAME]; | ||
| 321 | int private; | ||
| 322 | /* | ||
| 323 | * If not 0, file mode is set to this value, otherwise it will | ||
| 324 | * be figured out automatically | ||
| 325 | */ | ||
| 326 | umode_t mode; | ||
| 327 | |||
| 328 | /* | ||
| 329 | * The maximum length of string, excluding trailing nul, that can | ||
| 330 | * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. | ||
| 331 | */ | ||
| 332 | size_t max_write_len; | ||
| 333 | |||
| 334 | /* CFTYPE_* flags */ | ||
| 335 | unsigned int flags; | ||
| 336 | |||
| 337 | /* | ||
| 338 | * Fields used for internal bookkeeping. Initialized automatically | ||
| 339 | * during registration. | ||
| 340 | */ | ||
| 341 | struct cgroup_subsys *ss; /* NULL for cgroup core files */ | ||
| 342 | struct list_head node; /* anchored at ss->cfts */ | ||
| 343 | struct kernfs_ops *kf_ops; | ||
| 344 | |||
| 345 | /* | ||
| 346 | * read_u64() is a shortcut for the common case of returning a | ||
| 347 | * single integer. Use it in place of read() | ||
| 348 | */ | ||
| 349 | u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); | ||
| 350 | /* | ||
| 351 | * read_s64() is a signed version of read_u64() | ||
| 352 | */ | ||
| 353 | s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); | ||
| 354 | |||
| 355 | /* generic seq_file read interface */ | ||
| 356 | int (*seq_show)(struct seq_file *sf, void *v); | ||
| 357 | |||
| 358 | /* optional ops, implement all or none */ | ||
| 359 | void *(*seq_start)(struct seq_file *sf, loff_t *ppos); | ||
| 360 | void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); | ||
| 361 | void (*seq_stop)(struct seq_file *sf, void *v); | ||
| 362 | |||
| 363 | /* | ||
| 364 | * write_u64() is a shortcut for the common case of accepting | ||
| 365 | * a single integer (as parsed by simple_strtoull) from | ||
| 366 | * userspace. Use in place of write(); return 0 or error. | ||
| 367 | */ | ||
| 368 | int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, | ||
| 369 | u64 val); | ||
| 370 | /* | ||
| 371 | * write_s64() is a signed version of write_u64() | ||
| 372 | */ | ||
| 373 | int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, | ||
| 374 | s64 val); | ||
| 375 | |||
| 376 | /* | ||
| 377 | * write() is the generic write callback which maps directly to | ||
| 378 | * kernfs write operation and overrides all other operations. | ||
| 379 | * Maximum write size is determined by ->max_write_len. Use | ||
| 380 | * of_css/cft() to access the associated css and cft. | ||
| 381 | */ | ||
| 382 | ssize_t (*write)(struct kernfs_open_file *of, | ||
| 383 | char *buf, size_t nbytes, loff_t off); | ||
| 384 | |||
| 385 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 386 | struct lock_class_key lockdep_key; | ||
| 387 | #endif | ||
| 388 | }; | ||
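To make the cftype contract concrete, here is a hedged sketch of how a controller typically declares its control files (hypothetical "foo" controller; note the empty sentinel entry that terminates the array, matching the zero-length-name convention described above):

	static u64 foo_weight_read(struct cgroup_subsys_state *css,
				   struct cftype *cft)
	{
		return 10;	/* placeholder value */
	}

	static struct cftype foo_files[] = {
		{
			.name = "foo.weight",
			.read_u64 = foo_weight_read,
		},
		{ }	/* zero-length name terminates the array */
	};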
| 389 | |||
| 390 | /* | ||
| 391 | * Control Group subsystem type. | ||
| 392 | * See Documentation/cgroups/cgroups.txt for details | ||
| 393 | */ | ||
| 394 | struct cgroup_subsys { | ||
| 395 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); | ||
| 396 | int (*css_online)(struct cgroup_subsys_state *css); | ||
| 397 | void (*css_offline)(struct cgroup_subsys_state *css); | ||
| 398 | void (*css_released)(struct cgroup_subsys_state *css); | ||
| 399 | void (*css_free)(struct cgroup_subsys_state *css); | ||
| 400 | void (*css_reset)(struct cgroup_subsys_state *css); | ||
| 401 | void (*css_e_css_changed)(struct cgroup_subsys_state *css); | ||
| 402 | |||
| 403 | int (*can_attach)(struct cgroup_subsys_state *css, | ||
| 404 | struct cgroup_taskset *tset); | ||
| 405 | void (*cancel_attach)(struct cgroup_subsys_state *css, | ||
| 406 | struct cgroup_taskset *tset); | ||
| 407 | void (*attach)(struct cgroup_subsys_state *css, | ||
| 408 | struct cgroup_taskset *tset); | ||
| 409 | void (*fork)(struct task_struct *task); | ||
| 410 | void (*exit)(struct cgroup_subsys_state *css, | ||
| 411 | struct cgroup_subsys_state *old_css, | ||
| 412 | struct task_struct *task); | ||
| 413 | void (*bind)(struct cgroup_subsys_state *root_css); | ||
| 414 | |||
| 415 | int disabled; | ||
| 416 | int early_init; | ||
| 417 | |||
| 418 | /* | ||
| 419 | * If %false, this subsystem is properly hierarchical - | ||
| 420 | * configuration, resource accounting and restriction on a parent | ||
| 421 | * cgroup cover those of its children. If %true, hierarchy support | ||
| 422 | * is broken in some ways - some subsystems ignore hierarchy | ||
| 423 | * completely while others are only implemented half-way. | ||
| 424 | * | ||
| 425 | * It's now disallowed to create nested cgroups if the subsystem is | ||
| 426 | * broken and cgroup core will emit a warning message on such | ||
| 427 | * cases. Eventually, all subsystems will be made properly | ||
| 428 | * hierarchical and this will go away. | ||
| 429 | */ | ||
| 430 | bool broken_hierarchy; | ||
| 431 | bool warned_broken_hierarchy; | ||
| 432 | |||
| 433 | /* the following two fields are initialized automatically during boot */ | ||
| 434 | int id; | ||
| 435 | const char *name; | ||
| 436 | |||
| 437 | /* link to parent, protected by cgroup_lock() */ | ||
| 438 | struct cgroup_root *root; | ||
| 439 | |||
| 440 | /* idr for css->id */ | ||
| 441 | struct idr css_idr; | ||
| 442 | |||
| 443 | /* | ||
| 444 | * List of cftypes. Each entry is the first entry of an array | ||
| 445 | * terminated by zero length name. | ||
| 446 | */ | ||
| 447 | struct list_head cfts; | ||
| 448 | |||
| 449 | /* | ||
| 450 | * Base cftypes which are automatically registered. The two can | ||
| 451 | * point to the same array. | ||
| 452 | */ | ||
| 453 | struct cftype *dfl_cftypes; /* for the default hierarchy */ | ||
| 454 | struct cftype *legacy_cftypes; /* for the legacy hierarchies */ | ||
| 455 | |||
| 456 | /* | ||
| 457 | * A subsystem may depend on other subsystems. When such subsystem | ||
| 458 | * is enabled on a cgroup, the depended-upon subsystems are enabled | ||
| 459 | * together if available. Subsystems enabled due to dependency are | ||
| 460 | * not visible to userland until explicitly enabled. The following | ||
| 461 | * specifies the mask of subsystems that this one depends on. | ||
| 462 | */ | ||
| 463 | unsigned int depends_on; | ||
| 464 | }; | ||
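A hedged sketch of how the callbacks and cftype arrays above are tied together in a controller declaration (hypothetical "foo" controller reusing the foo_files array sketched earlier; a real controller must also be listed in cgroup_subsys.h so its _cgrp_subsys symbol is picked up):

	struct cgroup_subsys foo_cgrp_subsys = {
		.css_alloc	= foo_css_alloc,	/* hypothetical callbacks */
		.css_free	= foo_css_free,
		.dfl_cftypes	= foo_files,
		.legacy_cftypes	= foo_files,
	};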
| 465 | |||
| 466 | extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; | ||
| 467 | |||
| 468 | /** | ||
| 469 | * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups | ||
| 470 | * @tsk: target task | ||
| 471 | * | ||
| 472 | * Called from threadgroup_change_begin() and allows cgroup operations to | ||
| 473 | * synchronize against threadgroup changes using a percpu_rw_semaphore. | ||
| 474 | */ | ||
| 475 | static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) | ||
| 476 | { | ||
| 477 | percpu_down_read(&cgroup_threadgroup_rwsem); | ||
| 478 | } | ||
| 479 | |||
| 480 | /** | ||
| 481 | * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups | ||
| 482 | * @tsk: target task | ||
| 483 | * | ||
| 484 | * Called from threadgroup_change_end(). Counterpart of | ||
| 485 | * cgroup_threadgroup_change_begin(). | ||
| 486 | */ | ||
| 487 | static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) | ||
| 488 | { | ||
| 489 | percpu_up_read(&cgroup_threadgroup_rwsem); | ||
| 490 | } | ||
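The two helpers above are meant to bracket any update to a task's threadgroup so that cgroup migration sees a stable group. A minimal usage sketch (this is roughly what threadgroup_change_begin()/threadgroup_change_end() delegate to, per the comments above):

	cgroup_threadgroup_change_begin(tsk);
	/* ... add or remove threads from tsk's threadgroup ... */
	cgroup_threadgroup_change_end(tsk);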
| 491 | |||
| 492 | #else /* CONFIG_CGROUPS */ | ||
| 493 | |||
| 494 | #define CGROUP_SUBSYS_COUNT 0 | ||
| 495 | |||
| 496 | static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} | ||
| 497 | static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} | ||
| 498 | |||
| 499 | #endif /* CONFIG_CGROUPS */ | ||
| 500 | |||
| 501 | #endif /* _LINUX_CGROUP_DEFS_H */ | ||
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b9cb94c3102a..a593e299162e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
| @@ -11,94 +11,200 @@ | |||
| 11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
| 12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
| 13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
| 14 | #include <linux/rcupdate.h> | ||
| 15 | #include <linux/rculist.h> | 14 | #include <linux/rculist.h> |
| 16 | #include <linux/cgroupstats.h> | 15 | #include <linux/cgroupstats.h> |
| 17 | #include <linux/rwsem.h> | 16 | #include <linux/rwsem.h> |
| 18 | #include <linux/idr.h> | ||
| 19 | #include <linux/workqueue.h> | ||
| 20 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 21 | #include <linux/percpu-refcount.h> | ||
| 22 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
| 23 | #include <linux/kernfs.h> | 19 | #include <linux/kernfs.h> |
| 24 | #include <linux/wait.h> | 20 | |
| 21 | #include <linux/cgroup-defs.h> | ||
| 25 | 22 | ||
| 26 | #ifdef CONFIG_CGROUPS | 23 | #ifdef CONFIG_CGROUPS |
| 27 | 24 | ||
| 28 | struct cgroup_root; | 25 | /* a css_task_iter should be treated as an opaque object */ |
| 29 | struct cgroup_subsys; | 26 | struct css_task_iter { |
| 30 | struct cgroup; | 27 | struct cgroup_subsys *ss; |
| 31 | 28 | ||
| 32 | extern int cgroup_init_early(void); | 29 | struct list_head *cset_pos; |
| 33 | extern int cgroup_init(void); | 30 | struct list_head *cset_head; |
| 34 | extern void cgroup_fork(struct task_struct *p); | ||
| 35 | extern void cgroup_post_fork(struct task_struct *p); | ||
| 36 | extern void cgroup_exit(struct task_struct *p); | ||
| 37 | extern int cgroupstats_build(struct cgroupstats *stats, | ||
| 38 | struct dentry *dentry); | ||
| 39 | 31 | ||
| 40 | extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, | 32 | struct list_head *task_pos; |
| 41 | struct pid *pid, struct task_struct *tsk); | 33 | struct list_head *tasks_head; |
| 34 | struct list_head *mg_tasks_head; | ||
| 35 | }; | ||
| 42 | 36 | ||
| 43 | /* define the enumeration of all cgroup subsystems */ | 37 | extern struct cgroup_root cgrp_dfl_root; |
| 44 | #define SUBSYS(_x) _x ## _cgrp_id, | 38 | extern struct css_set init_css_set; |
| 45 | enum cgroup_subsys_id { | 39 | |
| 40 | #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; | ||
| 46 | #include <linux/cgroup_subsys.h> | 41 | #include <linux/cgroup_subsys.h> |
| 47 | CGROUP_SUBSYS_COUNT, | ||
| 48 | }; | ||
| 49 | #undef SUBSYS | 42 | #undef SUBSYS |
| 50 | 43 | ||
| 44 | bool css_has_online_children(struct cgroup_subsys_state *css); | ||
| 45 | struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); | ||
| 46 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, | ||
| 47 | struct cgroup_subsys *ss); | ||
| 48 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | ||
| 49 | struct cgroup_subsys *ss); | ||
| 50 | |||
| 51 | bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); | ||
| 52 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); | ||
| 53 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); | ||
| 54 | |||
| 55 | int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | ||
| 56 | int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | ||
| 57 | int cgroup_rm_cftypes(struct cftype *cfts); | ||
| 58 | |||
| 59 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); | ||
| 60 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); | ||
| 61 | int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, | ||
| 62 | struct pid *pid, struct task_struct *tsk); | ||
| 63 | |||
| 64 | void cgroup_fork(struct task_struct *p); | ||
| 65 | void cgroup_post_fork(struct task_struct *p); | ||
| 66 | void cgroup_exit(struct task_struct *p); | ||
| 67 | |||
| 68 | int cgroup_init_early(void); | ||
| 69 | int cgroup_init(void); | ||
| 70 | |||
| 51 | /* | 71 | /* |
| 52 | * Per-subsystem/per-cgroup state maintained by the system. This is the | 72 | * Iteration helpers and macros. |
| 53 | * fundamental structural building block that controllers deal with. | 73 | */ |
| 74 | |||
| 75 | struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, | ||
| 76 | struct cgroup_subsys_state *parent); | ||
| 77 | struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos, | ||
| 78 | struct cgroup_subsys_state *css); | ||
| 79 | struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos); | ||
| 80 | struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos, | ||
| 81 | struct cgroup_subsys_state *css); | ||
| 82 | |||
| 83 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); | ||
| 84 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); | ||
| 85 | |||
| 86 | void css_task_iter_start(struct cgroup_subsys_state *css, | ||
| 87 | struct css_task_iter *it); | ||
| 88 | struct task_struct *css_task_iter_next(struct css_task_iter *it); | ||
| 89 | void css_task_iter_end(struct css_task_iter *it); | ||
| 90 | |||
| 91 | /** | ||
| 92 | * css_for_each_child - iterate through children of a css | ||
| 93 | * @pos: the css * to use as the loop cursor | ||
| 94 | * @parent: css whose children to walk | ||
| 54 | * | 95 | * |
| 55 | * Fields marked with "PI:" are public and immutable and may be accessed | 96 | * Walk @parent's children. Must be called under rcu_read_lock(). |
| 56 | * directly without synchronization. | 97 | * |
| 98 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 99 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 100 | * future iterations and will stay visible until the last reference is put. | ||
| 101 | * A css which hasn't finished ->css_online() or already finished | ||
| 102 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 103 | * responsibility to synchronize against on/offlining. | ||
| 104 | * | ||
| 105 | * It is allowed to temporarily drop RCU read lock during iteration. The | ||
| 106 | * caller is responsible for ensuring that @pos remains accessible until | ||
| 107 | * the start of the next iteration by, for example, bumping the css refcnt. | ||
| 57 | */ | 108 | */ |
| 58 | struct cgroup_subsys_state { | 109 | #define css_for_each_child(pos, parent) \ |
| 59 | /* PI: the cgroup that this css is attached to */ | 110 | for ((pos) = css_next_child(NULL, (parent)); (pos); \ |
| 60 | struct cgroup *cgroup; | 111 | (pos) = css_next_child((pos), (parent))) |
| 61 | |||
| 62 | /* PI: the cgroup subsystem that this css is attached to */ | ||
| 63 | struct cgroup_subsys *ss; | ||
| 64 | |||
| 65 | /* reference count - access via css_[try]get() and css_put() */ | ||
| 66 | struct percpu_ref refcnt; | ||
| 67 | |||
| 68 | /* PI: the parent css */ | ||
| 69 | struct cgroup_subsys_state *parent; | ||
| 70 | |||
| 71 | /* siblings list anchored at the parent's ->children */ | ||
| 72 | struct list_head sibling; | ||
| 73 | struct list_head children; | ||
| 74 | |||
| 75 | /* | ||
| 76 | * PI: Subsys-unique ID. 0 is unused and root is always 1. The | ||
| 77 | * matching css can be looked up using css_from_id(). | ||
| 78 | */ | ||
| 79 | int id; | ||
| 80 | |||
| 81 | unsigned int flags; | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Monotonically increasing unique serial number which defines a | ||
| 85 | * uniform order among all csses. It's guaranteed that all | ||
| 86 | * ->children lists are in the ascending order of ->serial_nr and | ||
| 87 | * used to allow interrupting and resuming iterations. | ||
| 88 | */ | ||
| 89 | u64 serial_nr; | ||
| 90 | |||
| 91 | /* percpu_ref killing and RCU release */ | ||
| 92 | struct rcu_head rcu_head; | ||
| 93 | struct work_struct destroy_work; | ||
| 94 | }; | ||
| 95 | 112 | ||
| 96 | /* bits in struct cgroup_subsys_state flags field */ | 113 | /** |
| 97 | enum { | 114 | * css_for_each_descendant_pre - pre-order walk of a css's descendants |
| 98 | CSS_NO_REF = (1 << 0), /* no reference counting for this css */ | 115 | * @pos: the css * to use as the loop cursor |
| 99 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ | 116 | * @root: css whose descendants to walk |
| 100 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ | 117 | * |
| 101 | }; | 118 | * Walk @root's descendants. @root is included in the iteration and the |
| 119 | * first node to be visited. Must be called under rcu_read_lock(). | ||
| 120 | * | ||
| 121 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 122 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 123 | * future iterations and will stay visible until the last reference is put. | ||
| 124 | * A css which hasn't finished ->css_online() or already finished | ||
| 125 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 126 | * responsibility to synchronize against on/offlining. | ||
| 127 | * | ||
| 128 | * For example, the following guarantees that a descendant can't escape | ||
| 129 | * state updates of its ancestors. | ||
| 130 | * | ||
| 131 | * my_online(@css) | ||
| 132 | * { | ||
| 133 | * Lock @css's parent and @css; | ||
| 134 | * Inherit state from the parent; | ||
| 135 | * Unlock both. | ||
| 136 | * } | ||
| 137 | * | ||
| 138 | * my_update_state(@css) | ||
| 139 | * { | ||
| 140 | * css_for_each_descendant_pre(@pos, @css) { | ||
| 141 | * Lock @pos; | ||
| 142 | * if (@pos == @css) | ||
| 143 | * Update @css's state; | ||
| 144 | * else | ||
| 145 | * Verify @pos is alive and inherit state from its parent; | ||
| 146 | * Unlock @pos; | ||
| 147 | * } | ||
| 148 | * } | ||
| 149 | * | ||
| 150 | * As long as the inheriting step, including checking the parent state, is | ||
| 151 | * enclosed inside @pos locking, double-locking the parent isn't necessary | ||
| 152 | * while inheriting. The state update to the parent is guaranteed to be | ||
| 153 | * visible by walking order and, as long as inheriting operations to the | ||
| 154 | * same @pos are atomic to each other, multiple updates racing each other | ||
| 155 | * still result in the correct state. It's guaranteed that at least one | ||
| 156 | * inheritance happens for any css after the latest update to its parent. | ||
| 157 | * | ||
| 158 | * If checking parent's state requires locking the parent, each inheriting | ||
| 159 | * iteration should lock and unlock both @pos->parent and @pos. | ||
| 160 | * | ||
| 161 | * Alternatively, a subsystem may choose to use a single global lock to | ||
| 162 | * synchronize ->css_online() and ->css_offline() against tree-walking | ||
| 163 | * operations. | ||
| 164 | * | ||
| 165 | * It is allowed to temporarily drop RCU read lock during iteration. The | ||
| 166 | * caller is responsible for ensuring that @pos remains accessible until | ||
| 167 | * the start of the next iteration by, for example, bumping the css refcnt. | ||
| 168 | */ | ||
| 169 | #define css_for_each_descendant_pre(pos, css) \ | ||
| 170 | for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \ | ||
| 171 | (pos) = css_next_descendant_pre((pos), (css))) | ||
| 172 | |||
| 173 | /** | ||
| 174 | * css_for_each_descendant_post - post-order walk of a css's descendants | ||
| 175 | * @pos: the css * to use as the loop cursor | ||
| 176 | * @css: css whose descendants to walk | ||
| 177 | * | ||
| 178 | * Similar to css_for_each_descendant_pre() but performs post-order | ||
| 179 | * traversal instead. @root is included in the iteration and the last | ||
| 180 | * node to be visited. | ||
| 181 | * | ||
| 182 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 183 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 184 | * future iterations and will stay visible until the last reference is put. | ||
| 185 | * A css which hasn't finished ->css_online() or already finished | ||
| 186 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 187 | * responsibility to synchronize against on/offlining. | ||
| 188 | * | ||
| 189 | * Note that the walk visibility guarantee example described in pre-order | ||
| 190 | * walk doesn't apply the same to post-order walks. | ||
| 191 | */ | ||
| 192 | #define css_for_each_descendant_post(pos, css) \ | ||
| 193 | for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ | ||
| 194 | (pos) = css_next_descendant_post((pos), (css))) | ||
| 195 | |||
| 196 | /** | ||
| 197 | * cgroup_taskset_for_each - iterate cgroup_taskset | ||
| 198 | * @task: the loop cursor | ||
| 199 | * @tset: taskset to iterate | ||
| 200 | */ | ||
| 201 | #define cgroup_taskset_for_each(task, tset) \ | ||
| 202 | for ((task) = cgroup_taskset_first((tset)); (task); \ | ||
| 203 | (task) = cgroup_taskset_next((tset))) | ||
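A hedged example of the typical use of cgroup_taskset_for_each() inside a controller's ->can_attach() callback (hypothetical controller; the check itself is only illustrative):

	static int foo_can_attach(struct cgroup_subsys_state *css,
				  struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		cgroup_taskset_for_each(task, tset) {
			if (task->flags & PF_KTHREAD)
				return -EINVAL;	/* e.g. refuse kernel threads */
		}
		return 0;
	}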
| 204 | |||
| 205 | /* | ||
| 206 | * Inline functions. | ||
| 207 | */ | ||
| 102 | 208 | ||
| 103 | /** | 209 | /** |
| 104 | * css_get - obtain a reference on the specified css | 210 | * css_get - obtain a reference on the specified css |
| @@ -185,309 +291,112 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) | |||
| 185 | percpu_ref_put_many(&css->refcnt, n); | 291 | percpu_ref_put_many(&css->refcnt, n); |
| 186 | } | 292 | } |
| 187 | 293 | ||
| 188 | /* bits in struct cgroup flags field */ | 294 | /** |
| 189 | enum { | 295 | * task_css_set_check - obtain a task's css_set with extra access conditions |
| 190 | /* Control Group requires release notifications to userspace */ | 296 | * @task: the task to obtain css_set for |
| 191 | CGRP_NOTIFY_ON_RELEASE, | 297 | * @__c: extra condition expression to be passed to rcu_dereference_check() |
| 192 | /* | 298 | * |
| 193 | * Clone the parent's configuration when creating a new child | 299 | * A task's css_set is RCU protected, initialized and exited while holding |
| 194 | * cpuset cgroup. For historical reasons, this option can be | 300 | * task_lock(), and can only be modified while holding both cgroup_mutex |
| 195 | * specified at mount time and thus is implemented here. | 301 | * and task_lock() while the task is alive. This macro verifies that the |
| 196 | */ | 302 | * caller is inside proper critical section and returns @task's css_set. |
| 197 | CGRP_CPUSET_CLONE_CHILDREN, | 303 | * |
| 198 | }; | 304 | * The caller can also specify additional allowed conditions via @__c, such |
| 199 | 305 | * as locks used during the cgroup_subsys::attach() methods. | |
| 200 | struct cgroup { | ||
| 201 | /* self css with NULL ->ss, points back to this cgroup */ | ||
| 202 | struct cgroup_subsys_state self; | ||
| 203 | |||
| 204 | unsigned long flags; /* "unsigned long" so bitops work */ | ||
| 205 | |||
| 206 | /* | ||
| 207 | * idr allocated in-hierarchy ID. | ||
| 208 | * | ||
| 209 | * ID 0 is not used, the ID of the root cgroup is always 1, and a | ||
| 210 | * new cgroup will be assigned with a smallest available ID. | ||
| 211 | * | ||
| 212 | * Allocating/Removing ID must be protected by cgroup_mutex. | ||
| 213 | */ | ||
| 214 | int id; | ||
| 215 | |||
| 216 | /* | ||
| 217 | * If this cgroup contains any tasks, it contributes one to | ||
| 218 | * populated_cnt. All children with non-zero popuplated_cnt of | ||
| 219 | * their own contribute one. The count is zero iff there's no task | ||
| 220 | * in this cgroup or its subtree. | ||
| 221 | */ | ||
| 222 | int populated_cnt; | ||
| 223 | |||
| 224 | struct kernfs_node *kn; /* cgroup kernfs entry */ | ||
| 225 | struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ | ||
| 226 | |||
| 227 | /* | ||
| 228 | * The bitmask of subsystems enabled on the child cgroups. | ||
| 229 | * ->subtree_control is the one configured through | ||
| 230 | * "cgroup.subtree_control" while ->child_subsys_mask is the | ||
| 231 | * effective one which may have more subsystems enabled. | ||
| 232 | * Controller knobs are made available iff it's enabled in | ||
| 233 | * ->subtree_control. | ||
| 234 | */ | ||
| 235 | unsigned int subtree_control; | ||
| 236 | unsigned int child_subsys_mask; | ||
| 237 | |||
| 238 | /* Private pointers for each registered subsystem */ | ||
| 239 | struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; | ||
| 240 | |||
| 241 | struct cgroup_root *root; | ||
| 242 | |||
| 243 | /* | ||
| 244 | * List of cgrp_cset_links pointing at css_sets with tasks in this | ||
| 245 | * cgroup. Protected by css_set_lock. | ||
| 246 | */ | ||
| 247 | struct list_head cset_links; | ||
| 248 | |||
| 249 | /* | ||
| 250 | * On the default hierarchy, a css_set for a cgroup with some | ||
| 251 | * susbsys disabled will point to css's which are associated with | ||
| 252 | * the closest ancestor which has the subsys enabled. The | ||
| 253 | * following lists all css_sets which point to this cgroup's css | ||
| 254 | * for the given subsystem. | ||
| 255 | */ | ||
| 256 | struct list_head e_csets[CGROUP_SUBSYS_COUNT]; | ||
| 257 | |||
| 258 | /* | ||
| 259 | * list of pidlists, up to two for each namespace (one for procs, one | ||
| 260 | * for tasks); created on demand. | ||
| 261 | */ | ||
| 262 | struct list_head pidlists; | ||
| 263 | struct mutex pidlist_mutex; | ||
| 264 | |||
| 265 | /* used to wait for offlining of csses */ | ||
| 266 | wait_queue_head_t offline_waitq; | ||
| 267 | |||
| 268 | /* used to schedule release agent */ | ||
| 269 | struct work_struct release_agent_work; | ||
| 270 | }; | ||
| 271 | |||
| 272 | #define MAX_CGROUP_ROOT_NAMELEN 64 | ||
| 273 | |||
| 274 | /* cgroup_root->flags */ | ||
| 275 | enum { | ||
| 276 | CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ | ||
| 277 | CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ | ||
| 278 | CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ | ||
| 279 | }; | ||
| 280 | |||
| 281 | /* | ||
| 282 | * A cgroup_root represents the root of a cgroup hierarchy, and may be | ||
| 283 | * associated with a kernfs_root to form an active hierarchy. This is | ||
| 284 | * internal to cgroup core. Don't access directly from controllers. | ||
| 285 | */ | 306 | */ |
| 286 | struct cgroup_root { | 307 | #ifdef CONFIG_PROVE_RCU |
| 287 | struct kernfs_root *kf_root; | 308 | extern struct mutex cgroup_mutex; |
| 288 | 309 | extern struct rw_semaphore css_set_rwsem; | |
| 289 | /* The bitmask of subsystems attached to this hierarchy */ | 310 | #define task_css_set_check(task, __c) \ |
| 290 | unsigned int subsys_mask; | 311 | rcu_dereference_check((task)->cgroups, \ |
| 291 | 312 | lockdep_is_held(&cgroup_mutex) || \ | |
| 292 | /* Unique id for this hierarchy. */ | 313 | lockdep_is_held(&css_set_rwsem) || \ |
| 293 | int hierarchy_id; | 314 | ((task)->flags & PF_EXITING) || (__c)) |
| 294 | 315 | #else | |
| 295 | /* The root cgroup. Root is destroyed on its release. */ | 316 | #define task_css_set_check(task, __c) \ |
| 296 | struct cgroup cgrp; | 317 | rcu_dereference((task)->cgroups) |
| 297 | 318 | #endif | |
| 298 | /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ | ||
| 299 | atomic_t nr_cgrps; | ||
| 300 | |||
| 301 | /* A list running through the active hierarchies */ | ||
| 302 | struct list_head root_list; | ||
| 303 | |||
| 304 | /* Hierarchy-specific flags */ | ||
| 305 | unsigned int flags; | ||
| 306 | |||
| 307 | /* IDs for cgroups in this hierarchy */ | ||
| 308 | struct idr cgroup_idr; | ||
| 309 | |||
| 310 | /* The path to use for release notifications. */ | ||
| 311 | char release_agent_path[PATH_MAX]; | ||
| 312 | |||
| 313 | /* The name for this hierarchy - may be empty */ | ||
| 314 | char name[MAX_CGROUP_ROOT_NAMELEN]; | ||
| 315 | }; | ||
| 316 | 319 | ||
| 317 | /* | 320 | /** |
| 318 | * A css_set is a structure holding pointers to a set of | 321 | * task_css_check - obtain css for (task, subsys) w/ extra access conds |
| 319 | * cgroup_subsys_state objects. This saves space in the task struct | 322 | * @task: the target task |
| 320 | * object and speeds up fork()/exit(), since a single inc/dec and a | 323 | * @subsys_id: the target subsystem ID |
| 321 | * list_add()/del() can bump the reference count on the entire cgroup | 324 | * @__c: extra condition expression to be passed to rcu_dereference_check() |
| 322 | * set for a task. | 325 | * |
| 326 | * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The | ||
| 327 | * synchronization rules are the same as task_css_set_check(). | ||
| 323 | */ | 328 | */ |
| 329 | #define task_css_check(task, subsys_id, __c) \ | ||
| 330 | task_css_set_check((task), (__c))->subsys[(subsys_id)] | ||
| 324 | 331 | ||
| 325 | struct css_set { | 332 | /** |
| 326 | 333 | * task_css_set - obtain a task's css_set | |
| 327 | /* Reference count */ | 334 | * @task: the task to obtain css_set for |
| 328 | atomic_t refcount; | ||
| 329 | |||
| 330 | /* | ||
| 331 | * List running through all cgroup groups in the same hash | ||
| 332 | * slot. Protected by css_set_lock | ||
| 333 | */ | ||
| 334 | struct hlist_node hlist; | ||
| 335 | |||
| 336 | /* | ||
| 337 | * Lists running through all tasks using this cgroup group. | ||
| 338 | * mg_tasks lists tasks which belong to this cset but are in the | ||
| 339 | * process of being migrated out or in. Protected by | ||
| 340 | * css_set_rwsem, but, during migration, once tasks are moved to | ||
| 341 | * mg_tasks, it can be read safely while holding cgroup_mutex. | ||
| 342 | */ | ||
| 343 | struct list_head tasks; | ||
| 344 | struct list_head mg_tasks; | ||
| 345 | |||
| 346 | /* | ||
| 347 | * List of cgrp_cset_links pointing at cgroups referenced from this | ||
| 348 | * css_set. Protected by css_set_lock. | ||
| 349 | */ | ||
| 350 | struct list_head cgrp_links; | ||
| 351 | |||
| 352 | /* the default cgroup associated with this css_set */ | ||
| 353 | struct cgroup *dfl_cgrp; | ||
| 354 | |||
| 355 | /* | ||
| 356 | * Set of subsystem states, one for each subsystem. This array is | ||
| 357 | * immutable after creation apart from the init_css_set during | ||
| 358 | * subsystem registration (at boot time). | ||
| 359 | */ | ||
| 360 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | ||
| 361 | |||
| 362 | /* | ||
| 363 | * List of csets participating in the on-going migration either as | ||
| 364 | * source or destination. Protected by cgroup_mutex. | ||
| 365 | */ | ||
| 366 | struct list_head mg_preload_node; | ||
| 367 | struct list_head mg_node; | ||
| 368 | |||
| 369 | /* | ||
| 370 | * If this cset is acting as the source of migration the following | ||
| 371 | * two fields are set. mg_src_cgrp is the source cgroup of the | ||
| 372 | * on-going migration and mg_dst_cset is the destination cset the | ||
| 373 | * target tasks on this cset should be migrated to. Protected by | ||
| 374 | * cgroup_mutex. | ||
| 375 | */ | ||
| 376 | struct cgroup *mg_src_cgrp; | ||
| 377 | struct css_set *mg_dst_cset; | ||
| 378 | |||
| 379 | /* | ||
| 380 | * On the default hierarhcy, ->subsys[ssid] may point to a css | ||
| 381 | * attached to an ancestor instead of the cgroup this css_set is | ||
| 382 | * associated with. The following node is anchored at | ||
| 383 | * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to | ||
| 384 | * iterate through all css's attached to a given cgroup. | ||
| 385 | */ | ||
| 386 | struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; | ||
| 387 | |||
| 388 | /* For RCU-protected deletion */ | ||
| 389 | struct rcu_head rcu_head; | ||
| 390 | }; | ||
| 391 | |||
| 392 | /* | ||
| 393 | * struct cftype: handler definitions for cgroup control files | ||
| 394 | * | 335 | * |
| 395 | * When reading/writing to a file: | 336 | * See task_css_set_check(). |
| 396 | * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata | ||
| 397 | * - the 'cftype' of the file is file->f_path.dentry->d_fsdata | ||
| 398 | */ | 337 | */ |
| 338 | static inline struct css_set *task_css_set(struct task_struct *task) | ||
| 339 | { | ||
| 340 | return task_css_set_check(task, false); | ||
| 341 | } | ||
| 399 | 342 | ||
| 400 | /* cftype->flags */ | 343 | /** |
| 401 | enum { | 344 | * task_css - obtain css for (task, subsys) |
| 402 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ | 345 | * @task: the target task |
| 403 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ | 346 | * @subsys_id: the target subsystem ID |
| 404 | CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ | 347 | * |
| 348 | * See task_css_check(). | ||
| 349 | */ | ||
| 350 | static inline struct cgroup_subsys_state *task_css(struct task_struct *task, | ||
| 351 | int subsys_id) | ||
| 352 | { | ||
| 353 | return task_css_check(task, subsys_id, false); | ||
| 354 | } | ||
| 405 | 355 | ||
| 406 | /* internal flags, do not use outside cgroup core proper */ | 356 | /** |
| 407 | __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ | 357 | * task_get_css - find and get the css for (task, subsys) |
| 408 | __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ | 358 | * @task: the target task |
| 409 | }; | 359 | * @subsys_id: the target subsystem ID |
| 360 | * | ||
| 361 | * Find the css for the (@task, @subsys_id) combination, increment a | ||
| 362 | * reference on and return it. This function is guaranteed to return a | ||
| 363 | * valid css. | ||
| 364 | */ | ||
| 365 | static inline struct cgroup_subsys_state * | ||
| 366 | task_get_css(struct task_struct *task, int subsys_id) | ||
| 367 | { | ||
| 368 | struct cgroup_subsys_state *css; | ||
| 369 | |||
| 370 | rcu_read_lock(); | ||
| 371 | while (true) { | ||
| 372 | css = task_css(task, subsys_id); | ||
| 373 | if (likely(css_tryget_online(css))) | ||
| 374 | break; | ||
| 375 | cpu_relax(); | ||
| 376 | } | ||
| 377 | rcu_read_unlock(); | ||
| 378 | return css; | ||
| 379 | } | ||
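A minimal usage sketch for task_get_css() (hypothetical caller, assuming the memory controller is built in); the reference it returns must eventually be dropped with css_put():

	struct cgroup_subsys_state *css;

	css = task_get_css(current, memory_cgrp_id);
	/* ... css is guaranteed to be a valid, referenced css here ... */
	css_put(css);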
| 410 | 380 | ||
| 411 | #define MAX_CFTYPE_NAME 64 | 381 | /** |
| 412 | 382 | * task_css_is_root - test whether a task belongs to the root css | |
| 413 | struct cftype { | 383 | * @task: the target task |
| 414 | /* | 384 | * @subsys_id: the target subsystem ID |
| 415 | * By convention, the name should begin with the name of the | 385 | * |
| 416 | * subsystem, followed by a period. Zero length string indicates | 386 | * Test whether @task belongs to the root css on the specified subsystem. |
| 417 | * end of cftype array. | 387 | * May be invoked in any context. |
| 418 | */ | 388 | */ |
| 419 | char name[MAX_CFTYPE_NAME]; | 389 | static inline bool task_css_is_root(struct task_struct *task, int subsys_id) |
| 420 | int private; | 390 | { |
| 421 | /* | 391 | return task_css_check(task, subsys_id, true) == |
| 422 | * If not 0, file mode is set to this value, otherwise it will | 392 | init_css_set.subsys[subsys_id]; |
| 423 | * be figured out automatically | 393 | } |
| 424 | */ | ||
| 425 | umode_t mode; | ||
| 426 | |||
| 427 | /* | ||
| 428 | * The maximum length of string, excluding trailing nul, that can | ||
| 429 | * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. | ||
| 430 | */ | ||
| 431 | size_t max_write_len; | ||
| 432 | |||
| 433 | /* CFTYPE_* flags */ | ||
| 434 | unsigned int flags; | ||
| 435 | |||
| 436 | /* | ||
| 437 | * Fields used for internal bookkeeping. Initialized automatically | ||
| 438 | * during registration. | ||
| 439 | */ | ||
| 440 | struct cgroup_subsys *ss; /* NULL for cgroup core files */ | ||
| 441 | struct list_head node; /* anchored at ss->cfts */ | ||
| 442 | struct kernfs_ops *kf_ops; | ||
| 443 | |||
| 444 | /* | ||
| 445 | * read_u64() is a shortcut for the common case of returning a | ||
| 446 | * single integer. Use it in place of read() | ||
| 447 | */ | ||
| 448 | u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); | ||
| 449 | /* | ||
| 450 | * read_s64() is a signed version of read_u64() | ||
| 451 | */ | ||
| 452 | s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); | ||
| 453 | |||
| 454 | /* generic seq_file read interface */ | ||
| 455 | int (*seq_show)(struct seq_file *sf, void *v); | ||
| 456 | |||
| 457 | /* optional ops, implement all or none */ | ||
| 458 | void *(*seq_start)(struct seq_file *sf, loff_t *ppos); | ||
| 459 | void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); | ||
| 460 | void (*seq_stop)(struct seq_file *sf, void *v); | ||
| 461 | |||
| 462 | /* | ||
| 463 | * write_u64() is a shortcut for the common case of accepting | ||
| 464 | * a single integer (as parsed by simple_strtoull) from | ||
| 465 | * userspace. Use in place of write(); return 0 or error. | ||
| 466 | */ | ||
| 467 | int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, | ||
| 468 | u64 val); | ||
| 469 | /* | ||
| 470 | * write_s64() is a signed version of write_u64() | ||
| 471 | */ | ||
| 472 | int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, | ||
| 473 | s64 val); | ||
| 474 | |||
| 475 | /* | ||
| 476 | * write() is the generic write callback which maps directly to | ||
| 477 | * kernfs write operation and overrides all other operations. | ||
| 478 | * Maximum write size is determined by ->max_write_len. Use | ||
| 479 | * of_css/cft() to access the associated css and cft. | ||
| 480 | */ | ||
| 481 | ssize_t (*write)(struct kernfs_open_file *of, | ||
| 482 | char *buf, size_t nbytes, loff_t off); | ||
| 483 | |||
| 484 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 485 | struct lock_class_key lockdep_key; | ||
| 486 | #endif | ||
| 487 | }; | ||
| 488 | 394 | ||
| 489 | extern struct cgroup_root cgrp_dfl_root; | 395 | static inline struct cgroup *task_cgroup(struct task_struct *task, |
| 490 | extern struct css_set init_css_set; | 396 | int subsys_id) |
| 397 | { | ||
| 398 | return task_css(task, subsys_id)->cgroup; | ||
| 399 | } | ||
| 491 | 400 | ||
| 492 | /** | 401 | /** |
| 493 | * cgroup_on_dfl - test whether a cgroup is on the default hierarchy | 402 | * cgroup_on_dfl - test whether a cgroup is on the default hierarchy |
| @@ -604,367 +513,22 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) | |||
| 604 | pr_cont_kernfs_path(cgrp->kn); | 513 | pr_cont_kernfs_path(cgrp->kn); |
| 605 | } | 514 | } |
| 606 | 515 | ||
| 607 | char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); | ||
| 608 | |||
| 609 | int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | ||
| 610 | int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | ||
| 611 | int cgroup_rm_cftypes(struct cftype *cfts); | ||
| 612 | |||
| 613 | bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); | ||
| 614 | |||
| 615 | /* | ||
| 616 | * Control Group taskset, used to pass around set of tasks to cgroup_subsys | ||
| 617 | * methods. | ||
| 618 | */ | ||
| 619 | struct cgroup_taskset; | ||
| 620 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); | ||
| 621 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); | ||
| 622 | |||
| 623 | /** | ||
| 624 | * cgroup_taskset_for_each - iterate cgroup_taskset | ||
| 625 | * @task: the loop cursor | ||
| 626 | * @tset: taskset to iterate | ||
| 627 | */ | ||
| 628 | #define cgroup_taskset_for_each(task, tset) \ | ||
| 629 | for ((task) = cgroup_taskset_first((tset)); (task); \ | ||
| 630 | (task) = cgroup_taskset_next((tset))) | ||
| 631 | |||
| 632 | /* | ||
| 633 | * Control Group subsystem type. | ||
| 634 | * See Documentation/cgroups/cgroups.txt for details | ||
| 635 | */ | ||
| 636 | |||
| 637 | struct cgroup_subsys { | ||
| 638 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); | ||
| 639 | int (*css_online)(struct cgroup_subsys_state *css); | ||
| 640 | void (*css_offline)(struct cgroup_subsys_state *css); | ||
| 641 | void (*css_released)(struct cgroup_subsys_state *css); | ||
| 642 | void (*css_free)(struct cgroup_subsys_state *css); | ||
| 643 | void (*css_reset)(struct cgroup_subsys_state *css); | ||
| 644 | void (*css_e_css_changed)(struct cgroup_subsys_state *css); | ||
| 645 | |||
| 646 | int (*can_attach)(struct cgroup_subsys_state *css, | ||
| 647 | struct cgroup_taskset *tset); | ||
| 648 | void (*cancel_attach)(struct cgroup_subsys_state *css, | ||
| 649 | struct cgroup_taskset *tset); | ||
| 650 | void (*attach)(struct cgroup_subsys_state *css, | ||
| 651 | struct cgroup_taskset *tset); | ||
| 652 | void (*fork)(struct task_struct *task); | ||
| 653 | void (*exit)(struct cgroup_subsys_state *css, | ||
| 654 | struct cgroup_subsys_state *old_css, | ||
| 655 | struct task_struct *task); | ||
| 656 | void (*bind)(struct cgroup_subsys_state *root_css); | ||
| 657 | |||
| 658 | int disabled; | ||
| 659 | int early_init; | ||
| 660 | |||
| 661 | /* | ||
| 662 | * If %false, this subsystem is properly hierarchical - | ||
| 663 | * configuration, resource accounting and restriction on a parent | ||
| 664 | * cgroup cover those of its children. If %true, hierarchy support | ||
| 665 | * is broken in some ways - some subsystems ignore hierarchy | ||
| 666 | * completely while others are only implemented half-way. | ||
| 667 | * | ||
| 668 | * It's now disallowed to create nested cgroups if the subsystem is | ||
| 669 | * broken and cgroup core will emit a warning message on such | ||
| 670 | * cases. Eventually, all subsystems will be made properly | ||
| 671 | * hierarchical and this will go away. | ||
| 672 | */ | ||
| 673 | bool broken_hierarchy; | ||
| 674 | bool warned_broken_hierarchy; | ||
| 675 | |||
| 676 | /* the following two fields are initialized automtically during boot */ | ||
| 677 | int id; | ||
| 678 | #define MAX_CGROUP_TYPE_NAMELEN 32 | ||
| 679 | const char *name; | ||
| 680 | |||
| 681 | /* link to parent, protected by cgroup_lock() */ | ||
| 682 | struct cgroup_root *root; | ||
| 683 | |||
| 684 | /* idr for css->id */ | ||
| 685 | struct idr css_idr; | ||
| 686 | |||
| 687 | /* | ||
| 688 | * List of cftypes. Each entry is the first entry of an array | ||
| 689 | * terminated by zero length name. | ||
| 690 | */ | ||
| 691 | struct list_head cfts; | ||
| 692 | |||
| 693 | /* | ||
| 694 | * Base cftypes which are automatically registered. The two can | ||
| 695 | * point to the same array. | ||
| 696 | */ | ||
| 697 | struct cftype *dfl_cftypes; /* for the default hierarchy */ | ||
| 698 | struct cftype *legacy_cftypes; /* for the legacy hierarchies */ | ||
| 699 | |||
| 700 | /* | ||
| 701 | * A subsystem may depend on other subsystems. When such subsystem | ||
| 702 | * is enabled on a cgroup, the depended-upon subsystems are enabled | ||
| 703 | * together if available. Subsystems enabled due to dependency are | ||
| 704 | * not visible to userland until explicitly enabled. The following | ||
| 705 | * specifies the mask of subsystems that this one depends on. | ||
| 706 | */ | ||
| 707 | unsigned int depends_on; | ||
| 708 | }; | ||
| 709 | |||
| 710 | #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; | ||
| 711 | #include <linux/cgroup_subsys.h> | ||
| 712 | #undef SUBSYS | ||
| 713 | |||
| 714 | /** | ||
| 715 | * task_css_set_check - obtain a task's css_set with extra access conditions | ||
| 716 | * @task: the task to obtain css_set for | ||
| 717 | * @__c: extra condition expression to be passed to rcu_dereference_check() | ||
| 718 | * | ||
| 719 | * A task's css_set is RCU protected, initialized and exited while holding | ||
| 720 | * task_lock(), and can only be modified while holding both cgroup_mutex | ||
| 721 | * and task_lock() while the task is alive. This macro verifies that the | ||
| 722 | * caller is inside proper critical section and returns @task's css_set. | ||
| 723 | * | ||
| 724 | * The caller can also specify additional allowed conditions via @__c, such | ||
| 725 | * as locks used during the cgroup_subsys::attach() methods. | ||
| 726 | */ | ||
| 727 | #ifdef CONFIG_PROVE_RCU | ||
| 728 | extern struct mutex cgroup_mutex; | ||
| 729 | extern struct rw_semaphore css_set_rwsem; | ||
| 730 | #define task_css_set_check(task, __c) \ | ||
| 731 | rcu_dereference_check((task)->cgroups, \ | ||
| 732 | lockdep_is_held(&cgroup_mutex) || \ | ||
| 733 | lockdep_is_held(&css_set_rwsem) || \ | ||
| 734 | ((task)->flags & PF_EXITING) || (__c)) | ||
| 735 | #else | ||
| 736 | #define task_css_set_check(task, __c) \ | ||
| 737 | rcu_dereference((task)->cgroups) | ||
| 738 | #endif | ||
| 739 | |||
| 740 | /** | ||
| 741 | * task_css_check - obtain css for (task, subsys) w/ extra access conds | ||
| 742 | * @task: the target task | ||
| 743 | * @subsys_id: the target subsystem ID | ||
| 744 | * @__c: extra condition expression to be passed to rcu_dereference_check() | ||
| 745 | * | ||
| 746 | * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The | ||
| 747 | * synchronization rules are the same as task_css_set_check(). | ||
| 748 | */ | ||
| 749 | #define task_css_check(task, subsys_id, __c) \ | ||
| 750 | task_css_set_check((task), (__c))->subsys[(subsys_id)] | ||
| 751 | |||
| 752 | /** | ||
| 753 | * task_css_set - obtain a task's css_set | ||
| 754 | * @task: the task to obtain css_set for | ||
| 755 | * | ||
| 756 | * See task_css_set_check(). | ||
| 757 | */ | ||
| 758 | static inline struct css_set *task_css_set(struct task_struct *task) | ||
| 759 | { | ||
| 760 | return task_css_set_check(task, false); | ||
| 761 | } | ||
| 762 | |||
| 763 | /** | ||
| 764 | * task_css - obtain css for (task, subsys) | ||
| 765 | * @task: the target task | ||
| 766 | * @subsys_id: the target subsystem ID | ||
| 767 | * | ||
| 768 | * See task_css_check(). | ||
| 769 | */ | ||
| 770 | static inline struct cgroup_subsys_state *task_css(struct task_struct *task, | ||
| 771 | int subsys_id) | ||
| 772 | { | ||
| 773 | return task_css_check(task, subsys_id, false); | ||
| 774 | } | ||
| 775 | |||
| 776 | /** | ||
| 777 | * task_css_is_root - test whether a task belongs to the root css | ||
| 778 | * @task: the target task | ||
| 779 | * @subsys_id: the target subsystem ID | ||
| 780 | * | ||
| 781 | * Test whether @task belongs to the root css on the specified subsystem. | ||
| 782 | * May be invoked in any context. | ||
| 783 | */ | ||
| 784 | static inline bool task_css_is_root(struct task_struct *task, int subsys_id) | ||
| 785 | { | ||
| 786 | return task_css_check(task, subsys_id, true) == | ||
| 787 | init_css_set.subsys[subsys_id]; | ||
| 788 | } | ||
| 789 | |||
| 790 | static inline struct cgroup *task_cgroup(struct task_struct *task, | ||
| 791 | int subsys_id) | ||
| 792 | { | ||
| 793 | return task_css(task, subsys_id)->cgroup; | ||
| 794 | } | ||
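A quick usage sketch of the helpers above (memory_cgrp_id is the subsystem id that the SUBSYS() expansion generates for the memory controller; treat the choice of controller as an assumption here):

	struct cgroup *cgrp;

	rcu_read_lock();
	cgrp = task_cgroup(current, memory_cgrp_id);	/* cgroup of @current on that hierarchy */
	rcu_read_unlock();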
| 795 | |||
| 796 | struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, | ||
| 797 | struct cgroup_subsys_state *parent); | ||
| 798 | |||
| 799 | struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); | ||
| 800 | |||
| 801 | /** | ||
| 802 | * css_for_each_child - iterate through children of a css | ||
| 803 | * @pos: the css * to use as the loop cursor | ||
| 804 | * @parent: css whose children to walk | ||
| 805 | * | ||
| 806 | * Walk @parent's children. Must be called under rcu_read_lock(). | ||
| 807 | * | ||
| 808 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 809 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 810 | * future iterations and will stay visible until the last reference is put. | ||
| 811 | * A css which hasn't finished ->css_online() or already finished | ||
| 812 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 813 | * responsibility to synchronize against on/offlining. | ||
| 814 | * | ||
| 815 | * It is allowed to temporarily drop RCU read lock during iteration. The | ||
| 816 | * caller is responsible for ensuring that @pos remains accessible until | ||
| 817 | * the start of the next iteration by, for example, bumping the css refcnt. | ||
| 818 | */ | ||
| 819 | #define css_for_each_child(pos, parent) \ | ||
| 820 | for ((pos) = css_next_child(NULL, (parent)); (pos); \ | ||
| 821 | (pos) = css_next_child((pos), (parent))) | ||
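A minimal sketch of the child iterator in use, counting @parent's children under RCU:

	static int my_count_children(struct cgroup_subsys_state *parent)
	{
		struct cgroup_subsys_state *pos;
		int n = 0;

		rcu_read_lock();
		css_for_each_child(pos, parent)
			n++;
		rcu_read_unlock();

		return n;
	}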
| 822 | |||
| 823 | struct cgroup_subsys_state * | ||
| 824 | css_next_descendant_pre(struct cgroup_subsys_state *pos, | ||
| 825 | struct cgroup_subsys_state *css); | ||
| 826 | |||
| 827 | struct cgroup_subsys_state * | ||
| 828 | css_rightmost_descendant(struct cgroup_subsys_state *pos); | ||
| 829 | |||
| 830 | /** | ||
| 831 | * css_for_each_descendant_pre - pre-order walk of a css's descendants | ||
| 832 | * @pos: the css * to use as the loop cursor | ||
| 833 | * @root: css whose descendants to walk | ||
| 834 | * | ||
| 835 | * Walk @root's descendants. @root is included in the iteration and the | ||
| 836 | * first node to be visited. Must be called under rcu_read_lock(). | ||
| 837 | * | ||
| 838 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 839 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 840 | * future iterations and will stay visible until the last reference is put. | ||
| 841 | * A css which hasn't finished ->css_online() or already finished | ||
| 842 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 843 | * responsibility to synchronize against on/offlining. | ||
| 844 | * | ||
| 845 | * For example, the following guarantees that a descendant can't escape | ||
| 846 | * state updates of its ancestors. | ||
| 847 | * | ||
| 848 | * my_online(@css) | ||
| 849 | * { | ||
| 850 | * Lock @css's parent and @css; | ||
| 851 | * Inherit state from the parent; | ||
| 852 | * Unlock both. | ||
| 853 | * } | ||
| 854 | * | ||
| 855 | * my_update_state(@css) | ||
| 856 | * { | ||
| 857 | * css_for_each_descendant_pre(@pos, @css) { | ||
| 858 | * Lock @pos; | ||
| 859 | * if (@pos == @css) | ||
| 860 | * Update @css's state; | ||
| 861 | * else | ||
| 862 | * Verify @pos is alive and inherit state from its parent; | ||
| 863 | * Unlock @pos; | ||
| 864 | * } | ||
| 865 | * } | ||
| 866 | * | ||
| 867 | * As long as the inheriting step, including checking the parent state, is | ||
| 868 | * enclosed inside @pos locking, double-locking the parent isn't necessary | ||
| 869 | * while inheriting. The state update to the parent is guaranteed to be | ||
| 870 | * visible by walking order and, as long as inheriting operations to the | ||
| 871 | * same @pos are atomic to each other, multiple updates racing each other | ||
| 872 | * still result in the correct state. It's guaranteed that at least one | ||
| 873 | * inheritance happens for any css after the latest update to its parent. | ||
| 874 | * | ||
| 875 | * If checking parent's state requires locking the parent, each inheriting | ||
| 876 | * iteration should lock and unlock both @pos->parent and @pos. | ||
| 877 | * | ||
| 878 | * Alternatively, a subsystem may choose to use a single global lock to | ||
| 879 | * synchronize ->css_online() and ->css_offline() against tree-walking | ||
| 880 | * operations. | ||
| 881 | * | ||
| 882 | * It is allowed to temporarily drop RCU read lock during iteration. The | ||
| 883 | * caller is responsible for ensuring that @pos remains accessible until | ||
| 884 | * the start of the next iteration by, for example, bumping the css refcnt. | ||
| 885 | */ | ||
| 886 | #define css_for_each_descendant_pre(pos, css) \ | ||
| 887 | for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \ | ||
| 888 | (pos) = css_next_descendant_pre((pos), (css))) | ||
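A compact C rendering of the my_update_state() pattern described above, as a sketch only; struct my_css, its lock/online/limit fields and parent_my_css() are hypothetical subsystem details, not part of this patch:

	static void my_update_state(struct cgroup_subsys_state *root_css,
				    unsigned long new_limit)
	{
		struct cgroup_subsys_state *pos;

		rcu_read_lock();
		css_for_each_descendant_pre(pos, root_css) {
			struct my_css *cs = container_of(pos, struct my_css, css);

			spin_lock(&cs->lock);
			if (pos == root_css)
				cs->limit = new_limit;
			else if (cs->online)	/* set in ->css_online(), cleared in ->css_offline() */
				cs->limit = parent_my_css(cs)->limit;
			spin_unlock(&cs->lock);
		}
		rcu_read_unlock();
	}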
| 889 | |||
| 890 | struct cgroup_subsys_state * | ||
| 891 | css_next_descendant_post(struct cgroup_subsys_state *pos, | ||
| 892 | struct cgroup_subsys_state *css); | ||
| 893 | |||
| 894 | /** | ||
| 895 | * css_for_each_descendant_post - post-order walk of a css's descendants | ||
| 896 | * @pos: the css * to use as the loop cursor | ||
| 897 | * @css: css whose descendants to walk | ||
| 898 | * | ||
| 899 | * Similar to css_for_each_descendant_pre() but performs post-order | ||
| 900 | * traversal instead. @css is included in the iteration and the last | ||
| 901 | * node to be visited. | ||
| 902 | * | ||
| 903 | * If a subsystem synchronizes ->css_online() and the start of iteration, a | ||
| 904 | * css which finished ->css_online() is guaranteed to be visible in the | ||
| 905 | * future iterations and will stay visible until the last reference is put. | ||
| 906 | * A css which hasn't finished ->css_online() or already finished | ||
| 907 | * ->css_offline() may show up during traversal. It's each subsystem's | ||
| 908 | * responsibility to synchronize against on/offlining. | ||
| 909 | * | ||
| 910 | * Note that the walk visibility guarantee example described in pre-order | ||
| 911 | * walk doesn't apply the same to post-order walks. | ||
| 912 | */ | ||
| 913 | #define css_for_each_descendant_post(pos, css) \ | ||
| 914 | for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ | ||
| 915 | (pos) = css_next_descendant_post((pos), (css))) | ||
| 916 | |||
| 917 | bool css_has_online_children(struct cgroup_subsys_state *css); | ||
| 918 | |||
| 919 | /* A css_task_iter should be treated as an opaque object */ | ||
| 920 | struct css_task_iter { | ||
| 921 | struct cgroup_subsys *ss; | ||
| 922 | |||
| 923 | struct list_head *cset_pos; | ||
| 924 | struct list_head *cset_head; | ||
| 925 | |||
| 926 | struct list_head *task_pos; | ||
| 927 | struct list_head *tasks_head; | ||
| 928 | struct list_head *mg_tasks_head; | ||
| 929 | }; | ||
| 930 | |||
| 931 | void css_task_iter_start(struct cgroup_subsys_state *css, | ||
| 932 | struct css_task_iter *it); | ||
| 933 | struct task_struct *css_task_iter_next(struct css_task_iter *it); | ||
| 934 | void css_task_iter_end(struct css_task_iter *it); | ||
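A short sketch of the task iterator API above, walking every task currently attached to a css:

	static void my_dump_tasks(struct cgroup_subsys_state *css)
	{
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(css, &it);
		while ((task = css_task_iter_next(&it)))
			pr_info("task %d\n", task_pid_nr(task));
		css_task_iter_end(&it);
	}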
| 935 | |||
| 936 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); | ||
| 937 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); | ||
| 938 | |||
| 939 | struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, | ||
| 940 | struct cgroup_subsys *ss); | ||
| 941 | struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | ||
| 942 | struct cgroup_subsys *ss); | ||
| 943 | |||
| 944 | #else /* !CONFIG_CGROUPS */ | 516 | #else /* !CONFIG_CGROUPS */ |
| 945 | 517 | ||
| 946 | struct cgroup_subsys_state; | 518 | struct cgroup_subsys_state; |
| 947 | 519 | ||
| 948 | static inline int cgroup_init_early(void) { return 0; } | 520 | static inline void css_put(struct cgroup_subsys_state *css) {} |
| 949 | static inline int cgroup_init(void) { return 0; } | 521 | static inline int cgroup_attach_task_all(struct task_struct *from, |
| 522 | struct task_struct *t) { return 0; } | ||
| 523 | static inline int cgroupstats_build(struct cgroupstats *stats, | ||
| 524 | struct dentry *dentry) { return -EINVAL; } | ||
| 525 | |||
| 950 | static inline void cgroup_fork(struct task_struct *p) {} | 526 | static inline void cgroup_fork(struct task_struct *p) {} |
| 951 | static inline void cgroup_post_fork(struct task_struct *p) {} | 527 | static inline void cgroup_post_fork(struct task_struct *p) {} |
| 952 | static inline void cgroup_exit(struct task_struct *p) {} | 528 | static inline void cgroup_exit(struct task_struct *p) {} |
| 953 | 529 | ||
| 954 | static inline int cgroupstats_build(struct cgroupstats *stats, | 530 | static inline int cgroup_init_early(void) { return 0; } |
| 955 | struct dentry *dentry) | 531 | static inline int cgroup_init(void) { return 0; } |
| 956 | { | ||
| 957 | return -EINVAL; | ||
| 958 | } | ||
| 959 | |||
| 960 | static inline void css_put(struct cgroup_subsys_state *css) {} | ||
| 961 | |||
| 962 | /* No cgroups - nothing to do */ | ||
| 963 | static inline int cgroup_attach_task_all(struct task_struct *from, | ||
| 964 | struct task_struct *t) | ||
| 965 | { | ||
| 966 | return 0; | ||
| 967 | } | ||
| 968 | 532 | ||
| 969 | #endif /* !CONFIG_CGROUPS */ | 533 | #endif /* !CONFIG_CGROUPS */ |
| 970 | 534 | ||
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index df695313f975..78842f46f152 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ | 31 | #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ |
| 32 | #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ | 32 | #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ |
| 33 | #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ | 33 | #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ |
| 34 | #define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ | ||
| 34 | 35 | ||
| 35 | struct clk_hw; | 36 | struct clk_hw; |
| 36 | struct clk_core; | 37 | struct clk_core; |
| @@ -209,7 +210,7 @@ struct clk_ops { | |||
| 209 | struct clk_init_data { | 210 | struct clk_init_data { |
| 210 | const char *name; | 211 | const char *name; |
| 211 | const struct clk_ops *ops; | 212 | const struct clk_ops *ops; |
| 212 | const char **parent_names; | 213 | const char * const *parent_names; |
| 213 | u8 num_parents; | 214 | u8 num_parents; |
| 214 | unsigned long flags; | 215 | unsigned long flags; |
| 215 | }; | 216 | }; |
| @@ -426,12 +427,14 @@ extern const struct clk_ops clk_mux_ops; | |||
| 426 | extern const struct clk_ops clk_mux_ro_ops; | 427 | extern const struct clk_ops clk_mux_ro_ops; |
| 427 | 428 | ||
| 428 | struct clk *clk_register_mux(struct device *dev, const char *name, | 429 | struct clk *clk_register_mux(struct device *dev, const char *name, |
| 429 | const char **parent_names, u8 num_parents, unsigned long flags, | 430 | const char * const *parent_names, u8 num_parents, |
| 431 | unsigned long flags, | ||
| 430 | void __iomem *reg, u8 shift, u8 width, | 432 | void __iomem *reg, u8 shift, u8 width, |
| 431 | u8 clk_mux_flags, spinlock_t *lock); | 433 | u8 clk_mux_flags, spinlock_t *lock); |
| 432 | 434 | ||
| 433 | struct clk *clk_register_mux_table(struct device *dev, const char *name, | 435 | struct clk *clk_register_mux_table(struct device *dev, const char *name, |
| 434 | const char **parent_names, u8 num_parents, unsigned long flags, | 436 | const char * const *parent_names, u8 num_parents, |
| 437 | unsigned long flags, | ||
| 435 | void __iomem *reg, u8 shift, u32 mask, | 438 | void __iomem *reg, u8 shift, u32 mask, |
| 436 | u8 clk_mux_flags, u32 *table, spinlock_t *lock); | 439 | u8 clk_mux_flags, u32 *table, spinlock_t *lock); |
| 437 | 440 | ||
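With the const-qualified prototypes the parent-name table can itself be fully const; a hedged sketch (register offset, clock names and the lock are assumptions, not taken from this patch):

	static DEFINE_SPINLOCK(mux_lock);
	static const char * const mux_parents[] = { "pll1", "pll2", "osc" };

	clk = clk_register_mux(NULL, "periph_mux", mux_parents,
			       ARRAY_SIZE(mux_parents), CLK_SET_RATE_PARENT,
			       base + 0x10, 0, 2, 0, &mux_lock);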
| @@ -457,7 +460,7 @@ struct clk_fixed_factor { | |||
| 457 | unsigned int div; | 460 | unsigned int div; |
| 458 | }; | 461 | }; |
| 459 | 462 | ||
| 460 | extern struct clk_ops clk_fixed_factor_ops; | 463 | extern const struct clk_ops clk_fixed_factor_ops; |
| 461 | struct clk *clk_register_fixed_factor(struct device *dev, const char *name, | 464 | struct clk *clk_register_fixed_factor(struct device *dev, const char *name, |
| 462 | const char *parent_name, unsigned long flags, | 465 | const char *parent_name, unsigned long flags, |
| 463 | unsigned int mult, unsigned int div); | 466 | unsigned int mult, unsigned int div); |
| @@ -518,7 +521,7 @@ struct clk_composite { | |||
| 518 | }; | 521 | }; |
| 519 | 522 | ||
| 520 | struct clk *clk_register_composite(struct device *dev, const char *name, | 523 | struct clk *clk_register_composite(struct device *dev, const char *name, |
| 521 | const char **parent_names, int num_parents, | 524 | const char * const *parent_names, int num_parents, |
| 522 | struct clk_hw *mux_hw, const struct clk_ops *mux_ops, | 525 | struct clk_hw *mux_hw, const struct clk_ops *mux_ops, |
| 523 | struct clk_hw *rate_hw, const struct clk_ops *rate_ops, | 526 | struct clk_hw *rate_hw, const struct clk_ops *rate_ops, |
| 524 | struct clk_hw *gate_hw, const struct clk_ops *gate_ops, | 527 | struct clk_hw *gate_hw, const struct clk_ops *gate_ops, |
| @@ -589,6 +592,7 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, | |||
| 589 | unsigned long max_rate, | 592 | unsigned long max_rate, |
| 590 | unsigned long *best_parent_rate, | 593 | unsigned long *best_parent_rate, |
| 591 | struct clk_hw **best_parent_p); | 594 | struct clk_hw **best_parent_p); |
| 595 | void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); | ||
| 592 | 596 | ||
| 593 | static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) | 597 | static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) |
| 594 | { | 598 | { |
| @@ -624,6 +628,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, | |||
| 624 | void *data); | 628 | void *data); |
| 625 | struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); | 629 | struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); |
| 626 | int of_clk_get_parent_count(struct device_node *np); | 630 | int of_clk_get_parent_count(struct device_node *np); |
| 631 | int of_clk_parent_fill(struct device_node *np, const char **parents, | ||
| 632 | unsigned int size); | ||
| 627 | const char *of_clk_get_parent_name(struct device_node *np, int index); | 633 | const char *of_clk_get_parent_name(struct device_node *np, int index); |
| 628 | 634 | ||
| 629 | void of_clk_init(const struct of_device_id *matches); | 635 | void of_clk_init(const struct of_device_id *matches); |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 68c16a6bedb3..0df4a51e1a78 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
| @@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk); | |||
| 306 | * @clk: clock source | 306 | * @clk: clock source |
| 307 | * @rate: desired clock rate in Hz | 307 | * @rate: desired clock rate in Hz |
| 308 | * | 308 | * |
| 309 | * This answers the question "if I were to pass @rate to clk_set_rate(), | ||
| 310 | * what clock rate would I end up with?" without changing the hardware | ||
| 311 | * in any way. In other words: | ||
| 312 | * | ||
| 313 | * rate = clk_round_rate(clk, r); | ||
| 314 | * | ||
| 315 | * and: | ||
| 316 | * | ||
| 317 | * clk_set_rate(clk, r); | ||
| 318 | * rate = clk_get_rate(clk); | ||
| 319 | * | ||
| 320 | * are equivalent except the former does not modify the clock hardware | ||
| 321 | * in any way. | ||
| 322 | * | ||
| 309 | * Returns rounded clock rate in Hz, or negative errno. | 323 | * Returns rounded clock rate in Hz, or negative errno. |
| 310 | */ | 324 | */ |
| 311 | long clk_round_rate(struct clk *clk, unsigned long rate); | 325 | long clk_round_rate(struct clk *clk, unsigned long rate); |
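A small sketch of the query-then-set pattern the new comment describes (the target rate is an example value):

	long rounded = clk_round_rate(clk, 48000000);

	if (rounded > 0)
		clk_set_rate(clk, rounded);	/* hardware ends up at exactly 'rounded' */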
| @@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk) | |||
| 471 | clk_unprepare(clk); | 485 | clk_unprepare(clk); |
| 472 | } | 486 | } |
| 473 | 487 | ||
| 474 | /** | ||
| 475 | * clk_add_alias - add a new clock alias | ||
| 476 | * @alias: name for clock alias | ||
| 477 | * @alias_dev_name: device name | ||
| 478 | * @id: platform specific clock name | ||
| 479 | * @dev: device | ||
| 480 | * | ||
| 481 | * Allows using generic clock names for drivers by adding a new alias. | ||
| 482 | * Assumes clkdev, see clkdev.h for more info. | ||
| 483 | */ | ||
| 484 | int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, | ||
| 485 | struct device *dev); | ||
| 486 | |||
| 487 | struct device_node; | 488 | struct device_node; |
| 488 | struct of_phandle_args; | 489 | struct of_phandle_args; |
| 489 | 490 | ||
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 94bad77eeb4a..08bffcc466de 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h | |||
| @@ -22,6 +22,7 @@ struct clk_lookup { | |||
| 22 | const char *dev_id; | 22 | const char *dev_id; |
| 23 | const char *con_id; | 23 | const char *con_id; |
| 24 | struct clk *clk; | 24 | struct clk *clk; |
| 25 | struct clk_hw *clk_hw; | ||
| 25 | }; | 26 | }; |
| 26 | 27 | ||
| 27 | #define CLKDEV_INIT(d, n, c) \ | 28 | #define CLKDEV_INIT(d, n, c) \ |
| @@ -32,15 +33,19 @@ struct clk_lookup { | |||
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, | 35 | struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, |
| 35 | const char *dev_fmt, ...); | 36 | const char *dev_fmt, ...) __printf(3, 4); |
| 36 | 37 | ||
| 37 | void clkdev_add(struct clk_lookup *cl); | 38 | void clkdev_add(struct clk_lookup *cl); |
| 38 | void clkdev_drop(struct clk_lookup *cl); | 39 | void clkdev_drop(struct clk_lookup *cl); |
| 39 | 40 | ||
| 41 | struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, | ||
| 42 | const char *dev_fmt, ...) __printf(3, 4); | ||
| 43 | |||
| 40 | void clkdev_add_table(struct clk_lookup *, size_t); | 44 | void clkdev_add_table(struct clk_lookup *, size_t); |
| 41 | int clk_add_alias(const char *, const char *, char *, struct device *); | 45 | int clk_add_alias(const char *, const char *, const char *, struct device *); |
| 42 | 46 | ||
| 43 | int clk_register_clkdev(struct clk *, const char *, const char *, ...); | 47 | int clk_register_clkdev(struct clk *, const char *, const char *, ...) |
| 48 | __printf(3, 4); | ||
| 44 | int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); | 49 | int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); |
| 45 | 50 | ||
| 46 | #ifdef CONFIG_COMMON_CLK | 51 | #ifdef CONFIG_COMMON_CLK |
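A minimal sketch of the new clkdev_create() helper, which allocates and registers the lookup in one step and returns NULL on allocation failure (the device name is an example):

	struct clk_lookup *cl = clkdev_create(clk, NULL, "uart.%d", 0);

	if (!cl)
		return -ENOMEM;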
diff --git a/include/linux/compat.h b/include/linux/compat.h index ab25814690bc..a76c9172b2eb 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
| @@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, | |||
| 424 | 424 | ||
| 425 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | 425 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); |
| 426 | 426 | ||
| 427 | extern int compat_printk(const char *fmt, ...); | 427 | extern __printf(1, 2) int compat_printk(const char *fmt, ...); |
| 428 | extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); | 428 | extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); |
| 429 | extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); | 429 | extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); |
| 430 | 430 | ||
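The practical effect of the __printf(1, 2) annotation is that gcc's -Wformat checking now covers callers of compat_printk(); an illustrative pair of calls:

	compat_printk("pid %d\n", task_pid_nr(current));	/* OK */
	compat_printk("pid %s\n", task_pid_nr(current));	/* now triggers a format warning */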
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 371e560d13cf..dfaa7b3e9ae9 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -5,9 +5,9 @@ | |||
| 5 | /* | 5 | /* |
| 6 | * Common definitions for all gcc versions go here. | 6 | * Common definitions for all gcc versions go here. |
| 7 | */ | 7 | */ |
| 8 | #define GCC_VERSION (__GNUC__ * 10000 \ | 8 | #define GCC_VERSION (__GNUC__ * 10000 \ |
| 9 | + __GNUC_MINOR__ * 100 \ | 9 | + __GNUC_MINOR__ * 100 \ |
| 10 | + __GNUC_PATCHLEVEL__) | 10 | + __GNUC_PATCHLEVEL__) |
| 11 | 11 | ||
| 12 | /* Optimization barrier */ | 12 | /* Optimization barrier */ |
| 13 | 13 | ||
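For reference, GCC_VERSION packs the compiler version into a single integer, so gcc 4.9.2 yields 4 * 10000 + 9 * 100 + 2 = 40902, which is what version checks such as "#if GCC_VERSION >= 40902" later in this file compare against.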
| @@ -46,55 +46,63 @@ | |||
| 46 | * the inline assembly constraint from =g to =r, in this particular | 46 | * the inline assembly constraint from =g to =r, in this particular |
| 47 | * case either is valid. | 47 | * case either is valid. |
| 48 | */ | 48 | */ |
| 49 | #define RELOC_HIDE(ptr, off) \ | 49 | #define RELOC_HIDE(ptr, off) \ |
| 50 | ({ unsigned long __ptr; \ | 50 | ({ \ |
| 51 | __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ | 51 | unsigned long __ptr; \ |
| 52 | (typeof(ptr)) (__ptr + (off)); }) | 52 | __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ |
| 53 | (typeof(ptr)) (__ptr + (off)); \ | ||
| 54 | }) | ||
| 53 | 55 | ||
| 54 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ | 56 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
| 55 | #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) | 57 | #define OPTIMIZER_HIDE_VAR(var) \ |
| 58 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 56 | 59 | ||
| 57 | #ifdef __CHECKER__ | 60 | #ifdef __CHECKER__ |
| 58 | #define __must_be_array(arr) 0 | 61 | #define __must_be_array(a) 0 |
| 59 | #else | 62 | #else |
| 60 | /* &a[0] degrades to a pointer: a different type from an array */ | 63 | /* &a[0] degrades to a pointer: a different type from an array */ |
| 61 | #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) | 64 | #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) |
| 62 | #endif | 65 | #endif |
| 63 | 66 | ||
| 64 | /* | 67 | /* |
| 65 | * Force always-inline if the user requests it so via the .config, | 68 | * Force always-inline if the user requests it so via the .config, |
| 66 | * or if gcc is too old: | 69 | * or if gcc is too old: |
| 67 | */ | 70 | */ |
| 68 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ | 71 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
| 69 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) | 72 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
| 70 | # define inline inline __attribute__((always_inline)) notrace | 73 | #define inline inline __attribute__((always_inline)) notrace |
| 71 | # define __inline__ __inline__ __attribute__((always_inline)) notrace | 74 | #define __inline__ __inline__ __attribute__((always_inline)) notrace |
| 72 | # define __inline __inline __attribute__((always_inline)) notrace | 75 | #define __inline __inline __attribute__((always_inline)) notrace |
| 73 | #else | 76 | #else |
| 74 | /* A lot of inline functions can cause havoc with function tracing */ | 77 | /* A lot of inline functions can cause havoc with function tracing */ |
| 75 | # define inline inline notrace | 78 | #define inline inline notrace |
| 76 | # define __inline__ __inline__ notrace | 79 | #define __inline__ __inline__ notrace |
| 77 | # define __inline __inline notrace | 80 | #define __inline __inline notrace |
| 78 | #endif | 81 | #endif |
| 79 | 82 | ||
| 80 | #define __deprecated __attribute__((deprecated)) | 83 | #define __always_inline inline __attribute__((always_inline)) |
| 81 | #define __packed __attribute__((packed)) | 84 | #define noinline __attribute__((noinline)) |
| 82 | #define __weak __attribute__((weak)) | 85 | |
| 83 | #define __alias(symbol) __attribute__((alias(#symbol))) | 86 | #define __deprecated __attribute__((deprecated)) |
| 87 | #define __packed __attribute__((packed)) | ||
| 88 | #define __weak __attribute__((weak)) | ||
| 89 | #define __alias(symbol) __attribute__((alias(#symbol))) | ||
| 84 | 90 | ||
| 85 | /* | 91 | /* |
| 86 | * it doesn't make sense on ARM (currently the only user of __naked) to trace | 92 | * it doesn't make sense on ARM (currently the only user of __naked) |
| 87 | * naked functions because then mcount is called without stack and frame pointer | 93 | * to trace naked functions because then mcount is called without |
| 88 | * being set up and there is no chance to restore the lr register to the value | 94 | * stack and frame pointer being set up and there is no chance to |
| 89 | * before mcount was called. | 95 | * restore the lr register to the value before mcount was called. |
| 96 | * | ||
| 97 | * The asm() bodies of naked functions often depend on standard calling | ||
| 98 | * conventions, therefore they must be noinline and noclone. | ||
| 90 | * | 99 | * |
| 91 | * The asm() bodies of naked functions often depend on standard calling conventions, | 100 | * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. |
| 92 | * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce | 101 | * See GCC PR44290. |
| 93 | * this, so we must do so ourselves. See GCC PR44290. | ||
| 94 | */ | 102 | */ |
| 95 | #define __naked __attribute__((naked)) noinline __noclone notrace | 103 | #define __naked __attribute__((naked)) noinline __noclone notrace |
| 96 | 104 | ||
| 97 | #define __noreturn __attribute__((noreturn)) | 105 | #define __noreturn __attribute__((noreturn)) |
| 98 | 106 | ||
| 99 | /* | 107 | /* |
| 100 | * From the GCC manual: | 108 | * From the GCC manual: |
| @@ -106,19 +114,130 @@ | |||
| 106 | * would be. | 114 | * would be. |
| 107 | * [...] | 115 | * [...] |
| 108 | */ | 116 | */ |
| 109 | #define __pure __attribute__((pure)) | 117 | #define __pure __attribute__((pure)) |
| 110 | #define __aligned(x) __attribute__((aligned(x))) | 118 | #define __aligned(x) __attribute__((aligned(x))) |
| 111 | #define __printf(a, b) __attribute__((format(printf, a, b))) | 119 | #define __printf(a, b) __attribute__((format(printf, a, b))) |
| 112 | #define __scanf(a, b) __attribute__((format(scanf, a, b))) | 120 | #define __scanf(a, b) __attribute__((format(scanf, a, b))) |
| 113 | #define noinline __attribute__((noinline)) | 121 | #define __attribute_const__ __attribute__((__const__)) |
| 114 | #define __attribute_const__ __attribute__((__const__)) | 122 | #define __maybe_unused __attribute__((unused)) |
| 115 | #define __maybe_unused __attribute__((unused)) | 123 | #define __always_unused __attribute__((unused)) |
| 116 | #define __always_unused __attribute__((unused)) | 124 | |
| 117 | 125 | /* gcc version specific checks */ | |
| 118 | #define __gcc_header(x) #x | 126 | |
| 119 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) | 127 | #if GCC_VERSION < 30200 |
| 120 | #define gcc_header(x) _gcc_header(x) | 128 | # error Sorry, your compiler is too old - please upgrade it. |
| 121 | #include gcc_header(__GNUC__) | 129 | #endif |
| 130 | |||
| 131 | #if GCC_VERSION < 30300 | ||
| 132 | # define __used __attribute__((__unused__)) | ||
| 133 | #else | ||
| 134 | # define __used __attribute__((__used__)) | ||
| 135 | #endif | ||
| 136 | |||
| 137 | #ifdef CONFIG_GCOV_KERNEL | ||
| 138 | # if GCC_VERSION < 30400 | ||
| 139 | # error "GCOV profiling support for gcc versions below 3.4 not included" | ||
| 140 | # endif /* __GNUC_MINOR__ */ | ||
| 141 | #endif /* CONFIG_GCOV_KERNEL */ | ||
| 142 | |||
| 143 | #if GCC_VERSION >= 30400 | ||
| 144 | #define __must_check __attribute__((warn_unused_result)) | ||
| 145 | #endif | ||
| 146 | |||
| 147 | #if GCC_VERSION >= 40000 | ||
| 148 | |||
| 149 | /* GCC 4.1.[01] miscompiles __weak */ | ||
| 150 | #ifdef __KERNEL__ | ||
| 151 | # if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 | ||
| 152 | # error Your version of gcc miscompiles the __weak directive | ||
| 153 | # endif | ||
| 154 | #endif | ||
| 155 | |||
| 156 | #define __used __attribute__((__used__)) | ||
| 157 | #define __compiler_offsetof(a, b) \ | ||
| 158 | __builtin_offsetof(a, b) | ||
| 159 | |||
| 160 | #if GCC_VERSION >= 40100 && GCC_VERSION < 40600 | ||
| 161 | # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) | ||
| 162 | #endif | ||
| 163 | |||
| 164 | #if GCC_VERSION >= 40300 | ||
| 165 | /* Mark functions as cold. gcc will assume any path leading to a call | ||
| 166 | * to them will be unlikely. This means a lot of manual unlikely()s | ||
| 167 | * are unnecessary now for any paths leading to the usual suspects | ||
| 168 | * like BUG(), printk(), panic() etc. [but let's keep them for now for | ||
| 169 | * older compilers] | ||
| 170 | * | ||
| 171 | * Early snapshots of gcc 4.3 don't support this and we can't detect this | ||
| 172 | * in the preprocessor, but we can live with this because they're unreleased. | ||
| 173 | * Maketime probing would be overkill here. | ||
| 174 | * | ||
| 175 | * gcc also has a __attribute__((__hot__)) to move hot functions into | ||
| 176 | * a special section, but I don't see any sense in this right now in | ||
| 177 | * the kernel context | ||
| 178 | */ | ||
| 179 | #define __cold __attribute__((__cold__)) | ||
| 180 | |||
| 181 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) | ||
| 182 | |||
| 183 | #ifndef __CHECKER__ | ||
| 184 | # define __compiletime_warning(message) __attribute__((warning(message))) | ||
| 185 | # define __compiletime_error(message) __attribute__((error(message))) | ||
| 186 | #endif /* __CHECKER__ */ | ||
| 187 | #endif /* GCC_VERSION >= 40300 */ | ||
| 188 | |||
| 189 | #if GCC_VERSION >= 40500 | ||
| 190 | /* | ||
| 191 | * Mark a position in code as unreachable. This can be used to | ||
| 192 | * suppress control flow warnings after asm blocks that transfer | ||
| 193 | * control elsewhere. | ||
| 194 | * | ||
| 195 | * Early snapshots of gcc 4.5 don't support this and we can't detect | ||
| 196 | * this in the preprocessor, but we can live with this because they're | ||
| 197 | * unreleased. Really, we need to have autoconf for the kernel. | ||
| 198 | */ | ||
| 199 | #define unreachable() __builtin_unreachable() | ||
| 200 | |||
| 201 | /* Mark a function definition as prohibited from being cloned. */ | ||
| 202 | #define __noclone __attribute__((__noclone__)) | ||
| 203 | |||
| 204 | #endif /* GCC_VERSION >= 40500 */ | ||
| 205 | |||
| 206 | #if GCC_VERSION >= 40600 | ||
| 207 | /* | ||
| 208 | * Tell the optimizer that something else uses this function or variable. | ||
| 209 | */ | ||
| 210 | #define __visible __attribute__((externally_visible)) | ||
| 211 | #endif | ||
| 212 | |||
| 213 | /* | ||
| 214 | * GCC 'asm goto' miscompiles certain code sequences: | ||
| 215 | * | ||
| 216 | * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 | ||
| 217 | * | ||
| 218 | * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. | ||
| 219 | * | ||
| 220 | * (asm goto is automatically volatile - the naming reflects this.) | ||
| 221 | */ | ||
| 222 | #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) | ||
| 223 | |||
| 224 | #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP | ||
| 225 | #if GCC_VERSION >= 40400 | ||
| 226 | #define __HAVE_BUILTIN_BSWAP32__ | ||
| 227 | #define __HAVE_BUILTIN_BSWAP64__ | ||
| 228 | #endif | ||
| 229 | #if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) | ||
| 230 | #define __HAVE_BUILTIN_BSWAP16__ | ||
| 231 | #endif | ||
| 232 | #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ | ||
| 233 | |||
| 234 | #if GCC_VERSION >= 50000 | ||
| 235 | #define KASAN_ABI_VERSION 4 | ||
| 236 | #elif GCC_VERSION >= 40902 | ||
| 237 | #define KASAN_ABI_VERSION 3 | ||
| 238 | #endif | ||
| 239 | |||
| 240 | #endif /* gcc version >= 40000 specific checks */ | ||
| 122 | 241 | ||
| 123 | #if !defined(__noclone) | 242 | #if !defined(__noclone) |
| 124 | #define __noclone /* not needed */ | 243 | #define __noclone /* not needed */ |
| @@ -129,5 +248,3 @@ | |||
| 129 | * code | 248 | * code |
| 130 | */ | 249 | */ |
| 131 | #define uninitialized_var(x) x = x | 250 | #define uninitialized_var(x) x = x |
| 132 | |||
| 133 | #define __always_inline inline __attribute__((always_inline)) | ||
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h deleted file mode 100644 index 7d89febe4d79..000000000000 --- a/include/linux/compiler-gcc3.h +++ /dev/null | |||
| @@ -1,23 +0,0 @@ | |||
| 1 | #ifndef __LINUX_COMPILER_H | ||
| 2 | #error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." | ||
| 3 | #endif | ||
| 4 | |||
| 5 | #if GCC_VERSION < 30200 | ||
| 6 | # error Sorry, your compiler is too old - please upgrade it. | ||
| 7 | #endif | ||
| 8 | |||
| 9 | #if GCC_VERSION >= 30300 | ||
| 10 | # define __used __attribute__((__used__)) | ||
| 11 | #else | ||
| 12 | # define __used __attribute__((__unused__)) | ||
| 13 | #endif | ||
| 14 | |||
| 15 | #if GCC_VERSION >= 30400 | ||
| 16 | #define __must_check __attribute__((warn_unused_result)) | ||
| 17 | #endif | ||
| 18 | |||
| 19 | #ifdef CONFIG_GCOV_KERNEL | ||
| 20 | # if GCC_VERSION < 30400 | ||
| 21 | # error "GCOV profiling support for gcc versions below 3.4 not included" | ||
| 22 | # endif /* __GNUC_MINOR__ */ | ||
| 23 | #endif /* CONFIG_GCOV_KERNEL */ | ||
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h deleted file mode 100644 index 769e19864632..000000000000 --- a/include/linux/compiler-gcc4.h +++ /dev/null | |||
| @@ -1,91 +0,0 @@ | |||
| 1 | #ifndef __LINUX_COMPILER_H | ||
| 2 | #error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." | ||
| 3 | #endif | ||
| 4 | |||
| 5 | /* GCC 4.1.[01] miscompiles __weak */ | ||
| 6 | #ifdef __KERNEL__ | ||
| 7 | # if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 | ||
| 8 | # error Your version of gcc miscompiles the __weak directive | ||
| 9 | # endif | ||
| 10 | #endif | ||
| 11 | |||
| 12 | #define __used __attribute__((__used__)) | ||
| 13 | #define __must_check __attribute__((warn_unused_result)) | ||
| 14 | #define __compiler_offsetof(a,b) __builtin_offsetof(a,b) | ||
| 15 | |||
| 16 | #if GCC_VERSION >= 40100 && GCC_VERSION < 40600 | ||
| 17 | # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) | ||
| 18 | #endif | ||
| 19 | |||
| 20 | #if GCC_VERSION >= 40300 | ||
| 21 | /* Mark functions as cold. gcc will assume any path leading to a call | ||
| 22 | to them will be unlikely. This means a lot of manual unlikely()s | ||
| 23 | are unnecessary now for any paths leading to the usual suspects | ||
| 24 | like BUG(), printk(), panic() etc. [but let's keep them for now for | ||
| 25 | older compilers] | ||
| 26 | |||
| 27 | Early snapshots of gcc 4.3 don't support this and we can't detect this | ||
| 28 | in the preprocessor, but we can live with this because they're unreleased. | ||
| 29 | Maketime probing would be overkill here. | ||
| 30 | |||
| 31 | gcc also has a __attribute__((__hot__)) to move hot functions into | ||
| 32 | a special section, but I don't see any sense in this right now in | ||
| 33 | the kernel context */ | ||
| 34 | #define __cold __attribute__((__cold__)) | ||
| 35 | |||
| 36 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) | ||
| 37 | |||
| 38 | #ifndef __CHECKER__ | ||
| 39 | # define __compiletime_warning(message) __attribute__((warning(message))) | ||
| 40 | # define __compiletime_error(message) __attribute__((error(message))) | ||
| 41 | #endif /* __CHECKER__ */ | ||
| 42 | #endif /* GCC_VERSION >= 40300 */ | ||
| 43 | |||
| 44 | #if GCC_VERSION >= 40500 | ||
| 45 | /* | ||
| 46 | * Mark a position in code as unreachable. This can be used to | ||
| 47 | * suppress control flow warnings after asm blocks that transfer | ||
| 48 | * control elsewhere. | ||
| 49 | * | ||
| 50 | * Early snapshots of gcc 4.5 don't support this and we can't detect | ||
| 51 | * this in the preprocessor, but we can live with this because they're | ||
| 52 | * unreleased. Really, we need to have autoconf for the kernel. | ||
| 53 | */ | ||
| 54 | #define unreachable() __builtin_unreachable() | ||
| 55 | |||
| 56 | /* Mark a function definition as prohibited from being cloned. */ | ||
| 57 | #define __noclone __attribute__((__noclone__)) | ||
| 58 | |||
| 59 | #endif /* GCC_VERSION >= 40500 */ | ||
| 60 | |||
| 61 | #if GCC_VERSION >= 40600 | ||
| 62 | /* | ||
| 63 | * Tell the optimizer that something else uses this function or variable. | ||
| 64 | */ | ||
| 65 | #define __visible __attribute__((externally_visible)) | ||
| 66 | #endif | ||
| 67 | |||
| 68 | /* | ||
| 69 | * GCC 'asm goto' miscompiles certain code sequences: | ||
| 70 | * | ||
| 71 | * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 | ||
| 72 | * | ||
| 73 | * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. | ||
| 74 | * | ||
| 75 | * (asm goto is automatically volatile - the naming reflects this.) | ||
| 76 | */ | ||
| 77 | #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) | ||
| 78 | |||
| 79 | #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP | ||
| 80 | #if GCC_VERSION >= 40400 | ||
| 81 | #define __HAVE_BUILTIN_BSWAP32__ | ||
| 82 | #define __HAVE_BUILTIN_BSWAP64__ | ||
| 83 | #endif | ||
| 84 | #if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) | ||
| 85 | #define __HAVE_BUILTIN_BSWAP16__ | ||
| 86 | #endif | ||
| 87 | #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ | ||
| 88 | |||
| 89 | #if GCC_VERSION >= 40902 | ||
| 90 | #define KASAN_ABI_VERSION 3 | ||
| 91 | #endif | ||
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h deleted file mode 100644 index efee493714eb..000000000000 --- a/include/linux/compiler-gcc5.h +++ /dev/null | |||
| @@ -1,67 +0,0 @@ | |||
| 1 | #ifndef __LINUX_COMPILER_H | ||
| 2 | #error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead." | ||
| 3 | #endif | ||
| 4 | |||
| 5 | #define __used __attribute__((__used__)) | ||
| 6 | #define __must_check __attribute__((warn_unused_result)) | ||
| 7 | #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) | ||
| 8 | |||
| 9 | /* Mark functions as cold. gcc will assume any path leading to a call | ||
| 10 | to them will be unlikely. This means a lot of manual unlikely()s | ||
| 11 | are unnecessary now for any paths leading to the usual suspects | ||
| 12 | like BUG(), printk(), panic() etc. [but let's keep them for now for | ||
| 13 | older compilers] | ||
| 14 | |||
| 15 | Early snapshots of gcc 4.3 don't support this and we can't detect this | ||
| 16 | in the preprocessor, but we can live with this because they're unreleased. | ||
| 17 | Maketime probing would be overkill here. | ||
| 18 | |||
| 19 | gcc also has a __attribute__((__hot__)) to move hot functions into | ||
| 20 | a special section, but I don't see any sense in this right now in | ||
| 21 | the kernel context */ | ||
| 22 | #define __cold __attribute__((__cold__)) | ||
| 23 | |||
| 24 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) | ||
| 25 | |||
| 26 | #ifndef __CHECKER__ | ||
| 27 | # define __compiletime_warning(message) __attribute__((warning(message))) | ||
| 28 | # define __compiletime_error(message) __attribute__((error(message))) | ||
| 29 | #endif /* __CHECKER__ */ | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Mark a position in code as unreachable. This can be used to | ||
| 33 | * suppress control flow warnings after asm blocks that transfer | ||
| 34 | * control elsewhere. | ||
| 35 | * | ||
| 36 | * Early snapshots of gcc 4.5 don't support this and we can't detect | ||
| 37 | * this in the preprocessor, but we can live with this because they're | ||
| 38 | * unreleased. Really, we need to have autoconf for the kernel. | ||
| 39 | */ | ||
| 40 | #define unreachable() __builtin_unreachable() | ||
| 41 | |||
| 42 | /* Mark a function definition as prohibited from being cloned. */ | ||
| 43 | #define __noclone __attribute__((__noclone__)) | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Tell the optimizer that something else uses this function or variable. | ||
| 47 | */ | ||
| 48 | #define __visible __attribute__((externally_visible)) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * GCC 'asm goto' miscompiles certain code sequences: | ||
| 52 | * | ||
| 53 | * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 | ||
| 54 | * | ||
| 55 | * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. | ||
| 56 | * | ||
| 57 | * (asm goto is automatically volatile - the naming reflects this.) | ||
| 58 | */ | ||
| 59 | #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) | ||
| 60 | |||
| 61 | #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP | ||
| 62 | #define __HAVE_BUILTIN_BSWAP32__ | ||
| 63 | #define __HAVE_BUILTIN_BSWAP64__ | ||
| 64 | #define __HAVE_BUILTIN_BSWAP16__ | ||
| 65 | #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ | ||
| 66 | |||
| 67 | #define KASAN_ABI_VERSION 4 | ||
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 0c9a2f2c2802..d4c71132d07f 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
| @@ -13,10 +13,12 @@ | |||
| 13 | /* Intel ECC compiler doesn't support gcc specific asm stmts. | 13 | /* Intel ECC compiler doesn't support gcc specific asm stmts. |
| 14 | * It uses intrinsics to do the equivalent things. | 14 | * It uses intrinsics to do the equivalent things. |
| 15 | */ | 15 | */ |
| 16 | #undef barrier | ||
| 16 | #undef barrier_data | 17 | #undef barrier_data |
| 17 | #undef RELOC_HIDE | 18 | #undef RELOC_HIDE |
| 18 | #undef OPTIMIZER_HIDE_VAR | 19 | #undef OPTIMIZER_HIDE_VAR |
| 19 | 20 | ||
| 21 | #define barrier() __memory_barrier() | ||
| 20 | #define barrier_data(ptr) barrier() | 22 | #define barrier_data(ptr) barrier() |
| 21 | 23 | ||
| 22 | #define RELOC_HIDE(ptr, off) \ | 24 | #define RELOC_HIDE(ptr, off) \ |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 05be2352fef8..e08a6ae7c0a4 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | # define __release(x) __context__(x,-1) | 17 | # define __release(x) __context__(x,-1) |
| 18 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | 18 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
| 19 | # define __percpu __attribute__((noderef, address_space(3))) | 19 | # define __percpu __attribute__((noderef, address_space(3))) |
| 20 | # define __pmem __attribute__((noderef, address_space(5))) | ||
| 20 | #ifdef CONFIG_SPARSE_RCU_POINTER | 21 | #ifdef CONFIG_SPARSE_RCU_POINTER |
| 21 | # define __rcu __attribute__((noderef, address_space(4))) | 22 | # define __rcu __attribute__((noderef, address_space(4))) |
| 22 | #else | 23 | #else |
| @@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
| 42 | # define __cond_lock(x,c) (c) | 43 | # define __cond_lock(x,c) (c) |
| 43 | # define __percpu | 44 | # define __percpu |
| 44 | # define __rcu | 45 | # define __rcu |
| 46 | # define __pmem | ||
| 45 | #endif | 47 | #endif |
| 46 | 48 | ||
| 47 | /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ | 49 | /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ |
| @@ -473,6 +475,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 473 | (volatile typeof(x) *)&(x); }) | 475 | (volatile typeof(x) *)&(x); }) |
| 474 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) | 476 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) |
| 475 | 477 | ||
| 478 | /** | ||
| 479 | * lockless_dereference() - safely load a pointer for later dereference | ||
| 480 | * @p: The pointer to load | ||
| 481 | * | ||
| 482 | * Similar to rcu_dereference(), but for situations where the pointed-to | ||
| 483 | * object's lifetime is managed by something other than RCU. That | ||
| 484 | * "something other" might be reference counting or simple immortality. | ||
| 485 | */ | ||
| 486 | #define lockless_dereference(p) \ | ||
| 487 | ({ \ | ||
| 488 | typeof(p) _________p1 = READ_ONCE(p); \ | ||
| 489 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ | ||
| 490 | (_________p1); \ | ||
| 491 | }) | ||
| 492 | |||
| 476 | /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ | 493 | /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ |
| 477 | #ifdef CONFIG_KPROBES | 494 | #ifdef CONFIG_KPROBES |
| 478 | # define __kprobes __attribute__((__section__(".kprobes.text"))) | 495 | # define __kprobes __attribute__((__section__(".kprobes.text"))) |
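A hedged sketch of lockless_dereference() in use, consuming a pointer published with smp_store_release() whose lifetime is covered by something other than RCU (the names are illustrative):

	struct foo *f = lockless_dereference(dev->cached_foo);

	if (f)
		use_foo(f);	/* lifetime guaranteed here by the device's own refcount */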
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 34025df61829..63a36e89d0eb 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
| @@ -64,14 +64,14 @@ struct config_item { | |||
| 64 | struct dentry *ci_dentry; | 64 | struct dentry *ci_dentry; |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | extern int config_item_set_name(struct config_item *, const char *, ...); | 67 | extern __printf(2, 3) |
| 68 | int config_item_set_name(struct config_item *, const char *, ...); | ||
| 68 | 69 | ||
| 69 | static inline char *config_item_name(struct config_item * item) | 70 | static inline char *config_item_name(struct config_item * item) |
| 70 | { | 71 | { |
| 71 | return item->ci_name; | 72 | return item->ci_name; |
| 72 | } | 73 | } |
| 73 | 74 | ||
| 74 | extern void config_item_init(struct config_item *); | ||
| 75 | extern void config_item_init_type_name(struct config_item *item, | 75 | extern void config_item_init_type_name(struct config_item *item, |
| 76 | const char *name, | 76 | const char *name, |
| 77 | struct config_item_type *type); | 77 | struct config_item_type *type); |
diff --git a/include/linux/console.h b/include/linux/console.h index 9f50fb413c11..bd194343c346 100644 --- a/include/linux/console.h +++ b/include/linux/console.h | |||
| @@ -115,6 +115,7 @@ static inline int con_debug_leave(void) | |||
| 115 | #define CON_BOOT (8) | 115 | #define CON_BOOT (8) |
| 116 | #define CON_ANYTIME (16) /* Safe to call when cpu is offline */ | 116 | #define CON_ANYTIME (16) /* Safe to call when cpu is offline */ |
| 117 | #define CON_BRL (32) /* Used for a braille device */ | 117 | #define CON_BRL (32) /* Used for a braille device */ |
| 118 | #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */ | ||
| 118 | 119 | ||
| 119 | struct console { | 120 | struct console { |
| 120 | char name[16]; | 121 | char name[16]; |
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index e859c98d1767..e329ee2667e1 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h | |||
| @@ -104,6 +104,7 @@ struct vc_data { | |||
| 104 | unsigned int vc_resize_user; /* resize request from user */ | 104 | unsigned int vc_resize_user; /* resize request from user */ |
| 105 | unsigned int vc_bell_pitch; /* Console bell pitch */ | 105 | unsigned int vc_bell_pitch; /* Console bell pitch */ |
| 106 | unsigned int vc_bell_duration; /* Console bell duration */ | 106 | unsigned int vc_bell_duration; /* Console bell duration */ |
| 107 | unsigned short vc_cur_blink_ms; /* Cursor blink duration */ | ||
| 107 | struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ | 108 | struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ |
| 108 | struct uni_pagedir *vc_uni_pagedir; | 109 | struct uni_pagedir *vc_uni_pagedir; |
| 109 | struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ | 110 | struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ |
diff --git a/include/linux/cper.h b/include/linux/cper.h index 76abba4b238e..dcacb1a72e26 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h | |||
| @@ -340,7 +340,27 @@ struct cper_ia_proc_ctx { | |||
| 340 | __u64 mm_reg_addr; | 340 | __u64 mm_reg_addr; |
| 341 | }; | 341 | }; |
| 342 | 342 | ||
| 343 | /* Memory Error Section */ | 343 | /* Old Memory Error Section UEFI 2.1, 2.2 */ |
| 344 | struct cper_sec_mem_err_old { | ||
| 345 | __u64 validation_bits; | ||
| 346 | __u64 error_status; | ||
| 347 | __u64 physical_addr; | ||
| 348 | __u64 physical_addr_mask; | ||
| 349 | __u16 node; | ||
| 350 | __u16 card; | ||
| 351 | __u16 module; | ||
| 352 | __u16 bank; | ||
| 353 | __u16 device; | ||
| 354 | __u16 row; | ||
| 355 | __u16 column; | ||
| 356 | __u16 bit_pos; | ||
| 357 | __u64 requestor_id; | ||
| 358 | __u64 responder_id; | ||
| 359 | __u64 target_id; | ||
| 360 | __u8 error_type; | ||
| 361 | }; | ||
| 362 | |||
| 363 | /* Memory Error Section UEFI >= 2.3 */ | ||
| 344 | struct cper_sec_mem_err { | 364 | struct cper_sec_mem_err { |
| 345 | __u64 validation_bits; | 365 | __u64 validation_bits; |
| 346 | __u64 error_status; | 366 | __u64 error_status; |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c0fb6b1b4712..23c30bdcca86 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); | |||
| 40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); | 40 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); |
| 41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); | 41 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
| 42 | 42 | ||
| 43 | extern struct device *cpu_device_create(struct device *parent, void *drvdata, | 43 | extern __printf(4, 5) |
| 44 | const struct attribute_group **groups, | 44 | struct device *cpu_device_create(struct device *parent, void *drvdata, |
| 45 | const char *fmt, ...); | 45 | const struct attribute_group **groups, |
| 46 | const char *fmt, ...); | ||
| 46 | #ifdef CONFIG_HOTPLUG_CPU | 47 | #ifdef CONFIG_HOTPLUG_CPU |
| 47 | extern void unregister_cpu(struct cpu *cpu); | 48 | extern void unregister_cpu(struct cpu *cpu); |
| 48 | extern ssize_t arch_cpu_probe(const char *, size_t); | 49 | extern ssize_t arch_cpu_probe(const char *, size_t); |
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index bd955270d5aa..c156f5082758 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h | |||
| @@ -28,6 +28,9 @@ | |||
| 28 | #include <linux/thermal.h> | 28 | #include <linux/thermal.h> |
| 29 | #include <linux/cpumask.h> | 29 | #include <linux/cpumask.h> |
| 30 | 30 | ||
| 31 | typedef int (*get_static_t)(cpumask_t *cpumask, int interval, | ||
| 32 | unsigned long voltage, u32 *power); | ||
| 33 | |||
| 31 | #ifdef CONFIG_CPU_THERMAL | 34 | #ifdef CONFIG_CPU_THERMAL |
| 32 | /** | 35 | /** |
| 33 | * cpufreq_cooling_register - function to create cpufreq cooling device. | 36 | * cpufreq_cooling_register - function to create cpufreq cooling device. |
| @@ -36,6 +39,10 @@ | |||
| 36 | struct thermal_cooling_device * | 39 | struct thermal_cooling_device * |
| 37 | cpufreq_cooling_register(const struct cpumask *clip_cpus); | 40 | cpufreq_cooling_register(const struct cpumask *clip_cpus); |
| 38 | 41 | ||
| 42 | struct thermal_cooling_device * | ||
| 43 | cpufreq_power_cooling_register(const struct cpumask *clip_cpus, | ||
| 44 | u32 capacitance, get_static_t plat_static_func); | ||
| 45 | |||
| 39 | /** | 46 | /** |
| 40 | * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. | 47 | * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. |
| 41 | * @np: a valid struct device_node to the cooling device device tree node. | 48 | * @np: a valid struct device_node to the cooling device device tree node. |
| @@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus); | |||
| 45 | struct thermal_cooling_device * | 52 | struct thermal_cooling_device * |
| 46 | of_cpufreq_cooling_register(struct device_node *np, | 53 | of_cpufreq_cooling_register(struct device_node *np, |
| 47 | const struct cpumask *clip_cpus); | 54 | const struct cpumask *clip_cpus); |
| 55 | |||
| 56 | struct thermal_cooling_device * | ||
| 57 | of_cpufreq_power_cooling_register(struct device_node *np, | ||
| 58 | const struct cpumask *clip_cpus, | ||
| 59 | u32 capacitance, | ||
| 60 | get_static_t plat_static_func); | ||
| 48 | #else | 61 | #else |
| 49 | static inline struct thermal_cooling_device * | 62 | static inline struct thermal_cooling_device * |
| 50 | of_cpufreq_cooling_register(struct device_node *np, | 63 | of_cpufreq_cooling_register(struct device_node *np, |
| @@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np, | |||
| 52 | { | 65 | { |
| 53 | return ERR_PTR(-ENOSYS); | 66 | return ERR_PTR(-ENOSYS); |
| 54 | } | 67 | } |
| 68 | |||
| 69 | static inline struct thermal_cooling_device * | ||
| 70 | of_cpufreq_power_cooling_register(struct device_node *np, | ||
| 71 | const struct cpumask *clip_cpus, | ||
| 72 | u32 capacitance, | ||
| 73 | get_static_t plat_static_func) | ||
| 74 | { | ||
| 75 | return NULL; | ||
| 76 | } | ||
| 55 | #endif | 77 | #endif |
| 56 | 78 | ||
| 57 | /** | 79 | /** |
| @@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus) | |||
| 68 | return ERR_PTR(-ENOSYS); | 90 | return ERR_PTR(-ENOSYS); |
| 69 | } | 91 | } |
| 70 | static inline struct thermal_cooling_device * | 92 | static inline struct thermal_cooling_device * |
| 93 | cpufreq_power_cooling_register(const struct cpumask *clip_cpus, | ||
| 94 | u32 capacitance, get_static_t plat_static_func) | ||
| 95 | { | ||
| 96 | return NULL; | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline struct thermal_cooling_device * | ||
| 71 | of_cpufreq_cooling_register(struct device_node *np, | 100 | of_cpufreq_cooling_register(struct device_node *np, |
| 72 | const struct cpumask *clip_cpus) | 101 | const struct cpumask *clip_cpus) |
| 73 | { | 102 | { |
| 74 | return ERR_PTR(-ENOSYS); | 103 | return ERR_PTR(-ENOSYS); |
| 75 | } | 104 | } |
| 105 | |||
| 106 | static inline struct thermal_cooling_device * | ||
| 107 | of_cpufreq_power_cooling_register(struct device_node *np, | ||
| 108 | const struct cpumask *clip_cpus, | ||
| 109 | u32 capacitance, | ||
| 110 | get_static_t plat_static_func) | ||
| 111 | { | ||
| 112 | return NULL; | ||
| 113 | } | ||
| 114 | |||
| 76 | static inline | 115 | static inline |
| 77 | void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) | 116 | void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) |
| 78 | { | 117 | { |
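A sketch of the new power-aware registration path; the capacitance value is an example and passing NULL for plat_static_func is assumed to mean static power is not accounted:

	struct thermal_cooling_device *cdev;

	cdev = cpufreq_power_cooling_register(cpu_possible_mask, 1500, NULL);
	if (IS_ERR_OR_NULL(cdev))
		pr_warn("cpufreq power cooling registration failed\n");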
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2ee4888c1f47..bde1e567b3a9 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -62,10 +62,13 @@ struct cpufreq_policy { | |||
| 62 | /* CPUs sharing clock, require sw coordination */ | 62 | /* CPUs sharing clock, require sw coordination */ |
| 63 | cpumask_var_t cpus; /* Online CPUs only */ | 63 | cpumask_var_t cpus; /* Online CPUs only */ |
| 64 | cpumask_var_t related_cpus; /* Online + Offline CPUs */ | 64 | cpumask_var_t related_cpus; /* Online + Offline CPUs */ |
| 65 | cpumask_var_t real_cpus; /* Related and present */ | ||
| 65 | 66 | ||
| 66 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs | 67 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs |
| 67 | should set cpufreq */ | 68 | should set cpufreq */ |
| 68 | unsigned int cpu; /* cpu nr of CPU managing this policy */ | 69 | unsigned int cpu; /* cpu managing this policy, must be online */ |
| 70 | unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */ | ||
| 71 | |||
| 69 | struct clk *clk; | 72 | struct clk *clk; |
| 70 | struct cpufreq_cpuinfo cpuinfo;/* see above */ | 73 | struct cpufreq_cpuinfo cpuinfo;/* see above */ |
| 71 | 74 | ||
| @@ -80,6 +83,7 @@ struct cpufreq_policy { | |||
| 80 | struct cpufreq_governor *governor; /* see below */ | 83 | struct cpufreq_governor *governor; /* see below */ |
| 81 | void *governor_data; | 84 | void *governor_data; |
| 82 | bool governor_enabled; /* governor start/stop flag */ | 85 | bool governor_enabled; /* governor start/stop flag */ |
| 86 | char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */ | ||
| 83 | 87 | ||
| 84 | struct work_struct update; /* if update_policy() needs to be | 88 | struct work_struct update; /* if update_policy() needs to be |
| 85 | * called, but you're in IRQ context */ | 89 | * called, but you're in IRQ context */ |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 9c5e89254796..d075d34279df 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -151,10 +151,6 @@ extern void cpuidle_resume(void); | |||
| 151 | extern int cpuidle_enable_device(struct cpuidle_device *dev); | 151 | extern int cpuidle_enable_device(struct cpuidle_device *dev); |
| 152 | extern void cpuidle_disable_device(struct cpuidle_device *dev); | 152 | extern void cpuidle_disable_device(struct cpuidle_device *dev); |
| 153 | extern int cpuidle_play_dead(void); | 153 | extern int cpuidle_play_dead(void); |
| 154 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | ||
| 155 | struct cpuidle_device *dev); | ||
| 156 | extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, | ||
| 157 | struct cpuidle_device *dev); | ||
| 158 | 154 | ||
| 159 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | 155 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); |
| 160 | #else | 156 | #else |
| @@ -190,16 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) | |||
| 190 | {return -ENODEV; } | 186 | {return -ENODEV; } |
| 191 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | 187 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } |
| 192 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 188 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
| 189 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | ||
| 190 | struct cpuidle_device *dev) {return NULL; } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) | ||
| 194 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | ||
| 195 | struct cpuidle_device *dev); | ||
| 196 | extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, | ||
| 197 | struct cpuidle_device *dev); | ||
| 198 | #else | ||
| 193 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | 199 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
| 194 | struct cpuidle_device *dev) | 200 | struct cpuidle_device *dev) |
| 195 | {return -ENODEV; } | 201 | {return -ENODEV; } |
| 196 | static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, | 202 | static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, |
| 197 | struct cpuidle_device *dev) | 203 | struct cpuidle_device *dev) |
| 198 | {return -ENODEV; } | 204 | {return -ENODEV; } |
| 199 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | ||
| 200 | struct cpuidle_device *dev) {return NULL; } | ||
| 201 | #endif | 205 | #endif |
| 202 | 206 | ||
| 207 | /* kernel/sched/idle.c */ | ||
| 208 | extern void sched_idle_set_state(struct cpuidle_state *idle_state); | ||
| 209 | extern void default_idle_call(void); | ||
| 210 | |||
| 203 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED | 211 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED |
| 204 | void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); | 212 | void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); |
| 205 | #else | 213 | #else |
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h index 84920f3cc83e..a9953c762eee 100644 --- a/include/linux/crc-itu-t.h +++ b/include/linux/crc-itu-t.h | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Implements the standard CRC ITU-T V.41: | 4 | * Implements the standard CRC ITU-T V.41: |
| 5 | * Width 16 | 5 | * Width 16 |
| 6 | * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1) | 6 | * Poly 0x1021 (x^16 + x^12 + x^5 + 1) |
| 7 | * Init 0 | 7 | * Init 0 |
| 8 | * | 8 | * |
| 9 | * This source code is licensed under the GNU General Public License, | 9 | * This source code is licensed under the GNU General Public License, |
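The parameters above (width 16, poly 0x1021, init 0, no reflection, no final XOR) fully define this CRC, so a bit-at-a-time routine can cross-check crc_itu_t() output. A small userspace reference sketch, on the assumption that the kernel's table-driven implementation follows exactly these parameters:

#include <stddef.h>
#include <stdint.h>

/* Reference CRC-ITU-T (a.k.a. CRC-16/XMODEM): width 16, poly 0x1021,
 * init 0, no reflection, no final XOR. */
static uint16_t crc_itu_t_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (uint16_t)buf[i] << 8;   /* feed the next byte in at the top */
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
        }
        return crc;
}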
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index cf53d0773ce3..d81961e9e37d 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h | |||
| @@ -9,5 +9,6 @@ | |||
| 9 | extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, | 9 | extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, |
| 10 | size_t len); | 10 | size_t len); |
| 11 | extern __u16 crc_t10dif(unsigned char const *, size_t); | 11 | extern __u16 crc_t10dif(unsigned char const *, size_t); |
| 12 | extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t); | ||
| 12 | 13 | ||
| 13 | #endif | 14 | #endif |
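The new crc_t10dif_update() takes a seed, which allows the CRC to be accumulated over scattered buffers. A sketch of the assumed usage; the underlying assumption is that crc_t10dif(buf, len) equals crc_t10dif_update(0, buf, len) and that one call's result can seed the next:

#include <linux/crc-t10dif.h>

static __u16 example_crc_two_buffers(const unsigned char *hdr, size_t hdr_len,
                                     const unsigned char *payload, size_t pay_len)
{
        __u16 crc;

        crc = crc_t10dif(hdr, hdr_len);                 /* same as update with seed 0 */
        crc = crc_t10dif_update(crc, payload, pay_len); /* continue over the payload */
        return crc;
}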
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 48a1a7d100f1..48b49305716b 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h | |||
| @@ -1,7 +1,11 @@ | |||
| 1 | #ifndef CEPH_CRUSH_CRUSH_H | 1 | #ifndef CEPH_CRUSH_CRUSH_H |
| 2 | #define CEPH_CRUSH_CRUSH_H | 2 | #define CEPH_CRUSH_CRUSH_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #ifdef __KERNEL__ |
| 5 | # include <linux/types.h> | ||
| 6 | #else | ||
| 7 | # include "crush_compat.h" | ||
| 8 | #endif | ||
| 5 | 9 | ||
| 6 | /* | 10 | /* |
| 7 | * CRUSH is a pseudo-random data distribution algorithm that | 11 | * CRUSH is a pseudo-random data distribution algorithm that |
| @@ -20,7 +24,11 @@ | |||
| 20 | #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ | 24 | #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ |
| 21 | 25 | ||
| 22 | #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ | 26 | #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ |
| 27 | #define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */ | ||
| 28 | #define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */ | ||
| 23 | 29 | ||
| 30 | #define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u) | ||
| 31 | #define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u) | ||
| 24 | 32 | ||
| 25 | #define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ | 33 | #define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ |
| 26 | #define CRUSH_ITEM_NONE 0x7fffffff /* no result */ | 34 | #define CRUSH_ITEM_NONE 0x7fffffff /* no result */ |
| @@ -108,6 +116,15 @@ enum { | |||
| 108 | }; | 116 | }; |
| 109 | extern const char *crush_bucket_alg_name(int alg); | 117 | extern const char *crush_bucket_alg_name(int alg); |
| 110 | 118 | ||
| 119 | /* | ||
| 120 | * tree is a legacy algorithm too, but it has been buggy, so | ||
| 121 | * exclude it. | ||
| 122 | */ | ||
| 123 | #define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \ | ||
| 124 | (1 << CRUSH_BUCKET_UNIFORM) | \ | ||
| 125 | (1 << CRUSH_BUCKET_LIST) | \ | ||
| 126 | (1 << CRUSH_BUCKET_STRAW)) | ||
| 127 | |||
| 111 | struct crush_bucket { | 128 | struct crush_bucket { |
| 112 | __s32 id; /* this'll be negative */ | 129 | __s32 id; /* this'll be negative */ |
| 113 | __u16 type; /* non-zero; type=0 is reserved for devices */ | 130 | __u16 type; /* non-zero; type=0 is reserved for devices */ |
| @@ -174,7 +191,7 @@ struct crush_map { | |||
| 174 | /* choose local attempts using a fallback permutation before | 191 | /* choose local attempts using a fallback permutation before |
| 175 | * re-descent */ | 192 | * re-descent */ |
| 176 | __u32 choose_local_fallback_tries; | 193 | __u32 choose_local_fallback_tries; |
| 177 | /* choose attempts before giving up */ | 194 | /* choose attempts before giving up */ |
| 178 | __u32 choose_total_tries; | 195 | __u32 choose_total_tries; |
| 179 | /* attempt chooseleaf inner descent once for firstn mode; on | 196 | /* attempt chooseleaf inner descent once for firstn mode; on |
| 180 | * reject retry outer descent. Note that this does *not* | 197 | * reject retry outer descent. Note that this does *not* |
| @@ -187,6 +204,25 @@ struct crush_map { | |||
| 187 | * that want to limit reshuffling, a value of 3 or 4 will make the | 204 | * that want to limit reshuffling, a value of 3 or 4 will make the |
| 188 | * mappings line up a bit better with previous mappings. */ | 205 | * mappings line up a bit better with previous mappings. */ |
| 189 | __u8 chooseleaf_vary_r; | 206 | __u8 chooseleaf_vary_r; |
| 207 | |||
| 208 | #ifndef __KERNEL__ | ||
| 209 | /* | ||
| 210 | * version 0 (original) of straw_calc has various flaws. version 1 | ||
| 211 | * fixes a few of them. | ||
| 212 | */ | ||
| 213 | __u8 straw_calc_version; | ||
| 214 | |||
| 215 | /* | ||
| 216 | * allowed bucket algs is a bitmask, here the bit positions | ||
| 217 | * are CRUSH_BUCKET_*. note that these are *bits* and | ||
| 218 | * CRUSH_BUCKET_* values are not, so we need to or together (1 | ||
| 219 | * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to | ||
| 220 | * minimize confusion (bucket type values start at 1). | ||
| 221 | */ | ||
| 222 | __u32 allowed_bucket_algs; | ||
| 223 | |||
| 224 | __u32 *choose_tries; | ||
| 225 | #endif | ||
| 190 | }; | 226 | }; |
| 191 | 227 | ||
| 192 | 228 | ||
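The allowed_bucket_algs comment above stresses that the field holds (1 << CRUSH_BUCKET_*) bits rather than the raw algorithm numbers. A short userspace-side sketch (the field only exists outside __KERNEL__); the helper names are illustrative:

#include "crush.h"

/* Bit positions are the CRUSH_BUCKET_* values themselves, so membership
 * tests use (1 << alg), never the raw algorithm number. */
static int bucket_alg_allowed(const struct crush_map *map, int alg)
{
        return (map->allowed_bucket_algs & (1u << alg)) != 0;
}

static void allow_only_legacy_algs(struct crush_map *map)
{
        /* uniform, list and straw; tree is excluded because it has been buggy */
        map->allowed_bucket_algs = CRUSH_LEGACY_ALLOWED_BUCKET_ALGS;
}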
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h index 91e884230d5d..d1d90258242e 100644 --- a/include/linux/crush/hash.h +++ b/include/linux/crush/hash.h | |||
| @@ -1,6 +1,12 @@ | |||
| 1 | #ifndef CEPH_CRUSH_HASH_H | 1 | #ifndef CEPH_CRUSH_HASH_H |
| 2 | #define CEPH_CRUSH_HASH_H | 2 | #define CEPH_CRUSH_HASH_H |
| 3 | 3 | ||
| 4 | #ifdef __KERNEL__ | ||
| 5 | # include <linux/types.h> | ||
| 6 | #else | ||
| 7 | # include "crush_compat.h" | ||
| 8 | #endif | ||
| 9 | |||
| 4 | #define CRUSH_HASH_RJENKINS1 0 | 10 | #define CRUSH_HASH_RJENKINS1 0 |
| 5 | 11 | ||
| 6 | #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 | 12 | #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 |
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index eab367446eea..5dfd5b1125d2 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * LGPL2 | 8 | * LGPL2 |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/crush/crush.h> | 11 | #include "crush.h" |
| 12 | 12 | ||
| 13 | extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); | 13 | extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); |
| 14 | extern int crush_do_rule(const struct crush_map *map, | 14 | extern int crush_do_rule(const struct crush_map *map, |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index df334cbacc6d..d67ae119cf4e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -160,6 +160,7 @@ struct dentry_operations { | |||
| 160 | char *(*d_dname)(struct dentry *, char *, int); | 160 | char *(*d_dname)(struct dentry *, char *, int); |
| 161 | struct vfsmount *(*d_automount)(struct path *); | 161 | struct vfsmount *(*d_automount)(struct path *); |
| 162 | int (*d_manage)(struct dentry *, bool); | 162 | int (*d_manage)(struct dentry *, bool); |
| 163 | struct inode *(*d_select_inode)(struct dentry *, unsigned); | ||
| 163 | } ____cacheline_aligned; | 164 | } ____cacheline_aligned; |
| 164 | 165 | ||
| 165 | /* | 166 | /* |
| @@ -225,6 +226,7 @@ struct dentry_operations { | |||
| 225 | 226 | ||
| 226 | #define DCACHE_MAY_FREE 0x00800000 | 227 | #define DCACHE_MAY_FREE 0x00800000 |
| 227 | #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ | 228 | #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ |
| 229 | #define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ | ||
| 228 | 230 | ||
| 229 | extern seqlock_t rename_lock; | 231 | extern seqlock_t rename_lock; |
| 230 | 232 | ||
| @@ -325,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry) | |||
| 325 | /* | 327 | /* |
| 326 | * helper function for dentry_operations.d_dname() members | 328 | * helper function for dentry_operations.d_dname() members |
| 327 | */ | 329 | */ |
| 328 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | 330 | extern __printf(4, 5) |
| 331 | char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | ||
| 329 | extern char *simple_dname(struct dentry *, char *, int); | 332 | extern char *simple_dname(struct dentry *, char *, int); |
| 330 | 333 | ||
| 331 | extern char *__d_path(const struct path *, const struct path *, char *, int); | 334 | extern char *__d_path(const struct path *, const struct path *, char *, int); |
| @@ -505,6 +508,11 @@ static inline bool d_really_is_positive(const struct dentry *dentry) | |||
| 505 | return dentry->d_inode != NULL; | 508 | return dentry->d_inode != NULL; |
| 506 | } | 509 | } |
| 507 | 510 | ||
| 511 | static inline int simple_positive(struct dentry *dentry) | ||
| 512 | { | ||
| 513 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
| 514 | } | ||
| 515 | |||
| 508 | extern void d_set_fallthru(struct dentry *dentry); | 516 | extern void d_set_fallthru(struct dentry *dentry); |
| 509 | 517 | ||
| 510 | static inline bool d_is_fallthru(const struct dentry *dentry) | 518 | static inline bool d_is_fallthru(const struct dentry *dentry) |
diff --git a/include/linux/device.h b/include/linux/device.h index 6558af90c8fe..a2b4ea70a946 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -196,12 +196,41 @@ extern struct kset *bus_get_kset(struct bus_type *bus); | |||
| 196 | extern struct klist *bus_get_device_klist(struct bus_type *bus); | 196 | extern struct klist *bus_get_device_klist(struct bus_type *bus); |
| 197 | 197 | ||
| 198 | /** | 198 | /** |
| 199 | * enum probe_type - device driver probe type to try | ||
| 200 | * Device drivers may opt in for special handling of their | ||
| 201 | * respective probe routines. This tells the core what to | ||
| 202 | * expect and prefer. | ||
| 203 | * | ||
| 204 | * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well | ||
| 205 | * whether probed synchronously or asynchronously. | ||
| 206 | * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose | ||
| 207 | * probing order is not essential for booting the system may | ||
| 208 | * opt into executing their probes asynchronously. | ||
| 209 | * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need | ||
| 210 | * their probe routines to run synchronously with driver and | ||
| 211 | * device registration (with the exception of -EPROBE_DEFER | ||
| 212 | * handling - re-probing always ends up being done asynchronously). | ||
| 213 | * | ||
| 214 | * Note that the end goal is to switch the kernel to use asynchronous | ||
| 215 | * probing by default, so annotating drivers with | ||
| 216 | * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us | ||
| 217 | * to speed up the boot process while we are validating the rest of the | ||
| 218 | * drivers. | ||
| 219 | */ | ||
| 220 | enum probe_type { | ||
| 221 | PROBE_DEFAULT_STRATEGY, | ||
| 222 | PROBE_PREFER_ASYNCHRONOUS, | ||
| 223 | PROBE_FORCE_SYNCHRONOUS, | ||
| 224 | }; | ||
| 225 | |||
| 226 | /** | ||
| 199 | * struct device_driver - The basic device driver structure | 227 | * struct device_driver - The basic device driver structure |
| 200 | * @name: Name of the device driver. | 228 | * @name: Name of the device driver. |
| 201 | * @bus: The bus which the device of this driver belongs to. | 229 | * @bus: The bus which the device of this driver belongs to. |
| 202 | * @owner: The module owner. | 230 | * @owner: The module owner. |
| 203 | * @mod_name: Used for built-in modules. | 231 | * @mod_name: Used for built-in modules. |
| 204 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. | 232 | * @suppress_bind_attrs: Disables bind/unbind via sysfs. |
| 233 | * @probe_type: Type of the probe (synchronous or asynchronous) to use. | ||
| 205 | * @of_match_table: The open firmware table. | 234 | * @of_match_table: The open firmware table. |
| 206 | * @acpi_match_table: The ACPI match table. | 235 | * @acpi_match_table: The ACPI match table. |
| 207 | * @probe: Called to query the existence of a specific device, | 236 | * @probe: Called to query the existence of a specific device, |
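The probe_type documentation above describes a per-driver opt-in. A hedged sketch of a platform driver requesting asynchronous probing; the driver name and probe body are made up for illustration:

#include <linux/module.h>
#include <linux/platform_device.h>

/* A "slow" device whose probe order does not matter for boot can ask for
 * asynchronous probing via the new probe_type field. */
static int example_probe(struct platform_device *pdev)
{
        /* ... potentially long-running hardware initialisation ... */
        return 0;
}

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-slow-device",
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};
module_platform_driver(example_driver);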
| @@ -235,6 +264,7 @@ struct device_driver { | |||
| 235 | const char *mod_name; /* used for built-in modules */ | 264 | const char *mod_name; /* used for built-in modules */ |
| 236 | 265 | ||
| 237 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ | 266 | bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ |
| 267 | enum probe_type probe_type; | ||
| 238 | 268 | ||
| 239 | const struct of_device_id *of_match_table; | 269 | const struct of_device_id *of_match_table; |
| 240 | const struct acpi_device_id *acpi_match_table; | 270 | const struct acpi_device_id *acpi_match_table; |
| @@ -607,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id); | |||
| 607 | 637 | ||
| 608 | /* managed devm_k.alloc/kfree for device drivers */ | 638 | /* managed devm_k.alloc/kfree for device drivers */ |
| 609 | extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); | 639 | extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); |
| 610 | extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, | 640 | extern __printf(3, 0) |
| 611 | va_list ap); | 641 | char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, |
| 642 | va_list ap); | ||
| 612 | extern __printf(3, 4) | 643 | extern __printf(3, 4) |
| 613 | char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); | 644 | char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); |
| 614 | static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) | 645 | static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) |
| @@ -975,17 +1006,16 @@ extern int __must_check device_bind_driver(struct device *dev); | |||
| 975 | extern void device_release_driver(struct device *dev); | 1006 | extern void device_release_driver(struct device *dev); |
| 976 | extern int __must_check device_attach(struct device *dev); | 1007 | extern int __must_check device_attach(struct device *dev); |
| 977 | extern int __must_check driver_attach(struct device_driver *drv); | 1008 | extern int __must_check driver_attach(struct device_driver *drv); |
| 1009 | extern void device_initial_probe(struct device *dev); | ||
| 978 | extern int __must_check device_reprobe(struct device *dev); | 1010 | extern int __must_check device_reprobe(struct device *dev); |
| 979 | 1011 | ||
| 980 | /* | 1012 | /* |
| 981 | * Easy functions for dynamically creating devices on the fly | 1013 | * Easy functions for dynamically creating devices on the fly |
| 982 | */ | 1014 | */ |
| 983 | extern struct device *device_create_vargs(struct class *cls, | 1015 | extern __printf(5, 0) |
| 984 | struct device *parent, | 1016 | struct device *device_create_vargs(struct class *cls, struct device *parent, |
| 985 | dev_t devt, | 1017 | dev_t devt, void *drvdata, |
| 986 | void *drvdata, | 1018 | const char *fmt, va_list vargs); |
| 987 | const char *fmt, | ||
| 988 | va_list vargs); | ||
| 989 | extern __printf(5, 6) | 1019 | extern __printf(5, 6) |
| 990 | struct device *device_create(struct class *cls, struct device *parent, | 1020 | struct device *device_create(struct class *cls, struct device *parent, |
| 991 | dev_t devt, void *drvdata, | 1021 | dev_t devt, void *drvdata, |
| @@ -1269,4 +1299,26 @@ static void __exit __driver##_exit(void) \ | |||
| 1269 | } \ | 1299 | } \ |
| 1270 | module_exit(__driver##_exit); | 1300 | module_exit(__driver##_exit); |
| 1271 | 1301 | ||
| 1302 | /** | ||
| 1303 | * builtin_driver() - Helper macro for drivers that don't do anything | ||
| 1304 | * special in init and have no exit. This eliminates some boilerplate. | ||
| 1305 | * Each driver may only use this macro once, and calling it replaces | ||
| 1306 | * device_initcall (or in some cases, the legacy __initcall). This is | ||
| 1307 | * meant to be a direct parallel of module_driver() above but without | ||
| 1308 | * the __exit stuff that is not used for builtin cases. | ||
| 1309 | * | ||
| 1310 | * @__driver: driver name | ||
| 1311 | * @__register: register function for this driver type | ||
| 1312 | * @...: Additional arguments to be passed to __register | ||
| 1313 | * | ||
| 1314 | * Use this macro to construct bus specific macros for registering | ||
| 1315 | * drivers, and do not use it on its own. | ||
| 1316 | */ | ||
| 1317 | #define builtin_driver(__driver, __register, ...) \ | ||
| 1318 | static int __init __driver##_init(void) \ | ||
| 1319 | { \ | ||
| 1320 | return __register(&(__driver) , ##__VA_ARGS__); \ | ||
| 1321 | } \ | ||
| 1322 | device_initcall(__driver##_init); | ||
| 1323 | |||
| 1272 | #endif /* _DEVICE_H_ */ | 1324 | #endif /* _DEVICE_H_ */ |
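The builtin_driver() kernel-doc says the macro is raw material for bus-specific wrappers. A sketch of the intended layering for a built-in-only platform driver; the wrapper macro name is made up, and the platform core may offer its own equivalent:

#include <linux/platform_device.h>

/* Hypothetical bus-level helper layered on builtin_driver(), the same way
 * module_platform_driver() layers on module_driver(). */
#define builtin_example_platform_driver(__platform_driver) \
        builtin_driver(__platform_driver, platform_driver_register)

static int example_builtin_probe(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_builtin_driver = {
        .probe = example_builtin_probe,
        .driver = {
                .name = "example-builtin",
        },
};
builtin_example_platform_driver(example_builtin_driver);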
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 2f0b431b73e0..f98bd7068d55 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
| @@ -115,6 +115,8 @@ struct dma_buf_ops { | |||
| 115 | * @attachments: list of dma_buf_attachment that denotes all devices attached. | 115 | * @attachments: list of dma_buf_attachment that denotes all devices attached. |
| 116 | * @ops: dma_buf_ops associated with this buffer object. | 116 | * @ops: dma_buf_ops associated with this buffer object. |
| 117 | * @exp_name: name of the exporter; useful for debugging. | 117 | * @exp_name: name of the exporter; useful for debugging. |
| 118 | * @owner: pointer to exporter module; used for refcounting when exporter is a | ||
| 119 | * kernel module. | ||
| 118 | * @list_node: node for dma_buf accounting and debugging. | 120 | * @list_node: node for dma_buf accounting and debugging. |
| 119 | * @priv: exporter specific private data for this buffer object. | 121 | * @priv: exporter specific private data for this buffer object. |
| 120 | * @resv: reservation object linked to this dma-buf | 122 | * @resv: reservation object linked to this dma-buf |
| @@ -129,6 +131,7 @@ struct dma_buf { | |||
| 129 | unsigned vmapping_counter; | 131 | unsigned vmapping_counter; |
| 130 | void *vmap_ptr; | 132 | void *vmap_ptr; |
| 131 | const char *exp_name; | 133 | const char *exp_name; |
| 134 | struct module *owner; | ||
| 132 | struct list_head list_node; | 135 | struct list_head list_node; |
| 133 | void *priv; | 136 | void *priv; |
| 134 | struct reservation_object *resv; | 137 | struct reservation_object *resv; |
| @@ -164,7 +167,8 @@ struct dma_buf_attachment { | |||
| 164 | 167 | ||
| 165 | /** | 168 | /** |
| 166 | * struct dma_buf_export_info - holds information needed to export a dma_buf | 169 | * struct dma_buf_export_info - holds information needed to export a dma_buf |
| 167 | * @exp_name: name of the exporting module - useful for debugging. | 170 | * @exp_name: name of the exporter - useful for debugging. |
| 171 | * @owner: pointer to exporter module - used for refcounting kernel module | ||
| 168 | * @ops: Attach allocator-defined dma buf ops to the new buffer | 172 | * @ops: Attach allocator-defined dma buf ops to the new buffer |
| 169 | * @size: Size of the buffer | 173 | * @size: Size of the buffer |
| 170 | * @flags: mode flags for the file | 174 | * @flags: mode flags for the file |
| @@ -176,6 +180,7 @@ struct dma_buf_attachment { | |||
| 176 | */ | 180 | */ |
| 177 | struct dma_buf_export_info { | 181 | struct dma_buf_export_info { |
| 178 | const char *exp_name; | 182 | const char *exp_name; |
| 183 | struct module *owner; | ||
| 179 | const struct dma_buf_ops *ops; | 184 | const struct dma_buf_ops *ops; |
| 180 | size_t size; | 185 | size_t size; |
| 181 | int flags; | 186 | int flags; |
| @@ -187,7 +192,8 @@ struct dma_buf_export_info { | |||
| 187 | * helper macro for exporters; zeros and fills in most common values | 192 | * helper macro for exporters; zeros and fills in most common values |
| 188 | */ | 193 | */ |
| 189 | #define DEFINE_DMA_BUF_EXPORT_INFO(a) \ | 194 | #define DEFINE_DMA_BUF_EXPORT_INFO(a) \ |
| 190 | struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME } | 195 | struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ |
| 196 | .owner = THIS_MODULE } | ||
| 191 | 197 | ||
| 192 | /** | 198 | /** |
| 193 | * get_dma_buf - convenience wrapper for get_file. | 199 | * get_dma_buf - convenience wrapper for get_file. |
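With the new owner field, DEFINE_DMA_BUF_EXPORT_INFO() pins the exporting module automatically, so an exporter only fills in the buffer-specific fields. A hedged sketch; struct my_buffer and my_dmabuf_ops stand in for the exporter's own objects:

#include <linux/dma-buf.h>

/* DEFINE_DMA_BUF_EXPORT_INFO() pre-fills .exp_name = KBUILD_MODNAME and
 * .owner = THIS_MODULE; only buffer-specific fields remain. */
static struct dma_buf *example_export(struct my_buffer *buf)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &my_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = buf;

        return dma_buf_export(&exp_info);
}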
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h new file mode 100644 index 000000000000..3edc99294bf6 --- /dev/null +++ b/include/linux/dma/pxa-dma.h | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | #ifndef _PXA_DMA_H_ | ||
| 2 | #define _PXA_DMA_H_ | ||
| 3 | |||
| 4 | enum pxad_chan_prio { | ||
| 5 | PXAD_PRIO_HIGHEST = 0, | ||
| 6 | PXAD_PRIO_NORMAL, | ||
| 7 | PXAD_PRIO_LOW, | ||
| 8 | PXAD_PRIO_LOWEST, | ||
| 9 | }; | ||
| 10 | |||
| 11 | struct pxad_param { | ||
| 12 | unsigned int drcmr; | ||
| 13 | enum pxad_chan_prio prio; | ||
| 14 | }; | ||
| 15 | |||
| 16 | struct dma_chan; | ||
| 17 | |||
| 18 | #ifdef CONFIG_PXA_DMA | ||
| 19 | bool pxad_filter_fn(struct dma_chan *chan, void *param); | ||
| 20 | #else | ||
| 21 | static inline bool pxad_filter_fn(struct dma_chan *chan, void *param) | ||
| 22 | { | ||
| 23 | return false; | ||
| 24 | } | ||
| 25 | #endif | ||
| 26 | |||
| 27 | #endif /* _PXA_DMA_H_ */ | ||
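pxad_filter_fn() and struct pxad_param are meant to be fed through the generic dmaengine channel-request path. A sketch of a client asking for a slave channel by DRCMR line and priority; the DRCMR value 42 is a placeholder, not a real request line:

#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>

static struct dma_chan *example_request_pxa_chan(void)
{
        dma_cap_mask_t mask;
        struct pxad_param param = {
                .drcmr = 42,                    /* board/peripheral specific */
                .prio = PXAD_PRIO_NORMAL,
        };

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* the filter rejects channels that cannot serve this request line */
        return dma_request_channel(mask, pxad_filter_fn, &param);
}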
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ad419757241f..e2f5eb419976 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -65,6 +65,7 @@ enum dma_transaction_type { | |||
| 65 | DMA_PQ, | 65 | DMA_PQ, |
| 66 | DMA_XOR_VAL, | 66 | DMA_XOR_VAL, |
| 67 | DMA_PQ_VAL, | 67 | DMA_PQ_VAL, |
| 68 | DMA_MEMSET, | ||
| 68 | DMA_INTERRUPT, | 69 | DMA_INTERRUPT, |
| 69 | DMA_SG, | 70 | DMA_SG, |
| 70 | DMA_PRIVATE, | 71 | DMA_PRIVATE, |
| @@ -122,10 +123,18 @@ enum dma_transfer_direction { | |||
| 122 | * chunk and before first src/dst address for next chunk. | 123 | * chunk and before first src/dst address for next chunk. |
| 123 | * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. | 124 | * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. |
| 124 | * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. | 125 | * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. |
| 126 | * @dst_icg: Number of bytes to jump after last dst address of this | ||
| 127 | * chunk and before the first dst address for next chunk. | ||
| 128 | * Ignored if dst_inc is true and dst_sgl is false. | ||
| 129 | * @src_icg: Number of bytes to jump after last src address of this | ||
| 130 | * chunk and before the first src address for next chunk. | ||
| 131 | * Ignored if src_inc is true and src_sgl is false. | ||
| 125 | */ | 132 | */ |
| 126 | struct data_chunk { | 133 | struct data_chunk { |
| 127 | size_t size; | 134 | size_t size; |
| 128 | size_t icg; | 135 | size_t icg; |
| 136 | size_t dst_icg; | ||
| 137 | size_t src_icg; | ||
| 129 | }; | 138 | }; |
| 130 | 139 | ||
| 131 | /** | 140 | /** |
| @@ -222,6 +231,16 @@ struct dma_chan_percpu { | |||
| 222 | }; | 231 | }; |
| 223 | 232 | ||
| 224 | /** | 233 | /** |
| 234 | * struct dma_router - DMA router structure | ||
| 235 | * @dev: pointer to the DMA router device | ||
| 236 | * @route_free: function to be called when the route can be disconnected | ||
| 237 | */ | ||
| 238 | struct dma_router { | ||
| 239 | struct device *dev; | ||
| 240 | void (*route_free)(struct device *dev, void *route_data); | ||
| 241 | }; | ||
| 242 | |||
| 243 | /** | ||
| 225 | * struct dma_chan - devices supply DMA channels, clients use them | 244 | * struct dma_chan - devices supply DMA channels, clients use them |
| 226 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 245 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
| 227 | * @cookie: last cookie value returned to client | 246 | * @cookie: last cookie value returned to client |
| @@ -232,6 +251,8 @@ struct dma_chan_percpu { | |||
| 232 | * @local: per-cpu pointer to a struct dma_chan_percpu | 251 | * @local: per-cpu pointer to a struct dma_chan_percpu |
| 233 | * @client_count: how many clients are using this channel | 252 | * @client_count: how many clients are using this channel |
| 234 | * @table_count: number of appearances in the mem-to-mem allocation table | 253 | * @table_count: number of appearances in the mem-to-mem allocation table |
| 254 | * @router: pointer to the DMA router structure | ||
| 255 | * @route_data: channel specific data for the router | ||
| 235 | * @private: private data for certain client-channel associations | 256 | * @private: private data for certain client-channel associations |
| 236 | */ | 257 | */ |
| 237 | struct dma_chan { | 258 | struct dma_chan { |
| @@ -247,6 +268,11 @@ struct dma_chan { | |||
| 247 | struct dma_chan_percpu __percpu *local; | 268 | struct dma_chan_percpu __percpu *local; |
| 248 | int client_count; | 269 | int client_count; |
| 249 | int table_count; | 270 | int table_count; |
| 271 | |||
| 272 | /* DMA router */ | ||
| 273 | struct dma_router *router; | ||
| 274 | void *route_data; | ||
| 275 | |||
| 250 | void *private; | 276 | void *private; |
| 251 | }; | 277 | }; |
| 252 | 278 | ||
| @@ -570,6 +596,7 @@ struct dma_tx_state { | |||
| 570 | * @copy_align: alignment shift for memcpy operations | 596 | * @copy_align: alignment shift for memcpy operations |
| 571 | * @xor_align: alignment shift for xor operations | 597 | * @xor_align: alignment shift for xor operations |
| 572 | * @pq_align: alignment shift for pq operations | 598 | * @pq_align: alignment shift for pq operations |
| 599 | * @fill_align: alignment shift for memset operations | ||
| 573 | * @dev_id: unique device ID | 600 | * @dev_id: unique device ID |
| 574 | * @dev: struct device reference for dma mapping api | 601 | * @dev: struct device reference for dma mapping api |
| 575 | * @src_addr_widths: bit mask of src addr widths the device supports | 602 | * @src_addr_widths: bit mask of src addr widths the device supports |
| @@ -588,6 +615,7 @@ struct dma_tx_state { | |||
| 588 | * @device_prep_dma_xor_val: prepares a xor validation operation | 615 | * @device_prep_dma_xor_val: prepares a xor validation operation |
| 589 | * @device_prep_dma_pq: prepares a pq operation | 616 | * @device_prep_dma_pq: prepares a pq operation |
| 590 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation | 617 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation |
| 618 | * @device_prep_dma_memset: prepares a memset operation | ||
| 591 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 619 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
| 592 | * @device_prep_slave_sg: prepares a slave dma operation | 620 | * @device_prep_slave_sg: prepares a slave dma operation |
| 593 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. | 621 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. |
| @@ -620,6 +648,7 @@ struct dma_device { | |||
| 620 | u8 copy_align; | 648 | u8 copy_align; |
| 621 | u8 xor_align; | 649 | u8 xor_align; |
| 622 | u8 pq_align; | 650 | u8 pq_align; |
| 651 | u8 fill_align; | ||
| 623 | #define DMA_HAS_PQ_CONTINUE (1 << 15) | 652 | #define DMA_HAS_PQ_CONTINUE (1 << 15) |
| 624 | 653 | ||
| 625 | int dev_id; | 654 | int dev_id; |
| @@ -650,6 +679,9 @@ struct dma_device { | |||
| 650 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | 679 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, |
| 651 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 680 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
| 652 | enum sum_check_flags *pqres, unsigned long flags); | 681 | enum sum_check_flags *pqres, unsigned long flags); |
| 682 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( | ||
| 683 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | ||
| 684 | unsigned long flags); | ||
| 653 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 685 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
| 654 | struct dma_chan *chan, unsigned long flags); | 686 | struct dma_chan *chan, unsigned long flags); |
| 655 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( | 687 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( |
| @@ -745,6 +777,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | |||
| 745 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); | 777 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); |
| 746 | } | 778 | } |
| 747 | 779 | ||
| 780 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( | ||
| 781 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | ||
| 782 | unsigned long flags) | ||
| 783 | { | ||
| 784 | if (!chan || !chan->device) | ||
| 785 | return NULL; | ||
| 786 | |||
| 787 | return chan->device->device_prep_dma_memset(chan, dest, value, | ||
| 788 | len, flags); | ||
| 789 | } | ||
| 790 | |||
| 748 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( | 791 | static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( |
| 749 | struct dma_chan *chan, | 792 | struct dma_chan *chan, |
| 750 | struct scatterlist *dst_sg, unsigned int dst_nents, | 793 | struct scatterlist *dst_sg, unsigned int dst_nents, |
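dmaengine_prep_dma_memset() mirrors the other prep helpers: a client checks the DMA_MEMSET capability, prepares a descriptor, submits it and kicks the engine. A reduced sketch with error handling and completion waiting trimmed; dest is assumed to be an already-mapped DMA address:

#include <linux/dmaengine.h>

static int example_dma_memset(struct dma_chan *chan, dma_addr_t dest,
                              int value, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        if (!dma_has_cap(DMA_MEMSET, chan->device->cap_mask))
                return -EOPNOTSUPP;

        tx = dmaengine_prep_dma_memset(chan, dest, value, len,
                                       DMA_PREP_INTERRUPT);
        if (!tx)
                return -EIO;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);
        return 0;
}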
| @@ -820,6 +863,12 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, | |||
| 820 | return dmaengine_check_align(dev->pq_align, off1, off2, len); | 863 | return dmaengine_check_align(dev->pq_align, off1, off2, len); |
| 821 | } | 864 | } |
| 822 | 865 | ||
| 866 | static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, | ||
| 867 | size_t off2, size_t len) | ||
| 868 | { | ||
| 869 | return dmaengine_check_align(dev->fill_align, off1, off2, len); | ||
| 870 | } | ||
| 871 | |||
| 823 | static inline void | 872 | static inline void |
| 824 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) | 873 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) |
| 825 | { | 874 | { |
| @@ -874,6 +923,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) | |||
| 874 | BUG(); | 923 | BUG(); |
| 875 | } | 924 | } |
| 876 | 925 | ||
| 926 | static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, | ||
| 927 | size_t dir_icg) | ||
| 928 | { | ||
| 929 | if (inc) { | ||
| 930 | if (dir_icg) | ||
| 931 | return dir_icg; | ||
| 932 | else if (sgl) | ||
| 933 | return icg; | ||
| 934 | } | ||
| 935 | |||
| 936 | return 0; | ||
| 937 | } | ||
| 938 | |||
| 939 | static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt, | ||
| 940 | struct data_chunk *chunk) | ||
| 941 | { | ||
| 942 | return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl, | ||
| 943 | chunk->icg, chunk->dst_icg); | ||
| 944 | } | ||
| 945 | |||
| 946 | static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt, | ||
| 947 | struct data_chunk *chunk) | ||
| 948 | { | ||
| 949 | return dmaengine_get_icg(xt->src_inc, xt->src_sgl, | ||
| 950 | chunk->icg, chunk->src_icg); | ||
| 951 | } | ||
| 952 | |||
| 877 | /* --- public DMA engine API --- */ | 953 | /* --- public DMA engine API --- */ |
| 878 | 954 | ||
| 879 | #ifdef CONFIG_DMA_ENGINE | 955 | #ifdef CONFIG_DMA_ENGINE |
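The helpers above encode the precedence rule spelled out in the data_chunk comments: a per-direction dst_icg/src_icg wins, otherwise the shared icg applies only when the matching *_sgl flag is set, and a non-incrementing address gets no gap at all. A sketch of how a driver walking an interleaved template might consume them:

/* Advance src/dst addresses past one chunk of a dma_interleaved_template.
 * dmaengine_get_dst_icg()/dmaengine_get_src_icg() already fold in the
 * "dst_icg/src_icg overrides icg" rule, so the driver just adds the result. */
static void example_advance(struct dma_interleaved_template *xt,
                            struct data_chunk *chunk,
                            dma_addr_t *src, dma_addr_t *dst)
{
        if (xt->src_inc)
                *src += chunk->size + dmaengine_get_src_icg(xt, chunk);
        if (xt->dst_inc)
                *dst += chunk->size + dmaengine_get_dst_icg(xt, chunk);
}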
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 52456aa566a0..e1043f79122f 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
| @@ -11,8 +11,8 @@ | |||
| 11 | #ifndef LINUX_DMAPOOL_H | 11 | #ifndef LINUX_DMAPOOL_H |
| 12 | #define LINUX_DMAPOOL_H | 12 | #define LINUX_DMAPOOL_H |
| 13 | 13 | ||
| 14 | #include <linux/scatterlist.h> | ||
| 14 | #include <asm/io.h> | 15 | #include <asm/io.h> |
| 15 | #include <asm/scatterlist.h> | ||
| 16 | 16 | ||
| 17 | struct device; | 17 | struct device; |
| 18 | 18 | ||
diff --git a/include/linux/dmi.h b/include/linux/dmi.h index f820f0a336c9..5055ac34142d 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define __DMI_H__ | 2 | #define __DMI_H__ |
| 3 | 3 | ||
| 4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
| 5 | #include <linux/kobject.h> | ||
| 5 | #include <linux/mod_devicetable.h> | 6 | #include <linux/mod_devicetable.h> |
| 6 | 7 | ||
| 7 | /* enum dmi_field is in mod_devicetable.h */ | 8 | /* enum dmi_field is in mod_devicetable.h */ |
| @@ -74,7 +75,7 @@ struct dmi_header { | |||
| 74 | u8 type; | 75 | u8 type; |
| 75 | u8 length; | 76 | u8 length; |
| 76 | u16 handle; | 77 | u16 handle; |
| 77 | }; | 78 | } __packed; |
| 78 | 79 | ||
| 79 | struct dmi_device { | 80 | struct dmi_device { |
| 80 | struct list_head list; | 81 | struct list_head list; |
| @@ -93,6 +94,7 @@ struct dmi_dev_onboard { | |||
| 93 | int devfn; | 94 | int devfn; |
| 94 | }; | 95 | }; |
| 95 | 96 | ||
| 97 | extern struct kobject *dmi_kobj; | ||
| 96 | extern int dmi_check_system(const struct dmi_system_id *list); | 98 | extern int dmi_check_system(const struct dmi_system_id *list); |
| 97 | const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); | 99 | const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); |
| 98 | extern const char * dmi_get_system_info(int field); | 100 | extern const char * dmi_get_system_info(int field); |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 2092965afca3..85ef051ac6fb 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -85,7 +85,8 @@ typedef struct { | |||
| 85 | #define EFI_MEMORY_MAPPED_IO 11 | 85 | #define EFI_MEMORY_MAPPED_IO 11 |
| 86 | #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 | 86 | #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 |
| 87 | #define EFI_PAL_CODE 13 | 87 | #define EFI_PAL_CODE 13 |
| 88 | #define EFI_MAX_MEMORY_TYPE 14 | 88 | #define EFI_PERSISTENT_MEMORY 14 |
| 89 | #define EFI_MAX_MEMORY_TYPE 15 | ||
| 89 | 90 | ||
| 90 | /* Attribute values: */ | 91 | /* Attribute values: */ |
| 91 | #define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ | 92 | #define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ |
| @@ -96,6 +97,8 @@ typedef struct { | |||
| 96 | #define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ | 97 | #define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ |
| 97 | #define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ | 98 | #define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ |
| 98 | #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ | 99 | #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ |
| 100 | #define EFI_MEMORY_MORE_RELIABLE \ | ||
| 101 | ((u64)0x0000000000010000ULL) /* higher reliability */ | ||
| 99 | #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ | 102 | #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ |
| 100 | #define EFI_MEMORY_DESCRIPTOR_VERSION 1 | 103 | #define EFI_MEMORY_DESCRIPTOR_VERSION 1 |
| 101 | 104 | ||
| @@ -868,6 +871,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos | |||
| 868 | extern void efi_late_init(void); | 871 | extern void efi_late_init(void); |
| 869 | extern void efi_free_boot_services(void); | 872 | extern void efi_free_boot_services(void); |
| 870 | extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); | 873 | extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); |
| 874 | extern void efi_find_mirror(void); | ||
| 871 | #else | 875 | #else |
| 872 | static inline void efi_late_init(void) {} | 876 | static inline void efi_late_init(void) {} |
| 873 | static inline void efi_free_boot_services(void) {} | 877 | static inline void efi_free_boot_services(void) {} |
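EFI_MEMORY_MORE_RELIABLE marks ranges the firmware reports as mirrored memory, which is presumably what the new efi_find_mirror() scans for. A purely illustrative check on a single descriptor:

#include <linux/efi.h>

/* Does this EFI memory descriptor describe mirrored ("more reliable")
 * conventional memory? */
static bool example_desc_is_mirrored(const efi_memory_desc_t *md)
{
        return (md->attribute & EFI_MEMORY_MORE_RELIABLE) &&
               md->type == EFI_CONVENTIONAL_MEMORY;
}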
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 45a91474487d..638b324f0291 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques | |||
| 39 | typedef int (elevator_init_fn) (struct request_queue *, | 39 | typedef int (elevator_init_fn) (struct request_queue *, |
| 40 | struct elevator_type *e); | 40 | struct elevator_type *e); |
| 41 | typedef void (elevator_exit_fn) (struct elevator_queue *); | 41 | typedef void (elevator_exit_fn) (struct elevator_queue *); |
| 42 | typedef void (elevator_registered_fn) (struct request_queue *); | ||
| 42 | 43 | ||
| 43 | struct elevator_ops | 44 | struct elevator_ops |
| 44 | { | 45 | { |
| @@ -68,6 +69,7 @@ struct elevator_ops | |||
| 68 | 69 | ||
| 69 | elevator_init_fn *elevator_init_fn; | 70 | elevator_init_fn *elevator_init_fn; |
| 70 | elevator_exit_fn *elevator_exit_fn; | 71 | elevator_exit_fn *elevator_exit_fn; |
| 72 | elevator_registered_fn *elevator_registered_fn; | ||
| 71 | }; | 73 | }; |
| 72 | 74 | ||
| 73 | #define ELV_NAME_MAX (16) | 75 | #define ELV_NAME_MAX (16) |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 606563ef8a72..9012f8775208 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
| @@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr) | |||
| 110 | */ | 110 | */ |
| 111 | static inline bool is_multicast_ether_addr(const u8 *addr) | 111 | static inline bool is_multicast_ether_addr(const u8 *addr) |
| 112 | { | 112 | { |
| 113 | return 0x01 & addr[0]; | 113 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
| 114 | u32 a = *(const u32 *)addr; | ||
| 115 | #else | ||
| 116 | u16 a = *(const u16 *)addr; | ||
| 117 | #endif | ||
| 118 | #ifdef __BIG_ENDIAN | ||
| 119 | return 0x01 & (a >> ((sizeof(a) * 8) - 8)); | ||
| 120 | #else | ||
| 121 | return 0x01 & a; | ||
| 122 | #endif | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2]) | ||
| 126 | { | ||
| 127 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 | ||
| 128 | #ifdef __BIG_ENDIAN | ||
| 129 | return 0x01 & ((*(const u64 *)addr) >> 56); | ||
| 130 | #else | ||
| 131 | return 0x01 & (*(const u64 *)addr); | ||
| 132 | #endif | ||
| 133 | #else | ||
| 134 | return is_multicast_ether_addr(addr); | ||
| 135 | #endif | ||
| 114 | } | 136 | } |
| 115 | 137 | ||
| 116 | /** | 138 | /** |
| @@ -169,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr) | |||
| 169 | } | 191 | } |
| 170 | 192 | ||
| 171 | /** | 193 | /** |
| 194 | * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol value | ||
| 195 | * @proto: Ethertype/length value to be tested | ||
| 196 | * | ||
| 197 | * Check that the value from the Ethertype/length field is a valid Ethertype. | ||
| 198 | * | ||
| 199 | * Return true if the value is an 802.3 supported Ethertype. | ||
| 200 | */ | ||
| 201 | static inline bool eth_proto_is_802_3(__be16 proto) | ||
| 202 | { | ||
| 203 | #ifndef __BIG_ENDIAN | ||
| 204 | /* if CPU is little endian mask off bits representing LSB */ | ||
| 205 | proto &= htons(0xFF00); | ||
| 206 | #endif | ||
| 207 | /* cast both to u16 and compare since LSB can be ignored */ | ||
| 208 | return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN); | ||
| 209 | } | ||
| 210 | |||
| 211 | /** | ||
| 172 | * eth_random_addr - Generate software assigned random Ethernet address | 212 | * eth_random_addr - Generate software assigned random Ethernet address |
| 173 | * @addr: Pointer to a six-byte array containing the Ethernet address | 213 | * @addr: Pointer to a six-byte array containing the Ethernet address |
| 174 | * | 214 | * |
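Because ETH_P_802_3_MIN is 0x0600, only the high byte of the big-endian Ethertype matters for the >= comparison, which is what lets eth_proto_is_802_3() mask off the low bits on little-endian CPUs; similarly, the [6+2] hint on is_multicast_ether_addr_64bits() signals that two readable bytes must follow the address. A hedged sketch of both, using struct ethhdr, where those guarantees hold:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

/* Does the Ethertype/length field of this frame carry a protocol
 * (>= 0x0600) rather than an 802.3 length? */
static bool example_frame_has_ethertype(const struct ethhdr *eth)
{
        return eth_proto_is_802_3(eth->h_proto);
}

/* The _64bits variant may read a full 8 bytes, so it is only safe when two
 * readable bytes follow the address; inside struct ethhdr, h_source follows
 * h_dest, so the destination address qualifies. */
static bool example_dest_is_multicast(const struct ethhdr *eth)
{
        return is_multicast_ether_addr_64bits(eth->h_dest);
}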
diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 36f49c405dfb..b16d929fa75f 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * External connector (extcon) class driver | 2 | * External connector (extcon) class driver |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2015 Samsung Electronics | ||
| 5 | * Author: Chanwoo Choi <cw00.choi@samsung.com> | ||
| 6 | * | ||
| 4 | * Copyright (C) 2012 Samsung Electronics | 7 | * Copyright (C) 2012 Samsung Electronics |
| 5 | * Author: Donggeun Kim <dg77.kim@samsung.com> | 8 | * Author: Donggeun Kim <dg77.kim@samsung.com> |
| 6 | * Author: MyungJoo Ham <myungjoo.ham@samsung.com> | 9 | * Author: MyungJoo Ham <myungjoo.ham@samsung.com> |
| @@ -27,50 +30,35 @@ | |||
| 27 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
| 28 | #include <linux/sysfs.h> | 31 | #include <linux/sysfs.h> |
| 29 | 32 | ||
| 30 | #define SUPPORTED_CABLE_MAX 32 | ||
| 31 | #define CABLE_NAME_MAX 30 | ||
| 32 | |||
| 33 | /* | 33 | /* |
| 34 | * The standard cable name is to help support general notifier | 34 | * Define the unique id of supported external connectors |
| 35 | * and notifiee device drivers to share the common names. | ||
| 36 | * Please use standard cable names unless your notifier device has | ||
| 37 | * a very unique and abnormal cable or | ||
| 38 | * the cable type is supposed to be used with only one unique | ||
| 39 | * pair of notifier/notifiee devices. | ||
| 40 | * | ||
| 41 | * Please add any other "standard" cables used with extcon dev. | ||
| 42 | * | ||
| 43 | * You may add a dot and number to specify version or specification | ||
| 44 | * of the specific cable if it is required. (e.g., "Fast-charger.18" | ||
| 45 | * and "Fast-charger.10" for 1.8A and 1.0A chargers) | ||
| 46 | * However, the notifiee and notifier should be able to handle such | ||
| 47 | * string and if the notifiee can negotiate the protocol or identify, | ||
| 48 | * you don't need such convention. This convention is helpful when | ||
| 49 | * notifier can distinguish but notifiee cannot. | ||
| 50 | */ | 35 | */ |
| 51 | enum extcon_cable_name { | 36 | #define EXTCON_NONE 0 |
| 52 | EXTCON_USB = 0, | 37 | |
| 53 | EXTCON_USB_HOST, | 38 | #define EXTCON_USB 1 /* USB connector */ |
| 54 | EXTCON_TA, /* Travel Adaptor */ | 39 | #define EXTCON_USB_HOST 2 |
| 55 | EXTCON_FAST_CHARGER, | 40 | |
| 56 | EXTCON_SLOW_CHARGER, | 41 | #define EXTCON_TA 3 /* Charger connector */ |
| 57 | EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */ | 42 | #define EXTCON_FAST_CHARGER 4 |
| 58 | EXTCON_HDMI, | 43 | #define EXTCON_SLOW_CHARGER 5 |
| 59 | EXTCON_MHL, | 44 | #define EXTCON_CHARGE_DOWNSTREAM 6 |
| 60 | EXTCON_DVI, | 45 | |
| 61 | EXTCON_VGA, | 46 | #define EXTCON_LINE_IN 7 /* Audio/Video connector */ |
| 62 | EXTCON_DOCK, | 47 | #define EXTCON_LINE_OUT 8 |
| 63 | EXTCON_LINE_IN, | 48 | #define EXTCON_MICROPHONE 9 |
| 64 | EXTCON_LINE_OUT, | 49 | #define EXTCON_HEADPHONE 10 |
| 65 | EXTCON_MIC_IN, | 50 | #define EXTCON_HDMI 11 |
| 66 | EXTCON_HEADPHONE_OUT, | 51 | #define EXTCON_MHL 12 |
| 67 | EXTCON_SPDIF_IN, | 52 | #define EXTCON_DVI 13 |
| 68 | EXTCON_SPDIF_OUT, | 53 | #define EXTCON_VGA 14 |
| 69 | EXTCON_VIDEO_IN, | 54 | #define EXTCON_SPDIF_IN 15 |
| 70 | EXTCON_VIDEO_OUT, | 55 | #define EXTCON_SPDIF_OUT 16 |
| 71 | EXTCON_MECHANICAL, | 56 | #define EXTCON_VIDEO_IN 17 |
| 72 | }; | 57 | #define EXTCON_VIDEO_OUT 18 |
| 73 | extern const char extcon_cable_name[][CABLE_NAME_MAX + 1]; | 58 | |
| 59 | #define EXTCON_DOCK 19 /* Misc connector */ | ||
| 60 | #define EXTCON_JIG 20 | ||
| 61 | #define EXTCON_MECHANICAL 21 | ||
| 74 | 62 | ||
| 75 | struct extcon_cable; | 63 | struct extcon_cable; |
| 76 | 64 | ||
| @@ -78,7 +66,7 @@ struct extcon_cable; | |||
| 78 | * struct extcon_dev - An extcon device represents one external connector. | 66 | * struct extcon_dev - An extcon device represents one external connector. |
| 79 | * @name: The name of this extcon device. Parent device name is | 67 | * @name: The name of this extcon device. Parent device name is |
| 80 | * used if NULL. | 68 | * used if NULL. |
| 81 | * @supported_cable: Array of supported cable names ending with NULL. | 69 | * @supported_cable: Array of supported cable names ending with EXTCON_NONE. |
| 82 | * If supported_cable is NULL, cable name related APIs | 70 | * If supported_cable is NULL, cable name related APIs |
| 83 | * are disabled. | 71 | * are disabled. |
| 84 | * @mutually_exclusive: Array of mutually exclusive set of cables that cannot | 72 | * @mutually_exclusive: Array of mutually exclusive set of cables that cannot |
| @@ -89,16 +77,14 @@ struct extcon_cable; | |||
| 89 | * be attached simultaneously. {0x7, 0} is equivalent to | 77 | * be attached simultaneously. {0x7, 0} is equivalent to |
| 90 | * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there | 78 | * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there |
| 91 | * can be no simultaneous connections. | 79 | * can be no simultaneous connections. |
| 92 | * @print_name: An optional callback to override the method to print the | ||
| 93 | * name of the extcon device. | ||
| 94 | * @print_state: An optional callback to override the method to print the | 80 | * @print_state: An optional callback to override the method to print the |
| 95 | * status of the extcon device. | 81 | * status of the extcon device. |
| 96 | * @dev: Device of this extcon. | 82 | * @dev: Device of this extcon. |
| 97 | * @state: Attach/detach state of this extcon. Do not provide at | 83 | * @state: Attach/detach state of this extcon. Do not provide at |
| 98 | * register-time. | 84 | * register-time. |
| 99 | * @nh: Notifier for the state change events from this extcon | 85 | * @nh: Notifier for the state change events from this extcon |
| 100 | * @entry: To support list of extcon devices so that users can search | 86 | * @entry: To support list of extcon devices so that users can |
| 101 | * for extcon devices based on the extcon name. | 87 | * search for extcon devices based on the extcon name. |
| 102 | * @lock: | 88 | * @lock: |
| 103 | * @max_supported: Internal value to store the number of cables. | 89 | * @max_supported: Internal value to store the number of cables. |
| 104 | * @extcon_dev_type: Device_type struct to provide attribute_groups | 90 | * @extcon_dev_type: Device_type struct to provide attribute_groups |
| @@ -113,16 +99,15 @@ struct extcon_cable; | |||
| 113 | struct extcon_dev { | 99 | struct extcon_dev { |
| 114 | /* Optional user initializing data */ | 100 | /* Optional user initializing data */ |
| 115 | const char *name; | 101 | const char *name; |
| 116 | const char **supported_cable; | 102 | const unsigned int *supported_cable; |
| 117 | const u32 *mutually_exclusive; | 103 | const u32 *mutually_exclusive; |
| 118 | 104 | ||
| 119 | /* Optional callbacks to override class functions */ | 105 | /* Optional callbacks to override class functions */ |
| 120 | ssize_t (*print_name)(struct extcon_dev *edev, char *buf); | ||
| 121 | ssize_t (*print_state)(struct extcon_dev *edev, char *buf); | 106 | ssize_t (*print_state)(struct extcon_dev *edev, char *buf); |
| 122 | 107 | ||
| 123 | /* Internal data. Please do not set. */ | 108 | /* Internal data. Please do not set. */ |
| 124 | struct device dev; | 109 | struct device dev; |
| 125 | struct raw_notifier_head nh; | 110 | struct raw_notifier_head *nh; |
| 126 | struct list_head entry; | 111 | struct list_head entry; |
| 127 | int max_supported; | 112 | int max_supported; |
| 128 | spinlock_t lock; /* could be called by irq handler */ | 113 | spinlock_t lock; /* could be called by irq handler */ |
| @@ -161,8 +146,6 @@ struct extcon_cable { | |||
| 161 | /** | 146 | /** |
| 162 | * struct extcon_specific_cable_nb - An internal data for | 147 | * struct extcon_specific_cable_nb - An internal data for |
| 163 | * extcon_register_interest(). | 148 | * extcon_register_interest(). |
| 164 | * @internal_nb: A notifier block bridging extcon notifier | ||
| 165 | * and cable notifier. | ||
| 166 | * @user_nb: user provided notifier block for events from | 149 | * @user_nb: user provided notifier block for events from |
| 167 | * a specific cable. | 150 | * a specific cable. |
| 168 | * @cable_index: the target cable. | 151 | * @cable_index: the target cable. |
| @@ -170,7 +153,6 @@ struct extcon_cable { | |||
| 170 | * @previous_value: the saved previous event value. | 153 | * @previous_value: the saved previous event value. |
| 171 | */ | 154 | */ |
| 172 | struct extcon_specific_cable_nb { | 155 | struct extcon_specific_cable_nb { |
| 173 | struct notifier_block internal_nb; | ||
| 174 | struct notifier_block *user_nb; | 156 | struct notifier_block *user_nb; |
| 175 | int cable_index; | 157 | int cable_index; |
| 176 | struct extcon_dev *edev; | 158 | struct extcon_dev *edev; |
| @@ -194,10 +176,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); | |||
| 194 | /* | 176 | /* |
| 195 | * Following APIs control the memory of extcon device. | 177 | * Following APIs control the memory of extcon device. |
| 196 | */ | 178 | */ |
| 197 | extern struct extcon_dev *extcon_dev_allocate(const char **cables); | 179 | extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); |
| 198 | extern void extcon_dev_free(struct extcon_dev *edev); | 180 | extern void extcon_dev_free(struct extcon_dev *edev); |
| 199 | extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, | 181 | extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, |
| 200 | const char **cables); | 182 | const unsigned int *cable); |
| 201 | extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); | 183 | extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); |
| 202 | 184 | ||
| 203 | /* | 185 | /* |
| @@ -216,13 +198,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state); | |||
| 216 | 198 | ||
| 217 | /* | 199 | /* |
| 218 | * get/set_cable_state access each bit of the 32b encoded state value. | 200 | * get/set_cable_state access each bit of the 32b encoded state value. |
| 219 | * They are used to access the status of each cable based on the cable_name | 201 | * They are used to access the status of each cable based on the cable id. |
| 220 | * or cable_index, which is retrieved by extcon_find_cable_index | ||
| 221 | */ | 202 | */ |
| 222 | extern int extcon_find_cable_index(struct extcon_dev *sdev, | 203 | extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id); |
| 223 | const char *cable_name); | 204 | extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, |
| 224 | extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index); | ||
| 225 | extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index, | ||
| 226 | bool cable_state); | 205 | bool cable_state); |
| 227 | 206 | ||
| 228 | extern int extcon_get_cable_state(struct extcon_dev *edev, | 207 | extern int extcon_get_cable_state(struct extcon_dev *edev, |
| @@ -249,16 +228,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb); | |||
| 249 | * we do not recommend using this for normal 'notifiee' device drivers who | 228 | * we do not recommend using this for normal 'notifiee' device drivers who |
| 250 | * want to be notified by a specific external port of the notifier. | 229 | * want to be notified by a specific external port of the notifier. |
| 251 | */ | 230 | */ |
| 252 | extern int extcon_register_notifier(struct extcon_dev *edev, | 231 | extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, |
| 232 | struct notifier_block *nb); | ||
| 233 | extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, | ||
| 253 | struct notifier_block *nb); | 234 | struct notifier_block *nb); |
| 254 | extern int extcon_unregister_notifier(struct extcon_dev *edev, | ||
| 255 | struct notifier_block *nb); | ||
| 256 | 235 | ||
| 257 | /* | 236 | /* |
| 258 | * Following API get the extcon device from devicetree. | 237 | * Following API get the extcon device from devicetree. |
| 259 | * This function use phandle of devicetree to get extcon device directly. | 238 | * This function use phandle of devicetree to get extcon device directly. |
| 260 | */ | 239 | */ |
| 261 | extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); | 240 | extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, |
| 241 | int index); | ||
| 242 | |||
| 243 | /* Following API to get information of extcon device */ | ||
| 244 | extern const char *extcon_get_edev_name(struct extcon_dev *edev); | ||
| 245 | |||
| 262 | #else /* CONFIG_EXTCON */ | 246 | #else /* CONFIG_EXTCON */ |
| 263 | static inline int extcon_dev_register(struct extcon_dev *edev) | 247 | static inline int extcon_dev_register(struct extcon_dev *edev) |
| 264 | { | 248 | { |
| @@ -276,7 +260,7 @@ static inline int devm_extcon_dev_register(struct device *dev, | |||
| 276 | static inline void devm_extcon_dev_unregister(struct device *dev, | 260 | static inline void devm_extcon_dev_unregister(struct device *dev, |
| 277 | struct extcon_dev *edev) { } | 261 | struct extcon_dev *edev) { } |
| 278 | 262 | ||
| 279 | static inline struct extcon_dev *extcon_dev_allocate(const char **cables) | 263 | static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable) |
| 280 | { | 264 | { |
| 281 | return ERR_PTR(-ENOSYS); | 265 | return ERR_PTR(-ENOSYS); |
| 282 | } | 266 | } |
| @@ -284,7 +268,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables) | |||
| 284 | static inline void extcon_dev_free(struct extcon_dev *edev) { } | 268 | static inline void extcon_dev_free(struct extcon_dev *edev) { } |
| 285 | 269 | ||
| 286 | static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, | 270 | static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, |
| 287 | const char **cables) | 271 | const unsigned int *cable) |
| 288 | { | 272 | { |
| 289 | return ERR_PTR(-ENOSYS); | 273 | return ERR_PTR(-ENOSYS); |
| 290 | } | 274 | } |
| @@ -307,20 +291,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask, | |||
| 307 | return 0; | 291 | return 0; |
| 308 | } | 292 | } |
| 309 | 293 | ||
| 310 | static inline int extcon_find_cable_index(struct extcon_dev *edev, | ||
| 311 | const char *cable_name) | ||
| 312 | { | ||
| 313 | return 0; | ||
| 314 | } | ||
| 315 | |||
| 316 | static inline int extcon_get_cable_state_(struct extcon_dev *edev, | 294 | static inline int extcon_get_cable_state_(struct extcon_dev *edev, |
| 317 | int cable_index) | 295 | unsigned int id) |
| 318 | { | 296 | { |
| 319 | return 0; | 297 | return 0; |
| 320 | } | 298 | } |
| 321 | 299 | ||
| 322 | static inline int extcon_set_cable_state_(struct extcon_dev *edev, | 300 | static inline int extcon_set_cable_state_(struct extcon_dev *edev, |
| 323 | int cable_index, bool cable_state) | 301 | unsigned int id, bool cable_state) |
| 324 | { | 302 | { |
| 325 | return 0; | 303 | return 0; |
| 326 | } | 304 | } |
| @@ -343,13 +321,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) | |||
| 343 | } | 321 | } |
| 344 | 322 | ||
| 345 | static inline int extcon_register_notifier(struct extcon_dev *edev, | 323 | static inline int extcon_register_notifier(struct extcon_dev *edev, |
| 346 | struct notifier_block *nb) | 324 | unsigned int id, |
| 325 | struct notifier_block *nb) | ||
| 347 | { | 326 | { |
| 348 | return 0; | 327 | return 0; |
| 349 | } | 328 | } |
| 350 | 329 | ||
| 351 | static inline int extcon_unregister_notifier(struct extcon_dev *edev, | 330 | static inline int extcon_unregister_notifier(struct extcon_dev *edev, |
| 352 | struct notifier_block *nb) | 331 | unsigned int id, |
| 332 | struct notifier_block *nb) | ||
| 353 | { | 333 | { |
| 354 | return 0; | 334 | return 0; |
| 355 | } | 335 | } |
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h index 9ca958c4e94c..53c60806bcfb 100644 --- a/include/linux/extcon/extcon-adc-jack.h +++ b/include/linux/extcon/extcon-adc-jack.h | |||
| @@ -44,7 +44,7 @@ struct adc_jack_cond { | |||
| 44 | * @consumer_channel: Unique name to identify the channel on the consumer | 44 | * @consumer_channel: Unique name to identify the channel on the consumer |
| 45 | * side. This typically describes the channels used within | 45 | * side. This typically describes the channels used within |
| 46 | * the consumer. E.g. 'battery_voltage' | 46 | * the consumer. E.g. 'battery_voltage' |
| 47 | * @cable_names: array of cable names ending with null. | 47 | * @cable_names: array of extcon id for supported cables. |
| 48 | * @adc_contitions: array of struct adc_jack_cond conditions ending | 48 | * @adc_contitions: array of struct adc_jack_cond conditions ending |
| 49 | * with .state = 0 entry. This describes how to decode | 49 | * with .state = 0 entry. This describes how to decode |
| 50 | * adc values into extcon state. | 50 | * adc values into extcon state. |
| @@ -58,8 +58,7 @@ struct adc_jack_pdata { | |||
| 58 | const char *name; | 58 | const char *name; |
| 59 | const char *consumer_channel; | 59 | const char *consumer_channel; |
| 60 | 60 | ||
| 61 | /* The last entry should be NULL */ | 61 | const enum extcon *cable_names; |
| 62 | const char **cable_names; | ||
| 63 | 62 | ||
| 64 | /* The last entry's state should be 0 */ | 63 | /* The last entry's state should be 0 */ |
| 65 | struct adc_jack_cond *adc_conditions; | 64 | struct adc_jack_cond *adc_conditions; |
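With cable_names now an array of extcon ids rather than a NULL-terminated string list, adc-jack platform data is built from id constants. A sketch under that assumption; the specific ids, thresholds and the EXTCON_NONE sentinel are illustrative, not defined by this header:

#include <linux/bitops.h>
#include <linux/extcon/extcon-adc-jack.h>

/* illustrative cable list; assumed to be terminated by EXTCON_NONE */
static const enum extcon my_adc_jack_cables[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
        EXTCON_NONE,
};

/* the last entry's state stays 0, as the header requires */
static struct adc_jack_cond my_adc_jack_conditions[] = {
        { .state = BIT(0), .min_adc = 0,   .max_adc = 500  },
        { .state = BIT(1), .min_adc = 501, .max_adc = 1000 },
        { },
};

static struct adc_jack_pdata my_adc_jack_pdata = {
        .name             = "my-adc-jack",
        .consumer_channel = "jack_voltage",
        .cable_names      = my_adc_jack_cables,
        .adc_conditions   = my_adc_jack_conditions,
};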
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 591f8c3ef410..920408a21ffd 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #define MAX_ACTIVE_NODE_LOGS 8 | 50 | #define MAX_ACTIVE_NODE_LOGS 8 |
| 51 | #define MAX_ACTIVE_DATA_LOGS 8 | 51 | #define MAX_ACTIVE_DATA_LOGS 8 |
| 52 | 52 | ||
| 53 | #define VERSION_LEN 256 | ||
| 54 | |||
| 53 | /* | 55 | /* |
| 54 | * For superblock | 56 | * For superblock |
| 55 | */ | 57 | */ |
| @@ -86,6 +88,12 @@ struct f2fs_super_block { | |||
| 86 | __le32 extension_count; /* # of extensions below */ | 88 | __le32 extension_count; /* # of extensions below */ |
| 87 | __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ | 89 | __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ |
| 88 | __le32 cp_payload; | 90 | __le32 cp_payload; |
| 91 | __u8 version[VERSION_LEN]; /* the kernel version */ | ||
| 92 | __u8 init_version[VERSION_LEN]; /* the initial kernel version */ | ||
| 93 | __le32 feature; /* defined features */ | ||
| 94 | __u8 encryption_level; /* versioning level for encryption */ | ||
| 95 | __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ | ||
| 96 | __u8 reserved[871]; /* valid reserved region */ | ||
| 89 | } __packed; | 97 | } __packed; |
| 90 | 98 | ||
| 91 | /* | 99 | /* |
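The new superblock fields record the kernel versions that created and last wrote the image plus a feature bitmap and encryption salt, with reserved[] shrunk so the packed on-disk layout keeps its size. A hedged sketch of testing a bit in the little-endian feature word; the flag value is a placeholder, not defined by this header:

#include <linux/kernel.h>
#include <linux/f2fs_fs.h>

/* placeholder feature bit; the real flag values live with the f2fs code */
#define MY_F2FS_FEATURE_ENCRYPT 0x0001

static bool my_f2fs_sb_has_encrypt(const struct f2fs_super_block *raw_sb)
{
        /* the feature word is stored little-endian on disk */
        return le32_to_cpu(raw_sb->feature) & MY_F2FS_FEATURE_ENCRYPT;
}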
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 230f87bdf5ad..fbb88740634a 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
| @@ -47,6 +47,9 @@ struct files_struct { | |||
| 47 | * read mostly part | 47 | * read mostly part |
| 48 | */ | 48 | */ |
| 49 | atomic_t count; | 49 | atomic_t count; |
| 50 | bool resize_in_progress; | ||
| 51 | wait_queue_head_t resize_wait; | ||
| 52 | |||
| 50 | struct fdtable __rcu *fdt; | 53 | struct fdtable __rcu *fdt; |
| 51 | struct fdtable fdtab; | 54 | struct fdtable fdtab; |
| 52 | /* | 55 | /* |
diff --git a/include/linux/filter.h b/include/linux/filter.h index fa11b3a367be..17724f6ea983 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -207,6 +207,16 @@ struct bpf_prog_aux; | |||
| 207 | .off = OFF, \ | 207 | .off = OFF, \ |
| 208 | .imm = 0 }) | 208 | .imm = 0 }) |
| 209 | 209 | ||
| 210 | /* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ | ||
| 211 | |||
| 212 | #define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ | ||
| 213 | ((struct bpf_insn) { \ | ||
| 214 | .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ | ||
| 215 | .dst_reg = DST, \ | ||
| 216 | .src_reg = SRC, \ | ||
| 217 | .off = OFF, \ | ||
| 218 | .imm = 0 }) | ||
| 219 | |||
| 210 | /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ | 220 | /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ |
| 211 | 221 | ||
| 212 | #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ | 222 | #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ |
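BPF_STX_XADD builds the atomic-add form of the store: the value in src_reg is added to memory at dst_reg + off. A small in-kernel instruction sketch, assuming R1 already holds a valid pointer to an 8-byte counter:

#include <linux/filter.h>

static const struct bpf_insn my_counter_insns[] = {
        BPF_MOV64_IMM(BPF_REG_2, 1),                    /* r2 = 1 */
        BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 0),  /* *(u64 *)(r1 + 0) += r2 */
        BPF_MOV64_IMM(BPF_REG_0, 0),                    /* r0 = 0 */
        BPF_EXIT_INSN(),                                /* return r0 */
};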
| @@ -267,6 +277,14 @@ struct bpf_prog_aux; | |||
| 267 | .off = 0, \ | 277 | .off = 0, \ |
| 268 | .imm = 0 }) | 278 | .imm = 0 }) |
| 269 | 279 | ||
| 280 | /* Internal classic blocks for direct assignment */ | ||
| 281 | |||
| 282 | #define __BPF_STMT(CODE, K) \ | ||
| 283 | ((struct sock_filter) BPF_STMT(CODE, K)) | ||
| 284 | |||
| 285 | #define __BPF_JUMP(CODE, K, JT, JF) \ | ||
| 286 | ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF)) | ||
| 287 | |||
| 270 | #define bytes_to_bpf_size(bytes) \ | 288 | #define bytes_to_bpf_size(bytes) \ |
| 271 | ({ \ | 289 | ({ \ |
| 272 | int bpf_size = -EINVAL; \ | 290 | int bpf_size = -EINVAL; \ |
| @@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) | |||
| 360 | 378 | ||
| 361 | int sk_filter(struct sock *sk, struct sk_buff *skb); | 379 | int sk_filter(struct sock *sk, struct sk_buff *skb); |
| 362 | 380 | ||
| 363 | void bpf_prog_select_runtime(struct bpf_prog *fp); | 381 | int bpf_prog_select_runtime(struct bpf_prog *fp); |
| 364 | void bpf_prog_free(struct bpf_prog *fp); | 382 | void bpf_prog_free(struct bpf_prog *fp); |
| 365 | 383 | ||
| 366 | int bpf_convert_filter(struct sock_filter *prog, int len, | ||
| 367 | struct bpf_insn *new_prog, int *new_len); | ||
| 368 | |||
| 369 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); | 384 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); |
| 370 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, | 385 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, |
| 371 | gfp_t gfp_extra_flags); | 386 | gfp_t gfp_extra_flags); |
| @@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp) | |||
| 377 | __bpf_prog_free(fp); | 392 | __bpf_prog_free(fp); |
| 378 | } | 393 | } |
| 379 | 394 | ||
| 395 | typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, | ||
| 396 | unsigned int flen); | ||
| 397 | |||
| 380 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); | 398 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); |
| 399 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, | ||
| 400 | bpf_aux_classic_check_t trans); | ||
| 381 | void bpf_prog_destroy(struct bpf_prog *fp); | 401 | void bpf_prog_destroy(struct bpf_prog *fp); |
| 382 | 402 | ||
| 383 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 403 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
| 384 | int sk_attach_bpf(u32 ufd, struct sock *sk); | 404 | int sk_attach_bpf(u32 ufd, struct sock *sk); |
| 385 | int sk_detach_filter(struct sock *sk); | 405 | int sk_detach_filter(struct sock *sk); |
| 386 | |||
| 387 | int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); | ||
| 388 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, | 406 | int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, |
| 389 | unsigned int len); | 407 | unsigned int len); |
| 390 | 408 | ||
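bpf_prog_create_from_user() attaches a classic filter supplied from user memory and runs an optional caller-supplied check (bpf_aux_classic_check_t) over the instructions before translation; bpf_check_classic() is no longer exported here. A sketch of such a checker, purely illustrative and much simpler than what real users such as seccomp need:

#include <linux/errno.h>
#include <linux/filter.h>

/* reject any classic filter that is not just return statements */
static int my_only_returns_check(struct sock_filter *filter, unsigned int flen)
{
        unsigned int i;

        for (i = 0; i < flen; i++)
                if (BPF_CLASS(filter[i].code) != BPF_RET)
                        return -EINVAL;

        return 0;
}

/* usage sketch: bpf_prog_create_from_user(&prog, &fprog, my_only_returns_check); */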
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 8293262401de..e65ef959546c 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h | |||
| @@ -6,16 +6,16 @@ | |||
| 6 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
| 7 | 7 | ||
| 8 | struct frontswap_ops { | 8 | struct frontswap_ops { |
| 9 | void (*init)(unsigned); | 9 | void (*init)(unsigned); /* this swap type was just swapon'ed */ |
| 10 | int (*store)(unsigned, pgoff_t, struct page *); | 10 | int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ |
| 11 | int (*load)(unsigned, pgoff_t, struct page *); | 11 | int (*load)(unsigned, pgoff_t, struct page *); /* load a page */ |
| 12 | void (*invalidate_page)(unsigned, pgoff_t); | 12 | void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */ |
| 13 | void (*invalidate_area)(unsigned); | 13 | void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */ |
| 14 | struct frontswap_ops *next; /* private pointer to next ops */ | ||
| 14 | }; | 15 | }; |
| 15 | 16 | ||
| 16 | extern bool frontswap_enabled; | 17 | extern bool frontswap_enabled; |
| 17 | extern struct frontswap_ops * | 18 | extern void frontswap_register_ops(struct frontswap_ops *ops); |
| 18 | frontswap_register_ops(struct frontswap_ops *ops); | ||
| 19 | extern void frontswap_shrink(unsigned long); | 19 | extern void frontswap_shrink(unsigned long); |
| 20 | extern unsigned long frontswap_curr_pages(void); | 20 | extern unsigned long frontswap_curr_pages(void); |
| 21 | extern void frontswap_writethrough(bool); | 21 | extern void frontswap_writethrough(bool); |
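frontswap_register_ops() no longer returns the previously registered ops; backends are chained through the new ->next pointer by the core instead. A minimal registration sketch with placeholder callbacks:

#include <linux/init.h>
#include <linux/frontswap.h>

static void my_fs_init(unsigned type) { }
static int my_fs_store(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* placeholder: reject every store */
}
static int my_fs_load(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* placeholder: nothing to load */
}
static void my_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void my_fs_invalidate_area(unsigned type) { }

static struct frontswap_ops my_frontswap_ops = {
        .init            = my_fs_init,
        .store           = my_fs_store,
        .load            = my_fs_load,
        .invalidate_page = my_fs_invalidate_page,
        .invalidate_area = my_fs_invalidate_area,
        /* .next is managed by the frontswap core */
};

static int __init my_frontswap_init(void)
{
        frontswap_register_ops(&my_frontswap_ops);
        return 0;
}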
diff --git a/include/linux/fs.h b/include/linux/fs.h index b577e801b4af..84b783f277f7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <uapi/linux/fs.h> | 35 | #include <uapi/linux/fs.h> |
| 36 | 36 | ||
| 37 | struct backing_dev_info; | 37 | struct backing_dev_info; |
| 38 | struct bdi_writeback; | ||
| 38 | struct export_operations; | 39 | struct export_operations; |
| 39 | struct hd_geometry; | 40 | struct hd_geometry; |
| 40 | struct iovec; | 41 | struct iovec; |
| @@ -54,7 +55,8 @@ struct vm_fault; | |||
| 54 | 55 | ||
| 55 | extern void __init inode_init(void); | 56 | extern void __init inode_init(void); |
| 56 | extern void __init inode_init_early(void); | 57 | extern void __init inode_init_early(void); |
| 57 | extern void __init files_init(unsigned long); | 58 | extern void __init files_init(void); |
| 59 | extern void __init files_maxfiles_init(void); | ||
| 58 | 60 | ||
| 59 | extern struct files_stat_struct files_stat; | 61 | extern struct files_stat_struct files_stat; |
| 60 | extern unsigned long get_max_files(void); | 62 | extern unsigned long get_max_files(void); |
| @@ -69,6 +71,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock, | |||
| 69 | struct buffer_head *bh_result, int create); | 71 | struct buffer_head *bh_result, int create); |
| 70 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | 72 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, |
| 71 | ssize_t bytes, void *private); | 73 | ssize_t bytes, void *private); |
| 74 | typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); | ||
| 72 | 75 | ||
| 73 | #define MAY_EXEC 0x00000001 | 76 | #define MAY_EXEC 0x00000001 |
| 74 | #define MAY_WRITE 0x00000002 | 77 | #define MAY_WRITE 0x00000002 |
| @@ -634,6 +637,14 @@ struct inode { | |||
| 634 | 637 | ||
| 635 | struct hlist_node i_hash; | 638 | struct hlist_node i_hash; |
| 636 | struct list_head i_wb_list; /* backing dev IO list */ | 639 | struct list_head i_wb_list; /* backing dev IO list */ |
| 640 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 641 | struct bdi_writeback *i_wb; /* the associated cgroup wb */ | ||
| 642 | |||
| 643 | /* foreign inode detection, see wbc_detach_inode() */ | ||
| 644 | int i_wb_frn_winner; | ||
| 645 | u16 i_wb_frn_avg_time; | ||
| 646 | u16 i_wb_frn_history; | ||
| 647 | #endif | ||
| 637 | struct list_head i_lru; /* inode LRU list */ | 648 | struct list_head i_lru; /* inode LRU list */ |
| 638 | struct list_head i_sb_list; | 649 | struct list_head i_sb_list; |
| 639 | union { | 650 | union { |
| @@ -1036,12 +1047,12 @@ extern void locks_remove_file(struct file *); | |||
| 1036 | extern void locks_release_private(struct file_lock *); | 1047 | extern void locks_release_private(struct file_lock *); |
| 1037 | extern void posix_test_lock(struct file *, struct file_lock *); | 1048 | extern void posix_test_lock(struct file *, struct file_lock *); |
| 1038 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); | 1049 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); |
| 1039 | extern int posix_lock_file_wait(struct file *, struct file_lock *); | 1050 | extern int posix_lock_inode_wait(struct inode *, struct file_lock *); |
| 1040 | extern int posix_unblock_lock(struct file_lock *); | 1051 | extern int posix_unblock_lock(struct file_lock *); |
| 1041 | extern int vfs_test_lock(struct file *, struct file_lock *); | 1052 | extern int vfs_test_lock(struct file *, struct file_lock *); |
| 1042 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); | 1053 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); |
| 1043 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); | 1054 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); |
| 1044 | extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); | 1055 | extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl); |
| 1045 | extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); | 1056 | extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); |
| 1046 | extern void lease_get_mtime(struct inode *, struct timespec *time); | 1057 | extern void lease_get_mtime(struct inode *, struct timespec *time); |
| 1047 | extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); | 1058 | extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); |
| @@ -1127,7 +1138,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, | |||
| 1127 | return -ENOLCK; | 1138 | return -ENOLCK; |
| 1128 | } | 1139 | } |
| 1129 | 1140 | ||
| 1130 | static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) | 1141 | static inline int posix_lock_inode_wait(struct inode *inode, |
| 1142 | struct file_lock *fl) | ||
| 1131 | { | 1143 | { |
| 1132 | return -ENOLCK; | 1144 | return -ENOLCK; |
| 1133 | } | 1145 | } |
| @@ -1153,8 +1165,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) | |||
| 1153 | return 0; | 1165 | return 0; |
| 1154 | } | 1166 | } |
| 1155 | 1167 | ||
| 1156 | static inline int flock_lock_file_wait(struct file *filp, | 1168 | static inline int flock_lock_inode_wait(struct inode *inode, |
| 1157 | struct file_lock *request) | 1169 | struct file_lock *request) |
| 1158 | { | 1170 | { |
| 1159 | return -ENOLCK; | 1171 | return -ENOLCK; |
| 1160 | } | 1172 | } |
| @@ -1192,6 +1204,20 @@ static inline void show_fd_locks(struct seq_file *f, | |||
| 1192 | struct file *filp, struct files_struct *files) {} | 1204 | struct file *filp, struct files_struct *files) {} |
| 1193 | #endif /* !CONFIG_FILE_LOCKING */ | 1205 | #endif /* !CONFIG_FILE_LOCKING */ |
| 1194 | 1206 | ||
| 1207 | static inline struct inode *file_inode(const struct file *f) | ||
| 1208 | { | ||
| 1209 | return f->f_inode; | ||
| 1210 | } | ||
| 1211 | |||
| 1212 | static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) | ||
| 1213 | { | ||
| 1214 | return posix_lock_inode_wait(file_inode(filp), fl); | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl) | ||
| 1218 | { | ||
| 1219 | return flock_lock_inode_wait(file_inode(filp), fl); | ||
| 1220 | } | ||
| 1195 | 1221 | ||
| 1196 | struct fasync_struct { | 1222 | struct fasync_struct { |
| 1197 | spinlock_t fa_lock; | 1223 | spinlock_t fa_lock; |
| @@ -1232,6 +1258,8 @@ struct mm_struct; | |||
| 1232 | #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ | 1258 | #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ |
| 1233 | #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ | 1259 | #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ |
| 1234 | 1260 | ||
| 1261 | /* sb->s_iflags */ | ||
| 1262 | #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ | ||
| 1235 | 1263 | ||
| 1236 | /* Possible states of 'frozen' field */ | 1264 | /* Possible states of 'frozen' field */ |
| 1237 | enum { | 1265 | enum { |
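SB_I_CGROUPWB in the new s_iflags word opts a superblock in to cgroup-aware writeback. A one-line sketch of how a filesystem's fill_super could set it; the surrounding function is hypothetical:

#include <linux/fs.h>

static int my_fill_super(struct super_block *sb, void *data, int silent)
{
        /* opt this superblock in to cgroup writeback */
        sb->s_iflags |= SB_I_CGROUPWB;
        /* ... the usual fill_super work continues here ... */
        return 0;
}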
| @@ -1270,6 +1298,7 @@ struct super_block { | |||
| 1270 | const struct quotactl_ops *s_qcop; | 1298 | const struct quotactl_ops *s_qcop; |
| 1271 | const struct export_operations *s_export_op; | 1299 | const struct export_operations *s_export_op; |
| 1272 | unsigned long s_flags; | 1300 | unsigned long s_flags; |
| 1301 | unsigned long s_iflags; /* internal SB_I_* flags */ | ||
| 1273 | unsigned long s_magic; | 1302 | unsigned long s_magic; |
| 1274 | struct dentry *s_root; | 1303 | struct dentry *s_root; |
| 1275 | struct rw_semaphore s_umount; | 1304 | struct rw_semaphore s_umount; |
| @@ -1641,7 +1670,6 @@ struct inode_operations { | |||
| 1641 | int (*set_acl)(struct inode *, struct posix_acl *, int); | 1670 | int (*set_acl)(struct inode *, struct posix_acl *, int); |
| 1642 | 1671 | ||
| 1643 | /* WARNING: probably going away soon, do not use! */ | 1672 | /* WARNING: probably going away soon, do not use! */ |
| 1644 | int (*dentry_open)(struct dentry *, struct file *, const struct cred *); | ||
| 1645 | } ____cacheline_aligned; | 1673 | } ____cacheline_aligned; |
| 1646 | 1674 | ||
| 1647 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | 1675 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, |
| @@ -1806,6 +1834,11 @@ struct super_operations { | |||
| 1806 | * | 1834 | * |
| 1807 | * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). | 1835 | * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). |
| 1808 | * | 1836 | * |
| 1837 | * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to | ||
| 1838 | * synchronize competing switching instances and to tell | ||
| 1839 | * wb stat updates to grab mapping->tree_lock. See | ||
| 1840 | * inode_switch_wb_work_fn() for details. | ||
| 1841 | * | ||
| 1809 | * Q: What is the difference between I_WILL_FREE and I_FREEING? | 1842 | * Q: What is the difference between I_WILL_FREE and I_FREEING? |
| 1810 | */ | 1843 | */ |
| 1811 | #define I_DIRTY_SYNC (1 << 0) | 1844 | #define I_DIRTY_SYNC (1 << 0) |
| @@ -1825,6 +1858,7 @@ struct super_operations { | |||
| 1825 | #define I_DIRTY_TIME (1 << 11) | 1858 | #define I_DIRTY_TIME (1 << 11) |
| 1826 | #define __I_DIRTY_TIME_EXPIRED 12 | 1859 | #define __I_DIRTY_TIME_EXPIRED 12 |
| 1827 | #define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) | 1860 | #define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) |
| 1861 | #define I_WB_SWITCH (1 << 13) | ||
| 1828 | 1862 | ||
| 1829 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) | 1863 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) |
| 1830 | #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) | 1864 | #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) |
| @@ -1898,6 +1932,7 @@ struct file_system_type { | |||
| 1898 | #define FS_HAS_SUBTYPE 4 | 1932 | #define FS_HAS_SUBTYPE 4 |
| 1899 | #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ | 1933 | #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ |
| 1900 | #define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */ | 1934 | #define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */ |
| 1935 | #define FS_USERNS_VISIBLE 32 /* FS must already be visible */ | ||
| 1901 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ | 1936 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ |
| 1902 | struct dentry *(*mount) (struct file_system_type *, int, | 1937 | struct dentry *(*mount) (struct file_system_type *, int, |
| 1903 | const char *, void *); | 1938 | const char *, void *); |
| @@ -1985,7 +2020,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *); | |||
| 1985 | extern int freeze_super(struct super_block *super); | 2020 | extern int freeze_super(struct super_block *super); |
| 1986 | extern int thaw_super(struct super_block *super); | 2021 | extern int thaw_super(struct super_block *super); |
| 1987 | extern bool our_mnt(struct vfsmount *mnt); | 2022 | extern bool our_mnt(struct vfsmount *mnt); |
| 1988 | extern bool fs_fully_visible(struct file_system_type *); | ||
| 1989 | 2023 | ||
| 1990 | extern int current_umask(void); | 2024 | extern int current_umask(void); |
| 1991 | 2025 | ||
| @@ -1993,11 +2027,6 @@ extern void ihold(struct inode * inode); | |||
| 1993 | extern void iput(struct inode *); | 2027 | extern void iput(struct inode *); |
| 1994 | extern int generic_update_time(struct inode *, struct timespec *, int); | 2028 | extern int generic_update_time(struct inode *, struct timespec *, int); |
| 1995 | 2029 | ||
| 1996 | static inline struct inode *file_inode(const struct file *f) | ||
| 1997 | { | ||
| 1998 | return f->f_inode; | ||
| 1999 | } | ||
| 2000 | |||
| 2001 | /* /sys/fs */ | 2030 | /* /sys/fs */ |
| 2002 | extern struct kobject *fs_kobj; | 2031 | extern struct kobject *fs_kobj; |
| 2003 | 2032 | ||
| @@ -2194,7 +2223,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t); | |||
| 2194 | extern struct file *filp_open(const char *, int, umode_t); | 2223 | extern struct file *filp_open(const char *, int, umode_t); |
| 2195 | extern struct file *file_open_root(struct dentry *, struct vfsmount *, | 2224 | extern struct file *file_open_root(struct dentry *, struct vfsmount *, |
| 2196 | const char *, int); | 2225 | const char *, int); |
| 2197 | extern int vfs_open(const struct path *, struct file *, const struct cred *); | ||
| 2198 | extern struct file * dentry_open(const struct path *, int, const struct cred *); | 2226 | extern struct file * dentry_open(const struct path *, int, const struct cred *); |
| 2199 | extern int filp_close(struct file *, fl_owner_t id); | 2227 | extern int filp_close(struct file *, fl_owner_t id); |
| 2200 | 2228 | ||
| @@ -2218,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp); | |||
| 2218 | 2246 | ||
| 2219 | /* fs/dcache.c */ | 2247 | /* fs/dcache.c */ |
| 2220 | extern void __init vfs_caches_init_early(void); | 2248 | extern void __init vfs_caches_init_early(void); |
| 2221 | extern void __init vfs_caches_init(unsigned long); | 2249 | extern void __init vfs_caches_init(void); |
| 2222 | 2250 | ||
| 2223 | extern struct kmem_cache *names_cachep; | 2251 | extern struct kmem_cache *names_cachep; |
| 2224 | 2252 | ||
| @@ -2241,7 +2269,13 @@ extern struct super_block *freeze_bdev(struct block_device *); | |||
| 2241 | extern void emergency_thaw_all(void); | 2269 | extern void emergency_thaw_all(void); |
| 2242 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); | 2270 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); |
| 2243 | extern int fsync_bdev(struct block_device *); | 2271 | extern int fsync_bdev(struct block_device *); |
| 2244 | extern int sb_is_blkdev_sb(struct super_block *sb); | 2272 | |
| 2273 | extern struct super_block *blockdev_superblock; | ||
| 2274 | |||
| 2275 | static inline bool sb_is_blkdev_sb(struct super_block *sb) | ||
| 2276 | { | ||
| 2277 | return sb == blockdev_superblock; | ||
| 2278 | } | ||
| 2245 | #else | 2279 | #else |
| 2246 | static inline void bd_forget(struct inode *inode) {} | 2280 | static inline void bd_forget(struct inode *inode) {} |
| 2247 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } | 2281 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } |
| @@ -2280,6 +2314,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, | |||
| 2280 | extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, | 2314 | extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, |
| 2281 | void *holder); | 2315 | void *holder); |
| 2282 | extern void blkdev_put(struct block_device *bdev, fmode_t mode); | 2316 | extern void blkdev_put(struct block_device *bdev, fmode_t mode); |
| 2317 | extern int __blkdev_reread_part(struct block_device *bdev); | ||
| 2318 | extern int blkdev_reread_part(struct block_device *bdev); | ||
| 2319 | |||
| 2283 | #ifdef CONFIG_SYSFS | 2320 | #ifdef CONFIG_SYSFS |
| 2284 | extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); | 2321 | extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); |
| 2285 | extern void bd_unlink_disk_holder(struct block_device *bdev, | 2322 | extern void bd_unlink_disk_holder(struct block_device *bdev, |
| @@ -2502,6 +2539,8 @@ extern struct file * open_exec(const char *); | |||
| 2502 | extern int is_subdir(struct dentry *, struct dentry *); | 2539 | extern int is_subdir(struct dentry *, struct dentry *); |
| 2503 | extern int path_is_under(struct path *, struct path *); | 2540 | extern int path_is_under(struct path *, struct path *); |
| 2504 | 2541 | ||
| 2542 | extern char *file_path(struct file *, char *, int); | ||
| 2543 | |||
| 2505 | #include <linux/err.h> | 2544 | #include <linux/err.h> |
| 2506 | 2545 | ||
| 2507 | /* needed for stackable file system support */ | 2546 | /* needed for stackable file system support */ |
| @@ -2553,7 +2592,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb); | |||
| 2553 | extern struct inode *new_inode(struct super_block *sb); | 2592 | extern struct inode *new_inode(struct super_block *sb); |
| 2554 | extern void free_inode_nonrcu(struct inode *inode); | 2593 | extern void free_inode_nonrcu(struct inode *inode); |
| 2555 | extern int should_remove_suid(struct dentry *); | 2594 | extern int should_remove_suid(struct dentry *); |
| 2556 | extern int file_remove_suid(struct file *); | 2595 | extern int file_remove_privs(struct file *); |
| 2596 | extern int dentry_needs_remove_privs(struct dentry *dentry); | ||
| 2597 | static inline int file_needs_remove_privs(struct file *file) | ||
| 2598 | { | ||
| 2599 | return dentry_needs_remove_privs(file->f_path.dentry); | ||
| 2600 | } | ||
| 2557 | 2601 | ||
| 2558 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); | 2602 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); |
| 2559 | static inline void insert_inode_hash(struct inode *inode) | 2603 | static inline void insert_inode_hash(struct inode *inode) |
| @@ -2628,9 +2672,13 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, | |||
| 2628 | int dax_clear_blocks(struct inode *, sector_t block, long size); | 2672 | int dax_clear_blocks(struct inode *, sector_t block, long size); |
| 2629 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | 2673 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); |
| 2630 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | 2674 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); |
| 2631 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | 2675 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, |
| 2676 | dax_iodone_t); | ||
| 2677 | int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, | ||
| 2678 | dax_iodone_t); | ||
| 2632 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | 2679 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); |
| 2633 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) | 2680 | #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) |
| 2681 | #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) | ||
| 2634 | 2682 | ||
| 2635 | #ifdef CONFIG_BLOCK | 2683 | #ifdef CONFIG_BLOCK |
| 2636 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, | 2684 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, |
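dax_fault() and the new __dax_fault() now take a dax_iodone_t completion callback so the filesystem can finish work, for example converting unwritten extents, once the faulting range has been zeroed. A hedged sketch of a fault handler under the new signature; my_get_block and the end_io body are stand-ins, not taken from any real filesystem:

#include <linux/fs.h>
#include <linux/mm.h>

/* stand-in get_block; a real filesystem maps iblock to disk blocks here */
static int my_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        return -EIO;
}

static void my_dax_end_io(struct buffer_head *bh_map, int uptodate)
{
        /* e.g. convert an unwritten extent now that the range is zeroed */
}

static int my_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_fault(vma, vmf, my_get_block, my_dax_end_io);
}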
| @@ -2784,6 +2832,8 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned in | |||
| 2784 | extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); | 2832 | extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); |
| 2785 | extern const struct file_operations simple_dir_operations; | 2833 | extern const struct file_operations simple_dir_operations; |
| 2786 | extern const struct inode_operations simple_dir_inode_operations; | 2834 | extern const struct inode_operations simple_dir_inode_operations; |
| 2835 | extern void make_empty_dir_inode(struct inode *inode); | ||
| 2836 | extern bool is_empty_dir_inode(struct inode *inode); | ||
| 2787 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; | 2837 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; |
| 2788 | struct dentry *d_alloc_name(struct dentry *, const char *); | 2838 | struct dentry *d_alloc_name(struct dentry *, const char *); |
| 2789 | extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); | 2839 | extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 771484993ca7..604e1526cd00 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
| @@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq; | |||
| 74 | */ | 74 | */ |
| 75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | 75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); |
| 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); |
| 77 | typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op); | ||
| 77 | 78 | ||
| 78 | enum fscache_operation_state { | 79 | enum fscache_operation_state { |
| 79 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ | 80 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ |
| @@ -109,6 +110,9 @@ struct fscache_operation { | |||
| 109 | * the op in a non-pool thread */ | 110 | * the op in a non-pool thread */ |
| 110 | fscache_operation_processor_t processor; | 111 | fscache_operation_processor_t processor; |
| 111 | 112 | ||
| 113 | /* Operation cancellation cleanup (optional) */ | ||
| 114 | fscache_operation_cancel_t cancel; | ||
| 115 | |||
| 112 | /* operation releaser */ | 116 | /* operation releaser */ |
| 113 | fscache_operation_release_t release; | 117 | fscache_operation_release_t release; |
| 114 | }; | 118 | }; |
| @@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work); | |||
| 119 | extern void fscache_enqueue_operation(struct fscache_operation *); | 123 | extern void fscache_enqueue_operation(struct fscache_operation *); |
| 120 | extern void fscache_op_complete(struct fscache_operation *, bool); | 124 | extern void fscache_op_complete(struct fscache_operation *, bool); |
| 121 | extern void fscache_put_operation(struct fscache_operation *); | 125 | extern void fscache_put_operation(struct fscache_operation *); |
| 122 | 126 | extern void fscache_operation_init(struct fscache_operation *, | |
| 123 | /** | 127 | fscache_operation_processor_t, |
| 124 | * fscache_operation_init - Do basic initialisation of an operation | 128 | fscache_operation_cancel_t, |
| 125 | * @op: The operation to initialise | 129 | fscache_operation_release_t); |
| 126 | * @release: The release function to assign | ||
| 127 | * | ||
| 128 | * Do basic initialisation of an operation. The caller must still set flags, | ||
| 129 | * object and processor if needed. | ||
| 130 | */ | ||
| 131 | static inline void fscache_operation_init(struct fscache_operation *op, | ||
| 132 | fscache_operation_processor_t processor, | ||
| 133 | fscache_operation_release_t release) | ||
| 134 | { | ||
| 135 | INIT_WORK(&op->work, fscache_op_work_func); | ||
| 136 | atomic_set(&op->usage, 1); | ||
| 137 | op->state = FSCACHE_OP_ST_INITIALISED; | ||
| 138 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
| 139 | op->processor = processor; | ||
| 140 | op->release = release; | ||
| 141 | INIT_LIST_HEAD(&op->pend_link); | ||
| 142 | } | ||
| 143 | 130 | ||
| 144 | /* | 131 | /* |
| 145 | * data read operation | 132 | * data read operation |
| 146 | */ | 133 | */ |
| 147 | struct fscache_retrieval { | 134 | struct fscache_retrieval { |
| 148 | struct fscache_operation op; | 135 | struct fscache_operation op; |
| 136 | struct fscache_cookie *cookie; /* The netfs cookie */ | ||
| 149 | struct address_space *mapping; /* netfs pages */ | 137 | struct address_space *mapping; /* netfs pages */ |
| 150 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ | 138 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ |
| 151 | void *context; /* netfs read context (pinned) */ | 139 | void *context; /* netfs read context (pinned) */ |
| @@ -371,6 +359,7 @@ struct fscache_object { | |||
| 371 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ | 359 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ |
| 372 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ | 360 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ |
| 373 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ | 361 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ |
| 362 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ | ||
| 374 | 363 | ||
| 375 | struct list_head cache_link; /* link in cache->object_list */ | 364 | struct list_head cache_link; /* link in cache->object_list */ |
| 376 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | 365 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ |
| @@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object) | |||
| 410 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); | 399 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); |
| 411 | } | 400 | } |
| 412 | 401 | ||
| 413 | static inline bool fscache_object_is_active(struct fscache_object *object) | 402 | static inline bool fscache_cache_is_broken(struct fscache_object *object) |
| 414 | { | 403 | { |
| 415 | return fscache_object_is_available(object) && | 404 | return test_bit(FSCACHE_IOERROR, &object->cache->flags); |
| 416 | fscache_object_is_live(object) && | ||
| 417 | !test_bit(FSCACHE_IOERROR, &object->cache->flags); | ||
| 418 | } | 405 | } |
| 419 | 406 | ||
| 420 | static inline bool fscache_object_is_dead(struct fscache_object *object) | 407 | static inline bool fscache_object_is_active(struct fscache_object *object) |
| 421 | { | 408 | { |
| 422 | return fscache_object_is_dying(object) && | 409 | return fscache_object_is_available(object) && |
| 423 | test_bit(FSCACHE_IOERROR, &object->cache->flags); | 410 | fscache_object_is_live(object) && |
| 411 | !fscache_cache_is_broken(object); | ||
| 424 | } | 412 | } |
| 425 | 413 | ||
| 426 | /** | 414 | /** |
| @@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | |||
| 551 | const void *data, | 539 | const void *data, |
| 552 | uint16_t datalen); | 540 | uint16_t datalen); |
| 553 | 541 | ||
| 542 | extern void fscache_object_retrying_stale(struct fscache_object *object); | ||
| 543 | |||
| 544 | enum fscache_why_object_killed { | ||
| 545 | FSCACHE_OBJECT_IS_STALE, | ||
| 546 | FSCACHE_OBJECT_NO_SPACE, | ||
| 547 | FSCACHE_OBJECT_WAS_RETIRED, | ||
| 548 | FSCACHE_OBJECT_WAS_CULLED, | ||
| 549 | }; | ||
| 550 | extern void fscache_object_mark_killed(struct fscache_object *object, | ||
| 551 | enum fscache_why_object_killed why); | ||
| 552 | |||
| 554 | #endif /* _LINUX_FSCACHE_CACHE_H */ | 553 | #endif /* _LINUX_FSCACHE_CACHE_H */ |
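fscache_operation_init() moves out of line and gains a cancellation callback alongside the processor and release callbacks. A sketch of a cache backend initialising a bare operation under the new signature; real backends embed the operation in a larger structure, and the callback bodies here are placeholders:

#include <linux/slab.h>
#include <linux/fscache-cache.h>

static void my_op_processor(struct fscache_operation *op) { /* do the work */ }
static void my_op_cancel(struct fscache_operation *op) { /* undo any setup */ }
static void my_op_release(struct fscache_operation *op) { kfree(op); }

static struct fscache_operation *my_alloc_op(void)
{
        struct fscache_operation *op;

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return NULL;

        /* processor, cancel and release are all supplied at init time now */
        fscache_operation_init(op, my_op_processor, my_op_cancel, my_op_release);
        return op;
}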
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index a82296af413f..2a2f56b292c1 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define FSL_USB_VER_1_6 1 | 24 | #define FSL_USB_VER_1_6 1 |
| 25 | #define FSL_USB_VER_2_2 2 | 25 | #define FSL_USB_VER_2_2 2 |
| 26 | #define FSL_USB_VER_2_4 3 | 26 | #define FSL_USB_VER_2_4 3 |
| 27 | #define FSL_USB_VER_2_5 4 | ||
| 27 | 28 | ||
| 28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| 29 | 30 | ||
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 0f313f93c586..65a517dd32f7 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -84,8 +84,6 @@ struct fsnotify_fname; | |||
| 84 | * Each group much define these ops. The fsnotify infrastructure will call | 84 | * Each group much define these ops. The fsnotify infrastructure will call |
| 85 | * these operations for each relevant group. | 85 | * these operations for each relevant group. |
| 86 | * | 86 | * |
| 87 | * should_send_event - given a group, inode, and mask this function determines | ||
| 88 | * if the group is interested in this event. | ||
| 89 | * handle_event - main call for a group to handle an fs event | 87 | * handle_event - main call for a group to handle an fs event |
| 90 | * free_group_priv - called when a group refcnt hits 0 to clean up the private union | 88 | * free_group_priv - called when a group refcnt hits 0 to clean up the private union |
| 91 | * freeing_mark - called when a mark is being destroyed for some reason. The group | 89 | * freeing_mark - called when a mark is being destroyed for some reason. The group |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1da602982cf9..6cd8c0ee4b6f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
| 116 | * SAVE_REGS. If another ops with this flag set is already registered | 116 | * SAVE_REGS. If another ops with this flag set is already registered |
| 117 | * for any of the functions that this ops will be registered for, then | 117 | * for any of the functions that this ops will be registered for, then |
| 118 | * this ops will fail to register or set_filter_ip. | 118 | * this ops will fail to register or set_filter_ip. |
| 119 | * PID - Is affected by set_ftrace_pid (allows filtering on those pids) | ||
| 119 | */ | 120 | */ |
| 120 | enum { | 121 | enum { |
| 121 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 122 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
| @@ -132,6 +133,7 @@ enum { | |||
| 132 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | 133 | FTRACE_OPS_FL_MODIFYING = 1 << 11, |
| 133 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, | 134 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, |
| 134 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, | 135 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, |
| 136 | FTRACE_OPS_FL_PID = 1 << 14, | ||
| 135 | }; | 137 | }; |
| 136 | 138 | ||
| 137 | #ifdef CONFIG_DYNAMIC_FTRACE | 139 | #ifdef CONFIG_DYNAMIC_FTRACE |
| @@ -159,6 +161,7 @@ struct ftrace_ops { | |||
| 159 | struct ftrace_ops *next; | 161 | struct ftrace_ops *next; |
| 160 | unsigned long flags; | 162 | unsigned long flags; |
| 161 | void *private; | 163 | void *private; |
| 164 | ftrace_func_t saved_func; | ||
| 162 | int __percpu *disabled; | 165 | int __percpu *disabled; |
| 163 | #ifdef CONFIG_DYNAMIC_FTRACE | 166 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 164 | int nr_trampolines; | 167 | int nr_trampolines; |
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1ccaab44abcc..5383bb1394a1 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h | |||
| @@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, | |||
| 119 | 119 | ||
| 120 | extern struct gen_pool *devm_gen_pool_create(struct device *dev, | 120 | extern struct gen_pool *devm_gen_pool_create(struct device *dev, |
| 121 | int min_alloc_order, int nid); | 121 | int min_alloc_order, int nid); |
| 122 | extern struct gen_pool *dev_get_gen_pool(struct device *dev); | 122 | extern struct gen_pool *gen_pool_get(struct device *dev); |
| 123 | 123 | ||
| 124 | bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, | 124 | bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, |
| 125 | size_t size); | 125 | size_t size); |
| 126 | 126 | ||
| 127 | #ifdef CONFIG_OF | 127 | #ifdef CONFIG_OF |
| 128 | extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, | 128 | extern struct gen_pool *of_gen_pool_get(struct device_node *np, |
| 129 | const char *propname, int index); | 129 | const char *propname, int index); |
| 130 | #else | 130 | #else |
| 131 | static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np, | 131 | static inline struct gen_pool *of_gen_pool_get(struct device_node *np, |
| 132 | const char *propname, int index) | 132 | const char *propname, int index) |
| 133 | { | 133 | { |
| 134 | return NULL; | 134 | return NULL; |
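dev_get_gen_pool() and of_get_named_gen_pool() are renamed to gen_pool_get() and of_gen_pool_get(). A consumer sketch under the new names; the "sram" phandle property is an assumption for illustration:

#include <linux/of.h>
#include <linux/genalloc.h>

static void *my_alloc_from_pool(struct device_node *np, size_t len)
{
        struct gen_pool *pool;

        /* resolve a pool through a devicetree phandle under its new name */
        pool = of_gen_pool_get(np, "sram", 0);
        if (!pool)
                return NULL;

        return (void *)gen_pool_alloc(pool, len);
}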
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 15928f0647e4..ad35f300b9a4 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -368,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order); | |||
| 368 | extern void free_hot_cold_page(struct page *page, bool cold); | 368 | extern void free_hot_cold_page(struct page *page, bool cold); |
| 369 | extern void free_hot_cold_page_list(struct list_head *list, bool cold); | 369 | extern void free_hot_cold_page_list(struct list_head *list, bool cold); |
| 370 | 370 | ||
| 371 | struct page_frag_cache; | ||
| 372 | extern void *__alloc_page_frag(struct page_frag_cache *nc, | ||
| 373 | unsigned int fragsz, gfp_t gfp_mask); | ||
| 374 | extern void __free_page_frag(void *addr); | ||
| 375 | |||
| 371 | extern void __free_kmem_pages(struct page *page, unsigned int order); | 376 | extern void __free_kmem_pages(struct page *page, unsigned int order); |
| 372 | extern void free_kmem_pages(unsigned long addr, unsigned int order); | 377 | extern void free_kmem_pages(unsigned long addr, unsigned int order); |
| 373 | 378 | ||
| @@ -379,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); | |||
| 379 | void drain_all_pages(struct zone *zone); | 384 | void drain_all_pages(struct zone *zone); |
| 380 | void drain_local_pages(struct zone *zone); | 385 | void drain_local_pages(struct zone *zone); |
| 381 | 386 | ||
| 387 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | ||
| 388 | void page_alloc_init_late(void); | ||
| 389 | #else | ||
| 390 | static inline void page_alloc_init_late(void) | ||
| 391 | { | ||
| 392 | } | ||
| 393 | #endif | ||
| 394 | |||
| 382 | /* | 395 | /* |
| 383 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what | 396 | * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what |
| 384 | * GFP flags are used before interrupts are enabled. Once interrupts are | 397 | * GFP flags are used before interrupts are enabled. Once interrupts are |
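__alloc_page_frag()/__free_page_frag() expose the page-fragment allocator the networking code uses for packet data: callers keep a struct page_frag_cache and carve small fragments out of it. A simplified sketch, assuming the cache definition from this series is visible via mm_types.h and ignoring the per-cpu handling real users need:

#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page_frag_cache my_frag_cache;

static void *my_alloc_frag(unsigned int size)
{
        /* carve a fragment out of the cache, refilling pages as needed */
        return __alloc_page_frag(&my_frag_cache, size, GFP_ATOMIC);
}

static void my_free_frag(void *data)
{
        __free_page_frag(data);
}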
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h index 569236e6b2bc..93e080b39cf6 100644 --- a/include/linux/goldfish.h +++ b/include/linux/goldfish.h | |||
| @@ -3,13 +3,24 @@ | |||
| 3 | 3 | ||
| 4 | /* Helpers for Goldfish virtual platform */ | 4 | /* Helpers for Goldfish virtual platform */ |
| 5 | 5 | ||
| 6 | static inline void gf_write64(unsigned long data, | 6 | static inline void gf_write_ptr(const void *ptr, void __iomem *portl, |
| 7 | void __iomem *portl, void __iomem *porth) | 7 | void __iomem *porth) |
| 8 | { | 8 | { |
| 9 | writel((u32)data, portl); | 9 | writel((u32)(unsigned long)ptr, portl); |
| 10 | #ifdef CONFIG_64BIT | 10 | #ifdef CONFIG_64BIT |
| 11 | writel(data>>32, porth); | 11 | writel((unsigned long)ptr >> 32, porth); |
| 12 | #endif | 12 | #endif |
| 13 | } | 13 | } |
| 14 | 14 | ||
| 15 | static inline void gf_write_dma_addr(const dma_addr_t addr, | ||
| 16 | void __iomem *portl, | ||
| 17 | void __iomem *porth) | ||
| 18 | { | ||
| 19 | writel((u32)addr, portl); | ||
| 20 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
| 21 | writel(addr >> 32, porth); | ||
| 22 | #endif | ||
| 23 | } | ||
| 24 | |||
| 25 | |||
| 15 | #endif /* __LINUX_GOLDFISH_H */ | 26 | #endif /* __LINUX_GOLDFISH_H */ |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index fd098169fe87..adac255aee86 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
| @@ -407,6 +407,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) | |||
| 407 | return -EINVAL; | 407 | return -EINVAL; |
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | /* Child properties interface */ | ||
| 411 | struct fwnode_handle; | ||
| 412 | |||
| 413 | static inline struct gpio_desc *fwnode_get_named_gpiod( | ||
| 414 | struct fwnode_handle *fwnode, const char *propname) | ||
| 415 | { | ||
| 416 | return ERR_PTR(-ENOSYS); | ||
| 417 | } | ||
| 418 | |||
| 419 | static inline struct gpio_desc *devm_get_gpiod_from_child( | ||
| 420 | struct device *dev, const char *con_id, struct fwnode_handle *child) | ||
| 421 | { | ||
| 422 | return ERR_PTR(-ENOSYS); | ||
| 423 | } | ||
| 424 | |||
| 410 | #endif /* CONFIG_GPIOLIB */ | 425 | #endif /* CONFIG_GPIOLIB */ |
| 411 | 426 | ||
| 412 | /* | 427 | /* |
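The added stubs let consumers of fwnode_get_named_gpiod() and devm_get_gpiod_from_child() keep compiling when GPIOLIB is disabled. A consumer sketch of the child-node lookup when GPIOLIB is enabled; the "reset" con_id and the error handling are simplified assumptions:

#include <linux/err.h>
#include <linux/property.h>
#include <linux/gpio/consumer.h>

static int my_probe_children(struct device *dev)
{
        struct fwnode_handle *child;

        device_for_each_child_node(dev, child) {
                struct gpio_desc *desc;

                /* look up a "reset" GPIO described in this child node */
                desc = devm_get_gpiod_from_child(dev, "reset", child);
                if (IS_ERR(desc))
                        return PTR_ERR(desc);

                gpiod_direction_output(desc, 0);
        }

        return 0;
}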
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index cc7ec129b329..c8393cd4d44f 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
| @@ -45,7 +45,7 @@ struct seq_file; | |||
| 45 | * @base: identifies the first GPIO number handled by this chip; | 45 | * @base: identifies the first GPIO number handled by this chip; |
| 46 | * or, if negative during registration, requests dynamic ID allocation. | 46 | * or, if negative during registration, requests dynamic ID allocation. |
| 47 | * DEPRECATION: providing anything non-negative and nailing the base | 47 | * DEPRECATION: providing anything non-negative and nailing the base |
| 48 | * base offset of GPIO chips is deprecated. Please pass -1 as base to | 48 | * offset of GPIO chips is deprecated. Please pass -1 as base to |
| 49 | * let gpiolib select the chip base in all possible cases. We want to | 49 | * let gpiolib select the chip base in all possible cases. We want to |
| 50 | * get rid of the static GPIO number space in the long run. | 50 | * get rid of the static GPIO number space in the long run. |
| 51 | * @ngpio: the number of GPIOs handled by this controller; the last GPIO | 51 | * @ngpio: the number of GPIOs handled by this controller; the last GPIO |
diff --git a/include/linux/gsmmux.h b/include/linux/gsmmux.h deleted file mode 100644 index c25e9477f7c3..000000000000 --- a/include/linux/gsmmux.h +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | #ifndef _LINUX_GSMMUX_H | ||
| 2 | #define _LINUX_GSMMUX_H | ||
| 3 | |||
| 4 | struct gsm_config | ||
| 5 | { | ||
| 6 | unsigned int adaption; | ||
| 7 | unsigned int encapsulation; | ||
| 8 | unsigned int initiator; | ||
| 9 | unsigned int t1; | ||
| 10 | unsigned int t2; | ||
| 11 | unsigned int t3; | ||
| 12 | unsigned int n2; | ||
| 13 | unsigned int mru; | ||
| 14 | unsigned int mtu; | ||
| 15 | unsigned int k; | ||
| 16 | unsigned int i; | ||
| 17 | unsigned int unused[8]; /* Padding for expansion without | ||
| 18 | breaking stuff */ | ||
| 19 | }; | ||
| 20 | |||
| 21 | #define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config) | ||
| 22 | #define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config) | ||
| 23 | |||
| 24 | struct gsm_netconfig { | ||
| 25 | unsigned int adaption; /* Adaption to use in network mode */ | ||
| 26 | unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */ | ||
| 27 | unsigned short unused2; | ||
| 28 | char if_name[IFNAMSIZ]; /* interface name format string */ | ||
| 29 | __u8 unused[28]; /* For future use */ | ||
| 30 | }; | ||
| 31 | |||
| 32 | #define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig) | ||
| 33 | #define GSMIOC_DISABLE_NET _IO('G', 3) | ||
| 34 | |||
| 35 | |||
| 36 | #endif | ||
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 0042bf330b99..c02b5ce6c5cd 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h | |||
| @@ -230,6 +230,7 @@ struct hid_sensor_common { | |||
| 230 | struct platform_device *pdev; | 230 | struct platform_device *pdev; |
| 231 | unsigned usage_id; | 231 | unsigned usage_id; |
| 232 | atomic_t data_ready; | 232 | atomic_t data_ready; |
| 233 | atomic_t user_requested_state; | ||
| 233 | struct iio_trigger *trigger; | 234 | struct iio_trigger *trigger; |
| 234 | struct hid_sensor_hub_attribute_info poll; | 235 | struct hid_sensor_hub_attribute_info poll; |
| 235 | struct hid_sensor_hub_attribute_info report_state; | 236 | struct hid_sensor_hub_attribute_info report_state; |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 205026175c42..d891f949466a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | |||
| 460 | return &mm->page_table_lock; | 460 | return &mm->page_table_lock; |
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | static inline bool hugepages_supported(void) | 463 | #ifndef hugepages_supported |
| 464 | { | 464 | /* |
| 465 | /* | 465 | * Some platform decide whether they support huge pages at boot |
| 466 | * Some platform decide whether they support huge pages at boot | 466 | * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 |
| 467 | * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when | 467 | * when there is no such support |
| 468 | * there is no such support | 468 | */ |
| 469 | */ | 469 | #define hugepages_supported() (HPAGE_SHIFT != 0) |
| 470 | return HPAGE_SHIFT != 0; | 470 | #endif |
| 471 | } | ||
| 472 | 471 | ||
| 473 | #else /* CONFIG_HUGETLB_PAGE */ | 472 | #else /* CONFIG_HUGETLB_PAGE */ |
| 474 | struct hstate {}; | 473 | struct hstate {}; |
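hugepages_supported() becomes an overridable macro so an architecture can report hugepage support from something other than HPAGE_SHIFT, while the generic fallback keeps the old HPAGE_SHIFT != 0 behaviour. A hypothetical arch-side override, placed in that architecture's asm/hugetlb.h; my_cpu_has_hugepages() is an assumed helper:

/* hypothetical arch override: report support from a runtime CPU feature */
#define hugepages_supported() (my_cpu_has_hugepages())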
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index 3343298e40e8..859d673d98c8 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ | 26 | #define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ |
| 27 | 27 | ||
| 28 | struct device; | 28 | struct device; |
| 29 | struct device_node; | ||
| 29 | struct hwspinlock; | 30 | struct hwspinlock; |
| 30 | struct hwspinlock_device; | 31 | struct hwspinlock_device; |
| 31 | struct hwspinlock_ops; | 32 | struct hwspinlock_ops; |
| @@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank); | |||
| 66 | struct hwspinlock *hwspin_lock_request(void); | 67 | struct hwspinlock *hwspin_lock_request(void); |
| 67 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); | 68 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); |
| 68 | int hwspin_lock_free(struct hwspinlock *hwlock); | 69 | int hwspin_lock_free(struct hwspinlock *hwlock); |
| 70 | int of_hwspin_lock_get_id(struct device_node *np, int index); | ||
| 69 | int hwspin_lock_get_id(struct hwspinlock *hwlock); | 71 | int hwspin_lock_get_id(struct hwspinlock *hwlock); |
| 70 | int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, | 72 | int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, |
| 71 | unsigned long *); | 73 | unsigned long *); |
| @@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) | |||
| 120 | { | 122 | { |
| 121 | } | 123 | } |
| 122 | 124 | ||
| 125 | static inline int of_hwspin_lock_get_id(struct device_node *np, int index) | ||
| 126 | { | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 123 | static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) | 130 | static inline int hwspin_lock_get_id(struct hwspinlock *hwlock) |
| 124 | { | 131 | { |
| 125 | return 0; | 132 | return 0; |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 902c37aef67e..30d3a1f79450 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -160,16 +160,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, | |||
| 160 | * 1 . 1 (Windows 7) | 160 | * 1 . 1 (Windows 7) |
| 161 | * 2 . 4 (Windows 8) | 161 | * 2 . 4 (Windows 8) |
| 162 | * 3 . 0 (Windows 8 R2) | 162 | * 3 . 0 (Windows 8 R2) |
| 163 | * 4 . 0 (Windows 10) | ||
| 163 | */ | 164 | */ |
| 164 | 165 | ||
| 165 | #define VERSION_WS2008 ((0 << 16) | (13)) | 166 | #define VERSION_WS2008 ((0 << 16) | (13)) |
| 166 | #define VERSION_WIN7 ((1 << 16) | (1)) | 167 | #define VERSION_WIN7 ((1 << 16) | (1)) |
| 167 | #define VERSION_WIN8 ((2 << 16) | (4)) | 168 | #define VERSION_WIN8 ((2 << 16) | (4)) |
| 168 | #define VERSION_WIN8_1 ((3 << 16) | (0)) | 169 | #define VERSION_WIN8_1 ((3 << 16) | (0)) |
| 170 | #define VERSION_WIN10 ((4 << 16) | (0)) | ||
| 169 | 171 | ||
| 170 | #define VERSION_INVAL -1 | 172 | #define VERSION_INVAL -1 |
| 171 | 173 | ||
| 172 | #define VERSION_CURRENT VERSION_WIN8_1 | 174 | #define VERSION_CURRENT VERSION_WIN10 |
| 173 | 175 | ||
| 174 | /* Make maximum size of pipe payload of 16K */ | 176 | /* Make maximum size of pipe payload of 16K */ |
| 175 | #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) | 177 | #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) |
| @@ -389,10 +391,7 @@ enum vmbus_channel_message_type { | |||
| 389 | CHANNELMSG_INITIATE_CONTACT = 14, | 391 | CHANNELMSG_INITIATE_CONTACT = 14, |
| 390 | CHANNELMSG_VERSION_RESPONSE = 15, | 392 | CHANNELMSG_VERSION_RESPONSE = 15, |
| 391 | CHANNELMSG_UNLOAD = 16, | 393 | CHANNELMSG_UNLOAD = 16, |
| 392 | #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD | 394 | CHANNELMSG_UNLOAD_RESPONSE = 17, |
| 393 | CHANNELMSG_VIEWRANGE_ADD = 17, | ||
| 394 | CHANNELMSG_VIEWRANGE_REMOVE = 18, | ||
| 395 | #endif | ||
| 396 | CHANNELMSG_COUNT | 395 | CHANNELMSG_COUNT |
| 397 | }; | 396 | }; |
| 398 | 397 | ||
| @@ -549,21 +548,6 @@ struct vmbus_channel_gpadl_torndown { | |||
| 549 | u32 gpadl; | 548 | u32 gpadl; |
| 550 | } __packed; | 549 | } __packed; |
| 551 | 550 | ||
| 552 | #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD | ||
| 553 | struct vmbus_channel_view_range_add { | ||
| 554 | struct vmbus_channel_message_header header; | ||
| 555 | PHYSICAL_ADDRESS viewrange_base; | ||
| 556 | u64 viewrange_length; | ||
| 557 | u32 child_relid; | ||
| 558 | } __packed; | ||
| 559 | |||
| 560 | struct vmbus_channel_view_range_remove { | ||
| 561 | struct vmbus_channel_message_header header; | ||
| 562 | PHYSICAL_ADDRESS viewrange_base; | ||
| 563 | u32 child_relid; | ||
| 564 | } __packed; | ||
| 565 | #endif | ||
| 566 | |||
| 567 | struct vmbus_channel_relid_released { | 551 | struct vmbus_channel_relid_released { |
| 568 | struct vmbus_channel_message_header header; | 552 | struct vmbus_channel_message_header header; |
| 569 | u32 child_relid; | 553 | u32 child_relid; |
| @@ -713,6 +697,11 @@ struct vmbus_channel { | |||
| 713 | /* The corresponding CPUID in the guest */ | 697 | /* The corresponding CPUID in the guest */ |
| 714 | u32 target_cpu; | 698 | u32 target_cpu; |
| 715 | /* | 699 | /* |
| 700 | * State to manage the CPU affiliation of channels. | ||
| 701 | */ | ||
| 702 | struct cpumask alloced_cpus_in_node; | ||
| 703 | int numa_node; | ||
| 704 | /* | ||
| 716 | * Support for sub-channels. For high performance devices, | 705 | * Support for sub-channels. For high performance devices, |
| 717 | * it will be useful to have multiple sub-channels to support | 706 | * it will be useful to have multiple sub-channels to support |
| 718 | * a scalable communication infrastructure with the host. | 707 | * a scalable communication infrastructure with the host. |
| @@ -745,6 +734,15 @@ struct vmbus_channel { | |||
| 745 | */ | 734 | */ |
| 746 | struct list_head sc_list; | 735 | struct list_head sc_list; |
| 747 | /* | 736 | /* |
| 737 | * Current number of sub-channels. | ||
| 738 | */ | ||
| 739 | int num_sc; | ||
| 740 | /* | ||
| 741 | * Number of a sub-channel (position within sc_list) which is supposed | ||
| 742 | * to be used as the next outgoing channel. | ||
| 743 | */ | ||
| 744 | int next_oc; | ||
| 745 | /* | ||
| 748 | * The primary channel this sub-channel belongs to. | 746 | * The primary channel this sub-channel belongs to. |
| 749 | * This will be NULL for the primary channel. | 747 | * This will be NULL for the primary channel. |
| 750 | */ | 748 | */ |
| @@ -758,9 +756,6 @@ struct vmbus_channel { | |||
| 758 | * link up channels based on their CPU affinity. | 756 | * link up channels based on their CPU affinity. |
| 759 | */ | 757 | */ |
| 760 | struct list_head percpu_list; | 758 | struct list_head percpu_list; |
| 761 | |||
| 762 | int num_sc; | ||
| 763 | int next_oc; | ||
| 764 | }; | 759 | }; |
| 765 | 760 | ||
| 766 | static inline void set_channel_read_state(struct vmbus_channel *c, bool state) | 761 | static inline void set_channel_read_state(struct vmbus_channel *c, bool state) |
| @@ -1236,13 +1231,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, | |||
| 1236 | struct icmsg_negotiate *, u8 *, int, | 1231 | struct icmsg_negotiate *, u8 *, int, |
| 1237 | int); | 1232 | int); |
| 1238 | 1233 | ||
| 1239 | int hv_kvp_init(struct hv_util_service *); | ||
| 1240 | void hv_kvp_deinit(void); | ||
| 1241 | void hv_kvp_onchannelcallback(void *); | ||
| 1242 | |||
| 1243 | int hv_vss_init(struct hv_util_service *); | ||
| 1244 | void hv_vss_deinit(void); | ||
| 1245 | void hv_vss_onchannelcallback(void *); | ||
| 1246 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); | 1234 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); |
| 1247 | 1235 | ||
| 1248 | extern struct resource hyperv_mmio; | 1236 | extern struct resource hyperv_mmio; |
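The new num_sc/next_oc fields track the sub-channel count and the index of the next outgoing sub-channel on a primary channel. A rough, illustrative round-robin walk over sc_list using those fields (a sketch of the idea only, not the vmbus driver's actual selection code; example_pick_outgoing() is a made-up name):

    static struct vmbus_channel *example_pick_outgoing(struct vmbus_channel *primary)
    {
    	struct vmbus_channel *cur;
    	int i = 0;

    	if (primary->num_sc == 0 || list_empty(&primary->sc_list))
    		return primary;		/* no sub-channels, use the primary */

    	primary->next_oc = (primary->next_oc + 1) % primary->num_sc;
    	list_for_each_entry(cur, &primary->sc_list, sc_list)
    		if (i++ == primary->next_oc)
    			return cur;
    	return primary;
    }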
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 0bc03f100d04..9ad7828d9d34 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
| @@ -675,6 +675,7 @@ struct twl4030_power_data { | |||
| 675 | struct twl4030_resconfig *board_config; | 675 | struct twl4030_resconfig *board_config; |
| 676 | #define TWL4030_RESCONFIG_UNDEF ((u8)-1) | 676 | #define TWL4030_RESCONFIG_UNDEF ((u8)-1) |
| 677 | bool use_poweroff; /* Board is wired for TWL poweroff */ | 677 | bool use_poweroff; /* Board is wired for TWL poweroff */ |
| 678 | bool ac_charger_quirk; /* Disable AC charger on board */ | ||
| 678 | }; | 679 | }; |
| 679 | 680 | ||
| 680 | extern int twl4030_remove_script(u8 flags); | 681 | extern int twl4030_remove_script(u8 flags); |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 93b5ca754b5b..a633898f36ac 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
| @@ -39,6 +39,19 @@ | |||
| 39 | 39 | ||
| 40 | struct device; | 40 | struct device; |
| 41 | 41 | ||
| 42 | /* IDE-specific values for req->cmd_type */ | ||
| 43 | enum ata_cmd_type_bits { | ||
| 44 | REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1, | ||
| 45 | REQ_TYPE_ATA_PC, | ||
| 46 | REQ_TYPE_ATA_SENSE, /* sense request */ | ||
| 47 | REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */ | ||
| 48 | REQ_TYPE_ATA_PM_RESUME, /* resume request */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | #define ata_pm_request(rq) \ | ||
| 52 | ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \ | ||
| 53 | (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME) | ||
| 54 | |||
| 42 | /* Error codes returned in rq->errors to the higher part of the driver. */ | 55 | /* Error codes returned in rq->errors to the higher part of the driver. */ |
| 43 | enum { | 56 | enum { |
| 44 | IDE_DRV_ERROR_GENERAL = 101, | 57 | IDE_DRV_ERROR_GENERAL = 101, |
| @@ -1314,6 +1327,19 @@ struct ide_port_info { | |||
| 1314 | u8 udma_mask; | 1327 | u8 udma_mask; |
| 1315 | }; | 1328 | }; |
| 1316 | 1329 | ||
| 1330 | /* | ||
| 1331 | * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME | ||
| 1332 | * requests. | ||
| 1333 | */ | ||
| 1334 | struct ide_pm_state { | ||
| 1335 | /* PM state machine step value, currently driver specific */ | ||
| 1336 | int pm_step; | ||
| 1337 | /* requested PM state value (S1, S2, S3, S4, ...) */ | ||
| 1338 | u32 pm_state; | ||
| 1339 | void* data; /* for driver use */ | ||
| 1340 | }; | ||
| 1341 | |||
| 1342 | |||
| 1317 | int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); | 1343 | int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); |
| 1318 | int ide_pci_init_two(struct pci_dev *, struct pci_dev *, | 1344 | int ide_pci_init_two(struct pci_dev *, struct pci_dev *, |
| 1319 | const struct ide_port_info *, void *); | 1345 | const struct ide_port_info *, void *); |
| @@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data) | |||
| 1551 | #define ide_host_for_each_port(i, port, host) \ | 1577 | #define ide_host_for_each_port(i, port, host) \ |
| 1552 | for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) | 1578 | for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) |
| 1553 | 1579 | ||
| 1580 | |||
| 1554 | #endif /* _IDE_H */ | 1581 | #endif /* _IDE_H */ |
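struct ide_pm_state above is the payload carried by the new REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME request types. A hedged sketch of how a suspend request might be filled in (example_prepare_suspend_rq() is illustrative, not the actual ide-pm code):

    static void example_prepare_suspend_rq(struct request *rq,
    					   struct ide_pm_state *pm, u32 target_state)
    {
    	pm->pm_step = 0;		/* start of the driver's PM state machine */
    	pm->pm_state = target_state;	/* e.g. a value derived from PM_EVENT_* */
    	pm->data = NULL;

    	rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
    	rq->special = pm;		/* ata_pm_request(rq) is now true */
    }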
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 8872ca103d06..1dc1f4ed4001 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h | |||
| @@ -225,15 +225,13 @@ static inline bool ieee802154_is_valid_psdu_len(const u8 len) | |||
| 225 | * ieee802154_is_valid_psdu_len - check if extended addr is valid | 225 | * ieee802154_is_valid_psdu_len - check if extended addr is valid |
| 226 | * @addr: extended addr to check | 226 | * @addr: extended addr to check |
| 227 | */ | 227 | */ |
| 228 | static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) | 228 | static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr) |
| 229 | { | 229 | { |
| 230 | /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff | 230 | /* Bail out if the address is all zero, or if the group |
| 231 | * is used internally as extended to short address broadcast mapping. | 231 | * address bit is set. |
| 232 | * This is currently a workaround because neighbor discovery can't | ||
| 233 | * deal with short addresses types right now. | ||
| 234 | */ | 232 | */ |
| 235 | return ((addr != cpu_to_le64(0x0000000000000000ULL)) && | 233 | return ((addr != cpu_to_le64(0x0000000000000000ULL)) && |
| 236 | (addr != cpu_to_le64(0xffffffffffffffffULL))); | 234 | !(addr & cpu_to_le64(0x0100000000000000ULL))); |
| 237 | } | 235 | } |
| 238 | 236 | ||
| 239 | /** | 237 | /** |
| @@ -244,9 +242,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr) | |||
| 244 | { | 242 | { |
| 245 | get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); | 243 | get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); |
| 246 | 244 | ||
| 247 | /* toggle some bit if we hit an invalid extended addr */ | 245 | /* clear the group bit, and set the locally administered bit */ |
| 248 | if (!ieee802154_is_valid_extended_addr(*addr)) | 246 | ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01; |
| 249 | ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; | 247 | ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02; |
| 250 | } | 248 | } |
| 251 | 249 | ||
| 252 | #endif /* LINUX_IEEE802154_H */ | 250 | #endif /* LINUX_IEEE802154_H */ |
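The reworked helpers pair naturally: ieee802154_random_extended_addr() now always produces a locally administered unicast EUI-64, which ieee802154_is_valid_extended_unicast_addr() accepts. A minimal sketch (example_get_extended_addr() is illustrative only):

    static __le64 example_get_extended_addr(void)
    {
    	__le64 addr;

    	ieee802154_random_extended_addr(&addr);

    	/* The generator clears the group bit and sets the locally
    	 * administered bit, so this check is expected to pass. */
    	WARN_ON(!ieee802154_is_valid_extended_unicast_addr(addr));
    	return addr;
    }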
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index da4929927f69..ae5d0d22955d 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
| @@ -5,6 +5,15 @@ | |||
| 5 | 5 | ||
| 6 | 6 | ||
| 7 | /* We don't want this structure exposed to user space */ | 7 | /* We don't want this structure exposed to user space */ |
| 8 | struct ifla_vf_stats { | ||
| 9 | __u64 rx_packets; | ||
| 10 | __u64 tx_packets; | ||
| 11 | __u64 rx_bytes; | ||
| 12 | __u64 tx_bytes; | ||
| 13 | __u64 broadcast; | ||
| 14 | __u64 multicast; | ||
| 15 | }; | ||
| 16 | |||
| 8 | struct ifla_vf_info { | 17 | struct ifla_vf_info { |
| 9 | __u32 vf; | 18 | __u32 vf; |
| 10 | __u8 mac[32]; | 19 | __u8 mac[32]; |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 6f6929ea8a0c..a4ccc3122f93 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
| @@ -29,7 +29,7 @@ struct macvtap_queue; | |||
| 29 | * Maximum times a macvtap device can be opened. This can be used to | 29 | * Maximum times a macvtap device can be opened. This can be used to |
| 30 | * configure the number of receive queues, e.g. for multiqueue virtio. | 30 | * configure the number of receive queues, e.g. for multiqueue virtio. |
| 31 | */ | 31 | */ |
| 32 | #define MAX_MACVTAP_QUEUES 16 | 32 | #define MAX_MACVTAP_QUEUES 256 |
| 33 | 33 | ||
| 34 | #define MACVLAN_MC_FILTER_BITS 8 | 34 | #define MACVLAN_MC_FILTER_BITS 8 |
| 35 | #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) | 35 | #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) |
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 66a7d7600f43..b49cf923becc 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
| @@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po) | |||
| 74 | struct module; | 74 | struct module; |
| 75 | 75 | ||
| 76 | struct pppox_proto { | 76 | struct pppox_proto { |
| 77 | int (*create)(struct net *net, struct socket *sock); | 77 | int (*create)(struct net *net, struct socket *sock, int kern); |
| 78 | int (*ioctl)(struct socket *sock, unsigned int cmd, | 78 | int (*ioctl)(struct socket *sock, unsigned int cmd, |
| 79 | unsigned long arg); | 79 | unsigned long arg); |
| 80 | struct module *owner; | 80 | struct module *owner; |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 920e4457ce6e..67ce5bd3b56a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
| @@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, | |||
| 416 | /** | 416 | /** |
| 417 | * __vlan_get_tag - get the VLAN ID that is part of the payload | 417 | * __vlan_get_tag - get the VLAN ID that is part of the payload |
| 418 | * @skb: skbuff to query | 418 | * @skb: skbuff to query |
| 419 | * @vlan_tci: buffer to store vlaue | 419 | * @vlan_tci: buffer to store value |
| 420 | * | 420 | * |
| 421 | * Returns error if the skb is not of VLAN type | 421 | * Returns error if the skb is not of VLAN type |
| 422 | */ | 422 | */ |
| @@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) | |||
| 435 | /** | 435 | /** |
| 436 | * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] | 436 | * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] |
| 437 | * @skb: skbuff to query | 437 | * @skb: skbuff to query |
| 438 | * @vlan_tci: buffer to store vlaue | 438 | * @vlan_tci: buffer to store value |
| 439 | * | 439 | * |
| 440 | * Returns error if @skb->vlan_tci is not set correctly | 440 | * Returns error if @skb->vlan_tci is not set correctly |
| 441 | */ | 441 | */ |
| @@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, | |||
| 456 | /** | 456 | /** |
| 457 | * vlan_get_tag - get the VLAN ID from the skb | 457 | * vlan_get_tag - get the VLAN ID from the skb |
| 458 | * @skb: skbuff to query | 458 | * @skb: skbuff to query |
| 459 | * @vlan_tci: buffer to store vlaue | 459 | * @vlan_tci: buffer to store value |
| 460 | * | 460 | * |
| 461 | * Returns error if the skb is not VLAN tagged | 461 | * Returns error if the skb is not VLAN tagged |
| 462 | */ | 462 | */ |
| @@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, | |||
| 539 | */ | 539 | */ |
| 540 | 540 | ||
| 541 | proto = vhdr->h_vlan_encapsulated_proto; | 541 | proto = vhdr->h_vlan_encapsulated_proto; |
| 542 | if (ntohs(proto) >= ETH_P_802_3_MIN) { | 542 | if (eth_proto_is_802_3(proto)) { |
| 543 | skb->protocol = proto; | 543 | skb->protocol = proto; |
| 544 | return; | 544 | return; |
| 545 | } | 545 | } |
| @@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, | |||
| 628 | return features; | 628 | return features; |
| 629 | } | 629 | } |
| 630 | 630 | ||
| 631 | /** | ||
| 632 | * compare_vlan_header - Compare two vlan headers | ||
| 633 | * @h1: Pointer to vlan header | ||
| 634 | * @h2: Pointer to vlan header | ||
| 635 | * | ||
| 636 | * Compare two vlan headers, returns 0 if equal. | ||
| 637 | * | ||
| 638 | * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits. | ||
| 639 | */ | ||
| 640 | static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, | ||
| 641 | const struct vlan_hdr *h2) | ||
| 642 | { | ||
| 643 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
| 644 | return *(u32 *)h1 ^ *(u32 *)h2; | ||
| 645 | #else | ||
| 646 | return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | | ||
| 647 | ((__force u32)h1->h_vlan_encapsulated_proto ^ | ||
| 648 | (__force u32)h2->h_vlan_encapsulated_proto); | ||
| 649 | #endif | ||
| 650 | } | ||
| 631 | #endif /* !(_LINUX_IF_VLAN_H_) */ | 651 | #endif /* !(_LINUX_IF_VLAN_H_) */ |
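compare_vlan_header() returns zero when the two headers match, so callers treat it like memcmp(). A hedged usage sketch, assuming skb->data still points at the Ethernet header (example_same_vlan() is illustrative only):

    static bool example_same_vlan(const struct sk_buff *a, const struct sk_buff *b)
    {
    	const struct vlan_hdr *h1 = (const struct vlan_hdr *)(a->data + ETH_HLEN);
    	const struct vlan_hdr *h2 = (const struct vlan_hdr *)(b->data + ETH_HLEN);

    	/* Non-zero means the TCI or the encapsulated protocol differs. */
    	return compare_vlan_header(h1, h2) == 0;
    }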
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 2c677afeea47..193ad488d3e2 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
| @@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *); | |||
| 130 | extern void ip_mc_remap(struct in_device *); | 130 | extern void ip_mc_remap(struct in_device *); |
| 131 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); | 131 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); |
| 132 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); | 132 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); |
| 133 | int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed); | ||
| 133 | 134 | ||
| 134 | #endif | 135 | #endif |
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index eb8622b78ec9..1600c55828e0 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h | |||
| @@ -29,6 +29,7 @@ struct iio_buffer; | |||
| 29 | * @set_length: set number of datums in buffer | 29 | * @set_length: set number of datums in buffer |
| 30 | * @release: called when the last reference to the buffer is dropped, | 30 | * @release: called when the last reference to the buffer is dropped, |
| 31 | * should free all resources allocated by the buffer. | 31 | * should free all resources allocated by the buffer. |
| 32 | * @modes: Supported operating modes by this buffer type | ||
| 32 | * | 33 | * |
| 33 | * The purpose of this structure is to make the buffer element | 34 | * The purpose of this structure is to make the buffer element |
| 34 | * modular as event for a given driver, different usecases may require | 35 | * modular as event for a given driver, different usecases may require |
| @@ -51,6 +52,8 @@ struct iio_buffer_access_funcs { | |||
| 51 | int (*set_length)(struct iio_buffer *buffer, int length); | 52 | int (*set_length)(struct iio_buffer *buffer, int length); |
| 52 | 53 | ||
| 53 | void (*release)(struct iio_buffer *buffer); | 54 | void (*release)(struct iio_buffer *buffer); |
| 55 | |||
| 56 | unsigned int modes; | ||
| 54 | }; | 57 | }; |
| 55 | 58 | ||
| 56 | /** | 59 | /** |
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index d86b753e9b30..f79148261d16 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
| @@ -32,6 +32,7 @@ enum iio_chan_info_enum { | |||
| 32 | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, | 32 | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, |
| 33 | IIO_CHAN_INFO_AVERAGE_RAW, | 33 | IIO_CHAN_INFO_AVERAGE_RAW, |
| 34 | IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, | 34 | IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, |
| 35 | IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, | ||
| 35 | IIO_CHAN_INFO_SAMP_FREQ, | 36 | IIO_CHAN_INFO_SAMP_FREQ, |
| 36 | IIO_CHAN_INFO_FREQUENCY, | 37 | IIO_CHAN_INFO_FREQUENCY, |
| 37 | IIO_CHAN_INFO_PHASE, | 38 | IIO_CHAN_INFO_PHASE, |
| @@ -43,6 +44,8 @@ enum iio_chan_info_enum { | |||
| 43 | IIO_CHAN_INFO_CALIBWEIGHT, | 44 | IIO_CHAN_INFO_CALIBWEIGHT, |
| 44 | IIO_CHAN_INFO_DEBOUNCE_COUNT, | 45 | IIO_CHAN_INFO_DEBOUNCE_COUNT, |
| 45 | IIO_CHAN_INFO_DEBOUNCE_TIME, | 46 | IIO_CHAN_INFO_DEBOUNCE_TIME, |
| 47 | IIO_CHAN_INFO_CALIBEMISSIVITY, | ||
| 48 | IIO_CHAN_INFO_OVERSAMPLING_RATIO, | ||
| 46 | }; | 49 | }; |
| 47 | 50 | ||
| 48 | enum iio_shared_by { | 51 | enum iio_shared_by { |
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 942b6de68e2f..32b579525004 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h | |||
| @@ -17,6 +17,8 @@ enum iio_event_info { | |||
| 17 | IIO_EV_INFO_VALUE, | 17 | IIO_EV_INFO_VALUE, |
| 18 | IIO_EV_INFO_HYSTERESIS, | 18 | IIO_EV_INFO_HYSTERESIS, |
| 19 | IIO_EV_INFO_PERIOD, | 19 | IIO_EV_INFO_PERIOD, |
| 20 | IIO_EV_INFO_HIGH_PASS_FILTER_3DB, | ||
| 21 | IIO_EV_INFO_LOW_PASS_FILTER_3DB, | ||
| 20 | }; | 22 | }; |
| 21 | 23 | ||
| 22 | #define IIO_VAL_INT 1 | 24 | #define IIO_VAL_INT 1 |
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index ac48b10c9395..0e707f0c1a3e 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h | |||
| @@ -24,6 +24,7 @@ struct inet_diag_handler { | |||
| 24 | struct inet_diag_msg *r, | 24 | struct inet_diag_msg *r, |
| 25 | void *info); | 25 | void *info); |
| 26 | __u16 idiag_type; | 26 | __u16 idiag_type; |
| 27 | __u16 idiag_info_size; | ||
| 27 | }; | 28 | }; |
| 28 | 29 | ||
| 29 | struct inet_connection_sock; | 30 | struct inet_connection_sock; |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0a21fbefdfbe..a4328cea376a 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
| @@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | |||
| 120 | || (!IN_DEV_FORWARD(in_dev) && \ | 120 | || (!IN_DEV_FORWARD(in_dev) && \ |
| 121 | IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) | 121 | IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) |
| 122 | 122 | ||
| 123 | #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ | ||
| 124 | IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) | ||
| 125 | |||
| 123 | #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) | 126 | #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) |
| 124 | #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) | 127 | #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) |
| 125 | #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) | 128 | #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) |
diff --git a/include/linux/init.h b/include/linux/init.h index 21b6d768edd7..b449f378f995 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
| @@ -91,14 +91,6 @@ | |||
| 91 | 91 | ||
| 92 | #define __exit __section(.exit.text) __exitused __cold notrace | 92 | #define __exit __section(.exit.text) __exitused __cold notrace |
| 93 | 93 | ||
| 94 | /* temporary, until all users are removed */ | ||
| 95 | #define __cpuinit | ||
| 96 | #define __cpuinitdata | ||
| 97 | #define __cpuinitconst | ||
| 98 | #define __cpuexit | ||
| 99 | #define __cpuexitdata | ||
| 100 | #define __cpuexitconst | ||
| 101 | |||
| 102 | /* Used for MEMORY_HOTPLUG */ | 94 | /* Used for MEMORY_HOTPLUG */ |
| 103 | #define __meminit __section(.meminit.text) __cold notrace | 95 | #define __meminit __section(.meminit.text) __cold notrace |
| 104 | #define __meminitdata __section(.meminit.data) | 96 | #define __meminitdata __section(.meminit.data) |
| @@ -116,9 +108,6 @@ | |||
| 116 | #define __INITRODATA .section ".init.rodata","a",%progbits | 108 | #define __INITRODATA .section ".init.rodata","a",%progbits |
| 117 | #define __FINITDATA .previous | 109 | #define __FINITDATA .previous |
| 118 | 110 | ||
| 119 | /* temporary, until all users are removed */ | ||
| 120 | #define __CPUINIT | ||
| 121 | |||
| 122 | #define __MEMINIT .section ".meminit.text", "ax" | 111 | #define __MEMINIT .section ".meminit.text", "ax" |
| 123 | #define __MEMINITDATA .section ".meminit.data", "aw" | 112 | #define __MEMINITDATA .section ".meminit.data", "aw" |
| 124 | #define __MEMINITRODATA .section ".meminit.rodata", "a" | 113 | #define __MEMINITRODATA .section ".meminit.rodata", "a" |
| @@ -293,68 +282,8 @@ void __init parse_early_param(void); | |||
| 293 | void __init parse_early_options(char *cmdline); | 282 | void __init parse_early_options(char *cmdline); |
| 294 | #endif /* __ASSEMBLY__ */ | 283 | #endif /* __ASSEMBLY__ */ |
| 295 | 284 | ||
| 296 | /** | ||
| 297 | * module_init() - driver initialization entry point | ||
| 298 | * @x: function to be run at kernel boot time or module insertion | ||
| 299 | * | ||
| 300 | * module_init() will either be called during do_initcalls() (if | ||
| 301 | * builtin) or at module insertion time (if a module). There can only | ||
| 302 | * be one per module. | ||
| 303 | */ | ||
| 304 | #define module_init(x) __initcall(x); | ||
| 305 | |||
| 306 | /** | ||
| 307 | * module_exit() - driver exit entry point | ||
| 308 | * @x: function to be run when driver is removed | ||
| 309 | * | ||
| 310 | * module_exit() will wrap the driver clean-up code | ||
| 311 | * with cleanup_module() when used with rmmod when | ||
| 312 | * the driver is a module. If the driver is statically | ||
| 313 | * compiled into the kernel, module_exit() has no effect. | ||
| 314 | * There can only be one per module. | ||
| 315 | */ | ||
| 316 | #define module_exit(x) __exitcall(x); | ||
| 317 | |||
| 318 | #else /* MODULE */ | 285 | #else /* MODULE */ |
| 319 | 286 | ||
| 320 | /* | ||
| 321 | * In most cases loadable modules do not need custom | ||
| 322 | * initcall levels. There are still some valid cases where | ||
| 323 | * a driver may be needed early if built in, and does not | ||
| 324 | * matter when built as a loadable module. Like bus | ||
| 325 | * snooping debug drivers. | ||
| 326 | */ | ||
| 327 | #define early_initcall(fn) module_init(fn) | ||
| 328 | #define core_initcall(fn) module_init(fn) | ||
| 329 | #define core_initcall_sync(fn) module_init(fn) | ||
| 330 | #define postcore_initcall(fn) module_init(fn) | ||
| 331 | #define postcore_initcall_sync(fn) module_init(fn) | ||
| 332 | #define arch_initcall(fn) module_init(fn) | ||
| 333 | #define subsys_initcall(fn) module_init(fn) | ||
| 334 | #define subsys_initcall_sync(fn) module_init(fn) | ||
| 335 | #define fs_initcall(fn) module_init(fn) | ||
| 336 | #define fs_initcall_sync(fn) module_init(fn) | ||
| 337 | #define rootfs_initcall(fn) module_init(fn) | ||
| 338 | #define device_initcall(fn) module_init(fn) | ||
| 339 | #define device_initcall_sync(fn) module_init(fn) | ||
| 340 | #define late_initcall(fn) module_init(fn) | ||
| 341 | #define late_initcall_sync(fn) module_init(fn) | ||
| 342 | |||
| 343 | #define console_initcall(fn) module_init(fn) | ||
| 344 | #define security_initcall(fn) module_init(fn) | ||
| 345 | |||
| 346 | /* Each module must use one module_init(). */ | ||
| 347 | #define module_init(initfn) \ | ||
| 348 | static inline initcall_t __inittest(void) \ | ||
| 349 | { return initfn; } \ | ||
| 350 | int init_module(void) __attribute__((alias(#initfn))); | ||
| 351 | |||
| 352 | /* This is only required if you want to be unloadable. */ | ||
| 353 | #define module_exit(exitfn) \ | ||
| 354 | static inline exitcall_t __exittest(void) \ | ||
| 355 | { return exitfn; } \ | ||
| 356 | void cleanup_module(void) __attribute__((alias(#exitfn))); | ||
| 357 | |||
| 358 | #define __setup_param(str, unique_id, fn) /* nothing */ | 287 | #define __setup_param(str, unique_id, fn) /* nothing */ |
| 359 | #define __setup(str, func) /* nothing */ | 288 | #define __setup(str, func) /* nothing */ |
| 360 | #endif | 289 | #endif |
| @@ -362,24 +291,6 @@ void __init parse_early_options(char *cmdline); | |||
| 362 | /* Data marked not to be saved by software suspend */ | 291 | /* Data marked not to be saved by software suspend */ |
| 363 | #define __nosavedata __section(.data..nosave) | 292 | #define __nosavedata __section(.data..nosave) |
| 364 | 293 | ||
| 365 | /* This means "can be init if no module support, otherwise module load | ||
| 366 | may call it." */ | ||
| 367 | #ifdef CONFIG_MODULES | ||
| 368 | #define __init_or_module | ||
| 369 | #define __initdata_or_module | ||
| 370 | #define __initconst_or_module | ||
| 371 | #define __INIT_OR_MODULE .text | ||
| 372 | #define __INITDATA_OR_MODULE .data | ||
| 373 | #define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits | ||
| 374 | #else | ||
| 375 | #define __init_or_module __init | ||
| 376 | #define __initdata_or_module __initdata | ||
| 377 | #define __initconst_or_module __initconst | ||
| 378 | #define __INIT_OR_MODULE __INIT | ||
| 379 | #define __INITDATA_OR_MODULE __INITDATA | ||
| 380 | #define __INITRODATA_OR_MODULE __INITRODATA | ||
| 381 | #endif /*CONFIG_MODULES*/ | ||
| 382 | |||
| 383 | #ifdef MODULE | 294 | #ifdef MODULE |
| 384 | #define __exit_p(x) x | 295 | #define __exit_p(x) x |
| 385 | #else | 296 | #else |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index bb9b075f0eb0..e8493fee8160 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -25,13 +25,6 @@ | |||
| 25 | extern struct files_struct init_files; | 25 | extern struct files_struct init_files; |
| 26 | extern struct fs_struct init_fs; | 26 | extern struct fs_struct init_fs; |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_CGROUPS | ||
| 29 | #define INIT_GROUP_RWSEM(sig) \ | ||
| 30 | .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), | ||
| 31 | #else | ||
| 32 | #define INIT_GROUP_RWSEM(sig) | ||
| 33 | #endif | ||
| 34 | |||
| 35 | #ifdef CONFIG_CPUSETS | 28 | #ifdef CONFIG_CPUSETS |
| 36 | #define INIT_CPUSET_SEQ(tsk) \ | 29 | #define INIT_CPUSET_SEQ(tsk) \ |
| 37 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), | 30 | .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), |
| @@ -55,7 +48,6 @@ extern struct fs_struct init_fs; | |||
| 55 | }, \ | 48 | }, \ |
| 56 | .cred_guard_mutex = \ | 49 | .cred_guard_mutex = \ |
| 57 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 50 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
| 58 | INIT_GROUP_RWSEM(sig) \ | ||
| 59 | } | 51 | } |
| 60 | 52 | ||
| 61 | extern struct nsproxy init_nsproxy; | 53 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h index 08a5ef6e8f25..eecc9ea6cd58 100644 --- a/include/linux/input/touchscreen.h +++ b/include/linux/input/touchscreen.h | |||
| @@ -12,9 +12,10 @@ | |||
| 12 | #include <linux/input.h> | 12 | #include <linux/input.h> |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_OF | 14 | #ifdef CONFIG_OF |
| 15 | void touchscreen_parse_of_params(struct input_dev *dev); | 15 | void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch); |
| 16 | #else | 16 | #else |
| 17 | static inline void touchscreen_parse_of_params(struct input_dev *dev) | 17 | static inline void touchscreen_parse_of_params(struct input_dev *dev, |
| 18 | bool multitouch) | ||
| 18 | { | 19 | { |
| 19 | } | 20 | } |
| 20 | #endif | 21 | #endif |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3665cb331ca1..d9a366d24e3b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -297,6 +297,7 @@ struct q_inval { | |||
| 297 | /* 1MB - maximum possible interrupt remapping table size */ | 297 | /* 1MB - maximum possible interrupt remapping table size */ |
| 298 | #define INTR_REMAP_PAGE_ORDER 8 | 298 | #define INTR_REMAP_PAGE_ORDER 8 |
| 299 | #define INTR_REMAP_TABLE_REG_SIZE 0xf | 299 | #define INTR_REMAP_TABLE_REG_SIZE 0xf |
| 300 | #define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf | ||
| 300 | 301 | ||
| 301 | #define INTR_REMAP_TABLE_ENTRIES 65536 | 302 | #define INTR_REMAP_TABLE_ENTRIES 65536 |
| 302 | 303 | ||
| @@ -323,6 +324,9 @@ enum { | |||
| 323 | MAX_SR_DMAR_REGS | 324 | MAX_SR_DMAR_REGS |
| 324 | }; | 325 | }; |
| 325 | 326 | ||
| 327 | #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) | ||
| 328 | #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) | ||
| 329 | |||
| 326 | struct intel_iommu { | 330 | struct intel_iommu { |
| 327 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 331 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
| 328 | u64 reg_phys; /* physical address of hw register set */ | 332 | u64 reg_phys; /* physical address of hw register set */ |
| @@ -356,6 +360,7 @@ struct intel_iommu { | |||
| 356 | #endif | 360 | #endif |
| 357 | struct device *iommu_dev; /* IOMMU-sysfs device */ | 361 | struct device *iommu_dev; /* IOMMU-sysfs device */ |
| 358 | int node; | 362 | int node; |
| 363 | u32 flags; /* Software defined flags */ | ||
| 359 | }; | 364 | }; |
| 360 | 365 | ||
| 361 | static inline void __iommu_flush_cache( | 366 | static inline void __iommu_flush_cache( |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0546b8710ce3..f9c1b6d0f2e4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -114,6 +114,20 @@ enum iommu_attr { | |||
| 114 | DOMAIN_ATTR_MAX, | 114 | DOMAIN_ATTR_MAX, |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | /** | ||
| 118 | * struct iommu_dm_region - descriptor for a direct mapped memory region | ||
| 119 | * @list: Linked list pointers | ||
| 120 | * @start: System physical start address of the region | ||
| 121 | * @length: Length of the region in bytes | ||
| 122 | * @prot: IOMMU Protection flags (READ/WRITE/...) | ||
| 123 | */ | ||
| 124 | struct iommu_dm_region { | ||
| 125 | struct list_head list; | ||
| 126 | phys_addr_t start; | ||
| 127 | size_t length; | ||
| 128 | int prot; | ||
| 129 | }; | ||
| 130 | |||
| 117 | #ifdef CONFIG_IOMMU_API | 131 | #ifdef CONFIG_IOMMU_API |
| 118 | 132 | ||
| 119 | /** | 133 | /** |
| @@ -159,6 +173,10 @@ struct iommu_ops { | |||
| 159 | int (*domain_set_attr)(struct iommu_domain *domain, | 173 | int (*domain_set_attr)(struct iommu_domain *domain, |
| 160 | enum iommu_attr attr, void *data); | 174 | enum iommu_attr attr, void *data); |
| 161 | 175 | ||
| 176 | /* Request/Free a list of direct mapping requirements for a device */ | ||
| 177 | void (*get_dm_regions)(struct device *dev, struct list_head *list); | ||
| 178 | void (*put_dm_regions)(struct device *dev, struct list_head *list); | ||
| 179 | |||
| 162 | /* Window handling functions */ | 180 | /* Window handling functions */ |
| 163 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, | 181 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, |
| 164 | phys_addr_t paddr, u64 size, int prot); | 182 | phys_addr_t paddr, u64 size, int prot); |
| @@ -193,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain, | |||
| 193 | struct device *dev); | 211 | struct device *dev); |
| 194 | extern void iommu_detach_device(struct iommu_domain *domain, | 212 | extern void iommu_detach_device(struct iommu_domain *domain, |
| 195 | struct device *dev); | 213 | struct device *dev); |
| 214 | extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); | ||
| 196 | extern int iommu_map(struct iommu_domain *domain, unsigned long iova, | 215 | extern int iommu_map(struct iommu_domain *domain, unsigned long iova, |
| 197 | phys_addr_t paddr, size_t size, int prot); | 216 | phys_addr_t paddr, size_t size, int prot); |
| 198 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, | 217 | extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
| @@ -204,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io | |||
| 204 | extern void iommu_set_fault_handler(struct iommu_domain *domain, | 223 | extern void iommu_set_fault_handler(struct iommu_domain *domain, |
| 205 | iommu_fault_handler_t handler, void *token); | 224 | iommu_fault_handler_t handler, void *token); |
| 206 | 225 | ||
| 226 | extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); | ||
| 227 | extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); | ||
| 228 | extern int iommu_request_dm_for_dev(struct device *dev); | ||
| 229 | |||
| 207 | extern int iommu_attach_group(struct iommu_domain *domain, | 230 | extern int iommu_attach_group(struct iommu_domain *domain, |
| 208 | struct iommu_group *group); | 231 | struct iommu_group *group); |
| 209 | extern void iommu_detach_group(struct iommu_domain *domain, | 232 | extern void iommu_detach_group(struct iommu_domain *domain, |
| @@ -227,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group, | |||
| 227 | struct notifier_block *nb); | 250 | struct notifier_block *nb); |
| 228 | extern int iommu_group_id(struct iommu_group *group); | 251 | extern int iommu_group_id(struct iommu_group *group); |
| 229 | extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); | 252 | extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); |
| 253 | extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); | ||
| 230 | 254 | ||
| 231 | extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, | 255 | extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, |
| 232 | void *data); | 256 | void *data); |
| @@ -234,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, | |||
| 234 | void *data); | 258 | void *data); |
| 235 | struct device *iommu_device_create(struct device *parent, void *drvdata, | 259 | struct device *iommu_device_create(struct device *parent, void *drvdata, |
| 236 | const struct attribute_group **groups, | 260 | const struct attribute_group **groups, |
| 237 | const char *fmt, ...); | 261 | const char *fmt, ...) __printf(4, 5); |
| 238 | void iommu_device_destroy(struct device *dev); | 262 | void iommu_device_destroy(struct device *dev); |
| 239 | int iommu_device_link(struct device *dev, struct device *link); | 263 | int iommu_device_link(struct device *dev, struct device *link); |
| 240 | void iommu_device_unlink(struct device *dev, struct device *link); | 264 | void iommu_device_unlink(struct device *dev, struct device *link); |
| @@ -332,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain, | |||
| 332 | { | 356 | { |
| 333 | } | 357 | } |
| 334 | 358 | ||
| 359 | static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) | ||
| 360 | { | ||
| 361 | return NULL; | ||
| 362 | } | ||
| 363 | |||
| 335 | static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, | 364 | static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, |
| 336 | phys_addr_t paddr, int gfp_order, int prot) | 365 | phys_addr_t paddr, int gfp_order, int prot) |
| 337 | { | 366 | { |
| @@ -373,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, | |||
| 373 | { | 402 | { |
| 374 | } | 403 | } |
| 375 | 404 | ||
| 405 | static inline void iommu_get_dm_regions(struct device *dev, | ||
| 406 | struct list_head *list) | ||
| 407 | { | ||
| 408 | } | ||
| 409 | |||
| 410 | static inline void iommu_put_dm_regions(struct device *dev, | ||
| 411 | struct list_head *list) | ||
| 412 | { | ||
| 413 | } | ||
| 414 | |||
| 415 | static inline int iommu_request_dm_for_dev(struct device *dev) | ||
| 416 | { | ||
| 417 | return -ENODEV; | ||
| 418 | } | ||
| 419 | |||
| 376 | static inline int iommu_attach_group(struct iommu_domain *domain, | 420 | static inline int iommu_attach_group(struct iommu_domain *domain, |
| 377 | struct iommu_group *group) | 421 | struct iommu_group *group) |
| 378 | { | 422 | { |
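The get_dm_regions()/put_dm_regions() callbacks let an IOMMU driver hand the core a list of struct iommu_dm_region entries that must stay direct-mapped. A hedged sketch of a driver implementation (the example_* names and the addresses are placeholders, not taken from a real driver):

    static void example_get_dm_regions(struct device *dev, struct list_head *list)
    {
    	struct iommu_dm_region *region;

    	region = kzalloc(sizeof(*region), GFP_KERNEL);
    	if (!region)
    		return;

    	region->start  = 0xfee00000;		/* placeholder physical base */
    	region->length = 0x100000;		/* 1 MB window */
    	region->prot   = IOMMU_READ | IOMMU_WRITE;
    	list_add_tail(&region->list, list);
    }

    static void example_put_dm_regions(struct device *dev, struct list_head *list)
    {
    	struct iommu_dm_region *region, *next;

    	list_for_each_entry_safe(region, next, list, list)
    		kfree(region);
    }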
diff --git a/include/linux/irq.h b/include/linux/irq.h index 812149160d3b..92188b0225bb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -407,7 +407,6 @@ enum { | |||
| 407 | IRQCHIP_EOI_THREADED = (1 << 6), | 407 | IRQCHIP_EOI_THREADED = (1 << 6), |
| 408 | }; | 408 | }; |
| 409 | 409 | ||
| 410 | /* This include will go away once we isolated irq_desc usage to core code */ | ||
| 411 | #include <linux/irqdesc.h> | 410 | #include <linux/irqdesc.h> |
| 412 | 411 | ||
| 413 | /* | 412 | /* |
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 14d79131f53d..638887376e58 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h | |||
| @@ -11,6 +11,20 @@ | |||
| 11 | #ifndef _LINUX_IRQCHIP_H | 11 | #ifndef _LINUX_IRQCHIP_H |
| 12 | #define _LINUX_IRQCHIP_H | 12 | #define _LINUX_IRQCHIP_H |
| 13 | 13 | ||
| 14 | #include <linux/of.h> | ||
| 15 | |||
| 16 | /* | ||
| 17 | * This macro must be used by the different irqchip drivers to declare | ||
| 18 | * the association between their DT compatible string and their | ||
| 19 | * initialization function. | ||
| 20 | * | ||
| 21 | * @name: name that must be unique across all IRQCHIP_DECLARE of the | ||
| 22 | * same file. | ||
| 23 | * @compat: compatible string of the irqchip driver | ||
| 24 | * @fn: initialization function | ||
| 25 | */ | ||
| 26 | #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) | ||
| 27 | |||
| 14 | #ifdef CONFIG_IRQCHIP | 28 | #ifdef CONFIG_IRQCHIP |
| 15 | void irqchip_init(void); | 29 | void irqchip_init(void); |
| 16 | #else | 30 | #else |
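A hedged example of the declaration pattern the comment describes; the driver name and compatible string here are made up:

    static int __init acme_intc_of_init(struct device_node *node,
    				    struct device_node *parent)
    {
    	/* map registers, create the irq domain, register the chip, ... */
    	return 0;
    }
    IRQCHIP_DECLARE(acme_intc, "acme,intc-v1", acme_intc_of_init);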
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h new file mode 100644 index 000000000000..0ee319a4029d --- /dev/null +++ b/include/linux/irqchip/ingenic.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License as published by the | ||
| 6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 7 | * option) any later version. | ||
| 8 | * | ||
| 9 | * You should have received a copy of the GNU General Public License along | ||
| 10 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 11 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef __LINUX_IRQCHIP_INGENIC_H__ | ||
| 16 | #define __LINUX_IRQCHIP_INGENIC_H__ | ||
| 17 | |||
| 18 | #include <linux/irq.h> | ||
| 19 | |||
| 20 | extern void ingenic_intc_irq_suspend(struct irq_data *data); | ||
| 21 | extern void ingenic_intc_irq_resume(struct irq_data *data); | ||
| 22 | |||
| 23 | #endif | ||
diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h new file mode 100644 index 000000000000..15db6829c1e4 --- /dev/null +++ b/include/linux/irqchip/irq-sa11x0.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /* | ||
| 2 | * Generic IRQ handling for the SA11x0. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Dmitry Eremin-Solenikov | ||
| 5 | * Copyright (C) 1999-2001 Nicolas Pitre | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H | ||
| 13 | #define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H | ||
| 14 | |||
| 15 | void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start); | ||
| 16 | |||
| 17 | #endif | ||
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c52d1480f272..fcea4e48e21f 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -3,9 +3,6 @@ | |||
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * Core internal functions to deal with irq descriptors | 5 | * Core internal functions to deal with irq descriptors |
| 6 | * | ||
| 7 | * This include will move to kernel/irq once we cleaned up the tree. | ||
| 8 | * For now it's included from <linux/irq.h> | ||
| 9 | */ | 6 | */ |
| 10 | 7 | ||
| 11 | struct irq_affinity_notify; | 8 | struct irq_affinity_notify; |
| @@ -90,7 +87,12 @@ struct irq_desc { | |||
| 90 | const char *name; | 87 | const char *name; |
| 91 | } ____cacheline_internodealigned_in_smp; | 88 | } ____cacheline_internodealigned_in_smp; |
| 92 | 89 | ||
| 93 | #ifndef CONFIG_SPARSE_IRQ | 90 | #ifdef CONFIG_SPARSE_IRQ |
| 91 | extern void irq_lock_sparse(void); | ||
| 92 | extern void irq_unlock_sparse(void); | ||
| 93 | #else | ||
| 94 | static inline void irq_lock_sparse(void) { } | ||
| 95 | static inline void irq_unlock_sparse(void) { } | ||
| 94 | extern struct irq_desc irq_desc[NR_IRQS]; | 96 | extern struct irq_desc irq_desc[NR_IRQS]; |
| 95 | #endif | 97 | #endif |
| 96 | 98 | ||
| @@ -103,6 +105,11 @@ static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) | |||
| 103 | #endif | 105 | #endif |
| 104 | } | 106 | } |
| 105 | 107 | ||
| 108 | static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) | ||
| 109 | { | ||
| 110 | return desc->irq_data.irq; | ||
| 111 | } | ||
| 112 | |||
| 106 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) | 113 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) |
| 107 | { | 114 | { |
| 108 | return &desc->irq_data; | 115 | return &desc->irq_data; |
| @@ -188,6 +195,47 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | |||
| 188 | desc->name = name; | 195 | desc->name = name; |
| 189 | } | 196 | } |
| 190 | 197 | ||
| 198 | /** | ||
| 199 | * irq_set_handler_locked - Set irq handler from a locked region | ||
| 200 | * @data: Pointer to the irq_data structure which identifies the irq | ||
| 201 | * @handler: Flow control handler function for this interrupt | ||
| 202 | * | ||
| 203 | * Sets the handler in the irq descriptor associated to @data. | ||
| 204 | * | ||
| 205 | * Must be called with irq_desc locked and valid parameters. Typical | ||
| 206 | * call site is the irq_set_type() callback. | ||
| 207 | */ | ||
| 208 | static inline void irq_set_handler_locked(struct irq_data *data, | ||
| 209 | irq_flow_handler_t handler) | ||
| 210 | { | ||
| 211 | struct irq_desc *desc = irq_data_to_desc(data); | ||
| 212 | |||
| 213 | desc->handle_irq = handler; | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region | ||
| 218 | * @data: Pointer to the irq_data structure for which the chip is set | ||
| 219 | * @chip: Pointer to the new irq chip | ||
| 220 | * @handler: Flow control handler function for this interrupt | ||
| 221 | * @name: Name of the interrupt | ||
| 222 | * | ||
| 223 | * Replace the irq chip at the proper hierarchy level in @data and | ||
| 224 | * sets the handler and name in the associated irq descriptor. | ||
| 225 | * | ||
| 226 | * Must be called with irq_desc locked and valid parameters. | ||
| 227 | */ | ||
| 228 | static inline void | ||
| 229 | irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, | ||
| 230 | irq_flow_handler_t handler, const char *name) | ||
| 231 | { | ||
| 232 | struct irq_desc *desc = irq_data_to_desc(data); | ||
| 233 | |||
| 234 | desc->handle_irq = handler; | ||
| 235 | desc->name = name; | ||
| 236 | data->chip = chip; | ||
| 237 | } | ||
| 238 | |||
| 191 | static inline int irq_balancing_disabled(unsigned int irq) | 239 | static inline int irq_balancing_disabled(unsigned int irq) |
| 192 | { | 240 | { |
| 193 | struct irq_desc *desc; | 241 | struct irq_desc *desc; |
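As the kernel-doc above notes, the typical caller of irq_set_handler_locked() is an irq_set_type() callback. A sketch of such a callback for a hypothetical chip, switching flow handlers with the descriptor lock already held:

    static int example_irq_set_type(struct irq_data *data, unsigned int type)
    {
    	switch (type) {
    	case IRQ_TYPE_LEVEL_HIGH:
    	case IRQ_TYPE_LEVEL_LOW:
    		irq_set_handler_locked(data, handle_level_irq);
    		break;
    	case IRQ_TYPE_EDGE_RISING:
    	case IRQ_TYPE_EDGE_FALLING:
    		irq_set_handler_locked(data, handle_edge_irq);
    		break;
    	default:
    		return -EINVAL;
    	}
    	/* program the hardware trigger mode here */
    	return 0;
    }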
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index fdd5cc16c9c4..9669bf9d4f48 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h | |||
| @@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset); | |||
| 23 | ; \ | 23 | ; \ |
| 24 | else | 24 | else |
| 25 | 25 | ||
| 26 | #ifdef CONFIG_SMP | ||
| 27 | #define irq_node(irq) (irq_get_irq_data(irq)->node) | ||
| 28 | #else | ||
| 29 | #define irq_node(irq) 0 | ||
| 30 | #endif | ||
| 31 | |||
| 32 | # define for_each_active_irq(irq) \ | 26 | # define for_each_active_irq(irq) \ |
| 33 | for (irq = irq_get_next_irq(0); irq < nr_irqs; \ | 27 | for (irq = irq_get_next_irq(0); irq < nr_irqs; \ |
| 34 | irq = irq_get_next_irq(irq + 1)) | 28 | irq = irq_get_next_irq(irq + 1)) |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 20e7f78041c8..edb640ae9a94 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
| @@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal); | |||
| 1035 | int jbd2_journal_next_log_block(journal_t *, unsigned long long *); | 1035 | int jbd2_journal_next_log_block(journal_t *, unsigned long long *); |
| 1036 | int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, | 1036 | int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, |
| 1037 | unsigned long *block); | 1037 | unsigned long *block); |
| 1038 | void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); | 1038 | int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); |
| 1039 | void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); | 1039 | void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); |
| 1040 | 1040 | ||
| 1041 | /* Commit management */ | 1041 | /* Commit management */ |
| @@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal); | |||
| 1157 | extern int jbd2_journal_wipe (journal_t *, int); | 1157 | extern int jbd2_journal_wipe (journal_t *, int); |
| 1158 | extern int jbd2_journal_skip_recovery (journal_t *); | 1158 | extern int jbd2_journal_skip_recovery (journal_t *); |
| 1159 | extern void jbd2_journal_update_sb_errno(journal_t *); | 1159 | extern void jbd2_journal_update_sb_errno(journal_t *); |
| 1160 | extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, | 1160 | extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, |
| 1161 | unsigned long, int); | 1161 | unsigned long, int); |
| 1162 | extern void __jbd2_journal_abort_hard (journal_t *); | 1162 | extern void __jbd2_journal_abort_hard (journal_t *); |
| 1163 | extern void jbd2_journal_abort (journal_t *, int); | 1163 | extern void jbd2_journal_abort (journal_t *, int); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 060dd7b61c6d..5582410727cb 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -411,7 +411,8 @@ extern __printf(3, 0) | |||
| 411 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); | 411 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
| 412 | extern __printf(2, 3) | 412 | extern __printf(2, 3) |
| 413 | char *kasprintf(gfp_t gfp, const char *fmt, ...); | 413 | char *kasprintf(gfp_t gfp, const char *fmt, ...); |
| 414 | extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); | 414 | extern __printf(2, 0) |
| 415 | char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); | ||
| 415 | 416 | ||
| 416 | extern __scanf(2, 3) | 417 | extern __scanf(2, 3) |
| 417 | int sscanf(const char *, const char *, ...); | 418 | int sscanf(const char *, const char *, ...); |
| @@ -439,6 +440,9 @@ extern int panic_on_unrecovered_nmi; | |||
| 439 | extern int panic_on_io_nmi; | 440 | extern int panic_on_io_nmi; |
| 440 | extern int panic_on_warn; | 441 | extern int panic_on_warn; |
| 441 | extern int sysctl_panic_on_stackoverflow; | 442 | extern int sysctl_panic_on_stackoverflow; |
| 443 | |||
| 444 | extern bool crash_kexec_post_notifiers; | ||
| 445 | |||
| 442 | /* | 446 | /* |
| 443 | * Only to be used by arch init code. If the user over-wrote the default | 447 | * Only to be used by arch init code. If the user over-wrote the default |
| 444 | * CONFIG_PANIC_TIMEOUT, honor it. | 448 | * CONFIG_PANIC_TIMEOUT, honor it. |
| @@ -533,12 +537,6 @@ bool mac_pton(const char *s, u8 *mac); | |||
| 533 | * | 537 | * |
| 534 | * Most likely, you want to use tracing_on/tracing_off. | 538 | * Most likely, you want to use tracing_on/tracing_off. |
| 535 | */ | 539 | */ |
| 536 | #ifdef CONFIG_RING_BUFFER | ||
| 537 | /* trace_off_permanent stops recording with no way to bring it back */ | ||
| 538 | void tracing_off_permanent(void); | ||
| 539 | #else | ||
| 540 | static inline void tracing_off_permanent(void) { } | ||
| 541 | #endif | ||
| 542 | 540 | ||
| 543 | enum ftrace_dump_mode { | 541 | enum ftrace_dump_mode { |
| 544 | DUMP_NONE, | 542 | DUMP_NONE, |
| @@ -682,10 +680,10 @@ do { \ | |||
| 682 | __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ | 680 | __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ |
| 683 | } while (0) | 681 | } while (0) |
| 684 | 682 | ||
| 685 | extern int | 683 | extern __printf(2, 0) int |
| 686 | __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); | 684 | __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); |
| 687 | 685 | ||
| 688 | extern int | 686 | extern __printf(2, 0) int |
| 689 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | 687 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); |
| 690 | 688 | ||
| 691 | extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); | 689 | extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); |
| @@ -705,7 +703,7 @@ int trace_printk(const char *fmt, ...) | |||
| 705 | { | 703 | { |
| 706 | return 0; | 704 | return 0; |
| 707 | } | 705 | } |
| 708 | static inline int | 706 | static __printf(1, 0) inline int |
| 709 | ftrace_vprintk(const char *fmt, va_list ap) | 707 | ftrace_vprintk(const char *fmt, va_list ap) |
| 710 | { | 708 | { |
| 711 | return 0; | 709 | return 0; |
| @@ -819,13 +817,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 819 | #endif | 817 | #endif |
| 820 | 818 | ||
| 821 | /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ | 819 | /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ |
| 822 | #define VERIFY_OCTAL_PERMISSIONS(perms) \ | 820 | #define VERIFY_OCTAL_PERMISSIONS(perms) \ |
| 823 | (BUILD_BUG_ON_ZERO((perms) < 0) + \ | 821 | (BUILD_BUG_ON_ZERO((perms) < 0) + \ |
| 824 | BUILD_BUG_ON_ZERO((perms) > 0777) + \ | 822 | BUILD_BUG_ON_ZERO((perms) > 0777) + \ |
| 825 | /* User perms >= group perms >= other perms */ \ | 823 | /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \ |
| 826 | BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \ | 824 | BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \ |
| 827 | BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \ | 825 | BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \ |
| 828 | /* Other writable? Generally considered a bad idea. */ \ | 826 | /* USER_WRITABLE >= GROUP_WRITABLE */ \ |
| 829 | BUILD_BUG_ON_ZERO((perms) & 2) + \ | 827 | BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \ |
| 828 | /* OTHER_WRITABLE? Generally considered a bad idea. */ \ | ||
| 829 | BUILD_BUG_ON_ZERO((perms) & 2) + \ | ||
| 830 | (perms)) | 830 | (perms)) |
| 831 | #endif | 831 | #endif |
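The reworked VERIFY_OCTAL_PERMISSIONS() compares the read and write bits per class instead of whole octal digits: user read >= group read >= other read, user write >= group write, and other-write is rejected outright. Illustrative examples (example_param is a placeholder):

    static int example_param;
    module_param(example_param, int, 0644);	/* passes: rw-r--r-- */
    /* module_param(example_param, int, 0246) would trip the BUILD_BUG_ON:
     * group and other can read what the user cannot, and other is writable. */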
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 71ecdab1671b..123be25ea15a 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
| @@ -45,6 +45,7 @@ enum kernfs_node_flag { | |||
| 45 | KERNFS_LOCKDEP = 0x0100, | 45 | KERNFS_LOCKDEP = 0x0100, |
| 46 | KERNFS_SUICIDAL = 0x0400, | 46 | KERNFS_SUICIDAL = 0x0400, |
| 47 | KERNFS_SUICIDED = 0x0800, | 47 | KERNFS_SUICIDED = 0x0800, |
| 48 | KERNFS_EMPTY_DIR = 0x1000, | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | /* @flags for kernfs_create_root() */ | 51 | /* @flags for kernfs_create_root() */ |
| @@ -277,6 +278,7 @@ void kernfs_put(struct kernfs_node *kn); | |||
| 277 | 278 | ||
| 278 | struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry); | 279 | struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry); |
| 279 | struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); | 280 | struct kernfs_root *kernfs_root_from_sb(struct super_block *sb); |
| 281 | struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn); | ||
| 280 | 282 | ||
| 281 | struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, | 283 | struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, |
| 282 | unsigned int flags, void *priv); | 284 | unsigned int flags, void *priv); |
| @@ -285,6 +287,8 @@ void kernfs_destroy_root(struct kernfs_root *root); | |||
| 285 | struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, | 287 | struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, |
| 286 | const char *name, umode_t mode, | 288 | const char *name, umode_t mode, |
| 287 | void *priv, const void *ns); | 289 | void *priv, const void *ns); |
| 290 | struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, | ||
| 291 | const char *name); | ||
| 288 | struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, | 292 | struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, |
| 289 | const char *name, | 293 | const char *name, |
| 290 | umode_t mode, loff_t size, | 294 | umode_t mode, loff_t size, |
| @@ -352,6 +356,10 @@ static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) | |||
| 352 | static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) | 356 | static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) |
| 353 | { return NULL; } | 357 | { return NULL; } |
| 354 | 358 | ||
| 359 | static inline struct inode * | ||
| 360 | kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn) | ||
| 361 | { return NULL; } | ||
| 362 | |||
| 355 | static inline struct kernfs_root * | 363 | static inline struct kernfs_root * |
| 356 | kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, | 364 | kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags, |
| 357 | void *priv) | 365 | void *priv) |
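The new KERNFS_EMPTY_DIR flag and kernfs_create_empty_dir() helper above create a directory node that refuses children. A minimal usage sketch, assuming the helper mirrors kernfs_create_dir_ns() and returns an ERR_PTR() on failure; "parent" stands in for an existing kernfs node and "empty" is an arbitrary name:

    struct kernfs_node *kn;

    kn = kernfs_create_empty_dir(parent, "empty");
    if (IS_ERR(kn))
            return PTR_ERR(kn);
    /* kn looks like a directory to userspace, but nothing can be created in it. */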
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index e705467ddb47..d0a1f99e24e3 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
| @@ -28,7 +28,8 @@ | |||
| 28 | extern void kmemleak_init(void) __ref; | 28 | extern void kmemleak_init(void) __ref; |
| 29 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | 29 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, |
| 30 | gfp_t gfp) __ref; | 30 | gfp_t gfp) __ref; |
| 31 | extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; | 31 | extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, |
| 32 | gfp_t gfp) __ref; | ||
| 32 | extern void kmemleak_free(const void *ptr) __ref; | 33 | extern void kmemleak_free(const void *ptr) __ref; |
| 33 | extern void kmemleak_free_part(const void *ptr, size_t size) __ref; | 34 | extern void kmemleak_free_part(const void *ptr, size_t size) __ref; |
| 34 | extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; | 35 | extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; |
| @@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | |||
| 71 | gfp_t gfp) | 72 | gfp_t gfp) |
| 72 | { | 73 | { |
| 73 | } | 74 | } |
| 74 | static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) | 75 | static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, |
| 76 | gfp_t gfp) | ||
| 75 | { | 77 | { |
| 76 | } | 78 | } |
| 77 | static inline void kmemleak_free(const void *ptr) | 79 | static inline void kmemleak_free(const void *ptr) |
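The kmemleak_alloc_percpu() change above threads the caller's gfp mask through, so kmemleak can allocate its tracking metadata with flags compatible with the original allocation context. A sketch of an affected call site, assuming the percpu allocator simply forwards its own gfp argument:

    void __percpu *ptr = __alloc_percpu_gfp(size, align, gfp);

    if (ptr)
            kmemleak_alloc_percpu(ptr, size, gfp);  /* was: kmemleak_alloc_percpu(ptr, size) */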
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 2d61b909f414..637f67002c5a 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
| @@ -80,8 +80,9 @@ struct kobject { | |||
| 80 | 80 | ||
| 81 | extern __printf(2, 3) | 81 | extern __printf(2, 3) |
| 82 | int kobject_set_name(struct kobject *kobj, const char *name, ...); | 82 | int kobject_set_name(struct kobject *kobj, const char *name, ...); |
| 83 | extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | 83 | extern __printf(2, 0) |
| 84 | va_list vargs); | 84 | int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, |
| 85 | va_list vargs); | ||
| 85 | 86 | ||
| 86 | static inline const char *kobject_name(const struct kobject *kobj) | 87 | static inline const char *kobject_name(const struct kobject *kobj) |
| 87 | { | 88 | { |
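__printf(2, 0) above tells the compiler that argument 2 of kobject_set_name_vargs() is a printf-style format with no variadic arguments to type-check (the values arrive as a va_list). A small sketch of a wrapper carrying the same annotation; example_set_name() is illustrative only:

    static __printf(2, 0)
    int example_set_name(struct kobject *kobj, const char *fmt, va_list args)
    {
            return kobject_set_name_vargs(kobj, fmt, args);
    }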
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ad45054309a0..05e99b8ef465 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -44,6 +44,10 @@ | |||
| 44 | /* Two fragments for cross MMIO pages. */ | 44 | /* Two fragments for cross MMIO pages. */ |
| 45 | #define KVM_MAX_MMIO_FRAGMENTS 2 | 45 | #define KVM_MAX_MMIO_FRAGMENTS 2 |
| 46 | 46 | ||
| 47 | #ifndef KVM_ADDRESS_SPACE_NUM | ||
| 48 | #define KVM_ADDRESS_SPACE_NUM 1 | ||
| 49 | #endif | ||
| 50 | |||
| 47 | /* | 51 | /* |
| 48 | * For the normal pfn, the highest 12 bits should be zero, | 52 | * For the normal pfn, the highest 12 bits should be zero, |
| 49 | * so we can mask bit 62 ~ bit 52 to indicate the error pfn, | 53 | * so we can mask bit 62 ~ bit 52 to indicate the error pfn, |
| @@ -134,6 +138,7 @@ static inline bool is_error_page(struct page *page) | |||
| 134 | #define KVM_REQ_ENABLE_IBS 23 | 138 | #define KVM_REQ_ENABLE_IBS 23 |
| 135 | #define KVM_REQ_DISABLE_IBS 24 | 139 | #define KVM_REQ_DISABLE_IBS 24 |
| 136 | #define KVM_REQ_APIC_PAGE_RELOAD 25 | 140 | #define KVM_REQ_APIC_PAGE_RELOAD 25 |
| 141 | #define KVM_REQ_SMI 26 | ||
| 137 | 142 | ||
| 138 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 143 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
| 139 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 | 144 | #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 |
| @@ -230,6 +235,7 @@ struct kvm_vcpu { | |||
| 230 | 235 | ||
| 231 | int fpu_active; | 236 | int fpu_active; |
| 232 | int guest_fpu_loaded, guest_xcr0_loaded; | 237 | int guest_fpu_loaded, guest_xcr0_loaded; |
| 238 | unsigned char fpu_counter; | ||
| 233 | wait_queue_head_t wq; | 239 | wait_queue_head_t wq; |
| 234 | struct pid *pid; | 240 | struct pid *pid; |
| 235 | int sigset_active; | 241 | int sigset_active; |
| @@ -329,6 +335,13 @@ struct kvm_kernel_irq_routing_entry { | |||
| 329 | #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) | 335 | #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
| 330 | #endif | 336 | #endif |
| 331 | 337 | ||
| 338 | #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE | ||
| 339 | static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) | ||
| 340 | { | ||
| 341 | return 0; | ||
| 342 | } | ||
| 343 | #endif | ||
| 344 | |||
| 332 | /* | 345 | /* |
| 333 | * Note: | 346 | * Note: |
| 334 | * memslots are not sorted by id anymore, please use id_to_memslot() | 347 | * memslots are not sorted by id anymore, please use id_to_memslot() |
| @@ -347,7 +360,7 @@ struct kvm { | |||
| 347 | spinlock_t mmu_lock; | 360 | spinlock_t mmu_lock; |
| 348 | struct mutex slots_lock; | 361 | struct mutex slots_lock; |
| 349 | struct mm_struct *mm; /* userspace tied to this vm */ | 362 | struct mm_struct *mm; /* userspace tied to this vm */ |
| 350 | struct kvm_memslots *memslots; | 363 | struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; |
| 351 | struct srcu_struct srcu; | 364 | struct srcu_struct srcu; |
| 352 | struct srcu_struct irq_srcu; | 365 | struct srcu_struct irq_srcu; |
| 353 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE | 366 | #ifdef CONFIG_KVM_APIC_ARCHITECTURE |
| @@ -462,13 +475,25 @@ void kvm_exit(void); | |||
| 462 | void kvm_get_kvm(struct kvm *kvm); | 475 | void kvm_get_kvm(struct kvm *kvm); |
| 463 | void kvm_put_kvm(struct kvm *kvm); | 476 | void kvm_put_kvm(struct kvm *kvm); |
| 464 | 477 | ||
| 465 | static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) | 478 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) |
| 466 | { | 479 | { |
| 467 | return rcu_dereference_check(kvm->memslots, | 480 | return rcu_dereference_check(kvm->memslots[as_id], |
| 468 | srcu_read_lock_held(&kvm->srcu) | 481 | srcu_read_lock_held(&kvm->srcu) |
| 469 | || lockdep_is_held(&kvm->slots_lock)); | 482 | || lockdep_is_held(&kvm->slots_lock)); |
| 470 | } | 483 | } |
| 471 | 484 | ||
| 485 | static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) | ||
| 486 | { | ||
| 487 | return __kvm_memslots(kvm, 0); | ||
| 488 | } | ||
| 489 | |||
| 490 | static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) | ||
| 491 | { | ||
| 492 | int as_id = kvm_arch_vcpu_memslots_id(vcpu); | ||
| 493 | |||
| 494 | return __kvm_memslots(vcpu->kvm, as_id); | ||
| 495 | } | ||
| 496 | |||
| 472 | static inline struct kvm_memory_slot * | 497 | static inline struct kvm_memory_slot * |
| 473 | id_to_memslot(struct kvm_memslots *slots, int id) | 498 | id_to_memslot(struct kvm_memslots *slots, int id) |
| 474 | { | 499 | { |
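With the hunks above, kvm->memslots becomes one set of slots per guest address space (on x86 this is used to give SMM its own slots), and kvm_vcpu_memslots() picks the set matching the vcpu's current mode via kvm_arch_vcpu_memslots_id(). A sketch of how lookups differ, with vcpu/kvm/id standing in for real callers:

    struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);   /* per-vcpu address space */
    struct kvm_memory_slot *slot = id_to_memslot(slots, id);

    /* Callers that only deal with address space 0 keep using: */
    slots = kvm_memslots(kvm);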
| @@ -500,21 +525,22 @@ enum kvm_mr_change { | |||
| 500 | }; | 525 | }; |
| 501 | 526 | ||
| 502 | int kvm_set_memory_region(struct kvm *kvm, | 527 | int kvm_set_memory_region(struct kvm *kvm, |
| 503 | struct kvm_userspace_memory_region *mem); | 528 | const struct kvm_userspace_memory_region *mem); |
| 504 | int __kvm_set_memory_region(struct kvm *kvm, | 529 | int __kvm_set_memory_region(struct kvm *kvm, |
| 505 | struct kvm_userspace_memory_region *mem); | 530 | const struct kvm_userspace_memory_region *mem); |
| 506 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | 531 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
| 507 | struct kvm_memory_slot *dont); | 532 | struct kvm_memory_slot *dont); |
| 508 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | 533 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
| 509 | unsigned long npages); | 534 | unsigned long npages); |
| 510 | void kvm_arch_memslots_updated(struct kvm *kvm); | 535 | void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); |
| 511 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 536 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
| 512 | struct kvm_memory_slot *memslot, | 537 | struct kvm_memory_slot *memslot, |
| 513 | struct kvm_userspace_memory_region *mem, | 538 | const struct kvm_userspace_memory_region *mem, |
| 514 | enum kvm_mr_change change); | 539 | enum kvm_mr_change change); |
| 515 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 540 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
| 516 | struct kvm_userspace_memory_region *mem, | 541 | const struct kvm_userspace_memory_region *mem, |
| 517 | const struct kvm_memory_slot *old, | 542 | const struct kvm_memory_slot *old, |
| 543 | const struct kvm_memory_slot *new, | ||
| 518 | enum kvm_mr_change change); | 544 | enum kvm_mr_change change); |
| 519 | bool kvm_largepages_enabled(void); | 545 | bool kvm_largepages_enabled(void); |
| 520 | void kvm_disable_largepages(void); | 546 | void kvm_disable_largepages(void); |
| @@ -524,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm); | |||
| 524 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | 550 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
| 525 | struct kvm_memory_slot *slot); | 551 | struct kvm_memory_slot *slot); |
| 526 | 552 | ||
| 527 | int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, | 553 | int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, |
| 528 | int nr_pages); | 554 | struct page **pages, int nr_pages); |
| 529 | 555 | ||
| 530 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 556 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
| 531 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 557 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
| @@ -538,13 +564,13 @@ void kvm_release_page_dirty(struct page *page); | |||
| 538 | void kvm_set_page_accessed(struct page *page); | 564 | void kvm_set_page_accessed(struct page *page); |
| 539 | 565 | ||
| 540 | pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); | 566 | pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); |
| 541 | pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, | ||
| 542 | bool write_fault, bool *writable); | ||
| 543 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); | 567 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); |
| 544 | pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, | 568 | pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, |
| 545 | bool *writable); | 569 | bool *writable); |
| 546 | pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); | 570 | pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); |
| 547 | pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); | 571 | pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); |
| 572 | pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, | ||
| 573 | bool *async, bool write_fault, bool *writable); | ||
| 548 | 574 | ||
| 549 | void kvm_release_pfn_clean(pfn_t pfn); | 575 | void kvm_release_pfn_clean(pfn_t pfn); |
| 550 | void kvm_set_pfn_dirty(pfn_t pfn); | 576 | void kvm_set_pfn_dirty(pfn_t pfn); |
| @@ -573,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); | |||
| 573 | unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); | 599 | unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); |
| 574 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); | 600 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
| 575 | 601 | ||
| 602 | struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); | ||
| 603 | struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 604 | pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 605 | pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 606 | struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 607 | unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 608 | unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); | ||
| 609 | int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, | ||
| 610 | int len); | ||
| 611 | int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, | ||
| 612 | unsigned long len); | ||
| 613 | int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, | ||
| 614 | unsigned long len); | ||
| 615 | int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, | ||
| 616 | int offset, int len); | ||
| 617 | int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, | ||
| 618 | unsigned long len); | ||
| 619 | void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
| 620 | |||
| 576 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); | 621 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
| 577 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | 622 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
| 578 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); | 623 | int kvm_vcpu_yield_to(struct kvm_vcpu *target); |
| @@ -689,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) | |||
| 689 | return false; | 734 | return false; |
| 690 | } | 735 | } |
| 691 | #endif | 736 | #endif |
| 737 | #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE | ||
| 738 | void kvm_arch_start_assignment(struct kvm *kvm); | ||
| 739 | void kvm_arch_end_assignment(struct kvm *kvm); | ||
| 740 | bool kvm_arch_has_assigned_device(struct kvm *kvm); | ||
| 741 | #else | ||
| 742 | static inline void kvm_arch_start_assignment(struct kvm *kvm) | ||
| 743 | { | ||
| 744 | } | ||
| 745 | |||
| 746 | static inline void kvm_arch_end_assignment(struct kvm *kvm) | ||
| 747 | { | ||
| 748 | } | ||
| 749 | |||
| 750 | static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) | ||
| 751 | { | ||
| 752 | return false; | ||
| 753 | } | ||
| 754 | #endif | ||
| 692 | 755 | ||
| 693 | static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) | 756 | static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) |
| 694 | { | 757 | { |
| @@ -762,16 +825,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, | |||
| 762 | } | 825 | } |
| 763 | #endif | 826 | #endif |
| 764 | 827 | ||
| 765 | static inline void kvm_guest_enter(void) | 828 | /* must be called with irqs disabled */ |
| 829 | static inline void __kvm_guest_enter(void) | ||
| 766 | { | 830 | { |
| 767 | unsigned long flags; | ||
| 768 | |||
| 769 | BUG_ON(preemptible()); | ||
| 770 | |||
| 771 | local_irq_save(flags); | ||
| 772 | guest_enter(); | 831 | guest_enter(); |
| 773 | local_irq_restore(flags); | ||
| 774 | |||
| 775 | /* KVM does not hold any references to rcu protected data when it | 832 | /* KVM does not hold any references to rcu protected data when it |
| 776 | * switches CPU into a guest mode. In fact switching to a guest mode | 833 | * switches CPU into a guest mode. In fact switching to a guest mode |
| 777 | * is very similar to exiting to userspace from rcu point of view. In | 834 | * is very similar to exiting to userspace from rcu point of view. In |
| @@ -783,12 +840,27 @@ static inline void kvm_guest_enter(void) | |||
| 783 | rcu_virt_note_context_switch(smp_processor_id()); | 840 | rcu_virt_note_context_switch(smp_processor_id()); |
| 784 | } | 841 | } |
| 785 | 842 | ||
| 843 | /* must be called with irqs disabled */ | ||
| 844 | static inline void __kvm_guest_exit(void) | ||
| 845 | { | ||
| 846 | guest_exit(); | ||
| 847 | } | ||
| 848 | |||
| 849 | static inline void kvm_guest_enter(void) | ||
| 850 | { | ||
| 851 | unsigned long flags; | ||
| 852 | |||
| 853 | local_irq_save(flags); | ||
| 854 | __kvm_guest_enter(); | ||
| 855 | local_irq_restore(flags); | ||
| 856 | } | ||
| 857 | |||
| 786 | static inline void kvm_guest_exit(void) | 858 | static inline void kvm_guest_exit(void) |
| 787 | { | 859 | { |
| 788 | unsigned long flags; | 860 | unsigned long flags; |
| 789 | 861 | ||
| 790 | local_irq_save(flags); | 862 | local_irq_save(flags); |
| 791 | guest_exit(); | 863 | __kvm_guest_exit(); |
| 792 | local_irq_restore(flags); | 864 | local_irq_restore(flags); |
| 793 | } | 865 | } |
| 794 | 866 | ||
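The guest enter/exit rework above splits out __kvm_guest_enter()/__kvm_guest_exit(), which must run with interrupts disabled, so arch code already in an irq-off vcpu-run path can skip the redundant local_irq_save()/restore() done by the existing wrappers. A hedged sketch of such a path:

    local_irq_disable();
    __kvm_guest_enter();    /* irqs already off: no extra save/restore */
    /* ... hardware virtualization entry and exit ... */
    __kvm_guest_exit();
    local_irq_enable();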
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 931da7e917cf..1b47a185c2f0 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
| @@ -28,6 +28,7 @@ struct kvm_run; | |||
| 28 | struct kvm_userspace_memory_region; | 28 | struct kvm_userspace_memory_region; |
| 29 | struct kvm_vcpu; | 29 | struct kvm_vcpu; |
| 30 | struct kvm_vcpu_init; | 30 | struct kvm_vcpu_init; |
| 31 | struct kvm_memslots; | ||
| 31 | 32 | ||
| 32 | enum kvm_mr_change; | 33 | enum kvm_mr_change; |
| 33 | 34 | ||
diff --git a/include/linux/leds.h b/include/linux/leds.h index 9a2b000094cf..b122eeafb5dc 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #ifndef __LINUX_LEDS_H_INCLUDED | 12 | #ifndef __LINUX_LEDS_H_INCLUDED |
| 13 | #define __LINUX_LEDS_H_INCLUDED | 13 | #define __LINUX_LEDS_H_INCLUDED |
| 14 | 14 | ||
| 15 | #include <linux/device.h> | ||
| 15 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 16 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
| 17 | #include <linux/rwsem.h> | 18 | #include <linux/rwsem.h> |
| @@ -222,6 +223,11 @@ struct led_trigger { | |||
| 222 | struct list_head next_trig; | 223 | struct list_head next_trig; |
| 223 | }; | 224 | }; |
| 224 | 225 | ||
| 226 | ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, | ||
| 227 | const char *buf, size_t count); | ||
| 228 | ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, | ||
| 229 | char *buf); | ||
| 230 | |||
| 225 | /* Registration functions for complex triggers */ | 231 | /* Registration functions for complex triggers */ |
| 226 | extern int led_trigger_register(struct led_trigger *trigger); | 232 | extern int led_trigger_register(struct led_trigger *trigger); |
| 227 | extern void led_trigger_unregister(struct led_trigger *trigger); | 233 | extern void led_trigger_unregister(struct led_trigger *trigger); |
| @@ -238,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger, | |||
| 238 | unsigned long *delay_on, | 244 | unsigned long *delay_on, |
| 239 | unsigned long *delay_off, | 245 | unsigned long *delay_off, |
| 240 | int invert); | 246 | int invert); |
| 247 | extern void led_trigger_set_default(struct led_classdev *led_cdev); | ||
| 248 | extern void led_trigger_set(struct led_classdev *led_cdev, | ||
| 249 | struct led_trigger *trigger); | ||
| 250 | extern void led_trigger_remove(struct led_classdev *led_cdev); | ||
| 251 | |||
| 252 | static inline void *led_get_trigger_data(struct led_classdev *led_cdev) | ||
| 253 | { | ||
| 254 | return led_cdev->trigger_data; | ||
| 255 | } | ||
| 256 | |||
| 241 | /** | 257 | /** |
| 242 | * led_trigger_rename_static - rename a trigger | 258 | * led_trigger_rename_static - rename a trigger |
| 243 | * @name: the new trigger name | 259 | * @name: the new trigger name |
| @@ -267,6 +283,15 @@ static inline void led_trigger_register_simple(const char *name, | |||
| 267 | static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} | 283 | static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} |
| 268 | static inline void led_trigger_event(struct led_trigger *trigger, | 284 | static inline void led_trigger_event(struct led_trigger *trigger, |
| 269 | enum led_brightness event) {} | 285 | enum led_brightness event) {} |
| 286 | static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} | ||
| 287 | static inline void led_trigger_set(struct led_classdev *led_cdev, | ||
| 288 | struct led_trigger *trigger) {} | ||
| 289 | static inline void led_trigger_remove(struct led_classdev *led_cdev) {} | ||
| 290 | static inline void *led_get_trigger_data(struct led_classdev *led_cdev) | ||
| 291 | { | ||
| 292 | return NULL; | ||
| 293 | } | ||
| 294 | |||
| 270 | #endif /* CONFIG_LEDS_TRIGGERS */ | 295 | #endif /* CONFIG_LEDS_TRIGGERS */ |
| 271 | 296 | ||
| 272 | /* Trigger specific functions */ | 297 | /* Trigger specific functions */ |
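The leds.h additions above export the trigger plumbing (led_trigger_set(), led_trigger_set_default(), led_trigger_remove(), the sysfs store/show helpers) and add led_get_trigger_data(), all with no-op stubs when CONFIG_LEDS_TRIGGERS is off. A minimal sketch of a trigger stashing private data; struct example_data and both callbacks are illustrative:

    struct example_data { unsigned long count; };

    static void example_trig_activate(struct led_classdev *led_cdev)
    {
            led_cdev->trigger_data = kzalloc(sizeof(struct example_data), GFP_KERNEL);
    }

    static void example_trig_tick(struct led_classdev *led_cdev)
    {
            struct example_data *data = led_get_trigger_data(led_cdev);

            if (data)
                    data->count++;
    }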
diff --git a/include/linux/libata.h b/include/linux/libata.h index 28aeae46f355..c9cfbcdb8d14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -134,7 +134,6 @@ enum { | |||
| 134 | ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, | 134 | ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, |
| 135 | 135 | ||
| 136 | ATA_SHT_EMULATED = 1, | 136 | ATA_SHT_EMULATED = 1, |
| 137 | ATA_SHT_CMD_PER_LUN = 1, | ||
| 138 | ATA_SHT_THIS_ID = -1, | 137 | ATA_SHT_THIS_ID = -1, |
| 139 | ATA_SHT_USE_CLUSTERING = 1, | 138 | ATA_SHT_USE_CLUSTERING = 1, |
| 140 | 139 | ||
| @@ -431,6 +430,9 @@ enum { | |||
| 431 | ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ | 430 | ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ |
| 432 | ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ | 431 | ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ |
| 433 | ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ | 432 | ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ |
| 433 | ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ | ||
| 434 | ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ | ||
| 435 | ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ | ||
| 434 | 436 | ||
| 435 | /* DMA mask for user DMA control: User visible values; DO NOT | 437 | /* DMA mask for user DMA control: User visible values; DO NOT |
| 436 | renumber */ | 438 | renumber */ |
| @@ -1364,7 +1366,6 @@ extern struct device_attribute *ata_common_sdev_attrs[]; | |||
| 1364 | .can_queue = ATA_DEF_QUEUE, \ | 1366 | .can_queue = ATA_DEF_QUEUE, \ |
| 1365 | .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ | 1367 | .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ |
| 1366 | .this_id = ATA_SHT_THIS_ID, \ | 1368 | .this_id = ATA_SHT_THIS_ID, \ |
| 1367 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ | ||
| 1368 | .emulated = ATA_SHT_EMULATED, \ | 1369 | .emulated = ATA_SHT_EMULATED, \ |
| 1369 | .use_clustering = ATA_SHT_USE_CLUSTERING, \ | 1370 | .use_clustering = ATA_SHT_USE_CLUSTERING, \ |
| 1370 | .proc_name = drv_name, \ | 1371 | .proc_name = drv_name, \ |
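The three new horkage bits above follow the existing quirk pattern: set from libata-core's device blacklist, consulted wherever the corresponding feature would be used. A sketch of the consuming side; the exact sites and the 1024-sector clamp shown here are illustrative:

    if (dev->horkage & ATA_HORKAGE_NOTRIM)
            return 0;       /* do not advertise TRIM for this device */

    if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
            dev->max_sectors = min(dev->max_sectors, 1024U);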
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index 01508c7b8c81..2a663c6bb428 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h | |||
| @@ -5,6 +5,10 @@ | |||
| 5 | 5 | ||
| 6 | #include <asm/byteorder.h> | 6 | #include <asm/byteorder.h> |
| 7 | 7 | ||
| 8 | typedef __be16 fdt16_t; | ||
| 9 | typedef __be32 fdt32_t; | ||
| 10 | typedef __be64 fdt64_t; | ||
| 11 | |||
| 8 | #define fdt32_to_cpu(x) be32_to_cpu(x) | 12 | #define fdt32_to_cpu(x) be32_to_cpu(x) |
| 9 | #define cpu_to_fdt32(x) cpu_to_be32(x) | 13 | #define cpu_to_fdt32(x) cpu_to_be32(x) |
| 10 | #define fdt64_to_cpu(x) be64_to_cpu(x) | 14 | #define fdt64_to_cpu(x) be64_to_cpu(x) |
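The new fdt16_t/fdt32_t/fdt64_t typedefs above are sparse-annotated big-endian types, so endianness mistakes in flattened-device-tree code become checkable. A two-line sketch using the conversion macros already defined in this header:

    fdt32_t wire = cpu_to_fdt32(0x12345678);   /* big-endian, as stored in the blob */
    u32 host = fdt32_to_cpu(wire);             /* back to CPU byte order */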
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h new file mode 100644 index 000000000000..75e3af01ee32 --- /dev/null +++ b/include/linux/libnvdimm.h | |||
| @@ -0,0 +1,151 @@ | |||
| 1 | /* | ||
| 2 | * libnvdimm - Non-volatile-memory Devices Subsystem | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of version 2 of the GNU General Public License as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | #ifndef __LIBNVDIMM_H__ | ||
| 16 | #define __LIBNVDIMM_H__ | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/sizes.h> | ||
| 19 | #include <linux/types.h> | ||
| 20 | |||
| 21 | enum { | ||
| 22 | /* when a dimm supports both PMEM and BLK access a label is required */ | ||
| 23 | NDD_ALIASING = 1 << 0, | ||
| 24 | /* unarmed memory devices may not persist writes */ | ||
| 25 | NDD_UNARMED = 1 << 1, | ||
| 26 | |||
| 27 | /* need to set a limit somewhere, but yes, this is likely overkill */ | ||
| 28 | ND_IOCTL_MAX_BUFLEN = SZ_4M, | ||
| 29 | ND_CMD_MAX_ELEM = 4, | ||
| 30 | ND_CMD_MAX_ENVELOPE = 16, | ||
| 31 | ND_CMD_ARS_STATUS_MAX = SZ_4K, | ||
| 32 | ND_MAX_MAPPINGS = 32, | ||
| 33 | |||
| 34 | /* mark newly adjusted resources as requiring a label update */ | ||
| 35 | DPA_RESOURCE_ADJUSTED = 1 << 0, | ||
| 36 | }; | ||
| 37 | |||
| 38 | extern struct attribute_group nvdimm_bus_attribute_group; | ||
| 39 | extern struct attribute_group nvdimm_attribute_group; | ||
| 40 | extern struct attribute_group nd_device_attribute_group; | ||
| 41 | extern struct attribute_group nd_numa_attribute_group; | ||
| 42 | extern struct attribute_group nd_region_attribute_group; | ||
| 43 | extern struct attribute_group nd_mapping_attribute_group; | ||
| 44 | |||
| 45 | struct nvdimm; | ||
| 46 | struct nvdimm_bus_descriptor; | ||
| 47 | typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, | ||
| 48 | struct nvdimm *nvdimm, unsigned int cmd, void *buf, | ||
| 49 | unsigned int buf_len); | ||
| 50 | |||
| 51 | struct nd_namespace_label; | ||
| 52 | struct nvdimm_drvdata; | ||
| 53 | struct nd_mapping { | ||
| 54 | struct nvdimm *nvdimm; | ||
| 55 | struct nd_namespace_label **labels; | ||
| 56 | u64 start; | ||
| 57 | u64 size; | ||
| 58 | /* | ||
| 59 | * @ndd is for private use at region enable / disable time for | ||
| 60 | * get_ndd() + put_ndd(), all other nd_mapping to ndd | ||
| 61 | * conversions use to_ndd() which respects enabled state of the | ||
| 62 | * nvdimm. | ||
| 63 | */ | ||
| 64 | struct nvdimm_drvdata *ndd; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct nvdimm_bus_descriptor { | ||
| 68 | const struct attribute_group **attr_groups; | ||
| 69 | unsigned long dsm_mask; | ||
| 70 | char *provider_name; | ||
| 71 | ndctl_fn ndctl; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct nd_cmd_desc { | ||
| 75 | int in_num; | ||
| 76 | int out_num; | ||
| 77 | u32 in_sizes[ND_CMD_MAX_ELEM]; | ||
| 78 | int out_sizes[ND_CMD_MAX_ELEM]; | ||
| 79 | }; | ||
| 80 | |||
| 81 | struct nd_interleave_set { | ||
| 82 | u64 cookie; | ||
| 83 | }; | ||
| 84 | |||
| 85 | struct nd_region_desc { | ||
| 86 | struct resource *res; | ||
| 87 | struct nd_mapping *nd_mapping; | ||
| 88 | u16 num_mappings; | ||
| 89 | const struct attribute_group **attr_groups; | ||
| 90 | struct nd_interleave_set *nd_set; | ||
| 91 | void *provider_data; | ||
| 92 | int num_lanes; | ||
| 93 | int numa_node; | ||
| 94 | }; | ||
| 95 | |||
| 96 | struct nvdimm_bus; | ||
| 97 | struct module; | ||
| 98 | struct device; | ||
| 99 | struct nd_blk_region; | ||
| 100 | struct nd_blk_region_desc { | ||
| 101 | int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); | ||
| 102 | void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); | ||
| 103 | int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, | ||
| 104 | void *iobuf, u64 len, int rw); | ||
| 105 | struct nd_region_desc ndr_desc; | ||
| 106 | }; | ||
| 107 | |||
| 108 | static inline struct nd_blk_region_desc *to_blk_region_desc( | ||
| 109 | struct nd_region_desc *ndr_desc) | ||
| 110 | { | ||
| 111 | return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc); | ||
| 112 | |||
| 113 | } | ||
| 114 | |||
| 115 | struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, | ||
| 116 | struct nvdimm_bus_descriptor *nfit_desc, struct module *module); | ||
| 117 | #define nvdimm_bus_register(parent, desc) \ | ||
| 118 | __nvdimm_bus_register(parent, desc, THIS_MODULE) | ||
| 119 | void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); | ||
| 120 | struct nvdimm_bus *to_nvdimm_bus(struct device *dev); | ||
| 121 | struct nvdimm *to_nvdimm(struct device *dev); | ||
| 122 | struct nd_region *to_nd_region(struct device *dev); | ||
| 123 | struct nd_blk_region *to_nd_blk_region(struct device *dev); | ||
| 124 | struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); | ||
| 125 | const char *nvdimm_name(struct nvdimm *nvdimm); | ||
| 126 | void *nvdimm_provider_data(struct nvdimm *nvdimm); | ||
| 127 | struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, | ||
| 128 | const struct attribute_group **groups, unsigned long flags, | ||
| 129 | unsigned long *dsm_mask); | ||
| 130 | const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); | ||
| 131 | const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); | ||
| 132 | u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, | ||
| 133 | const struct nd_cmd_desc *desc, int idx, void *buf); | ||
| 134 | u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, | ||
| 135 | const struct nd_cmd_desc *desc, int idx, const u32 *in_field, | ||
| 136 | const u32 *out_field); | ||
| 137 | int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count); | ||
| 138 | struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, | ||
| 139 | struct nd_region_desc *ndr_desc); | ||
| 140 | struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, | ||
| 141 | struct nd_region_desc *ndr_desc); | ||
| 142 | struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, | ||
| 143 | struct nd_region_desc *ndr_desc); | ||
| 144 | void *nd_region_provider_data(struct nd_region *nd_region); | ||
| 145 | void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); | ||
| 146 | void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); | ||
| 147 | struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); | ||
| 148 | unsigned int nd_region_acquire_lane(struct nd_region *nd_region); | ||
| 149 | void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); | ||
| 150 | u64 nd_fletcher64(void *addr, size_t len, bool le); | ||
| 151 | #endif /* __LIBNVDIMM_H__ */ | ||
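For orientation, the new header above is consumed roughly as follows: a provider (ACPI NFIT being the first user) fills a struct nvdimm_bus_descriptor, registers a bus, then creates DIMM and region devices against it. A condensed sketch with illustrative names, no error handling, and ndr_desc assumed to be an already-populated struct nd_region_desc:

    static struct nvdimm_bus_descriptor nd_desc = {
            .provider_name = "example",
            .attr_groups   = example_bus_groups,   /* illustrative */
            .ndctl         = example_ndctl,        /* ndctl_fn callback, illustrative */
    };

    struct nvdimm_bus *nvdimm_bus = nvdimm_bus_register(parent_dev, &nd_desc);
    struct nvdimm *nvdimm = nvdimm_create(nvdimm_bus, provider_data, NULL,
                                          NDD_ALIASING, NULL);
    struct nd_region *nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);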
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index ee6dbb39a809..31db7a05dd36 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h | |||
| @@ -99,7 +99,7 @@ struct klp_object { | |||
| 99 | struct klp_func *funcs; | 99 | struct klp_func *funcs; |
| 100 | 100 | ||
| 101 | /* internal */ | 101 | /* internal */ |
| 102 | struct kobject *kobj; | 102 | struct kobject kobj; |
| 103 | struct module *mod; | 103 | struct module *mod; |
| 104 | enum klp_state state; | 104 | enum klp_state state; |
| 105 | }; | 105 | }; |
| @@ -123,6 +123,12 @@ struct klp_patch { | |||
| 123 | enum klp_state state; | 123 | enum klp_state state; |
| 124 | }; | 124 | }; |
| 125 | 125 | ||
| 126 | #define klp_for_each_object(patch, obj) \ | ||
| 127 | for (obj = patch->objs; obj->funcs; obj++) | ||
| 128 | |||
| 129 | #define klp_for_each_func(obj, func) \ | ||
| 130 | for (func = obj->funcs; func->old_name; func++) | ||
| 131 | |||
| 126 | int klp_register_patch(struct klp_patch *); | 132 | int klp_register_patch(struct klp_patch *); |
| 127 | int klp_unregister_patch(struct klp_patch *); | 133 | int klp_unregister_patch(struct klp_patch *); |
| 128 | int klp_enable_patch(struct klp_patch *); | 134 | int klp_enable_patch(struct klp_patch *); |
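The two iterator macros above replace open-coded loops over a patch's objects and functions. A short usage sketch; the pr_debug() line is illustrative, and a NULL obj->name conventionally means vmlinux:

    struct klp_object *obj;
    struct klp_func *func;

    klp_for_each_object(patch, obj)
            klp_for_each_func(obj, func)
                    pr_debug("%s: %s\n", obj->name ?: "vmlinux", func->old_name);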
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2722111591a3..70400dc7660f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -255,6 +255,7 @@ struct held_lock { | |||
| 255 | unsigned int check:1; /* see lock_acquire() comment */ | 255 | unsigned int check:1; /* see lock_acquire() comment */ |
| 256 | unsigned int hardirqs_off:1; | 256 | unsigned int hardirqs_off:1; |
| 257 | unsigned int references:12; /* 32 bits */ | 257 | unsigned int references:12; /* 32 bits */ |
| 258 | unsigned int pin_count; | ||
| 258 | }; | 259 | }; |
| 259 | 260 | ||
| 260 | /* | 261 | /* |
| @@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); | |||
| 354 | extern void lockdep_clear_current_reclaim_state(void); | 355 | extern void lockdep_clear_current_reclaim_state(void); |
| 355 | extern void lockdep_trace_alloc(gfp_t mask); | 356 | extern void lockdep_trace_alloc(gfp_t mask); |
| 356 | 357 | ||
| 358 | extern void lock_pin_lock(struct lockdep_map *lock); | ||
| 359 | extern void lock_unpin_lock(struct lockdep_map *lock); | ||
| 360 | |||
| 357 | # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, | 361 | # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, |
| 358 | 362 | ||
| 359 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) | 363 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) |
| @@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask); | |||
| 368 | 372 | ||
| 369 | #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) | 373 | #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) |
| 370 | 374 | ||
| 375 | #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) | ||
| 376 | #define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map) | ||
| 377 | |||
| 371 | #else /* !CONFIG_LOCKDEP */ | 378 | #else /* !CONFIG_LOCKDEP */ |
| 372 | 379 | ||
| 373 | static inline void lockdep_off(void) | 380 | static inline void lockdep_off(void) |
| @@ -420,6 +427,9 @@ struct lock_class_key { }; | |||
| 420 | 427 | ||
| 421 | #define lockdep_recursing(tsk) (0) | 428 | #define lockdep_recursing(tsk) (0) |
| 422 | 429 | ||
| 430 | #define lockdep_pin_lock(l) do { (void)(l); } while (0) | ||
| 431 | #define lockdep_unpin_lock(l) do { (void)(l); } while (0) | ||
| 432 | |||
| 423 | #endif /* !LOCKDEP */ | 433 | #endif /* !LOCKDEP */ |
| 424 | 434 | ||
| 425 | #ifdef CONFIG_LOCK_STAT | 435 | #ifdef CONFIG_LOCK_STAT |
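lockdep_pin_lock()/lockdep_unpin_lock() above let a caller assert that a held lock is not dropped and re-taken behind its back within a region; the scheduler's rq->lock is the intended first user. A hedged sketch of that pattern:

    lockdep_pin_lock(&rq->lock);    /* complain if anyone releases rq->lock here */
    /* ... code that relies on rq->lock staying held ... */
    lockdep_unpin_lock(&rq->lock);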
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h new file mode 100644 index 000000000000..9429f054c323 --- /dev/null +++ b/include/linux/lsm_hooks.h | |||
| @@ -0,0 +1,1888 @@ | |||
| 1 | /* | ||
| 2 | * Linux Security Module interfaces | ||
| 3 | * | ||
| 4 | * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> | ||
| 5 | * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> | ||
| 6 | * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> | ||
| 7 | * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> | ||
| 8 | * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) | ||
| 9 | * Copyright (C) 2015 Intel Corporation. | ||
| 10 | * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify | ||
| 13 | * it under the terms of the GNU General Public License as published by | ||
| 14 | * the Free Software Foundation; either version 2 of the License, or | ||
| 15 | * (at your option) any later version. | ||
| 16 | * | ||
| 17 | * Due to this file being licensed under the GPL there is controversy over | ||
| 18 | * whether this permits you to write a module that #includes this file | ||
| 19 | * without placing your module under the GPL. Please consult a lawyer for | ||
| 20 | * advice before doing this. | ||
| 21 | * | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef __LINUX_LSM_HOOKS_H | ||
| 25 | #define __LINUX_LSM_HOOKS_H | ||
| 26 | |||
| 27 | #include <linux/security.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/rculist.h> | ||
| 30 | |||
| 31 | /** | ||
| 32 | * Security hooks for program execution operations. | ||
| 33 | * | ||
| 34 | * @bprm_set_creds: | ||
| 35 | * Save security information in the bprm->security field, typically based | ||
| 36 | * on information about the bprm->file, for later use by the apply_creds | ||
| 37 | * hook. This hook may also optionally check permissions (e.g. for | ||
| 38 | * transitions between security domains). | ||
| 39 | * This hook may be called multiple times during a single execve, e.g. for | ||
| 40 | * interpreters. The hook can tell whether it has already been called by | ||
| 41 | * checking to see if @bprm->security is non-NULL. If so, then the hook | ||
| 42 | * may decide either to retain the security information saved earlier or | ||
| 43 | * to replace it. | ||
| 44 | * @bprm contains the linux_binprm structure. | ||
| 45 | * Return 0 if the hook is successful and permission is granted. | ||
| 46 | * @bprm_check_security: | ||
| 47 | * This hook mediates the point when a search for a binary handler will | ||
| 48 | * begin. It allows a check of the @bprm->security value, which is set in the | ||
| 49 | * preceding set_creds call. The primary difference from set_creds is | ||
| 50 | * that the argv list and envp list are reliably available in @bprm. This | ||
| 51 | * hook may be called multiple times during a single execve; and in each | ||
| 52 | * pass set_creds is called first. | ||
| 53 | * @bprm contains the linux_binprm structure. | ||
| 54 | * Return 0 if the hook is successful and permission is granted. | ||
| 55 | * @bprm_committing_creds: | ||
| 56 | * Prepare to install the new security attributes of a process being | ||
| 57 | * transformed by an execve operation, based on the old credentials | ||
| 58 | * pointed to by @current->cred and the information set in @bprm->cred by | ||
| 59 | * the bprm_set_creds hook. @bprm points to the linux_binprm structure. | ||
| 60 | * This hook is a good place to perform state changes on the process such | ||
| 61 | * as closing open file descriptors to which access will no longer be | ||
| 62 | * granted when the attributes are changed. This is called immediately | ||
| 63 | * before commit_creds(). | ||
| 64 | * @bprm_committed_creds: | ||
| 65 | * Tidy up after the installation of the new security attributes of a | ||
| 66 | * process being transformed by an execve operation. The new credentials | ||
| 67 | * have, by this point, been set to @current->cred. @bprm points to the | ||
| 68 | * linux_binprm structure. This hook is a good place to perform state | ||
| 69 | * changes on the process such as clearing out non-inheritable signal | ||
| 70 | * state. This is called immediately after commit_creds(). | ||
| 71 | * @bprm_secureexec: | ||
| 72 | * Return a boolean value (0 or 1) indicating whether a "secure exec" | ||
| 73 | * is required. The flag is passed in the auxiliary table | ||
| 74 | * on the initial stack to the ELF interpreter to indicate whether libc | ||
| 75 | * should enable secure mode. | ||
| 76 | * @bprm contains the linux_binprm structure. | ||
| 77 | * | ||
| 78 | * Security hooks for filesystem operations. | ||
| 79 | * | ||
| 80 | * @sb_alloc_security: | ||
| 81 | * Allocate and attach a security structure to the sb->s_security field. | ||
| 82 | * The s_security field is initialized to NULL when the structure is | ||
| 83 | * allocated. | ||
| 84 | * @sb contains the super_block structure to be modified. | ||
| 85 | * Return 0 if operation was successful. | ||
| 86 | * @sb_free_security: | ||
| 87 | * Deallocate and clear the sb->s_security field. | ||
| 88 | * @sb contains the super_block structure to be modified. | ||
| 89 | * @sb_statfs: | ||
| 90 | * Check permission before obtaining filesystem statistics for the @mnt | ||
| 91 | * mountpoint. | ||
| 92 | * @dentry is a handle on the superblock for the filesystem. | ||
| 93 | * Return 0 if permission is granted. | ||
| 94 | * @sb_mount: | ||
| 95 | * Check permission before an object specified by @dev_name is mounted on | ||
| 96 | * the mount point named by @nd. For an ordinary mount, @dev_name | ||
| 97 | * identifies a device if the file system type requires a device. For a | ||
| 98 | * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a | ||
| 99 | * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the | ||
| 100 | * pathname of the object being mounted. | ||
| 101 | * @dev_name contains the name for object being mounted. | ||
| 102 | * @path contains the path for mount point object. | ||
| 103 | * @type contains the filesystem type. | ||
| 104 | * @flags contains the mount flags. | ||
| 105 | * @data contains the filesystem-specific data. | ||
| 106 | * Return 0 if permission is granted. | ||
| 107 | * @sb_copy_data: | ||
| 108 | * Allow mount option data to be copied prior to parsing by the filesystem, | ||
| 109 | * so that the security module can extract security-specific mount | ||
| 110 | * options cleanly (a filesystem may modify the data e.g. with strsep()). | ||
| 111 | * This also allows the original mount data to be stripped of security- | ||
| 112 | * specific options to avoid having to make filesystems aware of them. | ||
| 113 | * @type the type of filesystem being mounted. | ||
| 114 | * @orig the original mount data copied from userspace. | ||
| 115 | * @copy copied data which will be passed to the security module. | ||
| 116 | * Returns 0 if the copy was successful. | ||
| 117 | * @sb_remount: | ||
| 118 | * Extracts security system specific mount options and verifies no changes | ||
| 119 | * are being made to those options. | ||
| 120 | * @sb superblock being remounted | ||
| 121 | * @data contains the filesystem-specific data. | ||
| 122 | * Return 0 if permission is granted. | ||
| 123 | * @sb_umount: | ||
| 124 | * Check permission before the @mnt file system is unmounted. | ||
| 125 | * @mnt contains the mounted file system. | ||
| 126 | * @flags contains the unmount flags, e.g. MNT_FORCE. | ||
| 127 | * Return 0 if permission is granted. | ||
| 128 | * @sb_pivotroot: | ||
| 129 | * Check permission before pivoting the root filesystem. | ||
| 130 | * @old_path contains the path for the new location of the | ||
| 131 | * current root (put_old). | ||
| 132 | * @new_path contains the path for the new root (new_root). | ||
| 133 | * Return 0 if permission is granted. | ||
| 134 | * @sb_set_mnt_opts: | ||
| 135 | * Set the security relevant mount options used for a superblock | ||
| 136 | * @sb the superblock to set security mount options for | ||
| 137 | * @opts binary data structure containing all lsm mount data | ||
| 138 | * @sb_clone_mnt_opts: | ||
| 139 | * Copy all security options from a given superblock to another | ||
| 140 | * @oldsb old superblock which contain information to clone | ||
| 141 | * @newsb new superblock which needs filled in | ||
| 142 | * @sb_parse_opts_str: | ||
| 143 | * Parse a string of security data filling in the opts structure | ||
| 144 | * @options string containing all mount options known by the LSM | ||
| 145 | * @opts binary data structure usable by the LSM | ||
| 146 | * @dentry_init_security: | ||
| 147 | * Compute a context for a dentry as the inode is not yet available | ||
| 148 | * since NFSv4 has no label backed by an EA anyway. | ||
| 149 | * @dentry dentry to use in calculating the context. | ||
| 150 | * @mode mode used to determine resource type. | ||
| 151 | * @name name of the last path component used to create file | ||
| 152 | * @ctx pointer to place the pointer to the resulting context in. | ||
| 153 | * @ctxlen point to place the length of the resulting context. | ||
| 154 | * | ||
| 155 | * | ||
| 156 | * Security hooks for inode operations. | ||
| 157 | * | ||
| 158 | * @inode_alloc_security: | ||
| 159 | * Allocate and attach a security structure to @inode->i_security. The | ||
| 160 | * i_security field is initialized to NULL when the inode structure is | ||
| 161 | * allocated. | ||
| 162 | * @inode contains the inode structure. | ||
| 163 | * Return 0 if operation was successful. | ||
| 164 | * @inode_free_security: | ||
| 165 | * @inode contains the inode structure. | ||
| 166 | * Deallocate the inode security structure and set @inode->i_security to | ||
| 167 | * NULL. | ||
| 168 | * @inode_init_security: | ||
| 169 | * Obtain the security attribute name suffix and value to set on a newly | ||
| 170 | * created inode and set up the incore security field for the new inode. | ||
| 171 | * This hook is called by the fs code as part of the inode creation | ||
| 172 | * transaction and provides for atomic labeling of the inode, unlike | ||
| 173 | * the post_create/mkdir/... hooks called by the VFS. The hook function | ||
| 174 | * is expected to allocate the name and value via kmalloc, with the caller | ||
| 175 | * being responsible for calling kfree after using them. | ||
| 176 | * If the security module does not use security attributes or does | ||
| 177 | * not wish to put a security attribute on this particular inode, | ||
| 178 | * then it should return -EOPNOTSUPP to skip this processing. | ||
| 179 | * @inode contains the inode structure of the newly created inode. | ||
| 180 | * @dir contains the inode structure of the parent directory. | ||
| 181 | * @qstr contains the last path component of the new object | ||
| 182 | * @name will be set to the allocated name suffix (e.g. selinux). | ||
| 183 | * @value will be set to the allocated attribute value. | ||
| 184 | * @len will be set to the length of the value. | ||
| 185 | * Returns 0 if @name and @value have been successfully set, | ||
| 186 | * -EOPNOTSUPP if no security attribute is needed, or | ||
| 187 | * -ENOMEM on memory allocation failure. | ||
| 188 | * @inode_create: | ||
| 189 | * Check permission to create a regular file. | ||
| 190 | * @dir contains inode structure of the parent of the new file. | ||
| 191 | * @dentry contains the dentry structure for the file to be created. | ||
| 192 | * @mode contains the file mode of the file to be created. | ||
| 193 | * Return 0 if permission is granted. | ||
| 194 | * @inode_link: | ||
| 195 | * Check permission before creating a new hard link to a file. | ||
| 196 | * @old_dentry contains the dentry structure for an existing | ||
| 197 | * link to the file. | ||
| 198 | * @dir contains the inode structure of the parent directory | ||
| 199 | * of the new link. | ||
| 200 | * @new_dentry contains the dentry structure for the new link. | ||
| 201 | * Return 0 if permission is granted. | ||
| 202 | * @path_link: | ||
| 203 | * Check permission before creating a new hard link to a file. | ||
| 204 | * @old_dentry contains the dentry structure for an existing link | ||
| 205 | * to the file. | ||
| 206 | * @new_dir contains the path structure of the parent directory of | ||
| 207 | * the new link. | ||
| 208 | * @new_dentry contains the dentry structure for the new link. | ||
| 209 | * Return 0 if permission is granted. | ||
| 210 | * @inode_unlink: | ||
| 211 | * Check the permission to remove a hard link to a file. | ||
| 212 | * @dir contains the inode structure of parent directory of the file. | ||
| 213 | * @dentry contains the dentry structure for file to be unlinked. | ||
| 214 | * Return 0 if permission is granted. | ||
| 215 | * @path_unlink: | ||
| 216 | * Check the permission to remove a hard link to a file. | ||
| 217 | * @dir contains the path structure of parent directory of the file. | ||
| 218 | * @dentry contains the dentry structure for file to be unlinked. | ||
| 219 | * Return 0 if permission is granted. | ||
| 220 | * @inode_symlink: | ||
| 221 | * Check the permission to create a symbolic link to a file. | ||
| 222 | * @dir contains the inode structure of parent directory of | ||
| 223 | * the symbolic link. | ||
| 224 | * @dentry contains the dentry structure of the symbolic link. | ||
| 225 | * @old_name contains the pathname of file. | ||
| 226 | * Return 0 if permission is granted. | ||
| 227 | * @path_symlink: | ||
| 228 | * Check the permission to create a symbolic link to a file. | ||
| 229 | * @dir contains the path structure of parent directory of | ||
| 230 | * the symbolic link. | ||
| 231 | * @dentry contains the dentry structure of the symbolic link. | ||
| 232 | * @old_name contains the pathname of file. | ||
| 233 | * Return 0 if permission is granted. | ||
| 234 | * @inode_mkdir: | ||
| 235 | * Check permissions to create a new directory in the existing directory | ||
| 236 | * associated with inode structure @dir. | ||
| 237 | * @dir contains the inode structure of parent of the directory | ||
| 238 | * to be created. | ||
| 239 | * @dentry contains the dentry structure of new directory. | ||
| 240 | * @mode contains the mode of new directory. | ||
| 241 | * Return 0 if permission is granted. | ||
| 242 | * @path_mkdir: | ||
| 243 | * Check permissions to create a new directory in the existing directory | ||
| 244 | * associated with path structure @path. | ||
| 245 | * @dir contains the path structure of parent of the directory | ||
| 246 | * to be created. | ||
| 247 | * @dentry contains the dentry structure of new directory. | ||
| 248 | * @mode contains the mode of new directory. | ||
| 249 | * Return 0 if permission is granted. | ||
| 250 | * @inode_rmdir: | ||
| 251 | * Check the permission to remove a directory. | ||
| 252 | * @dir contains the inode structure of parent of the directory | ||
| 253 | * to be removed. | ||
| 254 | * @dentry contains the dentry structure of directory to be removed. | ||
| 255 | * Return 0 if permission is granted. | ||
| 256 | * @path_rmdir: | ||
| 257 | * Check the permission to remove a directory. | ||
| 258 | * @dir contains the path structure of parent of the directory to be | ||
| 259 | * removed. | ||
| 260 | * @dentry contains the dentry structure of directory to be removed. | ||
| 261 | * Return 0 if permission is granted. | ||
| 262 | * @inode_mknod: | ||
| 263 | * Check permissions when creating a special file (or a socket or a fifo | ||
| 264 | * file created via the mknod system call). Note that if mknod operation | ||
| 265 | * is being done for a regular file, then the create hook will be called | ||
| 266 | * and not this hook. | ||
| 267 | * @dir contains the inode structure of parent of the new file. | ||
| 268 | * @dentry contains the dentry structure of the new file. | ||
| 269 | * @mode contains the mode of the new file. | ||
| 270 | * @dev contains the device number. | ||
| 271 | * Return 0 if permission is granted. | ||
| 272 | * @path_mknod: | ||
| 273 | * Check permissions when creating a file. Note that this hook is called | ||
| 274 | * even if mknod operation is being done for a regular file. | ||
| 275 | * @dir contains the path structure of parent of the new file. | ||
| 276 | * @dentry contains the dentry structure of the new file. | ||
| 277 | * @mode contains the mode of the new file. | ||
| 278 | * @dev contains the undecoded device number. Use new_decode_dev() to get | ||
| 279 | * the decoded device number. | ||
| 280 | * Return 0 if permission is granted. | ||
| 281 | * @inode_rename: | ||
| 282 | * Check for permission to rename a file or directory. | ||
| 283 | * @old_dir contains the inode structure for parent of the old link. | ||
| 284 | * @old_dentry contains the dentry structure of the old link. | ||
| 285 | * @new_dir contains the inode structure for parent of the new link. | ||
| 286 | * @new_dentry contains the dentry structure of the new link. | ||
| 287 | * Return 0 if permission is granted. | ||
| 288 | * @path_rename: | ||
| 289 | * Check for permission to rename a file or directory. | ||
| 290 | * @old_dir contains the path structure for parent of the old link. | ||
| 291 | * @old_dentry contains the dentry structure of the old link. | ||
| 292 | * @new_dir contains the path structure for parent of the new link. | ||
| 293 | * @new_dentry contains the dentry structure of the new link. | ||
| 294 | * Return 0 if permission is granted. | ||
| 295 | * @path_chmod: | ||
| 296 | * Check for permission to change DAC's permission of a file or directory. | ||
| 297 | * @dentry contains the dentry structure. | ||
| 298 | * @mnt contains the vfsmnt structure. | ||
| 299 | * @mode contains DAC's mode. | ||
| 300 | * Return 0 if permission is granted. | ||
| 301 | * @path_chown: | ||
| 302 | * Check for permission to change owner/group of a file or directory. | ||
| 303 | * @path contains the path structure. | ||
| 304 | * @uid contains new owner's ID. | ||
| 305 | * @gid contains new group's ID. | ||
| 306 | * Return 0 if permission is granted. | ||
| 307 | * @path_chroot: | ||
| 308 | * Check for permission to change root directory. | ||
| 309 | * @path contains the path structure. | ||
| 310 | * Return 0 if permission is granted. | ||
| 311 | * @inode_readlink: | ||
| 312 | * Check the permission to read the symbolic link. | ||
| 313 | * @dentry contains the dentry structure for the file link. | ||
| 314 | * Return 0 if permission is granted. | ||
| 315 | * @inode_follow_link: | ||
| 316 | * Check permission to follow a symbolic link when looking up a pathname. | ||
| 317 | * @dentry contains the dentry structure for the link. | ||
| 318 | * @inode contains the inode, which itself is not stable in RCU-walk | ||
| 319 | * @rcu indicates whether we are in RCU-walk mode. | ||
| 320 | * Return 0 if permission is granted. | ||
| 321 | * @inode_permission: | ||
| 322 | * Check permission before accessing an inode. This hook is called by the | ||
| 323 | * existing Linux permission function, so a security module can use it to | ||
| 324 | * provide additional checking for existing Linux permission checks. | ||
| 325 | * Notice that this hook is called when a file is opened (as well as many | ||
| 326 | * other operations), whereas the file_security_ops permission hook is | ||
| 327 | * called when the actual read/write operations are performed. | ||
| 328 | * @inode contains the inode structure to check. | ||
| 329 | * @mask contains the permission mask. | ||
| 330 | * Return 0 if permission is granted. | ||
| 331 | * @inode_setattr: | ||
| 332 | * Check permission before setting file attributes. Note that the kernel | ||
| 333 | * call to notify_change is performed from several locations, whenever | ||
| 334 | * file attributes change (such as when a file is truncated, chown/chmod | ||
| 335 | * operations, transferring disk quotas, etc). | ||
| 336 | * @dentry contains the dentry structure for the file. | ||
| 337 | * @attr is the iattr structure containing the new file attributes. | ||
| 338 | * Return 0 if permission is granted. | ||
| 339 | * @path_truncate: | ||
| 340 | * Check permission before truncating a file. | ||
| 341 | * @path contains the path structure for the file. | ||
| 342 | * Return 0 if permission is granted. | ||
| 343 | * @inode_getattr: | ||
| 344 | * Check permission before obtaining file attributes. | ||
| 345 | * @mnt is the vfsmount where the dentry was looked up | ||
| 346 | * @dentry contains the dentry structure for the file. | ||
| 347 | * Return 0 if permission is granted. | ||
| 348 | * @inode_setxattr: | ||
| 349 | * Check permission before setting the extended attributes | ||
| 350 | * @value identified by @name for @dentry. | ||
| 351 | * Return 0 if permission is granted. | ||
| 352 | * @inode_post_setxattr: | ||
| 353 | * Update inode security field after successful setxattr operation. | ||
| 354 | * @value identified by @name for @dentry. | ||
| 355 | * @inode_getxattr: | ||
| 356 | * Check permission before obtaining the extended attributes | ||
| 357 | * identified by @name for @dentry. | ||
| 358 | * Return 0 if permission is granted. | ||
| 359 | * @inode_listxattr: | ||
| 360 | * Check permission before obtaining the list of extended attribute | ||
| 361 | * names for @dentry. | ||
| 362 | * Return 0 if permission is granted. | ||
| 363 | * @inode_removexattr: | ||
| 364 | * Check permission before removing the extended attribute | ||
| 365 | * identified by @name for @dentry. | ||
| 366 | * Return 0 if permission is granted. | ||
| 367 | * @inode_getsecurity: | ||
| 368 | * Retrieve a copy of the extended attribute representation of the | ||
| 369 | * security label associated with @name for @inode via @buffer. Note that | ||
| 370 | * @name is the remainder of the attribute name after the security prefix | ||
| 371 | * has been removed. @alloc is used to specify whether the call should return a | ||
| 372 | * value via the buffer or just the value length. Return size of buffer on | ||
| 373 | * success. | ||
| 374 | * @inode_setsecurity: | ||
| 375 | * Set the security label associated with @name for @inode from the | ||
| 376 | * extended attribute value @value. @size indicates the size of the | ||
| 377 | * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. | ||
| 378 | * Note that @name is the remainder of the attribute name after the | ||
| 379 | * security. prefix has been removed. | ||
| 380 | * Return 0 on success. | ||
| 381 | * @inode_listsecurity: | ||
| 382 | * Copy the extended attribute names for the security labels | ||
| 383 | * associated with @inode into @buffer. The maximum size of @buffer | ||
| 384 | * is specified by @buffer_size. @buffer may be NULL to request | ||
| 385 | * the size of the buffer required. | ||
| 386 | * Returns number of bytes used/required on success. | ||
| 387 | * @inode_need_killpriv: | ||
| 388 | * Called when an inode has been changed. | ||
| 389 | * @dentry is the dentry being changed. | ||
| 390 | * Return <0 on error to abort the inode change operation. | ||
| 391 | * Return 0 if inode_killpriv does not need to be called. | ||
| 392 | * Return >0 if inode_killpriv does need to be called. | ||
| 393 | * @inode_killpriv: | ||
| 394 | * The setuid bit is being removed. Remove similar security labels. | ||
| 395 | * Called with the dentry->d_inode->i_mutex held. | ||
| 396 | * @dentry is the dentry being changed. | ||
| 397 | * Return 0 on success. If error is returned, then the operation | ||
| 398 | * causing setuid bit removal is failed. | ||
| 399 | * @inode_getsecid: | ||
| 400 | * Get the secid associated with the node. | ||
| 401 | * @inode contains a pointer to the inode. | ||
| 402 | * @secid contains a pointer to the location where result will be saved. | ||
| 403 | * In case of failure, @secid will be set to zero. | ||
| 404 | * | ||
| 405 | * Security hooks for file operations | ||
| 406 | * | ||
| 407 | * @file_permission: | ||
| 408 | * Check file permissions before accessing an open file. This hook is | ||
| 409 | * called by various operations that read or write files. A security | ||
| 410 | * module can use this hook to perform additional checking on these | ||
| 411 | * operations, e.g. to revalidate permissions on use to support privilege | ||
| 412 | * bracketing or policy changes. Notice that this hook is used when the | ||
| 413 | * actual read/write operations are performed, whereas the | ||
| 414 | * inode_security_ops hook is called when a file is opened (as well as | ||
| 415 | * many other operations). | ||
| 416 | * Caveat: Although this hook can be used to revalidate permissions for | ||
| 417 | * various system call operations that read or write files, it does not | ||
| 418 | * address the revalidation of permissions for memory-mapped files. | ||
| 419 | * Security modules must handle this separately if they need such | ||
| 420 | * revalidation. | ||
| 421 | * @file contains the file structure being accessed. | ||
| 422 | * @mask contains the requested permissions. | ||
| 423 | * Return 0 if permission is granted. | ||
| 424 | * @file_alloc_security: | ||
| 425 | * Allocate and attach a security structure to the file->f_security field. | ||
| 426 | * The security field is initialized to NULL when the structure is first | ||
| 427 | * created. | ||
| 428 | * @file contains the file structure to secure. | ||
| 429 | * Return 0 if the hook is successful and permission is granted. | ||
| 430 | * @file_free_security: | ||
| 431 | * Deallocate and free any security structures stored in file->f_security. | ||
| 432 | * @file contains the file structure being modified. | ||
| 433 | * @file_ioctl: | ||
| 434 | * @file contains the file structure. | ||
| 435 | * @cmd contains the operation to perform. | ||
| 436 | * @arg contains the operational arguments. | ||
| 437 | * Check permission for an ioctl operation on @file. Note that @arg | ||
| 438 | * sometimes represents a user space pointer; in other cases, it may be a | ||
| 439 | * simple integer value. When @arg represents a user space pointer, it | ||
| 440 | * should never be used by the security module. | ||
| 441 | * Return 0 if permission is granted. | ||
| 442 | * @mmap_addr: | ||
| 443 | * Check permissions for a mmap operation at @addr. | ||
| 444 | * @addr contains virtual address that will be used for the operation. | ||
| 445 | * Return 0 if permission is granted. | ||
| 446 | * @mmap_file: | ||
| 447 | * Check permissions for a mmap operation. The @file may be NULL, e.g. | ||
| 448 | * if mapping anonymous memory. | ||
| 449 | * @file contains the file structure for file to map (may be NULL). | ||
| 450 | * @reqprot contains the protection requested by the application. | ||
| 451 | * @prot contains the protection that will be applied by the kernel. | ||
| 452 | * @flags contains the operational flags. | ||
| 453 | * Return 0 if permission is granted. | ||
| 454 | * @file_mprotect: | ||
| 455 | * Check permissions before changing memory access permissions. | ||
| 456 | * @vma contains the memory region to modify. | ||
| 457 | * @reqprot contains the protection requested by the application. | ||
| 458 | * @prot contains the protection that will be applied by the kernel. | ||
| 459 | * Return 0 if permission is granted. | ||
| 460 | * @file_lock: | ||
| 461 | * Check permission before performing file locking operations. | ||
| 462 | * Note: this hook mediates both flock and fcntl style locks. | ||
| 463 | * @file contains the file structure. | ||
| 464 | * @cmd contains the posix-translated lock operation to perform | ||
| 465 | * (e.g. F_RDLCK, F_WRLCK). | ||
| 466 | * Return 0 if permission is granted. | ||
| 467 | * @file_fcntl: | ||
| 468 | * Check permission before allowing the file operation specified by @cmd | ||
| 469 | * from being performed on the file @file. Note that @arg sometimes | ||
| 470 | * represents a user space pointer; in other cases, it may be a simple | ||
| 471 | * integer value. When @arg represents a user space pointer, it should | ||
| 472 | * never be used by the security module. | ||
| 473 | * @file contains the file structure. | ||
| 474 | * @cmd contains the operation to be performed. | ||
| 475 | * @arg contains the operational arguments. | ||
| 476 | * Return 0 if permission is granted. | ||
| 477 | * @file_set_fowner: | ||
| 478 | * Save owner security information (typically from current->security) in | ||
| 479 | * file->f_security for later use by the send_sigiotask hook. | ||
| 480 | * @file contains the file structure to update. | ||
| 481 | * Return 0 on success. | ||
| 482 | * @file_send_sigiotask: | ||
| 483 | * Check permission for the file owner @fown to send SIGIO or SIGURG to the | ||
| 484 | * process @tsk. Note that this hook is sometimes called from interrupt. | ||
| 485 | * Note that the fown_struct, @fown, is never outside the context of a | ||
| 486 | * struct file, so the file structure (and associated security information) | ||
| 487 | * can always be obtained: | ||
| 488 | * container_of(fown, struct file, f_owner) | ||
| 489 | * @tsk contains the structure of task receiving signal. | ||
| 490 | * @fown contains the file owner information. | ||
| 491 | * @sig is the signal that will be sent. When 0, kernel sends SIGIO. | ||
| 492 | * Return 0 if permission is granted. | ||
| 493 | * @file_receive: | ||
| 494 | * This hook allows security modules to control the ability of a process | ||
| 495 | * to receive an open file descriptor via socket IPC. | ||
| 496 | * @file contains the file structure being received. | ||
| 497 | * Return 0 if permission is granted. | ||
| 498 | * @file_open: | ||
| 499 | * Save open-time permission checking state for later use upon | ||
| 500 | * file_permission, and recheck access if anything has changed | ||
| 501 | * since inode_permission. | ||
| 502 | * | ||
| 503 | * Security hooks for task operations. | ||
| 504 | * | ||
| 505 | * @task_create: | ||
| 506 | * Check permission before creating a child process. See the clone(2) | ||
| 507 | * manual page for definitions of the @clone_flags. | ||
| 508 | * @clone_flags contains the flags indicating what should be shared. | ||
| 509 | * Return 0 if permission is granted. | ||
| 510 | * @task_free: | ||
| 511 | * @task task being freed | ||
| 512 | * Handle release of task-related resources. (Note that this can be called | ||
| 513 | * from interrupt context.) | ||
| 514 | * @cred_alloc_blank: | ||
| 515 | * @cred points to the credentials. | ||
| 516 | * @gfp indicates the atomicity of any memory allocations. | ||
| 517 | * Only allocate sufficient memory and attach to @cred such that | ||
| 518 | * cred_transfer() will not get ENOMEM. | ||
| 519 | * @cred_free: | ||
| 520 | * @cred points to the credentials. | ||
| 521 | * Deallocate and clear the cred->security field in a set of credentials. | ||
| 522 | * @cred_prepare: | ||
| 523 | * @new points to the new credentials. | ||
| 524 | * @old points to the original credentials. | ||
| 525 | * @gfp indicates the atomicity of any memory allocations. | ||
| 526 | * Prepare a new set of credentials by copying the data from the old set. | ||
| 527 | * @cred_transfer: | ||
| 528 | * @new points to the new credentials. | ||
| 529 | * @old points to the original credentials. | ||
| 530 | * Transfer data from original creds to new creds | ||
| 531 | * @kernel_act_as: | ||
| 532 | * Set the credentials for a kernel service to act as (subjective context). | ||
| 533 | * @new points to the credentials to be modified. | ||
| 534 | * @secid specifies the security ID to be set | ||
| 535 | * The current task must be the one that nominated @secid. | ||
| 536 | * Return 0 if successful. | ||
| 537 | * @kernel_create_files_as: | ||
| 538 | * Set the file creation context in a set of credentials to be the same as | ||
| 539 | * the objective context of the specified inode. | ||
| 540 | * @new points to the credentials to be modified. | ||
| 541 | * @inode points to the inode to use as a reference. | ||
| 542 | * The current task must be the one that nominated @inode. | ||
| 543 | * Return 0 if successful. | ||
| 544 | * @kernel_fw_from_file: | ||
| 545 | * Load firmware from userspace (not called for built-in firmware). | ||
| 546 | * @file contains the file structure pointing to the file containing | ||
| 547 | * the firmware to load. This argument will be NULL if the firmware | ||
| 548 | * was loaded via the uevent-triggered blob-based interface exposed | ||
| 549 | * by CONFIG_FW_LOADER_USER_HELPER. | ||
| 550 | * @buf pointer to buffer containing firmware contents. | ||
| 551 | * @size length of the firmware contents. | ||
| 552 | * Return 0 if permission is granted. | ||
| 553 | * @kernel_module_request: | ||
| 554 | * Ability to trigger the kernel to automatically upcall to userspace for | ||
| 555 | * userspace to load a kernel module with the given name. | ||
| 556 | * @kmod_name name of the module requested by the kernel | ||
| 557 | * Return 0 if successful. | ||
| 558 | * @kernel_module_from_file: | ||
| 559 | * Load a kernel module from userspace. | ||
| 560 | * @file contains the file structure pointing to the file containing | ||
| 561 | * the kernel module to load. If the module is being loaded from a blob, | ||
| 562 | * this argument will be NULL. | ||
| 563 | * Return 0 if permission is granted. | ||
| 564 | * @task_fix_setuid: | ||
| 565 | * Update the module's state after setting one or more of the user | ||
| 566 | * identity attributes of the current process. The @flags parameter | ||
| 567 | * indicates which of the set*uid system calls invoked this hook. | ||
| 568 | * @new is the set of credentials that will be installed. Modifications | ||
| 569 | * should be made to this rather than to @current->cred. | ||
| 570 | * @old is the set of credentials that are being replaced. | ||
| 571 | * @flags contains one of the LSM_SETID_* values. | ||
| 572 | * Return 0 on success. | ||
| 573 | * @task_setpgid: | ||
| 574 | * Check permission before setting the process group identifier of the | ||
| 575 | * process @p to @pgid. | ||
| 576 | * @p contains the task_struct for process being modified. | ||
| 577 | * @pgid contains the new pgid. | ||
| 578 | * Return 0 if permission is granted. | ||
| 579 | * @task_getpgid: | ||
| 580 | * Check permission before getting the process group identifier of the | ||
| 581 | * process @p. | ||
| 582 | * @p contains the task_struct for the process. | ||
| 583 | * Return 0 if permission is granted. | ||
| 584 | * @task_getsid: | ||
| 585 | * Check permission before getting the session identifier of the process | ||
| 586 | * @p. | ||
| 587 | * @p contains the task_struct for the process. | ||
| 588 | * Return 0 if permission is granted. | ||
| 589 | * @task_getsecid: | ||
| 590 | * Retrieve the security identifier of the process @p. | ||
| 591 | * @p contains the task_struct for the process; the result is placed in @secid. | ||
| 592 | * In case of failure, @secid will be set to zero. | ||
| 593 | * | ||
| 594 | * @task_setnice: | ||
| 595 | * Check permission before setting the nice value of @p to @nice. | ||
| 596 | * @p contains the task_struct of process. | ||
| 597 | * @nice contains the new nice value. | ||
| 598 | * Return 0 if permission is granted. | ||
| 599 | * @task_setioprio: | ||
| 600 | * Check permission before setting the ioprio value of @p to @ioprio. | ||
| 601 | * @p contains the task_struct of process. | ||
| 602 | * @ioprio contains the new ioprio value. | ||
| 603 | * Return 0 if permission is granted. | ||
| 604 | * @task_getioprio: | ||
| 605 | * Check permission before getting the ioprio value of @p. | ||
| 606 | * @p contains the task_struct of process. | ||
| 607 | * Return 0 if permission is granted. | ||
| 608 | * @task_setrlimit: | ||
| 609 | * Check permission before setting the resource limits of the current | ||
| 610 | * process for @resource to @new_rlim. The old resource limit values can | ||
| 611 | * be examined by dereferencing (current->signal->rlim + resource). | ||
| 612 | * @resource contains the resource whose limit is being set. | ||
| 613 | * @new_rlim contains the new limits for @resource. | ||
| 614 | * Return 0 if permission is granted. | ||
| 615 | * @task_setscheduler: | ||
| 616 | * Check permission before setting scheduling policy and/or parameters of | ||
| 617 | * process @p based on @policy and @lp. | ||
| 618 | * @p contains the task_struct for process. | ||
| 619 | * @policy contains the scheduling policy. | ||
| 620 | * @lp contains the scheduling parameters. | ||
| 621 | * Return 0 if permission is granted. | ||
| 622 | * @task_getscheduler: | ||
| 623 | * Check permission before obtaining scheduling information for process | ||
| 624 | * @p. | ||
| 625 | * @p contains the task_struct for process. | ||
| 626 | * Return 0 if permission is granted. | ||
| 627 | * @task_movememory: | ||
| 628 | * Check permission before moving memory owned by process @p. | ||
| 629 | * @p contains the task_struct for process. | ||
| 630 | * Return 0 if permission is granted. | ||
| 631 | * @task_kill: | ||
| 632 | * Check permission before sending signal @sig to @p. @info can be NULL, | ||
| 633 | * the constant 1, or a pointer to a siginfo structure. If @info is 1 or | ||
| 634 | * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming | ||
| 635 | * from the kernel and should typically be permitted. | ||
| 636 | * SIGIO signals are handled separately by the send_sigiotask hook in | ||
| 637 | * file_security_ops. | ||
| 638 | * @p contains the task_struct for process. | ||
| 639 | * @info contains the signal information. | ||
| 640 | * @sig contains the signal value. | ||
| 641 | * @secid contains the sid of the process where the signal originated | ||
| 642 | * Return 0 if permission is granted. | ||
| 643 | * @task_wait: | ||
| 644 | * Check permission before allowing a process to reap a child process @p | ||
| 645 | * and collect its status information. | ||
| 646 | * @p contains the task_struct for process. | ||
| 647 | * Return 0 if permission is granted. | ||
| 648 | * @task_prctl: | ||
| 649 | * Check permission before performing a process control operation on the | ||
| 650 | * current process. | ||
| 651 | * @option contains the operation. | ||
| 652 | * @arg2 contains an argument. | ||
| 653 | * @arg3 contains an argument. | ||
| 654 | * @arg4 contains an argument. | ||
| 655 | * @arg5 contains an argument. | ||
| 656 | * Return -ENOSYS if no-one wanted to handle this op, any other value to | ||
| 657 | * cause prctl() to return immediately with that value. | ||
| 658 | * @task_to_inode: | ||
| 659 | * Set the security attributes for an inode based on an associated task's | ||
| 660 | * security attributes, e.g. for /proc/pid inodes. | ||
| 661 | * @p contains the task_struct for the task. | ||
| 662 | * @inode contains the inode structure for the inode. | ||
| 663 | * | ||
| 664 | * Security hooks for Netlink messaging. | ||
| 665 | * | ||
| 666 | * @netlink_send: | ||
| 667 | * Save security information for a netlink message so that permission | ||
| 668 | * checking can be performed when the message is processed. The security | ||
| 669 | * information can be saved using the eff_cap field of the | ||
| 670 | * netlink_skb_parms structure. Also may be used to provide fine | ||
| 671 | * grained control over message transmission. | ||
| 672 | * @sk associated sock of task sending the message. | ||
| 673 | * @skb contains the sk_buff structure for the netlink message. | ||
| 674 | * Return 0 if the information was successfully saved and message | ||
| 675 | * is allowed to be transmitted. | ||
| 676 | * | ||
| 677 | * Security hooks for Unix domain networking. | ||
| 678 | * | ||
| 679 | * @unix_stream_connect: | ||
| 680 | * Check permissions before establishing a Unix domain stream connection | ||
| 681 | * between @sock and @other. | ||
| 682 | * @sock contains the sock structure. | ||
| 683 | * @other contains the peer sock structure. | ||
| 684 | * @newsk contains the new sock structure. | ||
| 685 | * Return 0 if permission is granted. | ||
| 686 | * @unix_may_send: | ||
| 687 | * Check permissions before connecting or sending datagrams from @sock to | ||
| 688 | * @other. | ||
| 689 | * @sock contains the socket structure. | ||
| 690 | * @other contains the peer socket structure. | ||
| 691 | * Return 0 if permission is granted. | ||
| 692 | * | ||
| 693 | * The @unix_stream_connect and @unix_may_send hooks were necessary because | ||
| 694 | * Linux provides an alternative to the conventional file name space for Unix | ||
| 695 | * domain sockets. Whereas binding and connecting to sockets in the file name | ||
| 696 | * space is mediated by the typical file permissions (and caught by the mknod | ||
| 697 | * and permission hooks in inode_security_ops), binding and connecting to | ||
| 698 | * sockets in the abstract name space is completely unmediated. Sufficient | ||
| 699 | * control of Unix domain sockets in the abstract name space isn't possible | ||
| 700 | * using only the socket layer hooks, since we need to know the actual target | ||
| 701 | * socket, which is not looked up until we are inside the af_unix code. | ||
| 702 | * | ||
| 703 | * Security hooks for socket operations. | ||
| 704 | * | ||
| 705 | * @socket_create: | ||
| 706 | * Check permissions prior to creating a new socket. | ||
| 707 | * @family contains the requested protocol family. | ||
| 708 | * @type contains the requested communications type. | ||
| 709 | * @protocol contains the requested protocol. | ||
| 710 | * @kern set to 1 if a kernel socket. | ||
| 711 | * Return 0 if permission is granted. | ||
| 712 | * @socket_post_create: | ||
| 713 | * This hook allows a module to update or allocate a per-socket security | ||
| 714 | * structure. Note that the security field was not added directly to the | ||
| 715 | * socket structure, but rather, the socket security information is stored | ||
| 716 | * in the associated inode. Typically, the inode alloc_security hook will | ||
| 717 | * allocate and attach security information to | ||
| 718 | * sock->inode->i_security. This hook may be used to update the | ||
| 719 | * sock->inode->i_security field with additional information that wasn't | ||
| 720 | * available when the inode was allocated. | ||
| 721 | * @sock contains the newly created socket structure. | ||
| 722 | * @family contains the requested protocol family. | ||
| 723 | * @type contains the requested communications type. | ||
| 724 | * @protocol contains the requested protocol. | ||
| 725 | * @kern set to 1 if a kernel socket. | ||
| 726 | * @socket_bind: | ||
| 727 | * Check permission before socket protocol layer bind operation is | ||
| 728 | * performed and the socket @sock is bound to the address specified in the | ||
| 729 | * @address parameter. | ||
| 730 | * @sock contains the socket structure. | ||
| 731 | * @address contains the address to bind to. | ||
| 732 | * @addrlen contains the length of address. | ||
| 733 | * Return 0 if permission is granted. | ||
| 734 | * @socket_connect: | ||
| 735 | * Check permission before socket protocol layer connect operation | ||
| 736 | * attempts to connect socket @sock to a remote address, @address. | ||
| 737 | * @sock contains the socket structure. | ||
| 738 | * @address contains the address of remote endpoint. | ||
| 739 | * @addrlen contains the length of address. | ||
| 740 | * Return 0 if permission is granted. | ||
| 741 | * @socket_listen: | ||
| 742 | * Check permission before socket protocol layer listen operation. | ||
| 743 | * @sock contains the socket structure. | ||
| 744 | * @backlog contains the maximum length for the pending connection queue. | ||
| 745 | * Return 0 if permission is granted. | ||
| 746 | * @socket_accept: | ||
| 747 | * Check permission before accepting a new connection. Note that the new | ||
| 748 | * socket, @newsock, has been created and some information copied to it, | ||
| 749 | * but the accept operation has not actually been performed. | ||
| 750 | * @sock contains the listening socket structure. | ||
| 751 | * @newsock contains the newly created server socket for connection. | ||
| 752 | * Return 0 if permission is granted. | ||
| 753 | * @socket_sendmsg: | ||
| 754 | * Check permission before transmitting a message to another socket. | ||
| 755 | * @sock contains the socket structure. | ||
| 756 | * @msg contains the message to be transmitted. | ||
| 757 | * @size contains the size of message. | ||
| 758 | * Return 0 if permission is granted. | ||
| 759 | * @socket_recvmsg: | ||
| 760 | * Check permission before receiving a message from a socket. | ||
| 761 | * @sock contains the socket structure. | ||
| 762 | * @msg contains the message structure. | ||
| 763 | * @size contains the size of message structure. | ||
| 764 | * @flags contains the operational flags. | ||
| 765 | * Return 0 if permission is granted. | ||
| 766 | * @socket_getsockname: | ||
| 767 | * Check permission before the local address (name) of the socket object | ||
| 768 | * @sock is retrieved. | ||
| 769 | * @sock contains the socket structure. | ||
| 770 | * Return 0 if permission is granted. | ||
| 771 | * @socket_getpeername: | ||
| 772 | * Check permission before the remote address (name) of a socket object | ||
| 773 | * @sock is retrieved. | ||
| 774 | * @sock contains the socket structure. | ||
| 775 | * Return 0 if permission is granted. | ||
| 776 | * @socket_getsockopt: | ||
| 777 | * Check permissions before retrieving the options associated with socket | ||
| 778 | * @sock. | ||
| 779 | * @sock contains the socket structure. | ||
| 780 | * @level contains the protocol level to retrieve option from. | ||
| 781 | * @optname contains the name of option to retrieve. | ||
| 782 | * Return 0 if permission is granted. | ||
| 783 | * @socket_setsockopt: | ||
| 784 | * Check permissions before setting the options associated with socket | ||
| 785 | * @sock. | ||
| 786 | * @sock contains the socket structure. | ||
| 787 | * @level contains the protocol level to set options for. | ||
| 788 | * @optname contains the name of the option to set. | ||
| 789 | * Return 0 if permission is granted. | ||
| 790 | * @socket_shutdown: | ||
| 791 | * Checks permission before all or part of a connection on the socket | ||
| 792 | * @sock is shut down. | ||
| 793 | * @sock contains the socket structure. | ||
| 794 | * @how contains the flag indicating how future sends and receives | ||
| 795 | * are handled. | ||
| 796 | * Return 0 if permission is granted. | ||
| 797 | * @socket_sock_rcv_skb: | ||
| 798 | * Check permissions on incoming network packets. This hook is distinct | ||
| 799 | * from Netfilter's IP input hooks since it is the first time that the | ||
| 800 | * incoming sk_buff @skb has been associated with a particular socket, @sk. | ||
| 801 | * Must not sleep inside this hook because some callers hold spinlocks. | ||
| 802 | * @sk contains the sock (not socket) associated with the incoming sk_buff. | ||
| 803 | * @skb contains the incoming network data. | ||
| 804 | * @socket_getpeersec_stream: | ||
| 805 | * This hook allows the security module to provide peer socket security | ||
| 806 | * state for unix or connected tcp sockets to userspace via getsockopt | ||
| 807 | * SO_GETPEERSEC. For tcp sockets this can be meaningful if the | ||
| 808 | * socket is associated with an ipsec SA. | ||
| 809 | * @sock is the local socket. | ||
| 810 | * @optval userspace memory where the security state is to be copied. | ||
| 811 | * @optlen userspace int where the module should copy the actual length | ||
| 812 | * of the security state. | ||
| 813 | * @len as input is the maximum length to copy to userspace provided | ||
| 814 | * by the caller. | ||
| 815 | * Return 0 if all is well, otherwise, typical getsockopt return | ||
| 816 | * values. | ||
| 817 | * @socket_getpeersec_dgram: | ||
| 818 | * This hook allows the security module to provide peer socket security | ||
| 819 | * state for udp sockets on a per-packet basis to userspace via | ||
| 820 | * getsockopt SO_GETPEERSEC. The application must first have indicated | ||
| 821 | * the IP_PASSSEC option via setsockopt. It can then retrieve the | ||
| 822 | * security state returned by this hook for a packet via the SCM_SECURITY | ||
| 823 | * ancillary message type. | ||
| 824 | * @skb is the skbuff for the packet being queried | ||
| 825 | * @secdata is a pointer to a buffer in which to copy the security data | ||
| 826 | * @seclen is the maximum length for @secdata | ||
| 827 | * Return 0 on success, error on failure. | ||
| 828 | * @sk_alloc_security: | ||
| 829 | * Allocate and attach a security structure to the sk->sk_security field, | ||
| 830 | * which is used to copy security attributes between local stream sockets. | ||
| 831 | * @sk_free_security: | ||
| 832 | * Deallocate security structure. | ||
| 833 | * @sk_clone_security: | ||
| 834 | * Clone/copy security structure. | ||
| 835 | * @sk_getsecid: | ||
| 836 | * Retrieve the LSM-specific secid for the sock to enable caching | ||
| 837 | * of network authorizations. | ||
| 838 | * @sock_graft: | ||
| 839 | * Sets the socket's isec sid to the sock's sid. | ||
| 840 | * @inet_conn_request: | ||
| 841 | * Sets the openreq's sid to socket's sid with MLS portion taken | ||
| 842 | * from peer sid. | ||
| 843 | * @inet_csk_clone: | ||
| 844 | * Sets the new child socket's sid to the openreq sid. | ||
| 845 | * @inet_conn_established: | ||
| 846 | * Sets the connection's peersid to the secmark on skb. | ||
| 847 | * @secmark_relabel_packet: | ||
| 848 | * check if the process should be allowed to relabel packets to | ||
| 849 | * the given secid | ||
| 850 | * @security_secmark_refcount_inc | ||
| 851 | * tells the LSM to increment the number of secmark labeling rules loaded | ||
| 852 | * @security_secmark_refcount_dec | ||
| 853 | * tells the LSM to decrement the number of secmark labeling rules loaded | ||
| 854 | * @req_classify_flow: | ||
| 855 | * Sets the flow's sid to the openreq sid. | ||
| 856 | * @tun_dev_alloc_security: | ||
| 857 | * This hook allows a module to allocate a security structure for a TUN | ||
| 858 | * device. | ||
| 859 | * @security pointer to a security structure pointer. | ||
| 860 | * Returns a zero on success, negative values on failure. | ||
| 861 | * @tun_dev_free_security: | ||
| 862 | * This hook allows a module to free the security structure for a TUN | ||
| 863 | * device. | ||
| 864 | * @security pointer to the TUN device's security structure | ||
| 865 | * @tun_dev_create: | ||
| 866 | * Check permissions prior to creating a new TUN device. | ||
| 867 | * @tun_dev_attach_queue: | ||
| 868 | * Check permissions prior to attaching to a TUN device queue. | ||
| 869 | * @security pointer to the TUN device's security structure. | ||
| 870 | * @tun_dev_attach: | ||
| 871 | * This hook can be used by the module to update any security state | ||
| 872 | * associated with the TUN device's sock structure. | ||
| 873 | * @sk contains the existing sock structure. | ||
| 874 | * @security pointer to the TUN device's security structure. | ||
| 875 | * @tun_dev_open: | ||
| 876 | * This hook can be used by the module to update any security state | ||
| 877 | * associated with the TUN device's security structure. | ||
| 878 | * @security pointer to the TUN device's security structure. | ||
| 879 | * | ||
| 880 | * Security hooks for XFRM operations. | ||
| 881 | * | ||
| 882 | * @xfrm_policy_alloc_security: | ||
| 883 | * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy | ||
| 884 | * Database used by the XFRM system. | ||
| 885 | * @sec_ctx contains the security context information being provided by | ||
| 886 | * the user-level policy update program (e.g., setkey). | ||
| 887 | * Allocate a security structure to the xp->security field; the security | ||
| 888 | * field is initialized to NULL when the xfrm_policy is allocated. | ||
| 889 | * Return 0 if operation was successful (memory to allocate, legal context) | ||
| 890 | * @gfp is to specify the context for the allocation | ||
| 891 | * @xfrm_policy_clone_security: | ||
| 892 | * @old_ctx contains an existing xfrm_sec_ctx. | ||
| 893 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. | ||
| 894 | * Allocate a security structure in new_ctxp that contains the | ||
| 895 | * information from the old_ctx structure. | ||
| 896 | * Return 0 if operation was successful (memory to allocate). | ||
| 897 | * @xfrm_policy_free_security: | ||
| 898 | * @ctx contains the xfrm_sec_ctx | ||
| 899 | * Deallocate xp->security. | ||
| 900 | * @xfrm_policy_delete_security: | ||
| 901 | * @ctx contains the xfrm_sec_ctx. | ||
| 902 | * Authorize deletion of xp->security. | ||
| 903 | * @xfrm_state_alloc: | ||
| 904 | * @x contains the xfrm_state being added to the Security Association | ||
| 905 | * Database by the XFRM system. | ||
| 906 | * @sec_ctx contains the security context information being provided by | ||
| 907 | * the user-level SA generation program (e.g., setkey or racoon). | ||
| 908 | * Allocate a security structure to the x->security field; the security | ||
| 909 | * field is initialized to NULL when the xfrm_state is allocated. Set the | ||
| 910 | * context to correspond to sec_ctx. Return 0 if operation was successful | ||
| 911 | * (memory to allocate, legal context). | ||
| 912 | * @xfrm_state_alloc_acquire: | ||
| 913 | * @x contains the xfrm_state being added to the Security Association | ||
| 914 | * Database by the XFRM system. | ||
| 915 | * @polsec contains the policy's security context. | ||
| 916 | * @secid contains the secid from which to take the mls portion of the | ||
| 917 | * context. | ||
| 918 | * Allocate a security structure to the x->security field; the security | ||
| 919 | * field is initialized to NULL when the xfrm_state is allocated. Set the | ||
| 920 | * context to correspond to secid. Return 0 if operation was successful | ||
| 921 | * (memory to allocate, legal context). | ||
| 922 | * @xfrm_state_free_security: | ||
| 923 | * @x contains the xfrm_state. | ||
| 924 | * Deallocate x->security. | ||
| 925 | * @xfrm_state_delete_security: | ||
| 926 | * @x contains the xfrm_state. | ||
| 927 | * Authorize deletion of x->security. | ||
| 928 | * @xfrm_policy_lookup: | ||
| 929 | * @ctx contains the xfrm_sec_ctx for which the access control is being | ||
| 930 | * checked. | ||
| 931 | * @fl_secid contains the flow security label that is used to authorize | ||
| 932 | * access to the policy xp. | ||
| 933 | * @dir contains the direction of the flow (input or output). | ||
| 934 | * Check permission when a flow selects a xfrm_policy for processing | ||
| 935 | * XFRMs on a packet. The hook is called when selecting either a | ||
| 936 | * per-socket policy or a generic xfrm policy. | ||
| 937 | * Return 0 if permission is granted, -ESRCH otherwise, or -errno | ||
| 938 | * on other errors. | ||
| 939 | * @xfrm_state_pol_flow_match: | ||
| 940 | * @x contains the state to match. | ||
| 941 | * @xp contains the policy to check for a match. | ||
| 942 | * @fl contains the flow to check for a match. | ||
| 943 | * Return 1 if there is a match. | ||
| 944 | * @xfrm_decode_session: | ||
| 945 | * @skb points to skb to decode. | ||
| 946 | * @secid points to the flow key secid to set. | ||
| 947 | * @ckall says if all xfrms used should be checked for same secid. | ||
| 948 | * Return 0 if ckall is zero or all xfrms used have the same secid. | ||
| 949 | * | ||
| 950 | * Security hooks affecting all Key Management operations | ||
| 951 | * | ||
| 952 | * @key_alloc: | ||
| 953 | * Permit allocation of a key and assign security data. Note that key does | ||
| 954 | * not have a serial number assigned at this point. | ||
| 955 | * @key points to the key. | ||
| 956 | * @flags is the allocation flags | ||
| 957 | * Return 0 if permission is granted, -ve error otherwise. | ||
| 958 | * @key_free: | ||
| 959 | * Notification of destruction; free security data. | ||
| 960 | * @key points to the key. | ||
| 961 | * No return value. | ||
| 962 | * @key_permission: | ||
| 963 | * See whether a specific operational right is granted to a process on a | ||
| 964 | * key. | ||
| 965 | * @key_ref refers to the key (key pointer + possession attribute bit). | ||
| 966 | * @cred points to the credentials to provide the context against which to | ||
| 967 | * evaluate the security data on the key. | ||
| 968 | * @perm describes the combination of permissions required of this key. | ||
| 969 | * Return 0 if permission is granted, -ve error otherwise. | ||
| 970 | * @key_getsecurity: | ||
| 971 | * Get a textual representation of the security context attached to a key | ||
| 972 | * for the purposes of honouring KEYCTL_GETSECURITY. This function | ||
| 973 | * allocates the storage for the NUL-terminated string and the caller | ||
| 974 | * should free it. | ||
| 975 | * @key points to the key to be queried. | ||
| 976 | * @_buffer points to a pointer that should be set to point to the | ||
| 977 | * resulting string (if no label or an error occurs). | ||
| 978 | * Return the length of the string (including terminating NUL) or -ve if | ||
| 979 | * an error. | ||
| 980 | * May also return 0 (and a NULL buffer pointer) if there is no label. | ||
| 981 | * | ||
| 982 | * Security hooks affecting all System V IPC operations. | ||
| 983 | * | ||
| 984 | * @ipc_permission: | ||
| 985 | * Check permissions for access to IPC | ||
| 986 | * @ipcp contains the kernel IPC permission structure | ||
| 987 | * @flag contains the desired (requested) permission set | ||
| 988 | * Return 0 if permission is granted. | ||
| 989 | * @ipc_getsecid: | ||
| 990 | * Get the secid associated with the ipc object. | ||
| 991 | * @ipcp contains the kernel IPC permission structure. | ||
| 992 | * @secid contains a pointer to the location where result will be saved. | ||
| 993 | * In case of failure, @secid will be set to zero. | ||
| 994 | * | ||
| 995 | * Security hooks for individual messages held in System V IPC message queues | ||
| 996 | * @msg_msg_alloc_security: | ||
| 997 | * Allocate and attach a security structure to the msg->security field. | ||
| 998 | * The security field is initialized to NULL when the structure is first | ||
| 999 | * created. | ||
| 1000 | * @msg contains the message structure to be modified. | ||
| 1001 | * Return 0 if operation was successful and permission is granted. | ||
| 1002 | * @msg_msg_free_security: | ||
| 1003 | * Deallocate the security structure for this message. | ||
| 1004 | * @msg contains the message structure to be modified. | ||
| 1005 | * | ||
| 1006 | * Security hooks for System V IPC Message Queues | ||
| 1007 | * | ||
| 1008 | * @msg_queue_alloc_security: | ||
| 1009 | * Allocate and attach a security structure to the | ||
| 1010 | * msq->q_perm.security field. The security field is initialized to | ||
| 1011 | * NULL when the structure is first created. | ||
| 1012 | * @msq contains the message queue structure to be modified. | ||
| 1013 | * Return 0 if operation was successful and permission is granted. | ||
| 1014 | * @msg_queue_free_security: | ||
| 1015 | * Deallocate security structure for this message queue. | ||
| 1016 | * @msq contains the message queue structure to be modified. | ||
| 1017 | * @msg_queue_associate: | ||
| 1018 | * Check permission when a message queue is requested through the | ||
| 1019 | * msgget system call. This hook is only called when returning the | ||
| 1020 | * message queue identifier for an existing message queue, not when a | ||
| 1021 | * new message queue is created. | ||
| 1022 | * @msq contains the message queue to act upon. | ||
| 1023 | * @msqflg contains the operation control flags. | ||
| 1024 | * Return 0 if permission is granted. | ||
| 1025 | * @msg_queue_msgctl: | ||
| 1026 | * Check permission when a message control operation specified by @cmd | ||
| 1027 | * is to be performed on the message queue @msq. | ||
| 1028 | * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. | ||
| 1029 | * @msq contains the message queue to act upon. May be NULL. | ||
| 1030 | * @cmd contains the operation to be performed. | ||
| 1031 | * Return 0 if permission is granted. | ||
| 1032 | * @msg_queue_msgsnd: | ||
| 1033 | * Check permission before a message, @msg, is enqueued on the message | ||
| 1034 | * queue, @msq. | ||
| 1035 | * @msq contains the message queue to send message to. | ||
| 1036 | * @msg contains the message to be enqueued. | ||
| 1037 | * @msqflg contains operational flags. | ||
| 1038 | * Return 0 if permission is granted. | ||
| 1039 | * @msg_queue_msgrcv: | ||
| 1040 | * Check permission before a message, @msg, is removed from the message | ||
| 1041 | * queue, @msq. The @target task structure contains a pointer to the | ||
| 1042 | * process that will be receiving the message (not equal to the current | ||
| 1043 | * process when inline receives are being performed). | ||
| 1044 | * @msq contains the message queue to retrieve message from. | ||
| 1045 | * @msg contains the message destination. | ||
| 1046 | * @target contains the task structure for recipient process. | ||
| 1047 | * @type contains the type of message requested. | ||
| 1048 | * @mode contains the operational flags. | ||
| 1049 | * Return 0 if permission is granted. | ||
| 1050 | * | ||
| 1051 | * Security hooks for System V Shared Memory Segments | ||
| 1052 | * | ||
| 1053 | * @shm_alloc_security: | ||
| 1054 | * Allocate and attach a security structure to the shp->shm_perm.security | ||
| 1055 | * field. The security field is initialized to NULL when the structure is | ||
| 1056 | * first created. | ||
| 1057 | * @shp contains the shared memory structure to be modified. | ||
| 1058 | * Return 0 if operation was successful and permission is granted. | ||
| 1059 | * @shm_free_security: | ||
| 1060 | * Deallocate the security struct for this memory segment. | ||
| 1061 | * @shp contains the shared memory structure to be modified. | ||
| 1062 | * @shm_associate: | ||
| 1063 | * Check permission when a shared memory region is requested through the | ||
| 1064 | * shmget system call. This hook is only called when returning the shared | ||
| 1065 | * memory region identifier for an existing region, not when a new shared | ||
| 1066 | * memory region is created. | ||
| 1067 | * @shp contains the shared memory structure to be modified. | ||
| 1068 | * @shmflg contains the operation control flags. | ||
| 1069 | * Return 0 if permission is granted. | ||
| 1070 | * @shm_shmctl: | ||
| 1071 | * Check permission when a shared memory control operation specified by | ||
| 1072 | * @cmd is to be performed on the shared memory region @shp. | ||
| 1073 | * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO. | ||
| 1074 | * @shp contains shared memory structure to be modified. | ||
| 1075 | * @cmd contains the operation to be performed. | ||
| 1076 | * Return 0 if permission is granted. | ||
| 1077 | * @shm_shmat: | ||
| 1078 | * Check permissions prior to allowing the shmat system call to attach the | ||
| 1079 | * shared memory segment @shp to the data segment of the calling process. | ||
| 1080 | * The attaching address is specified by @shmaddr. | ||
| 1081 | * @shp contains the shared memory structure to be modified. | ||
| 1082 | * @shmaddr contains the address to attach memory region to. | ||
| 1083 | * @shmflg contains the operational flags. | ||
| 1084 | * Return 0 if permission is granted. | ||
| 1085 | * | ||
| 1086 | * Security hooks for System V Semaphores | ||
| 1087 | * | ||
| 1088 | * @sem_alloc_security: | ||
| 1089 | * Allocate and attach a security structure to the sma->sem_perm.security | ||
| 1090 | * field. The security field is initialized to NULL when the structure is | ||
| 1091 | * first created. | ||
| 1092 | * @sma contains the semaphore structure | ||
| 1093 | * Return 0 if operation was successful and permission is granted. | ||
| 1094 | * @sem_free_security: | ||
| 1095 | * Deallocate the security struct for this semaphore. | ||
| 1096 | * @sma contains the semaphore structure. | ||
| 1097 | * @sem_associate: | ||
| 1098 | * Check permission when a semaphore is requested through the semget | ||
| 1099 | * system call. This hook is only called when returning the semaphore | ||
| 1100 | * identifier for an existing semaphore, not when a new one must be | ||
| 1101 | * created. | ||
| 1102 | * @sma contains the semaphore structure. | ||
| 1103 | * @semflg contains the operation control flags. | ||
| 1104 | * Return 0 if permission is granted. | ||
| 1105 | * @sem_semctl: | ||
| 1106 | * Check permission when a semaphore operation specified by @cmd is to be | ||
| 1107 | * performed on the semaphore @sma. The @sma may be NULL, e.g. for | ||
| 1108 | * IPC_INFO or SEM_INFO. | ||
| 1109 | * @sma contains the semaphore structure. May be NULL. | ||
| 1110 | * @cmd contains the operation to be performed. | ||
| 1111 | * Return 0 if permission is granted. | ||
| 1112 | * @sem_semop: | ||
| 1113 | * Check permissions before performing operations on members of the | ||
| 1114 | * semaphore set @sma. If the @alter flag is nonzero, the semaphore set | ||
| 1115 | * may be modified. | ||
| 1116 | * @sma contains the semaphore structure. | ||
| 1117 | * @sops contains the operations to perform. | ||
| 1118 | * @nsops contains the number of operations to perform. | ||
| 1119 | * @alter contains the flag indicating whether changes are to be made. | ||
| 1120 | * Return 0 if permission is granted. | ||
| 1121 | * | ||
| 1122 | * @binder_set_context_mgr | ||
| 1123 | * Check whether @mgr is allowed to be the binder context manager. | ||
| 1124 | * @mgr contains the task_struct for the task being registered. | ||
| 1125 | * Return 0 if permission is granted. | ||
| 1126 | * @binder_transaction | ||
| 1127 | * Check whether @from is allowed to invoke a binder transaction call | ||
| 1128 | * to @to. | ||
| 1129 | * @from contains the task_struct for the sending task. | ||
| 1130 | * @to contains the task_struct for the receiving task. | ||
| 1131 | * @binder_transfer_binder | ||
| 1132 | * Check whether @from is allowed to transfer a binder reference to @to. | ||
| 1133 | * @from contains the task_struct for the sending task. | ||
| 1134 | * @to contains the task_struct for the receiving task. | ||
| 1135 | * @binder_transfer_file | ||
| 1136 | * Check whether @from is allowed to transfer @file to @to. | ||
| 1137 | * @from contains the task_struct for the sending task. | ||
| 1138 | * @file contains the struct file being transferred. | ||
| 1139 | * @to contains the task_struct for the receiving task. | ||
| 1140 | * | ||
| 1141 | * @ptrace_access_check: | ||
| 1142 | * Check permission before allowing the current process to trace the | ||
| 1143 | * @child process. | ||
| 1144 | * Security modules may also want to perform a process tracing check | ||
| 1145 | * during an execve in the bprm_set_creds hook of | ||
| 1147 | * binprm_security_ops if the process is being traced and its security | ||
| 1148 | * attributes would be changed by the execve. | ||
| 1149 | * @child contains the task_struct structure for the target process. | ||
| 1150 | * @mode contains the PTRACE_MODE flags indicating the form of access. | ||
| 1151 | * Return 0 if permission is granted. | ||
| 1152 | * @ptrace_traceme: | ||
| 1153 | * Check that the @parent process has sufficient permission to trace the | ||
| 1154 | * current process before allowing the current process to present itself | ||
| 1155 | * to the @parent process for tracing. | ||
| 1156 | * @parent contains the task_struct structure for debugger process. | ||
| 1157 | * Return 0 if permission is granted. | ||
| 1158 | * @capget: | ||
| 1159 | * Get the @effective, @inheritable, and @permitted capability sets for | ||
| 1160 | * the @target process. The hook may also perform permission checking to | ||
| 1161 | * determine if the current process is allowed to see the capability sets | ||
| 1162 | * of the @target process. | ||
| 1163 | * @target contains the task_struct structure for target process. | ||
| 1164 | * @effective contains the effective capability set. | ||
| 1165 | * @inheritable contains the inheritable capability set. | ||
| 1166 | * @permitted contains the permitted capability set. | ||
| 1167 | * Return 0 if the capability sets were successfully obtained. | ||
| 1168 | * @capset: | ||
| 1169 | * Set the @effective, @inheritable, and @permitted capability sets for | ||
| 1170 | * the current process. | ||
| 1171 | * @new contains the new credentials structure for target process. | ||
| 1172 | * @old contains the current credentials structure for target process. | ||
| 1173 | * @effective contains the effective capability set. | ||
| 1174 | * @inheritable contains the inheritable capability set. | ||
| 1175 | * @permitted contains the permitted capability set. | ||
| 1176 | * Return 0 and update @new if permission is granted. | ||
| 1177 | * @capable: | ||
| 1178 | * Check whether the @tsk process has the @cap capability in the indicated | ||
| 1179 | * credentials. | ||
| 1180 | * @cred contains the credentials to use. | ||
| 1181 | * @ns contains the user namespace we want the capability in | ||
| 1182 | * @cap contains the capability <include/linux/capability.h>. | ||
| 1183 | * @audit: Whether to write an audit message or not | ||
| 1184 | * Return 0 if the capability is granted for @tsk. | ||
| 1185 | * @syslog: | ||
| 1186 | * Check permission before accessing the kernel message ring or changing | ||
| 1187 | * logging to the console. | ||
| 1188 | * See the syslog(2) manual page for an explanation of the @type values. | ||
| 1189 | * @type contains the type of action. | ||
| 1190 | * @from_file indicates the context of action (if it came from /proc). | ||
| 1191 | * Return 0 if permission is granted. | ||
| 1192 | * @settime: | ||
| 1193 | * Check permission to change the system time. | ||
| 1194 | * struct timespec and timezone are defined in include/linux/time.h | ||
| 1195 | * @ts contains new time | ||
| 1196 | * @tz contains new timezone | ||
| 1197 | * Return 0 if permission is granted. | ||
| 1198 | * @vm_enough_memory: | ||
| 1199 | * Check permissions for allocating a new virtual mapping. | ||
| 1200 | * @mm contains the mm struct it is being added to. | ||
| 1201 | * @pages contains the number of pages. | ||
| 1202 | * Return 0 if permission is granted. | ||
| 1203 | * | ||
| 1204 | * @ismaclabel: | ||
| 1205 | * Check if the extended attribute specified by @name | ||
| 1206 | * represents a MAC label. Returns 1 if name is a MAC | ||
| 1207 | * attribute otherwise returns 0. | ||
| 1208 | * @name full extended attribute name to check against | ||
| 1209 | * LSM as a MAC label. | ||
| 1210 | * | ||
| 1211 | * @secid_to_secctx: | ||
| 1212 | * Convert secid to security context. If secdata is NULL the length of | ||
| 1213 | * the result will be returned in seclen, but no secdata will be returned. | ||
| 1214 | * This does mean that the length could change between calls to check the | ||
| 1215 | * length and the next call which actually allocates and returns the | ||
| 1216 | * secdata. | ||
| 1217 | * @secid contains the security ID. | ||
| 1218 | * @secdata contains the pointer that stores the converted security | ||
| 1219 | * context. | ||
| 1220 | * @seclen pointer which contains the length of the data | ||
| 1221 | * @secctx_to_secid: | ||
| 1222 | * Convert security context to secid. | ||
| 1223 | * @secid contains the pointer to the generated security ID. | ||
| 1224 | * @secdata contains the security context. | ||
| 1225 | * | ||
| 1226 | * @release_secctx: | ||
| 1227 | * Release the security context. | ||
| 1228 | * @secdata contains the security context. | ||
| 1229 | * @seclen contains the length of the security context. | ||
| 1230 | * | ||
| 1231 | * Security hooks for Audit | ||
| 1232 | * | ||
| 1233 | * @audit_rule_init: | ||
| 1234 | * Allocate and initialize an LSM audit rule structure. | ||
| 1235 | * @field contains the required Audit action. | ||
| 1236 | * Fields flags are defined in include/linux/audit.h | ||
| 1237 | * @op contains the operator the rule uses. | ||
| 1238 | * @rulestr contains the context where the rule will be applied to. | ||
| 1239 | * @lsmrule contains a pointer to receive the result. | ||
| 1240 | * Return 0 if @lsmrule has been successfully set, | ||
| 1241 | * -EINVAL in case of an invalid rule. | ||
| 1242 | * | ||
| 1243 | * @audit_rule_known: | ||
| 1244 | * Specifies whether given @rule contains any fields related to | ||
| 1245 | * current LSM. | ||
| 1246 | * @rule contains the audit rule of interest. | ||
| 1247 | * Return 1 in case of relation found, 0 otherwise. | ||
| 1248 | * | ||
| 1249 | * @audit_rule_match: | ||
| 1250 | * Determine if given @secid matches a rule previously approved | ||
| 1251 | * by @audit_rule_known. | ||
| 1252 | * @secid contains the security id in question. | ||
| 1253 | * @field contains the field which relates to current LSM. | ||
| 1254 | * @op contains the operator that will be used for matching. | ||
| 1255 | * @rule points to the audit rule that will be checked against. | ||
| 1256 | * @actx points to the audit context associated with the check. | ||
| 1257 | * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. | ||
| 1258 | * | ||
| 1259 | * @audit_rule_free: | ||
| 1260 | * Deallocate the LSM audit rule structure previously allocated by | ||
| 1261 | * audit_rule_init. | ||
| 1262 | * @rule contains the allocated rule | ||
| 1263 | * | ||
| 1264 | * @inode_notifysecctx: | ||
| 1265 | * Notify the security module of what the security context of an inode | ||
| 1266 | * should be. Initializes the incore security context managed by the | ||
| 1267 | * security module for this inode. Example usage: NFS client invokes | ||
| 1268 | * this hook to initialize the security context in its incore inode to the | ||
| 1269 | * value provided by the server for the file when the server returned the | ||
| 1270 | * file's attributes to the client. | ||
| 1271 | * | ||
| 1272 | * Must be called with inode->i_mutex locked. | ||
| 1273 | * | ||
| 1274 | * @inode we wish to set the security context of. | ||
| 1275 | * @ctx contains the string which we wish to set in the inode. | ||
| 1276 | * @ctxlen contains the length of @ctx. | ||
| 1277 | * | ||
| 1278 | * @inode_setsecctx: | ||
| 1279 | * Change the security context of an inode. Updates the | ||
| 1280 | * incore security context managed by the security module and invokes the | ||
| 1281 | * fs code as needed (via __vfs_setxattr_noperm) to update any backing | ||
| 1282 | * xattrs that represent the context. Example usage: NFS server invokes | ||
| 1283 | * this hook to change the security context in its incore inode and on the | ||
| 1284 | * backing filesystem to a value provided by the client on a SETATTR | ||
| 1285 | * operation. | ||
| 1286 | * | ||
| 1287 | * Must be called with inode->i_mutex locked. | ||
| 1288 | * | ||
| 1289 | * @dentry contains the inode we wish to set the security context of. | ||
| 1290 | * @ctx contains the string which we wish to set in the inode. | ||
| 1291 | * @ctxlen contains the length of @ctx. | ||
| 1292 | * | ||
| 1293 | * @inode_getsecctx: | ||
| 1294 | * On success, returns 0 and fills out @ctx and @ctxlen with the security | ||
| 1295 | * context for the given @inode. | ||
| 1296 | * | ||
| 1297 | * @inode we wish to get the security context of. | ||
| 1298 | * @ctx is a pointer in which to place the allocated security context. | ||
| 1299 | * @ctxlen points to the place to put the length of @ctx. | ||
| 1300 | * This is the main security structure. | ||
| 1301 | */ | ||
| 1302 | |||
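For orientation, the sketch below shows how a security module might wire two of the hooks documented above into the union that follows. It is illustrative only and not part of this patch: the hook signatures are copied from the union below, while the LSM_HOOK_INIT()/security_add_hooks()/security_initcall() wiring and every "example_" identifier are assumptions about the surrounding registration infrastructure rather than something this header guarantees.

    #include <linux/lsm_hooks.h>
    #include <linux/init.h>

    /* Hypothetical hook bodies: return 0 to grant, a -errno value to deny. */
    static int example_ptrace_access_check(struct task_struct *child,
                                           unsigned int mode)
    {
            return 0;
    }

    static int example_inode_create(struct inode *dir, struct dentry *dentry,
                                    umode_t mode)
    {
            return 0;
    }

    /* Each entry binds one member of union security_list_options. */
    static struct security_hook_list example_hooks[] = {
            LSM_HOOK_INIT(ptrace_access_check, example_ptrace_access_check),
            LSM_HOOK_INIT(inode_create, example_inode_create),
    };

    static int __init example_lsm_init(void)
    {
            /* Assumed registration path for a built-in module. */
            security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
            return 0;
    }
    security_initcall(example_lsm_init);

The list-based registration is the point of this union: each registered entry is appended to a per-hook list, so more than one module can supply a handler for the same operation and the security_* wrappers simply walk the list.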
| 1303 | union security_list_options { | ||
| 1304 | int (*binder_set_context_mgr)(struct task_struct *mgr); | ||
| 1305 | int (*binder_transaction)(struct task_struct *from, | ||
| 1306 | struct task_struct *to); | ||
| 1307 | int (*binder_transfer_binder)(struct task_struct *from, | ||
| 1308 | struct task_struct *to); | ||
| 1309 | int (*binder_transfer_file)(struct task_struct *from, | ||
| 1310 | struct task_struct *to, | ||
| 1311 | struct file *file); | ||
| 1312 | |||
| 1313 | int (*ptrace_access_check)(struct task_struct *child, | ||
| 1314 | unsigned int mode); | ||
| 1315 | int (*ptrace_traceme)(struct task_struct *parent); | ||
| 1316 | int (*capget)(struct task_struct *target, kernel_cap_t *effective, | ||
| 1317 | kernel_cap_t *inheritable, kernel_cap_t *permitted); | ||
| 1318 | int (*capset)(struct cred *new, const struct cred *old, | ||
| 1319 | const kernel_cap_t *effective, | ||
| 1320 | const kernel_cap_t *inheritable, | ||
| 1321 | const kernel_cap_t *permitted); | ||
| 1322 | int (*capable)(const struct cred *cred, struct user_namespace *ns, | ||
| 1323 | int cap, int audit); | ||
| 1324 | int (*quotactl)(int cmds, int type, int id, struct super_block *sb); | ||
| 1325 | int (*quota_on)(struct dentry *dentry); | ||
| 1326 | int (*syslog)(int type); | ||
| 1327 | int (*settime)(const struct timespec *ts, const struct timezone *tz); | ||
| 1328 | int (*vm_enough_memory)(struct mm_struct *mm, long pages); | ||
| 1329 | |||
| 1330 | int (*bprm_set_creds)(struct linux_binprm *bprm); | ||
| 1331 | int (*bprm_check_security)(struct linux_binprm *bprm); | ||
| 1332 | int (*bprm_secureexec)(struct linux_binprm *bprm); | ||
| 1333 | void (*bprm_committing_creds)(struct linux_binprm *bprm); | ||
| 1334 | void (*bprm_committed_creds)(struct linux_binprm *bprm); | ||
| 1335 | |||
| 1336 | int (*sb_alloc_security)(struct super_block *sb); | ||
| 1337 | void (*sb_free_security)(struct super_block *sb); | ||
| 1338 | int (*sb_copy_data)(char *orig, char *copy); | ||
| 1339 | int (*sb_remount)(struct super_block *sb, void *data); | ||
| 1340 | int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); | ||
| 1341 | int (*sb_show_options)(struct seq_file *m, struct super_block *sb); | ||
| 1342 | int (*sb_statfs)(struct dentry *dentry); | ||
| 1343 | int (*sb_mount)(const char *dev_name, struct path *path, | ||
| 1344 | const char *type, unsigned long flags, void *data); | ||
| 1345 | int (*sb_umount)(struct vfsmount *mnt, int flags); | ||
| 1346 | int (*sb_pivotroot)(struct path *old_path, struct path *new_path); | ||
| 1347 | int (*sb_set_mnt_opts)(struct super_block *sb, | ||
| 1348 | struct security_mnt_opts *opts, | ||
| 1349 | unsigned long kern_flags, | ||
| 1350 | unsigned long *set_kern_flags); | ||
| 1351 | int (*sb_clone_mnt_opts)(const struct super_block *oldsb, | ||
| 1352 | struct super_block *newsb); | ||
| 1353 | int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); | ||
| 1354 | int (*dentry_init_security)(struct dentry *dentry, int mode, | ||
| 1355 | struct qstr *name, void **ctx, | ||
| 1356 | u32 *ctxlen); | ||
| 1357 | |||
| 1358 | |||
| 1359 | #ifdef CONFIG_SECURITY_PATH | ||
| 1360 | int (*path_unlink)(struct path *dir, struct dentry *dentry); | ||
| 1361 | int (*path_mkdir)(struct path *dir, struct dentry *dentry, | ||
| 1362 | umode_t mode); | ||
| 1363 | int (*path_rmdir)(struct path *dir, struct dentry *dentry); | ||
| 1364 | int (*path_mknod)(struct path *dir, struct dentry *dentry, | ||
| 1365 | umode_t mode, unsigned int dev); | ||
| 1366 | int (*path_truncate)(struct path *path); | ||
| 1367 | int (*path_symlink)(struct path *dir, struct dentry *dentry, | ||
| 1368 | const char *old_name); | ||
| 1369 | int (*path_link)(struct dentry *old_dentry, struct path *new_dir, | ||
| 1370 | struct dentry *new_dentry); | ||
| 1371 | int (*path_rename)(struct path *old_dir, struct dentry *old_dentry, | ||
| 1372 | struct path *new_dir, | ||
| 1373 | struct dentry *new_dentry); | ||
| 1374 | int (*path_chmod)(struct path *path, umode_t mode); | ||
| 1375 | int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid); | ||
| 1376 | int (*path_chroot)(struct path *path); | ||
| 1377 | #endif | ||
| 1378 | |||
| 1379 | int (*inode_alloc_security)(struct inode *inode); | ||
| 1380 | void (*inode_free_security)(struct inode *inode); | ||
| 1381 | int (*inode_init_security)(struct inode *inode, struct inode *dir, | ||
| 1382 | const struct qstr *qstr, | ||
| 1383 | const char **name, void **value, | ||
| 1384 | size_t *len); | ||
| 1385 | int (*inode_create)(struct inode *dir, struct dentry *dentry, | ||
| 1386 | umode_t mode); | ||
| 1387 | int (*inode_link)(struct dentry *old_dentry, struct inode *dir, | ||
| 1388 | struct dentry *new_dentry); | ||
| 1389 | int (*inode_unlink)(struct inode *dir, struct dentry *dentry); | ||
| 1390 | int (*inode_symlink)(struct inode *dir, struct dentry *dentry, | ||
| 1391 | const char *old_name); | ||
| 1392 | int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, | ||
| 1393 | umode_t mode); | ||
| 1394 | int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); | ||
| 1395 | int (*inode_mknod)(struct inode *dir, struct dentry *dentry, | ||
| 1396 | umode_t mode, dev_t dev); | ||
| 1397 | int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, | ||
| 1398 | struct inode *new_dir, | ||
| 1399 | struct dentry *new_dentry); | ||
| 1400 | int (*inode_readlink)(struct dentry *dentry); | ||
| 1401 | int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, | ||
| 1402 | bool rcu); | ||
| 1403 | int (*inode_permission)(struct inode *inode, int mask); | ||
| 1404 | int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); | ||
| 1405 | int (*inode_getattr)(const struct path *path); | ||
| 1406 | int (*inode_setxattr)(struct dentry *dentry, const char *name, | ||
| 1407 | const void *value, size_t size, int flags); | ||
| 1408 | void (*inode_post_setxattr)(struct dentry *dentry, const char *name, | ||
| 1409 | const void *value, size_t size, | ||
| 1410 | int flags); | ||
| 1411 | int (*inode_getxattr)(struct dentry *dentry, const char *name); | ||
| 1412 | int (*inode_listxattr)(struct dentry *dentry); | ||
| 1413 | int (*inode_removexattr)(struct dentry *dentry, const char *name); | ||
| 1414 | int (*inode_need_killpriv)(struct dentry *dentry); | ||
| 1415 | int (*inode_killpriv)(struct dentry *dentry); | ||
| 1416 | int (*inode_getsecurity)(const struct inode *inode, const char *name, | ||
| 1417 | void **buffer, bool alloc); | ||
| 1418 | int (*inode_setsecurity)(struct inode *inode, const char *name, | ||
| 1419 | const void *value, size_t size, | ||
| 1420 | int flags); | ||
| 1421 | int (*inode_listsecurity)(struct inode *inode, char *buffer, | ||
| 1422 | size_t buffer_size); | ||
| 1423 | void (*inode_getsecid)(const struct inode *inode, u32 *secid); | ||
| 1424 | |||
| 1425 | int (*file_permission)(struct file *file, int mask); | ||
| 1426 | int (*file_alloc_security)(struct file *file); | ||
| 1427 | void (*file_free_security)(struct file *file); | ||
| 1428 | int (*file_ioctl)(struct file *file, unsigned int cmd, | ||
| 1429 | unsigned long arg); | ||
| 1430 | int (*mmap_addr)(unsigned long addr); | ||
| 1431 | int (*mmap_file)(struct file *file, unsigned long reqprot, | ||
| 1432 | unsigned long prot, unsigned long flags); | ||
| 1433 | int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, | ||
| 1434 | unsigned long prot); | ||
| 1435 | int (*file_lock)(struct file *file, unsigned int cmd); | ||
| 1436 | int (*file_fcntl)(struct file *file, unsigned int cmd, | ||
| 1437 | unsigned long arg); | ||
| 1438 | void (*file_set_fowner)(struct file *file); | ||
| 1439 | int (*file_send_sigiotask)(struct task_struct *tsk, | ||
| 1440 | struct fown_struct *fown, int sig); | ||
| 1441 | int (*file_receive)(struct file *file); | ||
| 1442 | int (*file_open)(struct file *file, const struct cred *cred); | ||
| 1443 | |||
| 1444 | int (*task_create)(unsigned long clone_flags); | ||
| 1445 | void (*task_free)(struct task_struct *task); | ||
| 1446 | int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); | ||
| 1447 | void (*cred_free)(struct cred *cred); | ||
| 1448 | int (*cred_prepare)(struct cred *new, const struct cred *old, | ||
| 1449 | gfp_t gfp); | ||
| 1450 | void (*cred_transfer)(struct cred *new, const struct cred *old); | ||
| 1451 | int (*kernel_act_as)(struct cred *new, u32 secid); | ||
| 1452 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); | ||
| 1453 | int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size); | ||
| 1454 | int (*kernel_module_request)(char *kmod_name); | ||
| 1455 | int (*kernel_module_from_file)(struct file *file); | ||
| 1456 | int (*task_fix_setuid)(struct cred *new, const struct cred *old, | ||
| 1457 | int flags); | ||
| 1458 | int (*task_setpgid)(struct task_struct *p, pid_t pgid); | ||
| 1459 | int (*task_getpgid)(struct task_struct *p); | ||
| 1460 | int (*task_getsid)(struct task_struct *p); | ||
| 1461 | void (*task_getsecid)(struct task_struct *p, u32 *secid); | ||
| 1462 | int (*task_setnice)(struct task_struct *p, int nice); | ||
| 1463 | int (*task_setioprio)(struct task_struct *p, int ioprio); | ||
| 1464 | int (*task_getioprio)(struct task_struct *p); | ||
| 1465 | int (*task_setrlimit)(struct task_struct *p, unsigned int resource, | ||
| 1466 | struct rlimit *new_rlim); | ||
| 1467 | int (*task_setscheduler)(struct task_struct *p); | ||
| 1468 | int (*task_getscheduler)(struct task_struct *p); | ||
| 1469 | int (*task_movememory)(struct task_struct *p); | ||
| 1470 | int (*task_kill)(struct task_struct *p, struct siginfo *info, | ||
| 1471 | int sig, u32 secid); | ||
| 1472 | int (*task_wait)(struct task_struct *p); | ||
| 1473 | int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, | ||
| 1474 | unsigned long arg4, unsigned long arg5); | ||
| 1475 | void (*task_to_inode)(struct task_struct *p, struct inode *inode); | ||
| 1476 | |||
| 1477 | int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); | ||
| 1478 | void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); | ||
| 1479 | |||
| 1480 | int (*msg_msg_alloc_security)(struct msg_msg *msg); | ||
| 1481 | void (*msg_msg_free_security)(struct msg_msg *msg); | ||
| 1482 | |||
| 1483 | int (*msg_queue_alloc_security)(struct msg_queue *msq); | ||
| 1484 | void (*msg_queue_free_security)(struct msg_queue *msq); | ||
| 1485 | int (*msg_queue_associate)(struct msg_queue *msq, int msqflg); | ||
| 1486 | int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd); | ||
| 1487 | int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg, | ||
| 1488 | int msqflg); | ||
| 1489 | int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg, | ||
| 1490 | struct task_struct *target, long type, | ||
| 1491 | int mode); | ||
| 1492 | |||
| 1493 | int (*shm_alloc_security)(struct shmid_kernel *shp); | ||
| 1494 | void (*shm_free_security)(struct shmid_kernel *shp); | ||
| 1495 | int (*shm_associate)(struct shmid_kernel *shp, int shmflg); | ||
| 1496 | int (*shm_shmctl)(struct shmid_kernel *shp, int cmd); | ||
| 1497 | int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr, | ||
| 1498 | int shmflg); | ||
| 1499 | |||
| 1500 | int (*sem_alloc_security)(struct sem_array *sma); | ||
| 1501 | void (*sem_free_security)(struct sem_array *sma); | ||
| 1502 | int (*sem_associate)(struct sem_array *sma, int semflg); | ||
| 1503 | int (*sem_semctl)(struct sem_array *sma, int cmd); | ||
| 1504 | int (*sem_semop)(struct sem_array *sma, struct sembuf *sops, | ||
| 1505 | unsigned nsops, int alter); | ||
| 1506 | |||
| 1507 | int (*netlink_send)(struct sock *sk, struct sk_buff *skb); | ||
| 1508 | |||
| 1509 | void (*d_instantiate)(struct dentry *dentry, struct inode *inode); | ||
| 1510 | |||
| 1511 | int (*getprocattr)(struct task_struct *p, char *name, char **value); | ||
| 1512 | int (*setprocattr)(struct task_struct *p, char *name, void *value, | ||
| 1513 | size_t size); | ||
| 1514 | int (*ismaclabel)(const char *name); | ||
| 1515 | int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); | ||
| 1516 | int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); | ||
| 1517 | void (*release_secctx)(char *secdata, u32 seclen); | ||
| 1518 | |||
| 1519 | int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); | ||
| 1520 | int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); | ||
| 1521 | int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); | ||
| 1522 | |||
| 1523 | #ifdef CONFIG_SECURITY_NETWORK | ||
| 1524 | int (*unix_stream_connect)(struct sock *sock, struct sock *other, | ||
| 1525 | struct sock *newsk); | ||
| 1526 | int (*unix_may_send)(struct socket *sock, struct socket *other); | ||
| 1527 | |||
| 1528 | int (*socket_create)(int family, int type, int protocol, int kern); | ||
| 1529 | int (*socket_post_create)(struct socket *sock, int family, int type, | ||
| 1530 | int protocol, int kern); | ||
| 1531 | int (*socket_bind)(struct socket *sock, struct sockaddr *address, | ||
| 1532 | int addrlen); | ||
| 1533 | int (*socket_connect)(struct socket *sock, struct sockaddr *address, | ||
| 1534 | int addrlen); | ||
| 1535 | int (*socket_listen)(struct socket *sock, int backlog); | ||
| 1536 | int (*socket_accept)(struct socket *sock, struct socket *newsock); | ||
| 1537 | int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, | ||
| 1538 | int size); | ||
| 1539 | int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, | ||
| 1540 | int size, int flags); | ||
| 1541 | int (*socket_getsockname)(struct socket *sock); | ||
| 1542 | int (*socket_getpeername)(struct socket *sock); | ||
| 1543 | int (*socket_getsockopt)(struct socket *sock, int level, int optname); | ||
| 1544 | int (*socket_setsockopt)(struct socket *sock, int level, int optname); | ||
| 1545 | int (*socket_shutdown)(struct socket *sock, int how); | ||
| 1546 | int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); | ||
| 1547 | int (*socket_getpeersec_stream)(struct socket *sock, | ||
| 1548 | char __user *optval, | ||
| 1549 | int __user *optlen, unsigned len); | ||
| 1550 | int (*socket_getpeersec_dgram)(struct socket *sock, | ||
| 1551 | struct sk_buff *skb, u32 *secid); | ||
| 1552 | int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority); | ||
| 1553 | void (*sk_free_security)(struct sock *sk); | ||
| 1554 | void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); | ||
| 1555 | void (*sk_getsecid)(struct sock *sk, u32 *secid); | ||
| 1556 | void (*sock_graft)(struct sock *sk, struct socket *parent); | ||
| 1557 | int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, | ||
| 1558 | struct request_sock *req); | ||
| 1559 | void (*inet_csk_clone)(struct sock *newsk, | ||
| 1560 | const struct request_sock *req); | ||
| 1561 | void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); | ||
| 1562 | int (*secmark_relabel_packet)(u32 secid); | ||
| 1563 | void (*secmark_refcount_inc)(void); | ||
| 1564 | void (*secmark_refcount_dec)(void); | ||
| 1565 | void (*req_classify_flow)(const struct request_sock *req, | ||
| 1566 | struct flowi *fl); | ||
| 1567 | int (*tun_dev_alloc_security)(void **security); | ||
| 1568 | void (*tun_dev_free_security)(void *security); | ||
| 1569 | int (*tun_dev_create)(void); | ||
| 1570 | int (*tun_dev_attach_queue)(void *security); | ||
| 1571 | int (*tun_dev_attach)(struct sock *sk, void *security); | ||
| 1572 | int (*tun_dev_open)(void *security); | ||
| 1573 | #endif /* CONFIG_SECURITY_NETWORK */ | ||
| 1574 | |||
| 1575 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | ||
| 1576 | int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, | ||
| 1577 | struct xfrm_user_sec_ctx *sec_ctx, | ||
| 1578 | gfp_t gfp); | ||
| 1579 | int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, | ||
| 1580 | struct xfrm_sec_ctx **new_ctx); | ||
| 1581 | void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); | ||
| 1582 | int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); | ||
| 1583 | int (*xfrm_state_alloc)(struct xfrm_state *x, | ||
| 1584 | struct xfrm_user_sec_ctx *sec_ctx); | ||
| 1585 | int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, | ||
| 1586 | struct xfrm_sec_ctx *polsec, | ||
| 1587 | u32 secid); | ||
| 1588 | void (*xfrm_state_free_security)(struct xfrm_state *x); | ||
| 1589 | int (*xfrm_state_delete_security)(struct xfrm_state *x); | ||
| 1590 | int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, | ||
| 1591 | u8 dir); | ||
| 1592 | int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, | ||
| 1593 | struct xfrm_policy *xp, | ||
| 1594 | const struct flowi *fl); | ||
| 1595 | int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); | ||
| 1596 | #endif /* CONFIG_SECURITY_NETWORK_XFRM */ | ||
| 1597 | |||
| 1598 | /* key management security hooks */ | ||
| 1599 | #ifdef CONFIG_KEYS | ||
| 1600 | int (*key_alloc)(struct key *key, const struct cred *cred, | ||
| 1601 | unsigned long flags); | ||
| 1602 | void (*key_free)(struct key *key); | ||
| 1603 | int (*key_permission)(key_ref_t key_ref, const struct cred *cred, | ||
| 1604 | unsigned perm); | ||
| 1605 | int (*key_getsecurity)(struct key *key, char **_buffer); | ||
| 1606 | #endif /* CONFIG_KEYS */ | ||
| 1607 | |||
| 1608 | #ifdef CONFIG_AUDIT | ||
| 1609 | int (*audit_rule_init)(u32 field, u32 op, char *rulestr, | ||
| 1610 | void **lsmrule); | ||
| 1611 | int (*audit_rule_known)(struct audit_krule *krule); | ||
| 1612 | int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, | ||
| 1613 | struct audit_context *actx); | ||
| 1614 | void (*audit_rule_free)(void *lsmrule); | ||
| 1615 | #endif /* CONFIG_AUDIT */ | ||
| 1616 | }; | ||
| 1617 | |||
| 1618 | struct security_hook_heads { | ||
| 1619 | struct list_head binder_set_context_mgr; | ||
| 1620 | struct list_head binder_transaction; | ||
| 1621 | struct list_head binder_transfer_binder; | ||
| 1622 | struct list_head binder_transfer_file; | ||
| 1623 | struct list_head ptrace_access_check; | ||
| 1624 | struct list_head ptrace_traceme; | ||
| 1625 | struct list_head capget; | ||
| 1626 | struct list_head capset; | ||
| 1627 | struct list_head capable; | ||
| 1628 | struct list_head quotactl; | ||
| 1629 | struct list_head quota_on; | ||
| 1630 | struct list_head syslog; | ||
| 1631 | struct list_head settime; | ||
| 1632 | struct list_head vm_enough_memory; | ||
| 1633 | struct list_head bprm_set_creds; | ||
| 1634 | struct list_head bprm_check_security; | ||
| 1635 | struct list_head bprm_secureexec; | ||
| 1636 | struct list_head bprm_committing_creds; | ||
| 1637 | struct list_head bprm_committed_creds; | ||
| 1638 | struct list_head sb_alloc_security; | ||
| 1639 | struct list_head sb_free_security; | ||
| 1640 | struct list_head sb_copy_data; | ||
| 1641 | struct list_head sb_remount; | ||
| 1642 | struct list_head sb_kern_mount; | ||
| 1643 | struct list_head sb_show_options; | ||
| 1644 | struct list_head sb_statfs; | ||
| 1645 | struct list_head sb_mount; | ||
| 1646 | struct list_head sb_umount; | ||
| 1647 | struct list_head sb_pivotroot; | ||
| 1648 | struct list_head sb_set_mnt_opts; | ||
| 1649 | struct list_head sb_clone_mnt_opts; | ||
| 1650 | struct list_head sb_parse_opts_str; | ||
| 1651 | struct list_head dentry_init_security; | ||
| 1652 | #ifdef CONFIG_SECURITY_PATH | ||
| 1653 | struct list_head path_unlink; | ||
| 1654 | struct list_head path_mkdir; | ||
| 1655 | struct list_head path_rmdir; | ||
| 1656 | struct list_head path_mknod; | ||
| 1657 | struct list_head path_truncate; | ||
| 1658 | struct list_head path_symlink; | ||
| 1659 | struct list_head path_link; | ||
| 1660 | struct list_head path_rename; | ||
| 1661 | struct list_head path_chmod; | ||
| 1662 | struct list_head path_chown; | ||
| 1663 | struct list_head path_chroot; | ||
| 1664 | #endif | ||
| 1665 | struct list_head inode_alloc_security; | ||
| 1666 | struct list_head inode_free_security; | ||
| 1667 | struct list_head inode_init_security; | ||
| 1668 | struct list_head inode_create; | ||
| 1669 | struct list_head inode_link; | ||
| 1670 | struct list_head inode_unlink; | ||
| 1671 | struct list_head inode_symlink; | ||
| 1672 | struct list_head inode_mkdir; | ||
| 1673 | struct list_head inode_rmdir; | ||
| 1674 | struct list_head inode_mknod; | ||
| 1675 | struct list_head inode_rename; | ||
| 1676 | struct list_head inode_readlink; | ||
| 1677 | struct list_head inode_follow_link; | ||
| 1678 | struct list_head inode_permission; | ||
| 1679 | struct list_head inode_setattr; | ||
| 1680 | struct list_head inode_getattr; | ||
| 1681 | struct list_head inode_setxattr; | ||
| 1682 | struct list_head inode_post_setxattr; | ||
| 1683 | struct list_head inode_getxattr; | ||
| 1684 | struct list_head inode_listxattr; | ||
| 1685 | struct list_head inode_removexattr; | ||
| 1686 | struct list_head inode_need_killpriv; | ||
| 1687 | struct list_head inode_killpriv; | ||
| 1688 | struct list_head inode_getsecurity; | ||
| 1689 | struct list_head inode_setsecurity; | ||
| 1690 | struct list_head inode_listsecurity; | ||
| 1691 | struct list_head inode_getsecid; | ||
| 1692 | struct list_head file_permission; | ||
| 1693 | struct list_head file_alloc_security; | ||
| 1694 | struct list_head file_free_security; | ||
| 1695 | struct list_head file_ioctl; | ||
| 1696 | struct list_head mmap_addr; | ||
| 1697 | struct list_head mmap_file; | ||
| 1698 | struct list_head file_mprotect; | ||
| 1699 | struct list_head file_lock; | ||
| 1700 | struct list_head file_fcntl; | ||
| 1701 | struct list_head file_set_fowner; | ||
| 1702 | struct list_head file_send_sigiotask; | ||
| 1703 | struct list_head file_receive; | ||
| 1704 | struct list_head file_open; | ||
| 1705 | struct list_head task_create; | ||
| 1706 | struct list_head task_free; | ||
| 1707 | struct list_head cred_alloc_blank; | ||
| 1708 | struct list_head cred_free; | ||
| 1709 | struct list_head cred_prepare; | ||
| 1710 | struct list_head cred_transfer; | ||
| 1711 | struct list_head kernel_act_as; | ||
| 1712 | struct list_head kernel_create_files_as; | ||
| 1713 | struct list_head kernel_fw_from_file; | ||
| 1714 | struct list_head kernel_module_request; | ||
| 1715 | struct list_head kernel_module_from_file; | ||
| 1716 | struct list_head task_fix_setuid; | ||
| 1717 | struct list_head task_setpgid; | ||
| 1718 | struct list_head task_getpgid; | ||
| 1719 | struct list_head task_getsid; | ||
| 1720 | struct list_head task_getsecid; | ||
| 1721 | struct list_head task_setnice; | ||
| 1722 | struct list_head task_setioprio; | ||
| 1723 | struct list_head task_getioprio; | ||
| 1724 | struct list_head task_setrlimit; | ||
| 1725 | struct list_head task_setscheduler; | ||
| 1726 | struct list_head task_getscheduler; | ||
| 1727 | struct list_head task_movememory; | ||
| 1728 | struct list_head task_kill; | ||
| 1729 | struct list_head task_wait; | ||
| 1730 | struct list_head task_prctl; | ||
| 1731 | struct list_head task_to_inode; | ||
| 1732 | struct list_head ipc_permission; | ||
| 1733 | struct list_head ipc_getsecid; | ||
| 1734 | struct list_head msg_msg_alloc_security; | ||
| 1735 | struct list_head msg_msg_free_security; | ||
| 1736 | struct list_head msg_queue_alloc_security; | ||
| 1737 | struct list_head msg_queue_free_security; | ||
| 1738 | struct list_head msg_queue_associate; | ||
| 1739 | struct list_head msg_queue_msgctl; | ||
| 1740 | struct list_head msg_queue_msgsnd; | ||
| 1741 | struct list_head msg_queue_msgrcv; | ||
| 1742 | struct list_head shm_alloc_security; | ||
| 1743 | struct list_head shm_free_security; | ||
| 1744 | struct list_head shm_associate; | ||
| 1745 | struct list_head shm_shmctl; | ||
| 1746 | struct list_head shm_shmat; | ||
| 1747 | struct list_head sem_alloc_security; | ||
| 1748 | struct list_head sem_free_security; | ||
| 1749 | struct list_head sem_associate; | ||
| 1750 | struct list_head sem_semctl; | ||
| 1751 | struct list_head sem_semop; | ||
| 1752 | struct list_head netlink_send; | ||
| 1753 | struct list_head d_instantiate; | ||
| 1754 | struct list_head getprocattr; | ||
| 1755 | struct list_head setprocattr; | ||
| 1756 | struct list_head ismaclabel; | ||
| 1757 | struct list_head secid_to_secctx; | ||
| 1758 | struct list_head secctx_to_secid; | ||
| 1759 | struct list_head release_secctx; | ||
| 1760 | struct list_head inode_notifysecctx; | ||
| 1761 | struct list_head inode_setsecctx; | ||
| 1762 | struct list_head inode_getsecctx; | ||
| 1763 | #ifdef CONFIG_SECURITY_NETWORK | ||
| 1764 | struct list_head unix_stream_connect; | ||
| 1765 | struct list_head unix_may_send; | ||
| 1766 | struct list_head socket_create; | ||
| 1767 | struct list_head socket_post_create; | ||
| 1768 | struct list_head socket_bind; | ||
| 1769 | struct list_head socket_connect; | ||
| 1770 | struct list_head socket_listen; | ||
| 1771 | struct list_head socket_accept; | ||
| 1772 | struct list_head socket_sendmsg; | ||
| 1773 | struct list_head socket_recvmsg; | ||
| 1774 | struct list_head socket_getsockname; | ||
| 1775 | struct list_head socket_getpeername; | ||
| 1776 | struct list_head socket_getsockopt; | ||
| 1777 | struct list_head socket_setsockopt; | ||
| 1778 | struct list_head socket_shutdown; | ||
| 1779 | struct list_head socket_sock_rcv_skb; | ||
| 1780 | struct list_head socket_getpeersec_stream; | ||
| 1781 | struct list_head socket_getpeersec_dgram; | ||
| 1782 | struct list_head sk_alloc_security; | ||
| 1783 | struct list_head sk_free_security; | ||
| 1784 | struct list_head sk_clone_security; | ||
| 1785 | struct list_head sk_getsecid; | ||
| 1786 | struct list_head sock_graft; | ||
| 1787 | struct list_head inet_conn_request; | ||
| 1788 | struct list_head inet_csk_clone; | ||
| 1789 | struct list_head inet_conn_established; | ||
| 1790 | struct list_head secmark_relabel_packet; | ||
| 1791 | struct list_head secmark_refcount_inc; | ||
| 1792 | struct list_head secmark_refcount_dec; | ||
| 1793 | struct list_head req_classify_flow; | ||
| 1794 | struct list_head tun_dev_alloc_security; | ||
| 1795 | struct list_head tun_dev_free_security; | ||
| 1796 | struct list_head tun_dev_create; | ||
| 1797 | struct list_head tun_dev_attach_queue; | ||
| 1798 | struct list_head tun_dev_attach; | ||
| 1799 | struct list_head tun_dev_open; | ||
| 1800 | struct list_head skb_owned_by; | ||
| 1801 | #endif /* CONFIG_SECURITY_NETWORK */ | ||
| 1802 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | ||
| 1803 | struct list_head xfrm_policy_alloc_security; | ||
| 1804 | struct list_head xfrm_policy_clone_security; | ||
| 1805 | struct list_head xfrm_policy_free_security; | ||
| 1806 | struct list_head xfrm_policy_delete_security; | ||
| 1807 | struct list_head xfrm_state_alloc; | ||
| 1808 | struct list_head xfrm_state_alloc_acquire; | ||
| 1809 | struct list_head xfrm_state_free_security; | ||
| 1810 | struct list_head xfrm_state_delete_security; | ||
| 1811 | struct list_head xfrm_policy_lookup; | ||
| 1812 | struct list_head xfrm_state_pol_flow_match; | ||
| 1813 | struct list_head xfrm_decode_session; | ||
| 1814 | #endif /* CONFIG_SECURITY_NETWORK_XFRM */ | ||
| 1815 | #ifdef CONFIG_KEYS | ||
| 1816 | struct list_head key_alloc; | ||
| 1817 | struct list_head key_free; | ||
| 1818 | struct list_head key_permission; | ||
| 1819 | struct list_head key_getsecurity; | ||
| 1820 | #endif /* CONFIG_KEYS */ | ||
| 1821 | #ifdef CONFIG_AUDIT | ||
| 1822 | struct list_head audit_rule_init; | ||
| 1823 | struct list_head audit_rule_known; | ||
| 1824 | struct list_head audit_rule_match; | ||
| 1825 | struct list_head audit_rule_free; | ||
| 1826 | #endif /* CONFIG_AUDIT */ | ||
| 1827 | }; | ||
| 1828 | |||
| 1829 | /* | ||
| 1830 | * Security module hook list structure. | ||
| 1831 | * For use with generic list macros for common operations. | ||
| 1832 | */ | ||
| 1833 | struct security_hook_list { | ||
| 1834 | struct list_head list; | ||
| 1835 | struct list_head *head; | ||
| 1836 | union security_list_options hook; | ||
| 1837 | }; | ||
| 1838 | |||
| 1839 | /* | ||
| 1840 | * Initializing a security_hook_list structure takes | ||
| 1841 | * up a lot of space in a source file. This macro takes | ||
| 1842 | * care of the common case and reduces the amount of | ||
| 1843 | * text involved. | ||
| 1844 | */ | ||
| 1845 | #define LSM_HOOK_INIT(HEAD, HOOK) \ | ||
| 1846 | { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } | ||
| 1847 | |||
| 1848 | extern struct security_hook_heads security_hook_heads; | ||
| 1849 | |||
| 1850 | static inline void security_add_hooks(struct security_hook_list *hooks, | ||
| 1851 | int count) | ||
| 1852 | { | ||
| 1853 | int i; | ||
| 1854 | |||
| 1855 | for (i = 0; i < count; i++) | ||
| 1856 | list_add_tail_rcu(&hooks[i].list, hooks[i].head); | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | #ifdef CONFIG_SECURITY_SELINUX_DISABLE | ||
| 1860 | /* | ||
| 1861 | * Assuring the safety of deleting a security module is up to | ||
| 1862 | * the security module involved. This may entail ordering the | ||
| 1863 | * module's hook list in a particular way, refusing to disable | ||
| 1864 | * the module once a policy is loaded or any number of other | ||
| 1865 | * actions better imagined than described. | ||
| 1866 | * | ||
| 1867 | * The name of the configuration option reflects the only module | ||
| 1868 | * that currently uses the mechanism. Any developer who thinks | ||
| 1869 | * disabling their module is a good idea needs to be at least as | ||
| 1870 | * careful as the SELinux team. | ||
| 1871 | */ | ||
| 1872 | static inline void security_delete_hooks(struct security_hook_list *hooks, | ||
| 1873 | int count) | ||
| 1874 | { | ||
| 1875 | int i; | ||
| 1876 | |||
| 1877 | for (i = 0; i < count; i++) | ||
| 1878 | list_del_rcu(&hooks[i].list); | ||
| 1879 | } | ||
| 1880 | #endif /* CONFIG_SECURITY_SELINUX_DISABLE */ | ||
| 1881 | |||
| 1882 | extern int __init security_module_enable(const char *module); | ||
| 1883 | extern void __init capability_add_hooks(void); | ||
| 1884 | #ifdef CONFIG_SECURITY_YAMA_STACKED | ||
| 1885 | void __init yama_add_hooks(void); | ||
| 1886 | #endif | ||
| 1887 | |||
| 1888 | #endif /* ! __LINUX_LSM_HOOKS_H */ | ||
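A minimal sketch of how a module might use the interfaces declared above: union security_list_options supplies the hook type, LSM_HOOK_INIT() pairs each hook with its list head in struct security_hook_heads, and security_add_hooks() splices the entries in after security_module_enable() confirms the module was selected. The module name "example", the hook bodies and the initcall wiring are illustrative assumptions, not taken from this header.

    /* Illustrative only: a tiny LSM built on the registration interface above. */
    #include <linux/kernel.h>
    #include <linux/lsm_hooks.h>
    #include <linux/security.h>

    /* Hypothetical hook bodies; returning 0 allows the operation. */
    static int example_ptrace_access_check(struct task_struct *child,
                                           unsigned int mode)
    {
            return 0;
    }

    static int example_inode_permission(struct inode *inode, int mask)
    {
            return 0;
    }

    /*
     * Each entry pairs a list head from struct security_hook_heads with the
     * matching member of union security_list_options.
     */
    static struct security_hook_list example_hooks[] = {
            LSM_HOOK_INIT(ptrace_access_check, example_ptrace_access_check),
            LSM_HOOK_INIT(inode_permission, example_inode_permission),
    };

    static int __init example_init(void)
    {
            /* Register only if this module was chosen as the active major LSM. */
            if (!security_module_enable("example"))
                    return 0;

            security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
            return 0;
    }
    security_initcall(example_init);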
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 1726ccbd8009..44348710953f 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h | |||
| @@ -40,6 +40,8 @@ struct mbox_client { | |||
| 40 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); | 40 | void (*tx_done)(struct mbox_client *cl, void *mssg, int r); |
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, | ||
| 44 | const char *name); | ||
| 43 | struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); | 45 | struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); |
| 44 | int mbox_send_message(struct mbox_chan *chan, void *mssg); | 46 | int mbox_send_message(struct mbox_chan *chan, void *mssg); |
| 45 | void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ | 47 | void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ |
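The new mbox_request_channel_byname() lets a client pick its channel by the name listed in the device's "mbox-names" property instead of by index. A rough sketch of a client using it; the channel name "tx-chan", the payload and the callback are placeholders, only the calls shown in this hunk come from the header.

    /* Illustrative mailbox client using the by-name lookup. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mailbox_client.h>

    static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
    {
            dev_dbg(cl->dev, "message %p sent, status %d\n", mssg, r);
    }

    static struct mbox_client demo_client;       /* must outlive the channel */

    static int demo_send_one(struct device *dev)
    {
            static u32 payload = 0xdeadbeef;     /* placeholder message */
            struct mbox_chan *chan;
            int ret;

            demo_client.dev = dev;
            demo_client.tx_done = demo_tx_done;
            demo_client.tx_block = false;

            /* "tx-chan" must match an entry in the device's "mbox-names". */
            chan = mbox_request_channel_byname(&demo_client, "tx-chan");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = mbox_send_message(chan, &payload);
            mbox_free_channel(chan);
            return ret < 0 ? ret : 0;
    }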
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index d4cf96f07cfc..68c42454439b 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h | |||
| @@ -72,7 +72,7 @@ struct mbox_chan_ops { | |||
| 72 | */ | 72 | */ |
| 73 | struct mbox_controller { | 73 | struct mbox_controller { |
| 74 | struct device *dev; | 74 | struct device *dev; |
| 75 | struct mbox_chan_ops *ops; | 75 | const struct mbox_chan_ops *ops; |
| 76 | struct mbox_chan *chans; | 76 | struct mbox_chan *chans; |
| 77 | int num_chans; | 77 | int num_chans; |
| 78 | bool txdone_irq; | 78 | bool txdone_irq; |
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h index 66c30a763b10..11f00cdabe3d 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/mdio-gpio.h | |||
| @@ -23,7 +23,8 @@ struct mdio_gpio_platform_data { | |||
| 23 | bool mdio_active_low; | 23 | bool mdio_active_low; |
| 24 | bool mdo_active_low; | 24 | bool mdo_active_low; |
| 25 | 25 | ||
| 26 | unsigned int phy_mask; | 26 | u32 phy_mask; |
| 27 | u32 phy_ignore_ta_mask; | ||
| 27 | int irqs[PHY_MAX_ADDR]; | 28 | int irqs[PHY_MAX_ADDR]; |
| 28 | /* reset callback */ | 29 | /* reset callback */ |
| 29 | int (*reset)(struct mii_bus *bus); | 30 | int (*reset)(struct mii_bus *bus); |
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 0819d36a3a74..a16b1f9c1aca 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h | |||
| @@ -7,6 +7,42 @@ | |||
| 7 | 7 | ||
| 8 | struct mei_cl_device; | 8 | struct mei_cl_device; |
| 9 | 9 | ||
| 10 | typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, | ||
| 11 | u32 events, void *context); | ||
| 12 | |||
| 13 | /** | ||
| 14 | * struct mei_cl_device - MEI device handle | ||
| 15 | * An mei_cl_device pointer is returned from mei_add_device() | ||
| 16 | * and links MEI bus clients to their actual ME host client pointer. | ||
| 17 | * Drivers for MEI devices will get an mei_cl_device pointer | ||
| 18 | * when being probed and shall use it for doing ME bus I/O. | ||
| 19 | * | ||
| 20 | * @dev: linux driver model device pointer | ||
| 21 | * @me_cl: me client | ||
| 22 | * @cl: mei client | ||
| 23 | * @name: device name | ||
| 24 | * @event_work: async work to execute event callback | ||
| 25 | * @event_cb: Drivers register this callback to get asynchronous ME | ||
| 26 | * events (e.g. Rx buffer pending) notifications. | ||
| 27 | * @event_context: event callback run context | ||
| 28 | * @events: Events bitmask sent to the driver. | ||
| 29 | * @priv_data: client private data | ||
| 30 | */ | ||
| 31 | struct mei_cl_device { | ||
| 32 | struct device dev; | ||
| 33 | |||
| 34 | struct mei_me_client *me_cl; | ||
| 35 | struct mei_cl *cl; | ||
| 36 | char name[MEI_CL_NAME_SIZE]; | ||
| 37 | |||
| 38 | struct work_struct event_work; | ||
| 39 | mei_cl_event_cb_t event_cb; | ||
| 40 | void *event_context; | ||
| 41 | unsigned long events; | ||
| 42 | |||
| 43 | void *priv_data; | ||
| 44 | }; | ||
| 45 | |||
| 10 | struct mei_cl_driver { | 46 | struct mei_cl_driver { |
| 11 | struct device_driver driver; | 47 | struct device_driver driver; |
| 12 | const char *name; | 48 | const char *name; |
| @@ -28,8 +64,6 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver); | |||
| 28 | ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); | 64 | ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); |
| 29 | ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); | 65 | ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); |
| 30 | 66 | ||
| 31 | typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, | ||
| 32 | u32 events, void *context); | ||
| 33 | int mei_cl_register_event_cb(struct mei_cl_device *device, | 67 | int mei_cl_register_event_cb(struct mei_cl_device *device, |
| 34 | mei_cl_event_cb_t read_cb, void *context); | 68 | mei_cl_event_cb_t read_cb, void *context); |
| 35 | 69 | ||
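With struct mei_cl_device now visible to bus drivers, a probe routine can register an event callback and move data with mei_cl_recv()/mei_cl_send() directly. A hedged sketch; the driver name, buffer size and dev_dbg output are placeholders, only the calls and types shown in this hunk are taken from the header.

    /* Illustrative MEI client bus driver fragment. */
    #include <linux/device.h>
    #include <linux/mei_cl_bus.h>

    static void demo_event_cb(struct mei_cl_device *device, u32 events,
                              void *context)
    {
            u8 buf[64];                          /* placeholder buffer size */
            ssize_t len;

            /* Drain whatever the firmware client has queued for us. */
            len = mei_cl_recv(device, buf, sizeof(buf));
            if (len > 0)
                    dev_dbg(&device->dev, "received %zd bytes\n", len);
    }

    static int demo_probe(struct mei_cl_device *device,
                          const struct mei_cl_device_id *id)
    {
            /* Ask the bus to run demo_event_cb() on asynchronous ME events. */
            return mei_cl_register_event_cb(device, demo_event_cb, NULL);
    }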
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 9497ec7c77ea..cc4b01972060 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -21,7 +21,11 @@ | |||
| 21 | #define INIT_PHYSMEM_REGIONS 4 | 21 | #define INIT_PHYSMEM_REGIONS 4 |
| 22 | 22 | ||
| 23 | /* Definition of memblock flags. */ | 23 | /* Definition of memblock flags. */ |
| 24 | #define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ | 24 | enum { |
| 25 | MEMBLOCK_NONE = 0x0, /* No special request */ | ||
| 26 | MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ | ||
| 27 | MEMBLOCK_MIRROR = 0x2, /* mirrored region */ | ||
| 28 | }; | ||
| 25 | 29 | ||
| 26 | struct memblock_region { | 30 | struct memblock_region { |
| 27 | phys_addr_t base; | 31 | phys_addr_t base; |
| @@ -61,7 +65,7 @@ extern bool movable_node_enabled; | |||
| 61 | 65 | ||
| 62 | phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, | 66 | phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, |
| 63 | phys_addr_t start, phys_addr_t end, | 67 | phys_addr_t start, phys_addr_t end, |
| 64 | int nid); | 68 | int nid, ulong flags); |
| 65 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, | 69 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, |
| 66 | phys_addr_t size, phys_addr_t align); | 70 | phys_addr_t size, phys_addr_t align); |
| 67 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); | 71 | phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); |
| @@ -75,6 +79,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size); | |||
| 75 | void memblock_trim_memory(phys_addr_t align); | 79 | void memblock_trim_memory(phys_addr_t align); |
| 76 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); | 80 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); |
| 77 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); | 81 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); |
| 82 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); | ||
| 83 | ulong choose_memblock_flags(void); | ||
| 78 | 84 | ||
| 79 | /* Low level functions */ | 85 | /* Low level functions */ |
| 80 | int memblock_add_range(struct memblock_type *type, | 86 | int memblock_add_range(struct memblock_type *type, |
| @@ -85,14 +91,19 @@ int memblock_remove_range(struct memblock_type *type, | |||
| 85 | phys_addr_t base, | 91 | phys_addr_t base, |
| 86 | phys_addr_t size); | 92 | phys_addr_t size); |
| 87 | 93 | ||
| 88 | void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a, | 94 | void __next_mem_range(u64 *idx, int nid, ulong flags, |
| 95 | struct memblock_type *type_a, | ||
| 89 | struct memblock_type *type_b, phys_addr_t *out_start, | 96 | struct memblock_type *type_b, phys_addr_t *out_start, |
| 90 | phys_addr_t *out_end, int *out_nid); | 97 | phys_addr_t *out_end, int *out_nid); |
| 91 | 98 | ||
| 92 | void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, | 99 | void __next_mem_range_rev(u64 *idx, int nid, ulong flags, |
| 100 | struct memblock_type *type_a, | ||
| 93 | struct memblock_type *type_b, phys_addr_t *out_start, | 101 | struct memblock_type *type_b, phys_addr_t *out_start, |
| 94 | phys_addr_t *out_end, int *out_nid); | 102 | phys_addr_t *out_end, int *out_nid); |
| 95 | 103 | ||
| 104 | void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, | ||
| 105 | phys_addr_t *out_end); | ||
| 106 | |||
| 96 | /** | 107 | /** |
| 97 | * for_each_mem_range - iterate through memblock areas from type_a and not | 108 | * for_each_mem_range - iterate through memblock areas from type_a and not |
| 98 | * included in type_b. Or just type_a if type_b is NULL. | 109 | * included in type_b. Or just type_a if type_b is NULL. |
| @@ -100,16 +111,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, | |||
| 100 | * @type_a: ptr to memblock_type to iterate | 111 | * @type_a: ptr to memblock_type to iterate |
| 101 | * @type_b: ptr to memblock_type which excludes from the iteration | 112 | * @type_b: ptr to memblock_type which excludes from the iteration |
| 102 | * @nid: node selector, %NUMA_NO_NODE for all nodes | 113 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
| 114 | * @flags: pick from blocks based on memory attributes | ||
| 103 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 115 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 104 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 116 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 105 | * @p_nid: ptr to int for nid of the range, can be %NULL | 117 | * @p_nid: ptr to int for nid of the range, can be %NULL |
| 106 | */ | 118 | */ |
| 107 | #define for_each_mem_range(i, type_a, type_b, nid, \ | 119 | #define for_each_mem_range(i, type_a, type_b, nid, flags, \ |
| 108 | p_start, p_end, p_nid) \ | 120 | p_start, p_end, p_nid) \ |
| 109 | for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \ | 121 | for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ |
| 110 | p_start, p_end, p_nid); \ | 122 | p_start, p_end, p_nid); \ |
| 111 | i != (u64)ULLONG_MAX; \ | 123 | i != (u64)ULLONG_MAX; \ |
| 112 | __next_mem_range(&i, nid, type_a, type_b, \ | 124 | __next_mem_range(&i, nid, flags, type_a, type_b, \ |
| 113 | p_start, p_end, p_nid)) | 125 | p_start, p_end, p_nid)) |
| 114 | 126 | ||
| 115 | /** | 127 | /** |
| @@ -119,19 +131,35 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, | |||
| 119 | * @type_a: ptr to memblock_type to iterate | 131 | * @type_a: ptr to memblock_type to iterate |
| 120 | * @type_b: ptr to memblock_type which excludes from the iteration | 132 | * @type_b: ptr to memblock_type which excludes from the iteration |
| 121 | * @nid: node selector, %NUMA_NO_NODE for all nodes | 133 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
| 134 | * @flags: pick from blocks based on memory attributes | ||
| 122 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 135 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 123 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 136 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 124 | * @p_nid: ptr to int for nid of the range, can be %NULL | 137 | * @p_nid: ptr to int for nid of the range, can be %NULL |
| 125 | */ | 138 | */ |
| 126 | #define for_each_mem_range_rev(i, type_a, type_b, nid, \ | 139 | #define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ |
| 127 | p_start, p_end, p_nid) \ | 140 | p_start, p_end, p_nid) \ |
| 128 | for (i = (u64)ULLONG_MAX, \ | 141 | for (i = (u64)ULLONG_MAX, \ |
| 129 | __next_mem_range_rev(&i, nid, type_a, type_b, \ | 142 | __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ |
| 130 | p_start, p_end, p_nid); \ | 143 | p_start, p_end, p_nid); \ |
| 131 | i != (u64)ULLONG_MAX; \ | 144 | i != (u64)ULLONG_MAX; \ |
| 132 | __next_mem_range_rev(&i, nid, type_a, type_b, \ | 145 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
| 133 | p_start, p_end, p_nid)) | 146 | p_start, p_end, p_nid)) |
| 134 | 147 | ||
| 148 | /** | ||
| 149 | * for_each_reserved_mem_region - iterate over all reserved memblock areas | ||
| 150 | * @i: u64 used as loop variable | ||
| 151 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
| 152 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
| 153 | * | ||
| 154 | * Walks over reserved areas of memblock. Available as soon as memblock | ||
| 155 | * is initialized. | ||
| 156 | */ | ||
| 157 | #define for_each_reserved_mem_region(i, p_start, p_end) \ | ||
| 158 | for (i = 0UL, \ | ||
| 159 | __next_reserved_mem_region(&i, p_start, p_end); \ | ||
| 160 | i != (u64)ULLONG_MAX; \ | ||
| 161 | __next_reserved_mem_region(&i, p_start, p_end)) | ||
| 162 | |||
| 135 | #ifdef CONFIG_MOVABLE_NODE | 163 | #ifdef CONFIG_MOVABLE_NODE |
| 136 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) | 164 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) |
| 137 | { | 165 | { |
| @@ -153,6 +181,11 @@ static inline bool movable_node_is_enabled(void) | |||
| 153 | } | 181 | } |
| 154 | #endif | 182 | #endif |
| 155 | 183 | ||
| 184 | static inline bool memblock_is_mirror(struct memblock_region *m) | ||
| 185 | { | ||
| 186 | return m->flags & MEMBLOCK_MIRROR; | ||
| 187 | } | ||
| 188 | |||
| 156 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 189 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 157 | int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, | 190 | int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, |
| 158 | unsigned long *end_pfn); | 191 | unsigned long *end_pfn); |
| @@ -181,13 +214,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, | |||
| 181 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 214 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 182 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 215 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 183 | * @p_nid: ptr to int for nid of the range, can be %NULL | 216 | * @p_nid: ptr to int for nid of the range, can be %NULL |
| 217 | * @flags: pick from blocks based on memory attributes | ||
| 184 | * | 218 | * |
| 185 | * Walks over free (memory && !reserved) areas of memblock. Available as | 219 | * Walks over free (memory && !reserved) areas of memblock. Available as |
| 186 | * soon as memblock is initialized. | 220 | * soon as memblock is initialized. |
| 187 | */ | 221 | */ |
| 188 | #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ | 222 | #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ |
| 189 | for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ | 223 | for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ |
| 190 | nid, p_start, p_end, p_nid) | 224 | nid, flags, p_start, p_end, p_nid) |
| 191 | 225 | ||
| 192 | /** | 226 | /** |
| 193 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas | 227 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas |
| @@ -196,13 +230,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, | |||
| 196 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 230 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 197 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 231 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 198 | * @p_nid: ptr to int for nid of the range, can be %NULL | 232 | * @p_nid: ptr to int for nid of the range, can be %NULL |
| 233 | * @flags: pick from blocks based on memory attributes | ||
| 199 | * | 234 | * |
| 200 | * Walks over free (memory && !reserved) areas of memblock in reverse | 235 | * Walks over free (memory && !reserved) areas of memblock in reverse |
| 201 | * order. Available as soon as memblock is initialized. | 236 | * order. Available as soon as memblock is initialized. |
| 202 | */ | 237 | */ |
| 203 | #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ | 238 | #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ |
| 239 | p_nid) \ | ||
| 204 | for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ | 240 | for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
| 205 | nid, p_start, p_end, p_nid) | 241 | nid, flags, p_start, p_end, p_nid) |
| 206 | 242 | ||
| 207 | static inline void memblock_set_region_flags(struct memblock_region *r, | 243 | static inline void memblock_set_region_flags(struct memblock_region *r, |
| 208 | unsigned long flags) | 244 | unsigned long flags) |
| @@ -273,7 +309,8 @@ static inline bool memblock_bottom_up(void) { return false; } | |||
| 273 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 | 309 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
| 274 | 310 | ||
| 275 | phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, | 311 | phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, |
| 276 | phys_addr_t start, phys_addr_t end); | 312 | phys_addr_t start, phys_addr_t end, |
| 313 | ulong flags); | ||
| 277 | phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, | 314 | phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
| 278 | phys_addr_t max_addr); | 315 | phys_addr_t max_addr); |
| 279 | phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, | 316 | phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
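With the flags argument added to the range iterators, early-boot code picks its placement policy once via choose_memblock_flags() (for example MEMBLOCK_MIRROR when mirrored memory is wanted, MEMBLOCK_NONE otherwise) and passes it to every walk. A small sketch of using the reworked iterators; the printing and the early-boot caller are illustrative.

    /* Illustrative early-boot walk over memblock with the new flags argument. */
    #include <linux/memblock.h>
    #include <linux/numa.h>
    #include <linux/printk.h>

    static void __init demo_walk_memblock(void)
    {
            ulong flags = choose_memblock_flags();
            phys_addr_t start, end;
            int nid;
            u64 i;

            /* Free (memory && !reserved) ranges, filtered by the chosen flags. */
            for_each_free_mem_range(i, NUMA_NO_NODE, flags, &start, &end, &nid)
                    pr_info("free: [%pa-%pa) node %d\n", &start, &end, nid);

            /* Reserved regions can now be walked directly as well. */
            for_each_reserved_mem_region(i, &start, &end)
                    pr_info("reserved: [%pa-%pa)\n", &start, &end);
    }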
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6c8918114804..73b02b0a8f60 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -41,6 +41,7 @@ enum mem_cgroup_stat_index { | |||
| 41 | MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ | 41 | MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ |
| 42 | MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ | 42 | MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ |
| 43 | MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ | 43 | MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ |
| 44 | MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */ | ||
| 44 | MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ | 45 | MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ |
| 45 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ | 46 | MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ |
| 46 | MEM_CGROUP_STAT_NSTATS, | 47 | MEM_CGROUP_STAT_NSTATS, |
| @@ -67,6 +68,8 @@ enum mem_cgroup_events_index { | |||
| 67 | }; | 68 | }; |
| 68 | 69 | ||
| 69 | #ifdef CONFIG_MEMCG | 70 | #ifdef CONFIG_MEMCG |
| 71 | extern struct cgroup_subsys_state *mem_cgroup_root_css; | ||
| 72 | |||
| 70 | void mem_cgroup_events(struct mem_cgroup *memcg, | 73 | void mem_cgroup_events(struct mem_cgroup *memcg, |
| 71 | enum mem_cgroup_events_index idx, | 74 | enum mem_cgroup_events_index idx, |
| 72 | unsigned int nr); | 75 | unsigned int nr); |
| @@ -112,6 +115,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, | |||
| 112 | } | 115 | } |
| 113 | 116 | ||
| 114 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); | 117 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); |
| 118 | extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); | ||
| 115 | 119 | ||
| 116 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, | 120 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, |
| 117 | struct mem_cgroup *, | 121 | struct mem_cgroup *, |
| @@ -195,6 +199,8 @@ void mem_cgroup_split_huge_fixup(struct page *head); | |||
| 195 | #else /* CONFIG_MEMCG */ | 199 | #else /* CONFIG_MEMCG */ |
| 196 | struct mem_cgroup; | 200 | struct mem_cgroup; |
| 197 | 201 | ||
| 202 | #define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) | ||
| 203 | |||
| 198 | static inline void mem_cgroup_events(struct mem_cgroup *memcg, | 204 | static inline void mem_cgroup_events(struct mem_cgroup *memcg, |
| 199 | enum mem_cgroup_events_index idx, | 205 | enum mem_cgroup_events_index idx, |
| 200 | unsigned int nr) | 206 | unsigned int nr) |
| @@ -382,6 +388,29 @@ enum { | |||
| 382 | OVER_LIMIT, | 388 | OVER_LIMIT, |
| 383 | }; | 389 | }; |
| 384 | 390 | ||
| 391 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 392 | |||
| 393 | struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); | ||
| 394 | struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); | ||
| 395 | void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, | ||
| 396 | unsigned long *pdirty, unsigned long *pwriteback); | ||
| 397 | |||
| 398 | #else /* CONFIG_CGROUP_WRITEBACK */ | ||
| 399 | |||
| 400 | static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) | ||
| 401 | { | ||
| 402 | return NULL; | ||
| 403 | } | ||
| 404 | |||
| 405 | static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, | ||
| 406 | unsigned long *pavail, | ||
| 407 | unsigned long *pdirty, | ||
| 408 | unsigned long *pwriteback) | ||
| 409 | { | ||
| 410 | } | ||
| 411 | |||
| 412 | #endif /* CONFIG_CGROUP_WRITEBACK */ | ||
| 413 | |||
| 385 | struct sock; | 414 | struct sock; |
| 386 | #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) | 415 | #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) |
| 387 | void sock_update_memcg(struct sock *sk); | 416 | void sock_update_memcg(struct sock *sk); |
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 16a498f48169..2f434f4f79a1 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h | |||
| @@ -117,6 +117,7 @@ struct arizona { | |||
| 117 | int num_core_supplies; | 117 | int num_core_supplies; |
| 118 | struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES]; | 118 | struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES]; |
| 119 | struct regulator *dcvdd; | 119 | struct regulator *dcvdd; |
| 120 | bool has_fully_powered_off; | ||
| 120 | 121 | ||
| 121 | struct arizona_pdata pdata; | 122 | struct arizona_pdata pdata; |
| 122 | 123 | ||
| @@ -153,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name, | |||
| 153 | void arizona_free_irq(struct arizona *arizona, int irq, void *data); | 154 | void arizona_free_irq(struct arizona *arizona, int irq, void *data); |
| 154 | int arizona_set_irq_wake(struct arizona *arizona, int irq, int on); | 155 | int arizona_set_irq_wake(struct arizona *arizona, int irq, int on); |
| 155 | 156 | ||
| 157 | #ifdef CONFIG_MFD_WM5102 | ||
| 156 | int wm5102_patch(struct arizona *arizona); | 158 | int wm5102_patch(struct arizona *arizona); |
| 159 | #else | ||
| 160 | static inline int wm5102_patch(struct arizona *arizona) | ||
| 161 | { | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | #endif | ||
| 165 | |||
| 157 | int wm5110_patch(struct arizona *arizona); | 166 | int wm5110_patch(struct arizona *arizona); |
| 158 | int wm8997_patch(struct arizona *arizona); | 167 | int wm8997_patch(struct arizona *arizona); |
| 159 | 168 | ||
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 1789cb0f4f17..43db4faad143 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h | |||
| @@ -121,6 +121,9 @@ struct arizona_pdata { | |||
| 121 | /** GPIO used for mic isolation with HPDET */ | 121 | /** GPIO used for mic isolation with HPDET */ |
| 122 | int hpdet_id_gpio; | 122 | int hpdet_id_gpio; |
| 123 | 123 | ||
| 124 | /** Channel to use for headphone detection */ | ||
| 125 | unsigned int hpdet_channel; | ||
| 126 | |||
| 124 | /** Extra debounce timeout used during initial mic detection (ms) */ | 127 | /** Extra debounce timeout used during initial mic detection (ms) */ |
| 125 | int micd_detect_debounce; | 128 | int micd_detect_debounce; |
| 126 | 129 | ||
| @@ -156,7 +159,10 @@ struct arizona_pdata { | |||
| 156 | /** MICBIAS configurations */ | 159 | /** MICBIAS configurations */ |
| 157 | struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; | 160 | struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; |
| 158 | 161 | ||
| 159 | /** Mode of input structures */ | 162 | /** |
| 163 | * Mode of input structures | ||
| 164 | * One of the ARIZONA_INMODE_xxx values | ||
| 165 | */ | ||
| 160 | int inmode[ARIZONA_MAX_INPUT]; | 166 | int inmode[ARIZONA_MAX_INPUT]; |
| 161 | 167 | ||
| 162 | /** Mode for outputs */ | 168 | /** Mode for outputs */ |
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index aacc10d7789c..3499d36e6067 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h | |||
| @@ -2515,9 +2515,12 @@ | |||
| 2515 | #define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */ | 2515 | #define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */ |
| 2516 | #define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */ | 2516 | #define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */ |
| 2517 | #define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */ | 2517 | #define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */ |
| 2518 | #define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */ | 2518 | #define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */ |
| 2519 | #define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */ | 2519 | #define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */ |
| 2520 | #define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */ | 2520 | #define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */ |
| 2521 | #define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */ | ||
| 2522 | #define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */ | ||
| 2523 | #define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */ | ||
| 2521 | #define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */ | 2524 | #define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */ |
| 2522 | #define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */ | 2525 | #define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */ |
| 2523 | #define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */ | 2526 | #define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */ |
| @@ -2588,9 +2591,12 @@ | |||
| 2588 | #define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */ | 2591 | #define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */ |
| 2589 | #define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */ | 2592 | #define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */ |
| 2590 | #define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */ | 2593 | #define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */ |
| 2591 | #define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */ | 2594 | #define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */ |
| 2592 | #define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */ | 2595 | #define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */ |
| 2593 | #define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */ | 2596 | #define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */ |
| 2597 | #define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */ | ||
| 2598 | #define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */ | ||
| 2599 | #define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */ | ||
| 2594 | #define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */ | 2600 | #define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */ |
| 2595 | #define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */ | 2601 | #define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */ |
| 2596 | #define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */ | 2602 | #define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */ |
| @@ -2661,9 +2667,12 @@ | |||
| 2661 | #define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */ | 2667 | #define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */ |
| 2662 | #define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */ | 2668 | #define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */ |
| 2663 | #define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */ | 2669 | #define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */ |
| 2664 | #define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */ | 2670 | #define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */ |
| 2665 | #define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */ | 2671 | #define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */ |
| 2666 | #define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */ | 2672 | #define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */ |
| 2673 | #define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */ | ||
| 2674 | #define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */ | ||
| 2675 | #define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */ | ||
| 2667 | #define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */ | 2676 | #define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */ |
| 2668 | #define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */ | 2677 | #define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */ |
| 2669 | #define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */ | 2678 | #define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */ |
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index dfabd6db7ddf..c2aa853fb412 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | enum { | 14 | enum { |
| 15 | AXP202_ID = 0, | 15 | AXP202_ID = 0, |
| 16 | AXP209_ID, | 16 | AXP209_ID, |
| 17 | AXP221_ID, | ||
| 17 | AXP288_ID, | 18 | AXP288_ID, |
| 18 | NR_AXP20X_VARIANTS, | 19 | NR_AXP20X_VARIANTS, |
| 19 | }; | 20 | }; |
| @@ -45,6 +46,28 @@ enum { | |||
| 45 | #define AXP20X_V_LTF_DISCHRG 0x3c | 46 | #define AXP20X_V_LTF_DISCHRG 0x3c |
| 46 | #define AXP20X_V_HTF_DISCHRG 0x3d | 47 | #define AXP20X_V_HTF_DISCHRG 0x3d |
| 47 | 48 | ||
| 49 | #define AXP22X_PWR_OUT_CTRL1 0x10 | ||
| 50 | #define AXP22X_PWR_OUT_CTRL2 0x12 | ||
| 51 | #define AXP22X_PWR_OUT_CTRL3 0x13 | ||
| 52 | #define AXP22X_DLDO1_V_OUT 0x15 | ||
| 53 | #define AXP22X_DLDO2_V_OUT 0x16 | ||
| 54 | #define AXP22X_DLDO3_V_OUT 0x17 | ||
| 55 | #define AXP22X_DLDO4_V_OUT 0x18 | ||
| 56 | #define AXP22X_ELDO1_V_OUT 0x19 | ||
| 57 | #define AXP22X_ELDO2_V_OUT 0x1a | ||
| 58 | #define AXP22X_ELDO3_V_OUT 0x1b | ||
| 59 | #define AXP22X_DC5LDO_V_OUT 0x1c | ||
| 60 | #define AXP22X_DCDC1_V_OUT 0x21 | ||
| 61 | #define AXP22X_DCDC2_V_OUT 0x22 | ||
| 62 | #define AXP22X_DCDC3_V_OUT 0x23 | ||
| 63 | #define AXP22X_DCDC4_V_OUT 0x24 | ||
| 64 | #define AXP22X_DCDC5_V_OUT 0x25 | ||
| 65 | #define AXP22X_DCDC23_V_RAMP_CTRL 0x27 | ||
| 66 | #define AXP22X_ALDO1_V_OUT 0x28 | ||
| 67 | #define AXP22X_ALDO2_V_OUT 0x29 | ||
| 68 | #define AXP22X_ALDO3_V_OUT 0x2a | ||
| 69 | #define AXP22X_CHRG_CTRL3 0x35 | ||
| 70 | |||
| 48 | /* Interrupt */ | 71 | /* Interrupt */ |
| 49 | #define AXP20X_IRQ1_EN 0x40 | 72 | #define AXP20X_IRQ1_EN 0x40 |
| 50 | #define AXP20X_IRQ2_EN 0x41 | 73 | #define AXP20X_IRQ2_EN 0x41 |
| @@ -100,6 +123,9 @@ enum { | |||
| 100 | #define AXP20X_VBUS_MON 0x8b | 123 | #define AXP20X_VBUS_MON 0x8b |
| 101 | #define AXP20X_OVER_TMP 0x8f | 124 | #define AXP20X_OVER_TMP 0x8f |
| 102 | 125 | ||
| 126 | #define AXP22X_PWREN_CTRL1 0x8c | ||
| 127 | #define AXP22X_PWREN_CTRL2 0x8d | ||
| 128 | |||
| 103 | /* GPIO */ | 129 | /* GPIO */ |
| 104 | #define AXP20X_GPIO0_CTRL 0x90 | 130 | #define AXP20X_GPIO0_CTRL 0x90 |
| 105 | #define AXP20X_LDO5_V_OUT 0x91 | 131 | #define AXP20X_LDO5_V_OUT 0x91 |
| @@ -108,6 +134,11 @@ enum { | |||
| 108 | #define AXP20X_GPIO20_SS 0x94 | 134 | #define AXP20X_GPIO20_SS 0x94 |
| 109 | #define AXP20X_GPIO3_CTRL 0x95 | 135 | #define AXP20X_GPIO3_CTRL 0x95 |
| 110 | 136 | ||
| 137 | #define AXP22X_LDO_IO0_V_OUT 0x91 | ||
| 138 | #define AXP22X_LDO_IO1_V_OUT 0x93 | ||
| 139 | #define AXP22X_GPIO_STATE 0x94 | ||
| 140 | #define AXP22X_GPIO_PULL_DOWN 0x95 | ||
| 141 | |||
| 111 | /* Battery */ | 142 | /* Battery */ |
| 112 | #define AXP20X_CHRG_CC_31_24 0xb0 | 143 | #define AXP20X_CHRG_CC_31_24 0xb0 |
| 113 | #define AXP20X_CHRG_CC_23_16 0xb1 | 144 | #define AXP20X_CHRG_CC_23_16 0xb1 |
| @@ -120,6 +151,9 @@ enum { | |||
| 120 | #define AXP20X_CC_CTRL 0xb8 | 151 | #define AXP20X_CC_CTRL 0xb8 |
| 121 | #define AXP20X_FG_RES 0xb9 | 152 | #define AXP20X_FG_RES 0xb9 |
| 122 | 153 | ||
| 154 | /* AXP22X specific registers */ | ||
| 155 | #define AXP22X_BATLOW_THRES1 0xe6 | ||
| 156 | |||
| 123 | /* AXP288 specific registers */ | 157 | /* AXP288 specific registers */ |
| 124 | #define AXP288_PMIC_ADC_H 0x56 | 158 | #define AXP288_PMIC_ADC_H 0x56 |
| 125 | #define AXP288_PMIC_ADC_L 0x57 | 159 | #define AXP288_PMIC_ADC_L 0x57 |
| @@ -158,6 +192,30 @@ enum { | |||
| 158 | AXP20X_REG_ID_MAX, | 192 | AXP20X_REG_ID_MAX, |
| 159 | }; | 193 | }; |
| 160 | 194 | ||
| 195 | enum { | ||
| 196 | AXP22X_DCDC1 = 0, | ||
| 197 | AXP22X_DCDC2, | ||
| 198 | AXP22X_DCDC3, | ||
| 199 | AXP22X_DCDC4, | ||
| 200 | AXP22X_DCDC5, | ||
| 201 | AXP22X_DC1SW, | ||
| 202 | AXP22X_DC5LDO, | ||
| 203 | AXP22X_ALDO1, | ||
| 204 | AXP22X_ALDO2, | ||
| 205 | AXP22X_ALDO3, | ||
| 206 | AXP22X_ELDO1, | ||
| 207 | AXP22X_ELDO2, | ||
| 208 | AXP22X_ELDO3, | ||
| 209 | AXP22X_DLDO1, | ||
| 210 | AXP22X_DLDO2, | ||
| 211 | AXP22X_DLDO3, | ||
| 212 | AXP22X_DLDO4, | ||
| 213 | AXP22X_RTC_LDO, | ||
| 214 | AXP22X_LDO_IO0, | ||
| 215 | AXP22X_LDO_IO1, | ||
| 216 | AXP22X_REG_ID_MAX, | ||
| 217 | }; | ||
| 218 | |||
| 161 | /* IRQs */ | 219 | /* IRQs */ |
| 162 | enum { | 220 | enum { |
| 163 | AXP20X_IRQ_ACIN_OVER_V = 1, | 221 | AXP20X_IRQ_ACIN_OVER_V = 1, |
| @@ -199,6 +257,34 @@ enum { | |||
| 199 | AXP20X_IRQ_GPIO0_INPUT, | 257 | AXP20X_IRQ_GPIO0_INPUT, |
| 200 | }; | 258 | }; |
| 201 | 259 | ||
| 260 | enum axp22x_irqs { | ||
| 261 | AXP22X_IRQ_ACIN_OVER_V = 1, | ||
| 262 | AXP22X_IRQ_ACIN_PLUGIN, | ||
| 263 | AXP22X_IRQ_ACIN_REMOVAL, | ||
| 264 | AXP22X_IRQ_VBUS_OVER_V, | ||
| 265 | AXP22X_IRQ_VBUS_PLUGIN, | ||
| 266 | AXP22X_IRQ_VBUS_REMOVAL, | ||
| 267 | AXP22X_IRQ_VBUS_V_LOW, | ||
| 268 | AXP22X_IRQ_BATT_PLUGIN, | ||
| 269 | AXP22X_IRQ_BATT_REMOVAL, | ||
| 270 | AXP22X_IRQ_BATT_ENT_ACT_MODE, | ||
| 271 | AXP22X_IRQ_BATT_EXIT_ACT_MODE, | ||
| 272 | AXP22X_IRQ_CHARG, | ||
| 273 | AXP22X_IRQ_CHARG_DONE, | ||
| 274 | AXP22X_IRQ_BATT_TEMP_HIGH, | ||
| 275 | AXP22X_IRQ_BATT_TEMP_LOW, | ||
| 276 | AXP22X_IRQ_DIE_TEMP_HIGH, | ||
| 277 | AXP22X_IRQ_PEK_SHORT, | ||
| 278 | AXP22X_IRQ_PEK_LONG, | ||
| 279 | AXP22X_IRQ_LOW_PWR_LVL1, | ||
| 280 | AXP22X_IRQ_LOW_PWR_LVL2, | ||
| 281 | AXP22X_IRQ_TIMER, | ||
| 282 | AXP22X_IRQ_PEK_RIS_EDGE, | ||
| 283 | AXP22X_IRQ_PEK_FAL_EDGE, | ||
| 284 | AXP22X_IRQ_GPIO1_INPUT, | ||
| 285 | AXP22X_IRQ_GPIO0_INPUT, | ||
| 286 | }; | ||
| 287 | |||
| 202 | enum axp288_irqs { | 288 | enum axp288_irqs { |
| 203 | AXP288_IRQ_VBUS_FALL = 2, | 289 | AXP288_IRQ_VBUS_FALL = 2, |
| 204 | AXP288_IRQ_VBUS_RISE, | 290 | AXP288_IRQ_VBUS_RISE, |
| @@ -275,4 +361,16 @@ struct axp20x_fg_pdata { | |||
| 275 | int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; | 361 | int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; |
| 276 | }; | 362 | }; |
| 277 | 363 | ||
| 364 | struct axp20x_chrg_pdata { | ||
| 365 | int max_cc; | ||
| 366 | int max_cv; | ||
| 367 | int def_cc; | ||
| 368 | int def_cv; | ||
| 369 | }; | ||
| 370 | |||
| 371 | struct axp288_extcon_pdata { | ||
| 372 | /* GPIO pin control to switch D+/D- lines between PMIC and SOC */ | ||
| 373 | struct gpio_desc *gpio_mux_cntl; | ||
| 374 | }; | ||
| 375 | |||
| 278 | #endif /* __LINUX_MFD_AXP20X_H */ | 376 | #endif /* __LINUX_MFD_AXP20X_H */ |
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 324a34683971..da72671a42fa 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h | |||
| @@ -17,10 +17,29 @@ | |||
| 17 | #define __LINUX_MFD_CROS_EC_H | 17 | #define __LINUX_MFD_CROS_EC_H |
| 18 | 18 | ||
| 19 | #include <linux/cdev.h> | 19 | #include <linux/cdev.h> |
| 20 | #include <linux/device.h> | ||
| 20 | #include <linux/notifier.h> | 21 | #include <linux/notifier.h> |
| 21 | #include <linux/mfd/cros_ec_commands.h> | 22 | #include <linux/mfd/cros_ec_commands.h> |
| 22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
| 23 | 24 | ||
| 25 | #define CROS_EC_DEV_NAME "cros_ec" | ||
| 26 | #define CROS_EC_DEV_PD_NAME "cros_pd" | ||
| 27 | |||
| 28 | /* | ||
| 29 | * The EC is unresponsive for a time after a reboot command. Add a | ||
| 30 | * simple delay to make sure that the bus stays locked. | ||
| 31 | */ | ||
| 32 | #define EC_REBOOT_DELAY_MS 50 | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Max bus-specific overhead incurred by request/responses. | ||
| 36 | * I2C requires 1 additional byte for requests. | ||
| 37 | * I2C requires 2 additional bytes for responses. | ||
| 38 | */ | ||
| 39 | #define EC_PROTO_VERSION_UNKNOWN 0 | ||
| 40 | #define EC_MAX_REQUEST_OVERHEAD 1 | ||
| 41 | #define EC_MAX_RESPONSE_OVERHEAD 2 | ||
| 42 | |||
| 24 | /* | 43 | /* |
| 25 | * Command interface between EC and AP, for LPC, I2C and SPI interfaces. | 44 | * Command interface between EC and AP, for LPC, I2C and SPI interfaces. |
| 26 | */ | 45 | */ |
| @@ -42,8 +61,7 @@ enum { | |||
| 42 | * @outsize: Outgoing length in bytes | 61 | * @outsize: Outgoing length in bytes |
| 43 | * @insize: Max number of bytes to accept from EC | 62 | * @insize: Max number of bytes to accept from EC |
| 44 | * @result: EC's response to the command (separate from communication failure) | 63 | * @result: EC's response to the command (separate from communication failure) |
| 45 | * @outdata: Outgoing data to EC | 64 | * @data: Where to put the incoming data from EC and outgoing data to EC |
| 46 | * @indata: Where to put the incoming data from EC | ||
| 47 | */ | 65 | */ |
| 48 | struct cros_ec_command { | 66 | struct cros_ec_command { |
| 49 | uint32_t version; | 67 | uint32_t version; |
| @@ -51,18 +69,14 @@ struct cros_ec_command { | |||
| 51 | uint32_t outsize; | 69 | uint32_t outsize; |
| 52 | uint32_t insize; | 70 | uint32_t insize; |
| 53 | uint32_t result; | 71 | uint32_t result; |
| 54 | uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE]; | 72 | uint8_t data[0]; |
| 55 | uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE]; | ||
| 56 | }; | 73 | }; |
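With the fixed outdata/indata buffers replaced by a single flexible data[] member, a caller now allocates the header and payload as one block. A minimal sketch, assuming local variables outsize, insize and out_buf (EC_CMD_HELLO is just a stand-in host command):

    struct cros_ec_command *msg;

    /* one allocation covers the header plus the larger of the two payloads */
    msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
    if (!msg)
            return -ENOMEM;

    msg->version = 0;
    msg->command = EC_CMD_HELLO;
    msg->outsize = outsize;
    msg->insize = insize;
    memcpy(msg->data, out_buf, outsize);   /* outgoing bytes live in data[] */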
| 57 | 74 | ||
| 58 | /** | 75 | /** |
| 59 | * struct cros_ec_device - Information about a ChromeOS EC device | 76 | * struct cros_ec_device - Information about a ChromeOS EC device |
| 60 | * | 77 | * |
| 61 | * @ec_name: name of EC device (e.g. 'chromeos-ec') | ||
| 62 | * @phys_name: name of physical comms layer (e.g. 'i2c-4') | 78 | * @phys_name: name of physical comms layer (e.g. 'i2c-4') |
| 63 | * @dev: Device pointer for physical comms device | 79 | * @dev: Device pointer for physical comms device |
| 64 | * @vdev: Device pointer for virtual comms device | ||
| 65 | * @cdev: Character device structure for virtual comms device | ||
| 66 | * @was_wake_device: true if this device was set to wake the system from | 80 | * @was_wake_device: true if this device was set to wake the system from |
| 67 | * sleep at the last suspend | 81 | * sleep at the last suspend |
| 68 | * @cmd_readmem: direct read of the EC memory-mapped region, if supported | 82 | * @cmd_readmem: direct read of the EC memory-mapped region, if supported |
| @@ -74,6 +88,7 @@ struct cros_ec_command { | |||
| 74 | * | 88 | * |
| 75 | * @priv: Private data | 89 | * @priv: Private data |
| 76 | * @irq: Interrupt to use | 90 | * @irq: Interrupt to use |
| 91 | * @id: Device id | ||
| 77 | * @din: input buffer (for data from EC) | 92 | * @din: input buffer (for data from EC) |
| 78 | * @dout: output buffer (for data to EC) | 93 | * @dout: output buffer (for data to EC) |
| 79 | * \note | 94 | * \note |
| @@ -85,41 +100,72 @@ struct cros_ec_command { | |||
| 85 | * to using dword. | 100 | * to using dword. |
| 86 | * @din_size: size of din buffer to allocate (zero to use static din) | 101 | * @din_size: size of din buffer to allocate (zero to use static din) |
| 87 | * @dout_size: size of dout buffer to allocate (zero to use static dout) | 102 | * @dout_size: size of dout buffer to allocate (zero to use static dout) |
| 88 | * @parent: pointer to parent device (e.g. i2c or spi device) | ||
| 89 | * @wake_enabled: true if this device can wake the system from sleep | 103 | * @wake_enabled: true if this device can wake the system from sleep |
| 90 | * @cmd_xfer: send command to EC and get response | 104 | * @cmd_xfer: send command to EC and get response |
| 91 | * Returns the number of bytes received if the communication succeeded, but | 105 | * Returns the number of bytes received if the communication succeeded, but |
| 92 | * that doesn't mean the EC was happy with the command. The caller | 106 | * that doesn't mean the EC was happy with the command. The caller |
| 93 | * should check msg.result for the EC's result code. | 107 | * should check msg.result for the EC's result code. |
| 108 | * @pkt_xfer: send packet to EC and get response | ||
| 94 | * @lock: one transaction at a time | 109 | * @lock: one transaction at a time |
| 95 | */ | 110 | */ |
| 96 | struct cros_ec_device { | 111 | struct cros_ec_device { |
| 97 | 112 | ||
| 98 | /* These are used by other drivers that want to talk to the EC */ | 113 | /* These are used by other drivers that want to talk to the EC */ |
| 99 | const char *ec_name; | ||
| 100 | const char *phys_name; | 114 | const char *phys_name; |
| 101 | struct device *dev; | 115 | struct device *dev; |
| 102 | struct device *vdev; | ||
| 103 | struct cdev cdev; | ||
| 104 | bool was_wake_device; | 116 | bool was_wake_device; |
| 105 | struct class *cros_class; | 117 | struct class *cros_class; |
| 106 | int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, | 118 | int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, |
| 107 | unsigned int bytes, void *dest); | 119 | unsigned int bytes, void *dest); |
| 108 | 120 | ||
| 109 | /* These are used to implement the platform-specific interface */ | 121 | /* These are used to implement the platform-specific interface */ |
| 122 | u16 max_request; | ||
| 123 | u16 max_response; | ||
| 124 | u16 max_passthru; | ||
| 125 | u16 proto_version; | ||
| 110 | void *priv; | 126 | void *priv; |
| 111 | int irq; | 127 | int irq; |
| 112 | uint8_t *din; | 128 | u8 *din; |
| 113 | uint8_t *dout; | 129 | u8 *dout; |
| 114 | int din_size; | 130 | int din_size; |
| 115 | int dout_size; | 131 | int dout_size; |
| 116 | struct device *parent; | ||
| 117 | bool wake_enabled; | 132 | bool wake_enabled; |
| 118 | int (*cmd_xfer)(struct cros_ec_device *ec, | 133 | int (*cmd_xfer)(struct cros_ec_device *ec, |
| 119 | struct cros_ec_command *msg); | 134 | struct cros_ec_command *msg); |
| 135 | int (*pkt_xfer)(struct cros_ec_device *ec, | ||
| 136 | struct cros_ec_command *msg); | ||
| 120 | struct mutex lock; | 137 | struct mutex lock; |
| 121 | }; | 138 | }; |
| 122 | 139 | ||
| 140 | /* struct cros_ec_platform - ChromeOS EC platform information | ||
| 141 | * | ||
| 142 | * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) | ||
| 143 | * used in /dev/ and sysfs. | ||
| 144 | * @cmd_offset: offset to apply for each command. Set when | ||
| 145 | * registering a device behind another one. | ||
| 146 | */ | ||
| 147 | struct cros_ec_platform { | ||
| 148 | const char *ec_name; | ||
| 149 | u16 cmd_offset; | ||
| 150 | }; | ||
| 151 | |||
| 152 | /* | ||
| 153 | * struct cros_ec_dev - ChromeOS EC device entry point | ||
| 154 | * | ||
| 155 | * @class_dev: Device structure used in sysfs | ||
| 156 | * @cdev: Character device structure in /dev | ||
| 157 | * @ec_dev: cros_ec_device structure to talk to the physical device | ||
| 158 | * @dev: pointer to the platform device | ||
| 159 | * @cmd_offset: offset to apply for each command. | ||
| 160 | */ | ||
| 161 | struct cros_ec_dev { | ||
| 162 | struct device class_dev; | ||
| 163 | struct cdev cdev; | ||
| 164 | struct cros_ec_device *ec_dev; | ||
| 165 | struct device *dev; | ||
| 166 | u16 cmd_offset; | ||
| 167 | }; | ||
| 168 | |||
| 123 | /** | 169 | /** |
| 124 | * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device | 170 | * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device |
| 125 | * | 171 | * |
| @@ -198,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev); | |||
| 198 | */ | 244 | */ |
| 199 | int cros_ec_register(struct cros_ec_device *ec_dev); | 245 | int cros_ec_register(struct cros_ec_device *ec_dev); |
| 200 | 246 | ||
| 247 | /** | ||
| 248 | * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC | ||
| 249 | * | ||
| 250 | * @ec_dev: Device to query | ||
| 251 | * @return 0 if ok, -ve on error | ||
| 252 | */ | ||
| 253 | int cros_ec_query_all(struct cros_ec_device *ec_dev); | ||
| 254 | |||
| 255 | /* sysfs stuff */ | ||
| 256 | extern struct attribute_group cros_ec_attr_group; | ||
| 257 | extern struct attribute_group cros_ec_lightbar_attr_group; | ||
| 258 | |||
| 201 | #endif /* __LINUX_MFD_CROS_EC_H */ | 259 | #endif /* __LINUX_MFD_CROS_EC_H */ |
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index a49cd41feea7..13b630c10d4c 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h | |||
| @@ -515,7 +515,7 @@ struct ec_host_response { | |||
| 515 | /* | 515 | /* |
| 516 | * Notes on commands: | 516 | * Notes on commands: |
| 517 | * | 517 | * |
| 518 | * Each command is an 8-byte command value. Commands which take params or | 518 | * Each command is a 16-bit command value. Commands which take params or |
| 519 | * return response data specify structs for that data. If no struct is | 519 | * return response data specify structs for that data. If no struct is |
| 520 | * specified, the command does not input or output data, respectively. | 520 | * specified, the command does not input or output data, respectively. |
| 521 | * Parameter/response length is implicit in the structs. Some underlying | 521 | * Parameter/response length is implicit in the structs. Some underlying |
| @@ -966,7 +966,7 @@ struct rgb_s { | |||
| 966 | /* List of tweakable parameters. NOTE: It's __packed so it can be sent in a | 966 | /* List of tweakable parameters. NOTE: It's __packed so it can be sent in a |
| 967 | * host command, but the alignment is the same regardless. Keep it that way. | 967 | * host command, but the alignment is the same regardless. Keep it that way. |
| 968 | */ | 968 | */ |
| 969 | struct lightbar_params { | 969 | struct lightbar_params_v0 { |
| 970 | /* Timing */ | 970 | /* Timing */ |
| 971 | int32_t google_ramp_up; | 971 | int32_t google_ramp_up; |
| 972 | int32_t google_ramp_down; | 972 | int32_t google_ramp_down; |
| @@ -1000,32 +1000,81 @@ struct lightbar_params { | |||
| 1000 | struct rgb_s color[8]; /* 0-3 are Google colors */ | 1000 | struct rgb_s color[8]; /* 0-3 are Google colors */ |
| 1001 | } __packed; | 1001 | } __packed; |
| 1002 | 1002 | ||
| 1003 | struct lightbar_params_v1 { | ||
| 1004 | /* Timing */ | ||
| 1005 | int32_t google_ramp_up; | ||
| 1006 | int32_t google_ramp_down; | ||
| 1007 | int32_t s3s0_ramp_up; | ||
| 1008 | int32_t s0_tick_delay[2]; /* AC=0/1 */ | ||
| 1009 | int32_t s0a_tick_delay[2]; /* AC=0/1 */ | ||
| 1010 | int32_t s0s3_ramp_down; | ||
| 1011 | int32_t s3_sleep_for; | ||
| 1012 | int32_t s3_ramp_up; | ||
| 1013 | int32_t s3_ramp_down; | ||
| 1014 | int32_t tap_tick_delay; | ||
| 1015 | int32_t tap_display_time; | ||
| 1016 | |||
| 1017 | /* Tap-for-battery params */ | ||
| 1018 | uint8_t tap_pct_red; | ||
| 1019 | uint8_t tap_pct_green; | ||
| 1020 | uint8_t tap_seg_min_on; | ||
| 1021 | uint8_t tap_seg_max_on; | ||
| 1022 | uint8_t tap_seg_osc; | ||
| 1023 | uint8_t tap_idx[3]; | ||
| 1024 | |||
| 1025 | /* Oscillation */ | ||
| 1026 | uint8_t osc_min[2]; /* AC=0/1 */ | ||
| 1027 | uint8_t osc_max[2]; /* AC=0/1 */ | ||
| 1028 | uint8_t w_ofs[2]; /* AC=0/1 */ | ||
| 1029 | |||
| 1030 | /* Brightness limits based on the backlight and AC. */ | ||
| 1031 | uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ | ||
| 1032 | uint8_t bright_bl_on_min[2]; /* AC=0/1 */ | ||
| 1033 | uint8_t bright_bl_on_max[2]; /* AC=0/1 */ | ||
| 1034 | |||
| 1035 | /* Battery level thresholds */ | ||
| 1036 | uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; | ||
| 1037 | |||
| 1038 | /* Map [AC][battery_level] to color index */ | ||
| 1039 | uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ | ||
| 1040 | uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ | ||
| 1041 | |||
| 1042 | /* Color palette */ | ||
| 1043 | struct rgb_s color[8]; /* 0-3 are Google colors */ | ||
| 1044 | } __packed; | ||
| 1045 | |||
| 1003 | struct ec_params_lightbar { | 1046 | struct ec_params_lightbar { |
| 1004 | uint8_t cmd; /* Command (see enum lightbar_command) */ | 1047 | uint8_t cmd; /* Command (see enum lightbar_command) */ |
| 1005 | union { | 1048 | union { |
| 1006 | struct { | 1049 | struct { |
| 1007 | /* no args */ | 1050 | /* no args */ |
| 1008 | } dump, off, on, init, get_seq, get_params, version; | 1051 | } dump, off, on, init, get_seq, get_params_v0, get_params_v1, |
| 1052 | version, get_brightness, get_demo; | ||
| 1009 | 1053 | ||
| 1010 | struct num { | 1054 | struct { |
| 1011 | uint8_t num; | 1055 | uint8_t num; |
| 1012 | } brightness, seq, demo; | 1056 | } set_brightness, seq, demo; |
| 1013 | 1057 | ||
| 1014 | struct reg { | 1058 | struct { |
| 1015 | uint8_t ctrl, reg, value; | 1059 | uint8_t ctrl, reg, value; |
| 1016 | } reg; | 1060 | } reg; |
| 1017 | 1061 | ||
| 1018 | struct rgb { | 1062 | struct { |
| 1019 | uint8_t led, red, green, blue; | 1063 | uint8_t led, red, green, blue; |
| 1020 | } rgb; | 1064 | } set_rgb; |
| 1065 | |||
| 1066 | struct { | ||
| 1067 | uint8_t led; | ||
| 1068 | } get_rgb; | ||
| 1021 | 1069 | ||
| 1022 | struct lightbar_params set_params; | 1070 | struct lightbar_params_v0 set_params_v0; |
| 1071 | struct lightbar_params_v1 set_params_v1; | ||
| 1023 | }; | 1072 | }; |
| 1024 | } __packed; | 1073 | } __packed; |
| 1025 | 1074 | ||
| 1026 | struct ec_response_lightbar { | 1075 | struct ec_response_lightbar { |
| 1027 | union { | 1076 | union { |
| 1028 | struct dump { | 1077 | struct { |
| 1029 | struct { | 1078 | struct { |
| 1030 | uint8_t reg; | 1079 | uint8_t reg; |
| 1031 | uint8_t ic0; | 1080 | uint8_t ic0; |
| @@ -1033,20 +1082,26 @@ struct ec_response_lightbar { | |||
| 1033 | } vals[23]; | 1082 | } vals[23]; |
| 1034 | } dump; | 1083 | } dump; |
| 1035 | 1084 | ||
| 1036 | struct get_seq { | 1085 | struct { |
| 1037 | uint8_t num; | 1086 | uint8_t num; |
| 1038 | } get_seq; | 1087 | } get_seq, get_brightness, get_demo; |
| 1039 | 1088 | ||
| 1040 | struct lightbar_params get_params; | 1089 | struct lightbar_params_v0 get_params_v0; |
| 1090 | struct lightbar_params_v1 get_params_v1; | ||
| 1041 | 1091 | ||
| 1042 | struct version { | 1092 | struct { |
| 1043 | uint32_t num; | 1093 | uint32_t num; |
| 1044 | uint32_t flags; | 1094 | uint32_t flags; |
| 1045 | } version; | 1095 | } version; |
| 1046 | 1096 | ||
| 1047 | struct { | 1097 | struct { |
| 1098 | uint8_t red, green, blue; | ||
| 1099 | } get_rgb; | ||
| 1100 | |||
| 1101 | struct { | ||
| 1048 | /* no return params */ | 1102 | /* no return params */ |
| 1049 | } off, on, init, brightness, seq, reg, rgb, demo, set_params; | 1103 | } off, on, init, set_brightness, seq, reg, set_rgb, |
| 1104 | demo, set_params_v0, set_params_v1; | ||
| 1050 | }; | 1105 | }; |
| 1051 | } __packed; | 1106 | } __packed; |
| 1052 | 1107 | ||
| @@ -1056,15 +1111,20 @@ enum lightbar_command { | |||
| 1056 | LIGHTBAR_CMD_OFF = 1, | 1111 | LIGHTBAR_CMD_OFF = 1, |
| 1057 | LIGHTBAR_CMD_ON = 2, | 1112 | LIGHTBAR_CMD_ON = 2, |
| 1058 | LIGHTBAR_CMD_INIT = 3, | 1113 | LIGHTBAR_CMD_INIT = 3, |
| 1059 | LIGHTBAR_CMD_BRIGHTNESS = 4, | 1114 | LIGHTBAR_CMD_SET_BRIGHTNESS = 4, |
| 1060 | LIGHTBAR_CMD_SEQ = 5, | 1115 | LIGHTBAR_CMD_SEQ = 5, |
| 1061 | LIGHTBAR_CMD_REG = 6, | 1116 | LIGHTBAR_CMD_REG = 6, |
| 1062 | LIGHTBAR_CMD_RGB = 7, | 1117 | LIGHTBAR_CMD_SET_RGB = 7, |
| 1063 | LIGHTBAR_CMD_GET_SEQ = 8, | 1118 | LIGHTBAR_CMD_GET_SEQ = 8, |
| 1064 | LIGHTBAR_CMD_DEMO = 9, | 1119 | LIGHTBAR_CMD_DEMO = 9, |
| 1065 | LIGHTBAR_CMD_GET_PARAMS = 10, | 1120 | LIGHTBAR_CMD_GET_PARAMS_V0 = 10, |
| 1066 | LIGHTBAR_CMD_SET_PARAMS = 11, | 1121 | LIGHTBAR_CMD_SET_PARAMS_V0 = 11, |
| 1067 | LIGHTBAR_CMD_VERSION = 12, | 1122 | LIGHTBAR_CMD_VERSION = 12, |
| 1123 | LIGHTBAR_CMD_GET_BRIGHTNESS = 13, | ||
| 1124 | LIGHTBAR_CMD_GET_RGB = 14, | ||
| 1125 | LIGHTBAR_CMD_GET_DEMO = 15, | ||
| 1126 | LIGHTBAR_CMD_GET_PARAMS_V1 = 16, | ||
| 1127 | LIGHTBAR_CMD_SET_PARAMS_V1 = 17, | ||
| 1068 | LIGHTBAR_NUM_CMDS | 1128 | LIGHTBAR_NUM_CMDS |
| 1069 | }; | 1129 | }; |
| 1070 | 1130 | ||
| @@ -1421,8 +1481,40 @@ struct ec_response_rtc { | |||
| 1421 | /*****************************************************************************/ | 1481 | /*****************************************************************************/ |
| 1422 | /* Port80 log access */ | 1482 | /* Port80 log access */ |
| 1423 | 1483 | ||
| 1484 | /* Maximum entries that can be read/written in a single command */ | ||
| 1485 | #define EC_PORT80_SIZE_MAX 32 | ||
| 1486 | |||
| 1424 | /* Get last port80 code from previous boot */ | 1487 | /* Get last port80 code from previous boot */ |
| 1425 | #define EC_CMD_PORT80_LAST_BOOT 0x48 | 1488 | #define EC_CMD_PORT80_LAST_BOOT 0x48 |
| 1489 | #define EC_CMD_PORT80_READ 0x48 | ||
| 1490 | |||
| 1491 | enum ec_port80_subcmd { | ||
| 1492 | EC_PORT80_GET_INFO = 0, | ||
| 1493 | EC_PORT80_READ_BUFFER, | ||
| 1494 | }; | ||
| 1495 | |||
| 1496 | struct ec_params_port80_read { | ||
| 1497 | uint16_t subcmd; | ||
| 1498 | union { | ||
| 1499 | struct { | ||
| 1500 | uint32_t offset; | ||
| 1501 | uint32_t num_entries; | ||
| 1502 | } read_buffer; | ||
| 1503 | }; | ||
| 1504 | } __packed; | ||
| 1505 | |||
| 1506 | struct ec_response_port80_read { | ||
| 1507 | union { | ||
| 1508 | struct { | ||
| 1509 | uint32_t writes; | ||
| 1510 | uint32_t history_size; | ||
| 1511 | uint32_t last_boot; | ||
| 1512 | } get_info; | ||
| 1513 | struct { | ||
| 1514 | uint16_t codes[EC_PORT80_SIZE_MAX]; | ||
| 1515 | } data; | ||
| 1516 | }; | ||
| 1517 | } __packed; | ||
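A hedged usage sketch of the new read command: the whole port 80 history can be fetched EC_PORT80_SIZE_MAX entries at a time by looping over the offset (only the first request is shown):

    /* params for EC_CMD_PORT80_READ, reading the first window of the history */
    struct ec_params_port80_read p = {
            .subcmd = EC_PORT80_READ_BUFFER,
            .read_buffer = {
                    .offset = 0,
                    .num_entries = EC_PORT80_SIZE_MAX,
            },
    };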
| 1426 | 1518 | ||
| 1427 | struct ec_response_port80_last_boot { | 1519 | struct ec_response_port80_last_boot { |
| 1428 | uint16_t code; | 1520 | uint16_t code; |
| @@ -1782,6 +1874,7 @@ struct ec_params_gpio_set { | |||
| 1782 | /* Get GPIO value */ | 1874 | /* Get GPIO value */ |
| 1783 | #define EC_CMD_GPIO_GET 0x93 | 1875 | #define EC_CMD_GPIO_GET 0x93 |
| 1784 | 1876 | ||
| 1877 | /* Version 0 of input params and response */ | ||
| 1785 | struct ec_params_gpio_get { | 1878 | struct ec_params_gpio_get { |
| 1786 | char name[32]; | 1879 | char name[32]; |
| 1787 | } __packed; | 1880 | } __packed; |
| @@ -1789,6 +1882,38 @@ struct ec_response_gpio_get { | |||
| 1789 | uint8_t val; | 1882 | uint8_t val; |
| 1790 | } __packed; | 1883 | } __packed; |
| 1791 | 1884 | ||
| 1885 | /* Version 1 of input params and response */ | ||
| 1886 | struct ec_params_gpio_get_v1 { | ||
| 1887 | uint8_t subcmd; | ||
| 1888 | union { | ||
| 1889 | struct { | ||
| 1890 | char name[32]; | ||
| 1891 | } get_value_by_name; | ||
| 1892 | struct { | ||
| 1893 | uint8_t index; | ||
| 1894 | } get_info; | ||
| 1895 | }; | ||
| 1896 | } __packed; | ||
| 1897 | |||
| 1898 | struct ec_response_gpio_get_v1 { | ||
| 1899 | union { | ||
| 1900 | struct { | ||
| 1901 | uint8_t val; | ||
| 1902 | } get_value_by_name, get_count; | ||
| 1903 | struct { | ||
| 1904 | uint8_t val; | ||
| 1905 | char name[32]; | ||
| 1906 | uint32_t flags; | ||
| 1907 | } get_info; | ||
| 1908 | }; | ||
| 1909 | } __packed; | ||
| 1910 | |||
| 1911 | enum gpio_get_subcmd { | ||
| 1912 | EC_GPIO_GET_BY_NAME = 0, | ||
| 1913 | EC_GPIO_GET_COUNT = 1, | ||
| 1914 | EC_GPIO_GET_INFO = 2, | ||
| 1915 | }; | ||
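As a sketch of the version-1 interface, querying metadata for the EC's first GPIO (index 0 is illustrative) looks roughly like this:

    struct ec_params_gpio_get_v1 p = {
            .subcmd = EC_GPIO_GET_INFO,
            .get_info = { .index = 0 },
    };
    /* the matching ec_response_gpio_get_v1.get_info returns val, name and flags */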
| 1916 | |||
| 1792 | /*****************************************************************************/ | 1917 | /*****************************************************************************/ |
| 1793 | /* I2C commands. Only available when flash write protect is unlocked. */ | 1918 | /* I2C commands. Only available when flash write protect is unlocked. */ |
| 1794 | 1919 | ||
| @@ -1857,13 +1982,21 @@ struct ec_params_charge_control { | |||
| 1857 | /*****************************************************************************/ | 1982 | /*****************************************************************************/ |
| 1858 | 1983 | ||
| 1859 | /* | 1984 | /* |
| 1860 | * Cut off battery power output if the battery supports. | 1985 | * Cut off battery power immediately or after the host has shut down. |
| 1861 | * | 1986 | * |
| 1862 | * For unsupported battery, just don't implement this command and lets EC | 1987 | * return EC_RES_INVALID_COMMAND if unsupported by a board/battery. |
| 1863 | * return EC_RES_INVALID_COMMAND. | 1988 | * EC_RES_SUCCESS if the command was successful. |
| 1989 | * EC_RES_ERROR if the cut off command failed. | ||
| 1864 | */ | 1990 | */ |
| 1991 | |||
| 1865 | #define EC_CMD_BATTERY_CUT_OFF 0x99 | 1992 | #define EC_CMD_BATTERY_CUT_OFF 0x99 |
| 1866 | 1993 | ||
| 1994 | #define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0) | ||
| 1995 | |||
| 1996 | struct ec_params_battery_cutoff { | ||
| 1997 | uint8_t flags; | ||
| 1998 | } __packed; | ||
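For example, a host that wants the cutoff deferred until it has finished shutting down would send the new parameter struct (a sketch; the surrounding command plumbing is not shown):

    struct ec_params_battery_cutoff p = {
            .flags = EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN,  /* act only after host shutdown */
    };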
| 1999 | |||
| 1867 | /*****************************************************************************/ | 2000 | /*****************************************************************************/ |
| 1868 | /* USB port mux control. */ | 2001 | /* USB port mux control. */ |
| 1869 | 2002 | ||
| @@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block { | |||
| 2142 | } __packed; | 2275 | } __packed; |
| 2143 | 2276 | ||
| 2144 | /*****************************************************************************/ | 2277 | /*****************************************************************************/ |
| 2278 | /* Battery vendor parameters | ||
| 2279 | * | ||
| 2280 | * Get or set vendor-specific parameters in the battery. Implementations may | ||
| 2281 | * differ between boards or batteries. On a set operation, the response | ||
| 2282 | * contains the actual value set, which may be rounded or clipped from the | ||
| 2283 | * requested value. | ||
| 2284 | */ | ||
| 2285 | |||
| 2286 | #define EC_CMD_BATTERY_VENDOR_PARAM 0xb4 | ||
| 2287 | |||
| 2288 | enum ec_battery_vendor_param_mode { | ||
| 2289 | BATTERY_VENDOR_PARAM_MODE_GET = 0, | ||
| 2290 | BATTERY_VENDOR_PARAM_MODE_SET, | ||
| 2291 | }; | ||
| 2292 | |||
| 2293 | struct ec_params_battery_vendor_param { | ||
| 2294 | uint32_t param; | ||
| 2295 | uint32_t value; | ||
| 2296 | uint8_t mode; | ||
| 2297 | } __packed; | ||
| 2298 | |||
| 2299 | struct ec_response_battery_vendor_param { | ||
| 2300 | uint32_t value; | ||
| 2301 | } __packed; | ||
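A brief sketch of a read; the parameter id below is purely illustrative, since the ids are vendor- and board-specific:

    struct ec_params_battery_vendor_param p = {
            .param = 0x10,                            /* hypothetical vendor parameter id */
            .mode  = BATTERY_VENDOR_PARAM_MODE_GET,
    };
    /* the EC replies with ec_response_battery_vendor_param.value */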
| 2302 | |||
| 2303 | /*****************************************************************************/ | ||
| 2145 | /* System commands */ | 2304 | /* System commands */ |
| 2146 | 2305 | ||
| 2147 | /* | 2306 | /* |
| @@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec { | |||
| 2338 | 2497 | ||
| 2339 | /*****************************************************************************/ | 2498 | /*****************************************************************************/ |
| 2340 | /* | 2499 | /* |
| 2500 | * PD commands | ||
| 2501 | * | ||
| 2502 | * These commands are for PD MCU communication. | ||
| 2503 | */ | ||
| 2504 | |||
| 2505 | /* EC to PD MCU exchange status command */ | ||
| 2506 | #define EC_CMD_PD_EXCHANGE_STATUS 0x100 | ||
| 2507 | |||
| 2508 | /* Status of EC being sent to PD */ | ||
| 2509 | struct ec_params_pd_status { | ||
| 2510 | int8_t batt_soc; /* battery state of charge */ | ||
| 2511 | } __packed; | ||
| 2512 | |||
| 2513 | /* Status of PD being sent back to EC */ | ||
| 2514 | struct ec_response_pd_status { | ||
| 2515 | int8_t status; /* PD MCU status */ | ||
| 2516 | uint32_t curr_lim_ma; /* input current limit */ | ||
| 2517 | } __packed; | ||
| 2518 | |||
| 2519 | /* Set USB type-C port role and muxes */ | ||
| 2520 | #define EC_CMD_USB_PD_CONTROL 0x101 | ||
| 2521 | |||
| 2522 | enum usb_pd_control_role { | ||
| 2523 | USB_PD_CTRL_ROLE_NO_CHANGE = 0, | ||
| 2524 | USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */ | ||
| 2525 | USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, | ||
| 2526 | USB_PD_CTRL_ROLE_FORCE_SINK = 3, | ||
| 2527 | USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, | ||
| 2528 | }; | ||
| 2529 | |||
| 2530 | enum usb_pd_control_mux { | ||
| 2531 | USB_PD_CTRL_MUX_NO_CHANGE = 0, | ||
| 2532 | USB_PD_CTRL_MUX_NONE = 1, | ||
| 2533 | USB_PD_CTRL_MUX_USB = 2, | ||
| 2534 | USB_PD_CTRL_MUX_DP = 3, | ||
| 2535 | USB_PD_CTRL_MUX_DOCK = 4, | ||
| 2536 | USB_PD_CTRL_MUX_AUTO = 5, | ||
| 2537 | }; | ||
| 2538 | |||
| 2539 | struct ec_params_usb_pd_control { | ||
| 2540 | uint8_t port; | ||
| 2541 | uint8_t role; | ||
| 2542 | uint8_t mux; | ||
| 2543 | } __packed; | ||
| 2544 | |||
| 2545 | /*****************************************************************************/ | ||
| 2546 | /* | ||
| 2547 | * Passthru commands | ||
| 2548 | * | ||
| 2549 | * Some platforms have sub-processors chained to each other. For example: | ||
| 2550 | * | ||
| 2551 | * AP <--> EC <--> PD MCU | ||
| 2552 | * | ||
| 2553 | * The top 2 bits of the command number are used to indicate which device the | ||
| 2554 | * command is intended for. Device 0 is always the device receiving the | ||
| 2555 | * command; other device mapping is board-specific. | ||
| 2556 | * | ||
| 2557 | * When a device receives a command to be passed to a sub-processor, it passes | ||
| 2558 | * it on with the device number set back to 0. This allows the sub-processor | ||
| 2559 | * to remain blissfully unaware of whether the command originated on the next | ||
| 2560 | * device up the chain, or was passed through from the AP. | ||
| 2561 | * | ||
| 2562 | * In the above example, if the AP wants to send command 0x0002 to the PD MCU, | ||
| 2563 | * AP sends command 0x4002 to the EC | ||
| 2564 | * EC sends command 0x0002 to the PD MCU | ||
| 2565 | * EC forwards PD MCU response back to the AP | ||
| 2566 | */ | ||
| 2567 | |||
| 2568 | /* Offset and max command number for sub-device n */ | ||
| 2569 | #define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n)) | ||
| 2570 | #define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff) | ||
| 2571 | |||
| 2572 | /*****************************************************************************/ | ||
| 2573 | /* | ||
| 2341 | * Deprecated constants. These constants have been renamed for clarity. The | 2574 | * Deprecated constants. These constants have been renamed for clarity. The |
| 2342 | * meaning and size has not changed. Programs that use the old names should | 2575 | * meaning and size has not changed. Programs that use the old names should |
| 2343 | * switch to the new names soon, as the old names may not be carried forward | 2576 | * switch to the new names soon, as the old names may not be carried forward |
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h index 956afa445998..5dc743fd63a6 100644 --- a/include/linux/mfd/da9055/core.h +++ b/include/linux/mfd/da9055/core.h | |||
| @@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg, | |||
| 89 | int da9055_device_init(struct da9055 *da9055); | 89 | int da9055_device_init(struct da9055 *da9055); |
| 90 | void da9055_device_exit(struct da9055 *da9055); | 90 | void da9055_device_exit(struct da9055 *da9055); |
| 91 | 91 | ||
| 92 | extern struct regmap_config da9055_regmap_config; | 92 | extern const struct regmap_config da9055_regmap_config; |
| 93 | 93 | ||
| 94 | #endif /* __DA9055_CORE_H */ | 94 | #endif /* __DA9055_CORE_H */ |
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h index 95c8742215a7..612383bd80ae 100644 --- a/include/linux/mfd/da9063/pdata.h +++ b/include/linux/mfd/da9063/pdata.h | |||
| @@ -103,6 +103,7 @@ struct da9063; | |||
| 103 | struct da9063_pdata { | 103 | struct da9063_pdata { |
| 104 | int (*init)(struct da9063 *da9063); | 104 | int (*init)(struct da9063 *da9063); |
| 105 | int irq_base; | 105 | int irq_base; |
| 106 | bool key_power; | ||
| 106 | unsigned flags; | 107 | unsigned flags; |
| 107 | struct da9063_regulators_pdata *regulators_pdata; | 108 | struct da9063_regulators_pdata *regulators_pdata; |
| 108 | struct led_platform_data *leds_pdata; | 109 | struct led_platform_data *leds_pdata; |
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index bb995ab9a575..d4b72d519115 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h | |||
| @@ -125,9 +125,4 @@ enum max77686_opmode { | |||
| 125 | MAX77686_OPMODE_STANDBY, | 125 | MAX77686_OPMODE_STANDBY, |
| 126 | }; | 126 | }; |
| 127 | 127 | ||
| 128 | struct max77686_opmode_data { | ||
| 129 | int id; | ||
| 130 | int mode; | ||
| 131 | }; | ||
| 132 | |||
| 133 | #endif /* __LINUX_MFD_MAX77686_H */ | 128 | #endif /* __LINUX_MFD_MAX77686_H */ |
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index c9d869027300..cb83883918a7 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h | |||
| @@ -118,47 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); | |||
| 118 | #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) | 118 | #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) |
| 119 | 119 | ||
| 120 | /** | 120 | /** |
| 121 | * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform | ||
| 122 | * data | ||
| 123 | * @sample_time: ADC converstion time in number of clock. | ||
| 124 | * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks, | ||
| 125 | * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks), | ||
| 126 | * recommended is 4. | ||
| 127 | * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC) | ||
| 128 | * @ref_sel: ADC reference source | ||
| 129 | * (0 -> internal reference, 1 -> external reference) | ||
| 130 | * @adc_freq: ADC Clock speed | ||
| 131 | * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz) | ||
| 132 | * @ave_ctrl: Sample average control | ||
| 133 | * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples) | ||
| 134 | * @touch_det_delay: Touch detect interrupt delay | ||
| 135 | * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us, | ||
| 136 | * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms) | ||
| 137 | * recommended is 3 | ||
| 138 | * @settling: Panel driver settling time | ||
| 139 | * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms, | ||
| 140 | * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms) | ||
| 141 | * recommended is 2 | ||
| 142 | * @fraction_z: Length of the fractional part in z | ||
| 143 | * (fraction_z ([0..7]) = Count of the fractional part) | ||
| 144 | * recommended is 7 | ||
| 145 | * @i_drive: current limit value of the touchscreen drivers | ||
| 146 | * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max) | ||
| 147 | * | ||
| 148 | * */ | ||
| 149 | struct stmpe_ts_platform_data { | ||
| 150 | u8 sample_time; | ||
| 151 | u8 mod_12b; | ||
| 152 | u8 ref_sel; | ||
| 153 | u8 adc_freq; | ||
| 154 | u8 ave_ctrl; | ||
| 155 | u8 touch_det_delay; | ||
| 156 | u8 settling; | ||
| 157 | u8 fraction_z; | ||
| 158 | u8 i_drive; | ||
| 159 | }; | ||
| 160 | |||
| 161 | /** | ||
| 162 | * struct stmpe_platform_data - STMPE platform data | 121 | * struct stmpe_platform_data - STMPE platform data |
| 163 | * @id: device id to distinguish between multiple STMPEs on the same board | 122 | * @id: device id to distinguish between multiple STMPEs on the same board |
| 164 | * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) | 123 | * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) |
| @@ -168,7 +127,6 @@ struct stmpe_ts_platform_data { | |||
| 168 | * @irq_over_gpio: true if gpio is used to get irq | 127 | * @irq_over_gpio: true if gpio is used to get irq |
| 169 | * @irq_gpio: gpio number over which irq will be requested (significant only if | 128 | * @irq_gpio: gpio number over which irq will be requested (significant only if |
| 170 | * irq_over_gpio is true) | 129 | * irq_over_gpio is true) |
| 171 | * @ts: touchscreen-specific platform data | ||
| 172 | */ | 130 | */ |
| 173 | struct stmpe_platform_data { | 131 | struct stmpe_platform_data { |
| 174 | int id; | 132 | int id; |
| @@ -178,8 +136,6 @@ struct stmpe_platform_data { | |||
| 178 | bool irq_over_gpio; | 136 | bool irq_over_gpio; |
| 179 | int irq_gpio; | 137 | int irq_gpio; |
| 180 | int autosleep_timeout; | 138 | int autosleep_timeout; |
| 181 | |||
| 182 | struct stmpe_ts_platform_data *ts; | ||
| 183 | }; | 139 | }; |
| 184 | 140 | ||
| 185 | #endif | 141 | #endif |
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h new file mode 100644 index 000000000000..afd9b8f1e363 --- /dev/null +++ b/include/linux/mfd/syscon/atmel-mc.h | |||
| @@ -0,0 +1,144 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005 Ivan Kokshaysky | ||
| 3 | * Copyright (C) SAN People | ||
| 4 | * | ||
| 5 | * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals | ||
| 6 | * registers. | ||
| 7 | * Based on AT91RM9200 datasheet revision E. | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_ | ||
| 16 | #define _LINUX_MFD_SYSCON_ATMEL_MC_H_ | ||
| 17 | |||
| 18 | /* Memory Controller */ | ||
| 19 | #define AT91_MC_RCR 0x00 | ||
| 20 | #define AT91_MC_RCB BIT(0) | ||
| 21 | |||
| 22 | #define AT91_MC_ASR 0x04 | ||
| 23 | #define AT91_MC_UNADD BIT(0) | ||
| 24 | #define AT91_MC_MISADD BIT(1) | ||
| 25 | #define AT91_MC_ABTSZ GENMASK(9, 8) | ||
| 26 | #define AT91_MC_ABTSZ_BYTE (0 << 8) | ||
| 27 | #define AT91_MC_ABTSZ_HALFWORD (1 << 8) | ||
| 28 | #define AT91_MC_ABTSZ_WORD (2 << 8) | ||
| 29 | #define AT91_MC_ABTTYP GENMASK(11, 10) | ||
| 30 | #define AT91_MC_ABTTYP_DATAREAD (0 << 10) | ||
| 31 | #define AT91_MC_ABTTYP_DATAWRITE (1 << 10) | ||
| 32 | #define AT91_MC_ABTTYP_FETCH (2 << 10) | ||
| 33 | #define AT91_MC_MST(n) BIT(16 + (n)) | ||
| 34 | #define AT91_MC_SVMST(n) BIT(24 + (n)) | ||
| 35 | |||
| 36 | #define AT91_MC_AASR 0x08 | ||
| 37 | |||
| 38 | #define AT91_MC_MPR 0x0c | ||
| 39 | #define AT91_MPR_MSTP(n) GENMASK(2 + ((n) * 4), ((n) * 4)) | ||
| 40 | |||
| 41 | /* External Bus Interface (EBI) registers */ | ||
| 42 | #define AT91_MC_EBI_CSA 0x60 | ||
| 43 | #define AT91_MC_EBI_CS(n) BIT(n) | ||
| 44 | #define AT91_MC_EBI_NUM_CS 8 | ||
| 45 | |||
| 46 | #define AT91_MC_EBI_CFGR 0x64 | ||
| 47 | #define AT91_MC_EBI_DBPUC BIT(0) | ||
| 48 | |||
| 49 | /* Static Memory Controller (SMC) registers */ | ||
| 50 | #define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4)) | ||
| 51 | #define AT91_MC_SMC_NWS GENMASK(6, 0) | ||
| 52 | #define AT91_MC_SMC_NWS_(x) ((x) << 0) | ||
| 53 | #define AT91_MC_SMC_WSEN BIT(7) | ||
| 54 | #define AT91_MC_SMC_TDF GENMASK(11, 8) | ||
| 55 | #define AT91_MC_SMC_TDF_(x) ((x) << 8) | ||
| 56 | #define AT91_MC_SMC_TDF_MAX 0xf | ||
| 57 | #define AT91_MC_SMC_BAT BIT(12) | ||
| 58 | #define AT91_MC_SMC_DBW GENMASK(14, 13) | ||
| 59 | #define AT91_MC_SMC_DBW_16 (1 << 13) | ||
| 60 | #define AT91_MC_SMC_DBW_8 (2 << 13) | ||
| 61 | #define AT91_MC_SMC_DPR BIT(15) | ||
| 62 | #define AT91_MC_SMC_ACSS GENMASK(17, 16) | ||
| 63 | #define AT91_MC_SMC_ACSS_(x) ((x) << 16) | ||
| 64 | #define AT91_MC_SMC_ACSS_MAX 3 | ||
| 65 | #define AT91_MC_SMC_RWSETUP GENMASK(26, 24) | ||
| 66 | #define AT91_MC_SMC_RWSETUP_(x) ((x) << 24) | ||
| 67 | #define AT91_MC_SMC_RWHOLD GENMASK(30, 28) | ||
| 68 | #define AT91_MC_SMC_RWHOLD_(x) ((x) << 28) | ||
| 69 | #define AT91_MC_SMC_RWHOLDSETUP_MAX 7 | ||
| 70 | |||
| 71 | /* SDRAM Controller registers */ | ||
| 72 | #define AT91_MC_SDRAMC_MR 0x90 | ||
| 73 | #define AT91_MC_SDRAMC_MODE GENMASK(3, 0) | ||
| 74 | #define AT91_MC_SDRAMC_MODE_NORMAL (0 << 0) | ||
| 75 | #define AT91_MC_SDRAMC_MODE_NOP (1 << 0) | ||
| 76 | #define AT91_MC_SDRAMC_MODE_PRECHARGE (2 << 0) | ||
| 77 | #define AT91_MC_SDRAMC_MODE_LMR (3 << 0) | ||
| 78 | #define AT91_MC_SDRAMC_MODE_REFRESH (4 << 0) | ||
| 79 | #define AT91_MC_SDRAMC_DBW_16 BIT(4) | ||
| 80 | |||
| 81 | #define AT91_MC_SDRAMC_TR 0x94 | ||
| 82 | #define AT91_MC_SDRAMC_COUNT GENMASK(11, 0) | ||
| 83 | |||
| 84 | #define AT91_MC_SDRAMC_CR 0x98 | ||
| 85 | #define AT91_MC_SDRAMC_NC GENMASK(1, 0) | ||
| 86 | #define AT91_MC_SDRAMC_NC_8 (0 << 0) | ||
| 87 | #define AT91_MC_SDRAMC_NC_9 (1 << 0) | ||
| 88 | #define AT91_MC_SDRAMC_NC_10 (2 << 0) | ||
| 89 | #define AT91_MC_SDRAMC_NC_11 (3 << 0) | ||
| 90 | #define AT91_MC_SDRAMC_NR GENMASK(3, 2) | ||
| 91 | #define AT91_MC_SDRAMC_NR_11 (0 << 2) | ||
| 92 | #define AT91_MC_SDRAMC_NR_12 (1 << 2) | ||
| 93 | #define AT91_MC_SDRAMC_NR_13 (2 << 2) | ||
| 94 | #define AT91_MC_SDRAMC_NB BIT(4) | ||
| 95 | #define AT91_MC_SDRAMC_NB_2 (0 << 4) | ||
| 96 | #define AT91_MC_SDRAMC_NB_4 (1 << 4) | ||
| 97 | #define AT91_MC_SDRAMC_CAS GENMASK(6, 5) | ||
| 98 | #define AT91_MC_SDRAMC_CAS_2 (2 << 5) | ||
| 99 | #define AT91_MC_SDRAMC_TWR GENMASK(10, 7) | ||
| 100 | #define AT91_MC_SDRAMC_TRC GENMASK(14, 11) | ||
| 101 | #define AT91_MC_SDRAMC_TRP GENMASK(18, 15) | ||
| 102 | #define AT91_MC_SDRAMC_TRCD GENMASK(22, 19) | ||
| 103 | #define AT91_MC_SDRAMC_TRAS GENMASK(26, 23) | ||
| 104 | #define AT91_MC_SDRAMC_TXSR GENMASK(30, 27) | ||
| 105 | |||
| 106 | #define AT91_MC_SDRAMC_SRR 0x9c | ||
| 107 | #define AT91_MC_SDRAMC_SRCB BIT(0) | ||
| 108 | |||
| 109 | #define AT91_MC_SDRAMC_LPR 0xa0 | ||
| 110 | #define AT91_MC_SDRAMC_LPCB BIT(0) | ||
| 111 | |||
| 112 | #define AT91_MC_SDRAMC_IER 0xa4 | ||
| 113 | #define AT91_MC_SDRAMC_IDR 0xa8 | ||
| 114 | #define AT91_MC_SDRAMC_IMR 0xac | ||
| 115 | #define AT91_MC_SDRAMC_ISR 0xb0 | ||
| 116 | #define AT91_MC_SDRAMC_RES BIT(0) | ||
| 117 | |||
| 118 | /* Burst Flash Controller register */ | ||
| 119 | #define AT91_MC_BFC_MR 0xc0 | ||
| 120 | #define AT91_MC_BFC_BFCOM GENMASK(1, 0) | ||
| 121 | #define AT91_MC_BFC_BFCOM_DISABLED (0 << 0) | ||
| 122 | #define AT91_MC_BFC_BFCOM_ASYNC (1 << 0) | ||
| 123 | #define AT91_MC_BFC_BFCOM_BURST (2 << 0) | ||
| 124 | #define AT91_MC_BFC_BFCC GENMASK(3, 2) | ||
| 125 | #define AT91_MC_BFC_BFCC_MCK (1 << 2) | ||
| 126 | #define AT91_MC_BFC_BFCC_DIV2 (2 << 2) | ||
| 127 | #define AT91_MC_BFC_BFCC_DIV4 (3 << 2) | ||
| 128 | #define AT91_MC_BFC_AVL GENMASK(7, 4) | ||
| 129 | #define AT91_MC_BFC_PAGES GENMASK(10, 8) | ||
| 130 | #define AT91_MC_BFC_PAGES_NO_PAGE (0 << 8) | ||
| 131 | #define AT91_MC_BFC_PAGES_16 (1 << 8) | ||
| 132 | #define AT91_MC_BFC_PAGES_32 (2 << 8) | ||
| 133 | #define AT91_MC_BFC_PAGES_64 (3 << 8) | ||
| 134 | #define AT91_MC_BFC_PAGES_128 (4 << 8) | ||
| 135 | #define AT91_MC_BFC_PAGES_256 (5 << 8) | ||
| 136 | #define AT91_MC_BFC_PAGES_512 (6 << 8) | ||
| 137 | #define AT91_MC_BFC_PAGES_1024 (7 << 8) | ||
| 138 | #define AT91_MC_BFC_OEL GENMASK(13, 12) | ||
| 139 | #define AT91_MC_BFC_BAAEN BIT(16) | ||
| 140 | #define AT91_MC_BFC_BFOEH BIT(17) | ||
| 141 | #define AT91_MC_BFC_MUXEN BIT(18) | ||
| 142 | #define AT91_MC_BFC_RDYEN BIT(19) | ||
| 143 | |||
| 144 | #endif /* _LINUX_MFD_SYSCON_ATMEL_MC_H_ */ | ||
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index f62e7cf227c6..58391f2e0414 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
| @@ -35,6 +35,8 @@ | |||
| 35 | 35 | ||
| 36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
| 37 | #include <linux/if_link.h> | 37 | #include <linux/if_link.h> |
| 38 | #include <linux/mlx4/device.h> | ||
| 39 | #include <linux/netdevice.h> | ||
| 38 | 40 | ||
| 39 | enum { | 41 | enum { |
| 40 | /* initialization and general commands */ | 42 | /* initialization and general commands */ |
| @@ -300,6 +302,10 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para | |||
| 300 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); | 302 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); |
| 301 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); | 303 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); |
| 302 | 304 | ||
| 305 | int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, | ||
| 306 | struct mlx4_counter *counter_stats, int reset); | ||
| 307 | int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, | ||
| 308 | struct ifla_vf_stats *vf_stats); | ||
| 303 | u32 mlx4_comm_get_version(void); | 309 | u32 mlx4_comm_get_version(void); |
| 304 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); | 310 | int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); |
| 305 | int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); | 311 | int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 83e80ab94500..fd13c1ce3b4a 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -46,8 +46,9 @@ | |||
| 46 | 46 | ||
| 47 | #define MAX_MSIX_P_PORT 17 | 47 | #define MAX_MSIX_P_PORT 17 |
| 48 | #define MAX_MSIX 64 | 48 | #define MAX_MSIX 64 |
| 49 | #define MSIX_LEGACY_SZ 4 | ||
| 50 | #define MIN_MSIX_P_PORT 5 | 49 | #define MIN_MSIX_P_PORT 5 |
| 50 | #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ | ||
| 51 | (dev_cap).num_ports * MIN_MSIX_P_PORT) | ||
| 51 | 52 | ||
| 52 | #define MLX4_MAX_100M_UNITS_VAL 255 /* | 53 | #define MLX4_MAX_100M_UNITS_VAL 255 /* |
| 53 | * work around: can't set values | 54 | * work around: can't set values |
| @@ -528,7 +529,6 @@ struct mlx4_caps { | |||
| 528 | int num_eqs; | 529 | int num_eqs; |
| 529 | int reserved_eqs; | 530 | int reserved_eqs; |
| 530 | int num_comp_vectors; | 531 | int num_comp_vectors; |
| 531 | int comp_pool; | ||
| 532 | int num_mpts; | 532 | int num_mpts; |
| 533 | int max_fmr_maps; | 533 | int max_fmr_maps; |
| 534 | int num_mtts; | 534 | int num_mtts; |
| @@ -771,6 +771,14 @@ union mlx4_ext_av { | |||
| 771 | struct mlx4_eth_av eth; | 771 | struct mlx4_eth_av eth; |
| 772 | }; | 772 | }; |
| 773 | 773 | ||
| 774 | /* Counters should saturate once they reach their maximum value */ | ||
| 775 | #define ASSIGN_32BIT_COUNTER(counter, value) do { \ | ||
| 776 | if ((value) > U32_MAX) \ | ||
| 777 | counter = cpu_to_be32(U32_MAX); \ | ||
| 778 | else \ | ||
| 779 | counter = cpu_to_be32(value); \ | ||
| 780 | } while (0) | ||
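A short usage sketch (the field and variable names are illustrative, not from this patch):

    __be32 hw_counter;
    u64 sw_total = 5000000000ULL;                 /* larger than U32_MAX */

    ASSIGN_32BIT_COUNTER(hw_counter, sw_total);   /* stores cpu_to_be32(U32_MAX) */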
| 781 | |||
| 774 | struct mlx4_counter { | 782 | struct mlx4_counter { |
| 775 | u8 reserved1[3]; | 783 | u8 reserved1[3]; |
| 776 | u8 counter_mode; | 784 | u8 counter_mode; |
| @@ -829,6 +837,12 @@ struct mlx4_dev { | |||
| 829 | struct mlx4_vf_dev *dev_vfs; | 837 | struct mlx4_vf_dev *dev_vfs; |
| 830 | }; | 838 | }; |
| 831 | 839 | ||
| 840 | struct mlx4_clock_params { | ||
| 841 | u64 offset; | ||
| 842 | u8 bar; | ||
| 843 | u8 size; | ||
| 844 | }; | ||
| 845 | |||
| 832 | struct mlx4_eqe { | 846 | struct mlx4_eqe { |
| 833 | u8 reserved1; | 847 | u8 reserved1; |
| 834 | u8 type; | 848 | u8 type; |
| @@ -957,6 +971,7 @@ struct mlx4_mad_ifc { | |||
| 957 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) | 971 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
| 958 | 972 | ||
| 959 | #define MLX4_INVALID_SLAVE_ID 0xFF | 973 | #define MLX4_INVALID_SLAVE_ID 0xFF |
| 974 | #define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1) | ||
| 960 | 975 | ||
| 961 | void handle_port_mgmt_change_event(struct work_struct *work); | 976 | void handle_port_mgmt_change_event(struct work_struct *work); |
| 962 | 977 | ||
| @@ -1332,10 +1347,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | |||
| 1332 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 1347 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); |
| 1333 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 1348 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); |
| 1334 | int mlx4_test_interrupts(struct mlx4_dev *dev); | 1349 | int mlx4_test_interrupts(struct mlx4_dev *dev); |
| 1335 | int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, | 1350 | u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port); |
| 1336 | int *vector); | 1351 | bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector); |
| 1352 | struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port); | ||
| 1353 | int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector); | ||
| 1337 | void mlx4_release_eq(struct mlx4_dev *dev, int vec); | 1354 | void mlx4_release_eq(struct mlx4_dev *dev, int vec); |
| 1338 | 1355 | ||
| 1356 | int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector); | ||
| 1339 | int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); | 1357 | int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); |
| 1340 | 1358 | ||
| 1341 | int mlx4_get_phys_port_id(struct mlx4_dev *dev); | 1359 | int mlx4_get_phys_port_id(struct mlx4_dev *dev); |
| @@ -1344,6 +1362,7 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); | |||
| 1344 | 1362 | ||
| 1345 | int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); | 1363 | int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); |
| 1346 | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); | 1364 | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); |
| 1365 | int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port); | ||
| 1347 | 1366 | ||
| 1348 | void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, | 1367 | void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, |
| 1349 | int port); | 1368 | int port); |
| @@ -1485,4 +1504,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, | |||
| 1485 | enum mlx4_access_reg_method method, | 1504 | enum mlx4_access_reg_method method, |
| 1486 | struct mlx4_ptys_reg *ptys_reg); | 1505 | struct mlx4_ptys_reg *ptys_reg); |
| 1487 | 1506 | ||
| 1507 | int mlx4_get_internal_clock_params(struct mlx4_dev *dev, | ||
| 1508 | struct mlx4_clock_params *params); | ||
| 1509 | |||
| 1488 | #endif /* MLX4_DEVICE_H */ | 1510 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 2695ced222df..abc4767695e4 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h | |||
| @@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | |||
| 169 | struct mlx5_query_cq_mbox_out *out); | 169 | struct mlx5_query_cq_mbox_out *out); |
| 170 | int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | 170 | int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, |
| 171 | struct mlx5_modify_cq_mbox_in *in, int in_sz); | 171 | struct mlx5_modify_cq_mbox_in *in, int in_sz); |
| 172 | int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, | ||
| 173 | struct mlx5_core_cq *cq, u16 cq_period, | ||
| 174 | u16 cq_max_count); | ||
| 172 | int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); | 175 | int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); |
| 173 | void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); | 176 | void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); |
| 174 | 177 | ||
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index abf65c790421..b943cd9e2097 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | 35 | ||
| 36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
| 37 | #include <rdma/ib_verbs.h> | 37 | #include <rdma/ib_verbs.h> |
| 38 | #include <linux/mlx5/mlx5_ifc.h> | ||
| 38 | 39 | ||
| 39 | #if defined(__LITTLE_ENDIAN) | 40 | #if defined(__LITTLE_ENDIAN) |
| 40 | #define MLX5_SET_HOST_ENDIANNESS 0 | 41 | #define MLX5_SET_HOST_ENDIANNESS 0 |
| @@ -58,6 +59,8 @@ | |||
| 58 | #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) | 59 | #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) |
| 59 | #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) | 60 | #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) |
| 60 | #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) | 61 | #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) |
| 62 | #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) | ||
| 63 | #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) | ||
| 61 | #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) | 64 | #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) |
| 62 | #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) | 65 | #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) |
| 63 | 66 | ||
| @@ -70,6 +73,14 @@ | |||
| 70 | << __mlx5_dw_bit_off(typ, fld))); \ | 73 | << __mlx5_dw_bit_off(typ, fld))); \ |
| 71 | } while (0) | 74 | } while (0) |
| 72 | 75 | ||
| 76 | #define MLX5_SET_TO_ONES(typ, p, fld) do { \ | ||
| 77 | BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ | ||
| 78 | *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ | ||
| 79 | cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ | ||
| 80 | (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ | ||
| 81 | << __mlx5_dw_bit_off(typ, fld))); \ | ||
| 82 | } while (0) | ||
| 83 | |||
| 73 | #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ | 84 | #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ |
| 74 | __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ | 85 | __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ |
| 75 | __mlx5_mask(typ, fld)) | 86 | __mlx5_mask(typ, fld)) |
| @@ -88,6 +99,12 @@ __mlx5_mask(typ, fld)) | |||
| 88 | 99 | ||
| 89 | #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) | 100 | #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) |
| 90 | 101 | ||
| 102 | #define MLX5_GET64_PR(typ, p, fld) ({ \ | ||
| 103 | u64 ___t = MLX5_GET64(typ, p, fld); \ | ||
| 104 | pr_debug(#fld " = 0x%llx\n", ___t); \ | ||
| 105 | ___t; \ | ||
| 106 | }) | ||
| 107 | |||
| 91 | enum { | 108 | enum { |
| 92 | MLX5_MAX_COMMANDS = 32, | 109 | MLX5_MAX_COMMANDS = 32, |
| 93 | MLX5_CMD_DATA_BLOCK_SIZE = 512, | 110 | MLX5_CMD_DATA_BLOCK_SIZE = 512, |
| @@ -115,6 +132,10 @@ enum { | |||
| 115 | }; | 132 | }; |
| 116 | 133 | ||
| 117 | enum { | 134 | enum { |
| 135 | MLX5_HW_START_PADDING = MLX5_INLINE_SEG, | ||
| 136 | }; | ||
| 137 | |||
| 138 | enum { | ||
| 118 | MLX5_MIN_PKEY_TABLE_SIZE = 128, | 139 | MLX5_MIN_PKEY_TABLE_SIZE = 128, |
| 119 | MLX5_MAX_LOG_PKEY_TABLE = 5, | 140 | MLX5_MAX_LOG_PKEY_TABLE = 5, |
| 120 | }; | 141 | }; |
| @@ -264,6 +285,7 @@ enum { | |||
| 264 | MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, | 285 | MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, |
| 265 | MLX5_OPCODE_SEND = 0x0a, | 286 | MLX5_OPCODE_SEND = 0x0a, |
| 266 | MLX5_OPCODE_SEND_IMM = 0x0b, | 287 | MLX5_OPCODE_SEND_IMM = 0x0b, |
| 288 | MLX5_OPCODE_LSO = 0x0e, | ||
| 267 | MLX5_OPCODE_RDMA_READ = 0x10, | 289 | MLX5_OPCODE_RDMA_READ = 0x10, |
| 268 | MLX5_OPCODE_ATOMIC_CS = 0x11, | 290 | MLX5_OPCODE_ATOMIC_CS = 0x11, |
| 269 | MLX5_OPCODE_ATOMIC_FA = 0x12, | 291 | MLX5_OPCODE_ATOMIC_FA = 0x12, |
| @@ -312,13 +334,6 @@ enum { | |||
| 312 | MLX5_CAP_OFF_CMDIF_CSUM = 46, | 334 | MLX5_CAP_OFF_CMDIF_CSUM = 46, |
| 313 | }; | 335 | }; |
| 314 | 336 | ||
| 315 | enum { | ||
| 316 | HCA_CAP_OPMOD_GET_MAX = 0, | ||
| 317 | HCA_CAP_OPMOD_GET_CUR = 1, | ||
| 318 | HCA_CAP_OPMOD_GET_ODP_MAX = 4, | ||
| 319 | HCA_CAP_OPMOD_GET_ODP_CUR = 5 | ||
| 320 | }; | ||
| 321 | |||
| 322 | struct mlx5_inbox_hdr { | 337 | struct mlx5_inbox_hdr { |
| 323 | __be16 opcode; | 338 | __be16 opcode; |
| 324 | u8 rsvd[4]; | 339 | u8 rsvd[4]; |
| @@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block { | |||
| 541 | u8 sig; | 556 | u8 sig; |
| 542 | }; | 557 | }; |
| 543 | 558 | ||
| 559 | enum { | ||
| 560 | MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, | ||
| 561 | }; | ||
| 562 | |||
| 544 | struct mlx5_err_cqe { | 563 | struct mlx5_err_cqe { |
| 545 | u8 rsvd0[32]; | 564 | u8 rsvd0[32]; |
| 546 | __be32 srqn; | 565 | __be32 srqn; |
| @@ -554,13 +573,22 @@ struct mlx5_err_cqe { | |||
| 554 | }; | 573 | }; |
| 555 | 574 | ||
| 556 | struct mlx5_cqe64 { | 575 | struct mlx5_cqe64 { |
| 557 | u8 rsvd0[17]; | 576 | u8 rsvd0[4]; |
| 577 | u8 lro_tcppsh_abort_dupack; | ||
| 578 | u8 lro_min_ttl; | ||
| 579 | __be16 lro_tcp_win; | ||
| 580 | __be32 lro_ack_seq_num; | ||
| 581 | __be32 rss_hash_result; | ||
| 582 | u8 rss_hash_type; | ||
| 558 | u8 ml_path; | 583 | u8 ml_path; |
| 559 | u8 rsvd20[4]; | 584 | u8 rsvd20[2]; |
| 585 | __be16 check_sum; | ||
| 560 | __be16 slid; | 586 | __be16 slid; |
| 561 | __be32 flags_rqpn; | 587 | __be32 flags_rqpn; |
| 562 | u8 rsvd28[4]; | 588 | u8 hds_ip_ext; |
| 563 | __be32 srqn; | 589 | u8 l4_hdr_type_etc; |
| 590 | __be16 vlan_info; | ||
| 591 | __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ | ||
| 564 | __be32 imm_inval_pkey; | 592 | __be32 imm_inval_pkey; |
| 565 | u8 rsvd40[4]; | 593 | u8 rsvd40[4]; |
| 566 | __be32 byte_cnt; | 594 | __be32 byte_cnt; |
| @@ -571,6 +599,40 @@ struct mlx5_cqe64 { | |||
| 571 | u8 op_own; | 599 | u8 op_own; |
| 572 | }; | 600 | }; |
| 573 | 601 | ||
| 602 | static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) | ||
| 603 | { | ||
| 604 | return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; | ||
| 605 | } | ||
| 606 | |||
| 607 | static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) | ||
| 608 | { | ||
| 609 | return (cqe->l4_hdr_type_etc >> 4) & 0x7; | ||
| 610 | } | ||
| 611 | |||
| 612 | static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) | ||
| 613 | { | ||
| 614 | return !!(cqe->l4_hdr_type_etc & 0x1); | ||
| 615 | } | ||
| 616 | |||
| 617 | enum { | ||
| 618 | CQE_L4_HDR_TYPE_NONE = 0x0, | ||
| 619 | CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, | ||
| 620 | CQE_L4_HDR_TYPE_UDP = 0x2, | ||
| 621 | CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, | ||
| 622 | CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, | ||
| 623 | }; | ||
| 624 | |||
| 625 | enum { | ||
| 626 | CQE_RSS_HTYPE_IP = 0x3 << 6, | ||
| 627 | CQE_RSS_HTYPE_L4 = 0x3 << 2, | ||
| 628 | }; | ||
| 629 | |||
| 630 | enum { | ||
| 631 | CQE_L2_OK = 1 << 0, | ||
| 632 | CQE_L3_OK = 1 << 1, | ||
| 633 | CQE_L4_OK = 1 << 2, | ||
| 634 | }; | ||
| 635 | |||
| 574 | struct mlx5_sig_err_cqe { | 636 | struct mlx5_sig_err_cqe { |
| 575 | u8 rsvd0[16]; | 637 | u8 rsvd0[16]; |
| 576 | __be32 expected_trans_sig; | 638 | __be32 expected_trans_sig; |
| @@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out { | |||
| 996 | u8 rsvd[8]; | 1058 | u8 rsvd[8]; |
| 997 | }; | 1059 | }; |
| 998 | 1060 | ||
| 1061 | #define MLX5_CMD_OP_MAX 0x920 | ||
| 1062 | |||
| 1063 | enum { | ||
| 1064 | VPORT_STATE_DOWN = 0x0, | ||
| 1065 | VPORT_STATE_UP = 0x1, | ||
| 1066 | }; | ||
| 1067 | |||
| 1068 | enum { | ||
| 1069 | MLX5_L3_PROT_TYPE_IPV4 = 0, | ||
| 1070 | MLX5_L3_PROT_TYPE_IPV6 = 1, | ||
| 1071 | }; | ||
| 1072 | |||
| 1073 | enum { | ||
| 1074 | MLX5_L4_PROT_TYPE_TCP = 0, | ||
| 1075 | MLX5_L4_PROT_TYPE_UDP = 1, | ||
| 1076 | }; | ||
| 1077 | |||
| 1078 | enum { | ||
| 1079 | MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, | ||
| 1080 | MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, | ||
| 1081 | MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, | ||
| 1082 | MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, | ||
| 1083 | MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, | ||
| 1084 | }; | ||
| 1085 | |||
| 1086 | enum { | ||
| 1087 | MLX5_MATCH_OUTER_HEADERS = 1 << 0, | ||
| 1088 | MLX5_MATCH_MISC_PARAMETERS = 1 << 1, | ||
| 1089 | MLX5_MATCH_INNER_HEADERS = 1 << 2, | ||
| 1090 | |||
| 1091 | }; | ||
| 1092 | |||
| 1093 | enum { | ||
| 1094 | MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, | ||
| 1095 | MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, | ||
| 1096 | }; | ||
| 1097 | |||
| 1098 | enum { | ||
| 1099 | MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, | ||
| 1100 | MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, | ||
| 1101 | MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, | ||
| 1102 | }; | ||
| 1103 | |||
| 1104 | enum { | ||
| 1105 | MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, | ||
| 1106 | MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, | ||
| 1107 | }; | ||
| 1108 | |||
| 1109 | /* MLX5 DEV CAPs */ | ||
| 1110 | |||
| 1111 | /* TODO: EAT.ME */ | ||
| 1112 | enum mlx5_cap_mode { | ||
| 1113 | HCA_CAP_OPMOD_GET_MAX = 0, | ||
| 1114 | HCA_CAP_OPMOD_GET_CUR = 1, | ||
| 1115 | }; | ||
| 1116 | |||
| 1117 | enum mlx5_cap_type { | ||
| 1118 | MLX5_CAP_GENERAL = 0, | ||
| 1119 | MLX5_CAP_ETHERNET_OFFLOADS, | ||
| 1120 | MLX5_CAP_ODP, | ||
| 1121 | MLX5_CAP_ATOMIC, | ||
| 1122 | MLX5_CAP_ROCE, | ||
| 1123 | MLX5_CAP_IPOIB_OFFLOADS, | ||
| 1124 | MLX5_CAP_EOIB_OFFLOADS, | ||
| 1125 | MLX5_CAP_FLOW_TABLE, | ||
| 1126 | /* NUM OF CAP Types */ | ||
| 1127 | MLX5_CAP_NUM | ||
| 1128 | }; | ||
| 1129 | |||
| 1130 | /* GET Dev Caps macros */ | ||
| 1131 | #define MLX5_CAP_GEN(mdev, cap) \ | ||
| 1132 | MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) | ||
| 1133 | |||
| 1134 | #define MLX5_CAP_GEN_MAX(mdev, cap) \ | ||
| 1135 | MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) | ||
| 1136 | |||
| 1137 | #define MLX5_CAP_ETH(mdev, cap) \ | ||
| 1138 | MLX5_GET(per_protocol_networking_offload_caps,\ | ||
| 1139 | mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) | ||
| 1140 | |||
| 1141 | #define MLX5_CAP_ETH_MAX(mdev, cap) \ | ||
| 1142 | MLX5_GET(per_protocol_networking_offload_caps,\ | ||
| 1143 | mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) | ||
| 1144 | |||
| 1145 | #define MLX5_CAP_ROCE(mdev, cap) \ | ||
| 1146 | MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) | ||
| 1147 | |||
| 1148 | #define MLX5_CAP_ROCE_MAX(mdev, cap) \ | ||
| 1149 | MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) | ||
| 1150 | |||
| 1151 | #define MLX5_CAP_ATOMIC(mdev, cap) \ | ||
| 1152 | MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) | ||
| 1153 | |||
| 1154 | #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ | ||
| 1155 | MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) | ||
| 1156 | |||
| 1157 | #define MLX5_CAP_FLOWTABLE(mdev, cap) \ | ||
| 1158 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) | ||
| 1159 | |||
| 1160 | #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ | ||
| 1161 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) | ||
| 1162 | |||
| 1163 | #define MLX5_CAP_ODP(mdev, cap)\ | ||
| 1164 | MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) | ||
| 1165 | |||
| 1166 | enum { | ||
| 1167 | MLX5_CMD_STAT_OK = 0x0, | ||
| 1168 | MLX5_CMD_STAT_INT_ERR = 0x1, | ||
| 1169 | MLX5_CMD_STAT_BAD_OP_ERR = 0x2, | ||
| 1170 | MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, | ||
| 1171 | MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, | ||
| 1172 | MLX5_CMD_STAT_BAD_RES_ERR = 0x5, | ||
| 1173 | MLX5_CMD_STAT_RES_BUSY = 0x6, | ||
| 1174 | MLX5_CMD_STAT_LIM_ERR = 0x8, | ||
| 1175 | MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, | ||
| 1176 | MLX5_CMD_STAT_IX_ERR = 0xa, | ||
| 1177 | MLX5_CMD_STAT_NO_RES_ERR = 0xf, | ||
| 1178 | MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, | ||
| 1179 | MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, | ||
| 1180 | MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, | ||
| 1181 | MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, | ||
| 1182 | MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, | ||
| 1183 | }; | ||
| 1184 | |||
| 1185 | static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) | ||
| 1186 | { | ||
| 1187 | if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) | ||
| 1188 | return 0; | ||
| 1189 | return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; | ||
| 1190 | } | ||
| 1191 | |||
| 999 | #endif /* MLX5_DEVICE_H */ | 1192 | #endif /* MLX5_DEVICE_H */ |
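In the device.h hunks above, struct mlx5_cqe64 gains LRO, RSS hash, checksum and VLAN fields, together with small inline helpers and enums for decoding them. The sketch below shows how a receive path might consult those helpers; it is editorial, and only the cqe fields, helpers and CQE_L4_HDR_TYPE_* values come from the patch.

	/* Editorial sketch: classify a receive CQE with the helpers and enums
	 * added above.  The caller and the debug prints are hypothetical.
	 */
	#include <linux/printk.h>
	#include <linux/mlx5/device.h>

	static bool example_cqe_is_tcp(struct mlx5_cqe64 *cqe)
	{
		u8 l4 = get_cqe_l4_hdr_type(cqe);

		/* Any of the TCP codings counts as TCP for this example. */
		return l4 == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
		       l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
		       l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
	}

	static void example_parse_rx_cqe(struct mlx5_cqe64 *cqe)
	{
		/* srqn carries lro_num_seg in bits [31:24], per the field comment above. */
		u32 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;

		if (cqe_has_vlan(cqe))
			pr_debug("vlan tci 0x%x\n", be16_to_cpu(cqe->vlan_info));

		if (example_cqe_is_tcp(cqe) && lro_num_seg > 1)
			pr_debug("LRO session: %u coalesced segments, tcp psh %d\n",
				 lro_num_seg, get_cqe_lro_tcppsh(cqe));
	}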
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9a90e7523dc2..5722d88c2429 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -44,7 +44,6 @@ | |||
| 44 | 44 | ||
| 45 | #include <linux/mlx5/device.h> | 45 | #include <linux/mlx5/device.h> |
| 46 | #include <linux/mlx5/doorbell.h> | 46 | #include <linux/mlx5/doorbell.h> |
| 47 | #include <linux/mlx5/mlx5_ifc.h> | ||
| 48 | 47 | ||
| 49 | enum { | 48 | enum { |
| 50 | MLX5_BOARD_ID_LEN = 64, | 49 | MLX5_BOARD_ID_LEN = 64, |
| @@ -85,7 +84,7 @@ enum { | |||
| 85 | }; | 84 | }; |
| 86 | 85 | ||
| 87 | enum { | 86 | enum { |
| 88 | MLX5_MAX_EQ_NAME = 32 | 87 | MLX5_MAX_IRQ_NAME = 32 |
| 89 | }; | 88 | }; |
| 90 | 89 | ||
| 91 | enum { | 90 | enum { |
| @@ -108,6 +107,7 @@ enum { | |||
| 108 | MLX5_REG_PUDE = 0x5009, | 107 | MLX5_REG_PUDE = 0x5009, |
| 109 | MLX5_REG_PMPE = 0x5010, | 108 | MLX5_REG_PMPE = 0x5010, |
| 110 | MLX5_REG_PELC = 0x500e, | 109 | MLX5_REG_PELC = 0x500e, |
| 110 | MLX5_REG_PVLC = 0x500f, | ||
| 111 | MLX5_REG_PMLP = 0, /* TBD */ | 111 | MLX5_REG_PMLP = 0, /* TBD */ |
| 112 | MLX5_REG_NODE_DESC = 0x6001, | 112 | MLX5_REG_NODE_DESC = 0x6001, |
| 113 | MLX5_REG_HOST_ENDIANNESS = 0x7004, | 113 | MLX5_REG_HOST_ENDIANNESS = 0x7004, |
| @@ -150,6 +150,11 @@ enum mlx5_dev_event { | |||
| 150 | MLX5_DEV_EVENT_CLIENT_REREG, | 150 | MLX5_DEV_EVENT_CLIENT_REREG, |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | enum mlx5_port_status { | ||
| 154 | MLX5_PORT_UP = 1 << 1, | ||
| 155 | MLX5_PORT_DOWN = 1 << 2, | ||
| 156 | }; | ||
| 157 | |||
| 153 | struct mlx5_uuar_info { | 158 | struct mlx5_uuar_info { |
| 154 | struct mlx5_uar *uars; | 159 | struct mlx5_uar *uars; |
| 155 | int num_uars; | 160 | int num_uars; |
| @@ -269,56 +274,7 @@ struct mlx5_cmd { | |||
| 269 | struct mlx5_port_caps { | 274 | struct mlx5_port_caps { |
| 270 | int gid_table_len; | 275 | int gid_table_len; |
| 271 | int pkey_table_len; | 276 | int pkey_table_len; |
| 272 | }; | 277 | u8 ext_port_cap; |
| 273 | |||
| 274 | struct mlx5_general_caps { | ||
| 275 | u8 log_max_eq; | ||
| 276 | u8 log_max_cq; | ||
| 277 | u8 log_max_qp; | ||
| 278 | u8 log_max_mkey; | ||
| 279 | u8 log_max_pd; | ||
| 280 | u8 log_max_srq; | ||
| 281 | u8 log_max_strq; | ||
| 282 | u8 log_max_mrw_sz; | ||
| 283 | u8 log_max_bsf_list_size; | ||
| 284 | u8 log_max_klm_list_size; | ||
| 285 | u32 max_cqes; | ||
| 286 | int max_wqes; | ||
| 287 | u32 max_eqes; | ||
| 288 | u32 max_indirection; | ||
| 289 | int max_sq_desc_sz; | ||
| 290 | int max_rq_desc_sz; | ||
| 291 | int max_dc_sq_desc_sz; | ||
| 292 | u64 flags; | ||
| 293 | u16 stat_rate_support; | ||
| 294 | int log_max_msg; | ||
| 295 | int num_ports; | ||
| 296 | u8 log_max_ra_res_qp; | ||
| 297 | u8 log_max_ra_req_qp; | ||
| 298 | int max_srq_wqes; | ||
| 299 | int bf_reg_size; | ||
| 300 | int bf_regs_per_page; | ||
| 301 | struct mlx5_port_caps port[MLX5_MAX_PORTS]; | ||
| 302 | u8 ext_port_cap[MLX5_MAX_PORTS]; | ||
| 303 | int max_vf; | ||
| 304 | u32 reserved_lkey; | ||
| 305 | u8 local_ca_ack_delay; | ||
| 306 | u8 log_max_mcg; | ||
| 307 | u32 max_qp_mcg; | ||
| 308 | int min_page_sz; | ||
| 309 | int pd_cap; | ||
| 310 | u32 max_qp_counters; | ||
| 311 | u32 pkey_table_size; | ||
| 312 | u8 log_max_ra_req_dc; | ||
| 313 | u8 log_max_ra_res_dc; | ||
| 314 | u32 uar_sz; | ||
| 315 | u8 min_log_pg_sz; | ||
| 316 | u8 log_max_xrcd; | ||
| 317 | u16 log_uar_page_sz; | ||
| 318 | }; | ||
| 319 | |||
| 320 | struct mlx5_caps { | ||
| 321 | struct mlx5_general_caps gen; | ||
| 322 | }; | 278 | }; |
| 323 | 279 | ||
| 324 | struct mlx5_cmd_mailbox { | 280 | struct mlx5_cmd_mailbox { |
| @@ -334,8 +290,6 @@ struct mlx5_buf_list { | |||
| 334 | 290 | ||
| 335 | struct mlx5_buf { | 291 | struct mlx5_buf { |
| 336 | struct mlx5_buf_list direct; | 292 | struct mlx5_buf_list direct; |
| 337 | struct mlx5_buf_list *page_list; | ||
| 338 | int nbufs; | ||
| 339 | int npages; | 293 | int npages; |
| 340 | int size; | 294 | int size; |
| 341 | u8 page_shift; | 295 | u8 page_shift; |
| @@ -351,7 +305,6 @@ struct mlx5_eq { | |||
| 351 | u8 eqn; | 305 | u8 eqn; |
| 352 | int nent; | 306 | int nent; |
| 353 | u64 mask; | 307 | u64 mask; |
| 354 | char name[MLX5_MAX_EQ_NAME]; | ||
| 355 | struct list_head list; | 308 | struct list_head list; |
| 356 | int index; | 309 | int index; |
| 357 | struct mlx5_rsc_debug *dbg; | 310 | struct mlx5_rsc_debug *dbg; |
| @@ -387,6 +340,8 @@ struct mlx5_core_mr { | |||
| 387 | 340 | ||
| 388 | enum mlx5_res_type { | 341 | enum mlx5_res_type { |
| 389 | MLX5_RES_QP, | 342 | MLX5_RES_QP, |
| 343 | MLX5_RES_SRQ, | ||
| 344 | MLX5_RES_XSRQ, | ||
| 390 | }; | 345 | }; |
| 391 | 346 | ||
| 392 | struct mlx5_core_rsc_common { | 347 | struct mlx5_core_rsc_common { |
| @@ -396,6 +351,7 @@ struct mlx5_core_rsc_common { | |||
| 396 | }; | 351 | }; |
| 397 | 352 | ||
| 398 | struct mlx5_core_srq { | 353 | struct mlx5_core_srq { |
| 354 | struct mlx5_core_rsc_common common; /* must be first */ | ||
| 399 | u32 srqn; | 355 | u32 srqn; |
| 400 | int max; | 356 | int max; |
| 401 | int max_gs; | 357 | int max_gs; |
| @@ -414,7 +370,6 @@ struct mlx5_eq_table { | |||
| 414 | struct mlx5_eq pages_eq; | 370 | struct mlx5_eq pages_eq; |
| 415 | struct mlx5_eq async_eq; | 371 | struct mlx5_eq async_eq; |
| 416 | struct mlx5_eq cmd_eq; | 372 | struct mlx5_eq cmd_eq; |
| 417 | struct msix_entry *msix_arr; | ||
| 418 | int num_comp_vectors; | 373 | int num_comp_vectors; |
| 419 | /* protect EQs list | 374 | /* protect EQs list |
| 420 | */ | 375 | */ |
| @@ -467,9 +422,16 @@ struct mlx5_mr_table { | |||
| 467 | struct radix_tree_root tree; | 422 | struct radix_tree_root tree; |
| 468 | }; | 423 | }; |
| 469 | 424 | ||
| 425 | struct mlx5_irq_info { | ||
| 426 | cpumask_var_t mask; | ||
| 427 | char name[MLX5_MAX_IRQ_NAME]; | ||
| 428 | }; | ||
| 429 | |||
| 470 | struct mlx5_priv { | 430 | struct mlx5_priv { |
| 471 | char name[MLX5_MAX_NAME_LEN]; | 431 | char name[MLX5_MAX_NAME_LEN]; |
| 472 | struct mlx5_eq_table eq_table; | 432 | struct mlx5_eq_table eq_table; |
| 433 | struct msix_entry *msix_arr; | ||
| 434 | struct mlx5_irq_info *irq_info; | ||
| 473 | struct mlx5_uuar_info uuari; | 435 | struct mlx5_uuar_info uuari; |
| 474 | MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); | 436 | MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); |
| 475 | 437 | ||
| @@ -520,7 +482,9 @@ struct mlx5_core_dev { | |||
| 520 | u8 rev_id; | 482 | u8 rev_id; |
| 521 | char board_id[MLX5_BOARD_ID_LEN]; | 483 | char board_id[MLX5_BOARD_ID_LEN]; |
| 522 | struct mlx5_cmd cmd; | 484 | struct mlx5_cmd cmd; |
| 523 | struct mlx5_caps caps; | 485 | struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; |
| 486 | u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; | ||
| 487 | u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; | ||
| 524 | phys_addr_t iseg_base; | 488 | phys_addr_t iseg_base; |
| 525 | struct mlx5_init_seg __iomem *iseg; | 489 | struct mlx5_init_seg __iomem *iseg; |
| 526 | void (*event) (struct mlx5_core_dev *dev, | 490 | void (*event) (struct mlx5_core_dev *dev, |
| @@ -529,6 +493,7 @@ struct mlx5_core_dev { | |||
| 529 | struct mlx5_priv priv; | 493 | struct mlx5_priv priv; |
| 530 | struct mlx5_profile *profile; | 494 | struct mlx5_profile *profile; |
| 531 | atomic_t num_qps; | 495 | atomic_t num_qps; |
| 496 | u32 issi; | ||
| 532 | }; | 497 | }; |
| 533 | 498 | ||
| 534 | struct mlx5_db { | 499 | struct mlx5_db { |
| @@ -549,6 +514,11 @@ enum { | |||
| 549 | MLX5_COMP_EQ_SIZE = 1024, | 514 | MLX5_COMP_EQ_SIZE = 1024, |
| 550 | }; | 515 | }; |
| 551 | 516 | ||
| 517 | enum { | ||
| 518 | MLX5_PTYS_IB = 1 << 0, | ||
| 519 | MLX5_PTYS_EN = 1 << 2, | ||
| 520 | }; | ||
| 521 | |||
| 552 | struct mlx5_db_pgdir { | 522 | struct mlx5_db_pgdir { |
| 553 | struct list_head list; | 523 | struct list_head list; |
| 554 | DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); | 524 | DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); |
| @@ -584,13 +554,44 @@ struct mlx5_pas { | |||
| 584 | u8 log_sz; | 554 | u8 log_sz; |
| 585 | }; | 555 | }; |
| 586 | 556 | ||
| 557 | enum port_state_policy { | ||
| 558 | MLX5_AAA_000 | ||
| 559 | }; | ||
| 560 | |||
| 561 | enum phy_port_state { | ||
| 562 | MLX5_AAA_111 | ||
| 563 | }; | ||
| 564 | |||
| 565 | struct mlx5_hca_vport_context { | ||
| 566 | u32 field_select; | ||
| 567 | bool sm_virt_aware; | ||
| 568 | bool has_smi; | ||
| 569 | bool has_raw; | ||
| 570 | enum port_state_policy policy; | ||
| 571 | enum phy_port_state phys_state; | ||
| 572 | enum ib_port_state vport_state; | ||
| 573 | u8 port_physical_state; | ||
| 574 | u64 sys_image_guid; | ||
| 575 | u64 port_guid; | ||
| 576 | u64 node_guid; | ||
| 577 | u32 cap_mask1; | ||
| 578 | u32 cap_mask1_perm; | ||
| 579 | u32 cap_mask2; | ||
| 580 | u32 cap_mask2_perm; | ||
| 581 | u16 lid; | ||
| 582 | u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ | ||
| 583 | u8 lmc; | ||
| 584 | u8 subnet_timeout; | ||
| 585 | u16 sm_lid; | ||
| 586 | u8 sm_sl; | ||
| 587 | u16 qkey_violation_counter; | ||
| 588 | u16 pkey_violation_counter; | ||
| 589 | bool grh_required; | ||
| 590 | }; | ||
| 591 | |||
| 587 | static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) | 592 | static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) |
| 588 | { | 593 | { |
| 589 | if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) | ||
| 590 | return buf->direct.buf + offset; | 594 | return buf->direct.buf + offset; |
| 591 | else | ||
| 592 | return buf->page_list[offset >> PAGE_SHIFT].buf + | ||
| 593 | (offset & (PAGE_SIZE - 1)); | ||
| 594 | } | 595 | } |
| 595 | 596 | ||
| 596 | extern struct workqueue_struct *mlx5_core_wq; | 597 | extern struct workqueue_struct *mlx5_core_wq; |
| @@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev); | |||
| 654 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); | 655 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); |
| 655 | int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); | 656 | int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); |
| 656 | int mlx5_cmd_status_to_err_v2(void *ptr); | 657 | int mlx5_cmd_status_to_err_v2(void *ptr); |
| 657 | int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, | 658 | int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, |
| 658 | u16 opmod); | 659 | enum mlx5_cap_mode cap_mode); |
| 659 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, | 660 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
| 660 | int out_size); | 661 | int out_size); |
| 661 | int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, | 662 | int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, |
| @@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); | |||
| 665 | int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); | 666 | int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); |
| 666 | int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); | 667 | int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); |
| 667 | int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); | 668 | int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); |
| 669 | int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); | ||
| 670 | void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); | ||
| 668 | void mlx5_health_cleanup(void); | 671 | void mlx5_health_cleanup(void); |
| 669 | void __init mlx5_health_init(void); | 672 | void __init mlx5_health_init(void); |
| 670 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); | 673 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); |
| 671 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); | 674 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); |
| 672 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, | 675 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); |
| 673 | struct mlx5_buf *buf); | ||
| 674 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); | 676 | void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); |
| 675 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, | 677 | struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, |
| 676 | gfp_t flags, int npages); | 678 | gfp_t flags, int npages); |
| 677 | void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, | 679 | void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, |
| 678 | struct mlx5_cmd_mailbox *head); | 680 | struct mlx5_cmd_mailbox *head); |
| 679 | int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | 681 | int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, |
| 680 | struct mlx5_create_srq_mbox_in *in, int inlen); | 682 | struct mlx5_create_srq_mbox_in *in, int inlen, |
| 683 | int is_xrc); | ||
| 681 | int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); | 684 | int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); |
| 682 | int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | 685 | int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, |
| 683 | struct mlx5_query_srq_mbox_out *out); | 686 | struct mlx5_query_srq_mbox_out *out); |
| @@ -696,7 +699,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | |||
| 696 | u32 *mkey); | 699 | u32 *mkey); |
| 697 | int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); | 700 | int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); |
| 698 | int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); | 701 | int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); |
| 699 | int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, | 702 | int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, |
| 700 | u16 opmod, u8 port); | 703 | u16 opmod, u8 port); |
| 701 | void mlx5_pagealloc_init(struct mlx5_core_dev *dev); | 704 | void mlx5_pagealloc_init(struct mlx5_core_dev *dev); |
| 702 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); | 705 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); |
| @@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); | |||
| 734 | int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, | 737 | int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, |
| 735 | int size_in, void *data_out, int size_out, | 738 | int size_in, void *data_out, int size_out, |
| 736 | u16 reg_num, int arg, int write); | 739 | u16 reg_num, int arg, int write); |
| 740 | |||
| 737 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); | 741 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); |
| 742 | int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, | ||
| 743 | int ptys_size, int proto_mask, u8 local_port); | ||
| 744 | int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, | ||
| 745 | u32 *proto_cap, int proto_mask); | ||
| 746 | int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, | ||
| 747 | u32 *proto_admin, int proto_mask); | ||
| 748 | int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, | ||
| 749 | u8 *link_width_oper, u8 local_port); | ||
| 750 | int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, | ||
| 751 | u8 *proto_oper, int proto_mask, | ||
| 752 | u8 local_port); | ||
| 753 | int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, | ||
| 754 | int proto_mask); | ||
| 755 | int mlx5_set_port_status(struct mlx5_core_dev *dev, | ||
| 756 | enum mlx5_port_status status); | ||
| 757 | int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); | ||
| 758 | |||
| 759 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); | ||
| 760 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); | ||
| 761 | void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, | ||
| 762 | u8 port); | ||
| 763 | |||
| 764 | int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, | ||
| 765 | u8 *vl_hw_cap, u8 local_port); | ||
| 738 | 766 | ||
| 739 | int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); | 767 | int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); |
| 740 | void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); | 768 | void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); |
| @@ -799,6 +827,7 @@ struct mlx5_interface { | |||
| 799 | void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | 827 | void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); |
| 800 | int mlx5_register_interface(struct mlx5_interface *intf); | 828 | int mlx5_register_interface(struct mlx5_interface *intf); |
| 801 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 829 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
| 830 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | ||
| 802 | 831 | ||
| 803 | struct mlx5_profile { | 832 | struct mlx5_profile { |
| 804 | u64 mask; | 833 | u64 mask; |
| @@ -809,4 +838,14 @@ struct mlx5_profile { | |||
| 809 | } mr_cache[MAX_MR_CACHE_ENTRIES]; | 838 | } mr_cache[MAX_MR_CACHE_ENTRIES]; |
| 810 | }; | 839 | }; |
| 811 | 840 | ||
| 841 | static inline int mlx5_get_gid_table_len(u16 param) | ||
| 842 | { | ||
| 843 | if (param > 4) { | ||
| 844 | pr_warn("gid table length is zero\n"); | ||
| 845 | return 0; | ||
| 846 | } | ||
| 847 | |||
| 848 | return 8 * (1 << param); | ||
| 849 | } | ||
| 850 | |||
| 812 | #endif /* MLX5_DRIVER_H */ | 851 | #endif /* MLX5_DRIVER_H */ |
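The driver.h hunks above replace the hand-rolled mlx5_general_caps with the MLX5_CAP_* accessors over hca_caps_cur/hca_caps_max, and add a family of port helpers (PTYS queries, MTU, administrative status). A hedged sketch follows; the function names and the overall flow are illustrative, while the capability fields, macros and port helpers are taken from the hunks above.

	/* Editorial sketch: capability check plus port bring-up using the
	 * accessors and helpers declared above.  Flow and names are illustrative.
	 */
	#include <linux/mlx5/driver.h>

	static bool example_dev_supports_lro(struct mlx5_core_dev *mdev)
	{
		/* Both fields appear in the mlx5_ifc layouts added by this series. */
		return MLX5_CAP_GEN(mdev, eth_net_offloads) &&
		       MLX5_CAP_ETH(mdev, lro_cap);
	}

	static int example_port_up_max_mtu(struct mlx5_core_dev *dev, u8 port)
	{
		int max_mtu;
		int err;

		mlx5_query_port_max_mtu(dev, &max_mtu, port);

		err = mlx5_set_port_mtu(dev, max_mtu, port);
		if (err)
			return err;

		return mlx5_set_port_status(dev, MLX5_PORT_UP);
	}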
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h new file mode 100644 index 000000000000..5f922c6d4fc2 --- /dev/null +++ b/include/linux/mlx5/flow_table.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #ifndef MLX5_FLOW_TABLE_H | ||
| 34 | #define MLX5_FLOW_TABLE_H | ||
| 35 | |||
| 36 | #include <linux/mlx5/driver.h> | ||
| 37 | |||
| 38 | struct mlx5_flow_table_group { | ||
| 39 | u8 log_sz; | ||
| 40 | u8 match_criteria_enable; | ||
| 41 | u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; | ||
| 42 | }; | ||
| 43 | |||
| 44 | void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, | ||
| 45 | u16 num_groups, | ||
| 46 | struct mlx5_flow_table_group *group); | ||
| 47 | void mlx5_destroy_flow_table(void *flow_table); | ||
| 48 | int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, | ||
| 49 | void *match_criteria, void *flow_context, | ||
| 50 | u32 *flow_index); | ||
| 51 | void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index); | ||
| 52 | u32 mlx5_get_flow_table_id(void *flow_table); | ||
| 53 | |||
| 54 | #endif /* MLX5_FLOW_TABLE_H */ | ||
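The new flow_table.h above exposes an opaque flow table handle keyed by groups of match criteria. Below is a minimal lifecycle sketch, not part of the patch: the group sizing, table level, the assumption that mlx5_create_flow_table() returns NULL on failure, and the all-zero match criteria are placeholders; only the declarations and the MLX5_MATCH_OUTER_HEADERS / MLX5_FLOW_TABLE_TYPE_NIC_RCV values come from the hunks above.

	/* Editorial sketch: create, query and destroy a NIC RX flow table with
	 * the API declared above.  Group setup is deliberately simplified and
	 * the NULL-on-failure convention is an assumption.
	 */
	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/mlx5/device.h>
	#include <linux/mlx5/flow_table.h>

	static int example_flow_table_lifecycle(struct mlx5_core_dev *dev)
	{
		struct mlx5_flow_table_group group = {
			.log_sz			= 4,	/* room for 16 rules in the group */
			.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
			/* .match_criteria left zeroed for this sketch */
		};
		void *ft;

		ft = mlx5_create_flow_table(dev, 0 /* level */,
					    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					    1 /* num_groups */, &group);
		if (!ft)
			return -ENOMEM;

		pr_debug("created NIC RX flow table id 0x%x\n",
			 mlx5_get_flow_table_id(ft));

		mlx5_destroy_flow_table(ft);
		return 0;
	}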
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cb3ad17edd1f..6d2f6fee041c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. | 2 | * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. |
| 3 | * | 3 | * |
| 4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| @@ -28,12 +28,45 @@ | |||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. | 30 | * SOFTWARE. |
| 31 | */ | 31 | */ |
| 32 | |||
| 33 | #ifndef MLX5_IFC_H | 32 | #ifndef MLX5_IFC_H |
| 34 | #define MLX5_IFC_H | 33 | #define MLX5_IFC_H |
| 35 | 34 | ||
| 36 | enum { | 35 | enum { |
| 36 | MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, | ||
| 37 | MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, | ||
| 38 | MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2, | ||
| 39 | MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3, | ||
| 40 | MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13, | ||
| 41 | MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14, | ||
| 42 | MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c, | ||
| 43 | MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d, | ||
| 44 | MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4, | ||
| 45 | MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5, | ||
| 46 | MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7, | ||
| 47 | MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc, | ||
| 48 | MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10, | ||
| 49 | MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11, | ||
| 50 | MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12, | ||
| 51 | MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8, | ||
| 52 | MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9, | ||
| 53 | MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15, | ||
| 54 | MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19, | ||
| 55 | MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a, | ||
| 56 | MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, | ||
| 57 | MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, | ||
| 58 | MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, | ||
| 59 | MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb | ||
| 60 | }; | ||
| 61 | |||
| 62 | enum { | ||
| 63 | MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, | ||
| 64 | MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, | ||
| 65 | MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, | ||
| 66 | MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 | ||
| 67 | }; | ||
| 68 | |||
| 69 | enum { | ||
| 37 | MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, | 70 | MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, |
| 38 | MLX5_CMD_OP_QUERY_ADAPTER = 0x101, | 71 | MLX5_CMD_OP_QUERY_ADAPTER = 0x101, |
| 39 | MLX5_CMD_OP_INIT_HCA = 0x102, | 72 | MLX5_CMD_OP_INIT_HCA = 0x102, |
| @@ -43,6 +76,8 @@ enum { | |||
| 43 | MLX5_CMD_OP_QUERY_PAGES = 0x107, | 76 | MLX5_CMD_OP_QUERY_PAGES = 0x107, |
| 44 | MLX5_CMD_OP_MANAGE_PAGES = 0x108, | 77 | MLX5_CMD_OP_MANAGE_PAGES = 0x108, |
| 45 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, | 78 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, |
| 79 | MLX5_CMD_OP_QUERY_ISSI = 0x10a, | ||
| 80 | MLX5_CMD_OP_SET_ISSI = 0x10b, | ||
| 46 | MLX5_CMD_OP_CREATE_MKEY = 0x200, | 81 | MLX5_CMD_OP_CREATE_MKEY = 0x200, |
| 47 | MLX5_CMD_OP_QUERY_MKEY = 0x201, | 82 | MLX5_CMD_OP_QUERY_MKEY = 0x201, |
| 48 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, | 83 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, |
| @@ -66,6 +101,7 @@ enum { | |||
| 66 | MLX5_CMD_OP_2ERR_QP = 0x507, | 101 | MLX5_CMD_OP_2ERR_QP = 0x507, |
| 67 | MLX5_CMD_OP_2RST_QP = 0x50a, | 102 | MLX5_CMD_OP_2RST_QP = 0x50a, |
| 68 | MLX5_CMD_OP_QUERY_QP = 0x50b, | 103 | MLX5_CMD_OP_QUERY_QP = 0x50b, |
| 104 | MLX5_CMD_OP_SQD_RTS_QP = 0x50c, | ||
| 69 | MLX5_CMD_OP_INIT2INIT_QP = 0x50e, | 105 | MLX5_CMD_OP_INIT2INIT_QP = 0x50e, |
| 70 | MLX5_CMD_OP_CREATE_PSV = 0x600, | 106 | MLX5_CMD_OP_CREATE_PSV = 0x600, |
| 71 | MLX5_CMD_OP_DESTROY_PSV = 0x601, | 107 | MLX5_CMD_OP_DESTROY_PSV = 0x601, |
| @@ -73,7 +109,10 @@ enum { | |||
| 73 | MLX5_CMD_OP_DESTROY_SRQ = 0x701, | 109 | MLX5_CMD_OP_DESTROY_SRQ = 0x701, |
| 74 | MLX5_CMD_OP_QUERY_SRQ = 0x702, | 110 | MLX5_CMD_OP_QUERY_SRQ = 0x702, |
| 75 | MLX5_CMD_OP_ARM_RQ = 0x703, | 111 | MLX5_CMD_OP_ARM_RQ = 0x703, |
| 76 | MLX5_CMD_OP_RESIZE_SRQ = 0x704, | 112 | MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, |
| 113 | MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, | ||
| 114 | MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, | ||
| 115 | MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, | ||
| 77 | MLX5_CMD_OP_CREATE_DCT = 0x710, | 116 | MLX5_CMD_OP_CREATE_DCT = 0x710, |
| 78 | MLX5_CMD_OP_DESTROY_DCT = 0x711, | 117 | MLX5_CMD_OP_DESTROY_DCT = 0x711, |
| 79 | MLX5_CMD_OP_DRAIN_DCT = 0x712, | 118 | MLX5_CMD_OP_DRAIN_DCT = 0x712, |
| @@ -85,8 +124,12 @@ enum { | |||
| 85 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, | 124 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, |
| 86 | MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, | 125 | MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, |
| 87 | MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, | 126 | MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, |
| 88 | MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, | 127 | MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, |
| 89 | MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, | 128 | MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, |
| 129 | MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, | ||
| 130 | MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, | ||
| 131 | MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, | ||
| 132 | MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, | ||
| 90 | MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, | 133 | MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, |
| 91 | MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, | 134 | MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, |
| 92 | MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, | 135 | MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, |
| @@ -98,7 +141,7 @@ enum { | |||
| 98 | MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, | 141 | MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, |
| 99 | MLX5_CMD_OP_ACCESS_REG = 0x805, | 142 | MLX5_CMD_OP_ACCESS_REG = 0x805, |
| 100 | MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, | 143 | MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, |
| 101 | MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, | 144 | MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, |
| 102 | MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, | 145 | MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, |
| 103 | MLX5_CMD_OP_MAD_IFC = 0x50d, | 146 | MLX5_CMD_OP_MAD_IFC = 0x50d, |
| 104 | MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, | 147 | MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, |
| @@ -106,23 +149,22 @@ enum { | |||
| 106 | MLX5_CMD_OP_NOP = 0x80d, | 149 | MLX5_CMD_OP_NOP = 0x80d, |
| 107 | MLX5_CMD_OP_ALLOC_XRCD = 0x80e, | 150 | MLX5_CMD_OP_ALLOC_XRCD = 0x80e, |
| 108 | MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, | 151 | MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, |
| 109 | MLX5_CMD_OP_SET_BURST_SIZE = 0x812, | 152 | MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, |
| 110 | MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, | 153 | MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, |
| 111 | MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, | 154 | MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, |
| 112 | MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, | 155 | MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, |
| 113 | MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, | 156 | MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, |
| 114 | MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, | 157 | MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, |
| 115 | MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, | 158 | MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, |
| 116 | MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, | 159 | MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, |
| 117 | MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, | 160 | MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, |
| 161 | MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, | ||
| 162 | MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, | ||
| 163 | MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, | ||
| 118 | MLX5_CMD_OP_CREATE_TIR = 0x900, | 164 | MLX5_CMD_OP_CREATE_TIR = 0x900, |
| 119 | MLX5_CMD_OP_MODIFY_TIR = 0x901, | 165 | MLX5_CMD_OP_MODIFY_TIR = 0x901, |
| 120 | MLX5_CMD_OP_DESTROY_TIR = 0x902, | 166 | MLX5_CMD_OP_DESTROY_TIR = 0x902, |
| 121 | MLX5_CMD_OP_QUERY_TIR = 0x903, | 167 | MLX5_CMD_OP_QUERY_TIR = 0x903, |
| 122 | MLX5_CMD_OP_CREATE_TIS = 0x912, | ||
| 123 | MLX5_CMD_OP_MODIFY_TIS = 0x913, | ||
| 124 | MLX5_CMD_OP_DESTROY_TIS = 0x914, | ||
| 125 | MLX5_CMD_OP_QUERY_TIS = 0x915, | ||
| 126 | MLX5_CMD_OP_CREATE_SQ = 0x904, | 168 | MLX5_CMD_OP_CREATE_SQ = 0x904, |
| 127 | MLX5_CMD_OP_MODIFY_SQ = 0x905, | 169 | MLX5_CMD_OP_MODIFY_SQ = 0x905, |
| 128 | MLX5_CMD_OP_DESTROY_SQ = 0x906, | 170 | MLX5_CMD_OP_DESTROY_SQ = 0x906, |
| @@ -135,9 +177,430 @@ enum { | |||
| 135 | MLX5_CMD_OP_MODIFY_RMP = 0x90d, | 177 | MLX5_CMD_OP_MODIFY_RMP = 0x90d, |
| 136 | MLX5_CMD_OP_DESTROY_RMP = 0x90e, | 178 | MLX5_CMD_OP_DESTROY_RMP = 0x90e, |
| 137 | MLX5_CMD_OP_QUERY_RMP = 0x90f, | 179 | MLX5_CMD_OP_QUERY_RMP = 0x90f, |
| 138 | MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, | 180 | MLX5_CMD_OP_CREATE_TIS = 0x912, |
| 139 | MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, | 181 | MLX5_CMD_OP_MODIFY_TIS = 0x913, |
| 140 | MLX5_CMD_OP_MAX = 0x911 | 182 | MLX5_CMD_OP_DESTROY_TIS = 0x914, |
| 183 | MLX5_CMD_OP_QUERY_TIS = 0x915, | ||
| 184 | MLX5_CMD_OP_CREATE_RQT = 0x916, | ||
| 185 | MLX5_CMD_OP_MODIFY_RQT = 0x917, | ||
| 186 | MLX5_CMD_OP_DESTROY_RQT = 0x918, | ||
| 187 | MLX5_CMD_OP_QUERY_RQT = 0x919, | ||
| 188 | MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, | ||
| 189 | MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, | ||
| 190 | MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, | ||
| 191 | MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, | ||
| 192 | MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, | ||
| 193 | MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, | ||
| 194 | MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, | ||
| 195 | MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, | ||
| 196 | MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 | ||
| 197 | }; | ||
| 198 | |||
| 199 | struct mlx5_ifc_flow_table_fields_supported_bits { | ||
| 200 | u8 outer_dmac[0x1]; | ||
| 201 | u8 outer_smac[0x1]; | ||
| 202 | u8 outer_ether_type[0x1]; | ||
| 203 | u8 reserved_0[0x1]; | ||
| 204 | u8 outer_first_prio[0x1]; | ||
| 205 | u8 outer_first_cfi[0x1]; | ||
| 206 | u8 outer_first_vid[0x1]; | ||
| 207 | u8 reserved_1[0x1]; | ||
| 208 | u8 outer_second_prio[0x1]; | ||
| 209 | u8 outer_second_cfi[0x1]; | ||
| 210 | u8 outer_second_vid[0x1]; | ||
| 211 | u8 reserved_2[0x1]; | ||
| 212 | u8 outer_sip[0x1]; | ||
| 213 | u8 outer_dip[0x1]; | ||
| 214 | u8 outer_frag[0x1]; | ||
| 215 | u8 outer_ip_protocol[0x1]; | ||
| 216 | u8 outer_ip_ecn[0x1]; | ||
| 217 | u8 outer_ip_dscp[0x1]; | ||
| 218 | u8 outer_udp_sport[0x1]; | ||
| 219 | u8 outer_udp_dport[0x1]; | ||
| 220 | u8 outer_tcp_sport[0x1]; | ||
| 221 | u8 outer_tcp_dport[0x1]; | ||
| 222 | u8 outer_tcp_flags[0x1]; | ||
| 223 | u8 outer_gre_protocol[0x1]; | ||
| 224 | u8 outer_gre_key[0x1]; | ||
| 225 | u8 outer_vxlan_vni[0x1]; | ||
| 226 | u8 reserved_3[0x5]; | ||
| 227 | u8 source_eswitch_port[0x1]; | ||
| 228 | |||
| 229 | u8 inner_dmac[0x1]; | ||
| 230 | u8 inner_smac[0x1]; | ||
| 231 | u8 inner_ether_type[0x1]; | ||
| 232 | u8 reserved_4[0x1]; | ||
| 233 | u8 inner_first_prio[0x1]; | ||
| 234 | u8 inner_first_cfi[0x1]; | ||
| 235 | u8 inner_first_vid[0x1]; | ||
| 236 | u8 reserved_5[0x1]; | ||
| 237 | u8 inner_second_prio[0x1]; | ||
| 238 | u8 inner_second_cfi[0x1]; | ||
| 239 | u8 inner_second_vid[0x1]; | ||
| 240 | u8 reserved_6[0x1]; | ||
| 241 | u8 inner_sip[0x1]; | ||
| 242 | u8 inner_dip[0x1]; | ||
| 243 | u8 inner_frag[0x1]; | ||
| 244 | u8 inner_ip_protocol[0x1]; | ||
| 245 | u8 inner_ip_ecn[0x1]; | ||
| 246 | u8 inner_ip_dscp[0x1]; | ||
| 247 | u8 inner_udp_sport[0x1]; | ||
| 248 | u8 inner_udp_dport[0x1]; | ||
| 249 | u8 inner_tcp_sport[0x1]; | ||
| 250 | u8 inner_tcp_dport[0x1]; | ||
| 251 | u8 inner_tcp_flags[0x1]; | ||
| 252 | u8 reserved_7[0x9]; | ||
| 253 | |||
| 254 | u8 reserved_8[0x40]; | ||
| 255 | }; | ||
| 256 | |||
| 257 | struct mlx5_ifc_flow_table_prop_layout_bits { | ||
| 258 | u8 ft_support[0x1]; | ||
| 259 | u8 reserved_0[0x1f]; | ||
| 260 | |||
| 261 | u8 reserved_1[0x2]; | ||
| 262 | u8 log_max_ft_size[0x6]; | ||
| 263 | u8 reserved_2[0x10]; | ||
| 264 | u8 max_ft_level[0x8]; | ||
| 265 | |||
| 266 | u8 reserved_3[0x20]; | ||
| 267 | |||
| 268 | u8 reserved_4[0x18]; | ||
| 269 | u8 log_max_ft_num[0x8]; | ||
| 270 | |||
| 271 | u8 reserved_5[0x18]; | ||
| 272 | u8 log_max_destination[0x8]; | ||
| 273 | |||
| 274 | u8 reserved_6[0x18]; | ||
| 275 | u8 log_max_flow[0x8]; | ||
| 276 | |||
| 277 | u8 reserved_7[0x40]; | ||
| 278 | |||
| 279 | struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; | ||
| 280 | |||
| 281 | struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; | ||
| 282 | }; | ||
| 283 | |||
| 284 | struct mlx5_ifc_odp_per_transport_service_cap_bits { | ||
| 285 | u8 send[0x1]; | ||
| 286 | u8 receive[0x1]; | ||
| 287 | u8 write[0x1]; | ||
| 288 | u8 read[0x1]; | ||
| 289 | u8 reserved_0[0x1]; | ||
| 290 | u8 srq_receive[0x1]; | ||
| 291 | u8 reserved_1[0x1a]; | ||
| 292 | }; | ||
| 293 | |||
| 294 | struct mlx5_ifc_fte_match_set_lyr_2_4_bits { | ||
| 295 | u8 smac_47_16[0x20]; | ||
| 296 | |||
| 297 | u8 smac_15_0[0x10]; | ||
| 298 | u8 ethertype[0x10]; | ||
| 299 | |||
| 300 | u8 dmac_47_16[0x20]; | ||
| 301 | |||
| 302 | u8 dmac_15_0[0x10]; | ||
| 303 | u8 first_prio[0x3]; | ||
| 304 | u8 first_cfi[0x1]; | ||
| 305 | u8 first_vid[0xc]; | ||
| 306 | |||
| 307 | u8 ip_protocol[0x8]; | ||
| 308 | u8 ip_dscp[0x6]; | ||
| 309 | u8 ip_ecn[0x2]; | ||
| 310 | u8 vlan_tag[0x1]; | ||
| 311 | u8 reserved_0[0x1]; | ||
| 312 | u8 frag[0x1]; | ||
| 313 | u8 reserved_1[0x4]; | ||
| 314 | u8 tcp_flags[0x9]; | ||
| 315 | |||
| 316 | u8 tcp_sport[0x10]; | ||
| 317 | u8 tcp_dport[0x10]; | ||
| 318 | |||
| 319 | u8 reserved_2[0x20]; | ||
| 320 | |||
| 321 | u8 udp_sport[0x10]; | ||
| 322 | u8 udp_dport[0x10]; | ||
| 323 | |||
| 324 | u8 src_ip[4][0x20]; | ||
| 325 | |||
| 326 | u8 dst_ip[4][0x20]; | ||
| 327 | }; | ||
| 328 | |||
| 329 | struct mlx5_ifc_fte_match_set_misc_bits { | ||
| 330 | u8 reserved_0[0x20]; | ||
| 331 | |||
| 332 | u8 reserved_1[0x10]; | ||
| 333 | u8 source_port[0x10]; | ||
| 334 | |||
| 335 | u8 outer_second_prio[0x3]; | ||
| 336 | u8 outer_second_cfi[0x1]; | ||
| 337 | u8 outer_second_vid[0xc]; | ||
| 338 | u8 inner_second_prio[0x3]; | ||
| 339 | u8 inner_second_cfi[0x1]; | ||
| 340 | u8 inner_second_vid[0xc]; | ||
| 341 | |||
| 342 | u8 outer_second_vlan_tag[0x1]; | ||
| 343 | u8 inner_second_vlan_tag[0x1]; | ||
| 344 | u8 reserved_2[0xe]; | ||
| 345 | u8 gre_protocol[0x10]; | ||
| 346 | |||
| 347 | u8 gre_key_h[0x18]; | ||
| 348 | u8 gre_key_l[0x8]; | ||
| 349 | |||
| 350 | u8 vxlan_vni[0x18]; | ||
| 351 | u8 reserved_3[0x8]; | ||
| 352 | |||
| 353 | u8 reserved_4[0x20]; | ||
| 354 | |||
| 355 | u8 reserved_5[0xc]; | ||
| 356 | u8 outer_ipv6_flow_label[0x14]; | ||
| 357 | |||
| 358 | u8 reserved_6[0xc]; | ||
| 359 | u8 inner_ipv6_flow_label[0x14]; | ||
| 360 | |||
| 361 | u8 reserved_7[0xe0]; | ||
| 362 | }; | ||
| 363 | |||
| 364 | struct mlx5_ifc_cmd_pas_bits { | ||
| 365 | u8 pa_h[0x20]; | ||
| 366 | |||
| 367 | u8 pa_l[0x14]; | ||
| 368 | u8 reserved_0[0xc]; | ||
| 369 | }; | ||
| 370 | |||
| 371 | struct mlx5_ifc_uint64_bits { | ||
| 372 | u8 hi[0x20]; | ||
| 373 | |||
| 374 | u8 lo[0x20]; | ||
| 375 | }; | ||
| 376 | |||
| 377 | enum { | ||
| 378 | MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, | ||
| 379 | MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, | ||
| 380 | MLX5_ADS_STAT_RATE_10GBPS = 0x8, | ||
| 381 | MLX5_ADS_STAT_RATE_30GBPS = 0x9, | ||
| 382 | MLX5_ADS_STAT_RATE_5GBPS = 0xa, | ||
| 383 | MLX5_ADS_STAT_RATE_20GBPS = 0xb, | ||
| 384 | MLX5_ADS_STAT_RATE_40GBPS = 0xc, | ||
| 385 | MLX5_ADS_STAT_RATE_60GBPS = 0xd, | ||
| 386 | MLX5_ADS_STAT_RATE_80GBPS = 0xe, | ||
| 387 | MLX5_ADS_STAT_RATE_120GBPS = 0xf, | ||
| 388 | }; | ||
| 389 | |||
| 390 | struct mlx5_ifc_ads_bits { | ||
| 391 | u8 fl[0x1]; | ||
| 392 | u8 free_ar[0x1]; | ||
| 393 | u8 reserved_0[0xe]; | ||
| 394 | u8 pkey_index[0x10]; | ||
| 395 | |||
| 396 | u8 reserved_1[0x8]; | ||
| 397 | u8 grh[0x1]; | ||
| 398 | u8 mlid[0x7]; | ||
| 399 | u8 rlid[0x10]; | ||
| 400 | |||
| 401 | u8 ack_timeout[0x5]; | ||
| 402 | u8 reserved_2[0x3]; | ||
| 403 | u8 src_addr_index[0x8]; | ||
| 404 | u8 reserved_3[0x4]; | ||
| 405 | u8 stat_rate[0x4]; | ||
| 406 | u8 hop_limit[0x8]; | ||
| 407 | |||
| 408 | u8 reserved_4[0x4]; | ||
| 409 | u8 tclass[0x8]; | ||
| 410 | u8 flow_label[0x14]; | ||
| 411 | |||
| 412 | u8 rgid_rip[16][0x8]; | ||
| 413 | |||
| 414 | u8 reserved_5[0x4]; | ||
| 415 | u8 f_dscp[0x1]; | ||
| 416 | u8 f_ecn[0x1]; | ||
| 417 | u8 reserved_6[0x1]; | ||
| 418 | u8 f_eth_prio[0x1]; | ||
| 419 | u8 ecn[0x2]; | ||
| 420 | u8 dscp[0x6]; | ||
| 421 | u8 udp_sport[0x10]; | ||
| 422 | |||
| 423 | u8 dei_cfi[0x1]; | ||
| 424 | u8 eth_prio[0x3]; | ||
| 425 | u8 sl[0x4]; | ||
| 426 | u8 port[0x8]; | ||
| 427 | u8 rmac_47_32[0x10]; | ||
| 428 | |||
| 429 | u8 rmac_31_0[0x20]; | ||
| 430 | }; | ||
| 431 | |||
| 432 | struct mlx5_ifc_flow_table_nic_cap_bits { | ||
| 433 | u8 reserved_0[0x200]; | ||
| 434 | |||
| 435 | struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; | ||
| 436 | |||
| 437 | u8 reserved_1[0x200]; | ||
| 438 | |||
| 439 | struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; | ||
| 440 | |||
| 441 | struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; | ||
| 442 | |||
| 443 | u8 reserved_2[0x200]; | ||
| 444 | |||
| 445 | struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; | ||
| 446 | |||
| 447 | u8 reserved_3[0x7200]; | ||
| 448 | }; | ||
| 449 | |||
| 450 | struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | ||
| 451 | u8 csum_cap[0x1]; | ||
| 452 | u8 vlan_cap[0x1]; | ||
| 453 | u8 lro_cap[0x1]; | ||
| 454 | u8 lro_psh_flag[0x1]; | ||
| 455 | u8 lro_time_stamp[0x1]; | ||
| 456 | u8 reserved_0[0x6]; | ||
| 457 | u8 max_lso_cap[0x5]; | ||
| 458 | u8 reserved_1[0x4]; | ||
| 459 | u8 rss_ind_tbl_cap[0x4]; | ||
| 460 | u8 reserved_2[0x3]; | ||
| 461 | u8 tunnel_lso_const_out_ip_id[0x1]; | ||
| 462 | u8 reserved_3[0x2]; | ||
| 463 | u8 tunnel_statless_gre[0x1]; | ||
| 464 | u8 tunnel_stateless_vxlan[0x1]; | ||
| 465 | |||
| 466 | u8 reserved_4[0x20]; | ||
| 467 | |||
| 468 | u8 reserved_5[0x10]; | ||
| 469 | u8 lro_min_mss_size[0x10]; | ||
| 470 | |||
| 471 | u8 reserved_6[0x120]; | ||
| 472 | |||
| 473 | u8 lro_timer_supported_periods[4][0x20]; | ||
| 474 | |||
| 475 | u8 reserved_7[0x600]; | ||
| 476 | }; | ||
| 477 | |||
| 478 | struct mlx5_ifc_roce_cap_bits { | ||
| 479 | u8 roce_apm[0x1]; | ||
| 480 | u8 reserved_0[0x1f]; | ||
| 481 | |||
| 482 | u8 reserved_1[0x60]; | ||
| 483 | |||
| 484 | u8 reserved_2[0xc]; | ||
| 485 | u8 l3_type[0x4]; | ||
| 486 | u8 reserved_3[0x8]; | ||
| 487 | u8 roce_version[0x8]; | ||
| 488 | |||
| 489 | u8 reserved_4[0x10]; | ||
| 490 | u8 r_roce_dest_udp_port[0x10]; | ||
| 491 | |||
| 492 | u8 r_roce_max_src_udp_port[0x10]; | ||
| 493 | u8 r_roce_min_src_udp_port[0x10]; | ||
| 494 | |||
| 495 | u8 reserved_5[0x10]; | ||
| 496 | u8 roce_address_table_size[0x10]; | ||
| 497 | |||
| 498 | u8 reserved_6[0x700]; | ||
| 499 | }; | ||
| 500 | |||
| 501 | enum { | ||
| 502 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, | ||
| 503 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, | ||
| 504 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, | ||
| 505 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, | ||
| 506 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, | ||
| 507 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, | ||
| 508 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, | ||
| 509 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, | ||
| 510 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, | ||
| 511 | }; | ||
| 512 | |||
| 513 | enum { | ||
| 514 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, | ||
| 515 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, | ||
| 516 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, | ||
| 517 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, | ||
| 518 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, | ||
| 519 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, | ||
| 520 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, | ||
| 521 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, | ||
| 522 | MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, | ||
| 523 | }; | ||
| 524 | |||
| 525 | struct mlx5_ifc_atomic_caps_bits { | ||
| 526 | u8 reserved_0[0x40]; | ||
| 527 | |||
| 528 | u8 atomic_req_endianness[0x1]; | ||
| 529 | u8 reserved_1[0x1f]; | ||
| 530 | |||
| 531 | u8 reserved_2[0x20]; | ||
| 532 | |||
| 533 | u8 reserved_3[0x10]; | ||
| 534 | u8 atomic_operations[0x10]; | ||
| 535 | |||
| 536 | u8 reserved_4[0x10]; | ||
| 537 | u8 atomic_size_qp[0x10]; | ||
| 538 | |||
| 539 | u8 reserved_5[0x10]; | ||
| 540 | u8 atomic_size_dc[0x10]; | ||
| 541 | |||
| 542 | u8 reserved_6[0x720]; | ||
| 543 | }; | ||
| 544 | |||
| 545 | struct mlx5_ifc_odp_cap_bits { | ||
| 546 | u8 reserved_0[0x40]; | ||
| 547 | |||
| 548 | u8 sig[0x1]; | ||
| 549 | u8 reserved_1[0x1f]; | ||
| 550 | |||
| 551 | u8 reserved_2[0x20]; | ||
| 552 | |||
| 553 | struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; | ||
| 554 | |||
| 555 | struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; | ||
| 556 | |||
| 557 | struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; | ||
| 558 | |||
| 559 | u8 reserved_3[0x720]; | ||
| 560 | }; | ||
| 561 | |||
| 562 | enum { | ||
| 563 | MLX5_WQ_TYPE_LINKED_LIST = 0x0, | ||
| 564 | MLX5_WQ_TYPE_CYCLIC = 0x1, | ||
| 565 | MLX5_WQ_TYPE_STRQ = 0x2, | ||
| 566 | }; | ||
| 567 | |||
| 568 | enum { | ||
| 569 | MLX5_WQ_END_PAD_MODE_NONE = 0x0, | ||
| 570 | MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, | ||
| 571 | }; | ||
| 572 | |||
| 573 | enum { | ||
| 574 | MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, | ||
| 575 | MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, | ||
| 576 | MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, | ||
| 577 | MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, | ||
| 578 | MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, | ||
| 579 | }; | ||
| 580 | |||
| 581 | enum { | ||
| 582 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, | ||
| 583 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, | ||
| 584 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, | ||
| 585 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, | ||
| 586 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, | ||
| 587 | MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, | ||
| 588 | }; | ||
| 589 | |||
| 590 | enum { | ||
| 591 | MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, | ||
| 592 | MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, | ||
| 593 | }; | ||
| 594 | |||
| 595 | enum { | ||
| 596 | MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, | ||
| 597 | MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, | ||
| 598 | MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, | ||
| 599 | }; | ||
| 600 | |||
| 601 | enum { | ||
| 602 | MLX5_CAP_PORT_TYPE_IB = 0x0, | ||
| 603 | MLX5_CAP_PORT_TYPE_ETH = 0x1, | ||
| 141 | }; | 604 | }; |
| 142 | 605 | ||
| 143 | struct mlx5_ifc_cmd_hca_cap_bits { | 606 | struct mlx5_ifc_cmd_hca_cap_bits { |
| @@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
| 148 | u8 reserved_1[0xb]; | 611 | u8 reserved_1[0xb]; |
| 149 | u8 log_max_qp[0x5]; | 612 | u8 log_max_qp[0x5]; |
| 150 | 613 | ||
| 151 | u8 log_max_strq_sz[0x8]; | 614 | u8 reserved_2[0xb]; |
| 152 | u8 reserved_2[0x3]; | 615 | u8 log_max_srq[0x5]; |
| 153 | u8 log_max_srqs[0x5]; | ||
| 154 | u8 reserved_3[0x10]; | 616 | u8 reserved_3[0x10]; |
| 155 | 617 | ||
| 156 | u8 reserved_4[0x8]; | 618 | u8 reserved_4[0x8]; |
| @@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
| 185 | u8 pad_cap[0x1]; | 647 | u8 pad_cap[0x1]; |
| 186 | u8 cc_query_allowed[0x1]; | 648 | u8 cc_query_allowed[0x1]; |
| 187 | u8 cc_modify_allowed[0x1]; | 649 | u8 cc_modify_allowed[0x1]; |
| 188 | u8 reserved_15[0x1d]; | 650 | u8 reserved_15[0xd]; |
| 651 | u8 gid_table_size[0x10]; | ||
| 189 | 652 | ||
| 190 | u8 reserved_16[0x6]; | 653 | u8 out_of_seq_cnt[0x1]; |
| 654 | u8 vport_counters[0x1]; | ||
| 655 | u8 reserved_16[0x4]; | ||
| 191 | u8 max_qp_cnt[0xa]; | 656 | u8 max_qp_cnt[0xa]; |
| 192 | u8 pkey_table_size[0x10]; | 657 | u8 pkey_table_size[0x10]; |
| 193 | 658 | ||
| 194 | u8 eswitch_owner[0x1]; | 659 | u8 vport_group_manager[0x1]; |
| 195 | u8 reserved_17[0xa]; | 660 | u8 vhca_group_manager[0x1]; |
| 661 | u8 ib_virt[0x1]; | ||
| 662 | u8 eth_virt[0x1]; | ||
| 663 | u8 reserved_17[0x1]; | ||
| 664 | u8 ets[0x1]; | ||
| 665 | u8 nic_flow_table[0x1]; | ||
| 666 | u8 reserved_18[0x4]; | ||
| 196 | u8 local_ca_ack_delay[0x5]; | 667 | u8 local_ca_ack_delay[0x5]; |
| 197 | u8 reserved_18[0x8]; | 668 | u8 reserved_19[0x6]; |
| 669 | u8 port_type[0x2]; | ||
| 198 | u8 num_ports[0x8]; | 670 | u8 num_ports[0x8]; |
| 199 | 671 | ||
| 200 | u8 reserved_19[0x3]; | 672 | u8 reserved_20[0x3]; |
| 201 | u8 log_max_msg[0x5]; | 673 | u8 log_max_msg[0x5]; |
| 202 | u8 reserved_20[0x18]; | 674 | u8 reserved_21[0x18]; |
| 203 | 675 | ||
| 204 | u8 stat_rate_support[0x10]; | 676 | u8 stat_rate_support[0x10]; |
| 205 | u8 reserved_21[0x10]; | 677 | u8 reserved_22[0xc]; |
| 678 | u8 cqe_version[0x4]; | ||
| 206 | 679 | ||
| 207 | u8 reserved_22[0x10]; | 680 | u8 compact_address_vector[0x1]; |
| 681 | u8 reserved_23[0xe]; | ||
| 682 | u8 drain_sigerr[0x1]; | ||
| 208 | u8 cmdif_checksum[0x2]; | 683 | u8 cmdif_checksum[0x2]; |
| 209 | u8 sigerr_cqe[0x1]; | 684 | u8 sigerr_cqe[0x1]; |
| 210 | u8 reserved_23[0x1]; | 685 | u8 reserved_24[0x1]; |
| 211 | u8 wq_signature[0x1]; | 686 | u8 wq_signature[0x1]; |
| 212 | u8 sctr_data_cqe[0x1]; | 687 | u8 sctr_data_cqe[0x1]; |
| 213 | u8 reserved_24[0x1]; | 688 | u8 reserved_25[0x1]; |
| 214 | u8 sho[0x1]; | 689 | u8 sho[0x1]; |
| 215 | u8 tph[0x1]; | 690 | u8 tph[0x1]; |
| 216 | u8 rf[0x1]; | 691 | u8 rf[0x1]; |
| 217 | u8 dc[0x1]; | 692 | u8 dct[0x1]; |
| 218 | u8 reserved_25[0x2]; | 693 | u8 reserved_26[0x1]; |
| 694 | u8 eth_net_offloads[0x1]; | ||
| 219 | u8 roce[0x1]; | 695 | u8 roce[0x1]; |
| 220 | u8 atomic[0x1]; | 696 | u8 atomic[0x1]; |
| 221 | u8 rsz_srq[0x1]; | 697 | u8 reserved_27[0x1]; |
| 222 | 698 | ||
| 223 | u8 cq_oi[0x1]; | 699 | u8 cq_oi[0x1]; |
| 224 | u8 cq_resize[0x1]; | 700 | u8 cq_resize[0x1]; |
| 225 | u8 cq_moderation[0x1]; | 701 | u8 cq_moderation[0x1]; |
| 226 | u8 sniffer_rule_flow[0x1]; | 702 | u8 reserved_28[0x3]; |
| 227 | u8 sniffer_rule_vport[0x1]; | 703 | u8 cq_eq_remap[0x1]; |
| 228 | u8 sniffer_rule_phy[0x1]; | ||
| 229 | u8 reserved_26[0x1]; | ||
| 230 | u8 pg[0x1]; | 704 | u8 pg[0x1]; |
| 231 | u8 block_lb_mc[0x1]; | 705 | u8 block_lb_mc[0x1]; |
| 232 | u8 reserved_27[0x3]; | 706 | u8 reserved_29[0x1]; |
| 707 | u8 scqe_break_moderation[0x1]; | ||
| 708 | u8 reserved_30[0x1]; | ||
| 233 | u8 cd[0x1]; | 709 | u8 cd[0x1]; |
| 234 | u8 reserved_28[0x1]; | 710 | u8 reserved_31[0x1]; |
| 235 | u8 apm[0x1]; | 711 | u8 apm[0x1]; |
| 236 | u8 reserved_29[0x7]; | 712 | u8 reserved_32[0x7]; |
| 237 | u8 qkv[0x1]; | 713 | u8 qkv[0x1]; |
| 238 | u8 pkv[0x1]; | 714 | u8 pkv[0x1]; |
| 239 | u8 reserved_30[0x4]; | 715 | u8 reserved_33[0x4]; |
| 240 | u8 xrc[0x1]; | 716 | u8 xrc[0x1]; |
| 241 | u8 ud[0x1]; | 717 | u8 ud[0x1]; |
| 242 | u8 uc[0x1]; | 718 | u8 uc[0x1]; |
| 243 | u8 rc[0x1]; | 719 | u8 rc[0x1]; |
| 244 | 720 | ||
| 245 | u8 reserved_31[0xa]; | 721 | u8 reserved_34[0xa]; |
| 246 | u8 uar_sz[0x6]; | 722 | u8 uar_sz[0x6]; |
| 247 | u8 reserved_32[0x8]; | 723 | u8 reserved_35[0x8]; |
| 248 | u8 log_pg_sz[0x8]; | 724 | u8 log_pg_sz[0x8]; |
| 249 | 725 | ||
| 250 | u8 bf[0x1]; | 726 | u8 bf[0x1]; |
| 251 | u8 reserved_33[0xa]; | 727 | u8 reserved_36[0x1]; |
| 728 | u8 pad_tx_eth_packet[0x1]; | ||
| 729 | u8 reserved_37[0x8]; | ||
| 252 | u8 log_bf_reg_size[0x5]; | 730 | u8 log_bf_reg_size[0x5]; |
| 253 | u8 reserved_34[0x10]; | 731 | u8 reserved_38[0x10]; |
| 254 | 732 | ||
| 255 | u8 reserved_35[0x10]; | 733 | u8 reserved_39[0x10]; |
| 256 | u8 max_wqe_sz_sq[0x10]; | 734 | u8 max_wqe_sz_sq[0x10]; |
| 257 | 735 | ||
| 258 | u8 reserved_36[0x10]; | 736 | u8 reserved_40[0x10]; |
| 259 | u8 max_wqe_sz_rq[0x10]; | 737 | u8 max_wqe_sz_rq[0x10]; |
| 260 | 738 | ||
| 261 | u8 reserved_37[0x10]; | 739 | u8 reserved_41[0x10]; |
| 262 | u8 max_wqe_sz_sq_dc[0x10]; | 740 | u8 max_wqe_sz_sq_dc[0x10]; |
| 263 | 741 | ||
| 264 | u8 reserved_38[0x7]; | 742 | u8 reserved_42[0x7]; |
| 265 | u8 max_qp_mcg[0x19]; | 743 | u8 max_qp_mcg[0x19]; |
| 266 | 744 | ||
| 267 | u8 reserved_39[0x18]; | 745 | u8 reserved_43[0x18]; |
| 268 | u8 log_max_mcg[0x8]; | 746 | u8 log_max_mcg[0x8]; |
| 269 | 747 | ||
| 270 | u8 reserved_40[0xb]; | 748 | u8 reserved_44[0x3]; |
| 749 | u8 log_max_transport_domain[0x5]; | ||
| 750 | u8 reserved_45[0x3]; | ||
| 271 | u8 log_max_pd[0x5]; | 751 | u8 log_max_pd[0x5]; |
| 272 | u8 reserved_41[0xb]; | 752 | u8 reserved_46[0xb]; |
| 273 | u8 log_max_xrcd[0x5]; | 753 | u8 log_max_xrcd[0x5]; |
| 274 | 754 | ||
| 275 | u8 reserved_42[0x20]; | 755 | u8 reserved_47[0x20]; |
| 276 | 756 | ||
| 277 | u8 reserved_43[0x3]; | 757 | u8 reserved_48[0x3]; |
| 278 | u8 log_max_rq[0x5]; | 758 | u8 log_max_rq[0x5]; |
| 279 | u8 reserved_44[0x3]; | 759 | u8 reserved_49[0x3]; |
| 280 | u8 log_max_sq[0x5]; | 760 | u8 log_max_sq[0x5]; |
| 281 | u8 reserved_45[0x3]; | 761 | u8 reserved_50[0x3]; |
| 282 | u8 log_max_tir[0x5]; | 762 | u8 log_max_tir[0x5]; |
| 283 | u8 reserved_46[0x3]; | 763 | u8 reserved_51[0x3]; |
| 284 | u8 log_max_tis[0x5]; | 764 | u8 log_max_tis[0x5]; |
| 285 | 765 | ||
| 286 | u8 reserved_47[0x13]; | 766 | u8 basic_cyclic_rcv_wqe[0x1]; |
| 287 | u8 log_max_rq_per_tir[0x5]; | 767 | u8 reserved_52[0x2]; |
| 288 | u8 reserved_48[0x3]; | 768 | u8 log_max_rmp[0x5]; |
| 769 | u8 reserved_53[0x3]; | ||
| 770 | u8 log_max_rqt[0x5]; | ||
| 771 | u8 reserved_54[0x3]; | ||
| 772 | u8 log_max_rqt_size[0x5]; | ||
| 773 | u8 reserved_55[0x3]; | ||
| 289 | u8 log_max_tis_per_sq[0x5]; | 774 | u8 log_max_tis_per_sq[0x5]; |
| 290 | 775 | ||
| 291 | u8 reserved_49[0xe0]; | 776 | u8 reserved_56[0x3]; |
| 777 | u8 log_max_stride_sz_rq[0x5]; | ||
| 778 | u8 reserved_57[0x3]; | ||
| 779 | u8 log_min_stride_sz_rq[0x5]; | ||
| 780 | u8 reserved_58[0x3]; | ||
| 781 | u8 log_max_stride_sz_sq[0x5]; | ||
| 782 | u8 reserved_59[0x3]; | ||
| 783 | u8 log_min_stride_sz_sq[0x5]; | ||
| 292 | 784 | ||
| 293 | u8 reserved_50[0x10]; | 785 | u8 reserved_60[0x1b]; |
| 786 | u8 log_max_wq_sz[0x5]; | ||
| 787 | |||
| 788 | u8 reserved_61[0xa0]; | ||
| 789 | |||
| 790 | u8 reserved_62[0x3]; | ||
| 791 | u8 log_max_l2_table[0x5]; | ||
| 792 | u8 reserved_63[0x8]; | ||
| 294 | u8 log_uar_page_sz[0x10]; | 793 | u8 log_uar_page_sz[0x10]; |
| 295 | 794 | ||
| 296 | u8 reserved_51[0x100]; | 795 | u8 reserved_64[0x100]; |
| 297 | 796 | ||
| 298 | u8 reserved_52[0x1f]; | 797 | u8 reserved_65[0x1f]; |
| 299 | u8 cqe_zip[0x1]; | 798 | u8 cqe_zip[0x1]; |
| 300 | 799 | ||
| 301 | u8 cqe_zip_timeout[0x10]; | 800 | u8 cqe_zip_timeout[0x10]; |
| 302 | u8 cqe_zip_max_num[0x10]; | 801 | u8 cqe_zip_max_num[0x10]; |
| 303 | 802 | ||
| 304 | u8 reserved_53[0x220]; | 803 | u8 reserved_66[0x220]; |
| 804 | }; | ||
| 805 | |||
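(Aside, not part of the diff: the *_bits layouts in this hunk follow the mlx5 interface convention in which a "u8 name[0xN]" member denotes an N-bit field of a big-endian command/mailbox buffer, so offsetof() gives a field's bit offset and sizeof() its bit width. The driver reads and writes these layouts through the MLX5_GET()/MLX5_SET() macros in include/linux/mlx5/device.h. The sketch below only illustrates that arithmetic; the example struct, helper, and buffer contents are hypothetical stand-ins, not kernel code.)

/*
 * Illustrative sketch, assuming the bit-layout convention described above.
 * Compiles as a standalone userspace program.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

struct example_cap_bits {		/* same convention as mlx5_ifc_*_bits */
	uint8_t reserved_0[0x10];	/* 16-bit reserved field               */
	uint8_t log_max_qp[0x5];	/*  5-bit field at bit offset 16       */
	uint8_t reserved_1[0xb];	/* pad to the 32-bit boundary          */
};

#define BIT_OFF(typ, fld)	offsetof(struct typ, fld)
#define BIT_SZ(typ, fld)	sizeof(((struct typ *)0)->fld)

/* Same arithmetic MLX5_GET() performs: pick the big-endian 32-bit word
 * holding the field, shift it down, and mask to the field width. */
static uint32_t get_bits(const void *buf, size_t bit_off, size_t bit_sz)
{
	uint32_t dw;

	memcpy(&dw, (const uint8_t *)buf + (bit_off / 32) * 4, sizeof(dw));
	dw = ntohl(dw);
	return (dw >> (32 - bit_sz - (bit_off % 32))) &
	       (uint32_t)((1ull << bit_sz) - 1);
}

int main(void)
{
	/* Made-up reply buffer encoding log_max_qp = 0x12. */
	const uint8_t out[4] = { 0x00, 0x00, 0x90, 0x00 };

	printf("log_max_qp = 0x%x\n",
	       (unsigned)get_bits(out, BIT_OFF(example_cap_bits, log_max_qp),
				  BIT_SZ(example_cap_bits, log_max_qp)));
	return 0;
}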
| 806 | enum { | ||
| 807 | MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, | ||
| 808 | MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, | ||
| 809 | }; | ||
| 810 | |||
| 811 | struct mlx5_ifc_dest_format_struct_bits { | ||
| 812 | u8 destination_type[0x8]; | ||
| 813 | u8 destination_id[0x18]; | ||
| 814 | |||
| 815 | u8 reserved_0[0x20]; | ||
| 816 | }; | ||
| 817 | |||
| 818 | struct mlx5_ifc_fte_match_param_bits { | ||
| 819 | struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; | ||
| 820 | |||
| 821 | struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; | ||
| 822 | |||
| 823 | struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; | ||
| 824 | |||
| 825 | u8 reserved_0[0xa00]; | ||
| 826 | }; | ||
| 827 | |||
| 828 | enum { | ||
| 829 | MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, | ||
| 830 | MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, | ||
| 831 | MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, | ||
| 832 | MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, | ||
| 833 | MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, | ||
| 834 | }; | ||
| 835 | |||
| 836 | struct mlx5_ifc_rx_hash_field_select_bits { | ||
| 837 | u8 l3_prot_type[0x1]; | ||
| 838 | u8 l4_prot_type[0x1]; | ||
| 839 | u8 selected_fields[0x1e]; | ||
| 840 | }; | ||
| 841 | |||
| 842 | enum { | ||
| 843 | MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0, | ||
| 844 | MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1, | ||
| 845 | }; | ||
| 846 | |||
| 847 | enum { | ||
| 848 | MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0, | ||
| 849 | MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1, | ||
| 850 | }; | ||
| 851 | |||
| 852 | struct mlx5_ifc_wq_bits { | ||
| 853 | u8 wq_type[0x4]; | ||
| 854 | u8 wq_signature[0x1]; | ||
| 855 | u8 end_padding_mode[0x2]; | ||
| 856 | u8 cd_slave[0x1]; | ||
| 857 | u8 reserved_0[0x18]; | ||
| 858 | |||
| 859 | u8 hds_skip_first_sge[0x1]; | ||
| 860 | u8 log2_hds_buf_size[0x3]; | ||
| 861 | u8 reserved_1[0x7]; | ||
| 862 | u8 page_offset[0x5]; | ||
| 863 | u8 lwm[0x10]; | ||
| 864 | |||
| 865 | u8 reserved_2[0x8]; | ||
| 866 | u8 pd[0x18]; | ||
| 867 | |||
| 868 | u8 reserved_3[0x8]; | ||
| 869 | u8 uar_page[0x18]; | ||
| 870 | |||
| 871 | u8 dbr_addr[0x40]; | ||
| 872 | |||
| 873 | u8 hw_counter[0x20]; | ||
| 874 | |||
| 875 | u8 sw_counter[0x20]; | ||
| 876 | |||
| 877 | u8 reserved_4[0xc]; | ||
| 878 | u8 log_wq_stride[0x4]; | ||
| 879 | u8 reserved_5[0x3]; | ||
| 880 | u8 log_wq_pg_sz[0x5]; | ||
| 881 | u8 reserved_6[0x3]; | ||
| 882 | u8 log_wq_sz[0x5]; | ||
| 883 | |||
| 884 | u8 reserved_7[0x4e0]; | ||
| 885 | |||
| 886 | struct mlx5_ifc_cmd_pas_bits pas[0]; | ||
| 887 | }; | ||
| 888 | |||
| 889 | struct mlx5_ifc_rq_num_bits { | ||
| 890 | u8 reserved_0[0x8]; | ||
| 891 | u8 rq_num[0x18]; | ||
| 892 | }; | ||
| 893 | |||
| 894 | struct mlx5_ifc_mac_address_layout_bits { | ||
| 895 | u8 reserved_0[0x10]; | ||
| 896 | u8 mac_addr_47_32[0x10]; | ||
| 897 | |||
| 898 | u8 mac_addr_31_0[0x20]; | ||
| 899 | }; | ||
| 900 | |||
| 901 | struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { | ||
| 902 | u8 reserved_0[0xa0]; | ||
| 903 | |||
| 904 | u8 min_time_between_cnps[0x20]; | ||
| 905 | |||
| 906 | u8 reserved_1[0x12]; | ||
| 907 | u8 cnp_dscp[0x6]; | ||
| 908 | u8 reserved_2[0x5]; | ||
| 909 | u8 cnp_802p_prio[0x3]; | ||
| 910 | |||
| 911 | u8 reserved_3[0x720]; | ||
| 912 | }; | ||
| 913 | |||
| 914 | struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { | ||
| 915 | u8 reserved_0[0x60]; | ||
| 916 | |||
| 917 | u8 reserved_1[0x4]; | ||
| 918 | u8 clamp_tgt_rate[0x1]; | ||
| 919 | u8 reserved_2[0x3]; | ||
| 920 | u8 clamp_tgt_rate_after_time_inc[0x1]; | ||
| 921 | u8 reserved_3[0x17]; | ||
| 922 | |||
| 923 | u8 reserved_4[0x20]; | ||
| 924 | |||
| 925 | u8 rpg_time_reset[0x20]; | ||
| 926 | |||
| 927 | u8 rpg_byte_reset[0x20]; | ||
| 928 | |||
| 929 | u8 rpg_threshold[0x20]; | ||
| 930 | |||
| 931 | u8 rpg_max_rate[0x20]; | ||
| 932 | |||
| 933 | u8 rpg_ai_rate[0x20]; | ||
| 934 | |||
| 935 | u8 rpg_hai_rate[0x20]; | ||
| 936 | |||
| 937 | u8 rpg_gd[0x20]; | ||
| 938 | |||
| 939 | u8 rpg_min_dec_fac[0x20]; | ||
| 940 | |||
| 941 | u8 rpg_min_rate[0x20]; | ||
| 942 | |||
| 943 | u8 reserved_5[0xe0]; | ||
| 944 | |||
| 945 | u8 rate_to_set_on_first_cnp[0x20]; | ||
| 946 | |||
| 947 | u8 dce_tcp_g[0x20]; | ||
| 948 | |||
| 949 | u8 dce_tcp_rtt[0x20]; | ||
| 950 | |||
| 951 | u8 rate_reduce_monitor_period[0x20]; | ||
| 952 | |||
| 953 | u8 reserved_6[0x20]; | ||
| 954 | |||
| 955 | u8 initial_alpha_value[0x20]; | ||
| 956 | |||
| 957 | u8 reserved_7[0x4a0]; | ||
| 958 | }; | ||
| 959 | |||
| 960 | struct mlx5_ifc_cong_control_802_1qau_rp_bits { | ||
| 961 | u8 reserved_0[0x80]; | ||
| 962 | |||
| 963 | u8 rppp_max_rps[0x20]; | ||
| 964 | |||
| 965 | u8 rpg_time_reset[0x20]; | ||
| 966 | |||
| 967 | u8 rpg_byte_reset[0x20]; | ||
| 968 | |||
| 969 | u8 rpg_threshold[0x20]; | ||
| 970 | |||
| 971 | u8 rpg_max_rate[0x20]; | ||
| 972 | |||
| 973 | u8 rpg_ai_rate[0x20]; | ||
| 974 | |||
| 975 | u8 rpg_hai_rate[0x20]; | ||
| 976 | |||
| 977 | u8 rpg_gd[0x20]; | ||
| 978 | |||
| 979 | u8 rpg_min_dec_fac[0x20]; | ||
| 980 | |||
| 981 | u8 rpg_min_rate[0x20]; | ||
| 982 | |||
| 983 | u8 reserved_1[0x640]; | ||
| 984 | }; | ||
| 985 | |||
| 986 | enum { | ||
| 987 | MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, | ||
| 988 | MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, | ||
| 989 | MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, | ||
| 990 | }; | ||
| 991 | |||
| 992 | struct mlx5_ifc_resize_field_select_bits { | ||
| 993 | u8 resize_field_select[0x20]; | ||
| 994 | }; | ||
| 995 | |||
| 996 | enum { | ||
| 997 | MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, | ||
| 998 | MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, | ||
| 999 | MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, | ||
| 1000 | MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, | ||
| 1001 | }; | ||
| 1002 | |||
| 1003 | struct mlx5_ifc_modify_field_select_bits { | ||
| 1004 | u8 modify_field_select[0x20]; | ||
| 1005 | }; | ||
| 1006 | |||
| 1007 | struct mlx5_ifc_field_select_r_roce_np_bits { | ||
| 1008 | u8 field_select_r_roce_np[0x20]; | ||
| 1009 | }; | ||
| 1010 | |||
| 1011 | struct mlx5_ifc_field_select_r_roce_rp_bits { | ||
| 1012 | u8 field_select_r_roce_rp[0x20]; | ||
| 1013 | }; | ||
| 1014 | |||
| 1015 | enum { | ||
| 1016 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, | ||
| 1017 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, | ||
| 1018 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, | ||
| 1019 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, | ||
| 1020 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, | ||
| 1021 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, | ||
| 1022 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, | ||
| 1023 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, | ||
| 1024 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, | ||
| 1025 | MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, | ||
| 1026 | }; | ||
| 1027 | |||
| 1028 | struct mlx5_ifc_field_select_802_1qau_rp_bits { | ||
| 1029 | u8 field_select_8021qaurp[0x20]; | ||
| 1030 | }; | ||
| 1031 | |||
| 1032 | struct mlx5_ifc_phys_layer_cntrs_bits { | ||
| 1033 | u8 time_since_last_clear_high[0x20]; | ||
| 1034 | |||
| 1035 | u8 time_since_last_clear_low[0x20]; | ||
| 1036 | |||
| 1037 | u8 symbol_errors_high[0x20]; | ||
| 1038 | |||
| 1039 | u8 symbol_errors_low[0x20]; | ||
| 1040 | |||
| 1041 | u8 sync_headers_errors_high[0x20]; | ||
| 1042 | |||
| 1043 | u8 sync_headers_errors_low[0x20]; | ||
| 1044 | |||
| 1045 | u8 edpl_bip_errors_lane0_high[0x20]; | ||
| 1046 | |||
| 1047 | u8 edpl_bip_errors_lane0_low[0x20]; | ||
| 1048 | |||
| 1049 | u8 edpl_bip_errors_lane1_high[0x20]; | ||
| 1050 | |||
| 1051 | u8 edpl_bip_errors_lane1_low[0x20]; | ||
| 1052 | |||
| 1053 | u8 edpl_bip_errors_lane2_high[0x20]; | ||
| 1054 | |||
| 1055 | u8 edpl_bip_errors_lane2_low[0x20]; | ||
| 1056 | |||
| 1057 | u8 edpl_bip_errors_lane3_high[0x20]; | ||
| 1058 | |||
| 1059 | u8 edpl_bip_errors_lane3_low[0x20]; | ||
| 1060 | |||
| 1061 | u8 fc_fec_corrected_blocks_lane0_high[0x20]; | ||
| 1062 | |||
| 1063 | u8 fc_fec_corrected_blocks_lane0_low[0x20]; | ||
| 1064 | |||
| 1065 | u8 fc_fec_corrected_blocks_lane1_high[0x20]; | ||
| 1066 | |||
| 1067 | u8 fc_fec_corrected_blocks_lane1_low[0x20]; | ||
| 1068 | |||
| 1069 | u8 fc_fec_corrected_blocks_lane2_high[0x20]; | ||
| 1070 | |||
| 1071 | u8 fc_fec_corrected_blocks_lane2_low[0x20]; | ||
| 1072 | |||
| 1073 | u8 fc_fec_corrected_blocks_lane3_high[0x20]; | ||
| 1074 | |||
| 1075 | u8 fc_fec_corrected_blocks_lane3_low[0x20]; | ||
| 1076 | |||
| 1077 | u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; | ||
| 1078 | |||
| 1079 | u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; | ||
| 1080 | |||
| 1081 | u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; | ||
| 1082 | |||
| 1083 | u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; | ||
| 1084 | |||
| 1085 | u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; | ||
| 1086 | |||
| 1087 | u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; | ||
| 1088 | |||
| 1089 | u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; | ||
| 1090 | |||
| 1091 | u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; | ||
| 1092 | |||
| 1093 | u8 rs_fec_corrected_blocks_high[0x20]; | ||
| 1094 | |||
| 1095 | u8 rs_fec_corrected_blocks_low[0x20]; | ||
| 1096 | |||
| 1097 | u8 rs_fec_uncorrectable_blocks_high[0x20]; | ||
| 1098 | |||
| 1099 | u8 rs_fec_uncorrectable_blocks_low[0x20]; | ||
| 1100 | |||
| 1101 | u8 rs_fec_no_errors_blocks_high[0x20]; | ||
| 1102 | |||
| 1103 | u8 rs_fec_no_errors_blocks_low[0x20]; | ||
| 1104 | |||
| 1105 | u8 rs_fec_single_error_blocks_high[0x20]; | ||
| 1106 | |||
| 1107 | u8 rs_fec_single_error_blocks_low[0x20]; | ||
| 1108 | |||
| 1109 | u8 rs_fec_corrected_symbols_total_high[0x20]; | ||
| 1110 | |||
| 1111 | u8 rs_fec_corrected_symbols_total_low[0x20]; | ||
| 1112 | |||
| 1113 | u8 rs_fec_corrected_symbols_lane0_high[0x20]; | ||
| 1114 | |||
| 1115 | u8 rs_fec_corrected_symbols_lane0_low[0x20]; | ||
| 1116 | |||
| 1117 | u8 rs_fec_corrected_symbols_lane1_high[0x20]; | ||
| 1118 | |||
| 1119 | u8 rs_fec_corrected_symbols_lane1_low[0x20]; | ||
| 1120 | |||
| 1121 | u8 rs_fec_corrected_symbols_lane2_high[0x20]; | ||
| 1122 | |||
| 1123 | u8 rs_fec_corrected_symbols_lane2_low[0x20]; | ||
| 1124 | |||
| 1125 | u8 rs_fec_corrected_symbols_lane3_high[0x20]; | ||
| 1126 | |||
| 1127 | u8 rs_fec_corrected_symbols_lane3_low[0x20]; | ||
| 1128 | |||
| 1129 | u8 link_down_events[0x20]; | ||
| 1130 | |||
| 1131 | u8 successful_recovery_events[0x20]; | ||
| 1132 | |||
| 1133 | u8 reserved_0[0x180]; | ||
| 1134 | }; | ||
| 1135 | |||
| 1136 | struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { | ||
| 1137 | u8 transmit_queue_high[0x20]; | ||
| 1138 | |||
| 1139 | u8 transmit_queue_low[0x20]; | ||
| 1140 | |||
| 1141 | u8 reserved_0[0x780]; | ||
| 1142 | }; | ||
| 1143 | |||
| 1144 | struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { | ||
| 1145 | u8 rx_octets_high[0x20]; | ||
| 1146 | |||
| 1147 | u8 rx_octets_low[0x20]; | ||
| 1148 | |||
| 1149 | u8 reserved_0[0xc0]; | ||
| 1150 | |||
| 1151 | u8 rx_frames_high[0x20]; | ||
| 1152 | |||
| 1153 | u8 rx_frames_low[0x20]; | ||
| 1154 | |||
| 1155 | u8 tx_octets_high[0x20]; | ||
| 1156 | |||
| 1157 | u8 tx_octets_low[0x20]; | ||
| 1158 | |||
| 1159 | u8 reserved_1[0xc0]; | ||
| 1160 | |||
| 1161 | u8 tx_frames_high[0x20]; | ||
| 1162 | |||
| 1163 | u8 tx_frames_low[0x20]; | ||
| 1164 | |||
| 1165 | u8 rx_pause_high[0x20]; | ||
| 1166 | |||
| 1167 | u8 rx_pause_low[0x20]; | ||
| 1168 | |||
| 1169 | u8 rx_pause_duration_high[0x20]; | ||
| 1170 | |||
| 1171 | u8 rx_pause_duration_low[0x20]; | ||
| 1172 | |||
| 1173 | u8 tx_pause_high[0x20]; | ||
| 1174 | |||
| 1175 | u8 tx_pause_low[0x20]; | ||
| 1176 | |||
| 1177 | u8 tx_pause_duration_high[0x20]; | ||
| 1178 | |||
| 1179 | u8 tx_pause_duration_low[0x20]; | ||
| 1180 | |||
| 1181 | u8 rx_pause_transition_high[0x20]; | ||
| 1182 | |||
| 1183 | u8 rx_pause_transition_low[0x20]; | ||
| 1184 | |||
| 1185 | u8 reserved_2[0x400]; | ||
| 1186 | }; | ||
| 1187 | |||
| 1188 | struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { | ||
| 1189 | u8 port_transmit_wait_high[0x20]; | ||
| 1190 | |||
| 1191 | u8 port_transmit_wait_low[0x20]; | ||
| 1192 | |||
| 1193 | u8 reserved_0[0x780]; | ||
| 1194 | }; | ||
| 1195 | |||
| 1196 | struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { | ||
| 1197 | u8 dot3stats_alignment_errors_high[0x20]; | ||
| 1198 | |||
| 1199 | u8 dot3stats_alignment_errors_low[0x20]; | ||
| 1200 | |||
| 1201 | u8 dot3stats_fcs_errors_high[0x20]; | ||
| 1202 | |||
| 1203 | u8 dot3stats_fcs_errors_low[0x20]; | ||
| 1204 | |||
| 1205 | u8 dot3stats_single_collision_frames_high[0x20]; | ||
| 1206 | |||
| 1207 | u8 dot3stats_single_collision_frames_low[0x20]; | ||
| 1208 | |||
| 1209 | u8 dot3stats_multiple_collision_frames_high[0x20]; | ||
| 1210 | |||
| 1211 | u8 dot3stats_multiple_collision_frames_low[0x20]; | ||
| 1212 | |||
| 1213 | u8 dot3stats_sqe_test_errors_high[0x20]; | ||
| 1214 | |||
| 1215 | u8 dot3stats_sqe_test_errors_low[0x20]; | ||
| 1216 | |||
| 1217 | u8 dot3stats_deferred_transmissions_high[0x20]; | ||
| 1218 | |||
| 1219 | u8 dot3stats_deferred_transmissions_low[0x20]; | ||
| 1220 | |||
| 1221 | u8 dot3stats_late_collisions_high[0x20]; | ||
| 1222 | |||
| 1223 | u8 dot3stats_late_collisions_low[0x20]; | ||
| 1224 | |||
| 1225 | u8 dot3stats_excessive_collisions_high[0x20]; | ||
| 1226 | |||
| 1227 | u8 dot3stats_excessive_collisions_low[0x20]; | ||
| 1228 | |||
| 1229 | u8 dot3stats_internal_mac_transmit_errors_high[0x20]; | ||
| 1230 | |||
| 1231 | u8 dot3stats_internal_mac_transmit_errors_low[0x20]; | ||
| 1232 | |||
| 1233 | u8 dot3stats_carrier_sense_errors_high[0x20]; | ||
| 1234 | |||
| 1235 | u8 dot3stats_carrier_sense_errors_low[0x20]; | ||
| 1236 | |||
| 1237 | u8 dot3stats_frame_too_longs_high[0x20]; | ||
| 1238 | |||
| 1239 | u8 dot3stats_frame_too_longs_low[0x20]; | ||
| 1240 | |||
| 1241 | u8 dot3stats_internal_mac_receive_errors_high[0x20]; | ||
| 1242 | |||
| 1243 | u8 dot3stats_internal_mac_receive_errors_low[0x20]; | ||
| 1244 | |||
| 1245 | u8 dot3stats_symbol_errors_high[0x20]; | ||
| 1246 | |||
| 1247 | u8 dot3stats_symbol_errors_low[0x20]; | ||
| 1248 | |||
| 1249 | u8 dot3control_in_unknown_opcodes_high[0x20]; | ||
| 1250 | |||
| 1251 | u8 dot3control_in_unknown_opcodes_low[0x20]; | ||
| 1252 | |||
| 1253 | u8 dot3in_pause_frames_high[0x20]; | ||
| 1254 | |||
| 1255 | u8 dot3in_pause_frames_low[0x20]; | ||
| 1256 | |||
| 1257 | u8 dot3out_pause_frames_high[0x20]; | ||
| 1258 | |||
| 1259 | u8 dot3out_pause_frames_low[0x20]; | ||
| 1260 | |||
| 1261 | u8 reserved_0[0x3c0]; | ||
| 1262 | }; | ||
| 1263 | |||
| 1264 | struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { | ||
| 1265 | u8 ether_stats_drop_events_high[0x20]; | ||
| 1266 | |||
| 1267 | u8 ether_stats_drop_events_low[0x20]; | ||
| 1268 | |||
| 1269 | u8 ether_stats_octets_high[0x20]; | ||
| 1270 | |||
| 1271 | u8 ether_stats_octets_low[0x20]; | ||
| 1272 | |||
| 1273 | u8 ether_stats_pkts_high[0x20]; | ||
| 1274 | |||
| 1275 | u8 ether_stats_pkts_low[0x20]; | ||
| 1276 | |||
| 1277 | u8 ether_stats_broadcast_pkts_high[0x20]; | ||
| 1278 | |||
| 1279 | u8 ether_stats_broadcast_pkts_low[0x20]; | ||
| 1280 | |||
| 1281 | u8 ether_stats_multicast_pkts_high[0x20]; | ||
| 1282 | |||
| 1283 | u8 ether_stats_multicast_pkts_low[0x20]; | ||
| 1284 | |||
| 1285 | u8 ether_stats_crc_align_errors_high[0x20]; | ||
| 1286 | |||
| 1287 | u8 ether_stats_crc_align_errors_low[0x20]; | ||
| 1288 | |||
| 1289 | u8 ether_stats_undersize_pkts_high[0x20]; | ||
| 1290 | |||
| 1291 | u8 ether_stats_undersize_pkts_low[0x20]; | ||
| 1292 | |||
| 1293 | u8 ether_stats_oversize_pkts_high[0x20]; | ||
| 1294 | |||
| 1295 | u8 ether_stats_oversize_pkts_low[0x20]; | ||
| 1296 | |||
| 1297 | u8 ether_stats_fragments_high[0x20]; | ||
| 1298 | |||
| 1299 | u8 ether_stats_fragments_low[0x20]; | ||
| 1300 | |||
| 1301 | u8 ether_stats_jabbers_high[0x20]; | ||
| 1302 | |||
| 1303 | u8 ether_stats_jabbers_low[0x20]; | ||
| 1304 | |||
| 1305 | u8 ether_stats_collisions_high[0x20]; | ||
| 1306 | |||
| 1307 | u8 ether_stats_collisions_low[0x20]; | ||
| 1308 | |||
| 1309 | u8 ether_stats_pkts64octets_high[0x20]; | ||
| 1310 | |||
| 1311 | u8 ether_stats_pkts64octets_low[0x20]; | ||
| 1312 | |||
| 1313 | u8 ether_stats_pkts65to127octets_high[0x20]; | ||
| 1314 | |||
| 1315 | u8 ether_stats_pkts65to127octets_low[0x20]; | ||
| 1316 | |||
| 1317 | u8 ether_stats_pkts128to255octets_high[0x20]; | ||
| 1318 | |||
| 1319 | u8 ether_stats_pkts128to255octets_low[0x20]; | ||
| 1320 | |||
| 1321 | u8 ether_stats_pkts256to511octets_high[0x20]; | ||
| 1322 | |||
| 1323 | u8 ether_stats_pkts256to511octets_low[0x20]; | ||
| 1324 | |||
| 1325 | u8 ether_stats_pkts512to1023octets_high[0x20]; | ||
| 1326 | |||
| 1327 | u8 ether_stats_pkts512to1023octets_low[0x20]; | ||
| 1328 | |||
| 1329 | u8 ether_stats_pkts1024to1518octets_high[0x20]; | ||
| 1330 | |||
| 1331 | u8 ether_stats_pkts1024to1518octets_low[0x20]; | ||
| 1332 | |||
| 1333 | u8 ether_stats_pkts1519to2047octets_high[0x20]; | ||
| 1334 | |||
| 1335 | u8 ether_stats_pkts1519to2047octets_low[0x20]; | ||
| 1336 | |||
| 1337 | u8 ether_stats_pkts2048to4095octets_high[0x20]; | ||
| 1338 | |||
| 1339 | u8 ether_stats_pkts2048to4095octets_low[0x20]; | ||
| 1340 | |||
| 1341 | u8 ether_stats_pkts4096to8191octets_high[0x20]; | ||
| 1342 | |||
| 1343 | u8 ether_stats_pkts4096to8191octets_low[0x20]; | ||
| 1344 | |||
| 1345 | u8 ether_stats_pkts8192to10239octets_high[0x20]; | ||
| 1346 | |||
| 1347 | u8 ether_stats_pkts8192to10239octets_low[0x20]; | ||
| 1348 | |||
| 1349 | u8 reserved_0[0x280]; | ||
| 1350 | }; | ||
| 1351 | |||
| 1352 | struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { | ||
| 1353 | u8 if_in_octets_high[0x20]; | ||
| 1354 | |||
| 1355 | u8 if_in_octets_low[0x20]; | ||
| 1356 | |||
| 1357 | u8 if_in_ucast_pkts_high[0x20]; | ||
| 1358 | |||
| 1359 | u8 if_in_ucast_pkts_low[0x20]; | ||
| 1360 | |||
| 1361 | u8 if_in_discards_high[0x20]; | ||
| 1362 | |||
| 1363 | u8 if_in_discards_low[0x20]; | ||
| 1364 | |||
| 1365 | u8 if_in_errors_high[0x20]; | ||
| 1366 | |||
| 1367 | u8 if_in_errors_low[0x20]; | ||
| 1368 | |||
| 1369 | u8 if_in_unknown_protos_high[0x20]; | ||
| 1370 | |||
| 1371 | u8 if_in_unknown_protos_low[0x20]; | ||
| 1372 | |||
| 1373 | u8 if_out_octets_high[0x20]; | ||
| 1374 | |||
| 1375 | u8 if_out_octets_low[0x20]; | ||
| 1376 | |||
| 1377 | u8 if_out_ucast_pkts_high[0x20]; | ||
| 1378 | |||
| 1379 | u8 if_out_ucast_pkts_low[0x20]; | ||
| 1380 | |||
| 1381 | u8 if_out_discards_high[0x20]; | ||
| 1382 | |||
| 1383 | u8 if_out_discards_low[0x20]; | ||
| 1384 | |||
| 1385 | u8 if_out_errors_high[0x20]; | ||
| 1386 | |||
| 1387 | u8 if_out_errors_low[0x20]; | ||
| 1388 | |||
| 1389 | u8 if_in_multicast_pkts_high[0x20]; | ||
| 1390 | |||
| 1391 | u8 if_in_multicast_pkts_low[0x20]; | ||
| 1392 | |||
| 1393 | u8 if_in_broadcast_pkts_high[0x20]; | ||
| 1394 | |||
| 1395 | u8 if_in_broadcast_pkts_low[0x20]; | ||
| 1396 | |||
| 1397 | u8 if_out_multicast_pkts_high[0x20]; | ||
| 1398 | |||
| 1399 | u8 if_out_multicast_pkts_low[0x20]; | ||
| 1400 | |||
| 1401 | u8 if_out_broadcast_pkts_high[0x20]; | ||
| 1402 | |||
| 1403 | u8 if_out_broadcast_pkts_low[0x20]; | ||
| 1404 | |||
| 1405 | u8 reserved_0[0x480]; | ||
| 1406 | }; | ||
| 1407 | |||
| 1408 | struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { | ||
| 1409 | u8 a_frames_transmitted_ok_high[0x20]; | ||
| 1410 | |||
| 1411 | u8 a_frames_transmitted_ok_low[0x20]; | ||
| 1412 | |||
| 1413 | u8 a_frames_received_ok_high[0x20]; | ||
| 1414 | |||
| 1415 | u8 a_frames_received_ok_low[0x20]; | ||
| 1416 | |||
| 1417 | u8 a_frame_check_sequence_errors_high[0x20]; | ||
| 1418 | |||
| 1419 | u8 a_frame_check_sequence_errors_low[0x20]; | ||
| 1420 | |||
| 1421 | u8 a_alignment_errors_high[0x20]; | ||
| 1422 | |||
| 1423 | u8 a_alignment_errors_low[0x20]; | ||
| 1424 | |||
| 1425 | u8 a_octets_transmitted_ok_high[0x20]; | ||
| 1426 | |||
| 1427 | u8 a_octets_transmitted_ok_low[0x20]; | ||
| 1428 | |||
| 1429 | u8 a_octets_received_ok_high[0x20]; | ||
| 1430 | |||
| 1431 | u8 a_octets_received_ok_low[0x20]; | ||
| 1432 | |||
| 1433 | u8 a_multicast_frames_xmitted_ok_high[0x20]; | ||
| 1434 | |||
| 1435 | u8 a_multicast_frames_xmitted_ok_low[0x20]; | ||
| 1436 | |||
| 1437 | u8 a_broadcast_frames_xmitted_ok_high[0x20]; | ||
| 1438 | |||
| 1439 | u8 a_broadcast_frames_xmitted_ok_low[0x20]; | ||
| 1440 | |||
| 1441 | u8 a_multicast_frames_received_ok_high[0x20]; | ||
| 1442 | |||
| 1443 | u8 a_multicast_frames_received_ok_low[0x20]; | ||
| 1444 | |||
| 1445 | u8 a_broadcast_frames_received_ok_high[0x20]; | ||
| 1446 | |||
| 1447 | u8 a_broadcast_frames_received_ok_low[0x20]; | ||
| 1448 | |||
| 1449 | u8 a_in_range_length_errors_high[0x20]; | ||
| 1450 | |||
| 1451 | u8 a_in_range_length_errors_low[0x20]; | ||
| 1452 | |||
| 1453 | u8 a_out_of_range_length_field_high[0x20]; | ||
| 1454 | |||
| 1455 | u8 a_out_of_range_length_field_low[0x20]; | ||
| 1456 | |||
| 1457 | u8 a_frame_too_long_errors_high[0x20]; | ||
| 1458 | |||
| 1459 | u8 a_frame_too_long_errors_low[0x20]; | ||
| 1460 | |||
| 1461 | u8 a_symbol_error_during_carrier_high[0x20]; | ||
| 1462 | |||
| 1463 | u8 a_symbol_error_during_carrier_low[0x20]; | ||
| 1464 | |||
| 1465 | u8 a_mac_control_frames_transmitted_high[0x20]; | ||
| 1466 | |||
| 1467 | u8 a_mac_control_frames_transmitted_low[0x20]; | ||
| 1468 | |||
| 1469 | u8 a_mac_control_frames_received_high[0x20]; | ||
| 1470 | |||
| 1471 | u8 a_mac_control_frames_received_low[0x20]; | ||
| 1472 | |||
| 1473 | u8 a_unsupported_opcodes_received_high[0x20]; | ||
| 1474 | |||
| 1475 | u8 a_unsupported_opcodes_received_low[0x20]; | ||
| 1476 | |||
| 1477 | u8 a_pause_mac_ctrl_frames_received_high[0x20]; | ||
| 1478 | |||
| 1479 | u8 a_pause_mac_ctrl_frames_received_low[0x20]; | ||
| 1480 | |||
| 1481 | u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; | ||
| 1482 | |||
| 1483 | u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; | ||
| 1484 | |||
| 1485 | u8 reserved_0[0x300]; | ||
| 1486 | }; | ||
| 1487 | |||
| 1488 | struct mlx5_ifc_cmd_inter_comp_event_bits { | ||
| 1489 | u8 command_completion_vector[0x20]; | ||
| 1490 | |||
| 1491 | u8 reserved_0[0xc0]; | ||
| 1492 | }; | ||
| 1493 | |||
| 1494 | struct mlx5_ifc_stall_vl_event_bits { | ||
| 1495 | u8 reserved_0[0x18]; | ||
| 1496 | u8 port_num[0x1]; | ||
| 1497 | u8 reserved_1[0x3]; | ||
| 1498 | u8 vl[0x4]; | ||
| 1499 | |||
| 1500 | u8 reserved_2[0xa0]; | ||
| 1501 | }; | ||
| 1502 | |||
| 1503 | struct mlx5_ifc_db_bf_congestion_event_bits { | ||
| 1504 | u8 event_subtype[0x8]; | ||
| 1505 | u8 reserved_0[0x8]; | ||
| 1506 | u8 congestion_level[0x8]; | ||
| 1507 | u8 reserved_1[0x8]; | ||
| 1508 | |||
| 1509 | u8 reserved_2[0xa0]; | ||
| 1510 | }; | ||
| 1511 | |||
| 1512 | struct mlx5_ifc_gpio_event_bits { | ||
| 1513 | u8 reserved_0[0x60]; | ||
| 1514 | |||
| 1515 | u8 gpio_event_hi[0x20]; | ||
| 1516 | |||
| 1517 | u8 gpio_event_lo[0x20]; | ||
| 1518 | |||
| 1519 | u8 reserved_1[0x40]; | ||
| 1520 | }; | ||
| 1521 | |||
| 1522 | struct mlx5_ifc_port_state_change_event_bits { | ||
| 1523 | u8 reserved_0[0x40]; | ||
| 1524 | |||
| 1525 | u8 port_num[0x4]; | ||
| 1526 | u8 reserved_1[0x1c]; | ||
| 1527 | |||
| 1528 | u8 reserved_2[0x80]; | ||
| 1529 | }; | ||
| 1530 | |||
| 1531 | struct mlx5_ifc_dropped_packet_logged_bits { | ||
| 1532 | u8 reserved_0[0xe0]; | ||
| 1533 | }; | ||
| 1534 | |||
| 1535 | enum { | ||
| 1536 | MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, | ||
| 1537 | MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, | ||
| 1538 | }; | ||
| 1539 | |||
| 1540 | struct mlx5_ifc_cq_error_bits { | ||
| 1541 | u8 reserved_0[0x8]; | ||
| 1542 | u8 cqn[0x18]; | ||
| 1543 | |||
| 1544 | u8 reserved_1[0x20]; | ||
| 1545 | |||
| 1546 | u8 reserved_2[0x18]; | ||
| 1547 | u8 syndrome[0x8]; | ||
| 1548 | |||
| 1549 | u8 reserved_3[0x80]; | ||
| 1550 | }; | ||
| 1551 | |||
| 1552 | struct mlx5_ifc_rdma_page_fault_event_bits { | ||
| 1553 | u8 bytes_committed[0x20]; | ||
| 1554 | |||
| 1555 | u8 r_key[0x20]; | ||
| 1556 | |||
| 1557 | u8 reserved_0[0x10]; | ||
| 1558 | u8 packet_len[0x10]; | ||
| 1559 | |||
| 1560 | u8 rdma_op_len[0x20]; | ||
| 1561 | |||
| 1562 | u8 rdma_va[0x40]; | ||
| 1563 | |||
| 1564 | u8 reserved_1[0x5]; | ||
| 1565 | u8 rdma[0x1]; | ||
| 1566 | u8 write[0x1]; | ||
| 1567 | u8 requestor[0x1]; | ||
| 1568 | u8 qp_number[0x18]; | ||
| 1569 | }; | ||
| 1570 | |||
| 1571 | struct mlx5_ifc_wqe_associated_page_fault_event_bits { | ||
| 1572 | u8 bytes_committed[0x20]; | ||
| 1573 | |||
| 1574 | u8 reserved_0[0x10]; | ||
| 1575 | u8 wqe_index[0x10]; | ||
| 1576 | |||
| 1577 | u8 reserved_1[0x10]; | ||
| 1578 | u8 len[0x10]; | ||
| 1579 | |||
| 1580 | u8 reserved_2[0x60]; | ||
| 1581 | |||
| 1582 | u8 reserved_3[0x5]; | ||
| 1583 | u8 rdma[0x1]; | ||
| 1584 | u8 write_read[0x1]; | ||
| 1585 | u8 requestor[0x1]; | ||
| 1586 | u8 qpn[0x18]; | ||
| 1587 | }; | ||
| 1588 | |||
| 1589 | struct mlx5_ifc_qp_events_bits { | ||
| 1590 | u8 reserved_0[0xa0]; | ||
| 1591 | |||
| 1592 | u8 type[0x8]; | ||
| 1593 | u8 reserved_1[0x18]; | ||
| 1594 | |||
| 1595 | u8 reserved_2[0x8]; | ||
| 1596 | u8 qpn_rqn_sqn[0x18]; | ||
| 1597 | }; | ||
| 1598 | |||
| 1599 | struct mlx5_ifc_dct_events_bits { | ||
| 1600 | u8 reserved_0[0xc0]; | ||
| 1601 | |||
| 1602 | u8 reserved_1[0x8]; | ||
| 1603 | u8 dct_number[0x18]; | ||
| 1604 | }; | ||
| 1605 | |||
| 1606 | struct mlx5_ifc_comp_event_bits { | ||
| 1607 | u8 reserved_0[0xc0]; | ||
| 1608 | |||
| 1609 | u8 reserved_1[0x8]; | ||
| 1610 | u8 cq_number[0x18]; | ||
| 1611 | }; | ||
| 1612 | |||
| 1613 | enum { | ||
| 1614 | MLX5_QPC_STATE_RST = 0x0, | ||
| 1615 | MLX5_QPC_STATE_INIT = 0x1, | ||
| 1616 | MLX5_QPC_STATE_RTR = 0x2, | ||
| 1617 | MLX5_QPC_STATE_RTS = 0x3, | ||
| 1618 | MLX5_QPC_STATE_SQER = 0x4, | ||
| 1619 | MLX5_QPC_STATE_ERR = 0x6, | ||
| 1620 | MLX5_QPC_STATE_SQD = 0x7, | ||
| 1621 | MLX5_QPC_STATE_SUSPENDED = 0x9, | ||
| 1622 | }; | ||
| 1623 | |||
| 1624 | enum { | ||
| 1625 | MLX5_QPC_ST_RC = 0x0, | ||
| 1626 | MLX5_QPC_ST_UC = 0x1, | ||
| 1627 | MLX5_QPC_ST_UD = 0x2, | ||
| 1628 | MLX5_QPC_ST_XRC = 0x3, | ||
| 1629 | MLX5_QPC_ST_DCI = 0x5, | ||
| 1630 | MLX5_QPC_ST_QP0 = 0x7, | ||
| 1631 | MLX5_QPC_ST_QP1 = 0x8, | ||
| 1632 | MLX5_QPC_ST_RAW_DATAGRAM = 0x9, | ||
| 1633 | MLX5_QPC_ST_REG_UMR = 0xc, | ||
| 1634 | }; | ||
| 1635 | |||
| 1636 | enum { | ||
| 1637 | MLX5_QPC_PM_STATE_ARMED = 0x0, | ||
| 1638 | MLX5_QPC_PM_STATE_REARM = 0x1, | ||
| 1639 | MLX5_QPC_PM_STATE_RESERVED = 0x2, | ||
| 1640 | MLX5_QPC_PM_STATE_MIGRATED = 0x3, | ||
| 1641 | }; | ||
| 1642 | |||
| 1643 | enum { | ||
| 1644 | MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, | ||
| 1645 | MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, | ||
| 1646 | }; | ||
| 1647 | |||
| 1648 | enum { | ||
| 1649 | MLX5_QPC_MTU_256_BYTES = 0x1, | ||
| 1650 | MLX5_QPC_MTU_512_BYTES = 0x2, | ||
| 1651 | MLX5_QPC_MTU_1K_BYTES = 0x3, | ||
| 1652 | MLX5_QPC_MTU_2K_BYTES = 0x4, | ||
| 1653 | MLX5_QPC_MTU_4K_BYTES = 0x5, | ||
| 1654 | MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, | ||
| 1655 | }; | ||
| 1656 | |||
| 1657 | enum { | ||
| 1658 | MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, | ||
| 1659 | MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, | ||
| 1660 | MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, | ||
| 1661 | MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, | ||
| 1662 | MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, | ||
| 1663 | MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, | ||
| 1664 | MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, | ||
| 1665 | MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, | ||
| 1666 | }; | ||
| 1667 | |||
| 1668 | enum { | ||
| 1669 | MLX5_QPC_CS_REQ_DISABLE = 0x0, | ||
| 1670 | MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, | ||
| 1671 | MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, | ||
| 1672 | }; | ||
| 1673 | |||
| 1674 | enum { | ||
| 1675 | MLX5_QPC_CS_RES_DISABLE = 0x0, | ||
| 1676 | MLX5_QPC_CS_RES_UP_TO_32B = 0x1, | ||
| 1677 | MLX5_QPC_CS_RES_UP_TO_64B = 0x2, | ||
| 1678 | }; | ||
| 1679 | |||
| 1680 | struct mlx5_ifc_qpc_bits { | ||
| 1681 | u8 state[0x4]; | ||
| 1682 | u8 reserved_0[0x4]; | ||
| 1683 | u8 st[0x8]; | ||
| 1684 | u8 reserved_1[0x3]; | ||
| 1685 | u8 pm_state[0x2]; | ||
| 1686 | u8 reserved_2[0x7]; | ||
| 1687 | u8 end_padding_mode[0x2]; | ||
| 1688 | u8 reserved_3[0x2]; | ||
| 1689 | |||
| 1690 | u8 wq_signature[0x1]; | ||
| 1691 | u8 block_lb_mc[0x1]; | ||
| 1692 | u8 atomic_like_write_en[0x1]; | ||
| 1693 | u8 latency_sensitive[0x1]; | ||
| 1694 | u8 reserved_4[0x1]; | ||
| 1695 | u8 drain_sigerr[0x1]; | ||
| 1696 | u8 reserved_5[0x2]; | ||
| 1697 | u8 pd[0x18]; | ||
| 1698 | |||
| 1699 | u8 mtu[0x3]; | ||
| 1700 | u8 log_msg_max[0x5]; | ||
| 1701 | u8 reserved_6[0x1]; | ||
| 1702 | u8 log_rq_size[0x4]; | ||
| 1703 | u8 log_rq_stride[0x3]; | ||
| 1704 | u8 no_sq[0x1]; | ||
| 1705 | u8 log_sq_size[0x4]; | ||
| 1706 | u8 reserved_7[0x6]; | ||
| 1707 | u8 rlky[0x1]; | ||
| 1708 | u8 reserved_8[0x4]; | ||
| 1709 | |||
| 1710 | u8 counter_set_id[0x8]; | ||
| 1711 | u8 uar_page[0x18]; | ||
| 1712 | |||
| 1713 | u8 reserved_9[0x8]; | ||
| 1714 | u8 user_index[0x18]; | ||
| 1715 | |||
| 1716 | u8 reserved_10[0x3]; | ||
| 1717 | u8 log_page_size[0x5]; | ||
| 1718 | u8 remote_qpn[0x18]; | ||
| 1719 | |||
| 1720 | struct mlx5_ifc_ads_bits primary_address_path; | ||
| 1721 | |||
| 1722 | struct mlx5_ifc_ads_bits secondary_address_path; | ||
| 1723 | |||
| 1724 | u8 log_ack_req_freq[0x4]; | ||
| 1725 | u8 reserved_11[0x4]; | ||
| 1726 | u8 log_sra_max[0x3]; | ||
| 1727 | u8 reserved_12[0x2]; | ||
| 1728 | u8 retry_count[0x3]; | ||
| 1729 | u8 rnr_retry[0x3]; | ||
| 1730 | u8 reserved_13[0x1]; | ||
| 1731 | u8 fre[0x1]; | ||
| 1732 | u8 cur_rnr_retry[0x3]; | ||
| 1733 | u8 cur_retry_count[0x3]; | ||
| 1734 | u8 reserved_14[0x5]; | ||
| 1735 | |||
| 1736 | u8 reserved_15[0x20]; | ||
| 1737 | |||
| 1738 | u8 reserved_16[0x8]; | ||
| 1739 | u8 next_send_psn[0x18]; | ||
| 1740 | |||
| 1741 | u8 reserved_17[0x8]; | ||
| 1742 | u8 cqn_snd[0x18]; | ||
| 1743 | |||
| 1744 | u8 reserved_18[0x40]; | ||
| 1745 | |||
| 1746 | u8 reserved_19[0x8]; | ||
| 1747 | u8 last_acked_psn[0x18]; | ||
| 1748 | |||
| 1749 | u8 reserved_20[0x8]; | ||
| 1750 | u8 ssn[0x18]; | ||
| 1751 | |||
| 1752 | u8 reserved_21[0x8]; | ||
| 1753 | u8 log_rra_max[0x3]; | ||
| 1754 | u8 reserved_22[0x1]; | ||
| 1755 | u8 atomic_mode[0x4]; | ||
| 1756 | u8 rre[0x1]; | ||
| 1757 | u8 rwe[0x1]; | ||
| 1758 | u8 rae[0x1]; | ||
| 1759 | u8 reserved_23[0x1]; | ||
| 1760 | u8 page_offset[0x6]; | ||
| 1761 | u8 reserved_24[0x3]; | ||
| 1762 | u8 cd_slave_receive[0x1]; | ||
| 1763 | u8 cd_slave_send[0x1]; | ||
| 1764 | u8 cd_master[0x1]; | ||
| 1765 | |||
| 1766 | u8 reserved_25[0x3]; | ||
| 1767 | u8 min_rnr_nak[0x5]; | ||
| 1768 | u8 next_rcv_psn[0x18]; | ||
| 1769 | |||
| 1770 | u8 reserved_26[0x8]; | ||
| 1771 | u8 xrcd[0x18]; | ||
| 1772 | |||
| 1773 | u8 reserved_27[0x8]; | ||
| 1774 | u8 cqn_rcv[0x18]; | ||
| 1775 | |||
| 1776 | u8 dbr_addr[0x40]; | ||
| 1777 | |||
| 1778 | u8 q_key[0x20]; | ||
| 1779 | |||
| 1780 | u8 reserved_28[0x5]; | ||
| 1781 | u8 rq_type[0x3]; | ||
| 1782 | u8 srqn_rmpn[0x18]; | ||
| 1783 | |||
| 1784 | u8 reserved_29[0x8]; | ||
| 1785 | u8 rmsn[0x18]; | ||
| 1786 | |||
| 1787 | u8 hw_sq_wqebb_counter[0x10]; | ||
| 1788 | u8 sw_sq_wqebb_counter[0x10]; | ||
| 1789 | |||
| 1790 | u8 hw_rq_counter[0x20]; | ||
| 1791 | |||
| 1792 | u8 sw_rq_counter[0x20]; | ||
| 1793 | |||
| 1794 | u8 reserved_30[0x20]; | ||
| 1795 | |||
| 1796 | u8 reserved_31[0xf]; | ||
| 1797 | u8 cgs[0x1]; | ||
| 1798 | u8 cs_req[0x8]; | ||
| 1799 | u8 cs_res[0x8]; | ||
| 1800 | |||
| 1801 | u8 dc_access_key[0x40]; | ||
| 1802 | |||
| 1803 | u8 reserved_32[0xc0]; | ||
| 1804 | }; | ||
| 1805 | |||
| 1806 | struct mlx5_ifc_roce_addr_layout_bits { | ||
| 1807 | u8 source_l3_address[16][0x8]; | ||
| 1808 | |||
| 1809 | u8 reserved_0[0x3]; | ||
| 1810 | u8 vlan_valid[0x1]; | ||
| 1811 | u8 vlan_id[0xc]; | ||
| 1812 | u8 source_mac_47_32[0x10]; | ||
| 1813 | |||
| 1814 | u8 source_mac_31_0[0x20]; | ||
| 1815 | |||
| 1816 | u8 reserved_1[0x14]; | ||
| 1817 | u8 roce_l3_type[0x4]; | ||
| 1818 | u8 roce_version[0x8]; | ||
| 1819 | |||
| 1820 | u8 reserved_2[0x20]; | ||
| 1821 | }; | ||
| 1822 | |||
| 1823 | union mlx5_ifc_hca_cap_union_bits { | ||
| 1824 | struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; | ||
| 1825 | struct mlx5_ifc_odp_cap_bits odp_cap; | ||
| 1826 | struct mlx5_ifc_atomic_caps_bits atomic_caps; | ||
| 1827 | struct mlx5_ifc_roce_cap_bits roce_cap; | ||
| 1828 | struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; | ||
| 1829 | struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; | ||
| 1830 | u8 reserved_0[0x8000]; | ||
| 1831 | }; | ||
| 1832 | |||
| 1833 | enum { | ||
| 1834 | MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, | ||
| 1835 | MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, | ||
| 1836 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, | ||
| 1837 | }; | ||
| 1838 | |||
| 1839 | struct mlx5_ifc_flow_context_bits { | ||
| 1840 | u8 reserved_0[0x20]; | ||
| 1841 | |||
| 1842 | u8 group_id[0x20]; | ||
| 1843 | |||
| 1844 | u8 reserved_1[0x8]; | ||
| 1845 | u8 flow_tag[0x18]; | ||
| 1846 | |||
| 1847 | u8 reserved_2[0x10]; | ||
| 1848 | u8 action[0x10]; | ||
| 1849 | |||
| 1850 | u8 reserved_3[0x8]; | ||
| 1851 | u8 destination_list_size[0x18]; | ||
| 1852 | |||
| 1853 | u8 reserved_4[0x160]; | ||
| 1854 | |||
| 1855 | struct mlx5_ifc_fte_match_param_bits match_value; | ||
| 1856 | |||
| 1857 | u8 reserved_5[0x600]; | ||
| 1858 | |||
| 1859 | struct mlx5_ifc_dest_format_struct_bits destination[0]; | ||
| 1860 | }; | ||
| 1861 | |||
| 1862 | enum { | ||
| 1863 | MLX5_XRC_SRQC_STATE_GOOD = 0x0, | ||
| 1864 | MLX5_XRC_SRQC_STATE_ERROR = 0x1, | ||
| 1865 | }; | ||
| 1866 | |||
| 1867 | struct mlx5_ifc_xrc_srqc_bits { | ||
| 1868 | u8 state[0x4]; | ||
| 1869 | u8 log_xrc_srq_size[0x4]; | ||
| 1870 | u8 reserved_0[0x18]; | ||
| 1871 | |||
| 1872 | u8 wq_signature[0x1]; | ||
| 1873 | u8 cont_srq[0x1]; | ||
| 1874 | u8 reserved_1[0x1]; | ||
| 1875 | u8 rlky[0x1]; | ||
| 1876 | u8 basic_cyclic_rcv_wqe[0x1]; | ||
| 1877 | u8 log_rq_stride[0x3]; | ||
| 1878 | u8 xrcd[0x18]; | ||
| 1879 | |||
| 1880 | u8 page_offset[0x6]; | ||
| 1881 | u8 reserved_2[0x2]; | ||
| 1882 | u8 cqn[0x18]; | ||
| 1883 | |||
| 1884 | u8 reserved_3[0x20]; | ||
| 1885 | |||
| 1886 | u8 user_index_equal_xrc_srqn[0x1]; | ||
| 1887 | u8 reserved_4[0x1]; | ||
| 1888 | u8 log_page_size[0x6]; | ||
| 1889 | u8 user_index[0x18]; | ||
| 1890 | |||
| 1891 | u8 reserved_5[0x20]; | ||
| 1892 | |||
| 1893 | u8 reserved_6[0x8]; | ||
| 1894 | u8 pd[0x18]; | ||
| 1895 | |||
| 1896 | u8 lwm[0x10]; | ||
| 1897 | u8 wqe_cnt[0x10]; | ||
| 1898 | |||
| 1899 | u8 reserved_7[0x40]; | ||
| 1900 | |||
| 1901 | u8 db_record_addr_h[0x20]; | ||
| 1902 | |||
| 1903 | u8 db_record_addr_l[0x1e]; | ||
| 1904 | u8 reserved_8[0x2]; | ||
| 1905 | |||
| 1906 | u8 reserved_9[0x80]; | ||
| 1907 | }; | ||
| 1908 | |||
| 1909 | struct mlx5_ifc_traffic_counter_bits { | ||
| 1910 | u8 packets[0x40]; | ||
| 1911 | |||
| 1912 | u8 octets[0x40]; | ||
| 1913 | }; | ||
| 1914 | |||
| 1915 | struct mlx5_ifc_tisc_bits { | ||
| 1916 | u8 reserved_0[0xc]; | ||
| 1917 | u8 prio[0x4]; | ||
| 1918 | u8 reserved_1[0x10]; | ||
| 1919 | |||
| 1920 | u8 reserved_2[0x100]; | ||
| 1921 | |||
| 1922 | u8 reserved_3[0x8]; | ||
| 1923 | u8 transport_domain[0x18]; | ||
| 1924 | |||
| 1925 | u8 reserved_4[0x3c0]; | ||
| 1926 | }; | ||
| 1927 | |||
| 1928 | enum { | ||
| 1929 | MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, | ||
| 1930 | MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, | ||
| 1931 | }; | ||
| 1932 | |||
| 1933 | enum { | ||
| 1934 | MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, | ||
| 1935 | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, | ||
| 1936 | }; | ||
| 1937 | |||
| 1938 | enum { | ||
| 1939 | MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, | ||
| 1940 | MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, | ||
| 1941 | MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, | ||
| 1942 | }; | ||
| 1943 | |||
| 1944 | enum { | ||
| 1945 | MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, | ||
| 1946 | MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, | ||
| 1947 | }; | ||
| 1948 | |||
| 1949 | struct mlx5_ifc_tirc_bits { | ||
| 1950 | u8 reserved_0[0x20]; | ||
| 1951 | |||
| 1952 | u8 disp_type[0x4]; | ||
| 1953 | u8 reserved_1[0x1c]; | ||
| 1954 | |||
| 1955 | u8 reserved_2[0x40]; | ||
| 1956 | |||
| 1957 | u8 reserved_3[0x4]; | ||
| 1958 | u8 lro_timeout_period_usecs[0x10]; | ||
| 1959 | u8 lro_enable_mask[0x4]; | ||
| 1960 | u8 lro_max_ip_payload_size[0x8]; | ||
| 1961 | |||
| 1962 | u8 reserved_4[0x40]; | ||
| 1963 | |||
| 1964 | u8 reserved_5[0x8]; | ||
| 1965 | u8 inline_rqn[0x18]; | ||
| 1966 | |||
| 1967 | u8 rx_hash_symmetric[0x1]; | ||
| 1968 | u8 reserved_6[0x1]; | ||
| 1969 | u8 tunneled_offload_en[0x1]; | ||
| 1970 | u8 reserved_7[0x5]; | ||
| 1971 | u8 indirect_table[0x18]; | ||
| 1972 | |||
| 1973 | u8 rx_hash_fn[0x4]; | ||
| 1974 | u8 reserved_8[0x2]; | ||
| 1975 | u8 self_lb_block[0x2]; | ||
| 1976 | u8 transport_domain[0x18]; | ||
| 1977 | |||
| 1978 | u8 rx_hash_toeplitz_key[10][0x20]; | ||
| 1979 | |||
| 1980 | struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; | ||
| 1981 | |||
| 1982 | struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; | ||
| 1983 | |||
| 1984 | u8 reserved_9[0x4c0]; | ||
| 1985 | }; | ||
| 1986 | |||
| 1987 | enum { | ||
| 1988 | MLX5_SRQC_STATE_GOOD = 0x0, | ||
| 1989 | MLX5_SRQC_STATE_ERROR = 0x1, | ||
| 1990 | }; | ||
| 1991 | |||
| 1992 | struct mlx5_ifc_srqc_bits { | ||
| 1993 | u8 state[0x4]; | ||
| 1994 | u8 log_srq_size[0x4]; | ||
| 1995 | u8 reserved_0[0x18]; | ||
| 1996 | |||
| 1997 | u8 wq_signature[0x1]; | ||
| 1998 | u8 cont_srq[0x1]; | ||
| 1999 | u8 reserved_1[0x1]; | ||
| 2000 | u8 rlky[0x1]; | ||
| 2001 | u8 reserved_2[0x1]; | ||
| 2002 | u8 log_rq_stride[0x3]; | ||
| 2003 | u8 xrcd[0x18]; | ||
| 2004 | |||
| 2005 | u8 page_offset[0x6]; | ||
| 2006 | u8 reserved_3[0x2]; | ||
| 2007 | u8 cqn[0x18]; | ||
| 2008 | |||
| 2009 | u8 reserved_4[0x20]; | ||
| 2010 | |||
| 2011 | u8 reserved_5[0x2]; | ||
| 2012 | u8 log_page_size[0x6]; | ||
| 2013 | u8 reserved_6[0x18]; | ||
| 2014 | |||
| 2015 | u8 reserved_7[0x20]; | ||
| 2016 | |||
| 2017 | u8 reserved_8[0x8]; | ||
| 2018 | u8 pd[0x18]; | ||
| 2019 | |||
| 2020 | u8 lwm[0x10]; | ||
| 2021 | u8 wqe_cnt[0x10]; | ||
| 2022 | |||
| 2023 | u8 reserved_9[0x40]; | ||
| 2024 | |||
| 2025 | u8 dbr_addr[0x40]; | ||
| 2026 | |||
| 2027 | u8 reserved_10[0x80]; | ||
| 2028 | }; | ||
| 2029 | |||
| 2030 | enum { | ||
| 2031 | MLX5_SQC_STATE_RST = 0x0, | ||
| 2032 | MLX5_SQC_STATE_RDY = 0x1, | ||
| 2033 | MLX5_SQC_STATE_ERR = 0x3, | ||
| 2034 | }; | ||
| 2035 | |||
| 2036 | struct mlx5_ifc_sqc_bits { | ||
| 2037 | u8 rlky[0x1]; | ||
| 2038 | u8 cd_master[0x1]; | ||
| 2039 | u8 fre[0x1]; | ||
| 2040 | u8 flush_in_error_en[0x1]; | ||
| 2041 | u8 reserved_0[0x4]; | ||
| 2042 | u8 state[0x4]; | ||
| 2043 | u8 reserved_1[0x14]; | ||
| 2044 | |||
| 2045 | u8 reserved_2[0x8]; | ||
| 2046 | u8 user_index[0x18]; | ||
| 2047 | |||
| 2048 | u8 reserved_3[0x8]; | ||
| 2049 | u8 cqn[0x18]; | ||
| 2050 | |||
| 2051 | u8 reserved_4[0xa0]; | ||
| 2052 | |||
| 2053 | u8 tis_lst_sz[0x10]; | ||
| 2054 | u8 reserved_5[0x10]; | ||
| 2055 | |||
| 2056 | u8 reserved_6[0x40]; | ||
| 2057 | |||
| 2058 | u8 reserved_7[0x8]; | ||
| 2059 | u8 tis_num_0[0x18]; | ||
| 2060 | |||
| 2061 | struct mlx5_ifc_wq_bits wq; | ||
| 2062 | }; | ||
| 2063 | |||
| 2064 | struct mlx5_ifc_rqtc_bits { | ||
| 2065 | u8 reserved_0[0xa0]; | ||
| 2066 | |||
| 2067 | u8 reserved_1[0x10]; | ||
| 2068 | u8 rqt_max_size[0x10]; | ||
| 2069 | |||
| 2070 | u8 reserved_2[0x10]; | ||
| 2071 | u8 rqt_actual_size[0x10]; | ||
| 2072 | |||
| 2073 | u8 reserved_3[0x6a0]; | ||
| 2074 | |||
| 2075 | struct mlx5_ifc_rq_num_bits rq_num[0]; | ||
| 2076 | }; | ||
| 2077 | |||
| 2078 | enum { | ||
| 2079 | MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, | ||
| 2080 | MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, | ||
| 2081 | }; | ||
| 2082 | |||
| 2083 | enum { | ||
| 2084 | MLX5_RQC_STATE_RST = 0x0, | ||
| 2085 | MLX5_RQC_STATE_RDY = 0x1, | ||
| 2086 | MLX5_RQC_STATE_ERR = 0x3, | ||
| 2087 | }; | ||
| 2088 | |||
| 2089 | struct mlx5_ifc_rqc_bits { | ||
| 2090 | u8 rlky[0x1]; | ||
| 2091 | u8 reserved_0[0x2]; | ||
| 2092 | u8 vsd[0x1]; | ||
| 2093 | u8 mem_rq_type[0x4]; | ||
| 2094 | u8 state[0x4]; | ||
| 2095 | u8 reserved_1[0x1]; | ||
| 2096 | u8 flush_in_error_en[0x1]; | ||
| 2097 | u8 reserved_2[0x12]; | ||
| 2098 | |||
| 2099 | u8 reserved_3[0x8]; | ||
| 2100 | u8 user_index[0x18]; | ||
| 2101 | |||
| 2102 | u8 reserved_4[0x8]; | ||
| 2103 | u8 cqn[0x18]; | ||
| 2104 | |||
| 2105 | u8 counter_set_id[0x8]; | ||
| 2106 | u8 reserved_5[0x18]; | ||
| 2107 | |||
| 2108 | u8 reserved_6[0x8]; | ||
| 2109 | u8 rmpn[0x18]; | ||
| 2110 | |||
| 2111 | u8 reserved_7[0xe0]; | ||
| 2112 | |||
| 2113 | struct mlx5_ifc_wq_bits wq; | ||
| 2114 | }; | ||
| 2115 | |||
| 2116 | enum { | ||
| 2117 | MLX5_RMPC_STATE_RDY = 0x1, | ||
| 2118 | MLX5_RMPC_STATE_ERR = 0x3, | ||
| 2119 | }; | ||
| 2120 | |||
| 2121 | struct mlx5_ifc_rmpc_bits { | ||
| 2122 | u8 reserved_0[0x8]; | ||
| 2123 | u8 state[0x4]; | ||
| 2124 | u8 reserved_1[0x14]; | ||
| 2125 | |||
| 2126 | u8 basic_cyclic_rcv_wqe[0x1]; | ||
| 2127 | u8 reserved_2[0x1f]; | ||
| 2128 | |||
| 2129 | u8 reserved_3[0x140]; | ||
| 2130 | |||
| 2131 | struct mlx5_ifc_wq_bits wq; | ||
| 2132 | }; | ||
| 2133 | |||
| 2134 | enum { | ||
| 2135 | MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, | ||
| 2136 | }; | ||
| 2137 | |||
| 2138 | struct mlx5_ifc_nic_vport_context_bits { | ||
| 2139 | u8 reserved_0[0x1f]; | ||
| 2140 | u8 roce_en[0x1]; | ||
| 2141 | |||
| 2142 | u8 reserved_1[0x760]; | ||
| 2143 | |||
| 2144 | u8 reserved_2[0x5]; | ||
| 2145 | u8 allowed_list_type[0x3]; | ||
| 2146 | u8 reserved_3[0xc]; | ||
| 2147 | u8 allowed_list_size[0xc]; | ||
| 2148 | |||
| 2149 | struct mlx5_ifc_mac_address_layout_bits permanent_address; | ||
| 2150 | |||
| 2151 | u8 reserved_4[0x20]; | ||
| 2152 | |||
| 2153 | u8 current_uc_mac_address[0][0x40]; | ||
| 2154 | }; | ||
| 2155 | |||
| 2156 | enum { | ||
| 2157 | MLX5_MKC_ACCESS_MODE_PA = 0x0, | ||
| 2158 | MLX5_MKC_ACCESS_MODE_MTT = 0x1, | ||
| 2159 | MLX5_MKC_ACCESS_MODE_KLMS = 0x2, | ||
| 2160 | }; | ||
| 2161 | |||
| 2162 | struct mlx5_ifc_mkc_bits { | ||
| 2163 | u8 reserved_0[0x1]; | ||
| 2164 | u8 free[0x1]; | ||
| 2165 | u8 reserved_1[0xd]; | ||
| 2166 | u8 small_fence_on_rdma_read_response[0x1]; | ||
| 2167 | u8 umr_en[0x1]; | ||
| 2168 | u8 a[0x1]; | ||
| 2169 | u8 rw[0x1]; | ||
| 2170 | u8 rr[0x1]; | ||
| 2171 | u8 lw[0x1]; | ||
| 2172 | u8 lr[0x1]; | ||
| 2173 | u8 access_mode[0x2]; | ||
| 2174 | u8 reserved_2[0x8]; | ||
| 2175 | |||
| 2176 | u8 qpn[0x18]; | ||
| 2177 | u8 mkey_7_0[0x8]; | ||
| 2178 | |||
| 2179 | u8 reserved_3[0x20]; | ||
| 2180 | |||
| 2181 | u8 length64[0x1]; | ||
| 2182 | u8 bsf_en[0x1]; | ||
| 2183 | u8 sync_umr[0x1]; | ||
| 2184 | u8 reserved_4[0x2]; | ||
| 2185 | u8 expected_sigerr_count[0x1]; | ||
| 2186 | u8 reserved_5[0x1]; | ||
| 2187 | u8 en_rinval[0x1]; | ||
| 2188 | u8 pd[0x18]; | ||
| 2189 | |||
| 2190 | u8 start_addr[0x40]; | ||
| 2191 | |||
| 2192 | u8 len[0x40]; | ||
| 2193 | |||
| 2194 | u8 bsf_octword_size[0x20]; | ||
| 2195 | |||
| 2196 | u8 reserved_6[0x80]; | ||
| 2197 | |||
| 2198 | u8 translations_octword_size[0x20]; | ||
| 2199 | |||
| 2200 | u8 reserved_7[0x1b]; | ||
| 2201 | u8 log_page_size[0x5]; | ||
| 2202 | |||
| 2203 | u8 reserved_8[0x20]; | ||
| 2204 | }; | ||
| 2205 | |||
| 2206 | struct mlx5_ifc_pkey_bits { | ||
| 2207 | u8 reserved_0[0x10]; | ||
| 2208 | u8 pkey[0x10]; | ||
| 2209 | }; | ||
| 2210 | |||
| 2211 | struct mlx5_ifc_array128_auto_bits { | ||
| 2212 | u8 array128_auto[16][0x8]; | ||
| 2213 | }; | ||
| 2214 | |||
| 2215 | struct mlx5_ifc_hca_vport_context_bits { | ||
| 2216 | u8 field_select[0x20]; | ||
| 2217 | |||
| 2218 | u8 reserved_0[0xe0]; | ||
| 2219 | |||
| 2220 | u8 sm_virt_aware[0x1]; | ||
| 2221 | u8 has_smi[0x1]; | ||
| 2222 | u8 has_raw[0x1]; | ||
| 2223 | u8 grh_required[0x1]; | ||
| 2224 | u8 reserved_1[0xc]; | ||
| 2225 | u8 port_physical_state[0x4]; | ||
| 2226 | u8 vport_state_policy[0x4]; | ||
| 2227 | u8 port_state[0x4]; | ||
| 2228 | u8 vport_state[0x4]; | ||
| 2229 | |||
| 2230 | u8 reserved_2[0x20]; | ||
| 2231 | |||
| 2232 | u8 system_image_guid[0x40]; | ||
| 2233 | |||
| 2234 | u8 port_guid[0x40]; | ||
| 2235 | |||
| 2236 | u8 node_guid[0x40]; | ||
| 2237 | |||
| 2238 | u8 cap_mask1[0x20]; | ||
| 2239 | |||
| 2240 | u8 cap_mask1_field_select[0x20]; | ||
| 2241 | |||
| 2242 | u8 cap_mask2[0x20]; | ||
| 2243 | |||
| 2244 | u8 cap_mask2_field_select[0x20]; | ||
| 2245 | |||
| 2246 | u8 reserved_3[0x80]; | ||
| 2247 | |||
| 2248 | u8 lid[0x10]; | ||
| 2249 | u8 reserved_4[0x4]; | ||
| 2250 | u8 init_type_reply[0x4]; | ||
| 2251 | u8 lmc[0x3]; | ||
| 2252 | u8 subnet_timeout[0x5]; | ||
| 2253 | |||
| 2254 | u8 sm_lid[0x10]; | ||
| 2255 | u8 sm_sl[0x4]; | ||
| 2256 | u8 reserved_5[0xc]; | ||
| 2257 | |||
| 2258 | u8 qkey_violation_counter[0x10]; | ||
| 2259 | u8 pkey_violation_counter[0x10]; | ||
| 2260 | |||
| 2261 | u8 reserved_6[0xca0]; | ||
| 2262 | }; | ||
| 2263 | |||
| 2264 | enum { | ||
| 2265 | MLX5_EQC_STATUS_OK = 0x0, | ||
| 2266 | MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, | ||
| 2267 | }; | ||
| 2268 | |||
| 2269 | enum { | ||
| 2270 | MLX5_EQC_ST_ARMED = 0x9, | ||
| 2271 | MLX5_EQC_ST_FIRED = 0xa, | ||
| 2272 | }; | ||
| 2273 | |||
| 2274 | struct mlx5_ifc_eqc_bits { | ||
| 2275 | u8 status[0x4]; | ||
| 2276 | u8 reserved_0[0x9]; | ||
| 2277 | u8 ec[0x1]; | ||
| 2278 | u8 oi[0x1]; | ||
| 2279 | u8 reserved_1[0x5]; | ||
| 2280 | u8 st[0x4]; | ||
| 2281 | u8 reserved_2[0x8]; | ||
| 2282 | |||
| 2283 | u8 reserved_3[0x20]; | ||
| 2284 | |||
| 2285 | u8 reserved_4[0x14]; | ||
| 2286 | u8 page_offset[0x6]; | ||
| 2287 | u8 reserved_5[0x6]; | ||
| 2288 | |||
| 2289 | u8 reserved_6[0x3]; | ||
| 2290 | u8 log_eq_size[0x5]; | ||
| 2291 | u8 uar_page[0x18]; | ||
| 2292 | |||
| 2293 | u8 reserved_7[0x20]; | ||
| 2294 | |||
| 2295 | u8 reserved_8[0x18]; | ||
| 2296 | u8 intr[0x8]; | ||
| 2297 | |||
| 2298 | u8 reserved_9[0x3]; | ||
| 2299 | u8 log_page_size[0x5]; | ||
| 2300 | u8 reserved_10[0x18]; | ||
| 2301 | |||
| 2302 | u8 reserved_11[0x60]; | ||
| 2303 | |||
| 2304 | u8 reserved_12[0x8]; | ||
| 2305 | u8 consumer_counter[0x18]; | ||
| 2306 | |||
| 2307 | u8 reserved_13[0x8]; | ||
| 2308 | u8 producer_counter[0x18]; | ||
| 2309 | |||
| 2310 | u8 reserved_14[0x80]; | ||
| 2311 | }; | ||
| 2312 | |||
| 2313 | enum { | ||
| 2314 | MLX5_DCTC_STATE_ACTIVE = 0x0, | ||
| 2315 | MLX5_DCTC_STATE_DRAINING = 0x1, | ||
| 2316 | MLX5_DCTC_STATE_DRAINED = 0x2, | ||
| 2317 | }; | ||
| 2318 | |||
| 2319 | enum { | ||
| 2320 | MLX5_DCTC_CS_RES_DISABLE = 0x0, | ||
| 2321 | MLX5_DCTC_CS_RES_NA = 0x1, | ||
| 2322 | MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, | ||
| 2323 | }; | ||
| 2324 | |||
| 2325 | enum { | ||
| 2326 | MLX5_DCTC_MTU_256_BYTES = 0x1, | ||
| 2327 | MLX5_DCTC_MTU_512_BYTES = 0x2, | ||
| 2328 | MLX5_DCTC_MTU_1K_BYTES = 0x3, | ||
| 2329 | MLX5_DCTC_MTU_2K_BYTES = 0x4, | ||
| 2330 | MLX5_DCTC_MTU_4K_BYTES = 0x5, | ||
| 2331 | }; | ||
| 2332 | |||
| 2333 | struct mlx5_ifc_dctc_bits { | ||
| 2334 | u8 reserved_0[0x4]; | ||
| 2335 | u8 state[0x4]; | ||
| 2336 | u8 reserved_1[0x18]; | ||
| 2337 | |||
| 2338 | u8 reserved_2[0x8]; | ||
| 2339 | u8 user_index[0x18]; | ||
| 2340 | |||
| 2341 | u8 reserved_3[0x8]; | ||
| 2342 | u8 cqn[0x18]; | ||
| 2343 | |||
| 2344 | u8 counter_set_id[0x8]; | ||
| 2345 | u8 atomic_mode[0x4]; | ||
| 2346 | u8 rre[0x1]; | ||
| 2347 | u8 rwe[0x1]; | ||
| 2348 | u8 rae[0x1]; | ||
| 2349 | u8 atomic_like_write_en[0x1]; | ||
| 2350 | u8 latency_sensitive[0x1]; | ||
| 2351 | u8 rlky[0x1]; | ||
| 2352 | u8 free_ar[0x1]; | ||
| 2353 | u8 reserved_4[0xd]; | ||
| 2354 | |||
| 2355 | u8 reserved_5[0x8]; | ||
| 2356 | u8 cs_res[0x8]; | ||
| 2357 | u8 reserved_6[0x3]; | ||
| 2358 | u8 min_rnr_nak[0x5]; | ||
| 2359 | u8 reserved_7[0x8]; | ||
| 2360 | |||
| 2361 | u8 reserved_8[0x8]; | ||
| 2362 | u8 srqn[0x18]; | ||
| 2363 | |||
| 2364 | u8 reserved_9[0x8]; | ||
| 2365 | u8 pd[0x18]; | ||
| 2366 | |||
| 2367 | u8 tclass[0x8]; | ||
| 2368 | u8 reserved_10[0x4]; | ||
| 2369 | u8 flow_label[0x14]; | ||
| 2370 | |||
| 2371 | u8 dc_access_key[0x40]; | ||
| 2372 | |||
| 2373 | u8 reserved_11[0x5]; | ||
| 2374 | u8 mtu[0x3]; | ||
| 2375 | u8 port[0x8]; | ||
| 2376 | u8 pkey_index[0x10]; | ||
| 2377 | |||
| 2378 | u8 reserved_12[0x8]; | ||
| 2379 | u8 my_addr_index[0x8]; | ||
| 2380 | u8 reserved_13[0x8]; | ||
| 2381 | u8 hop_limit[0x8]; | ||
| 2382 | |||
| 2383 | u8 dc_access_key_violation_count[0x20]; | ||
| 2384 | |||
| 2385 | u8 reserved_14[0x14]; | ||
| 2386 | u8 dei_cfi[0x1]; | ||
| 2387 | u8 eth_prio[0x3]; | ||
| 2388 | u8 ecn[0x2]; | ||
| 2389 | u8 dscp[0x6]; | ||
| 2390 | |||
| 2391 | u8 reserved_15[0x40]; | ||
| 2392 | }; | ||
| 2393 | |||
| 2394 | enum { | ||
| 2395 | MLX5_CQC_STATUS_OK = 0x0, | ||
| 2396 | MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, | ||
| 2397 | MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, | ||
| 2398 | }; | ||
| 2399 | |||
| 2400 | enum { | ||
| 2401 | MLX5_CQC_CQE_SZ_64_BYTES = 0x0, | ||
| 2402 | MLX5_CQC_CQE_SZ_128_BYTES = 0x1, | ||
| 2403 | }; | ||
| 2404 | |||
| 2405 | enum { | ||
| 2406 | MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6, | ||
| 2407 | MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9, | ||
| 2408 | MLX5_CQC_ST_FIRED = 0xa, | ||
| 2409 | }; | ||
| 2410 | |||
| 2411 | struct mlx5_ifc_cqc_bits { | ||
| 2412 | u8 status[0x4]; | ||
| 2413 | u8 reserved_0[0x4]; | ||
| 2414 | u8 cqe_sz[0x3]; | ||
| 2415 | u8 cc[0x1]; | ||
| 2416 | u8 reserved_1[0x1]; | ||
| 2417 | u8 scqe_break_moderation_en[0x1]; | ||
| 2418 | u8 oi[0x1]; | ||
| 2419 | u8 reserved_2[0x2]; | ||
| 2420 | u8 cqe_zip_en[0x1]; | ||
| 2421 | u8 mini_cqe_res_format[0x2]; | ||
| 2422 | u8 st[0x4]; | ||
| 2423 | u8 reserved_3[0x8]; | ||
| 2424 | |||
| 2425 | u8 reserved_4[0x20]; | ||
| 2426 | |||
| 2427 | u8 reserved_5[0x14]; | ||
| 2428 | u8 page_offset[0x6]; | ||
| 2429 | u8 reserved_6[0x6]; | ||
| 2430 | |||
| 2431 | u8 reserved_7[0x3]; | ||
| 2432 | u8 log_cq_size[0x5]; | ||
| 2433 | u8 uar_page[0x18]; | ||
| 2434 | |||
| 2435 | u8 reserved_8[0x4]; | ||
| 2436 | u8 cq_period[0xc]; | ||
| 2437 | u8 cq_max_count[0x10]; | ||
| 2438 | |||
| 2439 | u8 reserved_9[0x18]; | ||
| 2440 | u8 c_eqn[0x8]; | ||
| 2441 | |||
| 2442 | u8 reserved_10[0x3]; | ||
| 2443 | u8 log_page_size[0x5]; | ||
| 2444 | u8 reserved_11[0x18]; | ||
| 2445 | |||
| 2446 | u8 reserved_12[0x20]; | ||
| 2447 | |||
| 2448 | u8 reserved_13[0x8]; | ||
| 2449 | u8 last_notified_index[0x18]; | ||
| 2450 | |||
| 2451 | u8 reserved_14[0x8]; | ||
| 2452 | u8 last_solicit_index[0x18]; | ||
| 2453 | |||
| 2454 | u8 reserved_15[0x8]; | ||
| 2455 | u8 consumer_counter[0x18]; | ||
| 2456 | |||
| 2457 | u8 reserved_16[0x8]; | ||
| 2458 | u8 producer_counter[0x18]; | ||
| 2459 | |||
| 2460 | u8 reserved_17[0x40]; | ||
| 2461 | |||
| 2462 | u8 dbr_addr[0x40]; | ||
| 2463 | }; | ||
| 2464 | |||
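/*
 * Illustrative sketch, not part of this diff: in the mlx5_ifc_*_bits
 * layouts above, each u8 array dimension is a field width in *bits*,
 * so the structs are bit-accurate templates rather than objects that
 * are dereferenced directly.  Assuming the MLX5_GET() accessor from
 * <linux/mlx5/device.h> and a pointer to a firmware-returned CQ
 * context (out_mailbox/cq_ctx are names invented for illustration):
 */
void *cq_ctx = out_mailbox;                            /* assumed to point at the cqc */
u8  cq_status   = MLX5_GET(cqc, cq_ctx, status);       /* 4-bit status field above  */
u8  log_cq_size = MLX5_GET(cqc, cq_ctx, log_cq_size);  /* 5-bit log of CQ size      */
u32 uar_page    = MLX5_GET(cqc, cq_ctx, uar_page);     /* 24-bit UAR page index     */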
| 2465 | union mlx5_ifc_cong_control_roce_ecn_auto_bits { | ||
| 2466 | struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; | ||
| 2467 | struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; | ||
| 2468 | struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; | ||
| 2469 | u8 reserved_0[0x800]; | ||
| 2470 | }; | ||
| 2471 | |||
| 2472 | struct mlx5_ifc_query_adapter_param_block_bits { | ||
| 2473 | u8 reserved_0[0xc0]; | ||
| 2474 | |||
| 2475 | u8 reserved_1[0x8]; | ||
| 2476 | u8 ieee_vendor_id[0x18]; | ||
| 2477 | |||
| 2478 | u8 reserved_2[0x10]; | ||
| 2479 | u8 vsd_vendor_id[0x10]; | ||
| 2480 | |||
| 2481 | u8 vsd[208][0x8]; | ||
| 2482 | |||
| 2483 | u8 vsd_contd_psid[16][0x8]; | ||
| 2484 | }; | ||
| 2485 | |||
| 2486 | union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { | ||
| 2487 | struct mlx5_ifc_modify_field_select_bits modify_field_select; | ||
| 2488 | struct mlx5_ifc_resize_field_select_bits resize_field_select; | ||
| 2489 | u8 reserved_0[0x20]; | ||
| 2490 | }; | ||
| 2491 | |||
| 2492 | union mlx5_ifc_field_select_802_1_r_roce_auto_bits { | ||
| 2493 | struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; | ||
| 2494 | struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; | ||
| 2495 | struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; | ||
| 2496 | u8 reserved_0[0x20]; | ||
| 2497 | }; | ||
| 2498 | |||
| 2499 | union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { | ||
| 2500 | struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; | ||
| 2501 | struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; | ||
| 2502 | struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; | ||
| 2503 | struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; | ||
| 2504 | struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; | ||
| 2505 | struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; | ||
| 2506 | struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; | ||
| 2507 | struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; | ||
| 2508 | u8 reserved_0[0x7c0]; | ||
| 2509 | }; | ||
| 2510 | |||
| 2511 | union mlx5_ifc_event_auto_bits { | ||
| 2512 | struct mlx5_ifc_comp_event_bits comp_event; | ||
| 2513 | struct mlx5_ifc_dct_events_bits dct_events; | ||
| 2514 | struct mlx5_ifc_qp_events_bits qp_events; | ||
| 2515 | struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; | ||
| 2516 | struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; | ||
| 2517 | struct mlx5_ifc_cq_error_bits cq_error; | ||
| 2518 | struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; | ||
| 2519 | struct mlx5_ifc_port_state_change_event_bits port_state_change_event; | ||
| 2520 | struct mlx5_ifc_gpio_event_bits gpio_event; | ||
| 2521 | struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; | ||
| 2522 | struct mlx5_ifc_stall_vl_event_bits stall_vl_event; | ||
| 2523 | struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; | ||
| 2524 | u8 reserved_0[0xe0]; | ||
| 2525 | }; | ||
| 2526 | |||
| 2527 | struct mlx5_ifc_health_buffer_bits { | ||
| 2528 | u8 reserved_0[0x100]; | ||
| 2529 | |||
| 2530 | u8 assert_existptr[0x20]; | ||
| 2531 | |||
| 2532 | u8 assert_callra[0x20]; | ||
| 2533 | |||
| 2534 | u8 reserved_1[0x40]; | ||
| 2535 | |||
| 2536 | u8 fw_version[0x20]; | ||
| 2537 | |||
| 2538 | u8 hw_id[0x20]; | ||
| 2539 | |||
| 2540 | u8 reserved_2[0x20]; | ||
| 2541 | |||
| 2542 | u8 irisc_index[0x8]; | ||
| 2543 | u8 synd[0x8]; | ||
| 2544 | u8 ext_synd[0x10]; | ||
| 2545 | }; | ||
| 2546 | |||
| 2547 | struct mlx5_ifc_register_loopback_control_bits { | ||
| 2548 | u8 no_lb[0x1]; | ||
| 2549 | u8 reserved_0[0x7]; | ||
| 2550 | u8 port[0x8]; | ||
| 2551 | u8 reserved_1[0x10]; | ||
| 2552 | |||
| 2553 | u8 reserved_2[0x60]; | ||
| 2554 | }; | ||
| 2555 | |||
| 2556 | struct mlx5_ifc_teardown_hca_out_bits { | ||
| 2557 | u8 status[0x8]; | ||
| 2558 | u8 reserved_0[0x18]; | ||
| 2559 | |||
| 2560 | u8 syndrome[0x20]; | ||
| 2561 | |||
| 2562 | u8 reserved_1[0x40]; | ||
| 2563 | }; | ||
| 2564 | |||
| 2565 | enum { | ||
| 2566 | MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, | ||
| 2567 | MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, | ||
| 2568 | }; | ||
| 2569 | |||
| 2570 | struct mlx5_ifc_teardown_hca_in_bits { | ||
| 2571 | u8 opcode[0x10]; | ||
| 2572 | u8 reserved_0[0x10]; | ||
| 2573 | |||
| 2574 | u8 reserved_1[0x10]; | ||
| 2575 | u8 op_mod[0x10]; | ||
| 2576 | |||
| 2577 | u8 reserved_2[0x10]; | ||
| 2578 | u8 profile[0x10]; | ||
| 2579 | |||
| 2580 | u8 reserved_3[0x20]; | ||
| 2581 | }; | ||
| 2582 | |||
| 2583 | struct mlx5_ifc_sqerr2rts_qp_out_bits { | ||
| 2584 | u8 status[0x8]; | ||
| 2585 | u8 reserved_0[0x18]; | ||
| 2586 | |||
| 2587 | u8 syndrome[0x20]; | ||
| 2588 | |||
| 2589 | u8 reserved_1[0x40]; | ||
| 2590 | }; | ||
| 2591 | |||
| 2592 | struct mlx5_ifc_sqerr2rts_qp_in_bits { | ||
| 2593 | u8 opcode[0x10]; | ||
| 2594 | u8 reserved_0[0x10]; | ||
| 2595 | |||
| 2596 | u8 reserved_1[0x10]; | ||
| 2597 | u8 op_mod[0x10]; | ||
| 2598 | |||
| 2599 | u8 reserved_2[0x8]; | ||
| 2600 | u8 qpn[0x18]; | ||
| 2601 | |||
| 2602 | u8 reserved_3[0x20]; | ||
| 2603 | |||
| 2604 | u8 opt_param_mask[0x20]; | ||
| 2605 | |||
| 2606 | u8 reserved_4[0x20]; | ||
| 2607 | |||
| 2608 | struct mlx5_ifc_qpc_bits qpc; | ||
| 2609 | |||
| 2610 | u8 reserved_5[0x80]; | ||
| 2611 | }; | ||
| 2612 | |||
| 2613 | struct mlx5_ifc_sqd2rts_qp_out_bits { | ||
| 2614 | u8 status[0x8]; | ||
| 2615 | u8 reserved_0[0x18]; | ||
| 2616 | |||
| 2617 | u8 syndrome[0x20]; | ||
| 2618 | |||
| 2619 | u8 reserved_1[0x40]; | ||
| 2620 | }; | ||
| 2621 | |||
| 2622 | struct mlx5_ifc_sqd2rts_qp_in_bits { | ||
| 2623 | u8 opcode[0x10]; | ||
| 2624 | u8 reserved_0[0x10]; | ||
| 2625 | |||
| 2626 | u8 reserved_1[0x10]; | ||
| 2627 | u8 op_mod[0x10]; | ||
| 2628 | |||
| 2629 | u8 reserved_2[0x8]; | ||
| 2630 | u8 qpn[0x18]; | ||
| 2631 | |||
| 2632 | u8 reserved_3[0x20]; | ||
| 2633 | |||
| 2634 | u8 opt_param_mask[0x20]; | ||
| 2635 | |||
| 2636 | u8 reserved_4[0x20]; | ||
| 2637 | |||
| 2638 | struct mlx5_ifc_qpc_bits qpc; | ||
| 2639 | |||
| 2640 | u8 reserved_5[0x80]; | ||
| 2641 | }; | ||
| 2642 | |||
| 2643 | struct mlx5_ifc_set_roce_address_out_bits { | ||
| 2644 | u8 status[0x8]; | ||
| 2645 | u8 reserved_0[0x18]; | ||
| 2646 | |||
| 2647 | u8 syndrome[0x20]; | ||
| 2648 | |||
| 2649 | u8 reserved_1[0x40]; | ||
| 2650 | }; | ||
| 2651 | |||
| 2652 | struct mlx5_ifc_set_roce_address_in_bits { | ||
| 2653 | u8 opcode[0x10]; | ||
| 2654 | u8 reserved_0[0x10]; | ||
| 2655 | |||
| 2656 | u8 reserved_1[0x10]; | ||
| 2657 | u8 op_mod[0x10]; | ||
| 2658 | |||
| 2659 | u8 roce_address_index[0x10]; | ||
| 2660 | u8 reserved_2[0x10]; | ||
| 2661 | |||
| 2662 | u8 reserved_3[0x20]; | ||
| 2663 | |||
| 2664 | struct mlx5_ifc_roce_addr_layout_bits roce_address; | ||
| 2665 | }; | ||
| 2666 | |||
| 2667 | struct mlx5_ifc_set_mad_demux_out_bits { | ||
| 2668 | u8 status[0x8]; | ||
| 2669 | u8 reserved_0[0x18]; | ||
| 2670 | |||
| 2671 | u8 syndrome[0x20]; | ||
| 2672 | |||
| 2673 | u8 reserved_1[0x40]; | ||
| 2674 | }; | ||
| 2675 | |||
| 2676 | enum { | ||
| 2677 | MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, | ||
| 2678 | MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, | ||
| 2679 | }; | ||
| 2680 | |||
| 2681 | struct mlx5_ifc_set_mad_demux_in_bits { | ||
| 2682 | u8 opcode[0x10]; | ||
| 2683 | u8 reserved_0[0x10]; | ||
| 2684 | |||
| 2685 | u8 reserved_1[0x10]; | ||
| 2686 | u8 op_mod[0x10]; | ||
| 2687 | |||
| 2688 | u8 reserved_2[0x20]; | ||
| 2689 | |||
| 2690 | u8 reserved_3[0x6]; | ||
| 2691 | u8 demux_mode[0x2]; | ||
| 2692 | u8 reserved_4[0x18]; | ||
| 2693 | }; | ||
| 2694 | |||
| 2695 | struct mlx5_ifc_set_l2_table_entry_out_bits { | ||
| 2696 | u8 status[0x8]; | ||
| 2697 | u8 reserved_0[0x18]; | ||
| 2698 | |||
| 2699 | u8 syndrome[0x20]; | ||
| 2700 | |||
| 2701 | u8 reserved_1[0x40]; | ||
| 2702 | }; | ||
| 2703 | |||
| 2704 | struct mlx5_ifc_set_l2_table_entry_in_bits { | ||
| 2705 | u8 opcode[0x10]; | ||
| 2706 | u8 reserved_0[0x10]; | ||
| 2707 | |||
| 2708 | u8 reserved_1[0x10]; | ||
| 2709 | u8 op_mod[0x10]; | ||
| 2710 | |||
| 2711 | u8 reserved_2[0x60]; | ||
| 2712 | |||
| 2713 | u8 reserved_3[0x8]; | ||
| 2714 | u8 table_index[0x18]; | ||
| 2715 | |||
| 2716 | u8 reserved_4[0x20]; | ||
| 2717 | |||
| 2718 | u8 reserved_5[0x13]; | ||
| 2719 | u8 vlan_valid[0x1]; | ||
| 2720 | u8 vlan[0xc]; | ||
| 2721 | |||
| 2722 | struct mlx5_ifc_mac_address_layout_bits mac_address; | ||
| 2723 | |||
| 2724 | u8 reserved_6[0xc0]; | ||
| 2725 | }; | ||
| 2726 | |||
| 2727 | struct mlx5_ifc_set_issi_out_bits { | ||
| 2728 | u8 status[0x8]; | ||
| 2729 | u8 reserved_0[0x18]; | ||
| 2730 | |||
| 2731 | u8 syndrome[0x20]; | ||
| 2732 | |||
| 2733 | u8 reserved_1[0x40]; | ||
| 2734 | }; | ||
| 2735 | |||
| 2736 | struct mlx5_ifc_set_issi_in_bits { | ||
| 2737 | u8 opcode[0x10]; | ||
| 2738 | u8 reserved_0[0x10]; | ||
| 2739 | |||
| 2740 | u8 reserved_1[0x10]; | ||
| 2741 | u8 op_mod[0x10]; | ||
| 2742 | |||
| 2743 | u8 reserved_2[0x10]; | ||
| 2744 | u8 current_issi[0x10]; | ||
| 2745 | |||
| 2746 | u8 reserved_3[0x20]; | ||
| 2747 | }; | ||
| 2748 | |||
| 2749 | struct mlx5_ifc_set_hca_cap_out_bits { | ||
| 2750 | u8 status[0x8]; | ||
| 2751 | u8 reserved_0[0x18]; | ||
| 2752 | |||
| 2753 | u8 syndrome[0x20]; | ||
| 2754 | |||
| 2755 | u8 reserved_1[0x40]; | ||
| 305 | }; | 2756 | }; |
| 306 | 2757 | ||
| 307 | struct mlx5_ifc_set_hca_cap_in_bits { | 2758 | struct mlx5_ifc_set_hca_cap_in_bits { |
| @@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits { | |||
| 313 | 2764 | ||
| 314 | u8 reserved_2[0x40]; | 2765 | u8 reserved_2[0x40]; |
| 315 | 2766 | ||
| 316 | struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; | 2767 | union mlx5_ifc_hca_cap_union_bits capability; |
| 317 | }; | 2768 | }; |
| 318 | 2769 | ||
| 319 | struct mlx5_ifc_query_hca_cap_in_bits { | 2770 | struct mlx5_ifc_set_fte_out_bits { |
| 2771 | u8 status[0x8]; | ||
| 2772 | u8 reserved_0[0x18]; | ||
| 2773 | |||
| 2774 | u8 syndrome[0x20]; | ||
| 2775 | |||
| 2776 | u8 reserved_1[0x40]; | ||
| 2777 | }; | ||
| 2778 | |||
| 2779 | struct mlx5_ifc_set_fte_in_bits { | ||
| 2780 | u8 opcode[0x10]; | ||
| 2781 | u8 reserved_0[0x10]; | ||
| 2782 | |||
| 2783 | u8 reserved_1[0x10]; | ||
| 2784 | u8 op_mod[0x10]; | ||
| 2785 | |||
| 2786 | u8 reserved_2[0x40]; | ||
| 2787 | |||
| 2788 | u8 table_type[0x8]; | ||
| 2789 | u8 reserved_3[0x18]; | ||
| 2790 | |||
| 2791 | u8 reserved_4[0x8]; | ||
| 2792 | u8 table_id[0x18]; | ||
| 2793 | |||
| 2794 | u8 reserved_5[0x40]; | ||
| 2795 | |||
| 2796 | u8 flow_index[0x20]; | ||
| 2797 | |||
| 2798 | u8 reserved_6[0xe0]; | ||
| 2799 | |||
| 2800 | struct mlx5_ifc_flow_context_bits flow_context; | ||
| 2801 | }; | ||
| 2802 | |||
| 2803 | struct mlx5_ifc_rts2rts_qp_out_bits { | ||
| 2804 | u8 status[0x8]; | ||
| 2805 | u8 reserved_0[0x18]; | ||
| 2806 | |||
| 2807 | u8 syndrome[0x20]; | ||
| 2808 | |||
| 2809 | u8 reserved_1[0x40]; | ||
| 2810 | }; | ||
| 2811 | |||
| 2812 | struct mlx5_ifc_rts2rts_qp_in_bits { | ||
| 2813 | u8 opcode[0x10]; | ||
| 2814 | u8 reserved_0[0x10]; | ||
| 2815 | |||
| 2816 | u8 reserved_1[0x10]; | ||
| 2817 | u8 op_mod[0x10]; | ||
| 2818 | |||
| 2819 | u8 reserved_2[0x8]; | ||
| 2820 | u8 qpn[0x18]; | ||
| 2821 | |||
| 2822 | u8 reserved_3[0x20]; | ||
| 2823 | |||
| 2824 | u8 opt_param_mask[0x20]; | ||
| 2825 | |||
| 2826 | u8 reserved_4[0x20]; | ||
| 2827 | |||
| 2828 | struct mlx5_ifc_qpc_bits qpc; | ||
| 2829 | |||
| 2830 | u8 reserved_5[0x80]; | ||
| 2831 | }; | ||
| 2832 | |||
| 2833 | struct mlx5_ifc_rtr2rts_qp_out_bits { | ||
| 2834 | u8 status[0x8]; | ||
| 2835 | u8 reserved_0[0x18]; | ||
| 2836 | |||
| 2837 | u8 syndrome[0x20]; | ||
| 2838 | |||
| 2839 | u8 reserved_1[0x40]; | ||
| 2840 | }; | ||
| 2841 | |||
| 2842 | struct mlx5_ifc_rtr2rts_qp_in_bits { | ||
| 2843 | u8 opcode[0x10]; | ||
| 2844 | u8 reserved_0[0x10]; | ||
| 2845 | |||
| 2846 | u8 reserved_1[0x10]; | ||
| 2847 | u8 op_mod[0x10]; | ||
| 2848 | |||
| 2849 | u8 reserved_2[0x8]; | ||
| 2850 | u8 qpn[0x18]; | ||
| 2851 | |||
| 2852 | u8 reserved_3[0x20]; | ||
| 2853 | |||
| 2854 | u8 opt_param_mask[0x20]; | ||
| 2855 | |||
| 2856 | u8 reserved_4[0x20]; | ||
| 2857 | |||
| 2858 | struct mlx5_ifc_qpc_bits qpc; | ||
| 2859 | |||
| 2860 | u8 reserved_5[0x80]; | ||
| 2861 | }; | ||
| 2862 | |||
| 2863 | struct mlx5_ifc_rst2init_qp_out_bits { | ||
| 2864 | u8 status[0x8]; | ||
| 2865 | u8 reserved_0[0x18]; | ||
| 2866 | |||
| 2867 | u8 syndrome[0x20]; | ||
| 2868 | |||
| 2869 | u8 reserved_1[0x40]; | ||
| 2870 | }; | ||
| 2871 | |||
| 2872 | struct mlx5_ifc_rst2init_qp_in_bits { | ||
| 2873 | u8 opcode[0x10]; | ||
| 2874 | u8 reserved_0[0x10]; | ||
| 2875 | |||
| 2876 | u8 reserved_1[0x10]; | ||
| 2877 | u8 op_mod[0x10]; | ||
| 2878 | |||
| 2879 | u8 reserved_2[0x8]; | ||
| 2880 | u8 qpn[0x18]; | ||
| 2881 | |||
| 2882 | u8 reserved_3[0x20]; | ||
| 2883 | |||
| 2884 | u8 opt_param_mask[0x20]; | ||
| 2885 | |||
| 2886 | u8 reserved_4[0x20]; | ||
| 2887 | |||
| 2888 | struct mlx5_ifc_qpc_bits qpc; | ||
| 2889 | |||
| 2890 | u8 reserved_5[0x80]; | ||
| 2891 | }; | ||
| 2892 | |||
| 2893 | struct mlx5_ifc_query_xrc_srq_out_bits { | ||
| 2894 | u8 status[0x8]; | ||
| 2895 | u8 reserved_0[0x18]; | ||
| 2896 | |||
| 2897 | u8 syndrome[0x20]; | ||
| 2898 | |||
| 2899 | u8 reserved_1[0x40]; | ||
| 2900 | |||
| 2901 | struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; | ||
| 2902 | |||
| 2903 | u8 reserved_2[0x600]; | ||
| 2904 | |||
| 2905 | u8 pas[0][0x40]; | ||
| 2906 | }; | ||
| 2907 | |||
| 2908 | struct mlx5_ifc_query_xrc_srq_in_bits { | ||
| 2909 | u8 opcode[0x10]; | ||
| 2910 | u8 reserved_0[0x10]; | ||
| 2911 | |||
| 2912 | u8 reserved_1[0x10]; | ||
| 2913 | u8 op_mod[0x10]; | ||
| 2914 | |||
| 2915 | u8 reserved_2[0x8]; | ||
| 2916 | u8 xrc_srqn[0x18]; | ||
| 2917 | |||
| 2918 | u8 reserved_3[0x20]; | ||
| 2919 | }; | ||
| 2920 | |||
| 2921 | enum { | ||
| 2922 | MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, | ||
| 2923 | MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, | ||
| 2924 | }; | ||
| 2925 | |||
| 2926 | struct mlx5_ifc_query_vport_state_out_bits { | ||
| 2927 | u8 status[0x8]; | ||
| 2928 | u8 reserved_0[0x18]; | ||
| 2929 | |||
| 2930 | u8 syndrome[0x20]; | ||
| 2931 | |||
| 2932 | u8 reserved_1[0x20]; | ||
| 2933 | |||
| 2934 | u8 reserved_2[0x18]; | ||
| 2935 | u8 admin_state[0x4]; | ||
| 2936 | u8 state[0x4]; | ||
| 2937 | }; | ||
| 2938 | |||
| 2939 | enum { | ||
| 2940 | MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, | ||
| 2941 | }; | ||
| 2942 | |||
| 2943 | struct mlx5_ifc_query_vport_state_in_bits { | ||
| 2944 | u8 opcode[0x10]; | ||
| 2945 | u8 reserved_0[0x10]; | ||
| 2946 | |||
| 2947 | u8 reserved_1[0x10]; | ||
| 2948 | u8 op_mod[0x10]; | ||
| 2949 | |||
| 2950 | u8 other_vport[0x1]; | ||
| 2951 | u8 reserved_2[0xf]; | ||
| 2952 | u8 vport_number[0x10]; | ||
| 2953 | |||
| 2954 | u8 reserved_3[0x20]; | ||
| 2955 | }; | ||
| 2956 | |||
| 2957 | struct mlx5_ifc_query_vport_counter_out_bits { | ||
| 2958 | u8 status[0x8]; | ||
| 2959 | u8 reserved_0[0x18]; | ||
| 2960 | |||
| 2961 | u8 syndrome[0x20]; | ||
| 2962 | |||
| 2963 | u8 reserved_1[0x40]; | ||
| 2964 | |||
| 2965 | struct mlx5_ifc_traffic_counter_bits received_errors; | ||
| 2966 | |||
| 2967 | struct mlx5_ifc_traffic_counter_bits transmit_errors; | ||
| 2968 | |||
| 2969 | struct mlx5_ifc_traffic_counter_bits received_ib_unicast; | ||
| 2970 | |||
| 2971 | struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; | ||
| 2972 | |||
| 2973 | struct mlx5_ifc_traffic_counter_bits received_ib_multicast; | ||
| 2974 | |||
| 2975 | struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; | ||
| 2976 | |||
| 2977 | struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; | ||
| 2978 | |||
| 2979 | struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; | ||
| 2980 | |||
| 2981 | struct mlx5_ifc_traffic_counter_bits received_eth_unicast; | ||
| 2982 | |||
| 2983 | struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; | ||
| 2984 | |||
| 2985 | struct mlx5_ifc_traffic_counter_bits received_eth_multicast; | ||
| 2986 | |||
| 2987 | struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; | ||
| 2988 | |||
| 2989 | u8 reserved_2[0xa00]; | ||
| 2990 | }; | ||
| 2991 | |||
| 2992 | enum { | ||
| 2993 | MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, | ||
| 2994 | }; | ||
| 2995 | |||
| 2996 | struct mlx5_ifc_query_vport_counter_in_bits { | ||
| 2997 | u8 opcode[0x10]; | ||
| 2998 | u8 reserved_0[0x10]; | ||
| 2999 | |||
| 3000 | u8 reserved_1[0x10]; | ||
| 3001 | u8 op_mod[0x10]; | ||
| 3002 | |||
| 3003 | u8 other_vport[0x1]; | ||
| 3004 | u8 reserved_2[0xf]; | ||
| 3005 | u8 vport_number[0x10]; | ||
| 3006 | |||
| 3007 | u8 reserved_3[0x60]; | ||
| 3008 | |||
| 3009 | u8 clear[0x1]; | ||
| 3010 | u8 reserved_4[0x1f]; | ||
| 3011 | |||
| 3012 | u8 reserved_5[0x20]; | ||
| 3013 | }; | ||
| 3014 | |||
| 3015 | struct mlx5_ifc_query_tis_out_bits { | ||
| 3016 | u8 status[0x8]; | ||
| 3017 | u8 reserved_0[0x18]; | ||
| 3018 | |||
| 3019 | u8 syndrome[0x20]; | ||
| 3020 | |||
| 3021 | u8 reserved_1[0x40]; | ||
| 3022 | |||
| 3023 | struct mlx5_ifc_tisc_bits tis_context; | ||
| 3024 | }; | ||
| 3025 | |||
| 3026 | struct mlx5_ifc_query_tis_in_bits { | ||
| 3027 | u8 opcode[0x10]; | ||
| 3028 | u8 reserved_0[0x10]; | ||
| 3029 | |||
| 3030 | u8 reserved_1[0x10]; | ||
| 3031 | u8 op_mod[0x10]; | ||
| 3032 | |||
| 3033 | u8 reserved_2[0x8]; | ||
| 3034 | u8 tisn[0x18]; | ||
| 3035 | |||
| 3036 | u8 reserved_3[0x20]; | ||
| 3037 | }; | ||
| 3038 | |||
| 3039 | struct mlx5_ifc_query_tir_out_bits { | ||
| 3040 | u8 status[0x8]; | ||
| 3041 | u8 reserved_0[0x18]; | ||
| 3042 | |||
| 3043 | u8 syndrome[0x20]; | ||
| 3044 | |||
| 3045 | u8 reserved_1[0xc0]; | ||
| 3046 | |||
| 3047 | struct mlx5_ifc_tirc_bits tir_context; | ||
| 3048 | }; | ||
| 3049 | |||
| 3050 | struct mlx5_ifc_query_tir_in_bits { | ||
| 3051 | u8 opcode[0x10]; | ||
| 3052 | u8 reserved_0[0x10]; | ||
| 3053 | |||
| 3054 | u8 reserved_1[0x10]; | ||
| 3055 | u8 op_mod[0x10]; | ||
| 3056 | |||
| 3057 | u8 reserved_2[0x8]; | ||
| 3058 | u8 tirn[0x18]; | ||
| 3059 | |||
| 3060 | u8 reserved_3[0x20]; | ||
| 3061 | }; | ||
| 3062 | |||
| 3063 | struct mlx5_ifc_query_srq_out_bits { | ||
| 3064 | u8 status[0x8]; | ||
| 3065 | u8 reserved_0[0x18]; | ||
| 3066 | |||
| 3067 | u8 syndrome[0x20]; | ||
| 3068 | |||
| 3069 | u8 reserved_1[0x40]; | ||
| 3070 | |||
| 3071 | struct mlx5_ifc_srqc_bits srq_context_entry; | ||
| 3072 | |||
| 3073 | u8 reserved_2[0x600]; | ||
| 3074 | |||
| 3075 | u8 pas[0][0x40]; | ||
| 3076 | }; | ||
| 3077 | |||
| 3078 | struct mlx5_ifc_query_srq_in_bits { | ||
| 3079 | u8 opcode[0x10]; | ||
| 3080 | u8 reserved_0[0x10]; | ||
| 3081 | |||
| 3082 | u8 reserved_1[0x10]; | ||
| 3083 | u8 op_mod[0x10]; | ||
| 3084 | |||
| 3085 | u8 reserved_2[0x8]; | ||
| 3086 | u8 srqn[0x18]; | ||
| 3087 | |||
| 3088 | u8 reserved_3[0x20]; | ||
| 3089 | }; | ||
| 3090 | |||
| 3091 | struct mlx5_ifc_query_sq_out_bits { | ||
| 3092 | u8 status[0x8]; | ||
| 3093 | u8 reserved_0[0x18]; | ||
| 3094 | |||
| 3095 | u8 syndrome[0x20]; | ||
| 3096 | |||
| 3097 | u8 reserved_1[0xc0]; | ||
| 3098 | |||
| 3099 | struct mlx5_ifc_sqc_bits sq_context; | ||
| 3100 | }; | ||
| 3101 | |||
| 3102 | struct mlx5_ifc_query_sq_in_bits { | ||
| 3103 | u8 opcode[0x10]; | ||
| 3104 | u8 reserved_0[0x10]; | ||
| 3105 | |||
| 3106 | u8 reserved_1[0x10]; | ||
| 3107 | u8 op_mod[0x10]; | ||
| 3108 | |||
| 3109 | u8 reserved_2[0x8]; | ||
| 3110 | u8 sqn[0x18]; | ||
| 3111 | |||
| 3112 | u8 reserved_3[0x20]; | ||
| 3113 | }; | ||
| 3114 | |||
| 3115 | struct mlx5_ifc_query_special_contexts_out_bits { | ||
| 3116 | u8 status[0x8]; | ||
| 3117 | u8 reserved_0[0x18]; | ||
| 3118 | |||
| 3119 | u8 syndrome[0x20]; | ||
| 3120 | |||
| 3121 | u8 reserved_1[0x20]; | ||
| 3122 | |||
| 3123 | u8 resd_lkey[0x20]; | ||
| 3124 | }; | ||
| 3125 | |||
| 3126 | struct mlx5_ifc_query_special_contexts_in_bits { | ||
| 3127 | u8 opcode[0x10]; | ||
| 3128 | u8 reserved_0[0x10]; | ||
| 3129 | |||
| 3130 | u8 reserved_1[0x10]; | ||
| 3131 | u8 op_mod[0x10]; | ||
| 3132 | |||
| 3133 | u8 reserved_2[0x40]; | ||
| 3134 | }; | ||
| 3135 | |||
| 3136 | struct mlx5_ifc_query_rqt_out_bits { | ||
| 3137 | u8 status[0x8]; | ||
| 3138 | u8 reserved_0[0x18]; | ||
| 3139 | |||
| 3140 | u8 syndrome[0x20]; | ||
| 3141 | |||
| 3142 | u8 reserved_1[0xc0]; | ||
| 3143 | |||
| 3144 | struct mlx5_ifc_rqtc_bits rqt_context; | ||
| 3145 | }; | ||
| 3146 | |||
| 3147 | struct mlx5_ifc_query_rqt_in_bits { | ||
| 3148 | u8 opcode[0x10]; | ||
| 3149 | u8 reserved_0[0x10]; | ||
| 3150 | |||
| 3151 | u8 reserved_1[0x10]; | ||
| 3152 | u8 op_mod[0x10]; | ||
| 3153 | |||
| 3154 | u8 reserved_2[0x8]; | ||
| 3155 | u8 rqtn[0x18]; | ||
| 3156 | |||
| 3157 | u8 reserved_3[0x20]; | ||
| 3158 | }; | ||
| 3159 | |||
| 3160 | struct mlx5_ifc_query_rq_out_bits { | ||
| 3161 | u8 status[0x8]; | ||
| 3162 | u8 reserved_0[0x18]; | ||
| 3163 | |||
| 3164 | u8 syndrome[0x20]; | ||
| 3165 | |||
| 3166 | u8 reserved_1[0xc0]; | ||
| 3167 | |||
| 3168 | struct mlx5_ifc_rqc_bits rq_context; | ||
| 3169 | }; | ||
| 3170 | |||
| 3171 | struct mlx5_ifc_query_rq_in_bits { | ||
| 3172 | u8 opcode[0x10]; | ||
| 3173 | u8 reserved_0[0x10]; | ||
| 3174 | |||
| 3175 | u8 reserved_1[0x10]; | ||
| 3176 | u8 op_mod[0x10]; | ||
| 3177 | |||
| 3178 | u8 reserved_2[0x8]; | ||
| 3179 | u8 rqn[0x18]; | ||
| 3180 | |||
| 3181 | u8 reserved_3[0x20]; | ||
| 3182 | }; | ||
| 3183 | |||
| 3184 | struct mlx5_ifc_query_roce_address_out_bits { | ||
| 3185 | u8 status[0x8]; | ||
| 3186 | u8 reserved_0[0x18]; | ||
| 3187 | |||
| 3188 | u8 syndrome[0x20]; | ||
| 3189 | |||
| 3190 | u8 reserved_1[0x40]; | ||
| 3191 | |||
| 3192 | struct mlx5_ifc_roce_addr_layout_bits roce_address; | ||
| 3193 | }; | ||
| 3194 | |||
| 3195 | struct mlx5_ifc_query_roce_address_in_bits { | ||
| 3196 | u8 opcode[0x10]; | ||
| 3197 | u8 reserved_0[0x10]; | ||
| 3198 | |||
| 3199 | u8 reserved_1[0x10]; | ||
| 3200 | u8 op_mod[0x10]; | ||
| 3201 | |||
| 3202 | u8 roce_address_index[0x10]; | ||
| 3203 | u8 reserved_2[0x10]; | ||
| 3204 | |||
| 3205 | u8 reserved_3[0x20]; | ||
| 3206 | }; | ||
| 3207 | |||
| 3208 | struct mlx5_ifc_query_rmp_out_bits { | ||
| 3209 | u8 status[0x8]; | ||
| 3210 | u8 reserved_0[0x18]; | ||
| 3211 | |||
| 3212 | u8 syndrome[0x20]; | ||
| 3213 | |||
| 3214 | u8 reserved_1[0xc0]; | ||
| 3215 | |||
| 3216 | struct mlx5_ifc_rmpc_bits rmp_context; | ||
| 3217 | }; | ||
| 3218 | |||
| 3219 | struct mlx5_ifc_query_rmp_in_bits { | ||
| 3220 | u8 opcode[0x10]; | ||
| 3221 | u8 reserved_0[0x10]; | ||
| 3222 | |||
| 3223 | u8 reserved_1[0x10]; | ||
| 3224 | u8 op_mod[0x10]; | ||
| 3225 | |||
| 3226 | u8 reserved_2[0x8]; | ||
| 3227 | u8 rmpn[0x18]; | ||
| 3228 | |||
| 3229 | u8 reserved_3[0x20]; | ||
| 3230 | }; | ||
| 3231 | |||
| 3232 | struct mlx5_ifc_query_qp_out_bits { | ||
| 3233 | u8 status[0x8]; | ||
| 3234 | u8 reserved_0[0x18]; | ||
| 3235 | |||
| 3236 | u8 syndrome[0x20]; | ||
| 3237 | |||
| 3238 | u8 reserved_1[0x40]; | ||
| 3239 | |||
| 3240 | u8 opt_param_mask[0x20]; | ||
| 3241 | |||
| 3242 | u8 reserved_2[0x20]; | ||
| 3243 | |||
| 3244 | struct mlx5_ifc_qpc_bits qpc; | ||
| 3245 | |||
| 3246 | u8 reserved_3[0x80]; | ||
| 3247 | |||
| 3248 | u8 pas[0][0x40]; | ||
| 3249 | }; | ||
| 3250 | |||
| 3251 | struct mlx5_ifc_query_qp_in_bits { | ||
| 3252 | u8 opcode[0x10]; | ||
| 3253 | u8 reserved_0[0x10]; | ||
| 3254 | |||
| 3255 | u8 reserved_1[0x10]; | ||
| 3256 | u8 op_mod[0x10]; | ||
| 3257 | |||
| 3258 | u8 reserved_2[0x8]; | ||
| 3259 | u8 qpn[0x18]; | ||
| 3260 | |||
| 3261 | u8 reserved_3[0x20]; | ||
| 3262 | }; | ||
| 3263 | |||
| 3264 | struct mlx5_ifc_query_q_counter_out_bits { | ||
| 3265 | u8 status[0x8]; | ||
| 3266 | u8 reserved_0[0x18]; | ||
| 3267 | |||
| 3268 | u8 syndrome[0x20]; | ||
| 3269 | |||
| 3270 | u8 reserved_1[0x40]; | ||
| 3271 | |||
| 3272 | u8 rx_write_requests[0x20]; | ||
| 3273 | |||
| 3274 | u8 reserved_2[0x20]; | ||
| 3275 | |||
| 3276 | u8 rx_read_requests[0x20]; | ||
| 3277 | |||
| 3278 | u8 reserved_3[0x20]; | ||
| 3279 | |||
| 3280 | u8 rx_atomic_requests[0x20]; | ||
| 3281 | |||
| 3282 | u8 reserved_4[0x20]; | ||
| 3283 | |||
| 3284 | u8 rx_dct_connect[0x20]; | ||
| 3285 | |||
| 3286 | u8 reserved_5[0x20]; | ||
| 3287 | |||
| 3288 | u8 out_of_buffer[0x20]; | ||
| 3289 | |||
| 3290 | u8 reserved_6[0x20]; | ||
| 3291 | |||
| 3292 | u8 out_of_sequence[0x20]; | ||
| 3293 | |||
| 3294 | u8 reserved_7[0x620]; | ||
| 3295 | }; | ||
| 3296 | |||
| 3297 | struct mlx5_ifc_query_q_counter_in_bits { | ||
| 3298 | u8 opcode[0x10]; | ||
| 3299 | u8 reserved_0[0x10]; | ||
| 3300 | |||
| 3301 | u8 reserved_1[0x10]; | ||
| 3302 | u8 op_mod[0x10]; | ||
| 3303 | |||
| 3304 | u8 reserved_2[0x80]; | ||
| 3305 | |||
| 3306 | u8 clear[0x1]; | ||
| 3307 | u8 reserved_3[0x1f]; | ||
| 3308 | |||
| 3309 | u8 reserved_4[0x18]; | ||
| 3310 | u8 counter_set_id[0x8]; | ||
| 3311 | }; | ||
| 3312 | |||
| 3313 | struct mlx5_ifc_query_pages_out_bits { | ||
| 3314 | u8 status[0x8]; | ||
| 3315 | u8 reserved_0[0x18]; | ||
| 3316 | |||
| 3317 | u8 syndrome[0x20]; | ||
| 3318 | |||
| 3319 | u8 reserved_1[0x10]; | ||
| 3320 | u8 function_id[0x10]; | ||
| 3321 | |||
| 3322 | u8 num_pages[0x20]; | ||
| 3323 | }; | ||
| 3324 | |||
| 3325 | enum { | ||
| 3326 | MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, | ||
| 3327 | MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, | ||
| 3328 | MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, | ||
| 3329 | }; | ||
| 3330 | |||
| 3331 | struct mlx5_ifc_query_pages_in_bits { | ||
| 3332 | u8 opcode[0x10]; | ||
| 3333 | u8 reserved_0[0x10]; | ||
| 3334 | |||
| 3335 | u8 reserved_1[0x10]; | ||
| 3336 | u8 op_mod[0x10]; | ||
| 3337 | |||
| 3338 | u8 reserved_2[0x10]; | ||
| 3339 | u8 function_id[0x10]; | ||
| 3340 | |||
| 3341 | u8 reserved_3[0x20]; | ||
| 3342 | }; | ||
| 3343 | |||
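/*
 * Illustrative sketch, not part of this diff: commands described by the
 * in/out layouts above are normally built and parsed with the
 * MLX5_SET()/MLX5_GET() helpers and sized with MLX5_ST_SZ_DW().  The
 * opcode constant, mlx5_cmd_exec() call and the dev/npages variables
 * below are assumptions made for illustration, reusing the op_mod
 * values defined just above.
 */
u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
s32 npages;
int err;

MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
MLX5_SET(query_pages_in, in, op_mod, MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
	npages = (s32)MLX5_GET(query_pages_out, out, num_pages);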
| 3344 | struct mlx5_ifc_query_nic_vport_context_out_bits { | ||
| 3345 | u8 status[0x8]; | ||
| 3346 | u8 reserved_0[0x18]; | ||
| 3347 | |||
| 3348 | u8 syndrome[0x20]; | ||
| 3349 | |||
| 3350 | u8 reserved_1[0x40]; | ||
| 3351 | |||
| 3352 | struct mlx5_ifc_nic_vport_context_bits nic_vport_context; | ||
| 3353 | }; | ||
| 3354 | |||
| 3355 | struct mlx5_ifc_query_nic_vport_context_in_bits { | ||
| 3356 | u8 opcode[0x10]; | ||
| 3357 | u8 reserved_0[0x10]; | ||
| 3358 | |||
| 3359 | u8 reserved_1[0x10]; | ||
| 3360 | u8 op_mod[0x10]; | ||
| 3361 | |||
| 3362 | u8 other_vport[0x1]; | ||
| 3363 | u8 reserved_2[0xf]; | ||
| 3364 | u8 vport_number[0x10]; | ||
| 3365 | |||
| 3366 | u8 reserved_3[0x5]; | ||
| 3367 | u8 allowed_list_type[0x3]; | ||
| 3368 | u8 reserved_4[0x18]; | ||
| 3369 | }; | ||
| 3370 | |||
| 3371 | struct mlx5_ifc_query_mkey_out_bits { | ||
| 3372 | u8 status[0x8]; | ||
| 3373 | u8 reserved_0[0x18]; | ||
| 3374 | |||
| 3375 | u8 syndrome[0x20]; | ||
| 3376 | |||
| 3377 | u8 reserved_1[0x40]; | ||
| 3378 | |||
| 3379 | struct mlx5_ifc_mkc_bits memory_key_mkey_entry; | ||
| 3380 | |||
| 3381 | u8 reserved_2[0x600]; | ||
| 3382 | |||
| 3383 | u8 bsf0_klm0_pas_mtt0_1[16][0x8]; | ||
| 3384 | |||
| 3385 | u8 bsf1_klm1_pas_mtt2_3[16][0x8]; | ||
| 3386 | }; | ||
| 3387 | |||
| 3388 | struct mlx5_ifc_query_mkey_in_bits { | ||
| 3389 | u8 opcode[0x10]; | ||
| 3390 | u8 reserved_0[0x10]; | ||
| 3391 | |||
| 3392 | u8 reserved_1[0x10]; | ||
| 3393 | u8 op_mod[0x10]; | ||
| 3394 | |||
| 3395 | u8 reserved_2[0x8]; | ||
| 3396 | u8 mkey_index[0x18]; | ||
| 3397 | |||
| 3398 | u8 pg_access[0x1]; | ||
| 3399 | u8 reserved_3[0x1f]; | ||
| 3400 | }; | ||
| 3401 | |||
| 3402 | struct mlx5_ifc_query_mad_demux_out_bits { | ||
| 3403 | u8 status[0x8]; | ||
| 3404 | u8 reserved_0[0x18]; | ||
| 3405 | |||
| 3406 | u8 syndrome[0x20]; | ||
| 3407 | |||
| 3408 | u8 reserved_1[0x40]; | ||
| 3409 | |||
| 3410 | u8 mad_dumux_parameters_block[0x20]; | ||
| 3411 | }; | ||
| 3412 | |||
| 3413 | struct mlx5_ifc_query_mad_demux_in_bits { | ||
| 320 | u8 opcode[0x10]; | 3414 | u8 opcode[0x10]; |
| 321 | u8 reserved_0[0x10]; | 3415 | u8 reserved_0[0x10]; |
| 322 | 3416 | ||
| @@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits { | |||
| 326 | u8 reserved_2[0x40]; | 3420 | u8 reserved_2[0x40]; |
| 327 | }; | 3421 | }; |
| 328 | 3422 | ||
| 3423 | struct mlx5_ifc_query_l2_table_entry_out_bits { | ||
| 3424 | u8 status[0x8]; | ||
| 3425 | u8 reserved_0[0x18]; | ||
| 3426 | |||
| 3427 | u8 syndrome[0x20]; | ||
| 3428 | |||
| 3429 | u8 reserved_1[0xa0]; | ||
| 3430 | |||
| 3431 | u8 reserved_2[0x13]; | ||
| 3432 | u8 vlan_valid[0x1]; | ||
| 3433 | u8 vlan[0xc]; | ||
| 3434 | |||
| 3435 | struct mlx5_ifc_mac_address_layout_bits mac_address; | ||
| 3436 | |||
| 3437 | u8 reserved_3[0xc0]; | ||
| 3438 | }; | ||
| 3439 | |||
| 3440 | struct mlx5_ifc_query_l2_table_entry_in_bits { | ||
| 3441 | u8 opcode[0x10]; | ||
| 3442 | u8 reserved_0[0x10]; | ||
| 3443 | |||
| 3444 | u8 reserved_1[0x10]; | ||
| 3445 | u8 op_mod[0x10]; | ||
| 3446 | |||
| 3447 | u8 reserved_2[0x60]; | ||
| 3448 | |||
| 3449 | u8 reserved_3[0x8]; | ||
| 3450 | u8 table_index[0x18]; | ||
| 3451 | |||
| 3452 | u8 reserved_4[0x140]; | ||
| 3453 | }; | ||
| 3454 | |||
| 3455 | struct mlx5_ifc_query_issi_out_bits { | ||
| 3456 | u8 status[0x8]; | ||
| 3457 | u8 reserved_0[0x18]; | ||
| 3458 | |||
| 3459 | u8 syndrome[0x20]; | ||
| 3460 | |||
| 3461 | u8 reserved_1[0x10]; | ||
| 3462 | u8 current_issi[0x10]; | ||
| 3463 | |||
| 3464 | u8 reserved_2[0xa0]; | ||
| 3465 | |||
| 3466 | u8 supported_issi_reserved[76][0x8]; | ||
| 3467 | u8 supported_issi_dw0[0x20]; | ||
| 3468 | }; | ||
| 3469 | |||
| 3470 | struct mlx5_ifc_query_issi_in_bits { | ||
| 3471 | u8 opcode[0x10]; | ||
| 3472 | u8 reserved_0[0x10]; | ||
| 3473 | |||
| 3474 | u8 reserved_1[0x10]; | ||
| 3475 | u8 op_mod[0x10]; | ||
| 3476 | |||
| 3477 | u8 reserved_2[0x40]; | ||
| 3478 | }; | ||
| 3479 | |||
| 3480 | struct mlx5_ifc_query_hca_vport_pkey_out_bits { | ||
| 3481 | u8 status[0x8]; | ||
| 3482 | u8 reserved_0[0x18]; | ||
| 3483 | |||
| 3484 | u8 syndrome[0x20]; | ||
| 3485 | |||
| 3486 | u8 reserved_1[0x40]; | ||
| 3487 | |||
| 3488 | struct mlx5_ifc_pkey_bits pkey[0]; | ||
| 3489 | }; | ||
| 3490 | |||
| 3491 | struct mlx5_ifc_query_hca_vport_pkey_in_bits { | ||
| 3492 | u8 opcode[0x10]; | ||
| 3493 | u8 reserved_0[0x10]; | ||
| 3494 | |||
| 3495 | u8 reserved_1[0x10]; | ||
| 3496 | u8 op_mod[0x10]; | ||
| 3497 | |||
| 3498 | u8 other_vport[0x1]; | ||
| 3499 | u8 reserved_2[0xb]; | ||
| 3500 | u8 port_num[0x4]; | ||
| 3501 | u8 vport_number[0x10]; | ||
| 3502 | |||
| 3503 | u8 reserved_3[0x10]; | ||
| 3504 | u8 pkey_index[0x10]; | ||
| 3505 | }; | ||
| 3506 | |||
| 3507 | struct mlx5_ifc_query_hca_vport_gid_out_bits { | ||
| 3508 | u8 status[0x8]; | ||
| 3509 | u8 reserved_0[0x18]; | ||
| 3510 | |||
| 3511 | u8 syndrome[0x20]; | ||
| 3512 | |||
| 3513 | u8 reserved_1[0x20]; | ||
| 3514 | |||
| 3515 | u8 gids_num[0x10]; | ||
| 3516 | u8 reserved_2[0x10]; | ||
| 3517 | |||
| 3518 | struct mlx5_ifc_array128_auto_bits gid[0]; | ||
| 3519 | }; | ||
| 3520 | |||
| 3521 | struct mlx5_ifc_query_hca_vport_gid_in_bits { | ||
| 3522 | u8 opcode[0x10]; | ||
| 3523 | u8 reserved_0[0x10]; | ||
| 3524 | |||
| 3525 | u8 reserved_1[0x10]; | ||
| 3526 | u8 op_mod[0x10]; | ||
| 3527 | |||
| 3528 | u8 other_vport[0x1]; | ||
| 3529 | u8 reserved_2[0xb]; | ||
| 3530 | u8 port_num[0x4]; | ||
| 3531 | u8 vport_number[0x10]; | ||
| 3532 | |||
| 3533 | u8 reserved_3[0x10]; | ||
| 3534 | u8 gid_index[0x10]; | ||
| 3535 | }; | ||
| 3536 | |||
| 3537 | struct mlx5_ifc_query_hca_vport_context_out_bits { | ||
| 3538 | u8 status[0x8]; | ||
| 3539 | u8 reserved_0[0x18]; | ||
| 3540 | |||
| 3541 | u8 syndrome[0x20]; | ||
| 3542 | |||
| 3543 | u8 reserved_1[0x40]; | ||
| 3544 | |||
| 3545 | struct mlx5_ifc_hca_vport_context_bits hca_vport_context; | ||
| 3546 | }; | ||
| 3547 | |||
| 3548 | struct mlx5_ifc_query_hca_vport_context_in_bits { | ||
| 3549 | u8 opcode[0x10]; | ||
| 3550 | u8 reserved_0[0x10]; | ||
| 3551 | |||
| 3552 | u8 reserved_1[0x10]; | ||
| 3553 | u8 op_mod[0x10]; | ||
| 3554 | |||
| 3555 | u8 other_vport[0x1]; | ||
| 3556 | u8 reserved_2[0xb]; | ||
| 3557 | u8 port_num[0x4]; | ||
| 3558 | u8 vport_number[0x10]; | ||
| 3559 | |||
| 3560 | u8 reserved_3[0x20]; | ||
| 3561 | }; | ||
| 3562 | |||
| 329 | struct mlx5_ifc_query_hca_cap_out_bits { | 3563 | struct mlx5_ifc_query_hca_cap_out_bits { |
| 330 | u8 status[0x8]; | 3564 | u8 status[0x8]; |
| 331 | u8 reserved_0[0x18]; | 3565 | u8 reserved_0[0x18]; |
| @@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits { | |||
| 334 | 3568 | ||
| 335 | u8 reserved_1[0x40]; | 3569 | u8 reserved_1[0x40]; |
| 336 | 3570 | ||
| 337 | u8 capability_struct[256][0x8]; | 3571 | union mlx5_ifc_hca_cap_union_bits capability; |
| 338 | }; | 3572 | }; |
| 339 | 3573 | ||
| 340 | struct mlx5_ifc_set_hca_cap_out_bits { | 3574 | struct mlx5_ifc_query_hca_cap_in_bits { |
| 3575 | u8 opcode[0x10]; | ||
| 3576 | u8 reserved_0[0x10]; | ||
| 3577 | |||
| 3578 | u8 reserved_1[0x10]; | ||
| 3579 | u8 op_mod[0x10]; | ||
| 3580 | |||
| 3581 | u8 reserved_2[0x40]; | ||
| 3582 | }; | ||
| 3583 | |||
| 3584 | struct mlx5_ifc_query_flow_table_out_bits { | ||
| 3585 | u8 status[0x8]; | ||
| 3586 | u8 reserved_0[0x18]; | ||
| 3587 | |||
| 3588 | u8 syndrome[0x20]; | ||
| 3589 | |||
| 3590 | u8 reserved_1[0x80]; | ||
| 3591 | |||
| 3592 | u8 reserved_2[0x8]; | ||
| 3593 | u8 level[0x8]; | ||
| 3594 | u8 reserved_3[0x8]; | ||
| 3595 | u8 log_size[0x8]; | ||
| 3596 | |||
| 3597 | u8 reserved_4[0x120]; | ||
| 3598 | }; | ||
| 3599 | |||
| 3600 | struct mlx5_ifc_query_flow_table_in_bits { | ||
| 3601 | u8 opcode[0x10]; | ||
| 3602 | u8 reserved_0[0x10]; | ||
| 3603 | |||
| 3604 | u8 reserved_1[0x10]; | ||
| 3605 | u8 op_mod[0x10]; | ||
| 3606 | |||
| 3607 | u8 reserved_2[0x40]; | ||
| 3608 | |||
| 3609 | u8 table_type[0x8]; | ||
| 3610 | u8 reserved_3[0x18]; | ||
| 3611 | |||
| 3612 | u8 reserved_4[0x8]; | ||
| 3613 | u8 table_id[0x18]; | ||
| 3614 | |||
| 3615 | u8 reserved_5[0x140]; | ||
| 3616 | }; | ||
| 3617 | |||
| 3618 | struct mlx5_ifc_query_fte_out_bits { | ||
| 3619 | u8 status[0x8]; | ||
| 3620 | u8 reserved_0[0x18]; | ||
| 3621 | |||
| 3622 | u8 syndrome[0x20]; | ||
| 3623 | |||
| 3624 | u8 reserved_1[0x1c0]; | ||
| 3625 | |||
| 3626 | struct mlx5_ifc_flow_context_bits flow_context; | ||
| 3627 | }; | ||
| 3628 | |||
| 3629 | struct mlx5_ifc_query_fte_in_bits { | ||
| 3630 | u8 opcode[0x10]; | ||
| 3631 | u8 reserved_0[0x10]; | ||
| 3632 | |||
| 3633 | u8 reserved_1[0x10]; | ||
| 3634 | u8 op_mod[0x10]; | ||
| 3635 | |||
| 3636 | u8 reserved_2[0x40]; | ||
| 3637 | |||
| 3638 | u8 table_type[0x8]; | ||
| 3639 | u8 reserved_3[0x18]; | ||
| 3640 | |||
| 3641 | u8 reserved_4[0x8]; | ||
| 3642 | u8 table_id[0x18]; | ||
| 3643 | |||
| 3644 | u8 reserved_5[0x40]; | ||
| 3645 | |||
| 3646 | u8 flow_index[0x20]; | ||
| 3647 | |||
| 3648 | u8 reserved_6[0xe0]; | ||
| 3649 | }; | ||
| 3650 | |||
| 3651 | enum { | ||
| 3652 | MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, | ||
| 3653 | MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, | ||
| 3654 | MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, | ||
| 3655 | }; | ||
| 3656 | |||
| 3657 | struct mlx5_ifc_query_flow_group_out_bits { | ||
| 3658 | u8 status[0x8]; | ||
| 3659 | u8 reserved_0[0x18]; | ||
| 3660 | |||
| 3661 | u8 syndrome[0x20]; | ||
| 3662 | |||
| 3663 | u8 reserved_1[0xa0]; | ||
| 3664 | |||
| 3665 | u8 start_flow_index[0x20]; | ||
| 3666 | |||
| 3667 | u8 reserved_2[0x20]; | ||
| 3668 | |||
| 3669 | u8 end_flow_index[0x20]; | ||
| 3670 | |||
| 3671 | u8 reserved_3[0xa0]; | ||
| 3672 | |||
| 3673 | u8 reserved_4[0x18]; | ||
| 3674 | u8 match_criteria_enable[0x8]; | ||
| 3675 | |||
| 3676 | struct mlx5_ifc_fte_match_param_bits match_criteria; | ||
| 3677 | |||
| 3678 | u8 reserved_5[0xe00]; | ||
| 3679 | }; | ||
| 3680 | |||
| 3681 | struct mlx5_ifc_query_flow_group_in_bits { | ||
| 3682 | u8 opcode[0x10]; | ||
| 3683 | u8 reserved_0[0x10]; | ||
| 3684 | |||
| 3685 | u8 reserved_1[0x10]; | ||
| 3686 | u8 op_mod[0x10]; | ||
| 3687 | |||
| 3688 | u8 reserved_2[0x40]; | ||
| 3689 | |||
| 3690 | u8 table_type[0x8]; | ||
| 3691 | u8 reserved_3[0x18]; | ||
| 3692 | |||
| 3693 | u8 reserved_4[0x8]; | ||
| 3694 | u8 table_id[0x18]; | ||
| 3695 | |||
| 3696 | u8 group_id[0x20]; | ||
| 3697 | |||
| 3698 | u8 reserved_5[0x120]; | ||
| 3699 | }; | ||
| 3700 | |||
| 3701 | struct mlx5_ifc_query_eq_out_bits { | ||
| 3702 | u8 status[0x8]; | ||
| 3703 | u8 reserved_0[0x18]; | ||
| 3704 | |||
| 3705 | u8 syndrome[0x20]; | ||
| 3706 | |||
| 3707 | u8 reserved_1[0x40]; | ||
| 3708 | |||
| 3709 | struct mlx5_ifc_eqc_bits eq_context_entry; | ||
| 3710 | |||
| 3711 | u8 reserved_2[0x40]; | ||
| 3712 | |||
| 3713 | u8 event_bitmask[0x40]; | ||
| 3714 | |||
| 3715 | u8 reserved_3[0x580]; | ||
| 3716 | |||
| 3717 | u8 pas[0][0x40]; | ||
| 3718 | }; | ||
| 3719 | |||
| 3720 | struct mlx5_ifc_query_eq_in_bits { | ||
| 3721 | u8 opcode[0x10]; | ||
| 3722 | u8 reserved_0[0x10]; | ||
| 3723 | |||
| 3724 | u8 reserved_1[0x10]; | ||
| 3725 | u8 op_mod[0x10]; | ||
| 3726 | |||
| 3727 | u8 reserved_2[0x18]; | ||
| 3728 | u8 eq_number[0x8]; | ||
| 3729 | |||
| 3730 | u8 reserved_3[0x20]; | ||
| 3731 | }; | ||
| 3732 | |||
| 3733 | struct mlx5_ifc_query_dct_out_bits { | ||
| 3734 | u8 status[0x8]; | ||
| 3735 | u8 reserved_0[0x18]; | ||
| 3736 | |||
| 3737 | u8 syndrome[0x20]; | ||
| 3738 | |||
| 3739 | u8 reserved_1[0x40]; | ||
| 3740 | |||
| 3741 | struct mlx5_ifc_dctc_bits dct_context_entry; | ||
| 3742 | |||
| 3743 | u8 reserved_2[0x180]; | ||
| 3744 | }; | ||
| 3745 | |||
| 3746 | struct mlx5_ifc_query_dct_in_bits { | ||
| 3747 | u8 opcode[0x10]; | ||
| 3748 | u8 reserved_0[0x10]; | ||
| 3749 | |||
| 3750 | u8 reserved_1[0x10]; | ||
| 3751 | u8 op_mod[0x10]; | ||
| 3752 | |||
| 3753 | u8 reserved_2[0x8]; | ||
| 3754 | u8 dctn[0x18]; | ||
| 3755 | |||
| 3756 | u8 reserved_3[0x20]; | ||
| 3757 | }; | ||
| 3758 | |||
| 3759 | struct mlx5_ifc_query_cq_out_bits { | ||
| 341 | u8 status[0x8]; | 3760 | u8 status[0x8]; |
| 342 | u8 reserved_0[0x18]; | 3761 | u8 reserved_0[0x18]; |
| 343 | 3762 | ||
| 344 | u8 syndrome[0x20]; | 3763 | u8 syndrome[0x20]; |
| 345 | 3764 | ||
| 346 | u8 reserved_1[0x40]; | 3765 | u8 reserved_1[0x40]; |
| 3766 | |||
| 3767 | struct mlx5_ifc_cqc_bits cq_context; | ||
| 3768 | |||
| 3769 | u8 reserved_2[0x600]; | ||
| 3770 | |||
| 3771 | u8 pas[0][0x40]; | ||
| 3772 | }; | ||
| 3773 | |||
| 3774 | struct mlx5_ifc_query_cq_in_bits { | ||
| 3775 | u8 opcode[0x10]; | ||
| 3776 | u8 reserved_0[0x10]; | ||
| 3777 | |||
| 3778 | u8 reserved_1[0x10]; | ||
| 3779 | u8 op_mod[0x10]; | ||
| 3780 | |||
| 3781 | u8 reserved_2[0x8]; | ||
| 3782 | u8 cqn[0x18]; | ||
| 3783 | |||
| 3784 | u8 reserved_3[0x20]; | ||
| 3785 | }; | ||
| 3786 | |||
| 3787 | struct mlx5_ifc_query_cong_status_out_bits { | ||
| 3788 | u8 status[0x8]; | ||
| 3789 | u8 reserved_0[0x18]; | ||
| 3790 | |||
| 3791 | u8 syndrome[0x20]; | ||
| 3792 | |||
| 3793 | u8 reserved_1[0x20]; | ||
| 3794 | |||
| 3795 | u8 enable[0x1]; | ||
| 3796 | u8 tag_enable[0x1]; | ||
| 3797 | u8 reserved_2[0x1e]; | ||
| 3798 | }; | ||
| 3799 | |||
| 3800 | struct mlx5_ifc_query_cong_status_in_bits { | ||
| 3801 | u8 opcode[0x10]; | ||
| 3802 | u8 reserved_0[0x10]; | ||
| 3803 | |||
| 3804 | u8 reserved_1[0x10]; | ||
| 3805 | u8 op_mod[0x10]; | ||
| 3806 | |||
| 3807 | u8 reserved_2[0x18]; | ||
| 3808 | u8 priority[0x4]; | ||
| 3809 | u8 cong_protocol[0x4]; | ||
| 3810 | |||
| 3811 | u8 reserved_3[0x20]; | ||
| 3812 | }; | ||
| 3813 | |||
| 3814 | struct mlx5_ifc_query_cong_statistics_out_bits { | ||
| 3815 | u8 status[0x8]; | ||
| 3816 | u8 reserved_0[0x18]; | ||
| 3817 | |||
| 3818 | u8 syndrome[0x20]; | ||
| 3819 | |||
| 3820 | u8 reserved_1[0x40]; | ||
| 3821 | |||
| 3822 | u8 cur_flows[0x20]; | ||
| 3823 | |||
| 3824 | u8 sum_flows[0x20]; | ||
| 3825 | |||
| 3826 | u8 cnp_ignored_high[0x20]; | ||
| 3827 | |||
| 3828 | u8 cnp_ignored_low[0x20]; | ||
| 3829 | |||
| 3830 | u8 cnp_handled_high[0x20]; | ||
| 3831 | |||
| 3832 | u8 cnp_handled_low[0x20]; | ||
| 3833 | |||
| 3834 | u8 reserved_2[0x100]; | ||
| 3835 | |||
| 3836 | u8 time_stamp_high[0x20]; | ||
| 3837 | |||
| 3838 | u8 time_stamp_low[0x20]; | ||
| 3839 | |||
| 3840 | u8 accumulators_period[0x20]; | ||
| 3841 | |||
| 3842 | u8 ecn_marked_roce_packets_high[0x20]; | ||
| 3843 | |||
| 3844 | u8 ecn_marked_roce_packets_low[0x20]; | ||
| 3845 | |||
| 3846 | u8 cnps_sent_high[0x20]; | ||
| 3847 | |||
| 3848 | u8 cnps_sent_low[0x20]; | ||
| 3849 | |||
| 3850 | u8 reserved_3[0x560]; | ||
| 3851 | }; | ||
| 3852 | |||
| 3853 | struct mlx5_ifc_query_cong_statistics_in_bits { | ||
| 3854 | u8 opcode[0x10]; | ||
| 3855 | u8 reserved_0[0x10]; | ||
| 3856 | |||
| 3857 | u8 reserved_1[0x10]; | ||
| 3858 | u8 op_mod[0x10]; | ||
| 3859 | |||
| 3860 | u8 clear[0x1]; | ||
| 3861 | u8 reserved_2[0x1f]; | ||
| 3862 | |||
| 3863 | u8 reserved_3[0x20]; | ||
| 3864 | }; | ||
| 3865 | |||
| 3866 | struct mlx5_ifc_query_cong_params_out_bits { | ||
| 3867 | u8 status[0x8]; | ||
| 3868 | u8 reserved_0[0x18]; | ||
| 3869 | |||
| 3870 | u8 syndrome[0x20]; | ||
| 3871 | |||
| 3872 | u8 reserved_1[0x40]; | ||
| 3873 | |||
| 3874 | union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; | ||
| 3875 | }; | ||
| 3876 | |||
| 3877 | struct mlx5_ifc_query_cong_params_in_bits { | ||
| 3878 | u8 opcode[0x10]; | ||
| 3879 | u8 reserved_0[0x10]; | ||
| 3880 | |||
| 3881 | u8 reserved_1[0x10]; | ||
| 3882 | u8 op_mod[0x10]; | ||
| 3883 | |||
| 3884 | u8 reserved_2[0x1c]; | ||
| 3885 | u8 cong_protocol[0x4]; | ||
| 3886 | |||
| 3887 | u8 reserved_3[0x20]; | ||
| 3888 | }; | ||
| 3889 | |||
| 3890 | struct mlx5_ifc_query_adapter_out_bits { | ||
| 3891 | u8 status[0x8]; | ||
| 3892 | u8 reserved_0[0x18]; | ||
| 3893 | |||
| 3894 | u8 syndrome[0x20]; | ||
| 3895 | |||
| 3896 | u8 reserved_1[0x40]; | ||
| 3897 | |||
| 3898 | struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; | ||
| 3899 | }; | ||
| 3900 | |||
| 3901 | struct mlx5_ifc_query_adapter_in_bits { | ||
| 3902 | u8 opcode[0x10]; | ||
| 3903 | u8 reserved_0[0x10]; | ||
| 3904 | |||
| 3905 | u8 reserved_1[0x10]; | ||
| 3906 | u8 op_mod[0x10]; | ||
| 3907 | |||
| 3908 | u8 reserved_2[0x40]; | ||
| 3909 | }; | ||
| 3910 | |||
| 3911 | struct mlx5_ifc_qp_2rst_out_bits { | ||
| 3912 | u8 status[0x8]; | ||
| 3913 | u8 reserved_0[0x18]; | ||
| 3914 | |||
| 3915 | u8 syndrome[0x20]; | ||
| 3916 | |||
| 3917 | u8 reserved_1[0x40]; | ||
| 3918 | }; | ||
| 3919 | |||
| 3920 | struct mlx5_ifc_qp_2rst_in_bits { | ||
| 3921 | u8 opcode[0x10]; | ||
| 3922 | u8 reserved_0[0x10]; | ||
| 3923 | |||
| 3924 | u8 reserved_1[0x10]; | ||
| 3925 | u8 op_mod[0x10]; | ||
| 3926 | |||
| 3927 | u8 reserved_2[0x8]; | ||
| 3928 | u8 qpn[0x18]; | ||
| 3929 | |||
| 3930 | u8 reserved_3[0x20]; | ||
| 3931 | }; | ||
| 3932 | |||
| 3933 | struct mlx5_ifc_qp_2err_out_bits { | ||
| 3934 | u8 status[0x8]; | ||
| 3935 | u8 reserved_0[0x18]; | ||
| 3936 | |||
| 3937 | u8 syndrome[0x20]; | ||
| 3938 | |||
| 3939 | u8 reserved_1[0x40]; | ||
| 3940 | }; | ||
| 3941 | |||
| 3942 | struct mlx5_ifc_qp_2err_in_bits { | ||
| 3943 | u8 opcode[0x10]; | ||
| 3944 | u8 reserved_0[0x10]; | ||
| 3945 | |||
| 3946 | u8 reserved_1[0x10]; | ||
| 3947 | u8 op_mod[0x10]; | ||
| 3948 | |||
| 3949 | u8 reserved_2[0x8]; | ||
| 3950 | u8 qpn[0x18]; | ||
| 3951 | |||
| 3952 | u8 reserved_3[0x20]; | ||
| 3953 | }; | ||
| 3954 | |||
| 3955 | struct mlx5_ifc_page_fault_resume_out_bits { | ||
| 3956 | u8 status[0x8]; | ||
| 3957 | u8 reserved_0[0x18]; | ||
| 3958 | |||
| 3959 | u8 syndrome[0x20]; | ||
| 3960 | |||
| 3961 | u8 reserved_1[0x40]; | ||
| 3962 | }; | ||
| 3963 | |||
| 3964 | struct mlx5_ifc_page_fault_resume_in_bits { | ||
| 3965 | u8 opcode[0x10]; | ||
| 3966 | u8 reserved_0[0x10]; | ||
| 3967 | |||
| 3968 | u8 reserved_1[0x10]; | ||
| 3969 | u8 op_mod[0x10]; | ||
| 3970 | |||
| 3971 | u8 error[0x1]; | ||
| 3972 | u8 reserved_2[0x4]; | ||
| 3973 | u8 rdma[0x1]; | ||
| 3974 | u8 read_write[0x1]; | ||
| 3975 | u8 req_res[0x1]; | ||
| 3976 | u8 qpn[0x18]; | ||
| 3977 | |||
| 3978 | u8 reserved_3[0x20]; | ||
| 3979 | }; | ||
| 3980 | |||
| 3981 | struct mlx5_ifc_nop_out_bits { | ||
| 3982 | u8 status[0x8]; | ||
| 3983 | u8 reserved_0[0x18]; | ||
| 3984 | |||
| 3985 | u8 syndrome[0x20]; | ||
| 3986 | |||
| 3987 | u8 reserved_1[0x40]; | ||
| 3988 | }; | ||
| 3989 | |||
| 3990 | struct mlx5_ifc_nop_in_bits { | ||
| 3991 | u8 opcode[0x10]; | ||
| 3992 | u8 reserved_0[0x10]; | ||
| 3993 | |||
| 3994 | u8 reserved_1[0x10]; | ||
| 3995 | u8 op_mod[0x10]; | ||
| 3996 | |||
| 3997 | u8 reserved_2[0x40]; | ||
| 3998 | }; | ||
| 3999 | |||
| 4000 | struct mlx5_ifc_modify_vport_state_out_bits { | ||
| 4001 | u8 status[0x8]; | ||
| 4002 | u8 reserved_0[0x18]; | ||
| 4003 | |||
| 4004 | u8 syndrome[0x20]; | ||
| 4005 | |||
| 4006 | u8 reserved_1[0x40]; | ||
| 4007 | }; | ||
| 4008 | |||
| 4009 | struct mlx5_ifc_modify_vport_state_in_bits { | ||
| 4010 | u8 opcode[0x10]; | ||
| 4011 | u8 reserved_0[0x10]; | ||
| 4012 | |||
| 4013 | u8 reserved_1[0x10]; | ||
| 4014 | u8 op_mod[0x10]; | ||
| 4015 | |||
| 4016 | u8 other_vport[0x1]; | ||
| 4017 | u8 reserved_2[0xf]; | ||
| 4018 | u8 vport_number[0x10]; | ||
| 4019 | |||
| 4020 | u8 reserved_3[0x18]; | ||
| 4021 | u8 admin_state[0x4]; | ||
| 4022 | u8 reserved_4[0x4]; | ||
| 4023 | }; | ||
| 4024 | |||
| 4025 | struct mlx5_ifc_modify_tis_out_bits { | ||
| 4026 | u8 status[0x8]; | ||
| 4027 | u8 reserved_0[0x18]; | ||
| 4028 | |||
| 4029 | u8 syndrome[0x20]; | ||
| 4030 | |||
| 4031 | u8 reserved_1[0x40]; | ||
| 4032 | }; | ||
| 4033 | |||
| 4034 | struct mlx5_ifc_modify_tis_in_bits { | ||
| 4035 | u8 opcode[0x10]; | ||
| 4036 | u8 reserved_0[0x10]; | ||
| 4037 | |||
| 4038 | u8 reserved_1[0x10]; | ||
| 4039 | u8 op_mod[0x10]; | ||
| 4040 | |||
| 4041 | u8 reserved_2[0x8]; | ||
| 4042 | u8 tisn[0x18]; | ||
| 4043 | |||
| 4044 | u8 reserved_3[0x20]; | ||
| 4045 | |||
| 4046 | u8 modify_bitmask[0x40]; | ||
| 4047 | |||
| 4048 | u8 reserved_4[0x40]; | ||
| 4049 | |||
| 4050 | struct mlx5_ifc_tisc_bits ctx; | ||
| 4051 | }; | ||
| 4052 | |||
| 4053 | struct mlx5_ifc_modify_tir_out_bits { | ||
| 4054 | u8 status[0x8]; | ||
| 4055 | u8 reserved_0[0x18]; | ||
| 4056 | |||
| 4057 | u8 syndrome[0x20]; | ||
| 4058 | |||
| 4059 | u8 reserved_1[0x40]; | ||
| 4060 | }; | ||
| 4061 | |||
| 4062 | struct mlx5_ifc_modify_tir_in_bits { | ||
| 4063 | u8 opcode[0x10]; | ||
| 4064 | u8 reserved_0[0x10]; | ||
| 4065 | |||
| 4066 | u8 reserved_1[0x10]; | ||
| 4067 | u8 op_mod[0x10]; | ||
| 4068 | |||
| 4069 | u8 reserved_2[0x8]; | ||
| 4070 | u8 tirn[0x18]; | ||
| 4071 | |||
| 4072 | u8 reserved_3[0x20]; | ||
| 4073 | |||
| 4074 | u8 modify_bitmask[0x40]; | ||
| 4075 | |||
| 4076 | u8 reserved_4[0x40]; | ||
| 4077 | |||
| 4078 | struct mlx5_ifc_tirc_bits ctx; | ||
| 4079 | }; | ||
| 4080 | |||
| 4081 | struct mlx5_ifc_modify_sq_out_bits { | ||
| 4082 | u8 status[0x8]; | ||
| 4083 | u8 reserved_0[0x18]; | ||
| 4084 | |||
| 4085 | u8 syndrome[0x20]; | ||
| 4086 | |||
| 4087 | u8 reserved_1[0x40]; | ||
| 4088 | }; | ||
| 4089 | |||
| 4090 | struct mlx5_ifc_modify_sq_in_bits { | ||
| 4091 | u8 opcode[0x10]; | ||
| 4092 | u8 reserved_0[0x10]; | ||
| 4093 | |||
| 4094 | u8 reserved_1[0x10]; | ||
| 4095 | u8 op_mod[0x10]; | ||
| 4096 | |||
| 4097 | u8 sq_state[0x4]; | ||
| 4098 | u8 reserved_2[0x4]; | ||
| 4099 | u8 sqn[0x18]; | ||
| 4100 | |||
| 4101 | u8 reserved_3[0x20]; | ||
| 4102 | |||
| 4103 | u8 modify_bitmask[0x40]; | ||
| 4104 | |||
| 4105 | u8 reserved_4[0x40]; | ||
| 4106 | |||
| 4107 | struct mlx5_ifc_sqc_bits ctx; | ||
| 4108 | }; | ||
| 4109 | |||
| 4110 | struct mlx5_ifc_modify_rqt_out_bits { | ||
| 4111 | u8 status[0x8]; | ||
| 4112 | u8 reserved_0[0x18]; | ||
| 4113 | |||
| 4114 | u8 syndrome[0x20]; | ||
| 4115 | |||
| 4116 | u8 reserved_1[0x40]; | ||
| 4117 | }; | ||
| 4118 | |||
| 4119 | struct mlx5_ifc_modify_rqt_in_bits { | ||
| 4120 | u8 opcode[0x10]; | ||
| 4121 | u8 reserved_0[0x10]; | ||
| 4122 | |||
| 4123 | u8 reserved_1[0x10]; | ||
| 4124 | u8 op_mod[0x10]; | ||
| 4125 | |||
| 4126 | u8 reserved_2[0x8]; | ||
| 4127 | u8 rqtn[0x18]; | ||
| 4128 | |||
| 4129 | u8 reserved_3[0x20]; | ||
| 4130 | |||
| 4131 | u8 modify_bitmask[0x40]; | ||
| 4132 | |||
| 4133 | u8 reserved_4[0x40]; | ||
| 4134 | |||
| 4135 | struct mlx5_ifc_rqtc_bits ctx; | ||
| 4136 | }; | ||
| 4137 | |||
| 4138 | struct mlx5_ifc_modify_rq_out_bits { | ||
| 4139 | u8 status[0x8]; | ||
| 4140 | u8 reserved_0[0x18]; | ||
| 4141 | |||
| 4142 | u8 syndrome[0x20]; | ||
| 4143 | |||
| 4144 | u8 reserved_1[0x40]; | ||
| 4145 | }; | ||
| 4146 | |||
| 4147 | struct mlx5_ifc_modify_rq_in_bits { | ||
| 4148 | u8 opcode[0x10]; | ||
| 4149 | u8 reserved_0[0x10]; | ||
| 4150 | |||
| 4151 | u8 reserved_1[0x10]; | ||
| 4152 | u8 op_mod[0x10]; | ||
| 4153 | |||
| 4154 | u8 rq_state[0x4]; | ||
| 4155 | u8 reserved_2[0x4]; | ||
| 4156 | u8 rqn[0x18]; | ||
| 4157 | |||
| 4158 | u8 reserved_3[0x20]; | ||
| 4159 | |||
| 4160 | u8 modify_bitmask[0x40]; | ||
| 4161 | |||
| 4162 | u8 reserved_4[0x40]; | ||
| 4163 | |||
| 4164 | struct mlx5_ifc_rqc_bits ctx; | ||
| 4165 | }; | ||
| 4166 | |||
| 4167 | struct mlx5_ifc_modify_rmp_out_bits { | ||
| 4168 | u8 status[0x8]; | ||
| 4169 | u8 reserved_0[0x18]; | ||
| 4170 | |||
| 4171 | u8 syndrome[0x20]; | ||
| 4172 | |||
| 4173 | u8 reserved_1[0x40]; | ||
| 4174 | }; | ||
| 4175 | |||
| 4176 | struct mlx5_ifc_rmp_bitmask_bits { | ||
| 4177 | u8 reserved[0x20]; | ||
| 4178 | |||
| 4179 | u8 reserved1[0x1f]; | ||
| 4180 | u8 lwm[0x1]; | ||
| 4181 | }; | ||
| 4182 | |||
| 4183 | struct mlx5_ifc_modify_rmp_in_bits { | ||
| 4184 | u8 opcode[0x10]; | ||
| 4185 | u8 reserved_0[0x10]; | ||
| 4186 | |||
| 4187 | u8 reserved_1[0x10]; | ||
| 4188 | u8 op_mod[0x10]; | ||
| 4189 | |||
| 4190 | u8 rmp_state[0x4]; | ||
| 4191 | u8 reserved_2[0x4]; | ||
| 4192 | u8 rmpn[0x18]; | ||
| 4193 | |||
| 4194 | u8 reserved_3[0x20]; | ||
| 4195 | |||
| 4196 | struct mlx5_ifc_rmp_bitmask_bits bitmask; | ||
| 4197 | |||
| 4198 | u8 reserved_4[0x40]; | ||
| 4199 | |||
| 4200 | struct mlx5_ifc_rmpc_bits ctx; | ||
| 4201 | }; | ||
| 4202 | |||
| 4203 | struct mlx5_ifc_modify_nic_vport_context_out_bits { | ||
| 4204 | u8 status[0x8]; | ||
| 4205 | u8 reserved_0[0x18]; | ||
| 4206 | |||
| 4207 | u8 syndrome[0x20]; | ||
| 4208 | |||
| 4209 | u8 reserved_1[0x40]; | ||
| 4210 | }; | ||
| 4211 | |||
| 4212 | struct mlx5_ifc_modify_nic_vport_field_select_bits { | ||
| 4213 | u8 reserved_0[0x1c]; | ||
| 4214 | u8 permanent_address[0x1]; | ||
| 4215 | u8 addresses_list[0x1]; | ||
| 4216 | u8 roce_en[0x1]; | ||
| 4217 | u8 reserved_1[0x1]; | ||
| 4218 | }; | ||
| 4219 | |||
| 4220 | struct mlx5_ifc_modify_nic_vport_context_in_bits { | ||
| 4221 | u8 opcode[0x10]; | ||
| 4222 | u8 reserved_0[0x10]; | ||
| 4223 | |||
| 4224 | u8 reserved_1[0x10]; | ||
| 4225 | u8 op_mod[0x10]; | ||
| 4226 | |||
| 4227 | u8 other_vport[0x1]; | ||
| 4228 | u8 reserved_2[0xf]; | ||
| 4229 | u8 vport_number[0x10]; | ||
| 4230 | |||
| 4231 | struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; | ||
| 4232 | |||
| 4233 | u8 reserved_3[0x780]; | ||
| 4234 | |||
| 4235 | struct mlx5_ifc_nic_vport_context_bits nic_vport_context; | ||
| 4236 | }; | ||
| 4237 | |||
| 4238 | struct mlx5_ifc_modify_hca_vport_context_out_bits { | ||
| 4239 | u8 status[0x8]; | ||
| 4240 | u8 reserved_0[0x18]; | ||
| 4241 | |||
| 4242 | u8 syndrome[0x20]; | ||
| 4243 | |||
| 4244 | u8 reserved_1[0x40]; | ||
| 4245 | }; | ||
| 4246 | |||
| 4247 | struct mlx5_ifc_modify_hca_vport_context_in_bits { | ||
| 4248 | u8 opcode[0x10]; | ||
| 4249 | u8 reserved_0[0x10]; | ||
| 4250 | |||
| 4251 | u8 reserved_1[0x10]; | ||
| 4252 | u8 op_mod[0x10]; | ||
| 4253 | |||
| 4254 | u8 other_vport[0x1]; | ||
| 4255 | u8 reserved_2[0xb]; | ||
| 4256 | u8 port_num[0x4]; | ||
| 4257 | u8 vport_number[0x10]; | ||
| 4258 | |||
| 4259 | u8 reserved_3[0x20]; | ||
| 4260 | |||
| 4261 | struct mlx5_ifc_hca_vport_context_bits hca_vport_context; | ||
| 4262 | }; | ||
| 4263 | |||
| 4264 | struct mlx5_ifc_modify_cq_out_bits { | ||
| 4265 | u8 status[0x8]; | ||
| 4266 | u8 reserved_0[0x18]; | ||
| 4267 | |||
| 4268 | u8 syndrome[0x20]; | ||
| 4269 | |||
| 4270 | u8 reserved_1[0x40]; | ||
| 4271 | }; | ||
| 4272 | |||
| 4273 | enum { | ||
| 4274 | MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, | ||
| 4275 | MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, | ||
| 4276 | }; | ||
| 4277 | |||
| 4278 | struct mlx5_ifc_modify_cq_in_bits { | ||
| 4279 | u8 opcode[0x10]; | ||
| 4280 | u8 reserved_0[0x10]; | ||
| 4281 | |||
| 4282 | u8 reserved_1[0x10]; | ||
| 4283 | u8 op_mod[0x10]; | ||
| 4284 | |||
| 4285 | u8 reserved_2[0x8]; | ||
| 4286 | u8 cqn[0x18]; | ||
| 4287 | |||
| 4288 | union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; | ||
| 4289 | |||
| 4290 | struct mlx5_ifc_cqc_bits cq_context; | ||
| 4291 | |||
| 4292 | u8 reserved_3[0x600]; | ||
| 4293 | |||
| 4294 | u8 pas[0][0x40]; | ||
| 4295 | }; | ||
| 4296 | |||
| 4297 | struct mlx5_ifc_modify_cong_status_out_bits { | ||
| 4298 | u8 status[0x8]; | ||
| 4299 | u8 reserved_0[0x18]; | ||
| 4300 | |||
| 4301 | u8 syndrome[0x20]; | ||
| 4302 | |||
| 4303 | u8 reserved_1[0x40]; | ||
| 4304 | }; | ||
| 4305 | |||
| 4306 | struct mlx5_ifc_modify_cong_status_in_bits { | ||
| 4307 | u8 opcode[0x10]; | ||
| 4308 | u8 reserved_0[0x10]; | ||
| 4309 | |||
| 4310 | u8 reserved_1[0x10]; | ||
| 4311 | u8 op_mod[0x10]; | ||
| 4312 | |||
| 4313 | u8 reserved_2[0x18]; | ||
| 4314 | u8 priority[0x4]; | ||
| 4315 | u8 cong_protocol[0x4]; | ||
| 4316 | |||
| 4317 | u8 enable[0x1]; | ||
| 4318 | u8 tag_enable[0x1]; | ||
| 4319 | u8 reserved_3[0x1e]; | ||
| 4320 | }; | ||
| 4321 | |||
| 4322 | struct mlx5_ifc_modify_cong_params_out_bits { | ||
| 4323 | u8 status[0x8]; | ||
| 4324 | u8 reserved_0[0x18]; | ||
| 4325 | |||
| 4326 | u8 syndrome[0x20]; | ||
| 4327 | |||
| 4328 | u8 reserved_1[0x40]; | ||
| 4329 | }; | ||
| 4330 | |||
| 4331 | struct mlx5_ifc_modify_cong_params_in_bits { | ||
| 4332 | u8 opcode[0x10]; | ||
| 4333 | u8 reserved_0[0x10]; | ||
| 4334 | |||
| 4335 | u8 reserved_1[0x10]; | ||
| 4336 | u8 op_mod[0x10]; | ||
| 4337 | |||
| 4338 | u8 reserved_2[0x1c]; | ||
| 4339 | u8 cong_protocol[0x4]; | ||
| 4340 | |||
| 4341 | union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; | ||
| 4342 | |||
| 4343 | u8 reserved_3[0x80]; | ||
| 4344 | |||
| 4345 | union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; | ||
| 4346 | }; | ||
| 4347 | |||
| 4348 | struct mlx5_ifc_manage_pages_out_bits { | ||
| 4349 | u8 status[0x8]; | ||
| 4350 | u8 reserved_0[0x18]; | ||
| 4351 | |||
| 4352 | u8 syndrome[0x20]; | ||
| 4353 | |||
| 4354 | u8 output_num_entries[0x20]; | ||
| 4355 | |||
| 4356 | u8 reserved_1[0x20]; | ||
| 4357 | |||
| 4358 | u8 pas[0][0x40]; | ||
| 4359 | }; | ||
| 4360 | |||
| 4361 | enum { | ||
| 4362 | MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0, | ||
| 4363 | MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1, | ||
| 4364 | MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2, | ||
| 4365 | }; | ||
| 4366 | |||
| 4367 | struct mlx5_ifc_manage_pages_in_bits { | ||
| 4368 | u8 opcode[0x10]; | ||
| 4369 | u8 reserved_0[0x10]; | ||
| 4370 | |||
| 4371 | u8 reserved_1[0x10]; | ||
| 4372 | u8 op_mod[0x10]; | ||
| 4373 | |||
| 4374 | u8 reserved_2[0x10]; | ||
| 4375 | u8 function_id[0x10]; | ||
| 4376 | |||
| 4377 | u8 input_num_entries[0x20]; | ||
| 4378 | |||
| 4379 | u8 pas[0][0x40]; | ||
| 4380 | }; | ||
| 4381 | |||
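For MANAGE_PAGES, the op_mod enum above selects whether the driver is giving pages to the HCA (ALLOCATION_SUCCESS), reporting an allocation failure, or asking the HCA to return pages; the trailing pas[0][0x40] array carries one 64-bit page address per entry, so the input size is the fixed layout plus npages * 8 bytes. A minimal sketch, assuming MLX5_SET()/MLX5_ST_SZ_*() from mlx5/device.h and mlx5_cmd_exec() from the core driver; the helper is hypothetical and is not the driver's actual page allocator.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: hand 'npages' previously DMA-mapped pages to the
 * firmware.  The pas[] entries follow immediately after the fixed part
 * of manage_pages_in. */
static int give_pages_sketch(struct mlx5_core_dev *dev, u16 func_id,
                             int npages, const u64 *addrs)
{
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int in_sz = MLX5_ST_SZ_BYTES(manage_pages_in) + npages * sizeof(u64);
        __be64 *pas;
        void *in;
        int i, err;

        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod,
                 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);

        pas = (__be64 *)((char *)in + MLX5_ST_SZ_BYTES(manage_pages_in));
        for (i = 0; i < npages; i++)
                pas[i] = cpu_to_be64(addrs[i]);

        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        kfree(in);
        return err;
}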
| 4382 | struct mlx5_ifc_mad_ifc_out_bits { | ||
| 4383 | u8 status[0x8]; | ||
| 4384 | u8 reserved_0[0x18]; | ||
| 4385 | |||
| 4386 | u8 syndrome[0x20]; | ||
| 4387 | |||
| 4388 | u8 reserved_1[0x40]; | ||
| 4389 | |||
| 4390 | u8 response_mad_packet[256][0x8]; | ||
| 4391 | }; | ||
| 4392 | |||
| 4393 | struct mlx5_ifc_mad_ifc_in_bits { | ||
| 4394 | u8 opcode[0x10]; | ||
| 4395 | u8 reserved_0[0x10]; | ||
| 4396 | |||
| 4397 | u8 reserved_1[0x10]; | ||
| 4398 | u8 op_mod[0x10]; | ||
| 4399 | |||
| 4400 | u8 remote_lid[0x10]; | ||
| 4401 | u8 reserved_2[0x8]; | ||
| 4402 | u8 port[0x8]; | ||
| 4403 | |||
| 4404 | u8 reserved_3[0x20]; | ||
| 4405 | |||
| 4406 | u8 mad[256][0x8]; | ||
| 4407 | }; | ||
| 4408 | |||
| 4409 | struct mlx5_ifc_init_hca_out_bits { | ||
| 4410 | u8 status[0x8]; | ||
| 4411 | u8 reserved_0[0x18]; | ||
| 4412 | |||
| 4413 | u8 syndrome[0x20]; | ||
| 4414 | |||
| 4415 | u8 reserved_1[0x40]; | ||
| 4416 | }; | ||
| 4417 | |||
| 4418 | struct mlx5_ifc_init_hca_in_bits { | ||
| 4419 | u8 opcode[0x10]; | ||
| 4420 | u8 reserved_0[0x10]; | ||
| 4421 | |||
| 4422 | u8 reserved_1[0x10]; | ||
| 4423 | u8 op_mod[0x10]; | ||
| 4424 | |||
| 4425 | u8 reserved_2[0x40]; | ||
| 4426 | }; | ||
| 4427 | |||
| 4428 | struct mlx5_ifc_init2rtr_qp_out_bits { | ||
| 4429 | u8 status[0x8]; | ||
| 4430 | u8 reserved_0[0x18]; | ||
| 4431 | |||
| 4432 | u8 syndrome[0x20]; | ||
| 4433 | |||
| 4434 | u8 reserved_1[0x40]; | ||
| 4435 | }; | ||
| 4436 | |||
| 4437 | struct mlx5_ifc_init2rtr_qp_in_bits { | ||
| 4438 | u8 opcode[0x10]; | ||
| 4439 | u8 reserved_0[0x10]; | ||
| 4440 | |||
| 4441 | u8 reserved_1[0x10]; | ||
| 4442 | u8 op_mod[0x10]; | ||
| 4443 | |||
| 4444 | u8 reserved_2[0x8]; | ||
| 4445 | u8 qpn[0x18]; | ||
| 4446 | |||
| 4447 | u8 reserved_3[0x20]; | ||
| 4448 | |||
| 4449 | u8 opt_param_mask[0x20]; | ||
| 4450 | |||
| 4451 | u8 reserved_4[0x20]; | ||
| 4452 | |||
| 4453 | struct mlx5_ifc_qpc_bits qpc; | ||
| 4454 | |||
| 4455 | u8 reserved_5[0x80]; | ||
| 4456 | }; | ||
| 4457 | |||
| 4458 | struct mlx5_ifc_init2init_qp_out_bits { | ||
| 4459 | u8 status[0x8]; | ||
| 4460 | u8 reserved_0[0x18]; | ||
| 4461 | |||
| 4462 | u8 syndrome[0x20]; | ||
| 4463 | |||
| 4464 | u8 reserved_1[0x40]; | ||
| 4465 | }; | ||
| 4466 | |||
| 4467 | struct mlx5_ifc_init2init_qp_in_bits { | ||
| 4468 | u8 opcode[0x10]; | ||
| 4469 | u8 reserved_0[0x10]; | ||
| 4470 | |||
| 4471 | u8 reserved_1[0x10]; | ||
| 4472 | u8 op_mod[0x10]; | ||
| 4473 | |||
| 4474 | u8 reserved_2[0x8]; | ||
| 4475 | u8 qpn[0x18]; | ||
| 4476 | |||
| 4477 | u8 reserved_3[0x20]; | ||
| 4478 | |||
| 4479 | u8 opt_param_mask[0x20]; | ||
| 4480 | |||
| 4481 | u8 reserved_4[0x20]; | ||
| 4482 | |||
| 4483 | struct mlx5_ifc_qpc_bits qpc; | ||
| 4484 | |||
| 4485 | u8 reserved_5[0x80]; | ||
| 4486 | }; | ||
| 4487 | |||
| 4488 | struct mlx5_ifc_get_dropped_packet_log_out_bits { | ||
| 4489 | u8 status[0x8]; | ||
| 4490 | u8 reserved_0[0x18]; | ||
| 4491 | |||
| 4492 | u8 syndrome[0x20]; | ||
| 4493 | |||
| 4494 | u8 reserved_1[0x40]; | ||
| 4495 | |||
| 4496 | u8 packet_headers_log[128][0x8]; | ||
| 4497 | |||
| 4498 | u8 packet_syndrome[64][0x8]; | ||
| 4499 | }; | ||
| 4500 | |||
| 4501 | struct mlx5_ifc_get_dropped_packet_log_in_bits { | ||
| 4502 | u8 opcode[0x10]; | ||
| 4503 | u8 reserved_0[0x10]; | ||
| 4504 | |||
| 4505 | u8 reserved_1[0x10]; | ||
| 4506 | u8 op_mod[0x10]; | ||
| 4507 | |||
| 4508 | u8 reserved_2[0x40]; | ||
| 4509 | }; | ||
| 4510 | |||
| 4511 | struct mlx5_ifc_gen_eqe_in_bits { | ||
| 4512 | u8 opcode[0x10]; | ||
| 4513 | u8 reserved_0[0x10]; | ||
| 4514 | |||
| 4515 | u8 reserved_1[0x10]; | ||
| 4516 | u8 op_mod[0x10]; | ||
| 4517 | |||
| 4518 | u8 reserved_2[0x18]; | ||
| 4519 | u8 eq_number[0x8]; | ||
| 4520 | |||
| 4521 | u8 reserved_3[0x20]; | ||
| 4522 | |||
| 4523 | u8 eqe[64][0x8]; | ||
| 4524 | }; | ||
| 4525 | |||
| 4526 | struct mlx5_ifc_gen_eq_out_bits { | ||
| 4527 | u8 status[0x8]; | ||
| 4528 | u8 reserved_0[0x18]; | ||
| 4529 | |||
| 4530 | u8 syndrome[0x20]; | ||
| 4531 | |||
| 4532 | u8 reserved_1[0x40]; | ||
| 4533 | }; | ||
| 4534 | |||
| 4535 | struct mlx5_ifc_enable_hca_out_bits { | ||
| 4536 | u8 status[0x8]; | ||
| 4537 | u8 reserved_0[0x18]; | ||
| 4538 | |||
| 4539 | u8 syndrome[0x20]; | ||
| 4540 | |||
| 4541 | u8 reserved_1[0x20]; | ||
| 4542 | }; | ||
| 4543 | |||
| 4544 | struct mlx5_ifc_enable_hca_in_bits { | ||
| 4545 | u8 opcode[0x10]; | ||
| 4546 | u8 reserved_0[0x10]; | ||
| 4547 | |||
| 4548 | u8 reserved_1[0x10]; | ||
| 4549 | u8 op_mod[0x10]; | ||
| 4550 | |||
| 4551 | u8 reserved_2[0x10]; | ||
| 4552 | u8 function_id[0x10]; | ||
| 4553 | |||
| 4554 | u8 reserved_3[0x20]; | ||
| 4555 | }; | ||
| 4556 | |||
| 4557 | struct mlx5_ifc_drain_dct_out_bits { | ||
| 4558 | u8 status[0x8]; | ||
| 4559 | u8 reserved_0[0x18]; | ||
| 4560 | |||
| 4561 | u8 syndrome[0x20]; | ||
| 4562 | |||
| 4563 | u8 reserved_1[0x40]; | ||
| 4564 | }; | ||
| 4565 | |||
| 4566 | struct mlx5_ifc_drain_dct_in_bits { | ||
| 4567 | u8 opcode[0x10]; | ||
| 4568 | u8 reserved_0[0x10]; | ||
| 4569 | |||
| 4570 | u8 reserved_1[0x10]; | ||
| 4571 | u8 op_mod[0x10]; | ||
| 4572 | |||
| 4573 | u8 reserved_2[0x8]; | ||
| 4574 | u8 dctn[0x18]; | ||
| 4575 | |||
| 4576 | u8 reserved_3[0x20]; | ||
| 4577 | }; | ||
| 4578 | |||
| 4579 | struct mlx5_ifc_disable_hca_out_bits { | ||
| 4580 | u8 status[0x8]; | ||
| 4581 | u8 reserved_0[0x18]; | ||
| 4582 | |||
| 4583 | u8 syndrome[0x20]; | ||
| 4584 | |||
| 4585 | u8 reserved_1[0x20]; | ||
| 4586 | }; | ||
| 4587 | |||
| 4588 | struct mlx5_ifc_disable_hca_in_bits { | ||
| 4589 | u8 opcode[0x10]; | ||
| 4590 | u8 reserved_0[0x10]; | ||
| 4591 | |||
| 4592 | u8 reserved_1[0x10]; | ||
| 4593 | u8 op_mod[0x10]; | ||
| 4594 | |||
| 4595 | u8 reserved_2[0x10]; | ||
| 4596 | u8 function_id[0x10]; | ||
| 4597 | |||
| 4598 | u8 reserved_3[0x20]; | ||
| 4599 | }; | ||
| 4600 | |||
| 4601 | struct mlx5_ifc_detach_from_mcg_out_bits { | ||
| 4602 | u8 status[0x8]; | ||
| 4603 | u8 reserved_0[0x18]; | ||
| 4604 | |||
| 4605 | u8 syndrome[0x20]; | ||
| 4606 | |||
| 4607 | u8 reserved_1[0x40]; | ||
| 4608 | }; | ||
| 4609 | |||
| 4610 | struct mlx5_ifc_detach_from_mcg_in_bits { | ||
| 4611 | u8 opcode[0x10]; | ||
| 4612 | u8 reserved_0[0x10]; | ||
| 4613 | |||
| 4614 | u8 reserved_1[0x10]; | ||
| 4615 | u8 op_mod[0x10]; | ||
| 4616 | |||
| 4617 | u8 reserved_2[0x8]; | ||
| 4618 | u8 qpn[0x18]; | ||
| 4619 | |||
| 4620 | u8 reserved_3[0x20]; | ||
| 4621 | |||
| 4622 | u8 multicast_gid[16][0x8]; | ||
| 4623 | }; | ||
| 4624 | |||
| 4625 | struct mlx5_ifc_destroy_xrc_srq_out_bits { | ||
| 4626 | u8 status[0x8]; | ||
| 4627 | u8 reserved_0[0x18]; | ||
| 4628 | |||
| 4629 | u8 syndrome[0x20]; | ||
| 4630 | |||
| 4631 | u8 reserved_1[0x40]; | ||
| 4632 | }; | ||
| 4633 | |||
| 4634 | struct mlx5_ifc_destroy_xrc_srq_in_bits { | ||
| 4635 | u8 opcode[0x10]; | ||
| 4636 | u8 reserved_0[0x10]; | ||
| 4637 | |||
| 4638 | u8 reserved_1[0x10]; | ||
| 4639 | u8 op_mod[0x10]; | ||
| 4640 | |||
| 4641 | u8 reserved_2[0x8]; | ||
| 4642 | u8 xrc_srqn[0x18]; | ||
| 4643 | |||
| 4644 | u8 reserved_3[0x20]; | ||
| 4645 | }; | ||
| 4646 | |||
| 4647 | struct mlx5_ifc_destroy_tis_out_bits { | ||
| 4648 | u8 status[0x8]; | ||
| 4649 | u8 reserved_0[0x18]; | ||
| 4650 | |||
| 4651 | u8 syndrome[0x20]; | ||
| 4652 | |||
| 4653 | u8 reserved_1[0x40]; | ||
| 4654 | }; | ||
| 4655 | |||
| 4656 | struct mlx5_ifc_destroy_tis_in_bits { | ||
| 4657 | u8 opcode[0x10]; | ||
| 4658 | u8 reserved_0[0x10]; | ||
| 4659 | |||
| 4660 | u8 reserved_1[0x10]; | ||
| 4661 | u8 op_mod[0x10]; | ||
| 4662 | |||
| 4663 | u8 reserved_2[0x8]; | ||
| 4664 | u8 tisn[0x18]; | ||
| 4665 | |||
| 4666 | u8 reserved_3[0x20]; | ||
| 4667 | }; | ||
| 4668 | |||
| 4669 | struct mlx5_ifc_destroy_tir_out_bits { | ||
| 4670 | u8 status[0x8]; | ||
| 4671 | u8 reserved_0[0x18]; | ||
| 4672 | |||
| 4673 | u8 syndrome[0x20]; | ||
| 4674 | |||
| 4675 | u8 reserved_1[0x40]; | ||
| 4676 | }; | ||
| 4677 | |||
| 4678 | struct mlx5_ifc_destroy_tir_in_bits { | ||
| 4679 | u8 opcode[0x10]; | ||
| 4680 | u8 reserved_0[0x10]; | ||
| 4681 | |||
| 4682 | u8 reserved_1[0x10]; | ||
| 4683 | u8 op_mod[0x10]; | ||
| 4684 | |||
| 4685 | u8 reserved_2[0x8]; | ||
| 4686 | u8 tirn[0x18]; | ||
| 4687 | |||
| 4688 | u8 reserved_3[0x20]; | ||
| 4689 | }; | ||
| 4690 | |||
| 4691 | struct mlx5_ifc_destroy_srq_out_bits { | ||
| 4692 | u8 status[0x8]; | ||
| 4693 | u8 reserved_0[0x18]; | ||
| 4694 | |||
| 4695 | u8 syndrome[0x20]; | ||
| 4696 | |||
| 4697 | u8 reserved_1[0x40]; | ||
| 4698 | }; | ||
| 4699 | |||
| 4700 | struct mlx5_ifc_destroy_srq_in_bits { | ||
| 4701 | u8 opcode[0x10]; | ||
| 4702 | u8 reserved_0[0x10]; | ||
| 4703 | |||
| 4704 | u8 reserved_1[0x10]; | ||
| 4705 | u8 op_mod[0x10]; | ||
| 4706 | |||
| 4707 | u8 reserved_2[0x8]; | ||
| 4708 | u8 srqn[0x18]; | ||
| 4709 | |||
| 4710 | u8 reserved_3[0x20]; | ||
| 4711 | }; | ||
| 4712 | |||
| 4713 | struct mlx5_ifc_destroy_sq_out_bits { | ||
| 4714 | u8 status[0x8]; | ||
| 4715 | u8 reserved_0[0x18]; | ||
| 4716 | |||
| 4717 | u8 syndrome[0x20]; | ||
| 4718 | |||
| 4719 | u8 reserved_1[0x40]; | ||
| 4720 | }; | ||
| 4721 | |||
| 4722 | struct mlx5_ifc_destroy_sq_in_bits { | ||
| 4723 | u8 opcode[0x10]; | ||
| 4724 | u8 reserved_0[0x10]; | ||
| 4725 | |||
| 4726 | u8 reserved_1[0x10]; | ||
| 4727 | u8 op_mod[0x10]; | ||
| 4728 | |||
| 4729 | u8 reserved_2[0x8]; | ||
| 4730 | u8 sqn[0x18]; | ||
| 4731 | |||
| 4732 | u8 reserved_3[0x20]; | ||
| 4733 | }; | ||
| 4734 | |||
| 4735 | struct mlx5_ifc_destroy_rqt_out_bits { | ||
| 4736 | u8 status[0x8]; | ||
| 4737 | u8 reserved_0[0x18]; | ||
| 4738 | |||
| 4739 | u8 syndrome[0x20]; | ||
| 4740 | |||
| 4741 | u8 reserved_1[0x40]; | ||
| 4742 | }; | ||
| 4743 | |||
| 4744 | struct mlx5_ifc_destroy_rqt_in_bits { | ||
| 4745 | u8 opcode[0x10]; | ||
| 4746 | u8 reserved_0[0x10]; | ||
| 4747 | |||
| 4748 | u8 reserved_1[0x10]; | ||
| 4749 | u8 op_mod[0x10]; | ||
| 4750 | |||
| 4751 | u8 reserved_2[0x8]; | ||
| 4752 | u8 rqtn[0x18]; | ||
| 4753 | |||
| 4754 | u8 reserved_3[0x20]; | ||
| 4755 | }; | ||
| 4756 | |||
| 4757 | struct mlx5_ifc_destroy_rq_out_bits { | ||
| 4758 | u8 status[0x8]; | ||
| 4759 | u8 reserved_0[0x18]; | ||
| 4760 | |||
| 4761 | u8 syndrome[0x20]; | ||
| 4762 | |||
| 4763 | u8 reserved_1[0x40]; | ||
| 4764 | }; | ||
| 4765 | |||
| 4766 | struct mlx5_ifc_destroy_rq_in_bits { | ||
| 4767 | u8 opcode[0x10]; | ||
| 4768 | u8 reserved_0[0x10]; | ||
| 4769 | |||
| 4770 | u8 reserved_1[0x10]; | ||
| 4771 | u8 op_mod[0x10]; | ||
| 4772 | |||
| 4773 | u8 reserved_2[0x8]; | ||
| 4774 | u8 rqn[0x18]; | ||
| 4775 | |||
| 4776 | u8 reserved_3[0x20]; | ||
| 4777 | }; | ||
| 4778 | |||
| 4779 | struct mlx5_ifc_destroy_rmp_out_bits { | ||
| 4780 | u8 status[0x8]; | ||
| 4781 | u8 reserved_0[0x18]; | ||
| 4782 | |||
| 4783 | u8 syndrome[0x20]; | ||
| 4784 | |||
| 4785 | u8 reserved_1[0x40]; | ||
| 4786 | }; | ||
| 4787 | |||
| 4788 | struct mlx5_ifc_destroy_rmp_in_bits { | ||
| 4789 | u8 opcode[0x10]; | ||
| 4790 | u8 reserved_0[0x10]; | ||
| 4791 | |||
| 4792 | u8 reserved_1[0x10]; | ||
| 4793 | u8 op_mod[0x10]; | ||
| 4794 | |||
| 4795 | u8 reserved_2[0x8]; | ||
| 4796 | u8 rmpn[0x18]; | ||
| 4797 | |||
| 4798 | u8 reserved_3[0x20]; | ||
| 4799 | }; | ||
| 4800 | |||
| 4801 | struct mlx5_ifc_destroy_qp_out_bits { | ||
| 4802 | u8 status[0x8]; | ||
| 4803 | u8 reserved_0[0x18]; | ||
| 4804 | |||
| 4805 | u8 syndrome[0x20]; | ||
| 4806 | |||
| 4807 | u8 reserved_1[0x40]; | ||
| 4808 | }; | ||
| 4809 | |||
| 4810 | struct mlx5_ifc_destroy_qp_in_bits { | ||
| 4811 | u8 opcode[0x10]; | ||
| 4812 | u8 reserved_0[0x10]; | ||
| 4813 | |||
| 4814 | u8 reserved_1[0x10]; | ||
| 4815 | u8 op_mod[0x10]; | ||
| 4816 | |||
| 4817 | u8 reserved_2[0x8]; | ||
| 4818 | u8 qpn[0x18]; | ||
| 4819 | |||
| 4820 | u8 reserved_3[0x20]; | ||
| 4821 | }; | ||
| 4822 | |||
| 4823 | struct mlx5_ifc_destroy_psv_out_bits { | ||
| 4824 | u8 status[0x8]; | ||
| 4825 | u8 reserved_0[0x18]; | ||
| 4826 | |||
| 4827 | u8 syndrome[0x20]; | ||
| 4828 | |||
| 4829 | u8 reserved_1[0x40]; | ||
| 4830 | }; | ||
| 4831 | |||
| 4832 | struct mlx5_ifc_destroy_psv_in_bits { | ||
| 4833 | u8 opcode[0x10]; | ||
| 4834 | u8 reserved_0[0x10]; | ||
| 4835 | |||
| 4836 | u8 reserved_1[0x10]; | ||
| 4837 | u8 op_mod[0x10]; | ||
| 4838 | |||
| 4839 | u8 reserved_2[0x8]; | ||
| 4840 | u8 psvn[0x18]; | ||
| 4841 | |||
| 4842 | u8 reserved_3[0x20]; | ||
| 4843 | }; | ||
| 4844 | |||
| 4845 | struct mlx5_ifc_destroy_mkey_out_bits { | ||
| 4846 | u8 status[0x8]; | ||
| 4847 | u8 reserved_0[0x18]; | ||
| 4848 | |||
| 4849 | u8 syndrome[0x20]; | ||
| 4850 | |||
| 4851 | u8 reserved_1[0x40]; | ||
| 4852 | }; | ||
| 4853 | |||
| 4854 | struct mlx5_ifc_destroy_mkey_in_bits { | ||
| 4855 | u8 opcode[0x10]; | ||
| 4856 | u8 reserved_0[0x10]; | ||
| 4857 | |||
| 4858 | u8 reserved_1[0x10]; | ||
| 4859 | u8 op_mod[0x10]; | ||
| 4860 | |||
| 4861 | u8 reserved_2[0x8]; | ||
| 4862 | u8 mkey_index[0x18]; | ||
| 4863 | |||
| 4864 | u8 reserved_3[0x20]; | ||
| 4865 | }; | ||
| 4866 | |||
| 4867 | struct mlx5_ifc_destroy_flow_table_out_bits { | ||
| 4868 | u8 status[0x8]; | ||
| 4869 | u8 reserved_0[0x18]; | ||
| 4870 | |||
| 4871 | u8 syndrome[0x20]; | ||
| 4872 | |||
| 4873 | u8 reserved_1[0x40]; | ||
| 4874 | }; | ||
| 4875 | |||
| 4876 | struct mlx5_ifc_destroy_flow_table_in_bits { | ||
| 4877 | u8 opcode[0x10]; | ||
| 4878 | u8 reserved_0[0x10]; | ||
| 4879 | |||
| 4880 | u8 reserved_1[0x10]; | ||
| 4881 | u8 op_mod[0x10]; | ||
| 4882 | |||
| 4883 | u8 reserved_2[0x40]; | ||
| 4884 | |||
| 4885 | u8 table_type[0x8]; | ||
| 4886 | u8 reserved_3[0x18]; | ||
| 4887 | |||
| 4888 | u8 reserved_4[0x8]; | ||
| 4889 | u8 table_id[0x18]; | ||
| 4890 | |||
| 4891 | u8 reserved_5[0x140]; | ||
| 4892 | }; | ||
| 4893 | |||
| 4894 | struct mlx5_ifc_destroy_flow_group_out_bits { | ||
| 4895 | u8 status[0x8]; | ||
| 4896 | u8 reserved_0[0x18]; | ||
| 4897 | |||
| 4898 | u8 syndrome[0x20]; | ||
| 4899 | |||
| 4900 | u8 reserved_1[0x40]; | ||
| 4901 | }; | ||
| 4902 | |||
| 4903 | struct mlx5_ifc_destroy_flow_group_in_bits { | ||
| 4904 | u8 opcode[0x10]; | ||
| 4905 | u8 reserved_0[0x10]; | ||
| 4906 | |||
| 4907 | u8 reserved_1[0x10]; | ||
| 4908 | u8 op_mod[0x10]; | ||
| 4909 | |||
| 4910 | u8 reserved_2[0x40]; | ||
| 4911 | |||
| 4912 | u8 table_type[0x8]; | ||
| 4913 | u8 reserved_3[0x18]; | ||
| 4914 | |||
| 4915 | u8 reserved_4[0x8]; | ||
| 4916 | u8 table_id[0x18]; | ||
| 4917 | |||
| 4918 | u8 group_id[0x20]; | ||
| 4919 | |||
| 4920 | u8 reserved_5[0x120]; | ||
| 4921 | }; | ||
| 4922 | |||
| 4923 | struct mlx5_ifc_destroy_eq_out_bits { | ||
| 4924 | u8 status[0x8]; | ||
| 4925 | u8 reserved_0[0x18]; | ||
| 4926 | |||
| 4927 | u8 syndrome[0x20]; | ||
| 4928 | |||
| 4929 | u8 reserved_1[0x40]; | ||
| 4930 | }; | ||
| 4931 | |||
| 4932 | struct mlx5_ifc_destroy_eq_in_bits { | ||
| 4933 | u8 opcode[0x10]; | ||
| 4934 | u8 reserved_0[0x10]; | ||
| 4935 | |||
| 4936 | u8 reserved_1[0x10]; | ||
| 4937 | u8 op_mod[0x10]; | ||
| 4938 | |||
| 4939 | u8 reserved_2[0x18]; | ||
| 4940 | u8 eq_number[0x8]; | ||
| 4941 | |||
| 4942 | u8 reserved_3[0x20]; | ||
| 4943 | }; | ||
| 4944 | |||
| 4945 | struct mlx5_ifc_destroy_dct_out_bits { | ||
| 4946 | u8 status[0x8]; | ||
| 4947 | u8 reserved_0[0x18]; | ||
| 4948 | |||
| 4949 | u8 syndrome[0x20]; | ||
| 4950 | |||
| 4951 | u8 reserved_1[0x40]; | ||
| 4952 | }; | ||
| 4953 | |||
| 4954 | struct mlx5_ifc_destroy_dct_in_bits { | ||
| 4955 | u8 opcode[0x10]; | ||
| 4956 | u8 reserved_0[0x10]; | ||
| 4957 | |||
| 4958 | u8 reserved_1[0x10]; | ||
| 4959 | u8 op_mod[0x10]; | ||
| 4960 | |||
| 4961 | u8 reserved_2[0x8]; | ||
| 4962 | u8 dctn[0x18]; | ||
| 4963 | |||
| 4964 | u8 reserved_3[0x20]; | ||
| 4965 | }; | ||
| 4966 | |||
| 4967 | struct mlx5_ifc_destroy_cq_out_bits { | ||
| 4968 | u8 status[0x8]; | ||
| 4969 | u8 reserved_0[0x18]; | ||
| 4970 | |||
| 4971 | u8 syndrome[0x20]; | ||
| 4972 | |||
| 4973 | u8 reserved_1[0x40]; | ||
| 4974 | }; | ||
| 4975 | |||
| 4976 | struct mlx5_ifc_destroy_cq_in_bits { | ||
| 4977 | u8 opcode[0x10]; | ||
| 4978 | u8 reserved_0[0x10]; | ||
| 4979 | |||
| 4980 | u8 reserved_1[0x10]; | ||
| 4981 | u8 op_mod[0x10]; | ||
| 4982 | |||
| 4983 | u8 reserved_2[0x8]; | ||
| 4984 | u8 cqn[0x18]; | ||
| 4985 | |||
| 4986 | u8 reserved_3[0x20]; | ||
| 4987 | }; | ||
| 4988 | |||
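All of the destroy_* commands above share one shape: a fixed-size input carrying only the opcode and the object number (cqn, qpn, tirn, rqtn, ...), plus the common status/syndrome output. A minimal sketch for DESTROY_CQ, assuming MLX5_SET(), MLX5_ST_SZ_DW() and mlx5_cmd_exec(); the helper name is hypothetical.

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: tear down a CQ by number.  Every destroy_*_in
 * layout above follows this pattern, only the object field differs. */
static int destroy_cq_sketch(struct mlx5_core_dev *dev, u32 cqn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}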
| 4989 | struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { | ||
| 4990 | u8 status[0x8]; | ||
| 4991 | u8 reserved_0[0x18]; | ||
| 4992 | |||
| 4993 | u8 syndrome[0x20]; | ||
| 4994 | |||
| 4995 | u8 reserved_1[0x40]; | ||
| 4996 | }; | ||
| 4997 | |||
| 4998 | struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { | ||
| 4999 | u8 opcode[0x10]; | ||
| 5000 | u8 reserved_0[0x10]; | ||
| 5001 | |||
| 5002 | u8 reserved_1[0x10]; | ||
| 5003 | u8 op_mod[0x10]; | ||
| 5004 | |||
| 5005 | u8 reserved_2[0x20]; | ||
| 5006 | |||
| 5007 | u8 reserved_3[0x10]; | ||
| 5008 | u8 vxlan_udp_port[0x10]; | ||
| 5009 | }; | ||
| 5010 | |||
| 5011 | struct mlx5_ifc_delete_l2_table_entry_out_bits { | ||
| 5012 | u8 status[0x8]; | ||
| 5013 | u8 reserved_0[0x18]; | ||
| 5014 | |||
| 5015 | u8 syndrome[0x20]; | ||
| 5016 | |||
| 5017 | u8 reserved_1[0x40]; | ||
| 5018 | }; | ||
| 5019 | |||
| 5020 | struct mlx5_ifc_delete_l2_table_entry_in_bits { | ||
| 5021 | u8 opcode[0x10]; | ||
| 5022 | u8 reserved_0[0x10]; | ||
| 5023 | |||
| 5024 | u8 reserved_1[0x10]; | ||
| 5025 | u8 op_mod[0x10]; | ||
| 5026 | |||
| 5027 | u8 reserved_2[0x60]; | ||
| 5028 | |||
| 5029 | u8 reserved_3[0x8]; | ||
| 5030 | u8 table_index[0x18]; | ||
| 5031 | |||
| 5032 | u8 reserved_4[0x140]; | ||
| 5033 | }; | ||
| 5034 | |||
| 5035 | struct mlx5_ifc_delete_fte_out_bits { | ||
| 5036 | u8 status[0x8]; | ||
| 5037 | u8 reserved_0[0x18]; | ||
| 5038 | |||
| 5039 | u8 syndrome[0x20]; | ||
| 5040 | |||
| 5041 | u8 reserved_1[0x40]; | ||
| 5042 | }; | ||
| 5043 | |||
| 5044 | struct mlx5_ifc_delete_fte_in_bits { | ||
| 5045 | u8 opcode[0x10]; | ||
| 5046 | u8 reserved_0[0x10]; | ||
| 5047 | |||
| 5048 | u8 reserved_1[0x10]; | ||
| 5049 | u8 op_mod[0x10]; | ||
| 5050 | |||
| 5051 | u8 reserved_2[0x40]; | ||
| 5052 | |||
| 5053 | u8 table_type[0x8]; | ||
| 5054 | u8 reserved_3[0x18]; | ||
| 5055 | |||
| 5056 | u8 reserved_4[0x8]; | ||
| 5057 | u8 table_id[0x18]; | ||
| 5058 | |||
| 5059 | u8 reserved_5[0x40]; | ||
| 5060 | |||
| 5061 | u8 flow_index[0x20]; | ||
| 5062 | |||
| 5063 | u8 reserved_6[0xe0]; | ||
| 5064 | }; | ||
| 5065 | |||
| 5066 | struct mlx5_ifc_dealloc_xrcd_out_bits { | ||
| 5067 | u8 status[0x8]; | ||
| 5068 | u8 reserved_0[0x18]; | ||
| 5069 | |||
| 5070 | u8 syndrome[0x20]; | ||
| 5071 | |||
| 5072 | u8 reserved_1[0x40]; | ||
| 5073 | }; | ||
| 5074 | |||
| 5075 | struct mlx5_ifc_dealloc_xrcd_in_bits { | ||
| 5076 | u8 opcode[0x10]; | ||
| 5077 | u8 reserved_0[0x10]; | ||
| 5078 | |||
| 5079 | u8 reserved_1[0x10]; | ||
| 5080 | u8 op_mod[0x10]; | ||
| 5081 | |||
| 5082 | u8 reserved_2[0x8]; | ||
| 5083 | u8 xrcd[0x18]; | ||
| 5084 | |||
| 5085 | u8 reserved_3[0x20]; | ||
| 5086 | }; | ||
| 5087 | |||
| 5088 | struct mlx5_ifc_dealloc_uar_out_bits { | ||
| 5089 | u8 status[0x8]; | ||
| 5090 | u8 reserved_0[0x18]; | ||
| 5091 | |||
| 5092 | u8 syndrome[0x20]; | ||
| 5093 | |||
| 5094 | u8 reserved_1[0x40]; | ||
| 5095 | }; | ||
| 5096 | |||
| 5097 | struct mlx5_ifc_dealloc_uar_in_bits { | ||
| 5098 | u8 opcode[0x10]; | ||
| 5099 | u8 reserved_0[0x10]; | ||
| 5100 | |||
| 5101 | u8 reserved_1[0x10]; | ||
| 5102 | u8 op_mod[0x10]; | ||
| 5103 | |||
| 5104 | u8 reserved_2[0x8]; | ||
| 5105 | u8 uar[0x18]; | ||
| 5106 | |||
| 5107 | u8 reserved_3[0x20]; | ||
| 5108 | }; | ||
| 5109 | |||
| 5110 | struct mlx5_ifc_dealloc_transport_domain_out_bits { | ||
| 5111 | u8 status[0x8]; | ||
| 5112 | u8 reserved_0[0x18]; | ||
| 5113 | |||
| 5114 | u8 syndrome[0x20]; | ||
| 5115 | |||
| 5116 | u8 reserved_1[0x40]; | ||
| 5117 | }; | ||
| 5118 | |||
| 5119 | struct mlx5_ifc_dealloc_transport_domain_in_bits { | ||
| 5120 | u8 opcode[0x10]; | ||
| 5121 | u8 reserved_0[0x10]; | ||
| 5122 | |||
| 5123 | u8 reserved_1[0x10]; | ||
| 5124 | u8 op_mod[0x10]; | ||
| 5125 | |||
| 5126 | u8 reserved_2[0x8]; | ||
| 5127 | u8 transport_domain[0x18]; | ||
| 5128 | |||
| 5129 | u8 reserved_3[0x20]; | ||
| 5130 | }; | ||
| 5131 | |||
| 5132 | struct mlx5_ifc_dealloc_q_counter_out_bits { | ||
| 5133 | u8 status[0x8]; | ||
| 5134 | u8 reserved_0[0x18]; | ||
| 5135 | |||
| 5136 | u8 syndrome[0x20]; | ||
| 5137 | |||
| 5138 | u8 reserved_1[0x40]; | ||
| 5139 | }; | ||
| 5140 | |||
| 5141 | struct mlx5_ifc_dealloc_q_counter_in_bits { | ||
| 5142 | u8 opcode[0x10]; | ||
| 5143 | u8 reserved_0[0x10]; | ||
| 5144 | |||
| 5145 | u8 reserved_1[0x10]; | ||
| 5146 | u8 op_mod[0x10]; | ||
| 5147 | |||
| 5148 | u8 reserved_2[0x18]; | ||
| 5149 | u8 counter_set_id[0x8]; | ||
| 5150 | |||
| 5151 | u8 reserved_3[0x20]; | ||
| 5152 | }; | ||
| 5153 | |||
| 5154 | struct mlx5_ifc_dealloc_pd_out_bits { | ||
| 5155 | u8 status[0x8]; | ||
| 5156 | u8 reserved_0[0x18]; | ||
| 5157 | |||
| 5158 | u8 syndrome[0x20]; | ||
| 5159 | |||
| 5160 | u8 reserved_1[0x40]; | ||
| 5161 | }; | ||
| 5162 | |||
| 5163 | struct mlx5_ifc_dealloc_pd_in_bits { | ||
| 5164 | u8 opcode[0x10]; | ||
| 5165 | u8 reserved_0[0x10]; | ||
| 5166 | |||
| 5167 | u8 reserved_1[0x10]; | ||
| 5168 | u8 op_mod[0x10]; | ||
| 5169 | |||
| 5170 | u8 reserved_2[0x8]; | ||
| 5171 | u8 pd[0x18]; | ||
| 5172 | |||
| 5173 | u8 reserved_3[0x20]; | ||
| 5174 | }; | ||
| 5175 | |||
| 5176 | struct mlx5_ifc_create_xrc_srq_out_bits { | ||
| 5177 | u8 status[0x8]; | ||
| 5178 | u8 reserved_0[0x18]; | ||
| 5179 | |||
| 5180 | u8 syndrome[0x20]; | ||
| 5181 | |||
| 5182 | u8 reserved_1[0x8]; | ||
| 5183 | u8 xrc_srqn[0x18]; | ||
| 5184 | |||
| 5185 | u8 reserved_2[0x20]; | ||
| 5186 | }; | ||
| 5187 | |||
| 5188 | struct mlx5_ifc_create_xrc_srq_in_bits { | ||
| 5189 | u8 opcode[0x10]; | ||
| 5190 | u8 reserved_0[0x10]; | ||
| 5191 | |||
| 5192 | u8 reserved_1[0x10]; | ||
| 5193 | u8 op_mod[0x10]; | ||
| 5194 | |||
| 5195 | u8 reserved_2[0x40]; | ||
| 5196 | |||
| 5197 | struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; | ||
| 5198 | |||
| 5199 | u8 reserved_3[0x600]; | ||
| 5200 | |||
| 5201 | u8 pas[0][0x40]; | ||
| 5202 | }; | ||
| 5203 | |||
| 5204 | struct mlx5_ifc_create_tis_out_bits { | ||
| 5205 | u8 status[0x8]; | ||
| 5206 | u8 reserved_0[0x18]; | ||
| 5207 | |||
| 5208 | u8 syndrome[0x20]; | ||
| 5209 | |||
| 5210 | u8 reserved_1[0x8]; | ||
| 5211 | u8 tisn[0x18]; | ||
| 5212 | |||
| 5213 | u8 reserved_2[0x20]; | ||
| 5214 | }; | ||
| 5215 | |||
| 5216 | struct mlx5_ifc_create_tis_in_bits { | ||
| 5217 | u8 opcode[0x10]; | ||
| 5218 | u8 reserved_0[0x10]; | ||
| 5219 | |||
| 5220 | u8 reserved_1[0x10]; | ||
| 5221 | u8 op_mod[0x10]; | ||
| 5222 | |||
| 5223 | u8 reserved_2[0xc0]; | ||
| 5224 | |||
| 5225 | struct mlx5_ifc_tisc_bits ctx; | ||
| 5226 | }; | ||
| 5227 | |||
| 5228 | struct mlx5_ifc_create_tir_out_bits { | ||
| 5229 | u8 status[0x8]; | ||
| 5230 | u8 reserved_0[0x18]; | ||
| 5231 | |||
| 5232 | u8 syndrome[0x20]; | ||
| 5233 | |||
| 5234 | u8 reserved_1[0x8]; | ||
| 5235 | u8 tirn[0x18]; | ||
| 5236 | |||
| 5237 | u8 reserved_2[0x20]; | ||
| 5238 | }; | ||
| 5239 | |||
| 5240 | struct mlx5_ifc_create_tir_in_bits { | ||
| 5241 | u8 opcode[0x10]; | ||
| 5242 | u8 reserved_0[0x10]; | ||
| 5243 | |||
| 5244 | u8 reserved_1[0x10]; | ||
| 5245 | u8 op_mod[0x10]; | ||
| 5246 | |||
| 5247 | u8 reserved_2[0xc0]; | ||
| 5248 | |||
| 5249 | struct mlx5_ifc_tirc_bits ctx; | ||
| 5250 | }; | ||
| 5251 | |||
| 5252 | struct mlx5_ifc_create_srq_out_bits { | ||
| 5253 | u8 status[0x8]; | ||
| 5254 | u8 reserved_0[0x18]; | ||
| 5255 | |||
| 5256 | u8 syndrome[0x20]; | ||
| 5257 | |||
| 5258 | u8 reserved_1[0x8]; | ||
| 5259 | u8 srqn[0x18]; | ||
| 5260 | |||
| 5261 | u8 reserved_2[0x20]; | ||
| 5262 | }; | ||
| 5263 | |||
| 5264 | struct mlx5_ifc_create_srq_in_bits { | ||
| 5265 | u8 opcode[0x10]; | ||
| 5266 | u8 reserved_0[0x10]; | ||
| 5267 | |||
| 5268 | u8 reserved_1[0x10]; | ||
| 5269 | u8 op_mod[0x10]; | ||
| 5270 | |||
| 5271 | u8 reserved_2[0x40]; | ||
| 5272 | |||
| 5273 | struct mlx5_ifc_srqc_bits srq_context_entry; | ||
| 5274 | |||
| 5275 | u8 reserved_3[0x600]; | ||
| 5276 | |||
| 5277 | u8 pas[0][0x40]; | ||
| 5278 | }; | ||
| 5279 | |||
| 5280 | struct mlx5_ifc_create_sq_out_bits { | ||
| 5281 | u8 status[0x8]; | ||
| 5282 | u8 reserved_0[0x18]; | ||
| 5283 | |||
| 5284 | u8 syndrome[0x20]; | ||
| 5285 | |||
| 5286 | u8 reserved_1[0x8]; | ||
| 5287 | u8 sqn[0x18]; | ||
| 5288 | |||
| 5289 | u8 reserved_2[0x20]; | ||
| 5290 | }; | ||
| 5291 | |||
| 5292 | struct mlx5_ifc_create_sq_in_bits { | ||
| 5293 | u8 opcode[0x10]; | ||
| 5294 | u8 reserved_0[0x10]; | ||
| 5295 | |||
| 5296 | u8 reserved_1[0x10]; | ||
| 5297 | u8 op_mod[0x10]; | ||
| 5298 | |||
| 5299 | u8 reserved_2[0xc0]; | ||
| 5300 | |||
| 5301 | struct mlx5_ifc_sqc_bits ctx; | ||
| 5302 | }; | ||
| 5303 | |||
| 5304 | struct mlx5_ifc_create_rqt_out_bits { | ||
| 5305 | u8 status[0x8]; | ||
| 5306 | u8 reserved_0[0x18]; | ||
| 5307 | |||
| 5308 | u8 syndrome[0x20]; | ||
| 5309 | |||
| 5310 | u8 reserved_1[0x8]; | ||
| 5311 | u8 rqtn[0x18]; | ||
| 5312 | |||
| 5313 | u8 reserved_2[0x20]; | ||
| 5314 | }; | ||
| 5315 | |||
| 5316 | struct mlx5_ifc_create_rqt_in_bits { | ||
| 5317 | u8 opcode[0x10]; | ||
| 5318 | u8 reserved_0[0x10]; | ||
| 5319 | |||
| 5320 | u8 reserved_1[0x10]; | ||
| 5321 | u8 op_mod[0x10]; | ||
| 5322 | |||
| 5323 | u8 reserved_2[0xc0]; | ||
| 5324 | |||
| 5325 | struct mlx5_ifc_rqtc_bits rqt_context; | ||
| 5326 | }; | ||
| 5327 | |||
| 5328 | struct mlx5_ifc_create_rq_out_bits { | ||
| 5329 | u8 status[0x8]; | ||
| 5330 | u8 reserved_0[0x18]; | ||
| 5331 | |||
| 5332 | u8 syndrome[0x20]; | ||
| 5333 | |||
| 5334 | u8 reserved_1[0x8]; | ||
| 5335 | u8 rqn[0x18]; | ||
| 5336 | |||
| 5337 | u8 reserved_2[0x20]; | ||
| 5338 | }; | ||
| 5339 | |||
| 5340 | struct mlx5_ifc_create_rq_in_bits { | ||
| 5341 | u8 opcode[0x10]; | ||
| 5342 | u8 reserved_0[0x10]; | ||
| 5343 | |||
| 5344 | u8 reserved_1[0x10]; | ||
| 5345 | u8 op_mod[0x10]; | ||
| 5346 | |||
| 5347 | u8 reserved_2[0xc0]; | ||
| 5348 | |||
| 5349 | struct mlx5_ifc_rqc_bits ctx; | ||
| 5350 | }; | ||
| 5351 | |||
| 5352 | struct mlx5_ifc_create_rmp_out_bits { | ||
| 5353 | u8 status[0x8]; | ||
| 5354 | u8 reserved_0[0x18]; | ||
| 5355 | |||
| 5356 | u8 syndrome[0x20]; | ||
| 5357 | |||
| 5358 | u8 reserved_1[0x8]; | ||
| 5359 | u8 rmpn[0x18]; | ||
| 5360 | |||
| 5361 | u8 reserved_2[0x20]; | ||
| 5362 | }; | ||
| 5363 | |||
| 5364 | struct mlx5_ifc_create_rmp_in_bits { | ||
| 5365 | u8 opcode[0x10]; | ||
| 5366 | u8 reserved_0[0x10]; | ||
| 5367 | |||
| 5368 | u8 reserved_1[0x10]; | ||
| 5369 | u8 op_mod[0x10]; | ||
| 5370 | |||
| 5371 | u8 reserved_2[0xc0]; | ||
| 5372 | |||
| 5373 | struct mlx5_ifc_rmpc_bits ctx; | ||
| 5374 | }; | ||
| 5375 | |||
| 5376 | struct mlx5_ifc_create_qp_out_bits { | ||
| 5377 | u8 status[0x8]; | ||
| 5378 | u8 reserved_0[0x18]; | ||
| 5379 | |||
| 5380 | u8 syndrome[0x20]; | ||
| 5381 | |||
| 5382 | u8 reserved_1[0x8]; | ||
| 5383 | u8 qpn[0x18]; | ||
| 5384 | |||
| 5385 | u8 reserved_2[0x20]; | ||
| 5386 | }; | ||
| 5387 | |||
| 5388 | struct mlx5_ifc_create_qp_in_bits { | ||
| 5389 | u8 opcode[0x10]; | ||
| 5390 | u8 reserved_0[0x10]; | ||
| 5391 | |||
| 5392 | u8 reserved_1[0x10]; | ||
| 5393 | u8 op_mod[0x10]; | ||
| 5394 | |||
| 5395 | u8 reserved_2[0x40]; | ||
| 5396 | |||
| 5397 | u8 opt_param_mask[0x20]; | ||
| 5398 | |||
| 5399 | u8 reserved_3[0x20]; | ||
| 5400 | |||
| 5401 | struct mlx5_ifc_qpc_bits qpc; | ||
| 5402 | |||
| 5403 | u8 reserved_4[0x80]; | ||
| 5404 | |||
| 5405 | u8 pas[0][0x40]; | ||
| 5406 | }; | ||
| 5407 | |||
| 5408 | struct mlx5_ifc_create_psv_out_bits { | ||
| 5409 | u8 status[0x8]; | ||
| 5410 | u8 reserved_0[0x18]; | ||
| 5411 | |||
| 5412 | u8 syndrome[0x20]; | ||
| 5413 | |||
| 5414 | u8 reserved_1[0x40]; | ||
| 5415 | |||
| 5416 | u8 reserved_2[0x8]; | ||
| 5417 | u8 psv0_index[0x18]; | ||
| 5418 | |||
| 5419 | u8 reserved_3[0x8]; | ||
| 5420 | u8 psv1_index[0x18]; | ||
| 5421 | |||
| 5422 | u8 reserved_4[0x8]; | ||
| 5423 | u8 psv2_index[0x18]; | ||
| 5424 | |||
| 5425 | u8 reserved_5[0x8]; | ||
| 5426 | u8 psv3_index[0x18]; | ||
| 5427 | }; | ||
| 5428 | |||
| 5429 | struct mlx5_ifc_create_psv_in_bits { | ||
| 5430 | u8 opcode[0x10]; | ||
| 5431 | u8 reserved_0[0x10]; | ||
| 5432 | |||
| 5433 | u8 reserved_1[0x10]; | ||
| 5434 | u8 op_mod[0x10]; | ||
| 5435 | |||
| 5436 | u8 num_psv[0x4]; | ||
| 5437 | u8 reserved_2[0x4]; | ||
| 5438 | u8 pd[0x18]; | ||
| 5439 | |||
| 5440 | u8 reserved_3[0x20]; | ||
| 5441 | }; | ||
| 5442 | |||
| 5443 | struct mlx5_ifc_create_mkey_out_bits { | ||
| 5444 | u8 status[0x8]; | ||
| 5445 | u8 reserved_0[0x18]; | ||
| 5446 | |||
| 5447 | u8 syndrome[0x20]; | ||
| 5448 | |||
| 5449 | u8 reserved_1[0x8]; | ||
| 5450 | u8 mkey_index[0x18]; | ||
| 5451 | |||
| 5452 | u8 reserved_2[0x20]; | ||
| 5453 | }; | ||
| 5454 | |||
| 5455 | struct mlx5_ifc_create_mkey_in_bits { | ||
| 5456 | u8 opcode[0x10]; | ||
| 5457 | u8 reserved_0[0x10]; | ||
| 5458 | |||
| 5459 | u8 reserved_1[0x10]; | ||
| 5460 | u8 op_mod[0x10]; | ||
| 5461 | |||
| 5462 | u8 reserved_2[0x20]; | ||
| 5463 | |||
| 5464 | u8 pg_access[0x1]; | ||
| 5465 | u8 reserved_3[0x1f]; | ||
| 5466 | |||
| 5467 | struct mlx5_ifc_mkc_bits memory_key_mkey_entry; | ||
| 5468 | |||
| 5469 | u8 reserved_4[0x80]; | ||
| 5470 | |||
| 5471 | u8 translations_octword_actual_size[0x20]; | ||
| 5472 | |||
| 5473 | u8 reserved_5[0x560]; | ||
| 5474 | |||
| 5475 | u8 klm_pas_mtt[0][0x20]; | ||
| 5476 | }; | ||
| 5477 | |||
| 5478 | struct mlx5_ifc_create_flow_table_out_bits { | ||
| 5479 | u8 status[0x8]; | ||
| 5480 | u8 reserved_0[0x18]; | ||
| 5481 | |||
| 5482 | u8 syndrome[0x20]; | ||
| 5483 | |||
| 5484 | u8 reserved_1[0x8]; | ||
| 5485 | u8 table_id[0x18]; | ||
| 5486 | |||
| 5487 | u8 reserved_2[0x20]; | ||
| 5488 | }; | ||
| 5489 | |||
| 5490 | struct mlx5_ifc_create_flow_table_in_bits { | ||
| 5491 | u8 opcode[0x10]; | ||
| 5492 | u8 reserved_0[0x10]; | ||
| 5493 | |||
| 5494 | u8 reserved_1[0x10]; | ||
| 5495 | u8 op_mod[0x10]; | ||
| 5496 | |||
| 5497 | u8 reserved_2[0x40]; | ||
| 5498 | |||
| 5499 | u8 table_type[0x8]; | ||
| 5500 | u8 reserved_3[0x18]; | ||
| 5501 | |||
| 5502 | u8 reserved_4[0x20]; | ||
| 5503 | |||
| 5504 | u8 reserved_5[0x8]; | ||
| 5505 | u8 level[0x8]; | ||
| 5506 | u8 reserved_6[0x8]; | ||
| 5507 | u8 log_size[0x8]; | ||
| 5508 | |||
| 5509 | u8 reserved_7[0x120]; | ||
| 5510 | }; | ||
| 5511 | |||
| 5512 | struct mlx5_ifc_create_flow_group_out_bits { | ||
| 5513 | u8 status[0x8]; | ||
| 5514 | u8 reserved_0[0x18]; | ||
| 5515 | |||
| 5516 | u8 syndrome[0x20]; | ||
| 5517 | |||
| 5518 | u8 reserved_1[0x8]; | ||
| 5519 | u8 group_id[0x18]; | ||
| 5520 | |||
| 5521 | u8 reserved_2[0x20]; | ||
| 5522 | }; | ||
| 5523 | |||
| 5524 | enum { | ||
| 5525 | MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, | ||
| 5526 | MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, | ||
| 5527 | MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, | ||
| 5528 | }; | ||
| 5529 | |||
| 5530 | struct mlx5_ifc_create_flow_group_in_bits { | ||
| 5531 | u8 opcode[0x10]; | ||
| 5532 | u8 reserved_0[0x10]; | ||
| 5533 | |||
| 5534 | u8 reserved_1[0x10]; | ||
| 5535 | u8 op_mod[0x10]; | ||
| 5536 | |||
| 5537 | u8 reserved_2[0x40]; | ||
| 5538 | |||
| 5539 | u8 table_type[0x8]; | ||
| 5540 | u8 reserved_3[0x18]; | ||
| 5541 | |||
| 5542 | u8 reserved_4[0x8]; | ||
| 5543 | u8 table_id[0x18]; | ||
| 5544 | |||
| 5545 | u8 reserved_5[0x20]; | ||
| 5546 | |||
| 5547 | u8 start_flow_index[0x20]; | ||
| 5548 | |||
| 5549 | u8 reserved_6[0x20]; | ||
| 5550 | |||
| 5551 | u8 end_flow_index[0x20]; | ||
| 5552 | |||
| 5553 | u8 reserved_7[0xa0]; | ||
| 5554 | |||
| 5555 | u8 reserved_8[0x18]; | ||
| 5556 | u8 match_criteria_enable[0x8]; | ||
| 5557 | |||
| 5558 | struct mlx5_ifc_fte_match_param_bits match_criteria; | ||
| 5559 | |||
| 5560 | u8 reserved_9[0xe00]; | ||
| 5561 | }; | ||
| 5562 | |||
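In CREATE_FLOW_GROUP, match_criteria_enable selects which blocks of the embedded fte_match_param the group matches on (outer headers, misc parameters, inner headers, per the enum above), while the criteria themselves are written into the match_criteria sub-struct. A sketch, assuming MLX5_SET()/MLX5_ADDR_OF() from mlx5/device.h, mlx5_cmd_exec(), and the CREATE_FLOW_GROUP opcode added alongside these layouts; the helper and the flow-index range are illustrative only.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: create a flow group that matches on the outer
 * destination MAC for flow indexes 0..7 of an existing table. */
static int create_dmac_group_sketch(struct mlx5_core_dev *dev,
                                    u8 table_type, u32 table_id,
                                    u32 *group_id)
{
        u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
        int in_sz = MLX5_ST_SZ_BYTES(create_flow_group_in);
        void *in, *match;
        int err;

        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_flow_group_in, in, opcode,
                 MLX5_CMD_OP_CREATE_FLOW_GROUP);
        MLX5_SET(create_flow_group_in, in, table_type, table_type);
        MLX5_SET(create_flow_group_in, in, table_id, table_id);
        MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, in, end_flow_index, 7);
        MLX5_SET(create_flow_group_in, in, match_criteria_enable,
                 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);

        /* Mark the outer destination MAC (dmac_47_16 + dmac_15_0) as a
         * match field inside the embedded fte_match_param. */
        match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        memset(MLX5_ADDR_OF(fte_match_param, match, outer_headers.dmac_47_16),
               0xff, 6);

        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (!err)
                *group_id = MLX5_GET(create_flow_group_out, out, group_id);

        kfree(in);
        return err;
}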
| 5563 | struct mlx5_ifc_create_eq_out_bits { | ||
| 5564 | u8 status[0x8]; | ||
| 5565 | u8 reserved_0[0x18]; | ||
| 5566 | |||
| 5567 | u8 syndrome[0x20]; | ||
| 5568 | |||
| 5569 | u8 reserved_1[0x18]; | ||
| 5570 | u8 eq_number[0x8]; | ||
| 5571 | |||
| 5572 | u8 reserved_2[0x20]; | ||
| 5573 | }; | ||
| 5574 | |||
| 5575 | struct mlx5_ifc_create_eq_in_bits { | ||
| 5576 | u8 opcode[0x10]; | ||
| 5577 | u8 reserved_0[0x10]; | ||
| 5578 | |||
| 5579 | u8 reserved_1[0x10]; | ||
| 5580 | u8 op_mod[0x10]; | ||
| 5581 | |||
| 5582 | u8 reserved_2[0x40]; | ||
| 5583 | |||
| 5584 | struct mlx5_ifc_eqc_bits eq_context_entry; | ||
| 5585 | |||
| 5586 | u8 reserved_3[0x40]; | ||
| 5587 | |||
| 5588 | u8 event_bitmask[0x40]; | ||
| 5589 | |||
| 5590 | u8 reserved_4[0x580]; | ||
| 5591 | |||
| 5592 | u8 pas[0][0x40]; | ||
| 5593 | }; | ||
| 5594 | |||
| 5595 | struct mlx5_ifc_create_dct_out_bits { | ||
| 5596 | u8 status[0x8]; | ||
| 5597 | u8 reserved_0[0x18]; | ||
| 5598 | |||
| 5599 | u8 syndrome[0x20]; | ||
| 5600 | |||
| 5601 | u8 reserved_1[0x8]; | ||
| 5602 | u8 dctn[0x18]; | ||
| 5603 | |||
| 5604 | u8 reserved_2[0x20]; | ||
| 5605 | }; | ||
| 5606 | |||
| 5607 | struct mlx5_ifc_create_dct_in_bits { | ||
| 5608 | u8 opcode[0x10]; | ||
| 5609 | u8 reserved_0[0x10]; | ||
| 5610 | |||
| 5611 | u8 reserved_1[0x10]; | ||
| 5612 | u8 op_mod[0x10]; | ||
| 5613 | |||
| 5614 | u8 reserved_2[0x40]; | ||
| 5615 | |||
| 5616 | struct mlx5_ifc_dctc_bits dct_context_entry; | ||
| 5617 | |||
| 5618 | u8 reserved_3[0x180]; | ||
| 5619 | }; | ||
| 5620 | |||
| 5621 | struct mlx5_ifc_create_cq_out_bits { | ||
| 5622 | u8 status[0x8]; | ||
| 5623 | u8 reserved_0[0x18]; | ||
| 5624 | |||
| 5625 | u8 syndrome[0x20]; | ||
| 5626 | |||
| 5627 | u8 reserved_1[0x8]; | ||
| 5628 | u8 cqn[0x18]; | ||
| 5629 | |||
| 5630 | u8 reserved_2[0x20]; | ||
| 5631 | }; | ||
| 5632 | |||
| 5633 | struct mlx5_ifc_create_cq_in_bits { | ||
| 5634 | u8 opcode[0x10]; | ||
| 5635 | u8 reserved_0[0x10]; | ||
| 5636 | |||
| 5637 | u8 reserved_1[0x10]; | ||
| 5638 | u8 op_mod[0x10]; | ||
| 5639 | |||
| 5640 | u8 reserved_2[0x40]; | ||
| 5641 | |||
| 5642 | struct mlx5_ifc_cqc_bits cq_context; | ||
| 5643 | |||
| 5644 | u8 reserved_3[0x600]; | ||
| 5645 | |||
| 5646 | u8 pas[0][0x40]; | ||
| 5647 | }; | ||
| 5648 | |||
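Like the SRQ, EQ and QP create commands above, CREATE_CQ ends in a variable-length pas[] array of 64-bit page addresses, so the input must be sized as the fixed layout plus one entry per page; the new cqn comes back in the output. A sketch under those assumptions, using MLX5_SET()/MLX5_ADDR_OF()/MLX5_GET() and mlx5_cmd_exec(); the helper is hypothetical, and a real caller fills far more of the cq_context (UAR page, EQN, doorbell address, ...) than shown.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: create a CQ whose buffer spans 'npas' pages. */
static int create_cq_sketch(struct mlx5_core_dev *dev, const u64 *pages,
                            int npas, u8 log_cq_size, u32 *cqn)
{
        u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
        int in_sz = MLX5_ST_SZ_BYTES(create_cq_in) + npas * sizeof(u64);
        __be64 *pas;
        void *in, *cqc;
        int i, err;

        in = kzalloc(in_sz, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);

        pas = (__be64 *)((char *)in + MLX5_ST_SZ_BYTES(create_cq_in));
        for (i = 0; i < npas; i++)
                pas[i] = cpu_to_be64(pages[i]);

        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (!err)
                *cqn = MLX5_GET(create_cq_out, out, cqn);

        kfree(in);
        return err;
}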
| 5649 | struct mlx5_ifc_config_int_moderation_out_bits { | ||
| 5650 | u8 status[0x8]; | ||
| 5651 | u8 reserved_0[0x18]; | ||
| 5652 | |||
| 5653 | u8 syndrome[0x20]; | ||
| 5654 | |||
| 5655 | u8 reserved_1[0x4]; | ||
| 5656 | u8 min_delay[0xc]; | ||
| 5657 | u8 int_vector[0x10]; | ||
| 5658 | |||
| 5659 | u8 reserved_2[0x20]; | ||
| 5660 | }; | ||
| 5661 | |||
| 5662 | enum { | ||
| 5663 | MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, | ||
| 5664 | MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, | ||
| 5665 | }; | ||
| 5666 | |||
| 5667 | struct mlx5_ifc_config_int_moderation_in_bits { | ||
| 5668 | u8 opcode[0x10]; | ||
| 5669 | u8 reserved_0[0x10]; | ||
| 5670 | |||
| 5671 | u8 reserved_1[0x10]; | ||
| 5672 | u8 op_mod[0x10]; | ||
| 5673 | |||
| 5674 | u8 reserved_2[0x4]; | ||
| 5675 | u8 min_delay[0xc]; | ||
| 5676 | u8 int_vector[0x10]; | ||
| 5677 | |||
| 5678 | u8 reserved_3[0x20]; | ||
| 5679 | }; | ||
| 5680 | |||
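CONFIG_INT_MODERATION uses the op_mod enum above to either read or write the per-vector interrupt moderation; on a write, the output echoes the values actually programmed. A minimal sketch, assuming mlx5_cmd_exec() and the CONFIG_INT_MODERATION opcode from the command enum; the helper is hypothetical.

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: program a minimum interrupt delay for one vector. */
static int set_int_moderation_sketch(struct mlx5_core_dev *dev,
                                     u16 vector, u16 min_delay)
{
        u32 in[MLX5_ST_SZ_DW(config_int_moderation_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(config_int_moderation_out)] = {0};

        MLX5_SET(config_int_moderation_in, in, opcode,
                 MLX5_CMD_OP_CONFIG_INT_MODERATION);
        MLX5_SET(config_int_moderation_in, in, op_mod,
                 MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE);
        MLX5_SET(config_int_moderation_in, in, int_vector, vector);
        MLX5_SET(config_int_moderation_in, in, min_delay, min_delay);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}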
| 5681 | struct mlx5_ifc_attach_to_mcg_out_bits { | ||
| 5682 | u8 status[0x8]; | ||
| 5683 | u8 reserved_0[0x18]; | ||
| 5684 | |||
| 5685 | u8 syndrome[0x20]; | ||
| 5686 | |||
| 5687 | u8 reserved_1[0x40]; | ||
| 5688 | }; | ||
| 5689 | |||
| 5690 | struct mlx5_ifc_attach_to_mcg_in_bits { | ||
| 5691 | u8 opcode[0x10]; | ||
| 5692 | u8 reserved_0[0x10]; | ||
| 5693 | |||
| 5694 | u8 reserved_1[0x10]; | ||
| 5695 | u8 op_mod[0x10]; | ||
| 5696 | |||
| 5697 | u8 reserved_2[0x8]; | ||
| 5698 | u8 qpn[0x18]; | ||
| 5699 | |||
| 5700 | u8 reserved_3[0x20]; | ||
| 5701 | |||
| 5702 | u8 multicast_gid[16][0x8]; | ||
| 5703 | }; | ||
| 5704 | |||
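The attach/detach MCG commands carry the 16-byte multicast GID as a plain byte array, so it is filled through MLX5_ADDR_OF() plus memcpy() rather than MLX5_SET(). A minimal sketch, assuming mlx5_cmd_exec() and the ATTACH_TO_MCG opcode; the helper is hypothetical.

#include <linux/string.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: attach a QP to a multicast group. */
static int attach_mcg_sketch(struct mlx5_core_dev *dev, u32 qpn,
                             const u8 *gid /* 16 bytes */)
{
        u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};

        MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
        MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
        memcpy(MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid), gid, 16);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}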
| 5705 | struct mlx5_ifc_arm_xrc_srq_out_bits { | ||
| 5706 | u8 status[0x8]; | ||
| 5707 | u8 reserved_0[0x18]; | ||
| 5708 | |||
| 5709 | u8 syndrome[0x20]; | ||
| 5710 | |||
| 5711 | u8 reserved_1[0x40]; | ||
| 5712 | }; | ||
| 5713 | |||
| 5714 | enum { | ||
| 5715 | MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, | ||
| 5716 | }; | ||
| 5717 | |||
| 5718 | struct mlx5_ifc_arm_xrc_srq_in_bits { | ||
| 5719 | u8 opcode[0x10]; | ||
| 5720 | u8 reserved_0[0x10]; | ||
| 5721 | |||
| 5722 | u8 reserved_1[0x10]; | ||
| 5723 | u8 op_mod[0x10]; | ||
| 5724 | |||
| 5725 | u8 reserved_2[0x8]; | ||
| 5726 | u8 xrc_srqn[0x18]; | ||
| 5727 | |||
| 5728 | u8 reserved_3[0x10]; | ||
| 5729 | u8 lwm[0x10]; | ||
| 5730 | }; | ||
| 5731 | |||
| 5732 | struct mlx5_ifc_arm_rq_out_bits { | ||
| 5733 | u8 status[0x8]; | ||
| 5734 | u8 reserved_0[0x18]; | ||
| 5735 | |||
| 5736 | u8 syndrome[0x20]; | ||
| 5737 | |||
| 5738 | u8 reserved_1[0x40]; | ||
| 5739 | }; | ||
| 5740 | |||
| 5741 | enum { | ||
| 5742 | MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, | ||
| 5743 | }; | ||
| 5744 | |||
| 5745 | struct mlx5_ifc_arm_rq_in_bits { | ||
| 5746 | u8 opcode[0x10]; | ||
| 5747 | u8 reserved_0[0x10]; | ||
| 5748 | |||
| 5749 | u8 reserved_1[0x10]; | ||
| 5750 | u8 op_mod[0x10]; | ||
| 5751 | |||
| 5752 | u8 reserved_2[0x8]; | ||
| 5753 | u8 srq_number[0x18]; | ||
| 5754 | |||
| 5755 | u8 reserved_3[0x10]; | ||
| 5756 | u8 lwm[0x10]; | ||
| 5757 | }; | ||
| 5758 | |||
| 5759 | struct mlx5_ifc_arm_dct_out_bits { | ||
| 5760 | u8 status[0x8]; | ||
| 5761 | u8 reserved_0[0x18]; | ||
| 5762 | |||
| 5763 | u8 syndrome[0x20]; | ||
| 5764 | |||
| 5765 | u8 reserved_1[0x40]; | ||
| 5766 | }; | ||
| 5767 | |||
| 5768 | struct mlx5_ifc_arm_dct_in_bits { | ||
| 5769 | u8 opcode[0x10]; | ||
| 5770 | u8 reserved_0[0x10]; | ||
| 5771 | |||
| 5772 | u8 reserved_1[0x10]; | ||
| 5773 | u8 op_mod[0x10]; | ||
| 5774 | |||
| 5775 | u8 reserved_2[0x8]; | ||
| 5776 | u8 dct_number[0x18]; | ||
| 5777 | |||
| 5778 | u8 reserved_3[0x20]; | ||
| 5779 | }; | ||
| 5780 | |||
| 5781 | struct mlx5_ifc_alloc_xrcd_out_bits { | ||
| 5782 | u8 status[0x8]; | ||
| 5783 | u8 reserved_0[0x18]; | ||
| 5784 | |||
| 5785 | u8 syndrome[0x20]; | ||
| 5786 | |||
| 5787 | u8 reserved_1[0x8]; | ||
| 5788 | u8 xrcd[0x18]; | ||
| 5789 | |||
| 5790 | u8 reserved_2[0x20]; | ||
| 5791 | }; | ||
| 5792 | |||
| 5793 | struct mlx5_ifc_alloc_xrcd_in_bits { | ||
| 5794 | u8 opcode[0x10]; | ||
| 5795 | u8 reserved_0[0x10]; | ||
| 5796 | |||
| 5797 | u8 reserved_1[0x10]; | ||
| 5798 | u8 op_mod[0x10]; | ||
| 5799 | |||
| 5800 | u8 reserved_2[0x40]; | ||
| 5801 | }; | ||
| 5802 | |||
| 5803 | struct mlx5_ifc_alloc_uar_out_bits { | ||
| 5804 | u8 status[0x8]; | ||
| 5805 | u8 reserved_0[0x18]; | ||
| 5806 | |||
| 5807 | u8 syndrome[0x20]; | ||
| 5808 | |||
| 5809 | u8 reserved_1[0x8]; | ||
| 5810 | u8 uar[0x18]; | ||
| 5811 | |||
| 5812 | u8 reserved_2[0x20]; | ||
| 5813 | }; | ||
| 5814 | |||
| 5815 | struct mlx5_ifc_alloc_uar_in_bits { | ||
| 5816 | u8 opcode[0x10]; | ||
| 5817 | u8 reserved_0[0x10]; | ||
| 5818 | |||
| 5819 | u8 reserved_1[0x10]; | ||
| 5820 | u8 op_mod[0x10]; | ||
| 5821 | |||
| 5822 | u8 reserved_2[0x40]; | ||
| 5823 | }; | ||
| 5824 | |||
| 5825 | struct mlx5_ifc_alloc_transport_domain_out_bits { | ||
| 5826 | u8 status[0x8]; | ||
| 5827 | u8 reserved_0[0x18]; | ||
| 5828 | |||
| 5829 | u8 syndrome[0x20]; | ||
| 5830 | |||
| 5831 | u8 reserved_1[0x8]; | ||
| 5832 | u8 transport_domain[0x18]; | ||
| 5833 | |||
| 5834 | u8 reserved_2[0x20]; | ||
| 5835 | }; | ||
| 5836 | |||
| 5837 | struct mlx5_ifc_alloc_transport_domain_in_bits { | ||
| 5838 | u8 opcode[0x10]; | ||
| 5839 | u8 reserved_0[0x10]; | ||
| 5840 | |||
| 5841 | u8 reserved_1[0x10]; | ||
| 5842 | u8 op_mod[0x10]; | ||
| 5843 | |||
| 5844 | u8 reserved_2[0x40]; | ||
| 5845 | }; | ||
| 5846 | |||
| 5847 | struct mlx5_ifc_alloc_q_counter_out_bits { | ||
| 5848 | u8 status[0x8]; | ||
| 5849 | u8 reserved_0[0x18]; | ||
| 5850 | |||
| 5851 | u8 syndrome[0x20]; | ||
| 5852 | |||
| 5853 | u8 reserved_1[0x18]; | ||
| 5854 | u8 counter_set_id[0x8]; | ||
| 5855 | |||
| 5856 | u8 reserved_2[0x20]; | ||
| 5857 | }; | ||
| 5858 | |||
| 5859 | struct mlx5_ifc_alloc_q_counter_in_bits { | ||
| 5860 | u8 opcode[0x10]; | ||
| 5861 | u8 reserved_0[0x10]; | ||
| 5862 | |||
| 5863 | u8 reserved_1[0x10]; | ||
| 5864 | u8 op_mod[0x10]; | ||
| 5865 | |||
| 5866 | u8 reserved_2[0x40]; | ||
| 5867 | }; | ||
| 5868 | |||
| 5869 | struct mlx5_ifc_alloc_pd_out_bits { | ||
| 5870 | u8 status[0x8]; | ||
| 5871 | u8 reserved_0[0x18]; | ||
| 5872 | |||
| 5873 | u8 syndrome[0x20]; | ||
| 5874 | |||
| 5875 | u8 reserved_1[0x8]; | ||
| 5876 | u8 pd[0x18]; | ||
| 5877 | |||
| 5878 | u8 reserved_2[0x20]; | ||
| 5879 | }; | ||
| 5880 | |||
| 5881 | struct mlx5_ifc_alloc_pd_in_bits { | ||
| 5882 | u8 opcode[0x10]; | ||
| 5883 | u8 reserved_0[0x10]; | ||
| 5884 | |||
| 5885 | u8 reserved_1[0x10]; | ||
| 5886 | u8 op_mod[0x10]; | ||
| 5887 | |||
| 5888 | u8 reserved_2[0x40]; | ||
| 5889 | }; | ||
| 5890 | |||
| 5891 | struct mlx5_ifc_add_vxlan_udp_dport_out_bits { | ||
| 5892 | u8 status[0x8]; | ||
| 5893 | u8 reserved_0[0x18]; | ||
| 5894 | |||
| 5895 | u8 syndrome[0x20]; | ||
| 5896 | |||
| 5897 | u8 reserved_1[0x40]; | ||
| 5898 | }; | ||
| 5899 | |||
| 5900 | struct mlx5_ifc_add_vxlan_udp_dport_in_bits { | ||
| 5901 | u8 opcode[0x10]; | ||
| 5902 | u8 reserved_0[0x10]; | ||
| 5903 | |||
| 5904 | u8 reserved_1[0x10]; | ||
| 5905 | u8 op_mod[0x10]; | ||
| 5906 | |||
| 5907 | u8 reserved_2[0x20]; | ||
| 5908 | |||
| 5909 | u8 reserved_3[0x10]; | ||
| 5910 | u8 vxlan_udp_port[0x10]; | ||
| 5911 | }; | ||
| 5912 | |||
| 5913 | struct mlx5_ifc_access_register_out_bits { | ||
| 5914 | u8 status[0x8]; | ||
| 5915 | u8 reserved_0[0x18]; | ||
| 5916 | |||
| 5917 | u8 syndrome[0x20]; | ||
| 5918 | |||
| 5919 | u8 reserved_1[0x40]; | ||
| 5920 | |||
| 5921 | u8 register_data[0][0x20]; | ||
| 5922 | }; | ||
| 5923 | |||
| 5924 | enum { | ||
| 5925 | MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, | ||
| 5926 | MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, | ||
| 5927 | }; | ||
| 5928 | |||
| 5929 | struct mlx5_ifc_access_register_in_bits { | ||
| 5930 | u8 opcode[0x10]; | ||
| 5931 | u8 reserved_0[0x10]; | ||
| 5932 | |||
| 5933 | u8 reserved_1[0x10]; | ||
| 5934 | u8 op_mod[0x10]; | ||
| 5935 | |||
| 5936 | u8 reserved_2[0x10]; | ||
| 5937 | u8 register_id[0x10]; | ||
| 5938 | |||
| 5939 | u8 argument[0x20]; | ||
| 5940 | |||
| 5941 | u8 register_data[0][0x20]; | ||
| 5942 | }; | ||
| 5943 | |||
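ACCESS_REG is the transport for the port/configuration registers defined below (SLTP, PTYS, PMTU, PAOS, ...): op_mod selects read or write per the enum above, register_id names the register, and register_data[] carries the register layout itself, so both input and output must be sized with the register payload included. A sketch, assuming mlx5_cmd_exec(); the core driver exports a helper along these lines (mlx5_core_access_reg()), but the function below is hypothetical.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: read an arbitrary register into 'data' (reg_sz bytes).
 * The register payload sits in register_data[] right after the fixed part
 * of the access_register layouts. */
static int read_register_sketch(struct mlx5_core_dev *dev, u16 reg_id,
                                void *data, int reg_sz)
{
        int in_sz = MLX5_ST_SZ_BYTES(access_register_in) + reg_sz;
        int out_sz = MLX5_ST_SZ_BYTES(access_register_out) + reg_sz;
        void *in, *out;
        int err;

        in = kzalloc(in_sz, GFP_KERNEL);
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto done;
        }

        MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
        MLX5_SET(access_register_in, in, op_mod,
                 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
        MLX5_SET(access_register_in, in, register_id, reg_id);

        err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
        if (!err)
                memcpy(data,
                       (char *)out + MLX5_ST_SZ_BYTES(access_register_out),
                       reg_sz);
done:
        kfree(in);
        kfree(out);
        return err;
}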
| 5944 | struct mlx5_ifc_sltp_reg_bits { | ||
| 5945 | u8 status[0x4]; | ||
| 5946 | u8 version[0x4]; | ||
| 5947 | u8 local_port[0x8]; | ||
| 5948 | u8 pnat[0x2]; | ||
| 5949 | u8 reserved_0[0x2]; | ||
| 5950 | u8 lane[0x4]; | ||
| 5951 | u8 reserved_1[0x8]; | ||
| 5952 | |||
| 5953 | u8 reserved_2[0x20]; | ||
| 5954 | |||
| 5955 | u8 reserved_3[0x7]; | ||
| 5956 | u8 polarity[0x1]; | ||
| 5957 | u8 ob_tap0[0x8]; | ||
| 5958 | u8 ob_tap1[0x8]; | ||
| 5959 | u8 ob_tap2[0x8]; | ||
| 5960 | |||
| 5961 | u8 reserved_4[0xc]; | ||
| 5962 | u8 ob_preemp_mode[0x4]; | ||
| 5963 | u8 ob_reg[0x8]; | ||
| 5964 | u8 ob_bias[0x8]; | ||
| 5965 | |||
| 5966 | u8 reserved_5[0x20]; | ||
| 5967 | }; | ||
| 5968 | |||
| 5969 | struct mlx5_ifc_slrg_reg_bits { | ||
| 5970 | u8 status[0x4]; | ||
| 5971 | u8 version[0x4]; | ||
| 5972 | u8 local_port[0x8]; | ||
| 5973 | u8 pnat[0x2]; | ||
| 5974 | u8 reserved_0[0x2]; | ||
| 5975 | u8 lane[0x4]; | ||
| 5976 | u8 reserved_1[0x8]; | ||
| 5977 | |||
| 5978 | u8 time_to_link_up[0x10]; | ||
| 5979 | u8 reserved_2[0xc]; | ||
| 5980 | u8 grade_lane_speed[0x4]; | ||
| 5981 | |||
| 5982 | u8 grade_version[0x8]; | ||
| 5983 | u8 grade[0x18]; | ||
| 5984 | |||
| 5985 | u8 reserved_3[0x4]; | ||
| 5986 | u8 height_grade_type[0x4]; | ||
| 5987 | u8 height_grade[0x18]; | ||
| 5988 | |||
| 5989 | u8 height_dz[0x10]; | ||
| 5990 | u8 height_dv[0x10]; | ||
| 5991 | |||
| 5992 | u8 reserved_4[0x10]; | ||
| 5993 | u8 height_sigma[0x10]; | ||
| 5994 | |||
| 5995 | u8 reserved_5[0x20]; | ||
| 5996 | |||
| 5997 | u8 reserved_6[0x4]; | ||
| 5998 | u8 phase_grade_type[0x4]; | ||
| 5999 | u8 phase_grade[0x18]; | ||
| 6000 | |||
| 6001 | u8 reserved_7[0x8]; | ||
| 6002 | u8 phase_eo_pos[0x8]; | ||
| 6003 | u8 reserved_8[0x8]; | ||
| 6004 | u8 phase_eo_neg[0x8]; | ||
| 6005 | |||
| 6006 | u8 ffe_set_tested[0x10]; | ||
| 6007 | u8 test_errors_per_lane[0x10]; | ||
| 6008 | }; | ||
| 6009 | |||
| 6010 | struct mlx5_ifc_pvlc_reg_bits { | ||
| 6011 | u8 reserved_0[0x8]; | ||
| 6012 | u8 local_port[0x8]; | ||
| 6013 | u8 reserved_1[0x10]; | ||
| 6014 | |||
| 6015 | u8 reserved_2[0x1c]; | ||
| 6016 | u8 vl_hw_cap[0x4]; | ||
| 6017 | |||
| 6018 | u8 reserved_3[0x1c]; | ||
| 6019 | u8 vl_admin[0x4]; | ||
| 6020 | |||
| 6021 | u8 reserved_4[0x1c]; | ||
| 6022 | u8 vl_operational[0x4]; | ||
| 6023 | }; | ||
| 6024 | |||
| 6025 | struct mlx5_ifc_pude_reg_bits { | ||
| 6026 | u8 swid[0x8]; | ||
| 6027 | u8 local_port[0x8]; | ||
| 6028 | u8 reserved_0[0x4]; | ||
| 6029 | u8 admin_status[0x4]; | ||
| 6030 | u8 reserved_1[0x4]; | ||
| 6031 | u8 oper_status[0x4]; | ||
| 6032 | |||
| 6033 | u8 reserved_2[0x60]; | ||
| 6034 | }; | ||
| 6035 | |||
| 6036 | struct mlx5_ifc_ptys_reg_bits { | ||
| 6037 | u8 reserved_0[0x8]; | ||
| 6038 | u8 local_port[0x8]; | ||
| 6039 | u8 reserved_1[0xd]; | ||
| 6040 | u8 proto_mask[0x3]; | ||
| 6041 | |||
| 6042 | u8 reserved_2[0x40]; | ||
| 6043 | |||
| 6044 | u8 eth_proto_capability[0x20]; | ||
| 6045 | |||
| 6046 | u8 ib_link_width_capability[0x10]; | ||
| 6047 | u8 ib_proto_capability[0x10]; | ||
| 6048 | |||
| 6049 | u8 reserved_3[0x20]; | ||
| 6050 | |||
| 6051 | u8 eth_proto_admin[0x20]; | ||
| 6052 | |||
| 6053 | u8 ib_link_width_admin[0x10]; | ||
| 6054 | u8 ib_proto_admin[0x10]; | ||
| 6055 | |||
| 6056 | u8 reserved_4[0x20]; | ||
| 6057 | |||
| 6058 | u8 eth_proto_oper[0x20]; | ||
| 6059 | |||
| 6060 | u8 ib_link_width_oper[0x10]; | ||
| 6061 | u8 ib_proto_oper[0x10]; | ||
| 6062 | |||
| 6063 | u8 reserved_5[0x20]; | ||
| 6064 | |||
| 6065 | u8 eth_proto_lp_advertise[0x20]; | ||
| 6066 | |||
| 6067 | u8 reserved_6[0x60]; | ||
| 6068 | }; | ||
| 6069 | |||
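PTYS is the port type and speed register: proto_mask selects the InfiniBand or Ethernet view, and the capability/admin/oper triples report what the port supports, what is administratively configured, and what is currently negotiated. A sketch of reading the operational Ethernet protocol, assuming the mlx5_core_access_reg() helper and the MLX5_REG_PTYS register id exported by the core driver; both, the Ethernet proto_mask bit value, and the helper name should be treated as assumptions.

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Hypothetical sketch: read the operational Ethernet protocol of a port. */
static int query_eth_proto_oper_sketch(struct mlx5_core_dev *dev,
                                       u8 local_port, u32 *proto_oper)
{
        u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
        u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
        int err;

        MLX5_SET(ptys_reg, in, local_port, local_port);
        MLX5_SET(ptys_reg, in, proto_mask, 1 << 2);  /* assumed Ethernet bit */

        err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
                                   MLX5_REG_PTYS, 0, 0);
        if (!err)
                *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

        return err;
}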
| 6070 | struct mlx5_ifc_ptas_reg_bits { | ||
| 6071 | u8 reserved_0[0x20]; | ||
| 6072 | |||
| 6073 | u8 algorithm_options[0x10]; | ||
| 6074 | u8 reserved_1[0x4]; | ||
| 6075 | u8 repetitions_mode[0x4]; | ||
| 6076 | u8 num_of_repetitions[0x8]; | ||
| 6077 | |||
| 6078 | u8 grade_version[0x8]; | ||
| 6079 | u8 height_grade_type[0x4]; | ||
| 6080 | u8 phase_grade_type[0x4]; | ||
| 6081 | u8 height_grade_weight[0x8]; | ||
| 6082 | u8 phase_grade_weight[0x8]; | ||
| 6083 | |||
| 6084 | u8 gisim_measure_bits[0x10]; | ||
| 6085 | u8 adaptive_tap_measure_bits[0x10]; | ||
| 6086 | |||
| 6087 | u8 ber_bath_high_error_threshold[0x10]; | ||
| 6088 | u8 ber_bath_mid_error_threshold[0x10]; | ||
| 6089 | |||
| 6090 | u8 ber_bath_low_error_threshold[0x10]; | ||
| 6091 | u8 one_ratio_high_threshold[0x10]; | ||
| 6092 | |||
| 6093 | u8 one_ratio_high_mid_threshold[0x10]; | ||
| 6094 | u8 one_ratio_low_mid_threshold[0x10]; | ||
| 6095 | |||
| 6096 | u8 one_ratio_low_threshold[0x10]; | ||
| 6097 | u8 ndeo_error_threshold[0x10]; | ||
| 6098 | |||
| 6099 | u8 mixer_offset_step_size[0x10]; | ||
| 6100 | u8 reserved_2[0x8]; | ||
| 6101 | u8 mix90_phase_for_voltage_bath[0x8]; | ||
| 6102 | |||
| 6103 | u8 mixer_offset_start[0x10]; | ||
| 6104 | u8 mixer_offset_end[0x10]; | ||
| 6105 | |||
| 6106 | u8 reserved_3[0x15]; | ||
| 6107 | u8 ber_test_time[0xb]; | ||
| 6108 | }; | ||
| 6109 | |||
| 6110 | struct mlx5_ifc_pspa_reg_bits { | ||
| 6111 | u8 swid[0x8]; | ||
| 6112 | u8 local_port[0x8]; | ||
| 6113 | u8 sub_port[0x8]; | ||
| 6114 | u8 reserved_0[0x8]; | ||
| 6115 | |||
| 6116 | u8 reserved_1[0x20]; | ||
| 6117 | }; | ||
| 6118 | |||
| 6119 | struct mlx5_ifc_pqdr_reg_bits { | ||
| 6120 | u8 reserved_0[0x8]; | ||
| 6121 | u8 local_port[0x8]; | ||
| 6122 | u8 reserved_1[0x5]; | ||
| 6123 | u8 prio[0x3]; | ||
| 6124 | u8 reserved_2[0x6]; | ||
| 6125 | u8 mode[0x2]; | ||
| 6126 | |||
| 6127 | u8 reserved_3[0x20]; | ||
| 6128 | |||
| 6129 | u8 reserved_4[0x10]; | ||
| 6130 | u8 min_threshold[0x10]; | ||
| 6131 | |||
| 6132 | u8 reserved_5[0x10]; | ||
| 6133 | u8 max_threshold[0x10]; | ||
| 6134 | |||
| 6135 | u8 reserved_6[0x10]; | ||
| 6136 | u8 mark_probability_denominator[0x10]; | ||
| 6137 | |||
| 6138 | u8 reserved_7[0x60]; | ||
| 6139 | }; | ||
| 6140 | |||
| 6141 | struct mlx5_ifc_ppsc_reg_bits { | ||
| 6142 | u8 reserved_0[0x8]; | ||
| 6143 | u8 local_port[0x8]; | ||
| 6144 | u8 reserved_1[0x10]; | ||
| 6145 | |||
| 6146 | u8 reserved_2[0x60]; | ||
| 6147 | |||
| 6148 | u8 reserved_3[0x1c]; | ||
| 6149 | u8 wrps_admin[0x4]; | ||
| 6150 | |||
| 6151 | u8 reserved_4[0x1c]; | ||
| 6152 | u8 wrps_status[0x4]; | ||
| 6153 | |||
| 6154 | u8 reserved_5[0x8]; | ||
| 6155 | u8 up_threshold[0x8]; | ||
| 6156 | u8 reserved_6[0x8]; | ||
| 6157 | u8 down_threshold[0x8]; | ||
| 6158 | |||
| 6159 | u8 reserved_7[0x20]; | ||
| 6160 | |||
| 6161 | u8 reserved_8[0x1c]; | ||
| 6162 | u8 srps_admin[0x4]; | ||
| 6163 | |||
| 6164 | u8 reserved_9[0x1c]; | ||
| 6165 | u8 srps_status[0x4]; | ||
| 6166 | |||
| 6167 | u8 reserved_10[0x40]; | ||
| 6168 | }; | ||
| 6169 | |||
| 6170 | struct mlx5_ifc_pplr_reg_bits { | ||
| 6171 | u8 reserved_0[0x8]; | ||
| 6172 | u8 local_port[0x8]; | ||
| 6173 | u8 reserved_1[0x10]; | ||
| 6174 | |||
| 6175 | u8 reserved_2[0x8]; | ||
| 6176 | u8 lb_cap[0x8]; | ||
| 6177 | u8 reserved_3[0x8]; | ||
| 6178 | u8 lb_en[0x8]; | ||
| 6179 | }; | ||
| 6180 | |||
| 6181 | struct mlx5_ifc_pplm_reg_bits { | ||
| 6182 | u8 reserved_0[0x8]; | ||
| 6183 | u8 local_port[0x8]; | ||
| 6184 | u8 reserved_1[0x10]; | ||
| 6185 | |||
| 6186 | u8 reserved_2[0x20]; | ||
| 6187 | |||
| 6188 | u8 port_profile_mode[0x8]; | ||
| 6189 | u8 static_port_profile[0x8]; | ||
| 6190 | u8 active_port_profile[0x8]; | ||
| 6191 | u8 reserved_3[0x8]; | ||
| 6192 | |||
| 6193 | u8 retransmission_active[0x8]; | ||
| 6194 | u8 fec_mode_active[0x18]; | ||
| 6195 | |||
| 6196 | u8 reserved_4[0x20]; | ||
| 6197 | }; | ||
| 6198 | |||
| 6199 | struct mlx5_ifc_ppcnt_reg_bits { | ||
| 6200 | u8 swid[0x8]; | ||
| 6201 | u8 local_port[0x8]; | ||
| 6202 | u8 pnat[0x2]; | ||
| 6203 | u8 reserved_0[0x8]; | ||
| 6204 | u8 grp[0x6]; | ||
| 6205 | |||
| 6206 | u8 clr[0x1]; | ||
| 6207 | u8 reserved_1[0x1c]; | ||
| 6208 | u8 prio_tc[0x3]; | ||
| 6209 | |||
| 6210 | union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; | ||
| 6211 | }; | ||
| 6212 | |||
| 6213 | struct mlx5_ifc_ppad_reg_bits { | ||
| 6214 | u8 reserved_0[0x3]; | ||
| 6215 | u8 single_mac[0x1]; | ||
| 6216 | u8 reserved_1[0x4]; | ||
| 6217 | u8 local_port[0x8]; | ||
| 6218 | u8 mac_47_32[0x10]; | ||
| 6219 | |||
| 6220 | u8 mac_31_0[0x20]; | ||
| 6221 | |||
| 6222 | u8 reserved_2[0x40]; | ||
| 6223 | }; | ||
| 6224 | |||
| 6225 | struct mlx5_ifc_pmtu_reg_bits { | ||
| 6226 | u8 reserved_0[0x8]; | ||
| 6227 | u8 local_port[0x8]; | ||
| 6228 | u8 reserved_1[0x10]; | ||
| 6229 | |||
| 6230 | u8 max_mtu[0x10]; | ||
| 6231 | u8 reserved_2[0x10]; | ||
| 6232 | |||
| 6233 | u8 admin_mtu[0x10]; | ||
| 6234 | u8 reserved_3[0x10]; | ||
| 6235 | |||
| 6236 | u8 oper_mtu[0x10]; | ||
| 6237 | u8 reserved_4[0x10]; | ||
| 6238 | }; | ||
| 6239 | |||
| 6240 | struct mlx5_ifc_pmpr_reg_bits { | ||
| 6241 | u8 reserved_0[0x8]; | ||
| 6242 | u8 module[0x8]; | ||
| 6243 | u8 reserved_1[0x10]; | ||
| 6244 | |||
| 6245 | u8 reserved_2[0x18]; | ||
| 6246 | u8 attenuation_5g[0x8]; | ||
| 6247 | |||
| 6248 | u8 reserved_3[0x18]; | ||
| 6249 | u8 attenuation_7g[0x8]; | ||
| 6250 | |||
| 6251 | u8 reserved_4[0x18]; | ||
| 6252 | u8 attenuation_12g[0x8]; | ||
| 6253 | }; | ||
| 6254 | |||
| 6255 | struct mlx5_ifc_pmpe_reg_bits { | ||
| 6256 | u8 reserved_0[0x8]; | ||
| 6257 | u8 module[0x8]; | ||
| 6258 | u8 reserved_1[0xc]; | ||
| 6259 | u8 module_status[0x4]; | ||
| 6260 | |||
| 6261 | u8 reserved_2[0x60]; | ||
| 6262 | }; | ||
| 6263 | |||
| 6264 | struct mlx5_ifc_pmpc_reg_bits { | ||
| 6265 | u8 module_state_updated[32][0x8]; | ||
| 6266 | }; | ||
| 6267 | |||
| 6268 | struct mlx5_ifc_pmlpn_reg_bits { | ||
| 6269 | u8 reserved_0[0x4]; | ||
| 6270 | u8 mlpn_status[0x4]; | ||
| 6271 | u8 local_port[0x8]; | ||
| 6272 | u8 reserved_1[0x10]; | ||
| 6273 | |||
| 6274 | u8 e[0x1]; | ||
| 6275 | u8 reserved_2[0x1f]; | ||
| 6276 | }; | ||
| 6277 | |||
| 6278 | struct mlx5_ifc_pmlp_reg_bits { | ||
| 6279 | u8 rxtx[0x1]; | ||
| 6280 | u8 reserved_0[0x7]; | ||
| 6281 | u8 local_port[0x8]; | ||
| 6282 | u8 reserved_1[0x8]; | ||
| 6283 | u8 width[0x8]; | ||
| 6284 | |||
| 6285 | u8 lane0_module_mapping[0x20]; | ||
| 6286 | |||
| 6287 | u8 lane1_module_mapping[0x20]; | ||
| 6288 | |||
| 6289 | u8 lane2_module_mapping[0x20]; | ||
| 6290 | |||
| 6291 | u8 lane3_module_mapping[0x20]; | ||
| 6292 | |||
| 6293 | u8 reserved_2[0x160]; | ||
| 6294 | }; | ||
| 6295 | |||
| 6296 | struct mlx5_ifc_pmaos_reg_bits { | ||
| 6297 | u8 reserved_0[0x8]; | ||
| 6298 | u8 module[0x8]; | ||
| 6299 | u8 reserved_1[0x4]; | ||
| 6300 | u8 admin_status[0x4]; | ||
| 6301 | u8 reserved_2[0x4]; | ||
| 6302 | u8 oper_status[0x4]; | ||
| 6303 | |||
| 6304 | u8 ase[0x1]; | ||
| 6305 | u8 ee[0x1]; | ||
| 6306 | u8 reserved_3[0x1c]; | ||
| 6307 | u8 e[0x2]; | ||
| 6308 | |||
| 6309 | u8 reserved_4[0x40]; | ||
| 6310 | }; | ||
| 6311 | |||
| 6312 | struct mlx5_ifc_plpc_reg_bits { | ||
| 6313 | u8 reserved_0[0x4]; | ||
| 6314 | u8 profile_id[0xc]; | ||
| 6315 | u8 reserved_1[0x4]; | ||
| 6316 | u8 proto_mask[0x4]; | ||
| 6317 | u8 reserved_2[0x8]; | ||
| 6318 | |||
| 6319 | u8 reserved_3[0x10]; | ||
| 6320 | u8 lane_speed[0x10]; | ||
| 6321 | |||
| 6322 | u8 reserved_4[0x17]; | ||
| 6323 | u8 lpbf[0x1]; | ||
| 6324 | u8 fec_mode_policy[0x8]; | ||
| 6325 | |||
| 6326 | u8 retransmission_capability[0x8]; | ||
| 6327 | u8 fec_mode_capability[0x18]; | ||
| 6328 | |||
| 6329 | u8 retransmission_support_admin[0x8]; | ||
| 6330 | u8 fec_mode_support_admin[0x18]; | ||
| 6331 | |||
| 6332 | u8 retransmission_request_admin[0x8]; | ||
| 6333 | u8 fec_mode_request_admin[0x18]; | ||
| 6334 | |||
| 6335 | u8 reserved_5[0x80]; | ||
| 6336 | }; | ||
| 6337 | |||
| 6338 | struct mlx5_ifc_plib_reg_bits { | ||
| 6339 | u8 reserved_0[0x8]; | ||
| 6340 | u8 local_port[0x8]; | ||
| 6341 | u8 reserved_1[0x8]; | ||
| 6342 | u8 ib_port[0x8]; | ||
| 6343 | |||
| 6344 | u8 reserved_2[0x60]; | ||
| 6345 | }; | ||
| 6346 | |||
| 6347 | struct mlx5_ifc_plbf_reg_bits { | ||
| 6348 | u8 reserved_0[0x8]; | ||
| 6349 | u8 local_port[0x8]; | ||
| 6350 | u8 reserved_1[0xd]; | ||
| 6351 | u8 lbf_mode[0x3]; | ||
| 6352 | |||
| 6353 | u8 reserved_2[0x20]; | ||
| 6354 | }; | ||
| 6355 | |||
| 6356 | struct mlx5_ifc_pipg_reg_bits { | ||
| 6357 | u8 reserved_0[0x8]; | ||
| 6358 | u8 local_port[0x8]; | ||
| 6359 | u8 reserved_1[0x10]; | ||
| 6360 | |||
| 6361 | u8 dic[0x1]; | ||
| 6362 | u8 reserved_2[0x19]; | ||
| 6363 | u8 ipg[0x4]; | ||
| 6364 | u8 reserved_3[0x2]; | ||
| 6365 | }; | ||
| 6366 | |||
| 6367 | struct mlx5_ifc_pifr_reg_bits { | ||
| 6368 | u8 reserved_0[0x8]; | ||
| 6369 | u8 local_port[0x8]; | ||
| 6370 | u8 reserved_1[0x10]; | ||
| 6371 | |||
| 6372 | u8 reserved_2[0xe0]; | ||
| 6373 | |||
| 6374 | u8 port_filter[8][0x20]; | ||
| 6375 | |||
| 6376 | u8 port_filter_update_en[8][0x20]; | ||
| 6377 | }; | ||
| 6378 | |||
| 6379 | struct mlx5_ifc_pfcc_reg_bits { | ||
| 6380 | u8 reserved_0[0x8]; | ||
| 6381 | u8 local_port[0x8]; | ||
| 6382 | u8 reserved_1[0x10]; | ||
| 6383 | |||
| 6384 | u8 ppan[0x4]; | ||
| 6385 | u8 reserved_2[0x4]; | ||
| 6386 | u8 prio_mask_tx[0x8]; | ||
| 6387 | u8 reserved_3[0x8]; | ||
| 6388 | u8 prio_mask_rx[0x8]; | ||
| 6389 | |||
| 6390 | u8 pptx[0x1]; | ||
| 6391 | u8 aptx[0x1]; | ||
| 6392 | u8 reserved_4[0x6]; | ||
| 6393 | u8 pfctx[0x8]; | ||
| 6394 | u8 reserved_5[0x10]; | ||
| 6395 | |||
| 6396 | u8 pprx[0x1]; | ||
| 6397 | u8 aprx[0x1]; | ||
| 6398 | u8 reserved_6[0x6]; | ||
| 6399 | u8 pfcrx[0x8]; | ||
| 6400 | u8 reserved_7[0x10]; | ||
| 6401 | |||
| 6402 | u8 reserved_8[0x80]; | ||
| 6403 | }; | ||
| 6404 | |||
| 6405 | struct mlx5_ifc_pelc_reg_bits { | ||
| 6406 | u8 op[0x4]; | ||
| 6407 | u8 reserved_0[0x4]; | ||
| 6408 | u8 local_port[0x8]; | ||
| 6409 | u8 reserved_1[0x10]; | ||
| 6410 | |||
| 6411 | u8 op_admin[0x8]; | ||
| 6412 | u8 op_capability[0x8]; | ||
| 6413 | u8 op_request[0x8]; | ||
| 6414 | u8 op_active[0x8]; | ||
| 6415 | |||
| 6416 | u8 admin[0x40]; | ||
| 6417 | |||
| 6418 | u8 capability[0x40]; | ||
| 6419 | |||
| 6420 | u8 request[0x40]; | ||
| 6421 | |||
| 6422 | u8 active[0x40]; | ||
| 6423 | |||
| 6424 | u8 reserved_2[0x80]; | ||
| 6425 | }; | ||
| 6426 | |||
| 6427 | struct mlx5_ifc_peir_reg_bits { | ||
| 6428 | u8 reserved_0[0x8]; | ||
| 6429 | u8 local_port[0x8]; | ||
| 6430 | u8 reserved_1[0x10]; | ||
| 6431 | |||
| 6432 | u8 reserved_2[0xc]; | ||
| 6433 | u8 error_count[0x4]; | ||
| 6434 | u8 reserved_3[0x10]; | ||
| 6435 | |||
| 6436 | u8 reserved_4[0xc]; | ||
| 6437 | u8 lane[0x4]; | ||
| 6438 | u8 reserved_5[0x8]; | ||
| 6439 | u8 error_type[0x8]; | ||
| 6440 | }; | ||
| 6441 | |||
| 6442 | struct mlx5_ifc_pcap_reg_bits { | ||
| 6443 | u8 reserved_0[0x8]; | ||
| 6444 | u8 local_port[0x8]; | ||
| 6445 | u8 reserved_1[0x10]; | ||
| 6446 | |||
| 6447 | u8 port_capability_mask[4][0x20]; | ||
| 6448 | }; | ||
| 6449 | |||
| 6450 | struct mlx5_ifc_paos_reg_bits { | ||
| 6451 | u8 swid[0x8]; | ||
| 6452 | u8 local_port[0x8]; | ||
| 6453 | u8 reserved_0[0x4]; | ||
| 6454 | u8 admin_status[0x4]; | ||
| 6455 | u8 reserved_1[0x4]; | ||
| 6456 | u8 oper_status[0x4]; | ||
| 6457 | |||
| 6458 | u8 ase[0x1]; | ||
| 6459 | u8 ee[0x1]; | ||
| 6460 | u8 reserved_2[0x1c]; | ||
| 6461 | u8 e[0x2]; | ||
| 6462 | |||
| 6463 | u8 reserved_3[0x40]; | ||
| 6464 | }; | ||
| 6465 | |||
| 6466 | struct mlx5_ifc_pamp_reg_bits { | ||
| 6467 | u8 reserved_0[0x8]; | ||
| 6468 | u8 opamp_group[0x8]; | ||
| 6469 | u8 reserved_1[0xc]; | ||
| 6470 | u8 opamp_group_type[0x4]; | ||
| 6471 | |||
| 6472 | u8 start_index[0x10]; | ||
| 6473 | u8 reserved_2[0x4]; | ||
| 6474 | u8 num_of_indices[0xc]; | ||
| 6475 | |||
| 6476 | u8 index_data[18][0x10]; | ||
| 6477 | }; | ||
| 6478 | |||
| 6479 | struct mlx5_ifc_lane_2_module_mapping_bits { | ||
| 6480 | u8 reserved_0[0x6]; | ||
| 6481 | u8 rx_lane[0x2]; | ||
| 6482 | u8 reserved_1[0x6]; | ||
| 6483 | u8 tx_lane[0x2]; | ||
| 6484 | u8 reserved_2[0x8]; | ||
| 6485 | u8 module[0x8]; | ||
| 6486 | }; | ||
| 6487 | |||
| 6488 | struct mlx5_ifc_bufferx_reg_bits { | ||
| 6489 | u8 reserved_0[0x6]; | ||
| 6490 | u8 lossy[0x1]; | ||
| 6491 | u8 epsb[0x1]; | ||
| 6492 | u8 reserved_1[0xc]; | ||
| 6493 | u8 size[0xc]; | ||
| 6494 | |||
| 6495 | u8 xoff_threshold[0x10]; | ||
| 6496 | u8 xon_threshold[0x10]; | ||
| 6497 | }; | ||
| 6498 | |||
| 6499 | struct mlx5_ifc_set_node_in_bits { | ||
| 6500 | u8 node_description[64][0x8]; | ||
| 6501 | }; | ||
| 6502 | |||
| 6503 | struct mlx5_ifc_register_power_settings_bits { | ||
| 6504 | u8 reserved_0[0x18]; | ||
| 6505 | u8 power_settings_level[0x8]; | ||
| 6506 | |||
| 6507 | u8 reserved_1[0x60]; | ||
| 6508 | }; | ||
| 6509 | |||
| 6510 | struct mlx5_ifc_register_host_endianness_bits { | ||
| 6511 | u8 he[0x1]; | ||
| 6512 | u8 reserved_0[0x1f]; | ||
| 6513 | |||
| 6514 | u8 reserved_1[0x60]; | ||
| 6515 | }; | ||
| 6516 | |||
| 6517 | struct mlx5_ifc_umr_pointer_desc_argument_bits { | ||
| 6518 | u8 reserved_0[0x20]; | ||
| 6519 | |||
| 6520 | u8 mkey[0x20]; | ||
| 6521 | |||
| 6522 | u8 addressh_63_32[0x20]; | ||
| 6523 | |||
| 6524 | u8 addressl_31_0[0x20]; | ||
| 6525 | }; | ||
| 6526 | |||
| 6527 | struct mlx5_ifc_ud_adrs_vector_bits { | ||
| 6528 | u8 dc_key[0x40]; | ||
| 6529 | |||
| 6530 | u8 ext[0x1]; | ||
| 6531 | u8 reserved_0[0x7]; | ||
| 6532 | u8 destination_qp_dct[0x18]; | ||
| 6533 | |||
| 6534 | u8 static_rate[0x4]; | ||
| 6535 | u8 sl_eth_prio[0x4]; | ||
| 6536 | u8 fl[0x1]; | ||
| 6537 | u8 mlid[0x7]; | ||
| 6538 | u8 rlid_udp_sport[0x10]; | ||
| 6539 | |||
| 6540 | u8 reserved_1[0x20]; | ||
| 6541 | |||
| 6542 | u8 rmac_47_16[0x20]; | ||
| 6543 | |||
| 6544 | u8 rmac_15_0[0x10]; | ||
| 6545 | u8 tclass[0x8]; | ||
| 6546 | u8 hop_limit[0x8]; | ||
| 6547 | |||
| 6548 | u8 reserved_2[0x1]; | ||
| 6549 | u8 grh[0x1]; | ||
| 6550 | u8 reserved_3[0x2]; | ||
| 6551 | u8 src_addr_index[0x8]; | ||
| 6552 | u8 flow_label[0x14]; | ||
| 6553 | |||
| 6554 | u8 rgid_rip[16][0x8]; | ||
| 6555 | }; | ||
| 6556 | |||
| 6557 | struct mlx5_ifc_pages_req_event_bits { | ||
| 6558 | u8 reserved_0[0x10]; | ||
| 6559 | u8 function_id[0x10]; | ||
| 6560 | |||
| 6561 | u8 num_pages[0x20]; | ||
| 6562 | |||
| 6563 | u8 reserved_1[0xa0]; | ||
| 6564 | }; | ||
| 6565 | |||
| 6566 | struct mlx5_ifc_eqe_bits { | ||
| 6567 | u8 reserved_0[0x8]; | ||
| 6568 | u8 event_type[0x8]; | ||
| 6569 | u8 reserved_1[0x8]; | ||
| 6570 | u8 event_sub_type[0x8]; | ||
| 6571 | |||
| 6572 | u8 reserved_2[0xe0]; | ||
| 6573 | |||
| 6574 | union mlx5_ifc_event_auto_bits event_data; | ||
| 6575 | |||
| 6576 | u8 reserved_3[0x10]; | ||
| 6577 | u8 signature[0x8]; | ||
| 6578 | u8 reserved_4[0x7]; | ||
| 6579 | u8 owner[0x1]; | ||
| 6580 | }; | ||
| 6581 | |||
| 6582 | enum { | ||
| 6583 | MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, | ||
| 6584 | }; | ||
| 6585 | |||
| 6586 | struct mlx5_ifc_cmd_queue_entry_bits { | ||
| 6587 | u8 type[0x8]; | ||
| 6588 | u8 reserved_0[0x18]; | ||
| 6589 | |||
| 6590 | u8 input_length[0x20]; | ||
| 6591 | |||
| 6592 | u8 input_mailbox_pointer_63_32[0x20]; | ||
| 6593 | |||
| 6594 | u8 input_mailbox_pointer_31_9[0x17]; | ||
| 6595 | u8 reserved_1[0x9]; | ||
| 6596 | |||
| 6597 | u8 command_input_inline_data[16][0x8]; | ||
| 6598 | |||
| 6599 | u8 command_output_inline_data[16][0x8]; | ||
| 6600 | |||
| 6601 | u8 output_mailbox_pointer_63_32[0x20]; | ||
| 6602 | |||
| 6603 | u8 output_mailbox_pointer_31_9[0x17]; | ||
| 6604 | u8 reserved_2[0x9]; | ||
| 6605 | |||
| 6606 | u8 output_length[0x20]; | ||
| 6607 | |||
| 6608 | u8 token[0x8]; | ||
| 6609 | u8 signature[0x8]; | ||
| 6610 | u8 reserved_3[0x8]; | ||
| 6611 | u8 status[0x7]; | ||
| 6612 | u8 ownership[0x1]; | ||
| 6613 | }; | ||
| 6614 | |||
| 6615 | struct mlx5_ifc_cmd_out_bits { | ||
| 6616 | u8 status[0x8]; | ||
| 6617 | u8 reserved_0[0x18]; | ||
| 6618 | |||
| 6619 | u8 syndrome[0x20]; | ||
| 6620 | |||
| 6621 | u8 command_output[0x20]; | ||
| 6622 | }; | ||
| 6623 | |||
| 6624 | struct mlx5_ifc_cmd_in_bits { | ||
| 6625 | u8 opcode[0x10]; | ||
| 6626 | u8 reserved_0[0x10]; | ||
| 6627 | |||
| 6628 | u8 reserved_1[0x10]; | ||
| 6629 | u8 op_mod[0x10]; | ||
| 6630 | |||
| 6631 | u8 command[0][0x20]; | ||
| 6632 | }; | ||
| 6633 | |||
| 6634 | struct mlx5_ifc_cmd_if_box_bits { | ||
| 6635 | u8 mailbox_data[512][0x8]; | ||
| 6636 | |||
| 6637 | u8 reserved_0[0x180]; | ||
| 6638 | |||
| 6639 | u8 next_pointer_63_32[0x20]; | ||
| 6640 | |||
| 6641 | u8 next_pointer_31_10[0x16]; | ||
| 6642 | u8 reserved_1[0xa]; | ||
| 6643 | |||
| 6644 | u8 block_number[0x20]; | ||
| 6645 | |||
| 6646 | u8 reserved_2[0x8]; | ||
| 6647 | u8 token[0x8]; | ||
| 6648 | u8 ctrl_signature[0x8]; | ||
| 6649 | u8 signature[0x8]; | ||
| 6650 | }; | ||
| 6651 | |||
| 6652 | struct mlx5_ifc_mtt_bits { | ||
| 6653 | u8 ptag_63_32[0x20]; | ||
| 6654 | |||
| 6655 | u8 ptag_31_8[0x18]; | ||
| 6656 | u8 reserved_0[0x6]; | ||
| 6657 | u8 wr_en[0x1]; | ||
| 6658 | u8 rd_en[0x1]; | ||
| 6659 | }; | ||
| 6660 | |||
| 6661 | enum { | ||
| 6662 | MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, | ||
| 6663 | MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, | ||
| 6664 | MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, | ||
| 6665 | }; | ||
| 6666 | |||
| 6667 | enum { | ||
| 6668 | MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, | ||
| 6669 | MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, | ||
| 6670 | MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, | ||
| 6671 | }; | ||
| 6672 | |||
| 6673 | enum { | ||
| 6674 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1, | ||
| 6675 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7, | ||
| 6676 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8, | ||
| 6677 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9, | ||
| 6678 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa, | ||
| 6679 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb, | ||
| 6680 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc, | ||
| 6681 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd, | ||
| 6682 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, | ||
| 6683 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, | ||
| 6684 | MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, | ||
| 6685 | }; | ||
| 6686 | |||
| 6687 | struct mlx5_ifc_initial_seg_bits { | ||
| 6688 | u8 fw_rev_minor[0x10]; | ||
| 6689 | u8 fw_rev_major[0x10]; | ||
| 6690 | |||
| 6691 | u8 cmd_interface_rev[0x10]; | ||
| 6692 | u8 fw_rev_subminor[0x10]; | ||
| 6693 | |||
| 6694 | u8 reserved_0[0x40]; | ||
| 6695 | |||
| 6696 | u8 cmdq_phy_addr_63_32[0x20]; | ||
| 6697 | |||
| 6698 | u8 cmdq_phy_addr_31_12[0x14]; | ||
| 6699 | u8 reserved_1[0x2]; | ||
| 6700 | u8 nic_interface[0x2]; | ||
| 6701 | u8 log_cmdq_size[0x4]; | ||
| 6702 | u8 log_cmdq_stride[0x4]; | ||
| 6703 | |||
| 6704 | u8 command_doorbell_vector[0x20]; | ||
| 6705 | |||
| 6706 | u8 reserved_2[0xf00]; | ||
| 6707 | |||
| 6708 | u8 initializing[0x1]; | ||
| 6709 | u8 reserved_3[0x4]; | ||
| 6710 | u8 nic_interface_supported[0x3]; | ||
| 6711 | u8 reserved_4[0x18]; | ||
| 6712 | |||
| 6713 | struct mlx5_ifc_health_buffer_bits health_buffer; | ||
| 6714 | |||
| 6715 | u8 no_dram_nic_offset[0x20]; | ||
| 6716 | |||
| 6717 | u8 reserved_5[0x6e40]; | ||
| 6718 | |||
| 6719 | u8 reserved_6[0x1f]; | ||
| 6720 | u8 clear_int[0x1]; | ||
| 6721 | |||
| 6722 | u8 health_syndrome[0x8]; | ||
| 6723 | u8 health_counter[0x18]; | ||
| 6724 | |||
| 6725 | u8 reserved_7[0x17fc0]; | ||
| 6726 | }; | ||
| 6727 | |||
| 6728 | union mlx5_ifc_ports_control_registers_document_bits { | ||
| 6729 | struct mlx5_ifc_bufferx_reg_bits bufferx_reg; | ||
| 6730 | struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; | ||
| 6731 | struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; | ||
| 6732 | struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; | ||
| 6733 | struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; | ||
| 6734 | struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; | ||
| 6735 | struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; | ||
| 6736 | struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; | ||
| 6737 | struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; | ||
| 6738 | struct mlx5_ifc_pamp_reg_bits pamp_reg; | ||
| 6739 | struct mlx5_ifc_paos_reg_bits paos_reg; | ||
| 6740 | struct mlx5_ifc_pcap_reg_bits pcap_reg; | ||
| 6741 | struct mlx5_ifc_peir_reg_bits peir_reg; | ||
| 6742 | struct mlx5_ifc_pelc_reg_bits pelc_reg; | ||
| 6743 | struct mlx5_ifc_pfcc_reg_bits pfcc_reg; | ||
| 6744 | struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; | ||
| 6745 | struct mlx5_ifc_pifr_reg_bits pifr_reg; | ||
| 6746 | struct mlx5_ifc_pipg_reg_bits pipg_reg; | ||
| 6747 | struct mlx5_ifc_plbf_reg_bits plbf_reg; | ||
| 6748 | struct mlx5_ifc_plib_reg_bits plib_reg; | ||
| 6749 | struct mlx5_ifc_plpc_reg_bits plpc_reg; | ||
| 6750 | struct mlx5_ifc_pmaos_reg_bits pmaos_reg; | ||
| 6751 | struct mlx5_ifc_pmlp_reg_bits pmlp_reg; | ||
| 6752 | struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; | ||
| 6753 | struct mlx5_ifc_pmpc_reg_bits pmpc_reg; | ||
| 6754 | struct mlx5_ifc_pmpe_reg_bits pmpe_reg; | ||
| 6755 | struct mlx5_ifc_pmpr_reg_bits pmpr_reg; | ||
| 6756 | struct mlx5_ifc_pmtu_reg_bits pmtu_reg; | ||
| 6757 | struct mlx5_ifc_ppad_reg_bits ppad_reg; | ||
| 6758 | struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; | ||
| 6759 | struct mlx5_ifc_pplm_reg_bits pplm_reg; | ||
| 6760 | struct mlx5_ifc_pplr_reg_bits pplr_reg; | ||
| 6761 | struct mlx5_ifc_ppsc_reg_bits ppsc_reg; | ||
| 6762 | struct mlx5_ifc_pqdr_reg_bits pqdr_reg; | ||
| 6763 | struct mlx5_ifc_pspa_reg_bits pspa_reg; | ||
| 6764 | struct mlx5_ifc_ptas_reg_bits ptas_reg; | ||
| 6765 | struct mlx5_ifc_ptys_reg_bits ptys_reg; | ||
| 6766 | struct mlx5_ifc_pude_reg_bits pude_reg; | ||
| 6767 | struct mlx5_ifc_pvlc_reg_bits pvlc_reg; | ||
| 6768 | struct mlx5_ifc_slrg_reg_bits slrg_reg; | ||
| 6769 | struct mlx5_ifc_sltp_reg_bits sltp_reg; | ||
| 6770 | u8 reserved_0[0x60e0]; | ||
| 6771 | }; | ||
| 6772 | |||
| 6773 | union mlx5_ifc_debug_enhancements_document_bits { | ||
| 6774 | struct mlx5_ifc_health_buffer_bits health_buffer; | ||
| 6775 | u8 reserved_0[0x200]; | ||
| 6776 | }; | ||
| 6777 | |||
| 6778 | union mlx5_ifc_uplink_pci_interface_document_bits { | ||
| 6779 | struct mlx5_ifc_initial_seg_bits initial_seg; | ||
| 6780 | u8 reserved_0[0x20060]; | ||
| 347 | }; | 6781 | }; |
| 348 | 6782 | ||
| 349 | #endif /* MLX5_IFC_H */ | 6783 | #endif /* MLX5_IFC_H */ |
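Note on the layouts above: in the mlx5_ifc convention a declaration such as "u8 admin_mtu[0x10]" names a 16-bit field of a big-endian register image, not a 16-byte array, and the field widths inside each struct add up to the register size (the driver reaches these fields through generated offset/accessor helpers rather than through the structs directly). As a minimal, self-contained sketch of what that layout means -- plain userspace C, names mine, not the driver's accessors -- admin_mtu of the PMTU register sits in the upper half of the third 32-bit word:

#include <stdint.h>
#include <arpa/inet.h>          /* ntohl() */

/* Per mlx5_ifc_pmtu_reg_bits above: dword 1 carries max_mtu, dword 2
 * admin_mtu and dword 3 oper_mtu, each in the upper 16 bits of its
 * big-endian dword (local_port is the second byte of dword 0). */
static uint16_t pmtu_admin_mtu(const void *reg)
{
        const uint32_t *dw = reg;

        return (uint16_t)(ntohl(dw[2]) >> 16);
}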
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 310b5f7fd6ae..f079fb1a31f7 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -134,13 +134,21 @@ enum { | |||
| 134 | 134 | ||
| 135 | enum { | 135 | enum { |
| 136 | MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, | 136 | MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, |
| 137 | MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, | ||
| 137 | MLX5_WQE_CTRL_SOLICITED = 1 << 1, | 138 | MLX5_WQE_CTRL_SOLICITED = 1 << 1, |
| 138 | }; | 139 | }; |
| 139 | 140 | ||
| 140 | enum { | 141 | enum { |
| 142 | MLX5_SEND_WQE_DS = 16, | ||
| 141 | MLX5_SEND_WQE_BB = 64, | 143 | MLX5_SEND_WQE_BB = 64, |
| 142 | }; | 144 | }; |
| 143 | 145 | ||
| 146 | #define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS) | ||
| 147 | |||
| 148 | enum { | ||
| 149 | MLX5_SEND_WQE_MAX_WQEBBS = 16, | ||
| 150 | }; | ||
| 151 | |||
| 144 | enum { | 152 | enum { |
| 145 | MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, | 153 | MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, |
| 146 | MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, | 154 | MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, |
| @@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg { | |||
| 200 | #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 | 208 | #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 |
| 201 | #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 | 209 | #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 |
| 202 | 210 | ||
| 211 | enum { | ||
| 212 | MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, | ||
| 213 | MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5, | ||
| 214 | MLX5_ETH_WQE_L3_CSUM = 1 << 6, | ||
| 215 | MLX5_ETH_WQE_L4_CSUM = 1 << 7, | ||
| 216 | }; | ||
| 217 | |||
| 218 | struct mlx5_wqe_eth_seg { | ||
| 219 | u8 rsvd0[4]; | ||
| 220 | u8 cs_flags; | ||
| 221 | u8 rsvd1; | ||
| 222 | __be16 mss; | ||
| 223 | __be32 rsvd2; | ||
| 224 | __be16 inline_hdr_sz; | ||
| 225 | u8 inline_hdr_start[2]; | ||
| 226 | }; | ||
| 227 | |||
| 203 | struct mlx5_wqe_xrc_seg { | 228 | struct mlx5_wqe_xrc_seg { |
| 204 | __be32 xrc_srqn; | 229 | __be32 xrc_srqn; |
| 205 | u8 rsvd[12]; | 230 | u8 rsvd[12]; |
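The new MLX5_SEND_WQE_DS / MLX5_SEND_WQEBB_NUM_DS constants encode that one 64-byte send-queue building block holds four 16-byte descriptor segments, and the MLX5_ETH_WQE_* flags select checksum offload in the new eth segment. A hedged usage sketch, assuming the header as patched above (helper names are mine, not from the patch):

#include <linux/kernel.h>       /* DIV_ROUND_UP() */
#include <linux/mlx5/qp.h>

/* Building blocks consumed by a WQE made of ds_cnt 16-byte segments,
 * e.g. one ctrl seg + one eth seg + N data pointers. */
static inline u16 ex_wqe_num_wqebbs(u16 ds_cnt)
{
        return DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
}

/* Ask the HCA to compute outer L3 and L4 checksums for this packet. */
static inline void ex_wqe_set_csum(struct mlx5_wqe_eth_seg *eseg)
{
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
}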
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h new file mode 100644 index 000000000000..967e0fd06e89 --- /dev/null +++ b/include/linux/mlx5/vport.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. | ||
| 3 | * | ||
| 4 | * This software is available to you under a choice of one of two | ||
| 5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 6 | * General Public License (GPL) Version 2, available from the file | ||
| 7 | * COPYING in the main directory of this source tree, or the | ||
| 8 | * OpenIB.org BSD license below: | ||
| 9 | * | ||
| 10 | * Redistribution and use in source and binary forms, with or | ||
| 11 | * without modification, are permitted provided that the following | ||
| 12 | * conditions are met: | ||
| 13 | * | ||
| 14 | * - Redistributions of source code must retain the above | ||
| 15 | * copyright notice, this list of conditions and the following | ||
| 16 | * disclaimer. | ||
| 17 | * | ||
| 18 | * - Redistributions in binary form must reproduce the above | ||
| 19 | * copyright notice, this list of conditions and the following | ||
| 20 | * disclaimer in the documentation and/or other materials | ||
| 21 | * provided with the distribution. | ||
| 22 | * | ||
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 30 | * SOFTWARE. | ||
| 31 | */ | ||
| 32 | |||
| 33 | #ifndef __MLX5_VPORT_H__ | ||
| 34 | #define __MLX5_VPORT_H__ | ||
| 35 | |||
| 36 | #include <linux/mlx5/driver.h> | ||
| 37 | |||
| 38 | u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); | ||
| 39 | void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); | ||
| 40 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, | ||
| 41 | u8 port_num, u16 vf_num, u16 gid_index, | ||
| 42 | union ib_gid *gid); | ||
| 43 | int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, | ||
| 44 | u8 port_num, u16 vf_num, u16 pkey_index, | ||
| 45 | u16 *pkey); | ||
| 46 | int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, | ||
| 47 | u8 other_vport, u8 port_num, | ||
| 48 | u16 vf_num, | ||
| 49 | struct mlx5_hca_vport_context *rep); | ||
| 50 | int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, | ||
| 51 | u64 *sys_image_guid); | ||
| 52 | int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, | ||
| 53 | u64 *node_guid); | ||
| 54 | |||
| 55 | #endif /* __MLX5_VPORT_H__ */ | ||
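A hedged sketch of how the NIC-vport helper above is typically consumed -- an Ethernet driver copying the queried MAC into its net_device at probe time (the function and its caller are illustrative, not part of this header):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static void ex_init_netdev_addr(struct mlx5_core_dev *mdev,
                                struct net_device *netdev)
{
        u8 mac[ETH_ALEN];

        mlx5_query_nic_vport_mac_address(mdev, mac);
        ether_addr_copy(netdev->dev_addr, mac);
}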
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h new file mode 100644 index 000000000000..4efc3f56e6df --- /dev/null +++ b/include/linux/mm-arch-hooks.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * Generic mm no-op hooks. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015, IBM Corporation | ||
| 5 | * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | #ifndef _LINUX_MM_ARCH_HOOKS_H | ||
| 12 | #define _LINUX_MM_ARCH_HOOKS_H | ||
| 13 | |||
| 14 | #include <asm/mm-arch-hooks.h> | ||
| 15 | |||
| 16 | #ifndef arch_remap | ||
| 17 | static inline void arch_remap(struct mm_struct *mm, | ||
| 18 | unsigned long old_start, unsigned long old_end, | ||
| 19 | unsigned long new_start, unsigned long new_end) | ||
| 20 | { | ||
| 21 | } | ||
| 22 | #define arch_remap arch_remap | ||
| 23 | #endif | ||
| 24 | |||
| 25 | #endif /* _LINUX_MM_ARCH_HOOKS_H */ | ||
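The header relies on the usual define-to-override pattern: an architecture that needs the hook supplies its own arch_remap() in its <asm/mm-arch-hooks.h> and defines the macro, which hides the generic no-op above. A hypothetical arch-side override might look like this (the architecture and its bookkeeping are invented for illustration):

#ifndef _ASM_EXAMPLEARCH_MM_ARCH_HOOKS_H
#define _ASM_EXAMPLEARCH_MM_ARCH_HOOKS_H

struct mm_struct;

static inline void arch_remap(struct mm_struct *mm,
                              unsigned long old_start, unsigned long old_end,
                              unsigned long new_start, unsigned long new_end)
{
        /* e.g. track a special mapping (vDSO-style) at its new address */
}
#define arch_remap arch_remap

#endif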
diff --git a/include/linux/mm.h b/include/linux/mm.h index 0755b9fd03a7..2e872f92dbac 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -27,6 +27,7 @@ struct anon_vma_chain; | |||
| 27 | struct file_ra_state; | 27 | struct file_ra_state; |
| 28 | struct user_struct; | 28 | struct user_struct; |
| 29 | struct writeback_control; | 29 | struct writeback_control; |
| 30 | struct bdi_writeback; | ||
| 30 | 31 | ||
| 31 | #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ | 32 | #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ |
| 32 | extern unsigned long max_mapnr; | 33 | extern unsigned long max_mapnr; |
| @@ -499,7 +500,7 @@ static inline int page_count(struct page *page) | |||
| 499 | 500 | ||
| 500 | static inline bool __compound_tail_refcounted(struct page *page) | 501 | static inline bool __compound_tail_refcounted(struct page *page) |
| 501 | { | 502 | { |
| 502 | return !PageSlab(page) && !PageHeadHuge(page); | 503 | return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page); |
| 503 | } | 504 | } |
| 504 | 505 | ||
| 505 | /* | 506 | /* |
| @@ -1211,10 +1212,13 @@ int __set_page_dirty_nobuffers(struct page *page); | |||
| 1211 | int __set_page_dirty_no_writeback(struct page *page); | 1212 | int __set_page_dirty_no_writeback(struct page *page); |
| 1212 | int redirty_page_for_writepage(struct writeback_control *wbc, | 1213 | int redirty_page_for_writepage(struct writeback_control *wbc, |
| 1213 | struct page *page); | 1214 | struct page *page); |
| 1214 | void account_page_dirtied(struct page *page, struct address_space *mapping); | 1215 | void account_page_dirtied(struct page *page, struct address_space *mapping, |
| 1215 | void account_page_cleaned(struct page *page, struct address_space *mapping); | 1216 | struct mem_cgroup *memcg); |
| 1217 | void account_page_cleaned(struct page *page, struct address_space *mapping, | ||
| 1218 | struct mem_cgroup *memcg, struct bdi_writeback *wb); | ||
| 1216 | int set_page_dirty(struct page *page); | 1219 | int set_page_dirty(struct page *page); |
| 1217 | int set_page_dirty_lock(struct page *page); | 1220 | int set_page_dirty_lock(struct page *page); |
| 1221 | void cancel_dirty_page(struct page *page); | ||
| 1218 | int clear_page_dirty_for_io(struct page *page); | 1222 | int clear_page_dirty_for_io(struct page *page); |
| 1219 | 1223 | ||
| 1220 | int get_cmdline(struct task_struct *task, char *buffer, int buflen); | 1224 | int get_cmdline(struct task_struct *task, char *buffer, int buflen); |
| @@ -1631,6 +1635,8 @@ extern void free_highmem_page(struct page *page); | |||
| 1631 | extern void adjust_managed_page_count(struct page *page, long count); | 1635 | extern void adjust_managed_page_count(struct page *page, long count); |
| 1632 | extern void mem_init_print_info(const char *str); | 1636 | extern void mem_init_print_info(const char *str); |
| 1633 | 1637 | ||
| 1638 | extern void reserve_bootmem_region(unsigned long start, unsigned long end); | ||
| 1639 | |||
| 1634 | /* Free the reserved page into the buddy system, so it gets managed. */ | 1640 | /* Free the reserved page into the buddy system, so it gets managed. */ |
| 1635 | static inline void __free_reserved_page(struct page *page) | 1641 | static inline void __free_reserved_page(struct page *page) |
| 1636 | { | 1642 | { |
| @@ -1720,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid); | |||
| 1720 | 1726 | ||
| 1721 | #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ | 1727 | #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ |
| 1722 | !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) | 1728 | !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) |
| 1723 | static inline int __early_pfn_to_nid(unsigned long pfn) | 1729 | static inline int __early_pfn_to_nid(unsigned long pfn, |
| 1730 | struct mminit_pfnnid_cache *state) | ||
| 1724 | { | 1731 | { |
| 1725 | return 0; | 1732 | return 0; |
| 1726 | } | 1733 | } |
| @@ -1728,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn) | |||
| 1728 | /* please see mm/page_alloc.c */ | 1735 | /* please see mm/page_alloc.c */ |
| 1729 | extern int __meminit early_pfn_to_nid(unsigned long pfn); | 1736 | extern int __meminit early_pfn_to_nid(unsigned long pfn); |
| 1730 | /* there is a per-arch backend function. */ | 1737 | /* there is a per-arch backend function. */ |
| 1731 | extern int __meminit __early_pfn_to_nid(unsigned long pfn); | 1738 | extern int __meminit __early_pfn_to_nid(unsigned long pfn, |
| 1739 | struct mminit_pfnnid_cache *state); | ||
| 1732 | #endif | 1740 | #endif |
| 1733 | 1741 | ||
| 1734 | extern void set_dma_reserve(unsigned long new_dma_reserve); | 1742 | extern void set_dma_reserve(unsigned long new_dma_reserve); |
| @@ -2146,12 +2154,47 @@ enum mf_flags { | |||
| 2146 | extern int memory_failure(unsigned long pfn, int trapno, int flags); | 2154 | extern int memory_failure(unsigned long pfn, int trapno, int flags); |
| 2147 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); | 2155 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); |
| 2148 | extern int unpoison_memory(unsigned long pfn); | 2156 | extern int unpoison_memory(unsigned long pfn); |
| 2157 | extern int get_hwpoison_page(struct page *page); | ||
| 2149 | extern int sysctl_memory_failure_early_kill; | 2158 | extern int sysctl_memory_failure_early_kill; |
| 2150 | extern int sysctl_memory_failure_recovery; | 2159 | extern int sysctl_memory_failure_recovery; |
| 2151 | extern void shake_page(struct page *p, int access); | 2160 | extern void shake_page(struct page *p, int access); |
| 2152 | extern atomic_long_t num_poisoned_pages; | 2161 | extern atomic_long_t num_poisoned_pages; |
| 2153 | extern int soft_offline_page(struct page *page, int flags); | 2162 | extern int soft_offline_page(struct page *page, int flags); |
| 2154 | 2163 | ||
| 2164 | |||
| 2165 | /* | ||
| 2166 | * Error handlers for various types of pages. | ||
| 2167 | */ | ||
| 2168 | enum mf_result { | ||
| 2169 | MF_IGNORED, /* Error: cannot be handled */ | ||
| 2170 | MF_FAILED, /* Error: handling failed */ | ||
| 2171 | MF_DELAYED, /* Will be handled later */ | ||
| 2172 | MF_RECOVERED, /* Successfully recovered */ | ||
| 2173 | }; | ||
| 2174 | |||
| 2175 | enum mf_action_page_type { | ||
| 2176 | MF_MSG_KERNEL, | ||
| 2177 | MF_MSG_KERNEL_HIGH_ORDER, | ||
| 2178 | MF_MSG_SLAB, | ||
| 2179 | MF_MSG_DIFFERENT_COMPOUND, | ||
| 2180 | MF_MSG_POISONED_HUGE, | ||
| 2181 | MF_MSG_HUGE, | ||
| 2182 | MF_MSG_FREE_HUGE, | ||
| 2183 | MF_MSG_UNMAP_FAILED, | ||
| 2184 | MF_MSG_DIRTY_SWAPCACHE, | ||
| 2185 | MF_MSG_CLEAN_SWAPCACHE, | ||
| 2186 | MF_MSG_DIRTY_MLOCKED_LRU, | ||
| 2187 | MF_MSG_CLEAN_MLOCKED_LRU, | ||
| 2188 | MF_MSG_DIRTY_UNEVICTABLE_LRU, | ||
| 2189 | MF_MSG_CLEAN_UNEVICTABLE_LRU, | ||
| 2190 | MF_MSG_DIRTY_LRU, | ||
| 2191 | MF_MSG_CLEAN_LRU, | ||
| 2192 | MF_MSG_TRUNCATED_LRU, | ||
| 2193 | MF_MSG_BUDDY, | ||
| 2194 | MF_MSG_BUDDY_2ND, | ||
| 2195 | MF_MSG_UNKNOWN, | ||
| 2196 | }; | ||
| 2197 | |||
| 2155 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) | 2198 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) |
| 2156 | extern void clear_huge_page(struct page *page, | 2199 | extern void clear_huge_page(struct page *page, |
| 2157 | unsigned long addr, | 2200 | unsigned long addr, |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8d37e26a1007..0038ac7466fd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -226,6 +226,24 @@ struct page_frag { | |||
| 226 | #endif | 226 | #endif |
| 227 | }; | 227 | }; |
| 228 | 228 | ||
| 229 | #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) | ||
| 230 | #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) | ||
| 231 | |||
| 232 | struct page_frag_cache { | ||
| 233 | void * va; | ||
| 234 | #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) | ||
| 235 | __u16 offset; | ||
| 236 | __u16 size; | ||
| 237 | #else | ||
| 238 | __u32 offset; | ||
| 239 | #endif | ||
| 240 | /* we maintain a pagecount bias, so that we dont dirty cache line | ||
| 241 | * containing page->_count every time we allocate a fragment. | ||
| 242 | */ | ||
| 243 | unsigned int pagecnt_bias; | ||
| 244 | bool pfmemalloc; | ||
| 245 | }; | ||
| 246 | |||
| 229 | typedef unsigned long __nocast vm_flags_t; | 247 | typedef unsigned long __nocast vm_flags_t; |
| 230 | 248 | ||
| 231 | /* | 249 | /* |
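The comment in the new page_frag_cache explains the trick: the allocator charges the page's reference count once, up front, and then serves fragments by only moving a local offset and bias, so the shared page->_count cacheline stays untouched on the fast path. A deliberately simplified, hypothetical illustration of that idea (not the kernel implementation):

struct ex_frag_cache {
        char            *va;            /* base of the backing pages */
        unsigned int    size;           /* bytes available in total */
        unsigned int    offset;         /* next free byte */
        unsigned int    pagecnt_bias;   /* references pre-charged on the page */
};

static void *ex_frag_alloc(struct ex_frag_cache *fc, unsigned int len)
{
        void *frag;

        if (fc->offset + len > fc->size)
                return NULL;            /* real code refills/recycles here */

        frag = fc->va + fc->offset;
        fc->offset += len;
        fc->pagecnt_bias--;             /* consume one pre-charged reference */
        return frag;
}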
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index c5d52780d6a0..3ba327af055c 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h | |||
| @@ -106,6 +106,6 @@ extern void enable_mmiotrace(void); | |||
| 106 | extern void disable_mmiotrace(void); | 106 | extern void disable_mmiotrace(void); |
| 107 | extern void mmio_trace_rw(struct mmiotrace_rw *rw); | 107 | extern void mmio_trace_rw(struct mmiotrace_rw *rw); |
| 108 | extern void mmio_trace_mapping(struct mmiotrace_map *map); | 108 | extern void mmio_trace_mapping(struct mmiotrace_map *map); |
| 109 | extern int mmio_trace_printk(const char *fmt, va_list args); | 109 | extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args); |
| 110 | 110 | ||
| 111 | #endif /* _LINUX_MMIOTRACE_H */ | 111 | #endif /* _LINUX_MMIOTRACE_H */ |
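The added __printf(1, 0) annotation tells the compiler that the first argument is a printf-style format consumed through a va_list, so format/argument checking happens in the variadic callers that build that va_list. A standalone illustration of the pattern (userspace C, with __printf spelled out for clarity):

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

static __printf(1, 0) int ex_vlog(const char *fmt, va_list args)
{
        return vprintf(fmt, args);
}

static __printf(1, 2) int ex_log(const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = ex_vlog(fmt, args);       /* callers of ex_log() get checked */
        va_end(args);
        return ret;
}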
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 95243d28a0ee..61cd67f4d788 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
| @@ -324,25 +324,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 324 | ___pte; \ | 324 | ___pte; \ |
| 325 | }) | 325 | }) |
| 326 | 326 | ||
| 327 | #define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ | 327 | #define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \ |
| 328 | ({ \ | 328 | ({ \ |
| 329 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | 329 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ |
| 330 | struct mm_struct *___mm = (__vma)->vm_mm; \ | 330 | struct mm_struct *___mm = (__vma)->vm_mm; \ |
| 331 | pmd_t ___pmd; \ | 331 | pmd_t ___pmd; \ |
| 332 | \ | 332 | \ |
| 333 | ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ | 333 | ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \ |
| 334 | mmu_notifier_invalidate_range(___mm, ___haddr, \ | 334 | mmu_notifier_invalidate_range(___mm, ___haddr, \ |
| 335 | ___haddr + HPAGE_PMD_SIZE); \ | 335 | ___haddr + HPAGE_PMD_SIZE); \ |
| 336 | \ | 336 | \ |
| 337 | ___pmd; \ | 337 | ___pmd; \ |
| 338 | }) | 338 | }) |
| 339 | 339 | ||
| 340 | #define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ | 340 | #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ |
| 341 | ({ \ | 341 | ({ \ |
| 342 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ | 342 | unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ |
| 343 | pmd_t ___pmd; \ | 343 | pmd_t ___pmd; \ |
| 344 | \ | 344 | \ |
| 345 | ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ | 345 | ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \ |
| 346 | mmu_notifier_invalidate_range(__mm, ___haddr, \ | 346 | mmu_notifier_invalidate_range(__mm, ___haddr, \ |
| 347 | ___haddr + HPAGE_PMD_SIZE); \ | 347 | ___haddr + HPAGE_PMD_SIZE); \ |
| 348 | \ | 348 | \ |
| @@ -428,8 +428,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) | |||
| 428 | #define ptep_clear_flush_young_notify ptep_clear_flush_young | 428 | #define ptep_clear_flush_young_notify ptep_clear_flush_young |
| 429 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young | 429 | #define pmdp_clear_flush_young_notify pmdp_clear_flush_young |
| 430 | #define ptep_clear_flush_notify ptep_clear_flush | 430 | #define ptep_clear_flush_notify ptep_clear_flush |
| 431 | #define pmdp_clear_flush_notify pmdp_clear_flush | 431 | #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush |
| 432 | #define pmdp_get_and_clear_notify pmdp_get_and_clear | 432 | #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear |
| 433 | #define set_pte_at_notify set_pte_at | 433 | #define set_pte_at_notify set_pte_at |
| 434 | 434 | ||
| 435 | #endif /* CONFIG_MMU_NOTIFIER */ | 435 | #endif /* CONFIG_MMU_NOTIFIER */ |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 54d74f6eb233..754c25966a0a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -762,6 +762,14 @@ typedef struct pglist_data { | |||
| 762 | /* Number of pages migrated during the rate limiting time interval */ | 762 | /* Number of pages migrated during the rate limiting time interval */ |
| 763 | unsigned long numabalancing_migrate_nr_pages; | 763 | unsigned long numabalancing_migrate_nr_pages; |
| 764 | #endif | 764 | #endif |
| 765 | |||
| 766 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | ||
| 767 | /* | ||
| 768 | * If memory initialisation on large machines is deferred then this | ||
| 769 | * is the first PFN that needs to be initialised. | ||
| 770 | */ | ||
| 771 | unsigned long first_deferred_pfn; | ||
| 772 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ | ||
| 765 | } pg_data_t; | 773 | } pg_data_t; |
| 766 | 774 | ||
| 767 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) | 775 | #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) |
| @@ -1216,11 +1224,16 @@ void sparse_init(void); | |||
| 1216 | #define sparse_index_init(_sec, _nid) do {} while (0) | 1224 | #define sparse_index_init(_sec, _nid) do {} while (0) |
| 1217 | #endif /* CONFIG_SPARSEMEM */ | 1225 | #endif /* CONFIG_SPARSEMEM */ |
| 1218 | 1226 | ||
| 1219 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | 1227 | /* |
| 1220 | bool early_pfn_in_nid(unsigned long pfn, int nid); | 1228 | * During memory init memblocks map pfns to nids. The search is expensive and |
| 1221 | #else | 1229 | * this caches recent lookups. The implementation of __early_pfn_to_nid |
| 1222 | #define early_pfn_in_nid(pfn, nid) (1) | 1230 | * may treat start/end as pfns or sections. |
| 1223 | #endif | 1231 | */ |
| 1232 | struct mminit_pfnnid_cache { | ||
| 1233 | unsigned long last_start; | ||
| 1234 | unsigned long last_end; | ||
| 1235 | int last_nid; | ||
| 1236 | }; | ||
| 1224 | 1237 | ||
| 1225 | #ifndef early_pfn_valid | 1238 | #ifndef early_pfn_valid |
| 1226 | #define early_pfn_valid(pfn) (1) | 1239 | #define early_pfn_valid(pfn) (1) |
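The struct comment above describes a classic last-hit cache: during early memory init, mapping a pfn to its node normally means searching the memblock regions, so the most recent [start, end) -> nid result is remembered and reused. A hedged sketch of the lookup pattern (the names and the slow-path callback are illustrative only):

struct ex_pfnnid_cache {
        unsigned long last_start;
        unsigned long last_end;
        int last_nid;
};

static int ex_pfn_to_nid(unsigned long pfn, struct ex_pfnnid_cache *state,
                         int (*slow_lookup)(unsigned long pfn,
                                            unsigned long *start,
                                            unsigned long *end))
{
        if (state->last_start <= pfn && pfn < state->last_end)
                return state->last_nid;         /* hit: no memblock walk */

        /* miss: do the expensive search and remember the covering range */
        state->last_nid = slow_lookup(pfn, &state->last_start,
                                      &state->last_end);
        return state->last_nid;
}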
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 3bfd56778c29..34f25b7bf642 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -189,6 +189,8 @@ struct css_device_id { | |||
| 189 | struct acpi_device_id { | 189 | struct acpi_device_id { |
| 190 | __u8 id[ACPI_ID_LEN]; | 190 | __u8 id[ACPI_ID_LEN]; |
| 191 | kernel_ulong_t driver_data; | 191 | kernel_ulong_t driver_data; |
| 192 | __u32 cls; | ||
| 193 | __u32 cls_msk; | ||
| 192 | }; | 194 | }; |
| 193 | 195 | ||
| 194 | #define PNP_ID_LEN 8 | 196 | #define PNP_ID_LEN 8 |
| @@ -599,9 +601,22 @@ struct ipack_device_id { | |||
| 599 | 601 | ||
| 600 | #define MEI_CL_MODULE_PREFIX "mei:" | 602 | #define MEI_CL_MODULE_PREFIX "mei:" |
| 601 | #define MEI_CL_NAME_SIZE 32 | 603 | #define MEI_CL_NAME_SIZE 32 |
| 604 | #define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" | ||
| 605 | #define MEI_CL_UUID_ARGS(_u) \ | ||
| 606 | _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \ | ||
| 607 | _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15] | ||
| 602 | 608 | ||
| 609 | /** | ||
| 610 | * struct mei_cl_device_id - MEI client device identifier | ||
| 611 | * @name: helper name | ||
| 612 | * @uuid: client uuid | ||
| 613 | * @driver_info: information used by the driver. | ||
| 614 | * | ||
| 615 | * identifies mei client device by uuid and name | ||
| 616 | */ | ||
| 603 | struct mei_cl_device_id { | 617 | struct mei_cl_device_id { |
| 604 | char name[MEI_CL_NAME_SIZE]; | 618 | char name[MEI_CL_NAME_SIZE]; |
| 619 | uuid_le uuid; | ||
| 605 | kernel_ulong_t driver_info; | 620 | kernel_ulong_t driver_info; |
| 606 | }; | 621 | }; |
| 607 | 622 | ||
| @@ -629,4 +644,10 @@ struct mcb_device_id { | |||
| 629 | kernel_ulong_t driver_data; | 644 | kernel_ulong_t driver_data; |
| 630 | }; | 645 | }; |
| 631 | 646 | ||
| 647 | struct ulpi_device_id { | ||
| 648 | __u16 vendor; | ||
| 649 | __u16 product; | ||
| 650 | kernel_ulong_t driver_data; | ||
| 651 | }; | ||
| 652 | |||
| 632 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 653 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
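The new MEI_CL_UUID_FMT / MEI_CL_UUID_ARGS pair exists so callers can print a 16-byte client uuid without open-coding sixteen %02x conversions. A minimal, hypothetical use (the helper and its uuid_le argument are mine, not part of the patch):

#include <linux/mod_devicetable.h>
#include <linux/printk.h>
#include <linux/uuid.h>

static void ex_log_mei_client(const char *name, const uuid_le *uuid)
{
        pr_info("mei client %s: " MEI_CL_UUID_FMT "\n",
                name, MEI_CL_UUID_ARGS(uuid->b));
}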
diff --git a/include/linux/module.h b/include/linux/module.h index 1e5436042eb0..3a19c79918e0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -11,12 +11,14 @@ | |||
| 11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
| 12 | #include <linux/cache.h> | 12 | #include <linux/cache.h> |
| 13 | #include <linux/kmod.h> | 13 | #include <linux/kmod.h> |
| 14 | #include <linux/init.h> | ||
| 14 | #include <linux/elf.h> | 15 | #include <linux/elf.h> |
| 15 | #include <linux/stringify.h> | 16 | #include <linux/stringify.h> |
| 16 | #include <linux/kobject.h> | 17 | #include <linux/kobject.h> |
| 17 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
| 18 | #include <linux/jump_label.h> | 19 | #include <linux/jump_label.h> |
| 19 | #include <linux/export.h> | 20 | #include <linux/export.h> |
| 21 | #include <linux/rbtree_latch.h> | ||
| 20 | 22 | ||
| 21 | #include <linux/percpu.h> | 23 | #include <linux/percpu.h> |
| 22 | #include <asm/module.h> | 24 | #include <asm/module.h> |
| @@ -70,6 +72,89 @@ extern struct module_attribute module_uevent; | |||
| 70 | extern int init_module(void); | 72 | extern int init_module(void); |
| 71 | extern void cleanup_module(void); | 73 | extern void cleanup_module(void); |
| 72 | 74 | ||
| 75 | #ifndef MODULE | ||
| 76 | /** | ||
| 77 | * module_init() - driver initialization entry point | ||
| 78 | * @x: function to be run at kernel boot time or module insertion | ||
| 79 | * | ||
| 80 | * module_init() will either be called during do_initcalls() (if | ||
| 81 | * builtin) or at module insertion time (if a module). There can only | ||
| 82 | * be one per module. | ||
| 83 | */ | ||
| 84 | #define module_init(x) __initcall(x); | ||
| 85 | |||
| 86 | /** | ||
| 87 | * module_exit() - driver exit entry point | ||
| 88 | * @x: function to be run when driver is removed | ||
| 89 | * | ||
| 90 | * module_exit() will wrap the driver clean-up code | ||
| 91 | * with cleanup_module() when used with rmmod when | ||
| 92 | * the driver is a module. If the driver is statically | ||
| 93 | * compiled into the kernel, module_exit() has no effect. | ||
| 94 | * There can only be one per module. | ||
| 95 | */ | ||
| 96 | #define module_exit(x) __exitcall(x); | ||
| 97 | |||
| 98 | #else /* MODULE */ | ||
| 99 | |||
| 100 | /* | ||
| 101 | * In most cases loadable modules do not need custom | ||
| 102 | * initcall levels. There are still some valid cases where | ||
| 103 | * a driver may be needed early if built in, and does not | ||
| 104 | * matter when built as a loadable module. Like bus | ||
| 105 | * snooping debug drivers. | ||
| 106 | */ | ||
| 107 | #define early_initcall(fn) module_init(fn) | ||
| 108 | #define core_initcall(fn) module_init(fn) | ||
| 109 | #define core_initcall_sync(fn) module_init(fn) | ||
| 110 | #define postcore_initcall(fn) module_init(fn) | ||
| 111 | #define postcore_initcall_sync(fn) module_init(fn) | ||
| 112 | #define arch_initcall(fn) module_init(fn) | ||
| 113 | #define subsys_initcall(fn) module_init(fn) | ||
| 114 | #define subsys_initcall_sync(fn) module_init(fn) | ||
| 115 | #define fs_initcall(fn) module_init(fn) | ||
| 116 | #define fs_initcall_sync(fn) module_init(fn) | ||
| 117 | #define rootfs_initcall(fn) module_init(fn) | ||
| 118 | #define device_initcall(fn) module_init(fn) | ||
| 119 | #define device_initcall_sync(fn) module_init(fn) | ||
| 120 | #define late_initcall(fn) module_init(fn) | ||
| 121 | #define late_initcall_sync(fn) module_init(fn) | ||
| 122 | |||
| 123 | #define console_initcall(fn) module_init(fn) | ||
| 124 | #define security_initcall(fn) module_init(fn) | ||
| 125 | |||
| 126 | /* Each module must use one module_init(). */ | ||
| 127 | #define module_init(initfn) \ | ||
| 128 | static inline initcall_t __inittest(void) \ | ||
| 129 | { return initfn; } \ | ||
| 130 | int init_module(void) __attribute__((alias(#initfn))); | ||
| 131 | |||
| 132 | /* This is only required if you want to be unloadable. */ | ||
| 133 | #define module_exit(exitfn) \ | ||
| 134 | static inline exitcall_t __exittest(void) \ | ||
| 135 | { return exitfn; } \ | ||
| 136 | void cleanup_module(void) __attribute__((alias(#exitfn))); | ||
| 137 | |||
| 138 | #endif | ||
| 139 | |||
| 140 | /* This means "can be init if no module support, otherwise module load | ||
| 141 | may call it." */ | ||
| 142 | #ifdef CONFIG_MODULES | ||
| 143 | #define __init_or_module | ||
| 144 | #define __initdata_or_module | ||
| 145 | #define __initconst_or_module | ||
| 146 | #define __INIT_OR_MODULE .text | ||
| 147 | #define __INITDATA_OR_MODULE .data | ||
| 148 | #define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits | ||
| 149 | #else | ||
| 150 | #define __init_or_module __init | ||
| 151 | #define __initdata_or_module __initdata | ||
| 152 | #define __initconst_or_module __initconst | ||
| 153 | #define __INIT_OR_MODULE __INIT | ||
| 154 | #define __INITDATA_OR_MODULE __INITDATA | ||
| 155 | #define __INITRODATA_OR_MODULE __INITRODATA | ||
| 156 | #endif /*CONFIG_MODULES*/ | ||
| 157 | |||
| 73 | /* Archs provide a method of finding the correct exception table. */ | 158 | /* Archs provide a method of finding the correct exception table. */ |
| 74 | struct exception_table_entry; | 159 | struct exception_table_entry; |
| 75 | 160 | ||
| @@ -210,6 +295,13 @@ enum module_state { | |||
| 210 | MODULE_STATE_UNFORMED, /* Still setting it up. */ | 295 | MODULE_STATE_UNFORMED, /* Still setting it up. */ |
| 211 | }; | 296 | }; |
| 212 | 297 | ||
| 298 | struct module; | ||
| 299 | |||
| 300 | struct mod_tree_node { | ||
| 301 | struct module *mod; | ||
| 302 | struct latch_tree_node node; | ||
| 303 | }; | ||
| 304 | |||
| 213 | struct module { | 305 | struct module { |
| 214 | enum module_state state; | 306 | enum module_state state; |
| 215 | 307 | ||
| @@ -232,6 +324,9 @@ struct module { | |||
| 232 | unsigned int num_syms; | 324 | unsigned int num_syms; |
| 233 | 325 | ||
| 234 | /* Kernel parameters. */ | 326 | /* Kernel parameters. */ |
| 327 | #ifdef CONFIG_SYSFS | ||
| 328 | struct mutex param_lock; | ||
| 329 | #endif | ||
| 235 | struct kernel_param *kp; | 330 | struct kernel_param *kp; |
| 236 | unsigned int num_kp; | 331 | unsigned int num_kp; |
| 237 | 332 | ||
| @@ -257,6 +352,8 @@ struct module { | |||
| 257 | bool sig_ok; | 352 | bool sig_ok; |
| 258 | #endif | 353 | #endif |
| 259 | 354 | ||
| 355 | bool async_probe_requested; | ||
| 356 | |||
| 260 | /* symbols that will be GPL-only in the near future. */ | 357 | /* symbols that will be GPL-only in the near future. */ |
| 261 | const struct kernel_symbol *gpl_future_syms; | 358 | const struct kernel_symbol *gpl_future_syms; |
| 262 | const unsigned long *gpl_future_crcs; | 359 | const unsigned long *gpl_future_crcs; |
| @@ -269,8 +366,15 @@ struct module { | |||
| 269 | /* Startup function. */ | 366 | /* Startup function. */ |
| 270 | int (*init)(void); | 367 | int (*init)(void); |
| 271 | 368 | ||
| 272 | /* If this is non-NULL, vfree after init() returns */ | 369 | /* |
| 273 | void *module_init; | 370 | * If this is non-NULL, vfree() after init() returns. |
| 371 | * | ||
| 372 | * Cacheline align here, such that: | ||
| 373 | * module_init, module_core, init_size, core_size, | ||
| 374 | * init_text_size, core_text_size and mtn_core::{mod,node[0]} | ||
| 375 | * are on the same cacheline. | ||
| 376 | */ | ||
| 377 | void *module_init ____cacheline_aligned; | ||
| 274 | 378 | ||
| 275 | /* Here is the actual code + data, vfree'd on unload. */ | 379 | /* Here is the actual code + data, vfree'd on unload. */ |
| 276 | void *module_core; | 380 | void *module_core; |
| @@ -281,6 +385,16 @@ struct module { | |||
| 281 | /* The size of the executable code in each section. */ | 385 | /* The size of the executable code in each section. */ |
| 282 | unsigned int init_text_size, core_text_size; | 386 | unsigned int init_text_size, core_text_size; |
| 283 | 387 | ||
| 388 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 389 | /* | ||
| 390 | * We want mtn_core::{mod,node[0]} to be in the same cacheline as the | ||
| 391 | * above entries such that a regular lookup will only touch one | ||
| 392 | * cacheline. | ||
| 393 | */ | ||
| 394 | struct mod_tree_node mtn_core; | ||
| 395 | struct mod_tree_node mtn_init; | ||
| 396 | #endif | ||
| 397 | |||
| 284 | /* Size of RO sections of the module (text+rodata) */ | 398 | /* Size of RO sections of the module (text+rodata) */ |
| 285 | unsigned int init_ro_size, core_ro_size; | 399 | unsigned int init_ro_size, core_ro_size; |
| 286 | 400 | ||
| @@ -336,7 +450,7 @@ struct module { | |||
| 336 | const char **trace_bprintk_fmt_start; | 450 | const char **trace_bprintk_fmt_start; |
| 337 | #endif | 451 | #endif |
| 338 | #ifdef CONFIG_EVENT_TRACING | 452 | #ifdef CONFIG_EVENT_TRACING |
| 339 | struct ftrace_event_call **trace_events; | 453 | struct trace_event_call **trace_events; |
| 340 | unsigned int num_trace_events; | 454 | unsigned int num_trace_events; |
| 341 | struct trace_enum_map **trace_enums; | 455 | struct trace_enum_map **trace_enums; |
| 342 | unsigned int num_trace_enums; | 456 | unsigned int num_trace_enums; |
| @@ -367,7 +481,7 @@ struct module { | |||
| 367 | ctor_fn_t *ctors; | 481 | ctor_fn_t *ctors; |
| 368 | unsigned int num_ctors; | 482 | unsigned int num_ctors; |
| 369 | #endif | 483 | #endif |
| 370 | }; | 484 | } ____cacheline_aligned; |
| 371 | #ifndef MODULE_ARCH_INIT | 485 | #ifndef MODULE_ARCH_INIT |
| 372 | #define MODULE_ARCH_INIT {} | 486 | #define MODULE_ARCH_INIT {} |
| 373 | #endif | 487 | #endif |
| @@ -421,14 +535,22 @@ struct symsearch { | |||
| 421 | bool unused; | 535 | bool unused; |
| 422 | }; | 536 | }; |
| 423 | 537 | ||
| 424 | /* Search for an exported symbol by name. */ | 538 | /* |
| 539 | * Search for an exported symbol by name. | ||
| 540 | * | ||
| 541 | * Must be called with module_mutex held or preemption disabled. | ||
| 542 | */ | ||
| 425 | const struct kernel_symbol *find_symbol(const char *name, | 543 | const struct kernel_symbol *find_symbol(const char *name, |
| 426 | struct module **owner, | 544 | struct module **owner, |
| 427 | const unsigned long **crc, | 545 | const unsigned long **crc, |
| 428 | bool gplok, | 546 | bool gplok, |
| 429 | bool warn); | 547 | bool warn); |
| 430 | 548 | ||
| 431 | /* Walk the exported symbol table */ | 549 | /* |
| 550 | * Walk the exported symbol table | ||
| 551 | * | ||
| 552 | * Must be called with module_mutex held or preemption disabled. | ||
| 553 | */ | ||
| 432 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, | 554 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, |
| 433 | struct module *owner, | 555 | struct module *owner, |
| 434 | void *data), void *data); | 556 | void *data), void *data); |
| @@ -508,6 +630,11 @@ int unregister_module_notifier(struct notifier_block *nb); | |||
| 508 | 630 | ||
| 509 | extern void print_modules(void); | 631 | extern void print_modules(void); |
| 510 | 632 | ||
| 633 | static inline bool module_requested_async_probing(struct module *module) | ||
| 634 | { | ||
| 635 | return module && module->async_probe_requested; | ||
| 636 | } | ||
| 637 | |||
| 511 | #else /* !CONFIG_MODULES... */ | 638 | #else /* !CONFIG_MODULES... */ |
| 512 | 639 | ||
| 513 | /* Given an address, look for it in the exception tables. */ | 640 | /* Given an address, look for it in the exception tables. */ |
| @@ -618,6 +745,12 @@ static inline int unregister_module_notifier(struct notifier_block *nb) | |||
| 618 | static inline void print_modules(void) | 745 | static inline void print_modules(void) |
| 619 | { | 746 | { |
| 620 | } | 747 | } |
| 748 | |||
| 749 | static inline bool module_requested_async_probing(struct module *module) | ||
| 750 | { | ||
| 751 | return false; | ||
| 752 | } | ||
| 753 | |||
| 621 | #endif /* CONFIG_MODULES */ | 754 | #endif /* CONFIG_MODULES */ |
| 622 | 755 | ||
| 623 | #ifdef CONFIG_SYSFS | 756 | #ifdef CONFIG_SYSFS |
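The kernel-doc added for module_init()/module_exit() is easiest to read next to the canonical skeleton it describes: built into the kernel the init function runs from do_initcalls(), as a module it runs at insertion and the exit function at removal. A minimal, hypothetical example module:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init ex_hello_init(void)
{
        pr_info("ex_hello: loaded\n");
        return 0;                       /* non-zero would fail the load */
}

static void __exit ex_hello_exit(void)
{
        pr_info("ex_hello: unloaded\n");
}

module_init(ex_hello_init);
module_exit(ex_hello_exit);
MODULE_LICENSE("GPL");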
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 1c9effa25e26..c12f2147c350 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
| @@ -67,8 +67,9 @@ enum { | |||
| 67 | 67 | ||
| 68 | struct kernel_param { | 68 | struct kernel_param { |
| 69 | const char *name; | 69 | const char *name; |
| 70 | struct module *mod; | ||
| 70 | const struct kernel_param_ops *ops; | 71 | const struct kernel_param_ops *ops; |
| 71 | u16 perm; | 72 | const u16 perm; |
| 72 | s8 level; | 73 | s8 level; |
| 73 | u8 flags; | 74 | u8 flags; |
| 74 | union { | 75 | union { |
| @@ -108,7 +109,7 @@ struct kparam_array | |||
| 108 | * | 109 | * |
| 109 | * @perm is 0 if the the variable is not to appear in sysfs, or 0444 | 110 | * @perm is 0 if the the variable is not to appear in sysfs, or 0444 |
| 110 | * for world-readable, 0644 for root-writable, etc. Note that if it | 111 | * for world-readable, 0644 for root-writable, etc. Note that if it |
| 111 | * is writable, you may need to use kparam_block_sysfs_write() around | 112 | * is writable, you may need to use kernel_param_lock() around |
| 112 | * accesses (esp. charp, which can be kfreed when it changes). | 113 | * accesses (esp. charp, which can be kfreed when it changes). |
| 113 | * | 114 | * |
| 114 | * The @type is simply pasted to refer to a param_ops_##type and a | 115 | * The @type is simply pasted to refer to a param_ops_##type and a |
| @@ -216,16 +217,16 @@ struct kparam_array | |||
| 216 | parameters. */ | 217 | parameters. */ |
| 217 | #define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ | 218 | #define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ |
| 218 | /* Default value instead of permissions? */ \ | 219 | /* Default value instead of permissions? */ \ |
| 219 | static const char __param_str_##name[] = prefix #name; \ | 220 | static const char __param_str_##name[] = prefix #name; \ |
| 220 | static struct kernel_param __moduleparam_const __param_##name \ | 221 | static struct kernel_param __moduleparam_const __param_##name \ |
| 221 | __used \ | 222 | __used \ |
| 222 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ | 223 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ |
| 223 | = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \ | 224 | = { __param_str_##name, THIS_MODULE, ops, \ |
| 224 | level, flags, { arg } } | 225 | VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } |
| 225 | 226 | ||
| 226 | /* Obsolete - use module_param_cb() */ | 227 | /* Obsolete - use module_param_cb() */ |
| 227 | #define module_param_call(name, set, get, arg, perm) \ | 228 | #define module_param_call(name, set, get, arg, perm) \ |
| 228 | static struct kernel_param_ops __param_ops_##name = \ | 229 | static const struct kernel_param_ops __param_ops_##name = \ |
| 229 | { .flags = 0, (void *)set, (void *)get }; \ | 230 | { .flags = 0, (void *)set, (void *)get }; \ |
| 230 | __module_param_call(MODULE_PARAM_PREFIX, \ | 231 | __module_param_call(MODULE_PARAM_PREFIX, \ |
| 231 | name, &__param_ops_##name, arg, \ | 232 | name, &__param_ops_##name, arg, \ |
| @@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *)) | |||
| 238 | return 0; | 239 | return 0; |
| 239 | } | 240 | } |
| 240 | 241 | ||
| 241 | /** | ||
| 242 | * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs. | ||
| 243 | * @name: the name of the parameter | ||
| 244 | * | ||
| 245 | * There's no point blocking write on a paramter that isn't writable via sysfs! | ||
| 246 | */ | ||
| 247 | #define kparam_block_sysfs_write(name) \ | ||
| 248 | do { \ | ||
| 249 | BUG_ON(!(__param_##name.perm & 0222)); \ | ||
| 250 | __kernel_param_lock(); \ | ||
| 251 | } while (0) | ||
| 252 | |||
| 253 | /** | ||
| 254 | * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again. | ||
| 255 | * @name: the name of the parameter | ||
| 256 | */ | ||
| 257 | #define kparam_unblock_sysfs_write(name) \ | ||
| 258 | do { \ | ||
| 259 | BUG_ON(!(__param_##name.perm & 0222)); \ | ||
| 260 | __kernel_param_unlock(); \ | ||
| 261 | } while (0) | ||
| 262 | |||
| 263 | /** | ||
| 264 | * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs. | ||
| 265 | * @name: the name of the parameter | ||
| 266 | * | ||
| 267 | * This also blocks sysfs writes. | ||
| 268 | */ | ||
| 269 | #define kparam_block_sysfs_read(name) \ | ||
| 270 | do { \ | ||
| 271 | BUG_ON(!(__param_##name.perm & 0444)); \ | ||
| 272 | __kernel_param_lock(); \ | ||
| 273 | } while (0) | ||
| 274 | |||
| 275 | /** | ||
| 276 | * kparam_unblock_sysfs_read - allows sysfs to read a parameter again. | ||
| 277 | * @name: the name of the parameter | ||
| 278 | */ | ||
| 279 | #define kparam_unblock_sysfs_read(name) \ | ||
| 280 | do { \ | ||
| 281 | BUG_ON(!(__param_##name.perm & 0444)); \ | ||
| 282 | __kernel_param_unlock(); \ | ||
| 283 | } while (0) | ||
| 284 | |||
| 285 | #ifdef CONFIG_SYSFS | 242 | #ifdef CONFIG_SYSFS |
| 286 | extern void __kernel_param_lock(void); | 243 | extern void kernel_param_lock(struct module *mod); |
| 287 | extern void __kernel_param_unlock(void); | 244 | extern void kernel_param_unlock(struct module *mod); |
| 288 | #else | 245 | #else |
| 289 | static inline void __kernel_param_lock(void) | 246 | static inline void kernel_param_lock(struct module *mod) |
| 290 | { | 247 | { |
| 291 | } | 248 | } |
| 292 | static inline void __kernel_param_unlock(void) | 249 | static inline void kernel_param_unlock(struct module *mod) |
| 293 | { | 250 | { |
| 294 | } | 251 | } |
| 295 | #endif | 252 | #endif |
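The per-module kernel_param_lock()/kernel_param_unlock() pair replaces the old global __kernel_param_lock() and the kparam_block_sysfs_* wrappers removed in the hunk above; charp parameters in particular can be kfreed and replaced by a sysfs write, so readers take the lock around access. A hedged usage sketch (the parameter and helper names are mine):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static char *ex_mode = "auto";
module_param(ex_mode, charp, 0644);
MODULE_PARM_DESC(ex_mode, "operating mode");

static bool ex_mode_is_auto(void)
{
        bool ret;

        kernel_param_lock(THIS_MODULE);
        ret = !strcmp(ex_mode, "auto");
        kernel_param_unlock(THIS_MODULE);

        return ret;
}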
| @@ -310,6 +267,15 @@ static inline void __kernel_param_unlock(void) | |||
| 310 | #define core_param(name, var, type, perm) \ | 267 | #define core_param(name, var, type, perm) \ |
| 311 | param_check_##type(name, &(var)); \ | 268 | param_check_##type(name, &(var)); \ |
| 312 | __module_param_call("", name, ¶m_ops_##type, &var, perm, -1, 0) | 269 | __module_param_call("", name, ¶m_ops_##type, &var, perm, -1, 0) |
| 270 | |||
| 271 | /** | ||
| 272 | * core_param_unsafe - same as core_param but taints kernel | ||
| 273 | */ | ||
| 274 | #define core_param_unsafe(name, var, type, perm) \ | ||
| 275 | param_check_##type(name, &(var)); \ | ||
| 276 | __module_param_call("", name, ¶m_ops_##type, &var, perm, \ | ||
| 277 | -1, KERNEL_PARAM_FL_UNSAFE) | ||
| 278 | |||
| 313 | #endif /* !MODULE */ | 279 | #endif /* !MODULE */ |
| 314 | 280 | ||
| 315 | /** | 281 | /** |
| @@ -357,8 +323,9 @@ extern char *parse_args(const char *name, | |||
| 357 | unsigned num, | 323 | unsigned num, |
| 358 | s16 level_min, | 324 | s16 level_min, |
| 359 | s16 level_max, | 325 | s16 level_max, |
| 326 | void *arg, | ||
| 360 | int (*unknown)(char *param, char *val, | 327 | int (*unknown)(char *param, char *val, |
| 361 | const char *doing)); | 328 | const char *doing, void *arg)); |
| 362 | 329 | ||
| 363 | /* Called by module remove. */ | 330 | /* Called by module remove. */ |
| 364 | #ifdef CONFIG_SYSFS | 331 | #ifdef CONFIG_SYSFS |
| @@ -376,64 +343,70 @@ static inline void destroy_params(const struct kernel_param *params, | |||
| 376 | #define __param_check(name, p, type) \ | 343 | #define __param_check(name, p, type) \ |
| 377 | static inline type __always_unused *__check_##name(void) { return(p); } | 344 | static inline type __always_unused *__check_##name(void) { return(p); } |
| 378 | 345 | ||
| 379 | extern struct kernel_param_ops param_ops_byte; | 346 | extern const struct kernel_param_ops param_ops_byte; |
| 380 | extern int param_set_byte(const char *val, const struct kernel_param *kp); | 347 | extern int param_set_byte(const char *val, const struct kernel_param *kp); |
| 381 | extern int param_get_byte(char *buffer, const struct kernel_param *kp); | 348 | extern int param_get_byte(char *buffer, const struct kernel_param *kp); |
| 382 | #define param_check_byte(name, p) __param_check(name, p, unsigned char) | 349 | #define param_check_byte(name, p) __param_check(name, p, unsigned char) |
| 383 | 350 | ||
| 384 | extern struct kernel_param_ops param_ops_short; | 351 | extern const struct kernel_param_ops param_ops_short; |
| 385 | extern int param_set_short(const char *val, const struct kernel_param *kp); | 352 | extern int param_set_short(const char *val, const struct kernel_param *kp); |
| 386 | extern int param_get_short(char *buffer, const struct kernel_param *kp); | 353 | extern int param_get_short(char *buffer, const struct kernel_param *kp); |
| 387 | #define param_check_short(name, p) __param_check(name, p, short) | 354 | #define param_check_short(name, p) __param_check(name, p, short) |
| 388 | 355 | ||
| 389 | extern struct kernel_param_ops param_ops_ushort; | 356 | extern const struct kernel_param_ops param_ops_ushort; |
| 390 | extern int param_set_ushort(const char *val, const struct kernel_param *kp); | 357 | extern int param_set_ushort(const char *val, const struct kernel_param *kp); |
| 391 | extern int param_get_ushort(char *buffer, const struct kernel_param *kp); | 358 | extern int param_get_ushort(char *buffer, const struct kernel_param *kp); |
| 392 | #define param_check_ushort(name, p) __param_check(name, p, unsigned short) | 359 | #define param_check_ushort(name, p) __param_check(name, p, unsigned short) |
| 393 | 360 | ||
| 394 | extern struct kernel_param_ops param_ops_int; | 361 | extern const struct kernel_param_ops param_ops_int; |
| 395 | extern int param_set_int(const char *val, const struct kernel_param *kp); | 362 | extern int param_set_int(const char *val, const struct kernel_param *kp); |
| 396 | extern int param_get_int(char *buffer, const struct kernel_param *kp); | 363 | extern int param_get_int(char *buffer, const struct kernel_param *kp); |
| 397 | #define param_check_int(name, p) __param_check(name, p, int) | 364 | #define param_check_int(name, p) __param_check(name, p, int) |
| 398 | 365 | ||
| 399 | extern struct kernel_param_ops param_ops_uint; | 366 | extern const struct kernel_param_ops param_ops_uint; |
| 400 | extern int param_set_uint(const char *val, const struct kernel_param *kp); | 367 | extern int param_set_uint(const char *val, const struct kernel_param *kp); |
| 401 | extern int param_get_uint(char *buffer, const struct kernel_param *kp); | 368 | extern int param_get_uint(char *buffer, const struct kernel_param *kp); |
| 402 | #define param_check_uint(name, p) __param_check(name, p, unsigned int) | 369 | #define param_check_uint(name, p) __param_check(name, p, unsigned int) |
| 403 | 370 | ||
| 404 | extern struct kernel_param_ops param_ops_long; | 371 | extern const struct kernel_param_ops param_ops_long; |
| 405 | extern int param_set_long(const char *val, const struct kernel_param *kp); | 372 | extern int param_set_long(const char *val, const struct kernel_param *kp); |
| 406 | extern int param_get_long(char *buffer, const struct kernel_param *kp); | 373 | extern int param_get_long(char *buffer, const struct kernel_param *kp); |
| 407 | #define param_check_long(name, p) __param_check(name, p, long) | 374 | #define param_check_long(name, p) __param_check(name, p, long) |
| 408 | 375 | ||
| 409 | extern struct kernel_param_ops param_ops_ulong; | 376 | extern const struct kernel_param_ops param_ops_ulong; |
| 410 | extern int param_set_ulong(const char *val, const struct kernel_param *kp); | 377 | extern int param_set_ulong(const char *val, const struct kernel_param *kp); |
| 411 | extern int param_get_ulong(char *buffer, const struct kernel_param *kp); | 378 | extern int param_get_ulong(char *buffer, const struct kernel_param *kp); |
| 412 | #define param_check_ulong(name, p) __param_check(name, p, unsigned long) | 379 | #define param_check_ulong(name, p) __param_check(name, p, unsigned long) |
| 413 | 380 | ||
| 414 | extern struct kernel_param_ops param_ops_ullong; | 381 | extern const struct kernel_param_ops param_ops_ullong; |
| 415 | extern int param_set_ullong(const char *val, const struct kernel_param *kp); | 382 | extern int param_set_ullong(const char *val, const struct kernel_param *kp); |
| 416 | extern int param_get_ullong(char *buffer, const struct kernel_param *kp); | 383 | extern int param_get_ullong(char *buffer, const struct kernel_param *kp); |
| 417 | #define param_check_ullong(name, p) __param_check(name, p, unsigned long long) | 384 | #define param_check_ullong(name, p) __param_check(name, p, unsigned long long) |
| 418 | 385 | ||
| 419 | extern struct kernel_param_ops param_ops_charp; | 386 | extern const struct kernel_param_ops param_ops_charp; |
| 420 | extern int param_set_charp(const char *val, const struct kernel_param *kp); | 387 | extern int param_set_charp(const char *val, const struct kernel_param *kp); |
| 421 | extern int param_get_charp(char *buffer, const struct kernel_param *kp); | 388 | extern int param_get_charp(char *buffer, const struct kernel_param *kp); |
| 422 | #define param_check_charp(name, p) __param_check(name, p, char *) | 389 | #define param_check_charp(name, p) __param_check(name, p, char *) |
| 423 | 390 | ||
| 424 | /* We used to allow int as well as bool. We're taking that away! */ | 391 | /* We used to allow int as well as bool. We're taking that away! */ |
| 425 | extern struct kernel_param_ops param_ops_bool; | 392 | extern const struct kernel_param_ops param_ops_bool; |
| 426 | extern int param_set_bool(const char *val, const struct kernel_param *kp); | 393 | extern int param_set_bool(const char *val, const struct kernel_param *kp); |
| 427 | extern int param_get_bool(char *buffer, const struct kernel_param *kp); | 394 | extern int param_get_bool(char *buffer, const struct kernel_param *kp); |
| 428 | #define param_check_bool(name, p) __param_check(name, p, bool) | 395 | #define param_check_bool(name, p) __param_check(name, p, bool) |
| 429 | 396 | ||
| 430 | extern struct kernel_param_ops param_ops_invbool; | 397 | extern const struct kernel_param_ops param_ops_bool_enable_only; |
| 398 | extern int param_set_bool_enable_only(const char *val, | ||
| 399 | const struct kernel_param *kp); | ||
| 400 | /* getter is the same as for the regular bool */ | ||
| 401 | #define param_check_bool_enable_only param_check_bool | ||
| 402 | |||
| 403 | extern const struct kernel_param_ops param_ops_invbool; | ||
| 431 | extern int param_set_invbool(const char *val, const struct kernel_param *kp); | 404 | extern int param_set_invbool(const char *val, const struct kernel_param *kp); |
| 432 | extern int param_get_invbool(char *buffer, const struct kernel_param *kp); | 405 | extern int param_get_invbool(char *buffer, const struct kernel_param *kp); |
| 433 | #define param_check_invbool(name, p) __param_check(name, p, bool) | 406 | #define param_check_invbool(name, p) __param_check(name, p, bool) |
| 434 | 407 | ||
| 435 | /* An int, which can only be set like a bool (though it shows as an int). */ | 408 | /* An int, which can only be set like a bool (though it shows as an int). */ |
| 436 | extern struct kernel_param_ops param_ops_bint; | 409 | extern const struct kernel_param_ops param_ops_bint; |
| 437 | extern int param_set_bint(const char *val, const struct kernel_param *kp); | 410 | extern int param_set_bint(const char *val, const struct kernel_param *kp); |
| 438 | #define param_get_bint param_get_int | 411 | #define param_get_bint param_get_int |
| 439 | #define param_check_bint param_check_int | 412 | #define param_check_bint param_check_int |
| @@ -477,9 +450,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); | |||
| 477 | perm, -1, 0); \ | 450 | perm, -1, 0); \ |
| 478 | __MODULE_PARM_TYPE(name, "array of " #type) | 451 | __MODULE_PARM_TYPE(name, "array of " #type) |
| 479 | 452 | ||
| 480 | extern struct kernel_param_ops param_array_ops; | 453 | extern const struct kernel_param_ops param_array_ops; |
| 481 | 454 | ||
| 482 | extern struct kernel_param_ops param_ops_string; | 455 | extern const struct kernel_param_ops param_ops_string; |
| 483 | extern int param_set_copystring(const char *val, const struct kernel_param *); | 456 | extern int param_set_copystring(const char *val, const struct kernel_param *); |
| 484 | extern int param_get_string(char *buffer, const struct kernel_param *kp); | 457 | extern int param_get_string(char *buffer, const struct kernel_param *kp); |
| 485 | 458 | ||
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 299d7d31fe53..9b57a9b1b081 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h | |||
| @@ -296,183 +296,19 @@ struct cfi_private { | |||
| 296 | struct flchip chips[0]; /* per-chip data structure for each chip */ | 296 | struct flchip chips[0]; /* per-chip data structure for each chip */ |
| 297 | }; | 297 | }; |
| 298 | 298 | ||
| 299 | /* | 299 | uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, |
| 300 | * Returns the command address according to the given geometry. | 300 | struct map_info *map, struct cfi_private *cfi); |
| 301 | */ | ||
| 302 | static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, | ||
| 303 | struct map_info *map, struct cfi_private *cfi) | ||
| 304 | { | ||
| 305 | unsigned bankwidth = map_bankwidth(map); | ||
| 306 | unsigned interleave = cfi_interleave(cfi); | ||
| 307 | unsigned type = cfi->device_type; | ||
| 308 | uint32_t addr; | ||
| 309 | |||
| 310 | addr = (cmd_ofs * type) * interleave; | ||
| 311 | |||
| 312 | /* Modify the unlock address if we are in compatibility mode. | ||
| 313 | * For 16bit devices on 8 bit busses | ||
| 314 | * and 32bit devices on 16 bit busses | ||
| 315 | * set the low bit of the alternating bit sequence of the address. | ||
| 316 | */ | ||
| 317 | if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa)) | ||
| 318 | addr |= (type >> 1)*interleave; | ||
| 319 | |||
| 320 | return addr; | ||
| 321 | } | ||
| 322 | |||
| 323 | /* | ||
| 324 | * Transforms the CFI command for the given geometry (bus width & interleave). | ||
| 325 | * It looks too long to be inline, but in the common case it should almost all | ||
| 326 | * get optimised away. | ||
| 327 | */ | ||
| 328 | static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi) | ||
| 329 | { | ||
| 330 | map_word val = { {0} }; | ||
| 331 | int wordwidth, words_per_bus, chip_mode, chips_per_word; | ||
| 332 | unsigned long onecmd; | ||
| 333 | int i; | ||
| 334 | |||
| 335 | /* We do it this way to give the compiler a fighting chance | ||
| 336 | of optimising away all the crap for 'bankwidth' larger than | ||
| 337 | an unsigned long, in the common case where that support is | ||
| 338 | disabled */ | ||
| 339 | if (map_bankwidth_is_large(map)) { | ||
| 340 | wordwidth = sizeof(unsigned long); | ||
| 341 | words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1 | ||
| 342 | } else { | ||
| 343 | wordwidth = map_bankwidth(map); | ||
| 344 | words_per_bus = 1; | ||
| 345 | } | ||
| 346 | |||
| 347 | chip_mode = map_bankwidth(map) / cfi_interleave(cfi); | ||
| 348 | chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); | ||
| 349 | |||
| 350 | /* First, determine what the bit-pattern should be for a single | ||
| 351 | device, according to chip mode and endianness... */ | ||
| 352 | switch (chip_mode) { | ||
| 353 | default: BUG(); | ||
| 354 | case 1: | ||
| 355 | onecmd = cmd; | ||
| 356 | break; | ||
| 357 | case 2: | ||
| 358 | onecmd = cpu_to_cfi16(map, cmd); | ||
| 359 | break; | ||
| 360 | case 4: | ||
| 361 | onecmd = cpu_to_cfi32(map, cmd); | ||
| 362 | break; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* Now replicate it across the size of an unsigned long, or | ||
| 366 | just to the bus width as appropriate */ | ||
| 367 | switch (chips_per_word) { | ||
| 368 | default: BUG(); | ||
| 369 | #if BITS_PER_LONG >= 64 | ||
| 370 | case 8: | ||
| 371 | onecmd |= (onecmd << (chip_mode * 32)); | ||
| 372 | #endif | ||
| 373 | case 4: | ||
| 374 | onecmd |= (onecmd << (chip_mode * 16)); | ||
| 375 | case 2: | ||
| 376 | onecmd |= (onecmd << (chip_mode * 8)); | ||
| 377 | case 1: | ||
| 378 | ; | ||
| 379 | } | ||
| 380 | 301 | ||
| 381 | /* And finally, for the multi-word case, replicate it | 302 | map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi); |
| 382 | in all words in the structure */ | ||
| 383 | for (i=0; i < words_per_bus; i++) { | ||
| 384 | val.x[i] = onecmd; | ||
| 385 | } | ||
| 386 | |||
| 387 | return val; | ||
| 388 | } | ||
| 389 | #define CMD(x) cfi_build_cmd((x), map, cfi) | 303 | #define CMD(x) cfi_build_cmd((x), map, cfi) |
| 390 | 304 | ||
| 391 | 305 | unsigned long cfi_merge_status(map_word val, struct map_info *map, | |
| 392 | static inline unsigned long cfi_merge_status(map_word val, struct map_info *map, | 306 | struct cfi_private *cfi); |
| 393 | struct cfi_private *cfi) | ||
| 394 | { | ||
| 395 | int wordwidth, words_per_bus, chip_mode, chips_per_word; | ||
| 396 | unsigned long onestat, res = 0; | ||
| 397 | int i; | ||
| 398 | |||
| 399 | /* We do it this way to give the compiler a fighting chance | ||
| 400 | of optimising away all the crap for 'bankwidth' larger than | ||
| 401 | an unsigned long, in the common case where that support is | ||
| 402 | disabled */ | ||
| 403 | if (map_bankwidth_is_large(map)) { | ||
| 404 | wordwidth = sizeof(unsigned long); | ||
| 405 | words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1 | ||
| 406 | } else { | ||
| 407 | wordwidth = map_bankwidth(map); | ||
| 408 | words_per_bus = 1; | ||
| 409 | } | ||
| 410 | |||
| 411 | chip_mode = map_bankwidth(map) / cfi_interleave(cfi); | ||
| 412 | chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); | ||
| 413 | |||
| 414 | onestat = val.x[0]; | ||
| 415 | /* Or all status words together */ | ||
| 416 | for (i=1; i < words_per_bus; i++) { | ||
| 417 | onestat |= val.x[i]; | ||
| 418 | } | ||
| 419 | |||
| 420 | res = onestat; | ||
| 421 | switch(chips_per_word) { | ||
| 422 | default: BUG(); | ||
| 423 | #if BITS_PER_LONG >= 64 | ||
| 424 | case 8: | ||
| 425 | res |= (onestat >> (chip_mode * 32)); | ||
| 426 | #endif | ||
| 427 | case 4: | ||
| 428 | res |= (onestat >> (chip_mode * 16)); | ||
| 429 | case 2: | ||
| 430 | res |= (onestat >> (chip_mode * 8)); | ||
| 431 | case 1: | ||
| 432 | ; | ||
| 433 | } | ||
| 434 | |||
| 435 | /* Last, determine what the bit-pattern should be for a single | ||
| 436 | device, according to chip mode and endianness... */ | ||
| 437 | switch (chip_mode) { | ||
| 438 | case 1: | ||
| 439 | break; | ||
| 440 | case 2: | ||
| 441 | res = cfi16_to_cpu(map, res); | ||
| 442 | break; | ||
| 443 | case 4: | ||
| 444 | res = cfi32_to_cpu(map, res); | ||
| 445 | break; | ||
| 446 | default: BUG(); | ||
| 447 | } | ||
| 448 | return res; | ||
| 449 | } | ||
| 450 | |||
| 451 | #define MERGESTATUS(x) cfi_merge_status((x), map, cfi) | 307 | #define MERGESTATUS(x) cfi_merge_status((x), map, cfi) |
| 452 | 308 | ||
| 453 | 309 | uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base, | |
| 454 | /* | ||
| 455 | * Sends a CFI command to a bank of flash for the given geometry. | ||
| 456 | * | ||
| 457 | * Returns the offset in flash where the command was written. | ||
| 458 | * If prev_val is non-null, it will be set to the value at the command address, | ||
| 459 | * before the command was written. | ||
| 460 | */ | ||
| 461 | static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base, | ||
| 462 | struct map_info *map, struct cfi_private *cfi, | 310 | struct map_info *map, struct cfi_private *cfi, |
| 463 | int type, map_word *prev_val) | 311 | int type, map_word *prev_val); |
| 464 | { | ||
| 465 | map_word val; | ||
| 466 | uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi); | ||
| 467 | val = cfi_build_cmd(cmd, map, cfi); | ||
| 468 | |||
| 469 | if (prev_val) | ||
| 470 | *prev_val = map_read(map, addr); | ||
| 471 | |||
| 472 | map_write(map, val, addr); | ||
| 473 | |||
| 474 | return addr - base; | ||
| 475 | } | ||
| 476 | 312 | ||
| 477 | static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) | 313 | static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) |
| 478 | { | 314 | { |
| @@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) | |||
| 506 | } | 342 | } |
| 507 | } | 343 | } |
| 508 | 344 | ||
| 509 | static inline void cfi_udelay(int us) | 345 | void cfi_udelay(int us); |
| 510 | { | ||
| 511 | if (us >= 1000) { | ||
| 512 | msleep((us+999)/1000); | ||
| 513 | } else { | ||
| 514 | udelay(us); | ||
| 515 | cond_resched(); | ||
| 516 | } | ||
| 517 | } | ||
| 518 | 346 | ||
| 519 | int __xipram cfi_qry_present(struct map_info *map, __u32 base, | 347 | int __xipram cfi_qry_present(struct map_info *map, __u32 base, |
| 520 | struct cfi_private *cfi); | 348 | struct cfi_private *cfi); |
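With the helpers un-inlined, callers in the chip drivers are unchanged; only the definitions move into the CFI core. As a reminder of the calling convention the removed comments described, here is roughly how an AMD/Fujitsu-style command-set driver issues the classic unlock plus autoselect sequence; treat this as an illustrative sketch of the pattern, not a quote of any particular driver:

static void my_autoselect(struct map_info *map, struct flchip *chip,
			  struct cfi_private *cfi)
{
	/* cfi_send_gen_cmd() converts the command and offset for the
	 * bank's width/interleave and returns the flash-relative
	 * address it wrote to; prev_val is not needed here. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
}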
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 3d4ea7eb2b68..272f42952f34 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -26,6 +26,8 @@ | |||
| 26 | 26 | ||
| 27 | struct mtd_info; | 27 | struct mtd_info; |
| 28 | struct nand_flash_dev; | 28 | struct nand_flash_dev; |
| 29 | struct device_node; | ||
| 30 | |||
| 29 | /* Scan and identify a NAND device */ | 31 | /* Scan and identify a NAND device */ |
| 30 | extern int nand_scan(struct mtd_info *mtd, int max_chips); | 32 | extern int nand_scan(struct mtd_info *mtd, int max_chips); |
| 31 | /* | 33 | /* |
| @@ -176,17 +178,17 @@ typedef enum { | |||
| 176 | /* Chip may not exist, so silence any errors in scan */ | 178 | /* Chip may not exist, so silence any errors in scan */ |
| 177 | #define NAND_SCAN_SILENT_NODEV 0x00040000 | 179 | #define NAND_SCAN_SILENT_NODEV 0x00040000 |
| 178 | /* | 180 | /* |
| 179 | * This option could be defined by controller drivers to protect against | ||
| 180 | * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers | ||
| 181 | */ | ||
| 182 | #define NAND_USE_BOUNCE_BUFFER 0x00080000 | ||
| 183 | /* | ||
| 184 | * Autodetect nand buswidth with readid/onfi. | 181 | * Autodetect nand buswidth with readid/onfi. |
| 185 | * This assumes the driver will configure the hardware in 8-bit mode | 182 | * This assumes the driver will configure the hardware in 8-bit mode |
| 186 | * when calling nand_scan_ident, and update its configuration | 183 | * when calling nand_scan_ident, and update its configuration |
| 187 | * before calling nand_scan_tail. | 184 | * before calling nand_scan_tail. |
| 188 | */ | 185 | */ |
| 189 | #define NAND_BUSWIDTH_AUTO 0x00080000 | 186 | #define NAND_BUSWIDTH_AUTO 0x00080000 |
| 187 | /* | ||
| 188 | * This option could be defined by controller drivers to protect against | ||
| 189 | * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers | ||
| 190 | */ | ||
| 191 | #define NAND_USE_BOUNCE_BUFFER 0x00100000 | ||
| 190 | 192 | ||
| 191 | /* Options set by nand scan */ | 193 | /* Options set by nand scan */ |
| 192 | /* Nand scan has allocated controller struct */ | 194 | /* Nand scan has allocated controller struct */ |
| @@ -542,6 +544,7 @@ struct nand_buffers { | |||
| 542 | * flash device | 544 | * flash device |
| 543 | * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the | 545 | * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the |
| 544 | * flash device. | 546 | * flash device. |
| 547 | * @dn: [BOARDSPECIFIC] device node describing this instance | ||
| 545 | * @read_byte: [REPLACEABLE] read one byte from the chip | 548 | * @read_byte: [REPLACEABLE] read one byte from the chip |
| 546 | * @read_word: [REPLACEABLE] read one word from the chip | 549 | * @read_word: [REPLACEABLE] read one word from the chip |
| 547 | * @write_byte: [REPLACEABLE] write a single byte to the chip on the | 550 | * @write_byte: [REPLACEABLE] write a single byte to the chip on the |
| @@ -644,6 +647,8 @@ struct nand_chip { | |||
| 644 | void __iomem *IO_ADDR_R; | 647 | void __iomem *IO_ADDR_R; |
| 645 | void __iomem *IO_ADDR_W; | 648 | void __iomem *IO_ADDR_W; |
| 646 | 649 | ||
| 650 | struct device_node *dn; | ||
| 651 | |||
| 647 | uint8_t (*read_byte)(struct mtd_info *mtd); | 652 | uint8_t (*read_byte)(struct mtd_info *mtd); |
| 648 | u16 (*read_word)(struct mtd_info *mtd); | 653 | u16 (*read_word)(struct mtd_info *mtd); |
| 649 | void (*write_byte)(struct mtd_info *mtd, uint8_t byte); | 654 | void (*write_byte)(struct mtd_info *mtd, uint8_t byte); |
| @@ -833,7 +838,6 @@ struct nand_manufacturers { | |||
| 833 | extern struct nand_flash_dev nand_flash_ids[]; | 838 | extern struct nand_flash_dev nand_flash_ids[]; |
| 834 | extern struct nand_manufacturers nand_manuf_ids[]; | 839 | extern struct nand_manufacturers nand_manuf_ids[]; |
| 835 | 840 | ||
| 836 | extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); | ||
| 837 | extern int nand_default_bbt(struct mtd_info *mtd); | 841 | extern int nand_default_bbt(struct mtd_info *mtd); |
| 838 | extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); | 842 | extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); |
| 839 | extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); | 843 | extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); |
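The new @dn field lets a controller driver hand its device-tree node to the NAND core before nand_scan(), so common properties can be parsed there. A sketch of a probe routine doing so (struct my_nand_ctrl and the surrounding driver are hypothetical):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_nand_ctrl {
	struct nand_chip chip;
	struct mtd_info mtd;
};

static int my_nand_probe(struct platform_device *pdev)
{
	struct my_nand_ctrl *ctrl;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	/* Let the NAND core see the controller's DT node during scan. */
	ctrl->chip.dn = pdev->dev.of_node;
	ctrl->mtd.priv = &ctrl->chip;
	ctrl->mtd.dev.parent = &pdev->dev;

	/* ... set up IO_ADDR_R/W, cmd_ctrl, ECC, ... */

	return nand_scan(&ctrl->mtd, 1);
}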
diff --git a/include/linux/nd.h b/include/linux/nd.h new file mode 100644 index 000000000000..507e47c86737 --- /dev/null +++ b/include/linux/nd.h | |||
| @@ -0,0 +1,151 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of version 2 of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | */ | ||
| 13 | #ifndef __LINUX_ND_H__ | ||
| 14 | #define __LINUX_ND_H__ | ||
| 15 | #include <linux/fs.h> | ||
| 16 | #include <linux/ndctl.h> | ||
| 17 | #include <linux/device.h> | ||
| 18 | |||
| 19 | struct nd_device_driver { | ||
| 20 | struct device_driver drv; | ||
| 21 | unsigned long type; | ||
| 22 | int (*probe)(struct device *dev); | ||
| 23 | int (*remove)(struct device *dev); | ||
| 24 | }; | ||
| 25 | |||
| 26 | static inline struct nd_device_driver *to_nd_device_driver( | ||
| 27 | struct device_driver *drv) | ||
| 28 | { | ||
| 29 | return container_of(drv, struct nd_device_driver, drv); | ||
| 30 | }; | ||
| 31 | |||
| 32 | /** | ||
| 33 | * struct nd_namespace_common - core infrastructure of a namespace | ||
| 34 | * @force_raw: ignore other personalities for the namespace (e.g. btt) | ||
| 35 | * @dev: device model node | ||
| 36 | * @claim: when set, another personality has taken ownership of the namespace | ||
| 37 | * @rw_bytes: access the raw namespace capacity with byte-aligned transfers | ||
| 38 | */ | ||
| 39 | struct nd_namespace_common { | ||
| 40 | int force_raw; | ||
| 41 | struct device dev; | ||
| 42 | struct device *claim; | ||
| 43 | int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, | ||
| 44 | void *buf, size_t size, int rw); | ||
| 45 | }; | ||
| 46 | |||
| 47 | static inline struct nd_namespace_common *to_ndns(struct device *dev) | ||
| 48 | { | ||
| 49 | return container_of(dev, struct nd_namespace_common, dev); | ||
| 50 | } | ||
| 51 | |||
| 52 | /** | ||
| 53 | * struct nd_namespace_io - infrastructure for loading an nd_pmem instance | ||
| 54 | * @dev: namespace device created by the nd region driver | ||
| 55 | * @res: struct resource conversion of a NFIT SPA table | ||
| 56 | */ | ||
| 57 | struct nd_namespace_io { | ||
| 58 | struct nd_namespace_common common; | ||
| 59 | struct resource res; | ||
| 60 | }; | ||
| 61 | |||
| 62 | /** | ||
| 63 | * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory | ||
| 64 | * @nsio: device and system physical address range to drive | ||
| 65 | * @alt_name: namespace name supplied in the dimm label | ||
| 66 | * @uuid: namespace uuid supplied in the dimm label | ||
| 67 | */ | ||
| 68 | struct nd_namespace_pmem { | ||
| 69 | struct nd_namespace_io nsio; | ||
| 70 | char *alt_name; | ||
| 71 | u8 *uuid; | ||
| 72 | }; | ||
| 73 | |||
| 74 | /** | ||
| 75 | * struct nd_namespace_blk - namespace for dimm-bounded persistent memory | ||
| 76 | * @alt_name: namespace name supplied in the dimm label | ||
| 77 | * @uuid: namespace uuid supplied in the dimm label | ||
| 78 | * @id: ida allocated id | ||
| 79 | * @lbasize: blk namespaces have a native sector size when btt not present | ||
| 80 | * @num_resources: number of dpa extents to claim | ||
| 81 | * @res: discontiguous dpa extents for given dimm | ||
| 82 | */ | ||
| 83 | struct nd_namespace_blk { | ||
| 84 | struct nd_namespace_common common; | ||
| 85 | char *alt_name; | ||
| 86 | u8 *uuid; | ||
| 87 | int id; | ||
| 88 | unsigned long lbasize; | ||
| 89 | int num_resources; | ||
| 90 | struct resource **res; | ||
| 91 | }; | ||
| 92 | |||
| 93 | static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev) | ||
| 94 | { | ||
| 95 | return container_of(dev, struct nd_namespace_io, common.dev); | ||
| 96 | } | ||
| 97 | |||
| 98 | static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev) | ||
| 99 | { | ||
| 100 | struct nd_namespace_io *nsio = to_nd_namespace_io(dev); | ||
| 101 | |||
| 102 | return container_of(nsio, struct nd_namespace_pmem, nsio); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev) | ||
| 106 | { | ||
| 107 | return container_of(dev, struct nd_namespace_blk, common.dev); | ||
| 108 | } | ||
| 109 | |||
| 110 | /** | ||
| 111 | * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace | ||
| 112 | * @ndns: device to read | ||
| 113 | * @offset: namespace-relative starting offset | ||
| 114 | * @buf: buffer to fill | ||
| 115 | * @size: transfer length | ||
| 116 | * | ||
| 117 | * @buf is up-to-date upon return from this routine. | ||
| 118 | */ | ||
| 119 | static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, | ||
| 120 | resource_size_t offset, void *buf, size_t size) | ||
| 121 | { | ||
| 122 | return ndns->rw_bytes(ndns, offset, buf, size, READ); | ||
| 123 | } | ||
| 124 | |||
| 125 | /** | ||
| 126 | * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace | ||
| 127 | * @ndns: device to write to | ||
| 128 | * @offset: namespace-relative starting offset | ||
| 129 | * @buf: buffer to drain | ||
| 130 | * @size: transfer length | ||
| 131 | * | ||
| 132 | * NVDIMM namespace devices do not implement sectors internally. Depending on | ||
| 133 | * the @ndns, the contents of @buf may be in cpu cache, platform buffers, | ||
| 134 | * or on backing memory media upon return from this routine. Flushing | ||
| 135 | * to media is handled internally by the @ndns driver, if at all. | ||
| 136 | */ | ||
| 137 | static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, | ||
| 138 | resource_size_t offset, void *buf, size_t size) | ||
| 139 | { | ||
| 140 | return ndns->rw_bytes(ndns, offset, buf, size, WRITE); | ||
| 141 | } | ||
| 142 | |||
| 143 | #define MODULE_ALIAS_ND_DEVICE(type) \ | ||
| 144 | MODULE_ALIAS("nd:t" __stringify(type) "*") | ||
| 145 | #define ND_DEVICE_MODALIAS_FMT "nd:t%d" | ||
| 146 | |||
| 147 | int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, | ||
| 148 | struct module *module, const char *mod_name); | ||
| 149 | #define nd_driver_register(driver) \ | ||
| 150 | __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) | ||
| 151 | #endif /* __LINUX_ND_H__ */ | ||
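A minimal consumer of this new header, sketched under the assumption that the ND_DRIVER_*/ND_DEVICE_* constants come from the uapi <linux/ndctl.h> it includes; the driver itself is hypothetical:

#include <linux/module.h>
#include <linux/nd.h>

static int my_ndns_probe(struct device *dev)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	u8 header[64];

	/* Byte-aligned, synchronous access to the raw namespace. */
	return nvdimm_read_bytes(ndns, 0, header, sizeof(header));
}

static int my_ndns_remove(struct device *dev)
{
	return 0;
}

static struct nd_device_driver my_ndns_driver = {
	.probe = my_ndns_probe,
	.remove = my_ndns_remove,
	.drv = {
		.name = "my_ndns",
	},
	.type = ND_DRIVER_NAMESPACE_IO,	/* assumption: flag from <linux/ndctl.h> */
};

static int __init my_ndns_init(void)
{
	return nd_driver_register(&my_ndns_driver);
}
module_init(my_ndns_init);

static void __exit my_ndns_exit(void)
{
	driver_unregister(&my_ndns_driver.drv);
}
module_exit(my_ndns_exit);

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_LICENSE("GPL");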
diff --git a/include/linux/net.h b/include/linux/net.h index 738ea48be889..04aa06852771 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
| @@ -38,7 +38,6 @@ struct net; | |||
| 38 | #define SOCK_NOSPACE 2 | 38 | #define SOCK_NOSPACE 2 |
| 39 | #define SOCK_PASSCRED 3 | 39 | #define SOCK_PASSCRED 3 |
| 40 | #define SOCK_PASSSEC 4 | 40 | #define SOCK_PASSSEC 4 |
| 41 | #define SOCK_EXTERNALLY_ALLOCATED 5 | ||
| 42 | 41 | ||
| 43 | #ifndef ARCH_HAS_SOCKET_TYPES | 42 | #ifndef ARCH_HAS_SOCKET_TYPES |
| 44 | /** | 43 | /** |
| @@ -208,7 +207,7 @@ void sock_unregister(int family); | |||
| 208 | int __sock_create(struct net *net, int family, int type, int proto, | 207 | int __sock_create(struct net *net, int family, int type, int proto, |
| 209 | struct socket **res, int kern); | 208 | struct socket **res, int kern); |
| 210 | int sock_create(int family, int type, int proto, struct socket **res); | 209 | int sock_create(int family, int type, int proto, struct socket **res); |
| 211 | int sock_create_kern(int family, int type, int proto, struct socket **res); | 210 | int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); |
| 212 | int sock_create_lite(int family, int type, int proto, struct socket **res); | 211 | int sock_create_lite(int family, int type, int proto, struct socket **res); |
| 213 | void sock_release(struct socket *sock); | 212 | void sock_release(struct socket *sock); |
| 214 | int sock_sendmsg(struct socket *sock, struct msghdr *msg); | 213 | int sock_sendmsg(struct socket *sock, struct msghdr *msg); |
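sock_create_kern() now takes the network namespace explicitly instead of implying it. A short sketch of the updated call; the helper is hypothetical, and a caller with no better context would pass &init_net:

#include <linux/err.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/net_namespace.h>

static struct socket *my_open_kernel_udp(struct net *net)
{
	struct socket *sock;
	int err;

	/* The namespace is now an explicit argument. */
	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}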
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 7d59dc6ab789..9672781c593d 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
| @@ -66,7 +66,6 @@ enum { | |||
| 66 | NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ | 66 | NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ |
| 67 | NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ | 67 | NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ |
| 68 | NETIF_F_BUSY_POLL_BIT, /* Busy poll */ | 68 | NETIF_F_BUSY_POLL_BIT, /* Busy poll */ |
| 69 | NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */ | ||
| 70 | 69 | ||
| 71 | /* | 70 | /* |
| 72 | * Add your fresh new feature above and remember to update | 71 | * Add your fresh new feature above and remember to update |
| @@ -125,7 +124,6 @@ enum { | |||
| 125 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) | 124 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) |
| 126 | #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) | 125 | #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) |
| 127 | #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) | 126 | #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) |
| 128 | #define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD) | ||
| 129 | 127 | ||
| 130 | /* Features valid for ethtool to change */ | 128 | /* Features valid for ethtool to change */ |
| 131 | /* = all defined minus driver/device-class-related */ | 129 | /* = all defined minus driver/device-class-related */ |
| @@ -161,8 +159,7 @@ enum { | |||
| 161 | */ | 159 | */ |
| 162 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | 160 | #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ |
| 163 | NETIF_F_SG | NETIF_F_HIGHDMA | \ | 161 | NETIF_F_SG | NETIF_F_HIGHDMA | \ |
| 164 | NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \ | 162 | NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) |
| 165 | NETIF_F_HW_SWITCH_OFFLOAD) | ||
| 166 | 163 | ||
| 167 | /* | 164 | /* |
| 168 | * If one device doesn't support one of these features, then disable it | 165 | * If one device doesn't support one of these features, then disable it |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 05b9a694e213..e20979dfd6a9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -1100,6 +1100,10 @@ struct net_device_ops { | |||
| 1100 | struct ifla_vf_info *ivf); | 1100 | struct ifla_vf_info *ivf); |
| 1101 | int (*ndo_set_vf_link_state)(struct net_device *dev, | 1101 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
| 1102 | int vf, int link_state); | 1102 | int vf, int link_state); |
| 1103 | int (*ndo_get_vf_stats)(struct net_device *dev, | ||
| 1104 | int vf, | ||
| 1105 | struct ifla_vf_stats | ||
| 1106 | *vf_stats); | ||
| 1103 | int (*ndo_set_vf_port)(struct net_device *dev, | 1107 | int (*ndo_set_vf_port)(struct net_device *dev, |
| 1104 | int vf, | 1108 | int vf, |
| 1105 | struct nlattr *port[]); | 1109 | struct nlattr *port[]); |
| @@ -1564,7 +1568,7 @@ struct net_device { | |||
| 1564 | const struct net_device_ops *netdev_ops; | 1568 | const struct net_device_ops *netdev_ops; |
| 1565 | const struct ethtool_ops *ethtool_ops; | 1569 | const struct ethtool_ops *ethtool_ops; |
| 1566 | #ifdef CONFIG_NET_SWITCHDEV | 1570 | #ifdef CONFIG_NET_SWITCHDEV |
| 1567 | const struct swdev_ops *swdev_ops; | 1571 | const struct switchdev_ops *switchdev_ops; |
| 1568 | #endif | 1572 | #endif |
| 1569 | 1573 | ||
| 1570 | const struct header_ops *header_ops; | 1574 | const struct header_ops *header_ops; |
| @@ -1652,7 +1656,14 @@ struct net_device { | |||
| 1652 | rx_handler_func_t __rcu *rx_handler; | 1656 | rx_handler_func_t __rcu *rx_handler; |
| 1653 | void __rcu *rx_handler_data; | 1657 | void __rcu *rx_handler_data; |
| 1654 | 1658 | ||
| 1659 | #ifdef CONFIG_NET_CLS_ACT | ||
| 1660 | struct tcf_proto __rcu *ingress_cl_list; | ||
| 1661 | #endif | ||
| 1655 | struct netdev_queue __rcu *ingress_queue; | 1662 | struct netdev_queue __rcu *ingress_queue; |
| 1663 | #ifdef CONFIG_NETFILTER_INGRESS | ||
| 1664 | struct list_head nf_hooks_ingress; | ||
| 1665 | #endif | ||
| 1666 | |||
| 1656 | unsigned char broadcast[MAX_ADDR_LEN]; | 1667 | unsigned char broadcast[MAX_ADDR_LEN]; |
| 1657 | #ifdef CONFIG_RFS_ACCEL | 1668 | #ifdef CONFIG_RFS_ACCEL |
| 1658 | struct cpu_rmap *rx_cpu_rmap; | 1669 | struct cpu_rmap *rx_cpu_rmap; |
| @@ -1990,6 +2001,7 @@ struct offload_callbacks { | |||
| 1990 | 2001 | ||
| 1991 | struct packet_offload { | 2002 | struct packet_offload { |
| 1992 | __be16 type; /* This is really htons(ether_type). */ | 2003 | __be16 type; /* This is really htons(ether_type). */ |
| 2004 | u16 priority; | ||
| 1993 | struct offload_callbacks callbacks; | 2005 | struct offload_callbacks callbacks; |
| 1994 | struct list_head list; | 2006 | struct list_head list; |
| 1995 | }; | 2007 | }; |
| @@ -2552,10 +2564,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) | |||
| 2552 | 2564 | ||
| 2553 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | 2565 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
| 2554 | { | 2566 | { |
| 2555 | if (WARN_ON(!dev_queue)) { | ||
| 2556 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); | ||
| 2557 | return; | ||
| 2558 | } | ||
| 2559 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); | 2567 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
| 2560 | } | 2568 | } |
| 2561 | 2569 | ||
| @@ -2571,15 +2579,7 @@ static inline void netif_stop_queue(struct net_device *dev) | |||
| 2571 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); | 2579 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
| 2572 | } | 2580 | } |
| 2573 | 2581 | ||
| 2574 | static inline void netif_tx_stop_all_queues(struct net_device *dev) | 2582 | void netif_tx_stop_all_queues(struct net_device *dev); |
| 2575 | { | ||
| 2576 | unsigned int i; | ||
| 2577 | |||
| 2578 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
| 2579 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
| 2580 | netif_tx_stop_queue(txq); | ||
| 2581 | } | ||
| 2582 | } | ||
| 2583 | 2583 | ||
| 2584 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | 2584 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
| 2585 | { | 2585 | { |
| @@ -2840,6 +2840,9 @@ static inline int netif_set_xps_queue(struct net_device *dev, | |||
| 2840 | } | 2840 | } |
| 2841 | #endif | 2841 | #endif |
| 2842 | 2842 | ||
| 2843 | u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, | ||
| 2844 | unsigned int num_tx_queues); | ||
| 2845 | |||
| 2843 | /* | 2846 | /* |
| 2844 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | 2847 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used |
| 2845 | * as a distribution range limit for the returned value. | 2848 | * as a distribution range limit for the returned value. |
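The new ndo_get_vf_stats hook lets an SR-IOV PF driver report per-VF counters through rtnetlink. A sketch of a driver implementation, assuming struct ifla_vf_stats carries the usual rx/tx packet, byte, broadcast and multicast counters; my_vf and my_priv are hypothetical driver state:

#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct my_vf {
	u64 rx_packets, tx_packets, rx_bytes, tx_bytes;
	u64 rx_broadcast, rx_multicast;
};

struct my_priv {
	struct my_vf vf[8];	/* hypothetical per-VF counters */
};

static int my_ndo_get_vf_stats(struct net_device *dev, int vf,
			       struct ifla_vf_stats *vf_stats)
{
	struct my_priv *priv = netdev_priv(dev);
	struct my_vf *vfinfo = &priv->vf[vf];

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets = vfinfo->rx_packets;
	vf_stats->tx_packets = vfinfo->tx_packets;
	vf_stats->rx_bytes   = vfinfo->rx_bytes;
	vf_stats->tx_bytes   = vfinfo->tx_bytes;
	vf_stats->broadcast  = vfinfo->rx_broadcast;
	vf_stats->multicast  = vfinfo->rx_multicast;

	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	/* ... the usual ndo_open/ndo_stop/ndo_start_xmit ... */
	.ndo_get_vf_stats = my_ndo_get_vf_stats,
};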
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 63560d0a8dfe..00050dfd9f23 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
| @@ -10,7 +10,8 @@ | |||
| 10 | #include <linux/wait.h> | 10 | #include <linux/wait.h> |
| 11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
| 12 | #include <linux/static_key.h> | 12 | #include <linux/static_key.h> |
| 13 | #include <uapi/linux/netfilter.h> | 13 | #include <linux/netfilter_defs.h> |
| 14 | |||
| 14 | #ifdef CONFIG_NETFILTER | 15 | #ifdef CONFIG_NETFILTER |
| 15 | static inline int NF_DROP_GETERR(int verdict) | 16 | static inline int NF_DROP_GETERR(int verdict) |
| 16 | { | 17 | { |
| @@ -38,9 +39,6 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, | |||
| 38 | 39 | ||
| 39 | int netfilter_init(void); | 40 | int netfilter_init(void); |
| 40 | 41 | ||
| 41 | /* Largest hook number + 1 */ | ||
| 42 | #define NF_MAX_HOOKS 8 | ||
| 43 | |||
| 44 | struct sk_buff; | 42 | struct sk_buff; |
| 45 | 43 | ||
| 46 | struct nf_hook_ops; | 44 | struct nf_hook_ops; |
| @@ -54,10 +52,12 @@ struct nf_hook_state { | |||
| 54 | struct net_device *in; | 52 | struct net_device *in; |
| 55 | struct net_device *out; | 53 | struct net_device *out; |
| 56 | struct sock *sk; | 54 | struct sock *sk; |
| 55 | struct list_head *hook_list; | ||
| 57 | int (*okfn)(struct sock *, struct sk_buff *); | 56 | int (*okfn)(struct sock *, struct sk_buff *); |
| 58 | }; | 57 | }; |
| 59 | 58 | ||
| 60 | static inline void nf_hook_state_init(struct nf_hook_state *p, | 59 | static inline void nf_hook_state_init(struct nf_hook_state *p, |
| 60 | struct list_head *hook_list, | ||
| 61 | unsigned int hook, | 61 | unsigned int hook, |
| 62 | int thresh, u_int8_t pf, | 62 | int thresh, u_int8_t pf, |
| 63 | struct net_device *indev, | 63 | struct net_device *indev, |
| @@ -71,6 +71,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, | |||
| 71 | p->in = indev; | 71 | p->in = indev; |
| 72 | p->out = outdev; | 72 | p->out = outdev; |
| 73 | p->sk = sk; | 73 | p->sk = sk; |
| 74 | p->hook_list = hook_list; | ||
| 74 | p->okfn = okfn; | 75 | p->okfn = okfn; |
| 75 | } | 76 | } |
| 76 | 77 | ||
| @@ -79,16 +80,17 @@ typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, | |||
| 79 | const struct nf_hook_state *state); | 80 | const struct nf_hook_state *state); |
| 80 | 81 | ||
| 81 | struct nf_hook_ops { | 82 | struct nf_hook_ops { |
| 82 | struct list_head list; | 83 | struct list_head list; |
| 83 | 84 | ||
| 84 | /* User fills in from here down. */ | 85 | /* User fills in from here down. */ |
| 85 | nf_hookfn *hook; | 86 | nf_hookfn *hook; |
| 86 | struct module *owner; | 87 | struct net_device *dev; |
| 87 | void *priv; | 88 | struct module *owner; |
| 88 | u_int8_t pf; | 89 | void *priv; |
| 89 | unsigned int hooknum; | 90 | u_int8_t pf; |
| 91 | unsigned int hooknum; | ||
| 90 | /* Hooks are ordered in ascending priority. */ | 92 | /* Hooks are ordered in ascending priority. */ |
| 91 | int priority; | 93 | int priority; |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
| 94 | struct nf_sockopt_ops { | 96 | struct nf_sockopt_ops { |
| @@ -131,26 +133,33 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | |||
| 131 | #ifdef HAVE_JUMP_LABEL | 133 | #ifdef HAVE_JUMP_LABEL |
| 132 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 134 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
| 133 | 135 | ||
| 134 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | 136 | static inline bool nf_hook_list_active(struct list_head *nf_hook_list, |
| 137 | u_int8_t pf, unsigned int hook) | ||
| 135 | { | 138 | { |
| 136 | if (__builtin_constant_p(pf) && | 139 | if (__builtin_constant_p(pf) && |
| 137 | __builtin_constant_p(hook)) | 140 | __builtin_constant_p(hook)) |
| 138 | return static_key_false(&nf_hooks_needed[pf][hook]); | 141 | return static_key_false(&nf_hooks_needed[pf][hook]); |
| 139 | 142 | ||
| 140 | return !list_empty(&nf_hooks[pf][hook]); | 143 | return !list_empty(nf_hook_list); |
| 141 | } | 144 | } |
| 142 | #else | 145 | #else |
| 143 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | 146 | static inline bool nf_hook_list_active(struct list_head *nf_hook_list, |
| 147 | u_int8_t pf, unsigned int hook) | ||
| 144 | { | 148 | { |
| 145 | return !list_empty(&nf_hooks[pf][hook]); | 149 | return !list_empty(nf_hook_list); |
| 146 | } | 150 | } |
| 147 | #endif | 151 | #endif |
| 148 | 152 | ||
| 153 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | ||
| 154 | { | ||
| 155 | return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook); | ||
| 156 | } | ||
| 157 | |||
| 149 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); | 158 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); |
| 150 | 159 | ||
| 151 | /** | 160 | /** |
| 152 | * nf_hook_thresh - call a netfilter hook | 161 | * nf_hook_thresh - call a netfilter hook |
| 153 | * | 162 | * |
| 154 | * Returns 1 if the hook has allowed the packet to pass. The function | 163 | * Returns 1 if the hook has allowed the packet to pass. The function |
| 155 | * okfn must be invoked by the caller in this case. Any other return | 164 | * okfn must be invoked by the caller in this case. Any other return |
| 156 | * value indicates the packet has been consumed by the hook. | 165 | * value indicates the packet has been consumed by the hook. |
| @@ -166,8 +175,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, | |||
| 166 | if (nf_hooks_active(pf, hook)) { | 175 | if (nf_hooks_active(pf, hook)) { |
| 167 | struct nf_hook_state state; | 176 | struct nf_hook_state state; |
| 168 | 177 | ||
| 169 | nf_hook_state_init(&state, hook, thresh, pf, | 178 | nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh, |
| 170 | indev, outdev, sk, okfn); | 179 | pf, indev, outdev, sk, okfn); |
| 171 | return nf_hook_slow(skb, &state); | 180 | return nf_hook_slow(skb, &state); |
| 172 | } | 181 | } |
| 173 | return 1; | 182 | return 1; |
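The contract described above is unchanged for callers: a path that wants to run, say, the IPv4 PRE_ROUTING hooks still calls nf_hook_thresh() (or the NF_HOOK wrappers built on it) and only invokes its okfn when the hooks returned 1. A hedged sketch with hypothetical my_deliver/my_rx functions:

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

static int my_deliver(struct sock *sk, struct sk_buff *skb)
{
	/* hypothetical local delivery path */
	consume_skb(skb);
	return 0;
}

static int my_rx(struct sk_buff *skb, struct net_device *indev)
{
	int ret;

	/* nf_hook_thresh() now also records &nf_hooks[pf][hook] in the
	 * nf_hook_state so nf_hook_slow() walks an explicit hook list. */
	ret = nf_hook_thresh(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
			     indev, NULL, my_deliver, INT_MIN);
	if (ret == 1)
		ret = my_deliver(NULL, skb);

	return ret;
}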
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 34b172301558..48bb01edcf30 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
| @@ -108,8 +108,13 @@ struct ip_set_counter { | |||
| 108 | atomic64_t packets; | 108 | atomic64_t packets; |
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | struct ip_set_comment_rcu { | ||
| 112 | struct rcu_head rcu; | ||
| 113 | char str[0]; | ||
| 114 | }; | ||
| 115 | |||
| 111 | struct ip_set_comment { | 116 | struct ip_set_comment { |
| 112 | char *str; | 117 | struct ip_set_comment_rcu __rcu *c; |
| 113 | }; | 118 | }; |
| 114 | 119 | ||
| 115 | struct ip_set_skbinfo { | 120 | struct ip_set_skbinfo { |
| @@ -122,13 +127,13 @@ struct ip_set_skbinfo { | |||
| 122 | struct ip_set; | 127 | struct ip_set; |
| 123 | 128 | ||
| 124 | #define ext_timeout(e, s) \ | 129 | #define ext_timeout(e, s) \ |
| 125 | (unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]) | 130 | ((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])) |
| 126 | #define ext_counter(e, s) \ | 131 | #define ext_counter(e, s) \ |
| 127 | (struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) | 132 | ((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])) |
| 128 | #define ext_comment(e, s) \ | 133 | #define ext_comment(e, s) \ |
| 129 | (struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) | 134 | ((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])) |
| 130 | #define ext_skbinfo(e, s) \ | 135 | #define ext_skbinfo(e, s) \ |
| 131 | (struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]) | 136 | ((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])) |
| 132 | 137 | ||
| 133 | typedef int (*ipset_adtfn)(struct ip_set *set, void *value, | 138 | typedef int (*ipset_adtfn)(struct ip_set *set, void *value, |
| 134 | const struct ip_set_ext *ext, | 139 | const struct ip_set_ext *ext, |
| @@ -176,6 +181,9 @@ struct ip_set_type_variant { | |||
| 176 | /* List elements */ | 181 | /* List elements */ |
| 177 | int (*list)(const struct ip_set *set, struct sk_buff *skb, | 182 | int (*list)(const struct ip_set *set, struct sk_buff *skb, |
| 178 | struct netlink_callback *cb); | 183 | struct netlink_callback *cb); |
| 184 | /* Keep the listing private while a resize runs in parallel */ | ||
| 185 | void (*uref)(struct ip_set *set, struct netlink_callback *cb, | ||
| 186 | bool start); | ||
| 179 | 187 | ||
| 180 | /* Return true if "b" set is the same as "a" | 188 | /* Return true if "b" set is the same as "a" |
| 181 | * according to the create set parameters */ | 189 | * according to the create set parameters */ |
| @@ -223,7 +231,7 @@ struct ip_set { | |||
| 223 | /* The name of the set */ | 231 | /* The name of the set */ |
| 224 | char name[IPSET_MAXNAMELEN]; | 232 | char name[IPSET_MAXNAMELEN]; |
| 225 | /* Lock protecting the set data */ | 233 | /* Lock protecting the set data */ |
| 226 | rwlock_t lock; | 234 | spinlock_t lock; |
| 227 | /* References to the set */ | 235 | /* References to the set */ |
| 228 | u32 ref; | 236 | u32 ref; |
| 229 | /* The core set type */ | 237 | /* The core set type */ |
| @@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) | |||
| 341 | cpu_to_be64((u64)skbinfo->skbmark << 32 | | 349 | cpu_to_be64((u64)skbinfo->skbmark << 32 | |
| 342 | skbinfo->skbmarkmask))) || | 350 | skbinfo->skbmarkmask))) || |
| 343 | (skbinfo->skbprio && | 351 | (skbinfo->skbprio && |
| 344 | nla_put_net32(skb, IPSET_ATTR_SKBPRIO, | 352 | nla_put_net32(skb, IPSET_ATTR_SKBPRIO, |
| 345 | cpu_to_be32(skbinfo->skbprio))) || | 353 | cpu_to_be32(skbinfo->skbprio))) || |
| 346 | (skbinfo->skbqueue && | 354 | (skbinfo->skbqueue && |
| 347 | nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, | 355 | nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, |
| 348 | cpu_to_be16(skbinfo->skbqueue))); | 356 | cpu_to_be16(skbinfo->skbqueue))); |
| 349 | |||
| 350 | } | 357 | } |
| 351 | 358 | ||
| 352 | static inline void | 359 | static inline void |
| @@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter, | |||
| 380 | 387 | ||
| 381 | /* Netlink CB args */ | 388 | /* Netlink CB args */ |
| 382 | enum { | 389 | enum { |
| 383 | IPSET_CB_NET = 0, | 390 | IPSET_CB_NET = 0, /* net namespace */ |
| 384 | IPSET_CB_DUMP, | 391 | IPSET_CB_DUMP, /* dump single set/all sets */ |
| 385 | IPSET_CB_INDEX, | 392 | IPSET_CB_INDEX, /* set index */ |
| 386 | IPSET_CB_ARG0, | 393 | IPSET_CB_PRIVATE, /* set private data */ |
| 394 | IPSET_CB_ARG0, /* type specific */ | ||
| 387 | IPSET_CB_ARG1, | 395 | IPSET_CB_ARG1, |
| 388 | IPSET_CB_ARG2, | ||
| 389 | }; | 396 | }; |
| 390 | 397 | ||
| 391 | /* register and unregister set references */ | 398 | /* register and unregister set references */ |
| @@ -533,29 +540,9 @@ bitmap_bytes(u32 a, u32 b) | |||
| 533 | #include <linux/netfilter/ipset/ip_set_timeout.h> | 540 | #include <linux/netfilter/ipset/ip_set_timeout.h> |
| 534 | #include <linux/netfilter/ipset/ip_set_comment.h> | 541 | #include <linux/netfilter/ipset/ip_set_comment.h> |
| 535 | 542 | ||
| 536 | static inline int | 543 | int |
| 537 | ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, | 544 | ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, |
| 538 | const void *e, bool active) | 545 | const void *e, bool active); |
| 539 | { | ||
| 540 | if (SET_WITH_TIMEOUT(set)) { | ||
| 541 | unsigned long *timeout = ext_timeout(e, set); | ||
| 542 | |||
| 543 | if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, | ||
| 544 | htonl(active ? ip_set_timeout_get(timeout) | ||
| 545 | : *timeout))) | ||
| 546 | return -EMSGSIZE; | ||
| 547 | } | ||
| 548 | if (SET_WITH_COUNTER(set) && | ||
| 549 | ip_set_put_counter(skb, ext_counter(e, set))) | ||
| 550 | return -EMSGSIZE; | ||
| 551 | if (SET_WITH_COMMENT(set) && | ||
| 552 | ip_set_put_comment(skb, ext_comment(e, set))) | ||
| 553 | return -EMSGSIZE; | ||
| 554 | if (SET_WITH_SKBINFO(set) && | ||
| 555 | ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) | ||
| 556 | return -EMSGSIZE; | ||
| 557 | return 0; | ||
| 558 | } | ||
| 559 | 546 | ||
| 560 | #define IP_SET_INIT_KEXT(skb, opt, set) \ | 547 | #define IP_SET_INIT_KEXT(skb, opt, set) \ |
| 561 | { .bytes = (skb)->len, .packets = 1, \ | 548 | { .bytes = (skb)->len, .packets = 1, \ |
| @@ -565,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, | |||
| 565 | { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ | 552 | { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ |
| 566 | .timeout = (set)->timeout } | 553 | .timeout = (set)->timeout } |
| 567 | 554 | ||
| 568 | #define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b)) | ||
| 569 | |||
| 570 | #define IPSET_CONCAT(a, b) a##b | 555 | #define IPSET_CONCAT(a, b) a##b |
| 571 | #define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b) | 556 | #define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b) |
| 572 | 557 | ||
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 21217ea008d7..8d0248525957 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h | |||
| @@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb) | |||
| 16 | return nla_data(tb); | 16 | return nla_data(tb); |
| 17 | } | 17 | } |
| 18 | 18 | ||
| 19 | /* Called from uadd only, protected by the set spinlock. | ||
| 20 | * The kadt functions don't use the comment extensions in any way. | ||
| 21 | */ | ||
| 19 | static inline void | 22 | static inline void |
| 20 | ip_set_init_comment(struct ip_set_comment *comment, | 23 | ip_set_init_comment(struct ip_set_comment *comment, |
| 21 | const struct ip_set_ext *ext) | 24 | const struct ip_set_ext *ext) |
| 22 | { | 25 | { |
| 26 | struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); | ||
| 23 | size_t len = ext->comment ? strlen(ext->comment) : 0; | 27 | size_t len = ext->comment ? strlen(ext->comment) : 0; |
| 24 | 28 | ||
| 25 | if (unlikely(comment->str)) { | 29 | if (unlikely(c)) { |
| 26 | kfree(comment->str); | 30 | kfree_rcu(c, rcu); |
| 27 | comment->str = NULL; | 31 | rcu_assign_pointer(comment->c, NULL); |
| 28 | } | 32 | } |
| 29 | if (!len) | 33 | if (!len) |
| 30 | return; | 34 | return; |
| 31 | if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) | 35 | if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) |
| 32 | len = IPSET_MAX_COMMENT_SIZE; | 36 | len = IPSET_MAX_COMMENT_SIZE; |
| 33 | comment->str = kzalloc(len + 1, GFP_ATOMIC); | 37 | c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC); |
| 34 | if (unlikely(!comment->str)) | 38 | if (unlikely(!c)) |
| 35 | return; | 39 | return; |
| 36 | strlcpy(comment->str, ext->comment, len + 1); | 40 | strlcpy(c->str, ext->comment, len + 1); |
| 41 | rcu_assign_pointer(comment->c, c); | ||
| 37 | } | 42 | } |
| 38 | 43 | ||
| 44 | /* Used only when dumping a set, protected by rcu_read_lock_bh() */ | ||
| 39 | static inline int | 45 | static inline int |
| 40 | ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) | 46 | ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) |
| 41 | { | 47 | { |
| 42 | if (!comment->str) | 48 | struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); |
| 49 | |||
| 50 | if (!c) | ||
| 43 | return 0; | 51 | return 0; |
| 44 | return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str); | 52 | return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); |
| 45 | } | 53 | } |
| 46 | 54 | ||
| 55 | /* Called from uadd/udel, flush or the garbage collectors, protected | ||
| 56 | * by the set spinlock. | ||
| 57 | * Also called when the set is destroyed and there can't be any user | ||
| 58 | * of the set data anymore. | ||
| 59 | */ | ||
| 47 | static inline void | 60 | static inline void |
| 48 | ip_set_comment_free(struct ip_set_comment *comment) | 61 | ip_set_comment_free(struct ip_set_comment *comment) |
| 49 | { | 62 | { |
| 50 | if (unlikely(!comment->str)) | 63 | struct ip_set_comment_rcu *c; |
| 64 | |||
| 65 | c = rcu_dereference_protected(comment->c, 1); | ||
| 66 | if (unlikely(!c)) | ||
| 51 | return; | 67 | return; |
| 52 | kfree(comment->str); | 68 | kfree_rcu(c, rcu); |
| 53 | comment->str = NULL; | 69 | rcu_assign_pointer(comment->c, NULL); |
| 54 | } | 70 | } |
| 55 | 71 | ||
| 56 | #endif | 72 | #endif |
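Since the comment string is now RCU-protected, readers on the dump path must hold rcu_read_lock_bh() around ip_set_put_comment(), matching the rule stated in the comments above. A hypothetical dump helper illustrating the pattern (real set types do this from their ->list callback):

static int my_dump_comment(struct sk_buff *skb, const struct ip_set *set,
			   const void *e)
{
	int ret = 0;

	rcu_read_lock_bh();
	if (SET_WITH_COMMENT(set) &&
	    ip_set_put_comment(skb, ext_comment(e, set)))
		ret = -EMSGSIZE;
	rcu_read_unlock_bh();

	return ret;
}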
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 83c2f9e0886c..1d6a935c1ac5 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h | |||
| @@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline bool | 42 | static inline bool |
| 43 | ip_set_timeout_test(unsigned long timeout) | 43 | ip_set_timeout_expired(unsigned long *t) |
| 44 | { | 44 | { |
| 45 | return timeout == IPSET_ELEM_PERMANENT || | 45 | return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); |
| 46 | time_is_after_jiffies(timeout); | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline bool | ||
| 50 | ip_set_timeout_expired(unsigned long *timeout) | ||
| 51 | { | ||
| 52 | return *timeout != IPSET_ELEM_PERMANENT && | ||
| 53 | time_is_before_jiffies(*timeout); | ||
| 54 | } | 46 | } |
| 55 | 47 | ||
| 56 | static inline void | 48 | static inline void |
| 57 | ip_set_timeout_set(unsigned long *timeout, u32 t) | 49 | ip_set_timeout_set(unsigned long *timeout, u32 value) |
| 58 | { | 50 | { |
| 59 | if (!t) { | 51 | unsigned long t; |
| 52 | |||
| 53 | if (!value) { | ||
| 60 | *timeout = IPSET_ELEM_PERMANENT; | 54 | *timeout = IPSET_ELEM_PERMANENT; |
| 61 | return; | 55 | return; |
| 62 | } | 56 | } |
| 63 | 57 | ||
| 64 | *timeout = msecs_to_jiffies(t * 1000) + jiffies; | 58 | t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; |
| 65 | if (*timeout == IPSET_ELEM_PERMANENT) | 59 | if (t == IPSET_ELEM_PERMANENT) |
| 66 | /* Bingo! :-) */ | 60 | /* Bingo! :-) */ |
| 67 | (*timeout)--; | 61 | t--; |
| 62 | *timeout = t; | ||
| 68 | } | 63 | } |
| 69 | 64 | ||
| 70 | static inline u32 | 65 | static inline u32 |
| 71 | ip_set_timeout_get(unsigned long *timeout) | 66 | ip_set_timeout_get(unsigned long *timeout) |
| 72 | { | 67 | { |
| 73 | return *timeout == IPSET_ELEM_PERMANENT ? 0 : | 68 | return *timeout == IPSET_ELEM_PERMANENT ? 0 : |
| 74 | jiffies_to_msecs(*timeout - jiffies)/1000; | 69 | jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; |
| 75 | } | 70 | } |
| 76 | 71 | ||
| 77 | #endif /* __KERNEL__ */ | 72 | #endif /* __KERNEL__ */ |
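For clarity: a user-supplied value of 0 still means IPSET_ELEM_PERMANENT, any other value is a number of seconds converted to a jiffies deadline (value * MSEC_PER_SEC milliseconds from now), and a deadline that accidentally equals IPSET_ELEM_PERMANENT is nudged back one jiffy. A small sketch of how a set type uses the helpers on an element with a timeout extension; the names are illustrative:

static void my_refresh_elem(struct ip_set *set, void *e, u32 seconds)
{
	unsigned long *t = ext_timeout(e, set);

	/* 0 => permanent, otherwise "seconds" from now. */
	ip_set_timeout_set(t, seconds);

	if (ip_set_timeout_expired(t))
		pr_debug("element already expired\n");
	else
		pr_debug("%u second(s) left\n", ip_set_timeout_get(t));
}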
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index a3e215bb0241..286098a5667f 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
| @@ -62,6 +62,7 @@ struct xt_mtchk_param { | |||
| 62 | void *matchinfo; | 62 | void *matchinfo; |
| 63 | unsigned int hook_mask; | 63 | unsigned int hook_mask; |
| 64 | u_int8_t family; | 64 | u_int8_t family; |
| 65 | bool nft_compat; | ||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | /** | 68 | /** |
| @@ -92,6 +93,7 @@ struct xt_tgchk_param { | |||
| 92 | void *targinfo; | 93 | void *targinfo; |
| 93 | unsigned int hook_mask; | 94 | unsigned int hook_mask; |
| 94 | u_int8_t family; | 95 | u_int8_t family; |
| 96 | bool nft_compat; | ||
| 95 | }; | 97 | }; |
| 96 | 98 | ||
| 97 | /* Target destructor parameters */ | 99 | /* Target destructor parameters */ |
| @@ -222,13 +224,10 @@ struct xt_table_info { | |||
| 222 | unsigned int stacksize; | 224 | unsigned int stacksize; |
| 223 | unsigned int __percpu *stackptr; | 225 | unsigned int __percpu *stackptr; |
| 224 | void ***jumpstack; | 226 | void ***jumpstack; |
| 225 | /* ipt_entry tables: one per CPU */ | 227 | |
| 226 | /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ | 228 | unsigned char entries[0] __aligned(8); |
| 227 | void *entries[1]; | ||
| 228 | }; | 229 | }; |
| 229 | 230 | ||
| 230 | #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ | ||
| 231 | + nr_cpu_ids * sizeof(char *)) | ||
| 232 | int xt_register_target(struct xt_target *target); | 231 | int xt_register_target(struct xt_target *target); |
| 233 | void xt_unregister_target(struct xt_target *target); | 232 | void xt_unregister_target(struct xt_target *target); |
| 234 | int xt_register_targets(struct xt_target *target, unsigned int n); | 233 | int xt_register_targets(struct xt_target *target, unsigned int n); |
| @@ -351,6 +350,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a, | |||
| 351 | return ret; | 350 | return ret; |
| 352 | } | 351 | } |
| 353 | 352 | ||
| 353 | |||
| 354 | /* On SMP, ip(6)t_entry->counters.pcnt holds address of the | ||
| 355 | * real (percpu) counter. On !SMP, it's just the packet count, | ||
| 356 | * so nothing needs to be done there. | ||
| 357 | * | ||
| 358 | * xt_percpu_counter_alloc returns the address of the percpu | ||
| 359 | * counter, or 0 on !SMP. We force an alignment of 16 bytes | ||
| 360 | * so that bytes/packets share a common cache line. | ||
| 361 | * | ||
| 362 | * Hence the caller must use IS_ERR_VALUE to check for an error; this | ||
| 363 | * allows us to return 0 for single core systems without forcing | ||
| 364 | * callers to deal with SMP vs. NONSMP issues. | ||
| 365 | */ | ||
| 366 | static inline u64 xt_percpu_counter_alloc(void) | ||
| 367 | { | ||
| 368 | if (nr_cpu_ids > 1) { | ||
| 369 | void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), | ||
| 370 | sizeof(struct xt_counters)); | ||
| 371 | |||
| 372 | if (res == NULL) | ||
| 373 | return (u64) -ENOMEM; | ||
| 374 | |||
| 375 | return (u64) (__force unsigned long) res; | ||
| 376 | } | ||
| 377 | |||
| 378 | return 0; | ||
| 379 | } | ||
| 380 | static inline void xt_percpu_counter_free(u64 pcnt) | ||
| 381 | { | ||
| 382 | if (nr_cpu_ids > 1) | ||
| 383 | free_percpu((void __percpu *) (unsigned long) pcnt); | ||
| 384 | } | ||
| 385 | |||
| 386 | static inline struct xt_counters * | ||
| 387 | xt_get_this_cpu_counter(struct xt_counters *cnt) | ||
| 388 | { | ||
| 389 | if (nr_cpu_ids > 1) | ||
| 390 | return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt); | ||
| 391 | |||
| 392 | return cnt; | ||
| 393 | } | ||
| 394 | |||
| 395 | static inline struct xt_counters * | ||
| 396 | xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) | ||
| 397 | { | ||
| 398 | if (nr_cpu_ids > 1) | ||
| 399 | return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu); | ||
| 400 | |||
| 401 | return cnt; | ||
| 402 | } | ||
| 403 | |||
| 354 | struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); | 404 | struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); |
| 355 | void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); | 405 | void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); |
| 356 | 406 | ||
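These helpers hide the SMP/!SMP difference behind a u64 cookie stored in the entry's counters; the caller still has to check for allocation failure with IS_ERR_VALUE() because 0 is a valid (non-error) value on uniprocessor builds. A hedged caller sketch, loosely modelled on the ip_tables check/accounting paths (the example_* function names are illustrative):

	/* Illustrative only: allocate, use and free the per-entry counter. */
	static int example_check_entry(struct ipt_entry *e)
	{
		e->counters.pcnt = xt_percpu_counter_alloc();
		if (IS_ERR_VALUE(e->counters.pcnt))
			return -ENOMEM;
		return 0;
	}

	static void example_account(struct ipt_entry *e, const struct sk_buff *skb)
	{
		struct xt_counters *counter = xt_get_this_cpu_counter(&e->counters);

		ADD_COUNTER(*counter, skb->len, 1);	/* bytes, packets */
	}

	static void example_cleanup_entry(struct ipt_entry *e)
	{
		xt_percpu_counter_free(e->counters.pcnt);
	}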
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index f2fdb5a52070..6d80fc686323 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h | |||
| @@ -20,13 +20,6 @@ enum nf_br_hook_priorities { | |||
| 20 | #define BRNF_BRIDGED_DNAT 0x02 | 20 | #define BRNF_BRIDGED_DNAT 0x02 |
| 21 | #define BRNF_NF_BRIDGE_PREROUTING 0x08 | 21 | #define BRNF_NF_BRIDGE_PREROUTING 0x08 |
| 22 | 22 | ||
| 23 | static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) | ||
| 24 | { | ||
| 25 | if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE) | ||
| 26 | return PPPOE_SES_HLEN; | ||
| 27 | return 0; | ||
| 28 | } | ||
| 29 | |||
| 30 | int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); | 23 | int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); |
| 31 | 24 | ||
| 32 | static inline void br_drop_fake_rtable(struct sk_buff *skb) | 25 | static inline void br_drop_fake_rtable(struct sk_buff *skb) |
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index f1bd3962e6b6..8ca6d6464ea3 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * | 6 | * |
| 7 | * ebtables.c,v 2.0, April, 2002 | 7 | * ebtables.c,v 2.0, April, 2002 |
| 8 | * | 8 | * |
| 9 | * This code is stongly inspired on the iptables code which is | 9 | * This code is strongly inspired by the iptables code which is |
| 10 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | 10 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling |
| 11 | */ | 11 | */ |
| 12 | #ifndef __LINUX_BRIDGE_EFF_H | 12 | #ifndef __LINUX_BRIDGE_EFF_H |
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h new file mode 100644 index 000000000000..d3a7f8597e82 --- /dev/null +++ b/include/linux/netfilter_defs.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | #ifndef __LINUX_NETFILTER_CORE_H_ | ||
| 2 | #define __LINUX_NETFILTER_CORE_H_ | ||
| 3 | |||
| 4 | #include <uapi/linux/netfilter.h> | ||
| 5 | |||
| 6 | /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ | ||
| 7 | #define NF_MAX_HOOKS 8 | ||
| 8 | |||
| 9 | #endif | ||
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h new file mode 100644 index 000000000000..cb0727fe2b3d --- /dev/null +++ b/include/linux/netfilter_ingress.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | #ifndef _NETFILTER_INGRESS_H_ | ||
| 2 | #define _NETFILTER_INGRESS_H_ | ||
| 3 | |||
| 4 | #include <linux/netfilter.h> | ||
| 5 | #include <linux/netdevice.h> | ||
| 6 | |||
| 7 | #ifdef CONFIG_NETFILTER_INGRESS | ||
| 8 | static inline int nf_hook_ingress_active(struct sk_buff *skb) | ||
| 9 | { | ||
| 10 | return nf_hook_list_active(&skb->dev->nf_hooks_ingress, | ||
| 11 | NFPROTO_NETDEV, NF_NETDEV_INGRESS); | ||
| 12 | } | ||
| 13 | |||
| 14 | static inline int nf_hook_ingress(struct sk_buff *skb) | ||
| 15 | { | ||
| 16 | struct nf_hook_state state; | ||
| 17 | |||
| 18 | nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, | ||
| 19 | NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, | ||
| 20 | skb->dev, NULL, NULL); | ||
| 21 | return nf_hook_slow(skb, &state); | ||
| 22 | } | ||
| 23 | |||
| 24 | static inline void nf_hook_ingress_init(struct net_device *dev) | ||
| 25 | { | ||
| 26 | INIT_LIST_HEAD(&dev->nf_hooks_ingress); | ||
| 27 | } | ||
| 28 | #else /* CONFIG_NETFILTER_INGRESS */ | ||
| 29 | static inline int nf_hook_ingress_active(struct sk_buff *skb) | ||
| 30 | { | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline int nf_hook_ingress(struct sk_buff *skb) | ||
| 35 | { | ||
| 36 | return 0; | ||
| 37 | } | ||
| 38 | |||
| 39 | static inline void nf_hook_ingress_init(struct net_device *dev) {} | ||
| 40 | #endif /* CONFIG_NETFILTER_INGRESS */ | ||
| 41 | #endif /* _NETFILTER_INGRESS_H_ */ | ||
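The ingress hook is meant to be cheap to test on the receive path: nf_hook_ingress_active() is a quick check for registered hooks, and only when one exists does nf_hook_ingress() take the nf_hook_slow() path. A hedged sketch of the intended call pattern (the surrounding function is illustrative; the return-value convention assumed here is that 1 means "accepted, keep processing" and anything else means the skb was dropped or taken over by a hook):

	/* Illustrative only: consult the ingress hook early in packet receive. */
	static int example_ingress(struct sk_buff *skb)
	{
		if (!nf_hook_ingress_active(skb))
			return 1;		/* no hooks registered: continue */

		/* skb must not be touched again unless this returns 1 */
		return nf_hook_ingress(skb);
	}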
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 64dad1cc1a4b..8b7d28f3aada 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h | |||
| @@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void); | |||
| 25 | struct nf_ipv6_ops { | 25 | struct nf_ipv6_ops { |
| 26 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, | 26 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, |
| 27 | const struct net_device *dev, int strict); | 27 | const struct net_device *dev, int strict); |
| 28 | void (*route_input)(struct sk_buff *skb); | ||
| 29 | int (*fragment)(struct sock *sk, struct sk_buff *skb, | ||
| 30 | int (*output)(struct sock *, struct sk_buff *)); | ||
| 28 | }; | 31 | }; |
| 29 | 32 | ||
| 30 | extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; | 33 | extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 6835c1279df7..9120edb650a0 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
| @@ -28,6 +28,8 @@ struct netlink_skb_parms { | |||
| 28 | __u32 dst_group; | 28 | __u32 dst_group; |
| 29 | __u32 flags; | 29 | __u32 flags; |
| 30 | struct sock *sk; | 30 | struct sock *sk; |
| 31 | bool nsid_is_set; | ||
| 32 | int nsid; | ||
| 31 | }; | 33 | }; |
| 32 | 34 | ||
| 33 | #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) | 35 | #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 32201c269890..b8e72aad919c 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -500,6 +500,7 @@ enum { | |||
| 500 | NFSPROC4_CLNT_SEEK, | 500 | NFSPROC4_CLNT_SEEK, |
| 501 | NFSPROC4_CLNT_ALLOCATE, | 501 | NFSPROC4_CLNT_ALLOCATE, |
| 502 | NFSPROC4_CLNT_DEALLOCATE, | 502 | NFSPROC4_CLNT_DEALLOCATE, |
| 503 | NFSPROC4_CLNT_LAYOUTSTATS, | ||
| 503 | }; | 504 | }; |
| 504 | 505 | ||
| 505 | /* nfs41 types */ | 506 | /* nfs41 types */ |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b95f914ce083..874b77228fb9 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -219,6 +219,7 @@ struct nfs_inode { | |||
| 219 | #define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ | 219 | #define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ |
| 220 | #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ | 220 | #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ |
| 221 | #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ | 221 | #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ |
| 222 | #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ | ||
| 222 | 223 | ||
| 223 | static inline struct nfs_inode *NFS_I(const struct inode *inode) | 224 | static inline struct nfs_inode *NFS_I(const struct inode *inode) |
| 224 | { | 225 | { |
| @@ -291,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) | |||
| 291 | struct nfs_inode *nfsi = NFS_I(inode); | 292 | struct nfs_inode *nfsi = NFS_I(inode); |
| 292 | 293 | ||
| 293 | spin_lock(&inode->i_lock); | 294 | spin_lock(&inode->i_lock); |
| 294 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; | 295 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR | |
| 296 | NFS_INO_REVAL_PAGECACHE | | ||
| 297 | NFS_INO_INVALID_ACCESS | | ||
| 298 | NFS_INO_INVALID_ACL; | ||
| 295 | if (S_ISDIR(inode->i_mode)) | 299 | if (S_ISDIR(inode->i_mode)) |
| 296 | nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; | 300 | nfsi->cache_validity |= NFS_INO_INVALID_DATA; |
| 297 | spin_unlock(&inode->i_lock); | 301 | spin_unlock(&inode->i_lock); |
| 298 | } | 302 | } |
| 299 | 303 | ||
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 5e1273d4de14..20bc8e51b161 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -220,7 +220,7 @@ struct nfs_server { | |||
| 220 | #define NFS_CAP_SYMLINKS (1U << 2) | 220 | #define NFS_CAP_SYMLINKS (1U << 2) |
| 221 | #define NFS_CAP_ACLS (1U << 3) | 221 | #define NFS_CAP_ACLS (1U << 3) |
| 222 | #define NFS_CAP_ATOMIC_OPEN (1U << 4) | 222 | #define NFS_CAP_ATOMIC_OPEN (1U << 4) |
| 223 | #define NFS_CAP_CHANGE_ATTR (1U << 5) | 223 | /* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ |
| 224 | #define NFS_CAP_FILEID (1U << 6) | 224 | #define NFS_CAP_FILEID (1U << 6) |
| 225 | #define NFS_CAP_MODE (1U << 7) | 225 | #define NFS_CAP_MODE (1U << 7) |
| 226 | #define NFS_CAP_NLINK (1U << 8) | 226 | #define NFS_CAP_NLINK (1U << 8) |
| @@ -237,5 +237,6 @@ struct nfs_server { | |||
| 237 | #define NFS_CAP_SEEK (1U << 19) | 237 | #define NFS_CAP_SEEK (1U << 19) |
| 238 | #define NFS_CAP_ALLOCATE (1U << 20) | 238 | #define NFS_CAP_ALLOCATE (1U << 20) |
| 239 | #define NFS_CAP_DEALLOCATE (1U << 21) | 239 | #define NFS_CAP_DEALLOCATE (1U << 21) |
| 240 | #define NFS_CAP_LAYOUTSTATS (1U << 22) | ||
| 240 | 241 | ||
| 241 | #endif | 242 | #endif |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3eb072dbce83..f2f650f136ee 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
| @@ -67,7 +67,6 @@ struct nfs_rw_ops { | |||
| 67 | const fmode_t rw_mode; | 67 | const fmode_t rw_mode; |
| 68 | struct nfs_pgio_header *(*rw_alloc_header)(void); | 68 | struct nfs_pgio_header *(*rw_alloc_header)(void); |
| 69 | void (*rw_free_header)(struct nfs_pgio_header *); | 69 | void (*rw_free_header)(struct nfs_pgio_header *); |
| 70 | void (*rw_release)(struct nfs_pgio_header *); | ||
| 71 | int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, | 70 | int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, |
| 72 | struct inode *); | 71 | struct inode *); |
| 73 | void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); | 72 | void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 93ab6071bbe9..7bbe50504211 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -316,6 +316,49 @@ struct nfs4_layoutreturn { | |||
| 316 | int rpc_status; | 316 | int rpc_status; |
| 317 | }; | 317 | }; |
| 318 | 318 | ||
| 319 | #define PNFS_LAYOUTSTATS_MAXSIZE 256 | ||
| 320 | |||
| 321 | struct nfs42_layoutstat_args; | ||
| 322 | struct nfs42_layoutstat_devinfo; | ||
| 323 | typedef void (*layoutstats_encode_t)(struct xdr_stream *, | ||
| 324 | struct nfs42_layoutstat_args *, | ||
| 325 | struct nfs42_layoutstat_devinfo *); | ||
| 326 | |||
| 327 | /* Per file per deviceid layoutstats */ | ||
| 328 | struct nfs42_layoutstat_devinfo { | ||
| 329 | struct nfs4_deviceid dev_id; | ||
| 330 | __u64 offset; | ||
| 331 | __u64 length; | ||
| 332 | __u64 read_count; | ||
| 333 | __u64 read_bytes; | ||
| 334 | __u64 write_count; | ||
| 335 | __u64 write_bytes; | ||
| 336 | __u32 layout_type; | ||
| 337 | layoutstats_encode_t layoutstats_encode; | ||
| 338 | void *layout_private; | ||
| 339 | }; | ||
| 340 | |||
| 341 | struct nfs42_layoutstat_args { | ||
| 342 | struct nfs4_sequence_args seq_args; | ||
| 343 | struct nfs_fh *fh; | ||
| 344 | struct inode *inode; | ||
| 345 | nfs4_stateid stateid; | ||
| 346 | int num_dev; | ||
| 347 | struct nfs42_layoutstat_devinfo *devinfo; | ||
| 348 | }; | ||
| 349 | |||
| 350 | struct nfs42_layoutstat_res { | ||
| 351 | struct nfs4_sequence_res seq_res; | ||
| 352 | int num_dev; | ||
| 353 | int rpc_status; | ||
| 354 | }; | ||
| 355 | |||
| 356 | struct nfs42_layoutstat_data { | ||
| 357 | struct inode *inode; | ||
| 358 | struct nfs42_layoutstat_args args; | ||
| 359 | struct nfs42_layoutstat_res res; | ||
| 360 | }; | ||
| 361 | |||
| 319 | struct stateowner_id { | 362 | struct stateowner_id { |
| 320 | __u64 create_time; | 363 | __u64 create_time; |
| 321 | __u32 uniquifier; | 364 | __u32 uniquifier; |
| @@ -984,17 +1027,14 @@ struct nfs4_readlink_res { | |||
| 984 | struct nfs4_sequence_res seq_res; | 1027 | struct nfs4_sequence_res seq_res; |
| 985 | }; | 1028 | }; |
| 986 | 1029 | ||
| 987 | #define NFS4_SETCLIENTID_NAMELEN (127) | ||
| 988 | struct nfs4_setclientid { | 1030 | struct nfs4_setclientid { |
| 989 | const nfs4_verifier * sc_verifier; | 1031 | const nfs4_verifier * sc_verifier; |
| 990 | unsigned int sc_name_len; | ||
| 991 | char sc_name[NFS4_SETCLIENTID_NAMELEN + 1]; | ||
| 992 | u32 sc_prog; | 1032 | u32 sc_prog; |
| 993 | unsigned int sc_netid_len; | 1033 | unsigned int sc_netid_len; |
| 994 | char sc_netid[RPCBIND_MAXNETIDLEN + 1]; | 1034 | char sc_netid[RPCBIND_MAXNETIDLEN + 1]; |
| 995 | unsigned int sc_uaddr_len; | 1035 | unsigned int sc_uaddr_len; |
| 996 | char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; | 1036 | char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; |
| 997 | u32 sc_cb_ident; | 1037 | struct nfs_client *sc_clnt; |
| 998 | struct rpc_cred *sc_cred; | 1038 | struct rpc_cred *sc_cred; |
| 999 | }; | 1039 | }; |
| 1000 | 1040 | ||
| @@ -1142,12 +1182,9 @@ struct nfs41_state_protection { | |||
| 1142 | struct nfs4_op_map allow; | 1182 | struct nfs4_op_map allow; |
| 1143 | }; | 1183 | }; |
| 1144 | 1184 | ||
| 1145 | #define NFS4_EXCHANGE_ID_LEN (48) | ||
| 1146 | struct nfs41_exchange_id_args { | 1185 | struct nfs41_exchange_id_args { |
| 1147 | struct nfs_client *client; | 1186 | struct nfs_client *client; |
| 1148 | nfs4_verifier *verifier; | 1187 | nfs4_verifier *verifier; |
| 1149 | unsigned int id_len; | ||
| 1150 | char id[NFS4_EXCHANGE_ID_LEN]; | ||
| 1151 | u32 flags; | 1188 | u32 flags; |
| 1152 | struct nfs41_state_protection state_protect; | 1189 | struct nfs41_state_protection state_protect; |
| 1153 | }; | 1190 | }; |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 3d46fb4708e0..f94da0e65dea 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -67,6 +67,7 @@ extern int nmi_watchdog_enabled; | |||
| 67 | extern int soft_watchdog_enabled; | 67 | extern int soft_watchdog_enabled; |
| 68 | extern int watchdog_user_enabled; | 68 | extern int watchdog_user_enabled; |
| 69 | extern int watchdog_thresh; | 69 | extern int watchdog_thresh; |
| 70 | extern unsigned long *watchdog_cpumask_bits; | ||
| 70 | extern int sysctl_softlockup_all_cpu_backtrace; | 71 | extern int sysctl_softlockup_all_cpu_backtrace; |
| 71 | struct ctl_table; | 72 | struct ctl_table; |
| 72 | extern int proc_watchdog(struct ctl_table *, int , | 73 | extern int proc_watchdog(struct ctl_table *, int , |
| @@ -77,6 +78,8 @@ extern int proc_soft_watchdog(struct ctl_table *, int , | |||
| 77 | void __user *, size_t *, loff_t *); | 78 | void __user *, size_t *, loff_t *); |
| 78 | extern int proc_watchdog_thresh(struct ctl_table *, int , | 79 | extern int proc_watchdog_thresh(struct ctl_table *, int , |
| 79 | void __user *, size_t *, loff_t *); | 80 | void __user *, size_t *, loff_t *); |
| 81 | extern int proc_watchdog_cpumask(struct ctl_table *, int, | ||
| 82 | void __user *, size_t *, loff_t *); | ||
| 80 | #endif | 83 | #endif |
| 81 | 84 | ||
| 82 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI | 85 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 9ac1a62fc6f5..b02f72bb8e32 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h | |||
| @@ -4,15 +4,20 @@ | |||
| 4 | * | 4 | * |
| 5 | * GPL LICENSE SUMMARY | 5 | * GPL LICENSE SUMMARY |
| 6 | * | 6 | * |
| 7 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | 7 | * Copyright (C) 2015 EMC Corporation. All Rights Reserved. |
| 8 | * | 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of version 2 of the GNU General Public License as | 10 | * it under the terms of version 2 of the GNU General Public License as |
| 11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
| 12 | * | 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, but | ||
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 13 | * BSD LICENSE | 18 | * BSD LICENSE |
| 14 | * | 19 | * |
| 15 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | 20 | * Copyright (C) 2015 EMC Corporation. All Rights Reserved. |
| 16 | * | 21 | * |
| 17 | * Redistribution and use in source and binary forms, with or without | 22 | * Redistribution and use in source and binary forms, with or without |
| 18 | * modification, are permitted provided that the following conditions | 23 | * modification, are permitted provided that the following conditions |
| @@ -40,49 +45,940 @@ | |||
| 40 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 45 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 41 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 46 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 42 | * | 47 | * |
| 43 | * Intel PCIe NTB Linux driver | 48 | * PCIe NTB Linux driver |
| 44 | * | 49 | * |
| 45 | * Contact Information: | 50 | * Contact Information: |
| 46 | * Jon Mason <jon.mason@intel.com> | 51 | * Allen Hubbe <Allen.Hubbe@emc.com> |
| 47 | */ | 52 | */ |
| 48 | 53 | ||
| 49 | struct ntb_transport_qp; | 54 | #ifndef _NTB_H_ |
| 55 | #define _NTB_H_ | ||
| 50 | 56 | ||
| 51 | struct ntb_client { | 57 | #include <linux/completion.h> |
| 52 | struct device_driver driver; | 58 | #include <linux/device.h> |
| 53 | int (*probe)(struct pci_dev *pdev); | 59 | |
| 54 | void (*remove)(struct pci_dev *pdev); | 60 | struct ntb_client; |
| 61 | struct ntb_dev; | ||
| 62 | struct pci_dev; | ||
| 63 | |||
| 64 | /** | ||
| 65 | * enum ntb_topo - NTB connection topology | ||
| 66 | * @NTB_TOPO_NONE: Topology is unknown or invalid. | ||
| 67 | * @NTB_TOPO_PRI: On primary side of local ntb. | ||
| 68 | * @NTB_TOPO_SEC: On secondary side of remote ntb. | ||
| 69 | * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. | ||
| 70 | * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. | ||
| 71 | */ | ||
| 72 | enum ntb_topo { | ||
| 73 | NTB_TOPO_NONE = -1, | ||
| 74 | NTB_TOPO_PRI, | ||
| 75 | NTB_TOPO_SEC, | ||
| 76 | NTB_TOPO_B2B_USD, | ||
| 77 | NTB_TOPO_B2B_DSD, | ||
| 78 | }; | ||
| 79 | |||
| 80 | static inline int ntb_topo_is_b2b(enum ntb_topo topo) | ||
| 81 | { | ||
| 82 | switch ((int)topo) { | ||
| 83 | case NTB_TOPO_B2B_USD: | ||
| 84 | case NTB_TOPO_B2B_DSD: | ||
| 85 | return 1; | ||
| 86 | } | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline char *ntb_topo_string(enum ntb_topo topo) | ||
| 91 | { | ||
| 92 | switch (topo) { | ||
| 93 | case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; | ||
| 94 | case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; | ||
| 95 | case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; | ||
| 96 | case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; | ||
| 97 | case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; | ||
| 98 | } | ||
| 99 | return "NTB_TOPO_INVALID"; | ||
| 100 | } | ||
| 101 | |||
| 102 | /** | ||
| 103 | * enum ntb_speed - NTB link training speed | ||
| 104 | * @NTB_SPEED_AUTO: Request the max supported speed. | ||
| 105 | * @NTB_SPEED_NONE: Link is not trained to any speed. | ||
| 106 | * @NTB_SPEED_GEN1: Link is trained to gen1 speed. | ||
| 107 | * @NTB_SPEED_GEN2: Link is trained to gen2 speed. | ||
| 108 | * @NTB_SPEED_GEN3: Link is trained to gen3 speed. | ||
| 109 | */ | ||
| 110 | enum ntb_speed { | ||
| 111 | NTB_SPEED_AUTO = -1, | ||
| 112 | NTB_SPEED_NONE = 0, | ||
| 113 | NTB_SPEED_GEN1 = 1, | ||
| 114 | NTB_SPEED_GEN2 = 2, | ||
| 115 | NTB_SPEED_GEN3 = 3, | ||
| 116 | }; | ||
| 117 | |||
| 118 | /** | ||
| 119 | * enum ntb_width - NTB link training width | ||
| 120 | * @NTB_WIDTH_AUTO: Request the max supported width. | ||
| 121 | * @NTB_WIDTH_NONE: Link is not trained to any width. | ||
| 122 | * @NTB_WIDTH_1: Link is trained to 1 lane width. | ||
| 123 | * @NTB_WIDTH_2: Link is trained to 2 lane width. | ||
| 124 | * @NTB_WIDTH_4: Link is trained to 4 lane width. | ||
| 125 | * @NTB_WIDTH_8: Link is trained to 8 lane width. | ||
| 126 | * @NTB_WIDTH_12: Link is trained to 12 lane width. | ||
| 127 | * @NTB_WIDTH_16: Link is trained to 16 lane width. | ||
| 128 | * @NTB_WIDTH_32: Link is trained to 32 lane width. | ||
| 129 | */ | ||
| 130 | enum ntb_width { | ||
| 131 | NTB_WIDTH_AUTO = -1, | ||
| 132 | NTB_WIDTH_NONE = 0, | ||
| 133 | NTB_WIDTH_1 = 1, | ||
| 134 | NTB_WIDTH_2 = 2, | ||
| 135 | NTB_WIDTH_4 = 4, | ||
| 136 | NTB_WIDTH_8 = 8, | ||
| 137 | NTB_WIDTH_12 = 12, | ||
| 138 | NTB_WIDTH_16 = 16, | ||
| 139 | NTB_WIDTH_32 = 32, | ||
| 140 | }; | ||
| 141 | |||
| 142 | /** | ||
| 143 | * struct ntb_client_ops - ntb client operations | ||
| 144 | * @probe: Notify client of a new device. | ||
| 145 | * @remove: Notify client to remove a device. | ||
| 146 | */ | ||
| 147 | struct ntb_client_ops { | ||
| 148 | int (*probe)(struct ntb_client *client, struct ntb_dev *ntb); | ||
| 149 | void (*remove)(struct ntb_client *client, struct ntb_dev *ntb); | ||
| 150 | }; | ||
| 151 | |||
| 152 | static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops) | ||
| 153 | { | ||
| 154 | /* commented callbacks are not required: */ | ||
| 155 | return | ||
| 156 | ops->probe && | ||
| 157 | ops->remove && | ||
| 158 | 1; | ||
| 159 | } | ||
| 160 | |||
| 161 | /** | ||
| 162 | * struct ntb_ctx_ops - ntb driver context operations | ||
| 163 | * @link_event: See ntb_link_event(). | ||
| 164 | * @db_event: See ntb_db_event(). | ||
| 165 | */ | ||
| 166 | struct ntb_ctx_ops { | ||
| 167 | void (*link_event)(void *ctx); | ||
| 168 | void (*db_event)(void *ctx, int db_vector); | ||
| 169 | }; | ||
| 170 | |||
| 171 | static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) | ||
| 172 | { | ||
| 173 | /* commented callbacks are not required: */ | ||
| 174 | return | ||
| 175 | /* ops->link_event && */ | ||
| 176 | /* ops->db_event && */ | ||
| 177 | 1; | ||
| 178 | } | ||
| 179 | |||
| 180 | /** | ||
| 181 | * struct ntb_dev_ops - ntb device operations | ||
| 182 | * @mw_count: See ntb_mw_count(). | ||
| 183 | * @mw_get_range: See ntb_mw_get_range(). | ||
| 184 | * @mw_set_trans: See ntb_mw_set_trans(). | ||
| 185 | * @mw_clear_trans: See ntb_mw_clear_trans(). | ||
| 186 | * @link_is_up: See ntb_link_is_up(). | ||
| 187 | * @link_enable: See ntb_link_enable(). | ||
| 188 | * @link_disable: See ntb_link_disable(). | ||
| 189 | * @db_is_unsafe: See ntb_db_is_unsafe(). | ||
| 190 | * @db_valid_mask: See ntb_db_valid_mask(). | ||
| 191 | * @db_vector_count: See ntb_db_vector_count(). | ||
| 192 | * @db_vector_mask: See ntb_db_vector_mask(). | ||
| 193 | * @db_read: See ntb_db_read(). | ||
| 194 | * @db_set: See ntb_db_set(). | ||
| 195 | * @db_clear: See ntb_db_clear(). | ||
| 196 | * @db_read_mask: See ntb_db_read_mask(). | ||
| 197 | * @db_set_mask: See ntb_db_set_mask(). | ||
| 198 | * @db_clear_mask: See ntb_db_clear_mask(). | ||
| 199 | * @peer_db_addr: See ntb_peer_db_addr(). | ||
| 200 | * @peer_db_read: See ntb_peer_db_read(). | ||
| 201 | * @peer_db_set: See ntb_peer_db_set(). | ||
| 202 | * @peer_db_clear: See ntb_peer_db_clear(). | ||
| 203 | * @peer_db_read_mask: See ntb_peer_db_read_mask(). | ||
| 204 | * @peer_db_set_mask: See ntb_peer_db_set_mask(). | ||
| 205 | * @peer_db_clear_mask: See ntb_peer_db_clear_mask(). | ||
| 206 | * @spad_is_unsafe: See ntb_spad_is_unsafe(). | ||
| 207 | * @spad_count: See ntb_spad_count(). | ||
| 208 | * @spad_read: See ntb_spad_read(). | ||
| 209 | * @spad_write: See ntb_spad_write(). | ||
| 210 | * @peer_spad_addr: See ntb_peer_spad_addr(). | ||
| 211 | * @peer_spad_read: See ntb_peer_spad_read(). | ||
| 212 | * @peer_spad_write: See ntb_peer_spad_write(). | ||
| 213 | */ | ||
| 214 | struct ntb_dev_ops { | ||
| 215 | int (*mw_count)(struct ntb_dev *ntb); | ||
| 216 | int (*mw_get_range)(struct ntb_dev *ntb, int idx, | ||
| 217 | phys_addr_t *base, resource_size_t *size, | ||
| 218 | resource_size_t *align, resource_size_t *align_size); | ||
| 219 | int (*mw_set_trans)(struct ntb_dev *ntb, int idx, | ||
| 220 | dma_addr_t addr, resource_size_t size); | ||
| 221 | int (*mw_clear_trans)(struct ntb_dev *ntb, int idx); | ||
| 222 | |||
| 223 | int (*link_is_up)(struct ntb_dev *ntb, | ||
| 224 | enum ntb_speed *speed, enum ntb_width *width); | ||
| 225 | int (*link_enable)(struct ntb_dev *ntb, | ||
| 226 | enum ntb_speed max_speed, enum ntb_width max_width); | ||
| 227 | int (*link_disable)(struct ntb_dev *ntb); | ||
| 228 | |||
| 229 | int (*db_is_unsafe)(struct ntb_dev *ntb); | ||
| 230 | u64 (*db_valid_mask)(struct ntb_dev *ntb); | ||
| 231 | int (*db_vector_count)(struct ntb_dev *ntb); | ||
| 232 | u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector); | ||
| 233 | |||
| 234 | u64 (*db_read)(struct ntb_dev *ntb); | ||
| 235 | int (*db_set)(struct ntb_dev *ntb, u64 db_bits); | ||
| 236 | int (*db_clear)(struct ntb_dev *ntb, u64 db_bits); | ||
| 237 | |||
| 238 | u64 (*db_read_mask)(struct ntb_dev *ntb); | ||
| 239 | int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits); | ||
| 240 | int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); | ||
| 241 | |||
| 242 | int (*peer_db_addr)(struct ntb_dev *ntb, | ||
| 243 | phys_addr_t *db_addr, resource_size_t *db_size); | ||
| 244 | u64 (*peer_db_read)(struct ntb_dev *ntb); | ||
| 245 | int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits); | ||
| 246 | int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); | ||
| 247 | |||
| 248 | u64 (*peer_db_read_mask)(struct ntb_dev *ntb); | ||
| 249 | int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits); | ||
| 250 | int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); | ||
| 251 | |||
| 252 | int (*spad_is_unsafe)(struct ntb_dev *ntb); | ||
| 253 | int (*spad_count)(struct ntb_dev *ntb); | ||
| 254 | |||
| 255 | u32 (*spad_read)(struct ntb_dev *ntb, int idx); | ||
| 256 | int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val); | ||
| 257 | |||
| 258 | int (*peer_spad_addr)(struct ntb_dev *ntb, int idx, | ||
| 259 | phys_addr_t *spad_addr); | ||
| 260 | u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx); | ||
| 261 | int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val); | ||
| 55 | }; | 262 | }; |
| 56 | 263 | ||
| 57 | enum { | 264 | static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) |
| 58 | NTB_LINK_DOWN = 0, | 265 | { |
| 59 | NTB_LINK_UP, | 266 | /* commented callbacks are not required: */ |
| 267 | return | ||
| 268 | ops->mw_count && | ||
| 269 | ops->mw_get_range && | ||
| 270 | ops->mw_set_trans && | ||
| 271 | /* ops->mw_clear_trans && */ | ||
| 272 | ops->link_is_up && | ||
| 273 | ops->link_enable && | ||
| 274 | ops->link_disable && | ||
| 275 | /* ops->db_is_unsafe && */ | ||
| 276 | ops->db_valid_mask && | ||
| 277 | |||
| 278 | /* both set, or both unset */ | ||
| 279 | (!ops->db_vector_count == !ops->db_vector_mask) && | ||
| 280 | |||
| 281 | ops->db_read && | ||
| 282 | /* ops->db_set && */ | ||
| 283 | ops->db_clear && | ||
| 284 | /* ops->db_read_mask && */ | ||
| 285 | ops->db_set_mask && | ||
| 286 | ops->db_clear_mask && | ||
| 287 | ops->peer_db_addr && | ||
| 288 | /* ops->peer_db_read && */ | ||
| 289 | ops->peer_db_set && | ||
| 290 | /* ops->peer_db_clear && */ | ||
| 291 | /* ops->peer_db_read_mask && */ | ||
| 292 | /* ops->peer_db_set_mask && */ | ||
| 293 | /* ops->peer_db_clear_mask && */ | ||
| 294 | /* ops->spad_is_unsafe && */ | ||
| 295 | ops->spad_count && | ||
| 296 | ops->spad_read && | ||
| 297 | ops->spad_write && | ||
| 298 | ops->peer_spad_addr && | ||
| 299 | /* ops->peer_spad_read && */ | ||
| 300 | ops->peer_spad_write && | ||
| 301 | 1; | ||
| 302 | } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * struct ntb_client - client interested in ntb devices | ||
| 306 | * @drv: Linux driver object. | ||
| 307 | * @ops: See &ntb_client_ops. | ||
| 308 | */ | ||
| 309 | struct ntb_client { | ||
| 310 | struct device_driver drv; | ||
| 311 | const struct ntb_client_ops ops; | ||
| 60 | }; | 312 | }; |
| 61 | 313 | ||
| 62 | int ntb_register_client(struct ntb_client *drvr); | 314 | #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) |
| 63 | void ntb_unregister_client(struct ntb_client *drvr); | 315 | |
| 64 | int ntb_register_client_dev(char *device_name); | 316 | /** |
| 65 | void ntb_unregister_client_dev(char *device_name); | 317 | * struct ntb_dev - ntb device |
| 66 | 318 | * @dev: Linux device object. | |
| 67 | struct ntb_queue_handlers { | 319 | * @pdev: Pci device entry of the ntb. |
| 68 | void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, | 320 | * @topo: Detected topology of the ntb. |
| 69 | void *data, int len); | 321 | * @ops: See &ntb_dev_ops. |
| 70 | void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, | 322 | * @ctx: See &ntb_ctx_ops. |
| 71 | void *data, int len); | 323 | * @ctx_ops: See &ntb_ctx_ops. |
| 72 | void (*event_handler)(void *data, int status); | 324 | */ |
| 325 | struct ntb_dev { | ||
| 326 | struct device dev; | ||
| 327 | struct pci_dev *pdev; | ||
| 328 | enum ntb_topo topo; | ||
| 329 | const struct ntb_dev_ops *ops; | ||
| 330 | void *ctx; | ||
| 331 | const struct ntb_ctx_ops *ctx_ops; | ||
| 332 | |||
| 333 | /* private: */ | ||
| 334 | |||
| 335 | /* synchronize setting, clearing, and calling ctx_ops */ | ||
| 336 | spinlock_t ctx_lock; | ||
| 337 | /* block unregister until device is fully released */ | ||
| 338 | struct completion released; | ||
| 73 | }; | 339 | }; |
| 74 | 340 | ||
| 75 | unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); | 341 | #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) |
| 76 | unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); | 342 | |
| 77 | struct ntb_transport_qp * | 343 | /** |
| 78 | ntb_transport_create_queue(void *data, struct pci_dev *pdev, | 344 | * ntb_register_client() - register a client for interest in ntb devices |
| 79 | const struct ntb_queue_handlers *handlers); | 345 | * @client: Client context. |
| 80 | void ntb_transport_free_queue(struct ntb_transport_qp *qp); | 346 | * |
| 81 | int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, | 347 | * The client will be added to the list of clients interested in ntb devices. |
| 82 | unsigned int len); | 348 | * The client will be notified of any ntb devices that are not already |
| 83 | int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, | 349 | * associated with a client, or if ntb devices are registered later. |
| 84 | unsigned int len); | 350 | * |
| 85 | void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); | 351 | * Return: Zero if the client is registered, otherwise an error number. |
| 86 | void ntb_transport_link_up(struct ntb_transport_qp *qp); | 352 | */ |
| 87 | void ntb_transport_link_down(struct ntb_transport_qp *qp); | 353 | #define ntb_register_client(client) \ |
| 88 | bool ntb_transport_link_query(struct ntb_transport_qp *qp); | 354 | __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME) |
| 355 | |||
| 356 | int __ntb_register_client(struct ntb_client *client, struct module *mod, | ||
| 357 | const char *mod_name); | ||
| 358 | |||
| 359 | /** | ||
| 360 | * ntb_unregister_client() - unregister a client for interest in ntb devices | ||
| 361 | * @client: Client context. | ||
| 362 | * | ||
| 363 | * The client will be removed from the list of clients interested in ntb | ||
| 364 | * devices. If any ntb devices are associated with the client, the client will | ||
| 365 | * be notified to remove those devices. | ||
| 366 | */ | ||
| 367 | void ntb_unregister_client(struct ntb_client *client); | ||
| 368 | |||
| 369 | #define module_ntb_client(__ntb_client) \ | ||
| 370 | module_driver(__ntb_client, ntb_register_client, \ | ||
| 371 | ntb_unregister_client) | ||
| 372 | |||
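Taken together, ntb_client_ops, ntb_register_client() and the module_ntb_client() helper give a client driver the usual bus-style boilerplate. A minimal hedged skeleton (probe/remove bodies are placeholders, not part of the patch):

	/* Illustrative only: a do-nothing NTB client module. */
	static int example_probe(struct ntb_client *client, struct ntb_dev *ntb)
	{
		dev_info(&ntb->dev, "example ntb client bound\n");
		return 0;
	}

	static void example_remove(struct ntb_client *client, struct ntb_dev *ntb)
	{
		dev_info(&ntb->dev, "example ntb client unbound\n");
	}

	static struct ntb_client example_client = {
		.ops = {
			.probe = example_probe,
			.remove = example_remove,
		},
	};
	module_ntb_client(example_client);

	MODULE_LICENSE("GPL");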
| 373 | /** | ||
| 374 | * ntb_register_device() - register a ntb device | ||
| 375 | * @ntb: NTB device context. | ||
| 376 | * | ||
| 377 | * The device will be added to the list of ntb devices. If any clients are | ||
| 378 | * interested in ntb devices, each client will be notified of the ntb device, | ||
| 379 | * until at most one client accepts the device. | ||
| 380 | * | ||
| 381 | * Return: Zero if the device is registered, otherwise an error number. | ||
| 382 | */ | ||
| 383 | int ntb_register_device(struct ntb_dev *ntb); | ||
| 384 | |||
| 385 | /** | ||
| 386 | * ntb_unregister_device() - unregister a ntb device | ||
| 387 | * @ntb: NTB device context. | ||
| 388 | * | ||
| 389 | * The device will be removed from the list of ntb devices. If the ntb device | ||
| 390 | * is associated with a client, the client will be notified to remove the | ||
| 391 | * device. | ||
| 392 | */ | ||
| 393 | void ntb_unregister_device(struct ntb_dev *ntb); | ||
| 394 | |||
| 395 | /** | ||
| 396 | * ntb_set_ctx() - associate a driver context with an ntb device | ||
| 397 | * @ntb: NTB device context. | ||
| 398 | * @ctx: Driver context. | ||
| 399 | * @ctx_ops: Driver context operations. | ||
| 400 | * | ||
| 401 | * Associate a driver context and operations with a ntb device. The context is | ||
| 402 | * provided by the client driver, and the driver may associate a different | ||
| 403 | * context with each ntb device. | ||
| 404 | * | ||
| 405 | * Return: Zero if the context is associated, otherwise an error number. | ||
| 406 | */ | ||
| 407 | int ntb_set_ctx(struct ntb_dev *ntb, void *ctx, | ||
| 408 | const struct ntb_ctx_ops *ctx_ops); | ||
| 409 | |||
| 410 | /** | ||
| 411 | * ntb_clear_ctx() - disassociate any driver context from an ntb device | ||
| 412 | * @ntb: NTB device context. | ||
| 413 | * | ||
| 414 | * Clear any association that may exist between a driver context and the ntb | ||
| 415 | * device. | ||
| 416 | */ | ||
| 417 | void ntb_clear_ctx(struct ntb_dev *ntb); | ||
| 418 | |||
| 419 | /** | ||
| 420 | * ntb_link_event() - notify driver context of a change in link status | ||
| 421 | * @ntb: NTB device context. | ||
| 422 | * | ||
| 423 | * Notify the driver context that the link status may have changed. The driver | ||
| 424 | * should call ntb_link_is_up() to get the current status. | ||
| 425 | */ | ||
| 426 | void ntb_link_event(struct ntb_dev *ntb); | ||
| 427 | |||
| 428 | /** | ||
| 429 | * ntb_db_event() - notify driver context of a doorbell event | ||
| 430 | * @ntb: NTB device context. | ||
| 431 | * @vector: Interrupt vector number. | ||
| 432 | * | ||
| 433 | * Notify the driver context of a doorbell event. If hardware supports | ||
| 434 | * multiple interrupt vectors for doorbells, the vector number indicates which | ||
| 435 | * vector received the interrupt. The vector number is relative to the first | ||
| 436 | * vector used for doorbells, starting at zero, and must be less than | ||
| 437 | * ntb_db_vector_count(). The driver may call ntb_db_read() to check which | ||
| 438 | * doorbell bits need service, and ntb_db_vector_mask() to determine which of | ||
| 439 | * those bits are associated with the vector number. | ||
| 440 | */ | ||
| 441 | void ntb_db_event(struct ntb_dev *ntb, int vector); | ||
| 442 | |||
| 443 | /** | ||
| 444 | * ntb_mw_count() - get the number of memory windows | ||
| 445 | * @ntb: NTB device context. | ||
| 446 | * | ||
| 447 | * Hardware and topology may support a different number of memory windows. | ||
| 448 | * | ||
| 449 | * Return: the number of memory windows. | ||
| 450 | */ | ||
| 451 | static inline int ntb_mw_count(struct ntb_dev *ntb) | ||
| 452 | { | ||
| 453 | return ntb->ops->mw_count(ntb); | ||
| 454 | } | ||
| 455 | |||
| 456 | /** | ||
| 457 | * ntb_mw_get_range() - get the range of a memory window | ||
| 458 | * @ntb: NTB device context. | ||
| 459 | * @idx: Memory window number. | ||
| 460 | * @base: OUT - the base address for mapping the memory window | ||
| 461 | * @size: OUT - the size for mapping the memory window | ||
| 462 | * @align: OUT - the base alignment for translating the memory window | ||
| 463 | * @align_size: OUT - the size alignment for translating the memory window | ||
| 464 | * | ||
| 465 | * Get the range of a memory window. NULL may be given for any output | ||
| 466 | * parameter if the value is not needed. The base and size may be used for | ||
| 467 | * mapping the memory window, to access the peer memory. The alignment and | ||
| 468 | * size may be used for translating the memory window, for the peer to access | ||
| 469 | * memory on the local system. | ||
| 470 | * | ||
| 471 | * Return: Zero on success, otherwise an error number. | ||
| 472 | */ | ||
| 473 | static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx, | ||
| 474 | phys_addr_t *base, resource_size_t *size, | ||
| 475 | resource_size_t *align, resource_size_t *align_size) | ||
| 476 | { | ||
| 477 | return ntb->ops->mw_get_range(ntb, idx, base, size, | ||
| 478 | align, align_size); | ||
| 479 | } | ||
| 480 | |||
| 481 | /** | ||
| 482 | * ntb_mw_set_trans() - set the translation of a memory window | ||
| 483 | * @ntb: NTB device context. | ||
| 484 | * @idx: Memory window number. | ||
| 485 | * @addr: The dma address local memory to expose to the peer. | ||
| 486 | * @size: The size of the local memory to expose to the peer. | ||
| 487 | * | ||
| 488 | * Set the translation of a memory window. The peer may access local memory | ||
| 489 | * through the window starting at the address, up to the size. The address | ||
| 490 | * must be aligned to the alignment specified by ntb_mw_get_range(). The size | ||
| 491 | * must be aligned to the size alignment specified by ntb_mw_get_range(). | ||
| 492 | * | ||
| 493 | * Return: Zero on success, otherwise an error number. | ||
| 494 | */ | ||
| 495 | static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx, | ||
| 496 | dma_addr_t addr, resource_size_t size) | ||
| 497 | { | ||
| 498 | return ntb->ops->mw_set_trans(ntb, idx, addr, size); | ||
| 499 | } | ||
| 500 | |||
| 501 | /** | ||
| 502 | * ntb_mw_clear_trans() - clear the translation of a memory window | ||
| 503 | * @ntb: NTB device context. | ||
| 504 | * @idx: Memory window number. | ||
| 505 | * | ||
| 506 | * Clear the translation of a memory window. The peer may no longer access | ||
| 507 | * local memory through the window. | ||
| 508 | * | ||
| 509 | * Return: Zero on success, otherwise an error number. | ||
| 510 | */ | ||
| 511 | static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx) | ||
| 512 | { | ||
| 513 | if (!ntb->ops->mw_clear_trans) | ||
| 514 | return ntb->ops->mw_set_trans(ntb, idx, 0, 0); | ||
| 515 | |||
| 516 | return ntb->ops->mw_clear_trans(ntb, idx); | ||
| 517 | } | ||
| 518 | |||
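A client typically pairs ntb_mw_get_range() with ntb_mw_set_trans(): the returned base/size describe how to map the window to reach peer memory, while the alignment values constrain the local buffer exposed to the peer. A hedged sketch with error handling trimmed (dma_alloc_coherent() and ioremap_wc() are one plausible choice, and the DMA address is simply assumed to satisfy the reported base alignment):

	/* Illustrative only: map memory window 0 and expose a local buffer. */
	static int example_setup_mw(struct ntb_dev *ntb)
	{
		phys_addr_t base;
		resource_size_t size, align, align_size;
		void __iomem *peer_mmio;
		dma_addr_t dma_addr;
		void *buf;
		int rc;

		rc = ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
		if (rc)
			return rc;

		peer_mmio = ioremap_wc(base, size);	/* our view of peer memory */
		if (!peer_mmio)
			return -EIO;

		size = round_down(size, align_size);
		buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma_addr, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* dma_addr must also satisfy the base alignment reported in "align" */
		return ntb_mw_set_trans(ntb, 0, dma_addr, size);
	}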
| 519 | /** | ||
| 520 | * ntb_link_is_up() - get the current ntb link state | ||
| 521 | * @ntb: NTB device context. | ||
| 522 | * @speed: OUT - The link speed expressed as PCIe generation number. | ||
| 523 | * @width: OUT - The link width expressed as the number of PCIe lanes. | ||
| 524 | * | ||
| 525 | * Set the translation of a memory window. The peer may access local memory | ||
| 526 | * through the window starting at the address, up to the size. The address | ||
| 527 | * must be aligned to the alignment specified by ntb_mw_get_range(). The size | ||
| 528 | * must be aligned to the size alignment specified by ntb_mw_get_range(). | ||
| 529 | * | ||
| 530 | * Return: One if the link is up, zero if the link is down, otherwise a | ||
| 531 | * negative value indicating the error number. | ||
| 532 | */ | ||
| 533 | static inline int ntb_link_is_up(struct ntb_dev *ntb, | ||
| 534 | enum ntb_speed *speed, enum ntb_width *width) | ||
| 535 | { | ||
| 536 | return ntb->ops->link_is_up(ntb, speed, width); | ||
| 537 | } | ||
| 538 | |||
| 539 | /** | ||
| 540 | * ntb_link_enable() - enable the link on the secondary side of the ntb | ||
| 541 | * @ntb: NTB device context. | ||
| 542 | * @max_speed: The maximum link speed expressed as PCIe generation number. | ||
| 543 | * @max_width: The maximum link width expressed as the number of PCIe lanes. | ||
| 544 | * | ||
| 545 | * Enable the link on the secondary side of the ntb. This can only be done | ||
| 546 | * from the primary side of the ntb in primary or b2b topology. The ntb device | ||
| 547 | * should train the link to its maximum speed and width, or the requested speed | ||
| 548 | * and width, whichever is smaller, if supported. | ||
| 549 | * | ||
| 550 | * Return: Zero on success, otherwise an error number. | ||
| 551 | */ | ||
| 552 | static inline int ntb_link_enable(struct ntb_dev *ntb, | ||
| 553 | enum ntb_speed max_speed, | ||
| 554 | enum ntb_width max_width) | ||
| 555 | { | ||
| 556 | return ntb->ops->link_enable(ntb, max_speed, max_width); | ||
| 557 | } | ||
| 558 | |||
| 559 | /** | ||
| 560 | * ntb_link_disable() - disable the link on the secondary side of the ntb | ||
| 561 | * @ntb: NTB device context. | ||
| 562 | * | ||
| 563 | * Disable the link on the secondary side of the ntb. This can only be | ||
| 564 | * done from the primary side of the ntb in primary or b2b topology. The ntb | ||
| 565 | * device should disable the link. Returning from this call must indicate that | ||
| 566 | * a barrier has passed, and no more writes may pass in either | ||
| 567 | * direction across the link, except if this call returns an error number. | ||
| 568 | * | ||
| 569 | * Return: Zero on success, otherwise an error number. | ||
| 570 | */ | ||
| 571 | static inline int ntb_link_disable(struct ntb_dev *ntb) | ||
| 572 | { | ||
| 573 | return ntb->ops->link_disable(ntb); | ||
| 574 | } | ||
| 575 | |||
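The usual pattern is to enable the link once the driver context is set, then re-check the negotiated speed and width from the link event callback rather than assuming the requested maximums were honoured. A hedged sketch (it assumes the client passed the ntb_dev as its ctx):

	/* Illustrative only: bring the link up and react to link events. */
	static void example_link_event(void *ctx)
	{
		struct ntb_dev *ntb = ctx;
		enum ntb_speed speed;
		enum ntb_width width;

		if (ntb_link_is_up(ntb, &speed, &width) > 0)
			dev_info(&ntb->dev, "link up, gen%d x%d\n", speed, width);
		else
			dev_info(&ntb->dev, "link down\n");
	}

	static int example_start(struct ntb_dev *ntb)
	{
		return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	}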
| 576 | /** | ||
| 577 | * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell | ||
| 578 | * @ntb: NTB device context. | ||
| 579 | * | ||
| 580 | * It is possible for some ntb hardware to be affected by errata. Hardware | ||
| 581 | * drivers can advise clients to avoid using doorbells. Clients may ignore | ||
| 582 | * this advice, though caution is recommended. | ||
| 583 | * | ||
| 584 | * Return: Zero if it is safe to use doorbells, or One if it is not safe. | ||
| 585 | */ | ||
| 586 | static inline int ntb_db_is_unsafe(struct ntb_dev *ntb) | ||
| 587 | { | ||
| 588 | if (!ntb->ops->db_is_unsafe) | ||
| 589 | return 0; | ||
| 590 | |||
| 591 | return ntb->ops->db_is_unsafe(ntb); | ||
| 592 | } | ||
| 593 | |||
| 594 | /** | ||
| 595 | * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb | ||
| 596 | * @ntb: NTB device context. | ||
| 597 | * | ||
| 598 | * Hardware may support different number or arrangement of doorbell bits. | ||
| 599 | * | ||
| 600 | * Return: A mask of doorbell bits supported by the ntb. | ||
| 601 | */ | ||
| 602 | static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb) | ||
| 603 | { | ||
| 604 | return ntb->ops->db_valid_mask(ntb); | ||
| 605 | } | ||
| 606 | |||
| 607 | /** | ||
| 608 | * ntb_db_vector_count() - get the number of doorbell interrupt vectors | ||
| 609 | * @ntb: NTB device context. | ||
| 610 | * | ||
| 611 | * Hardware may support different number of interrupt vectors. | ||
| 612 | * | ||
| 613 | * Return: The number of doorbell interrupt vectors. | ||
| 614 | */ | ||
| 615 | static inline int ntb_db_vector_count(struct ntb_dev *ntb) | ||
| 616 | { | ||
| 617 | if (!ntb->ops->db_vector_count) | ||
| 618 | return 1; | ||
| 619 | |||
| 620 | return ntb->ops->db_vector_count(ntb); | ||
| 621 | } | ||
| 622 | |||
| 623 | /** | ||
| 624 | * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector | ||
| 625 | * @ntb: NTB device context. | ||
| 626 | * @vector: Doorbell vector number. | ||
| 627 | * | ||
| 628 | * Each interrupt vector may have a different number or arrangement of bits. | ||
| 629 | * | ||
| 630 | * Return: A mask of doorbell bits serviced by a vector. | ||
| 631 | */ | ||
| 632 | static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector) | ||
| 633 | { | ||
| 634 | if (!ntb->ops->db_vector_mask) | ||
| 635 | return ntb_db_valid_mask(ntb); | ||
| 636 | |||
| 637 | return ntb->ops->db_vector_mask(ntb, vector); | ||
| 638 | } | ||
| 639 | |||
| 640 | /** | ||
| 641 | * ntb_db_read() - read the local doorbell register | ||
| 642 | * @ntb: NTB device context. | ||
| 643 | * | ||
| 644 | * Read the local doorbell register, and return the bits that are set. | ||
| 645 | * | ||
| 646 | * Return: The bits currently set in the local doorbell register. | ||
| 647 | */ | ||
| 648 | static inline u64 ntb_db_read(struct ntb_dev *ntb) | ||
| 649 | { | ||
| 650 | return ntb->ops->db_read(ntb); | ||
| 651 | } | ||
| 652 | |||
| 653 | /** | ||
| 654 | * ntb_db_set() - set bits in the local doorbell register | ||
| 655 | * @ntb: NTB device context. | ||
| 656 | * @db_bits: Doorbell bits to set. | ||
| 657 | * | ||
| 658 | * Set bits in the local doorbell register, which may generate a local doorbell | ||
| 659 | * interrupt. Bits that were already set must remain set. | ||
| 660 | * | ||
| 661 | * This is unusual, and hardware may not support it. | ||
| 662 | * | ||
| 663 | * Return: Zero on success, otherwise an error number. | ||
| 664 | */ | ||
| 665 | static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits) | ||
| 666 | { | ||
| 667 | if (!ntb->ops->db_set) | ||
| 668 | return -EINVAL; | ||
| 669 | |||
| 670 | return ntb->ops->db_set(ntb, db_bits); | ||
| 671 | } | ||
| 672 | |||
| 673 | /** | ||
| 674 | * ntb_db_clear() - clear bits in the local doorbell register | ||
| 675 | * @ntb: NTB device context. | ||
| 676 | * @db_bits: Doorbell bits to clear. | ||
| 677 | * | ||
| 678 | * Clear bits in the local doorbell register, arming the bits for the next | ||
| 679 | * doorbell. | ||
| 680 | * | ||
| 681 | * Return: Zero on success, otherwise an error number. | ||
| 682 | */ | ||
| 683 | static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) | ||
| 684 | { | ||
| 685 | return ntb->ops->db_clear(ntb, db_bits); | ||
| 686 | } | ||
| 687 | |||
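In the db_event callback a client usually narrows the full doorbell register down to the bits owned by the signalled vector, re-arms them with ntb_db_clear(), and then services them. A hedged sketch (again assuming the ntb_dev was passed as the ctx):

	/* Illustrative only: service doorbell bits belonging to one vector. */
	static void example_db_event(void *ctx, int vector)
	{
		struct ntb_dev *ntb = ctx;
		u64 db_bits;

		db_bits = ntb_db_read(ntb) & ntb_db_vector_mask(ntb, vector);
		if (!db_bits)
			return;

		ntb_db_clear(ntb, db_bits);	/* re-arm before handling */
		/* ... handle each set bit ... */
	}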
| 688 | /** | ||
| 689 | * ntb_db_read_mask() - read the local doorbell mask | ||
| 690 | * @ntb: NTB device context. | ||
| 691 | * | ||
| 692 | * Read the local doorbell mask register, and return the bits that are set. | ||
| 693 | * | ||
| 694 | * This is unusual, though hardware is likely to support it. | ||
| 695 | * | ||
| 696 | * Return: The bits currently set in the local doorbell mask register. | ||
| 697 | */ | ||
| 698 | static inline u64 ntb_db_read_mask(struct ntb_dev *ntb) | ||
| 699 | { | ||
| 700 | if (!ntb->ops->db_read_mask) | ||
| 701 | return 0; | ||
| 702 | |||
| 703 | return ntb->ops->db_read_mask(ntb); | ||
| 704 | } | ||
| 705 | |||
| 706 | /** | ||
| 707 | * ntb_db_set_mask() - set bits in the local doorbell mask | ||
| 708 | * @ntb: NTB device context. | ||
| 709 | * @db_bits: Doorbell mask bits to set. | ||
| 710 | * | ||
| 711 | * Set bits in the local doorbell mask register, preventing doorbell interrupts | ||
| 712 | * from being generated for those doorbell bits. Bits that were already set | ||
| 713 | * must remain set. | ||
| 714 | * | ||
| 715 | * Return: Zero on success, otherwise an error number. | ||
| 716 | */ | ||
| 717 | static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) | ||
| 718 | { | ||
| 719 | return ntb->ops->db_set_mask(ntb, db_bits); | ||
| 720 | } | ||
| 721 | |||
| 722 | /** | ||
| 723 | * ntb_db_clear_mask() - clear bits in the local doorbell mask | ||
| 724 | * @ntb: NTB device context. | ||
| 725 | * @db_bits: Doorbell bits to clear. | ||
| 726 | * | ||
| 727 | * Clear bits in the local doorbell mask register, allowing doorbell interrupts | ||
| 728 | * to be generated for those doorbell bits. If a doorbell bit is already | ||
| 729 | * set at the time the mask is cleared, and the corresponding mask bit is | ||
| 730 | * changed from set to clear, then the ntb driver must ensure that | ||
| 731 | * ntb_db_event() is called. If the hardware does not generate the interrupt | ||
| 732 | * on clearing the mask bit, then the driver must call ntb_db_event() anyway. | ||
| 733 | * | ||
| 734 | * Return: Zero on success, otherwise an error number. | ||
| 735 | */ | ||
| 736 | static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | ||
| 737 | { | ||
| 738 | return ntb->ops->db_clear_mask(ntb, db_bits); | ||
| 739 | } | ||
| 740 | |||
| 741 | /** | ||
| 742 | * ntb_peer_db_addr() - address and size of the peer doorbell register | ||
| 743 | * @ntb: NTB device context. | ||
| 744 | * @db_addr: OUT - The address of the peer doorbell register. | ||
| 745 | * @db_size: OUT - The number of bytes to write the peer doorbell register. | ||
| 746 | * | ||
| 747 | * Return the address of the peer doorbell register. This may be used, for | ||
| 748 | * example, by drivers that offload memory copy operations to a dma engine. | ||
| 749 | * The drivers may wish to ring the peer doorbell at the completion of memory | ||
| 750 | * copy operations. For efficiency, and to simplify ordering of operations | ||
| 751 | * between the dma memory copies and the ringing doorbell, the driver may | ||
| 752 | * append one additional dma memory copy with the doorbell register as the | ||
| 753 | * destination, after the memory copy operations. | ||
| 754 | * | ||
| 755 | * Return: Zero on success, otherwise an error number. | ||
| 756 | */ | ||
| 757 | static inline int ntb_peer_db_addr(struct ntb_dev *ntb, | ||
| 758 | phys_addr_t *db_addr, | ||
| 759 | resource_size_t *db_size) | ||
| 760 | { | ||
| 761 | return ntb->ops->peer_db_addr(ntb, db_addr, db_size); | ||
| 762 | } | ||
| 763 | |||
| 764 | /** | ||
| 765 | * ntb_peer_db_read() - read the peer doorbell register | ||
| 766 | * @ntb: NTB device context. | ||
| 767 | * | ||
| 768 | * Read the peer doorbell register, and return the bits that are set. | ||
| 769 | * | ||
| 770 | * This is unusual, and hardware may not support it. | ||
| 771 | * | ||
| 772 | * Return: The bits currently set in the peer doorbell register. | ||
| 773 | */ | ||
| 774 | static inline u64 ntb_peer_db_read(struct ntb_dev *ntb) | ||
| 775 | { | ||
| 776 | if (!ntb->ops->peer_db_read) | ||
| 777 | return 0; | ||
| 778 | |||
| 779 | return ntb->ops->peer_db_read(ntb); | ||
| 780 | } | ||
| 781 | |||
| 782 | /** | ||
| 783 | * ntb_peer_db_set() - set bits in the peer doorbell register | ||
| 784 | * @ntb: NTB device context. | ||
| 785 | * @db_bits: Doorbell bits to set. | ||
| 786 | * | ||
| 787 | * Set bits in the peer doorbell register, which may generate a peer doorbell | ||
| 788 | * interrupt. Bits that were already set must remain set. | ||
| 789 | * | ||
| 790 | * Return: Zero on success, otherwise an error number. | ||
| 791 | */ | ||
| 792 | static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) | ||
| 793 | { | ||
| 794 | return ntb->ops->peer_db_set(ntb, db_bits); | ||
| 795 | } | ||
| 796 | |||
| 797 | /** | ||
| 798 | * ntb_peer_db_clear() - clear bits in the peer doorbell register | ||
| 799 | * @ntb: NTB device context. | ||
| 800 | * @db_bits: Doorbell bits to clear. | ||
| 801 | * | ||
| 802 | * Clear bits in the peer doorbell register, arming the bits for the next | ||
| 803 | * doorbell. | ||
| 804 | * | ||
| 805 | * This is unusual, and hardware may not support it. | ||
| 806 | * | ||
| 807 | * Return: Zero on success, otherwise an error number. | ||
| 808 | */ | ||
| 809 | static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits) | ||
| 810 | { | ||
| 811 | if (!ntb->ops->peer_db_clear) | ||
| 812 | return -EINVAL; | ||
| 813 | |||
| 814 | return ntb->ops->peer_db_clear(ntb, db_bits); | ||
| 815 | } | ||
| 816 | |||
| 817 | /** | ||
| 818 | * ntb_peer_db_read_mask() - read the peer doorbell mask | ||
| 819 | * @ntb: NTB device context. | ||
| 820 | * | ||
| 821 | * Read the peer doorbell mask register, and return the bits that are set. | ||
| 822 | * | ||
| 823 | * This is unusual, and hardware may not support it. | ||
| 824 | * | ||
| 825 | * Return: The bits currently set in the peer doorbell mask register. | ||
| 826 | */ | ||
| 827 | static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb) | ||
| 828 | { | ||
| 829 | if (!ntb->ops->peer_db_read_mask) | ||
| 830 | return 0; | ||
| 831 | |||
| 832 | return ntb->ops->peer_db_read_mask(ntb); | ||
| 833 | } | ||
| 834 | |||
| 835 | /** | ||
| 836 | * ntb_peer_db_set_mask() - set bits in the peer doorbell mask | ||
| 837 | * @ntb: NTB device context. | ||
| 838 | * @db_bits: Doorbell mask bits to set. | ||
| 839 | * | ||
| 840 | * Set bits in the peer doorbell mask register, preventing doorbell interrupts | ||
| 841 | * from being generated for those doorbell bits. Bits that were already set | ||
| 842 | * must remain set. | ||
| 843 | * | ||
| 844 | * This is unusual, and hardware may not support it. | ||
| 845 | * | ||
| 846 | * Return: Zero on success, otherwise an error number. | ||
| 847 | */ | ||
| 848 | static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits) | ||
| 849 | { | ||
| 850 | if (!ntb->ops->peer_db_set_mask) | ||
| 851 | return -EINVAL; | ||
| 852 | |||
| 853 | return ntb->ops->peer_db_set_mask(ntb, db_bits); | ||
| 854 | } | ||
| 855 | |||
| 856 | /** | ||
| 857 | * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask | ||
| 858 | * @ntb: NTB device context. | ||
| 859 | * @db_bits: Doorbell bits to clear. | ||
| 860 | * | ||
| 861 | * Clear bits in the peer doorbell mask register, allowing doorbell interrupts | ||
| 862 | * to be generated for those doorbell bits. If the hardware does not | ||
| 863 | * generate the interrupt on clearing the mask bit, then the driver should not | ||
| 864 | * implement this function! | ||
| 865 | * | ||
| 866 | * This is unusual, and hardware may not support it. | ||
| 867 | * | ||
| 868 | * Return: Zero on success, otherwise an error number. | ||
| 869 | */ | ||
| 870 | static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | ||
| 871 | { | ||
| 872 | if (!ntb->ops->peer_db_clear_mask) | ||
| 873 | return -EINVAL; | ||
| 874 | |||
| 875 | return ntb->ops->peer_db_clear_mask(ntb, db_bits); | ||
| 876 | } | ||
| 877 | |||
| 878 | /** | ||
| 879 | * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads | ||
| 880 | * @ntb: NTB device context. | ||
| 881 | * | ||
| 882 | * It is possible for some ntb hardware to be affected by errata. Hardware | ||
| 883 | * drivers can advise clients to avoid using scratchpads. Clients may ignore | ||
| 884 | * this advice, though caution is recommended. | ||
| 885 | * | ||
| 886 | * Return: Zero if it is safe to use scratchpads, or One if it is not safe. | ||
| 887 | */ | ||
| 888 | static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb) | ||
| 889 | { | ||
| 890 | if (!ntb->ops->spad_is_unsafe) | ||
| 891 | return 0; | ||
| 892 | |||
| 893 | return ntb->ops->spad_is_unsafe(ntb); | ||
| 894 | } | ||
| 895 | |||
| 896 | /** | ||
| 897 | * ntb_spad_count() - get the number of scratchpads | ||
| 898 | * @ntb: NTB device context. | ||
| 899 | * | ||
| 900 | * Hardware and topology may support a different number of scratchpads. | ||
| 901 | * | ||
| 902 | * Return: the number of scratchpads. | ||
| 903 | */ | ||
| 904 | static inline int ntb_spad_count(struct ntb_dev *ntb) | ||
| 905 | { | ||
| 906 | return ntb->ops->spad_count(ntb); | ||
| 907 | } | ||
| 908 | |||
| 909 | /** | ||
| 910 | * ntb_spad_read() - read the local scratchpad register | ||
| 911 | * @ntb: NTB device context. | ||
| 912 | * @idx: Scratchpad index. | ||
| 913 | * | ||
| 914 | * Read the local scratchpad register, and return the value. | ||
| 915 | * | ||
| 916 | * Return: The value of the local scratchpad register. | ||
| 917 | */ | ||
| 918 | static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx) | ||
| 919 | { | ||
| 920 | return ntb->ops->spad_read(ntb, idx); | ||
| 921 | } | ||
| 922 | |||
| 923 | /** | ||
| 924 | * ntb_spad_write() - write the local scratchpad register | ||
| 925 | * @ntb: NTB device context. | ||
| 926 | * @idx: Scratchpad index. | ||
| 927 | * @val: Scratchpad value. | ||
| 928 | * | ||
| 929 | * Write the value to the local scratchpad register. | ||
| 930 | * | ||
| 931 | * Return: Zero on success, otherwise an error number. | ||
| 932 | */ | ||
| 933 | static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) | ||
| 934 | { | ||
| 935 | return ntb->ops->spad_write(ntb, idx, val); | ||
| 936 | } | ||
| 937 | |||
| 938 | /** | ||
| 939 | * ntb_peer_spad_addr() - address of the peer scratchpad register | ||
| 940 | * @ntb: NTB device context. | ||
| 941 | * @idx: Scratchpad index. | ||
| 942 | * @spad_addr: OUT - The address of the peer scratchpad register. | ||
| 943 | * | ||
| 944 | * Return the address of the peer scratchpad register. This may be used, for | ||
| 945 | * example, by drivers that offload memory copy operations to a dma engine. | ||
| 946 | * | ||
| 947 | * Return: Zero on success, otherwise an error number. | ||
| 948 | */ | ||
| 949 | static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, | ||
| 950 | phys_addr_t *spad_addr) | ||
| 951 | { | ||
| 952 | return ntb->ops->peer_spad_addr(ntb, idx, spad_addr); | ||
| 953 | } | ||
| 954 | |||
| 955 | /** | ||
| 956 | * ntb_peer_spad_read() - read the peer scratchpad register | ||
| 957 | * @ntb: NTB device context. | ||
| 958 | * @idx: Scratchpad index. | ||
| 959 | * | ||
| 960 | * Read the peer scratchpad register, and return the value. | ||
| 961 | * | ||
| 962 | * Return: The value of the peer scratchpad register. | ||
| 963 | */ | ||
| 964 | static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx) | ||
| 965 | { | ||
| 966 | return ntb->ops->peer_spad_read(ntb, idx); | ||
| 967 | } | ||
| 968 | |||
| 969 | /** | ||
| 970 | * ntb_peer_spad_write() - write the peer scratchpad register | ||
| 971 | * @ntb: NTB device context. | ||
| 972 | * @idx: Scratchpad index. | ||
| 973 | * @val: Scratchpad value. | ||
| 974 | * | ||
| 975 | * Write the value to the peer scratchpad register. | ||
| 976 | * | ||
| 977 | * Return: Zero on success, otherwise an error number. | ||
| 978 | */ | ||
| 979 | static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val) | ||
| 980 | { | ||
| 981 | return ntb->ops->peer_spad_write(ntb, idx, val); | ||
| 982 | } | ||
| 983 | |||
| 984 | #endif | ||
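
As an illustration of how a client might combine the accessors added above, a minimal mailbox send path writes a local scratchpad and then rings the peer doorbell so the remote side is interrupted and fetches the value. This is only a sketch; the function name, scratchpad index and doorbell bit are made up and are not part of the header.

#include <linux/ntb.h>

#define MSG_SPAD 0              /* hypothetical scratchpad index */
#define MSG_DB   (1ULL << 0)    /* hypothetical doorbell bit */

/* Post a 32-bit message to the peer: local scratchpad first, then its doorbell. */
static int example_post_msg(struct ntb_dev *ntb, u32 msg)
{
        int rc;

        if (ntb_spad_is_unsafe(ntb) || MSG_SPAD >= ntb_spad_count(ntb))
                return -EINVAL;

        rc = ntb_spad_write(ntb, MSG_SPAD, msg);
        if (rc)
                return rc;

        /*
         * The remote client's doorbell handler can now fetch the value with
         * ntb_peer_spad_read(ntb, MSG_SPAD) on its side of the bridge.
         */
        return ntb_peer_db_set(ntb, MSG_DB);
}
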
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h new file mode 100644 index 000000000000..2862861366a5 --- /dev/null +++ b/include/linux/ntb_transport.h | |||
| @@ -0,0 +1,85 @@ | |||
| 1 | /* | ||
| 2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 3 | * redistributing this file, you may do so under either license. | ||
| 4 | * | ||
| 5 | * GPL LICENSE SUMMARY | ||
| 6 | * | ||
| 7 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | ||
| 8 | * Copyright (C) 2015 EMC Corporation. All Rights Reserved. | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of version 2 of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * BSD LICENSE | ||
| 15 | * | ||
| 16 | * Copyright(c) 2012 Intel Corporation. All rights reserved. | ||
| 17 | * Copyright (C) 2015 EMC Corporation. All Rights Reserved. | ||
| 18 | * | ||
| 19 | * Redistribution and use in source and binary forms, with or without | ||
| 20 | * modification, are permitted provided that the following conditions | ||
| 21 | * are met: | ||
| 22 | * | ||
| 23 | * * Redistributions of source code must retain the above copyright | ||
| 24 | * notice, this list of conditions and the following disclaimer. | ||
| 25 | * * Redistributions in binary form must reproduce the above copy | ||
| 26 | * notice, this list of conditions and the following disclaimer in | ||
| 27 | * the documentation and/or other materials provided with the | ||
| 28 | * distribution. | ||
| 29 | * * Neither the name of Intel Corporation nor the names of its | ||
| 30 | * contributors may be used to endorse or promote products derived | ||
| 31 | * from this software without specific prior written permission. | ||
| 32 | * | ||
| 33 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 34 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 35 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 36 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 37 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 38 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 39 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 40 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 41 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 42 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 43 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 44 | * | ||
| 45 | * PCIe NTB Transport Linux driver | ||
| 46 | * | ||
| 47 | * Contact Information: | ||
| 48 | * Jon Mason <jon.mason@intel.com> | ||
| 49 | */ | ||
| 50 | |||
| 51 | struct ntb_transport_qp; | ||
| 52 | |||
| 53 | struct ntb_transport_client { | ||
| 54 | struct device_driver driver; | ||
| 55 | int (*probe)(struct device *client_dev); | ||
| 56 | void (*remove)(struct device *client_dev); | ||
| 57 | }; | ||
| 58 | |||
| 59 | int ntb_transport_register_client(struct ntb_transport_client *drvr); | ||
| 60 | void ntb_transport_unregister_client(struct ntb_transport_client *drvr); | ||
| 61 | int ntb_transport_register_client_dev(char *device_name); | ||
| 62 | void ntb_transport_unregister_client_dev(char *device_name); | ||
| 63 | |||
| 64 | struct ntb_queue_handlers { | ||
| 65 | void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, | ||
| 66 | void *data, int len); | ||
| 67 | void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, | ||
| 68 | void *data, int len); | ||
| 69 | void (*event_handler)(void *data, int status); | ||
| 70 | }; | ||
| 71 | |||
| 72 | unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); | ||
| 73 | unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); | ||
| 74 | struct ntb_transport_qp * | ||
| 75 | ntb_transport_create_queue(void *data, struct device *client_dev, | ||
| 76 | const struct ntb_queue_handlers *handlers); | ||
| 77 | void ntb_transport_free_queue(struct ntb_transport_qp *qp); | ||
| 78 | int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, | ||
| 79 | unsigned int len); | ||
| 80 | int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, | ||
| 81 | unsigned int len); | ||
| 82 | void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); | ||
| 83 | void ntb_transport_link_up(struct ntb_transport_qp *qp); | ||
| 84 | void ntb_transport_link_down(struct ntb_transport_qp *qp); | ||
| 85 | bool ntb_transport_link_query(struct ntb_transport_qp *qp); | ||
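
To show how the pieces of this new header fit together, here is a rough client skeleton, not taken from the patch: every example_* name and the buffer size are hypothetical, error handling is trimmed, and in this sketch the cb cookie passed to the enqueue helpers is simply the buffer pointer itself.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ntb_transport.h>

#define EXAMPLE_BUF_LEN 2048                    /* hypothetical rx buffer size */

static char example_name[] = "ntb_example";     /* hypothetical client name */

static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
                       void *data, int len)
{
        /* 'data' is the cb we enqueued below; recycle the buffer afterwards. */
        ntb_transport_rx_enqueue(qp, data, data, EXAMPLE_BUF_LEN);
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
                       void *data, int len)
{
        kfree(data);            /* transmit completed; release the tx buffer */
}

static void example_event(void *qp_data, int status)
{
        /* link state changed; see ntb_transport_link_query() */
}

static const struct ntb_queue_handlers example_handlers = {
        .rx_handler     = example_rx,
        .tx_handler     = example_tx,
        .event_handler  = example_event,
};

static int example_probe(struct device *client_dev)
{
        struct ntb_transport_qp *qp;
        void *buf;

        qp = ntb_transport_create_queue(NULL, client_dev, &example_handlers);
        if (!qp)
                return -EIO;

        buf = kmalloc(EXAMPLE_BUF_LEN, GFP_KERNEL);
        if (!buf) {
                ntb_transport_free_queue(qp);
                return -ENOMEM;
        }
        ntb_transport_rx_enqueue(qp, buf, buf, EXAMPLE_BUF_LEN);

        ntb_transport_link_up(qp);
        return 0;
}

static void example_remove(struct device *client_dev)
{
        /* mirror of probe: ntb_transport_link_down() then ntb_transport_free_queue() */
}

static struct ntb_transport_client example_client = {
        .driver.name    = "ntb_example",
        .driver.owner   = THIS_MODULE,
        .probe          = example_probe,
        .remove         = example_remove,
};

static int __init example_init(void)
{
        int rc;

        rc = ntb_transport_register_client_dev(example_name);
        if (rc)
                return rc;
        return ntb_transport_register_client(&example_client);
}
module_init(example_init);
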
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 8dbd05e70f09..c0d94ed8ce9a 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -74,7 +74,7 @@ struct nvme_dev { | |||
| 74 | struct blk_mq_tag_set tagset; | 74 | struct blk_mq_tag_set tagset; |
| 75 | struct blk_mq_tag_set admin_tagset; | 75 | struct blk_mq_tag_set admin_tagset; |
| 76 | u32 __iomem *dbs; | 76 | u32 __iomem *dbs; |
| 77 | struct pci_dev *pci_dev; | 77 | struct device *dev; |
| 78 | struct dma_pool *prp_page_pool; | 78 | struct dma_pool *prp_page_pool; |
| 79 | struct dma_pool *prp_small_pool; | 79 | struct dma_pool *prp_small_pool; |
| 80 | int instance; | 80 | int instance; |
| @@ -92,6 +92,7 @@ struct nvme_dev { | |||
| 92 | work_func_t reset_workfn; | 92 | work_func_t reset_workfn; |
| 93 | struct work_struct reset_work; | 93 | struct work_struct reset_work; |
| 94 | struct work_struct probe_work; | 94 | struct work_struct probe_work; |
| 95 | struct work_struct scan_work; | ||
| 95 | char name[12]; | 96 | char name[12]; |
| 96 | char serial[20]; | 97 | char serial[20]; |
| 97 | char model[40]; | 98 | char model[40]; |
| @@ -146,25 +147,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | |||
| 146 | return (sector >> (ns->lba_shift - 9)); | 147 | return (sector >> (ns->lba_shift - 9)); |
| 147 | } | 148 | } |
| 148 | 149 | ||
| 149 | /** | 150 | int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, |
| 150 | * nvme_free_iod - frees an nvme_iod | 151 | void *buf, unsigned bufflen); |
| 151 | * @dev: The device that the I/O was submitted to | 152 | int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, |
| 152 | * @iod: The memory to free | 153 | void *buffer, void __user *ubuffer, unsigned bufflen, |
| 153 | */ | 154 | u32 *result, unsigned timeout); |
| 154 | void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); | 155 | int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); |
| 155 | 156 | int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, | |
| 156 | int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); | 157 | struct nvme_id_ns **id); |
| 157 | struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, | 158 | int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); |
| 158 | unsigned long addr, unsigned length); | ||
| 159 | void nvme_unmap_user_pages(struct nvme_dev *dev, int write, | ||
| 160 | struct nvme_iod *iod); | ||
| 161 | int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *, | ||
| 162 | struct nvme_command *, u32 *); | ||
| 163 | int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); | ||
| 164 | int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, | ||
| 165 | u32 *result); | ||
| 166 | int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, | ||
| 167 | dma_addr_t dma_addr); | ||
| 168 | int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, | 159 | int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, |
| 169 | dma_addr_t dma_addr, u32 *result); | 160 | dma_addr_t dma_addr, u32 *result); |
| 170 | int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, | 161 | int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, |
diff --git a/include/linux/of.h b/include/linux/of.h index b871ff9d81d7..edc068d19c79 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -120,6 +120,12 @@ extern struct device_node *of_aliases; | |||
| 120 | extern struct device_node *of_stdout; | 120 | extern struct device_node *of_stdout; |
| 121 | extern raw_spinlock_t devtree_lock; | 121 | extern raw_spinlock_t devtree_lock; |
| 122 | 122 | ||
| 123 | /* flag descriptions (need to be visible even when !CONFIG_OF) */ | ||
| 124 | #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ | ||
| 125 | #define OF_DETACHED 2 /* node has been detached from the device tree */ | ||
| 126 | #define OF_POPULATED 3 /* device already created for the node */ | ||
| 127 | #define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ | ||
| 128 | |||
| 123 | #ifdef CONFIG_OF | 129 | #ifdef CONFIG_OF |
| 124 | void of_core_init(void); | 130 | void of_core_init(void); |
| 125 | 131 | ||
| @@ -128,7 +134,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode) | |||
| 128 | return fwnode && fwnode->type == FWNODE_OF; | 134 | return fwnode && fwnode->type == FWNODE_OF; |
| 129 | } | 135 | } |
| 130 | 136 | ||
| 131 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | 137 | static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) |
| 132 | { | 138 | { |
| 133 | return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; | 139 | return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; |
| 134 | } | 140 | } |
| @@ -219,12 +225,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) | |||
| 219 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) | 225 | #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) |
| 220 | #endif | 226 | #endif |
| 221 | 227 | ||
| 222 | /* flag descriptions */ | ||
| 223 | #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ | ||
| 224 | #define OF_DETACHED 2 /* node has been detached from the device tree */ | ||
| 225 | #define OF_POPULATED 3 /* device already created for the node */ | ||
| 226 | #define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ | ||
| 227 | |||
| 228 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) | 228 | #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) |
| 229 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) | 229 | #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) |
| 230 | 230 | ||
| @@ -387,7 +387,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode) | |||
| 387 | return false; | 387 | return false; |
| 388 | } | 388 | } |
| 389 | 389 | ||
| 390 | static inline struct device_node *of_node(struct fwnode_handle *fwnode) | 390 | static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) |
| 391 | { | 391 | { |
| 392 | return NULL; | 392 | return NULL; |
| 393 | } | 393 | } |
| @@ -428,6 +428,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path, | |||
| 428 | return NULL; | 428 | return NULL; |
| 429 | } | 429 | } |
| 430 | 430 | ||
| 431 | static inline struct device_node *of_find_node_by_phandle(phandle handle) | ||
| 432 | { | ||
| 433 | return NULL; | ||
| 434 | } | ||
| 435 | |||
| 431 | static inline struct device_node *of_get_parent(const struct device_node *node) | 436 | static inline struct device_node *of_get_parent(const struct device_node *node) |
| 432 | { | 437 | { |
| 433 | return NULL; | 438 | return NULL; |
| @@ -673,7 +678,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag | |||
| 673 | #if defined(CONFIG_OF) && defined(CONFIG_NUMA) | 678 | #if defined(CONFIG_OF) && defined(CONFIG_NUMA) |
| 674 | extern int of_node_to_nid(struct device_node *np); | 679 | extern int of_node_to_nid(struct device_node *np); |
| 675 | #else | 680 | #else |
| 676 | static inline int of_node_to_nid(struct device_node *device) { return 0; } | 681 | static inline int of_node_to_nid(struct device_node *device) |
| 682 | { | ||
| 683 | return NUMA_NO_NODE; | ||
| 684 | } | ||
| 677 | #endif | 685 | #endif |
| 678 | 686 | ||
| 679 | static inline struct device_node *of_find_matching_node( | 687 | static inline struct device_node *of_find_matching_node( |
| @@ -821,7 +829,7 @@ static inline int of_property_read_string_index(struct device_node *np, | |||
| 821 | * @propname: name of the property to be searched. | 829 | * @propname: name of the property to be searched. |
| 822 | * | 830 | * |
| 823 | * Search for a property in a device node. | 831 | * Search for a property in a device node. |
| 824 | * Returns true if the property exist false otherwise. | 832 | * Returns true if the property exists false otherwise. |
| 825 | */ | 833 | */ |
| 826 | static inline bool of_property_read_bool(const struct device_node *np, | 834 | static inline bool of_property_read_bool(const struct device_node *np, |
| 827 | const char *propname) | 835 | const char *propname) |
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 22801b10cef5..cc7dd687a89d 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
| @@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev); | |||
| 33 | extern int of_device_register(struct platform_device *ofdev); | 33 | extern int of_device_register(struct platform_device *ofdev); |
| 34 | extern void of_device_unregister(struct platform_device *ofdev); | 34 | extern void of_device_unregister(struct platform_device *ofdev); |
| 35 | 35 | ||
| 36 | extern const void *of_device_get_match_data(const struct device *dev); | ||
| 37 | |||
| 36 | extern ssize_t of_device_get_modalias(struct device *dev, | 38 | extern ssize_t of_device_get_modalias(struct device *dev, |
| 37 | char *str, ssize_t len); | 39 | char *str, ssize_t len); |
| 38 | 40 | ||
| @@ -57,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np); | |||
| 57 | #else /* CONFIG_OF */ | 59 | #else /* CONFIG_OF */ |
| 58 | 60 | ||
| 59 | static inline int of_driver_match_device(struct device *dev, | 61 | static inline int of_driver_match_device(struct device *dev, |
| 60 | struct device_driver *drv) | 62 | const struct device_driver *drv) |
| 61 | { | 63 | { |
| 62 | return 0; | 64 | return 0; |
| 63 | } | 65 | } |
| @@ -65,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev, | |||
| 65 | static inline void of_device_uevent(struct device *dev, | 67 | static inline void of_device_uevent(struct device *dev, |
| 66 | struct kobj_uevent_env *env) { } | 68 | struct kobj_uevent_env *env) { } |
| 67 | 69 | ||
| 70 | static inline const void *of_device_get_match_data(const struct device *dev) | ||
| 71 | { | ||
| 72 | return NULL; | ||
| 73 | } | ||
| 74 | |||
| 68 | static inline int of_device_get_modalias(struct device *dev, | 75 | static inline int of_device_get_modalias(struct device *dev, |
| 69 | char *str, ssize_t len) | 76 | char *str, ssize_t len) |
| 70 | { | 77 | { |
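
of_device_get_match_data() saves a driver from re-running of_match_device() just to reach the .data pointer of the matched table entry. A small sketch of the intended use; the compatible strings and the example_cfg structure are invented for illustration.

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Per-compatible configuration; the values are made up for illustration. */
struct example_cfg {
        unsigned int fifo_depth;
};

static const struct example_cfg example_cfg_v1 = { .fifo_depth = 16 };
static const struct example_cfg example_cfg_v2 = { .fifo_depth = 64 };

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-v1", .data = &example_cfg_v1 },
        { .compatible = "vendor,example-v2", .data = &example_cfg_v2 },
        { }
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_cfg *cfg;

        /* NULL when nothing matched, or when CONFIG_OF is disabled. */
        cfg = of_device_get_match_data(&pdev->dev);
        if (!cfg)
                return -ENODEV;

        dev_info(&pdev->dev, "FIFO depth %u\n", cfg->fifo_depth);
        return 0;
}
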
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 56bc026c143f..98ba7525929e 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
| @@ -23,6 +23,9 @@ struct of_dma { | |||
| 23 | struct device_node *of_node; | 23 | struct device_node *of_node; |
| 24 | struct dma_chan *(*of_dma_xlate) | 24 | struct dma_chan *(*of_dma_xlate) |
| 25 | (struct of_phandle_args *, struct of_dma *); | 25 | (struct of_phandle_args *, struct of_dma *); |
| 26 | void *(*of_dma_route_allocate) | ||
| 27 | (struct of_phandle_args *, struct of_dma *); | ||
| 28 | struct dma_router *dma_router; | ||
| 26 | void *of_dma_data; | 29 | void *of_dma_data; |
| 27 | }; | 30 | }; |
| 28 | 31 | ||
| @@ -37,12 +40,20 @@ extern int of_dma_controller_register(struct device_node *np, | |||
| 37 | (struct of_phandle_args *, struct of_dma *), | 40 | (struct of_phandle_args *, struct of_dma *), |
| 38 | void *data); | 41 | void *data); |
| 39 | extern void of_dma_controller_free(struct device_node *np); | 42 | extern void of_dma_controller_free(struct device_node *np); |
| 43 | |||
| 44 | extern int of_dma_router_register(struct device_node *np, | ||
| 45 | void *(*of_dma_route_allocate) | ||
| 46 | (struct of_phandle_args *, struct of_dma *), | ||
| 47 | struct dma_router *dma_router); | ||
| 48 | #define of_dma_router_free of_dma_controller_free | ||
| 49 | |||
| 40 | extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 50 | extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
| 41 | const char *name); | 51 | const char *name); |
| 42 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 52 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
| 43 | struct of_dma *ofdma); | 53 | struct of_dma *ofdma); |
| 44 | extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, | 54 | extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, |
| 45 | struct of_dma *ofdma); | 55 | struct of_dma *ofdma); |
| 56 | |||
| 46 | #else | 57 | #else |
| 47 | static inline int of_dma_controller_register(struct device_node *np, | 58 | static inline int of_dma_controller_register(struct device_node *np, |
| 48 | struct dma_chan *(*of_dma_xlate) | 59 | struct dma_chan *(*of_dma_xlate) |
| @@ -56,6 +67,16 @@ static inline void of_dma_controller_free(struct device_node *np) | |||
| 56 | { | 67 | { |
| 57 | } | 68 | } |
| 58 | 69 | ||
| 70 | static inline int of_dma_router_register(struct device_node *np, | ||
| 71 | void *(*of_dma_route_allocate) | ||
| 72 | (struct of_phandle_args *, struct of_dma *), | ||
| 73 | struct dma_router *dma_router) | ||
| 74 | { | ||
| 75 | return -ENODEV; | ||
| 76 | } | ||
| 77 | |||
| 78 | #define of_dma_router_free of_dma_controller_free | ||
| 79 | |||
| 59 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
| 60 | const char *name) | 81 | const char *name) |
| 61 | { | 82 | { |
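
of_dma_router_register() is the device-tree hook for DMA request routers (event crossbars): the route_allocate callback claims a crossbar output for the consumer's dma-spec and returns router-private data that route_free later releases. A rough sketch, written under the assumption that struct dma_router carries the dev and route_free fields introduced alongside this hook; the crossbar programming itself is stubbed out.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void example_route_free(struct device *dev, void *route_data)
{
        kfree(route_data);      /* undo whatever route_allocate set up */
}

static void *example_route_allocate(struct of_phandle_args *dma_spec,
                                    struct of_dma *ofdma)
{
        u32 *xbar_out;

        xbar_out = kzalloc(sizeof(*xbar_out), GFP_KERNEL);
        if (!xbar_out)
                return ERR_PTR(-ENOMEM);

        /*
         * A real router would pick a free crossbar output here, program the
         * hardware, and rewrite dma_spec so it targets the DMA controller.
         */
        *xbar_out = dma_spec->args[0];
        return xbar_out;
}

static struct dma_router example_router;

static int example_xbar_probe(struct platform_device *pdev)
{
        example_router.dev = &pdev->dev;
        example_router.route_free = example_route_free;

        return of_dma_router_register(pdev->dev.of_node,
                                      example_route_allocate,
                                      &example_router);
}
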
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 587ee507965d..df9ef3801812 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h | |||
| @@ -37,7 +37,7 @@ extern bool of_fdt_is_big_endian(const void *blob, | |||
| 37 | unsigned long node); | 37 | unsigned long node); |
| 38 | extern int of_fdt_match(const void *blob, unsigned long node, | 38 | extern int of_fdt_match(const void *blob, unsigned long node, |
| 39 | const char *const *compat); | 39 | const char *const *compat); |
| 40 | extern void of_fdt_unflatten_tree(unsigned long *blob, | 40 | extern void of_fdt_unflatten_tree(const unsigned long *blob, |
| 41 | struct device_node **mynodes); | 41 | struct device_node **mynodes); |
| 42 | 42 | ||
| 43 | /* TBD: Temporary export of fdt globals - remove when code fully merged */ | 43 | /* TBD: Temporary export of fdt globals - remove when code fully merged */ |
| @@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, | |||
| 64 | extern int early_init_dt_scan_memory(unsigned long node, const char *uname, | 64 | extern int early_init_dt_scan_memory(unsigned long node, const char *uname, |
| 65 | int depth, void *data); | 65 | int depth, void *data); |
| 66 | extern void early_init_fdt_scan_reserved_mem(void); | 66 | extern void early_init_fdt_scan_reserved_mem(void); |
| 67 | extern void early_init_fdt_reserve_self(void); | ||
| 67 | extern void early_init_dt_add_memory_arch(u64 base, u64 size); | 68 | extern void early_init_dt_add_memory_arch(u64 base, u64 size); |
| 68 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, | 69 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, |
| 69 | bool no_map); | 70 | bool no_map); |
| @@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset); | |||
| 91 | extern void of_fdt_limit_memory(int limit); | 92 | extern void of_fdt_limit_memory(int limit); |
| 92 | #else /* CONFIG_OF_FLATTREE */ | 93 | #else /* CONFIG_OF_FLATTREE */ |
| 93 | static inline void early_init_fdt_scan_reserved_mem(void) {} | 94 | static inline void early_init_fdt_scan_reserved_mem(void) {} |
| 95 | static inline void early_init_fdt_reserve_self(void) {} | ||
| 94 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } | 96 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } |
| 95 | static inline void unflatten_device_tree(void) {} | 97 | static inline void unflatten_device_tree(void) {} |
| 96 | static inline void unflatten_and_copy_device_tree(void) {} | 98 | static inline void unflatten_and_copy_device_tree(void) {} |
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index 7bc92e050608..f8bcd0e21a26 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h | |||
| @@ -45,6 +45,8 @@ int of_graph_parse_endpoint(const struct device_node *node, | |||
| 45 | struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); | 45 | struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); |
| 46 | struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, | 46 | struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, |
| 47 | struct device_node *previous); | 47 | struct device_node *previous); |
| 48 | struct device_node *of_graph_get_endpoint_by_regs( | ||
| 49 | const struct device_node *parent, int port_reg, int reg); | ||
| 48 | struct device_node *of_graph_get_remote_port_parent( | 50 | struct device_node *of_graph_get_remote_port_parent( |
| 49 | const struct device_node *node); | 51 | const struct device_node *node); |
| 50 | struct device_node *of_graph_get_remote_port(const struct device_node *node); | 52 | struct device_node *of_graph_get_remote_port(const struct device_node *node); |
| @@ -69,6 +71,12 @@ static inline struct device_node *of_graph_get_next_endpoint( | |||
| 69 | return NULL; | 71 | return NULL; |
| 70 | } | 72 | } |
| 71 | 73 | ||
| 74 | static inline struct device_node *of_graph_get_endpoint_by_regs( | ||
| 75 | const struct device_node *parent, int port_reg, int reg) | ||
| 76 | { | ||
| 77 | return NULL; | ||
| 78 | } | ||
| 79 | |||
| 72 | static inline struct device_node *of_graph_get_remote_port_parent( | 80 | static inline struct device_node *of_graph_get_remote_port_parent( |
| 73 | const struct device_node *node) | 81 | const struct device_node *node) |
| 74 | { | 82 | { |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 44b2f6f7bbd8..7deecb7bca5e 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -32,6 +32,8 @@ enum oom_scan_t { | |||
| 32 | /* Thread is the potential origin of an oom condition; kill first on oom */ | 32 | /* Thread is the potential origin of an oom condition; kill first on oom */ |
| 33 | #define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) | 33 | #define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) |
| 34 | 34 | ||
| 35 | extern struct mutex oom_lock; | ||
| 36 | |||
| 35 | static inline void set_current_oom_origin(void) | 37 | static inline void set_current_oom_origin(void) |
| 36 | { | 38 | { |
| 37 | current->signal->oom_flags |= OOM_FLAG_ORIGIN; | 39 | current->signal->oom_flags |= OOM_FLAG_ORIGIN; |
| @@ -47,9 +49,7 @@ static inline bool oom_task_origin(const struct task_struct *p) | |||
| 47 | return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); | 49 | return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); |
| 48 | } | 50 | } |
| 49 | 51 | ||
| 50 | extern void mark_tsk_oom_victim(struct task_struct *tsk); | 52 | extern void mark_oom_victim(struct task_struct *tsk); |
| 51 | |||
| 52 | extern void unmark_oom_victim(void); | ||
| 53 | 53 | ||
| 54 | extern unsigned long oom_badness(struct task_struct *p, | 54 | extern unsigned long oom_badness(struct task_struct *p, |
| 55 | struct mem_cgroup *memcg, const nodemask_t *nodemask, | 55 | struct mem_cgroup *memcg, const nodemask_t *nodemask, |
| @@ -62,9 +62,6 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
| 62 | struct mem_cgroup *memcg, nodemask_t *nodemask, | 62 | struct mem_cgroup *memcg, nodemask_t *nodemask, |
| 63 | const char *message); | 63 | const char *message); |
| 64 | 64 | ||
| 65 | extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags); | ||
| 66 | extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags); | ||
| 67 | |||
| 68 | extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, | 65 | extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, |
| 69 | int order, const nodemask_t *nodemask, | 66 | int order, const nodemask_t *nodemask, |
| 70 | struct mem_cgroup *memcg); | 67 | struct mem_cgroup *memcg); |
| @@ -75,6 +72,9 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, | |||
| 75 | 72 | ||
| 76 | extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 73 | extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
| 77 | int order, nodemask_t *mask, bool force_kill); | 74 | int order, nodemask_t *mask, bool force_kill); |
| 75 | |||
| 76 | extern void exit_oom_victim(void); | ||
| 77 | |||
| 78 | extern int register_oom_notifier(struct notifier_block *nb); | 78 | extern int register_oom_notifier(struct notifier_block *nb); |
| 79 | extern int unregister_oom_notifier(struct notifier_block *nb); | 79 | extern int unregister_oom_notifier(struct notifier_block *nb); |
| 80 | 80 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f34e040b34e9..41c93844fb1d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) | |||
| 631 | 1 << PG_private | 1 << PG_private_2 | \ | 631 | 1 << PG_private | 1 << PG_private_2 | \ |
| 632 | 1 << PG_writeback | 1 << PG_reserved | \ | 632 | 1 << PG_writeback | 1 << PG_reserved | \ |
| 633 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | 633 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ |
| 634 | 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ | 634 | 1 << PG_unevictable | __PG_MLOCKED | \ |
| 635 | __PG_COMPOUND_LOCK) | 635 | __PG_COMPOUND_LOCK) |
| 636 | 636 | ||
| 637 | /* | 637 | /* |
| 638 | * Flags checked when a page is prepped for return by the page allocator. | 638 | * Flags checked when a page is prepped for return by the page allocator. |
| 639 | * Pages being prepped should not have any flags set. If they are set, | 639 | * Pages being prepped should not have these flags set. If they are set, |
| 640 | * there has been a kernel bug or struct page corruption. | 640 | * there has been a kernel bug or struct page corruption. |
| 641 | * | ||
| 642 | * __PG_HWPOISON is exceptional because it needs to be kept beyond page's | ||
| 643 | * alloc-free cycle to prevent from reusing the page. | ||
| 641 | */ | 644 | */ |
| 642 | #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) | 645 | #define PAGE_FLAGS_CHECK_AT_PREP \ |
| 646 | (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) | ||
| 643 | 647 | ||
| 644 | #define PAGE_FLAGS_PRIVATE \ | 648 | #define PAGE_FLAGS_PRIVATE \ |
| 645 | (1 << PG_private | 1 << PG_private_2) | 649 | (1 << PG_private | 1 << PG_private_2) |
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index b48c3471c254..cacaabea8a09 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h | |||
| @@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops; | |||
| 8 | extern void __reset_page_owner(struct page *page, unsigned int order); | 8 | extern void __reset_page_owner(struct page *page, unsigned int order); |
| 9 | extern void __set_page_owner(struct page *page, | 9 | extern void __set_page_owner(struct page *page, |
| 10 | unsigned int order, gfp_t gfp_mask); | 10 | unsigned int order, gfp_t gfp_mask); |
| 11 | extern gfp_t __get_page_owner_gfp(struct page *page); | ||
| 11 | 12 | ||
| 12 | static inline void reset_page_owner(struct page *page, unsigned int order) | 13 | static inline void reset_page_owner(struct page *page, unsigned int order) |
| 13 | { | 14 | { |
| @@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page, | |||
| 25 | 26 | ||
| 26 | __set_page_owner(page, order, gfp_mask); | 27 | __set_page_owner(page, order, gfp_mask); |
| 27 | } | 28 | } |
| 29 | |||
| 30 | static inline gfp_t get_page_owner_gfp(struct page *page) | ||
| 31 | { | ||
| 32 | if (likely(!page_owner_inited)) | ||
| 33 | return 0; | ||
| 34 | |||
| 35 | return __get_page_owner_gfp(page); | ||
| 36 | } | ||
| 28 | #else | 37 | #else |
| 29 | static inline void reset_page_owner(struct page *page, unsigned int order) | 38 | static inline void reset_page_owner(struct page *page, unsigned int order) |
| 30 | { | 39 | { |
| @@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page, | |||
| 33 | unsigned int order, gfp_t gfp_mask) | 42 | unsigned int order, gfp_t gfp_mask) |
| 34 | { | 43 | { |
| 35 | } | 44 | } |
| 45 | static inline gfp_t get_page_owner_gfp(struct page *page) | ||
| 46 | { | ||
| 47 | return 0; | ||
| 48 | } | ||
| 36 | 49 | ||
| 37 | #endif /* CONFIG_PAGE_OWNER */ | 50 | #endif /* CONFIG_PAGE_OWNER */ |
| 38 | #endif /* __LINUX_PAGE_OWNER_H */ | 51 | #endif /* __LINUX_PAGE_OWNER_H */ |
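
get_page_owner_gfp() exists so that code splitting a higher-order page can copy the recorded allocation mask onto the tail pages instead of losing it. A hedged sketch of that usage pattern, not the literal mm/ code:

#include <linux/mm.h>
#include <linux/page_owner.h>

/* Copy the recorded allocation mask from the head page to each tail page. */
static void example_split_owner(struct page *page, unsigned int order)
{
        gfp_t gfp_mask = get_page_owner_gfp(page);
        int i;

        for (i = 1; i < (1 << order); i++)
                set_page_owner(page + i, 0, gfp_mask);
}
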
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 4b3736f7065c..a6c78e00ea96 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -651,7 +651,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
| 651 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | 651 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
| 652 | pgoff_t index, gfp_t gfp_mask); | 652 | pgoff_t index, gfp_t gfp_mask); |
| 653 | extern void delete_from_page_cache(struct page *page); | 653 | extern void delete_from_page_cache(struct page *page); |
| 654 | extern void __delete_from_page_cache(struct page *page, void *shadow); | 654 | extern void __delete_from_page_cache(struct page *page, void *shadow, |
| 655 | struct mem_cgroup *memcg); | ||
| 655 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); | 656 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); |
| 656 | 657 | ||
| 657 | /* | 658 | /* |
| @@ -670,4 +671,10 @@ static inline int add_to_page_cache(struct page *page, | |||
| 670 | return error; | 671 | return error; |
| 671 | } | 672 | } |
| 672 | 673 | ||
| 674 | static inline unsigned long dir_pages(struct inode *inode) | ||
| 675 | { | ||
| 676 | return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> | ||
| 677 | PAGE_CACHE_SHIFT; | ||
| 678 | } | ||
| 679 | |||
| 673 | #endif /* _LINUX_PAGEMAP_H */ | 680 | #endif /* _LINUX_PAGEMAP_H */ |
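
dir_pages() centralizes a rounding computation several filesystems previously open-coded: the number of page-cache pages needed to cover a directory's i_size. An illustrative walk over those pages; the parsing step is left out and the helper name is invented.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Visit every page-cache page that backs a directory inode. */
static int example_scan_dir(struct inode *dir)
{
        unsigned long npages = dir_pages(dir);
        unsigned long n;

        for (n = 0; n < npages; n++) {
                struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

                if (IS_ERR(page))
                        return PTR_ERR(page);

                /* ... parse the directory entries held in this page ... */
                page_cache_release(page);
        }
        return 0;
}
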
diff --git a/include/linux/parport.h b/include/linux/parport.h index c22f12547324..58e3c64c6b49 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/wait.h> | 13 | #include <linux/wait.h> |
| 14 | #include <linux/irqreturn.h> | 14 | #include <linux/irqreturn.h> |
| 15 | #include <linux/semaphore.h> | 15 | #include <linux/semaphore.h> |
| 16 | #include <linux/device.h> | ||
| 16 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
| 17 | #include <uapi/linux/parport.h> | 18 | #include <uapi/linux/parport.h> |
| 18 | 19 | ||
| @@ -145,6 +146,8 @@ struct pardevice { | |||
| 145 | unsigned int flags; | 146 | unsigned int flags; |
| 146 | struct pardevice *next; | 147 | struct pardevice *next; |
| 147 | struct pardevice *prev; | 148 | struct pardevice *prev; |
| 149 | struct device dev; | ||
| 150 | bool devmodel; | ||
| 148 | struct parport_state *state; /* saved status over preemption */ | 151 | struct parport_state *state; /* saved status over preemption */ |
| 149 | wait_queue_head_t wait_q; | 152 | wait_queue_head_t wait_q; |
| 150 | unsigned long int time; | 153 | unsigned long int time; |
| @@ -156,6 +159,8 @@ struct pardevice { | |||
| 156 | void * sysctl_table; | 159 | void * sysctl_table; |
| 157 | }; | 160 | }; |
| 158 | 161 | ||
| 162 | #define to_pardevice(n) container_of(n, struct pardevice, dev) | ||
| 163 | |||
| 159 | /* IEEE1284 information */ | 164 | /* IEEE1284 information */ |
| 160 | 165 | ||
| 161 | /* IEEE1284 phases. These are exposed to userland through ppdev IOCTL | 166 | /* IEEE1284 phases. These are exposed to userland through ppdev IOCTL |
| @@ -195,7 +200,7 @@ struct parport { | |||
| 195 | * This may unfortunately be null if the | 200 | |
| 196 | * port has a legacy driver. | 201 | * port has a legacy driver. |
| 197 | */ | 202 | */ |
| 198 | 203 | struct device bus_dev; /* to link with the bus */ | |
| 199 | struct parport *physport; | 204 | struct parport *physport; |
| 200 | /* If this is a non-default mux | 205 | /* If this is a non-default mux |
| 201 | parport, i.e. we're a clone of a real | 206 | parport, i.e. we're a clone of a real |
| @@ -245,15 +250,26 @@ struct parport { | |||
| 245 | struct parport *slaves[3]; | 250 | struct parport *slaves[3]; |
| 246 | }; | 251 | }; |
| 247 | 252 | ||
| 253 | #define to_parport_dev(n) container_of(n, struct parport, bus_dev) | ||
| 254 | |||
| 248 | #define DEFAULT_SPIN_TIME 500 /* us */ | 255 | #define DEFAULT_SPIN_TIME 500 /* us */ |
| 249 | 256 | ||
| 250 | struct parport_driver { | 257 | struct parport_driver { |
| 251 | const char *name; | 258 | const char *name; |
| 252 | void (*attach) (struct parport *); | 259 | void (*attach) (struct parport *); |
| 253 | void (*detach) (struct parport *); | 260 | void (*detach) (struct parport *); |
| 261 | void (*match_port)(struct parport *); | ||
| 262 | int (*probe)(struct pardevice *); | ||
| 263 | struct device_driver driver; | ||
| 264 | bool devmodel; | ||
| 254 | struct list_head list; | 265 | struct list_head list; |
| 255 | }; | 266 | }; |
| 256 | 267 | ||
| 268 | #define to_parport_driver(n) container_of(n, struct parport_driver, driver) | ||
| 269 | |||
| 270 | int parport_bus_init(void); | ||
| 271 | void parport_bus_exit(void); | ||
| 272 | |||
| 257 | /* parport_register_port registers a new parallel port at the given | 273 | /* parport_register_port registers a new parallel port at the given |
| 258 | address (if one does not already exist) and returns a pointer to it. | 274 | address (if one does not already exist) and returns a pointer to it. |
| 259 | This entails claiming the I/O region, IRQ and DMA. NULL is returned | 275 | This entails claiming the I/O region, IRQ and DMA. NULL is returned |
| @@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port); | |||
| 272 | extern void parport_remove_port(struct parport *port); | 288 | extern void parport_remove_port(struct parport *port); |
| 273 | 289 | ||
| 274 | /* Register a new high-level driver. */ | 290 | /* Register a new high-level driver. */ |
| 275 | extern int parport_register_driver (struct parport_driver *); | 291 | |
| 292 | int __must_check __parport_register_driver(struct parport_driver *, | ||
| 293 | struct module *, | ||
| 294 | const char *mod_name); | ||
| 295 | /* | ||
| 296 | * parport_register_driver must be a macro so that KBUILD_MODNAME can | ||
| 297 | * be expanded | ||
| 298 | */ | ||
| 299 | #define parport_register_driver(driver) \ | ||
| 300 | __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) | ||
| 276 | 301 | ||
| 277 | /* Unregister a high-level driver. */ | 302 | /* Unregister a high-level driver. */ |
| 278 | extern void parport_unregister_driver (struct parport_driver *); | 303 | extern void parport_unregister_driver (struct parport_driver *); |
| 304 | void parport_unregister_driver(struct parport_driver *); | ||
| 279 | 305 | ||
| 280 | /* If parport_register_driver doesn't fit your needs, perhaps | 306 | /* If parport_register_driver doesn't fit your needs, perhaps |
| 281 | * parport_find_xxx does. */ | 307 | * parport_find_xxx does. */ |
| @@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id); | |||
| 288 | /* Reference counting for ports. */ | 314 | /* Reference counting for ports. */ |
| 289 | extern struct parport *parport_get_port (struct parport *); | 315 | extern struct parport *parport_get_port (struct parport *); |
| 290 | extern void parport_put_port (struct parport *); | 316 | extern void parport_put_port (struct parport *); |
| 317 | void parport_del_port(struct parport *); | ||
| 318 | |||
| 319 | struct pardev_cb { | ||
| 320 | int (*preempt)(void *); | ||
| 321 | void (*wakeup)(void *); | ||
| 322 | void *private; | ||
| 323 | void (*irq_func)(void *); | ||
| 324 | unsigned int flags; | ||
| 325 | }; | ||
| 291 | 326 | ||
| 292 | /* parport_register_device declares that a device is connected to a | 327 | /* parport_register_device declares that a device is connected to a |
| 293 | port, and tells the kernel all it needs to know. | 328 | port, and tells the kernel all it needs to know. |
| @@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port, | |||
| 301 | void (*irq_func)(void *), | 336 | void (*irq_func)(void *), |
| 302 | int flags, void *handle); | 337 | int flags, void *handle); |
| 303 | 338 | ||
| 339 | struct pardevice * | ||
| 340 | parport_register_dev_model(struct parport *port, const char *name, | ||
| 341 | const struct pardev_cb *par_dev_cb, int cnt); | ||
| 342 | |||
| 304 | /* parport_unregister unlinks a device from the chain. */ | 343 | /* parport_unregister unlinks a device from the chain. */ |
| 305 | extern void parport_unregister_device(struct pardevice *dev); | 344 | extern void parport_unregister_device(struct pardevice *dev); |
| 306 | 345 | ||
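
The additions above move parport onto the driver model: a converted driver sets .devmodel, supplies match_port() instead of relying only on attach(), and creates devices with parport_register_dev_model() plus a struct pardev_cb. A compressed sketch of such a conversion; all example_* names are hypothetical and only one port is tracked for brevity.

#include <linux/module.h>
#include <linux/parport.h>

static struct pardevice *example_pardev;        /* single port, for brevity */

static void example_match_port(struct parport *port)
{
        struct pardev_cb cb = {
                .preempt = NULL,        /* no preemption/wakeup callbacks */
                .wakeup  = NULL,
                .private = NULL,
                .flags   = 0,
        };

        example_pardev = parport_register_dev_model(port, "example", &cb, 0);
        if (!example_pardev)
                pr_err("example: failed to register on %s\n", port->name);
}

static void example_detach(struct parport *port)
{
        if (example_pardev && example_pardev->port == port) {
                parport_unregister_device(example_pardev);
                example_pardev = NULL;
        }
}

static struct parport_driver example_driver = {
        .name           = "example",
        .match_port     = example_match_port,
        .detach         = example_detach,
        .devmodel       = true,
};

static int __init example_init(void)
{
        /* expands to __parport_register_driver(&example_driver,
           THIS_MODULE, KBUILD_MODNAME) */
        return parport_register_driver(&example_driver);
}
module_init(example_init);
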
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h index 3cc21c9cc1e8..9fade5dd2e86 100644 --- a/include/linux/pata_arasan_cf_data.h +++ b/include/linux/pata_arasan_cf_data.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * Arasan Compact Flash host controller platform data header file | 4 | * Arasan Compact Flash host controller platform data header file |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2011 ST Microelectronics | 6 | * Copyright (C) 2011 ST Microelectronics |
| 7 | * Viresh Kumar <viresh.linux@gmail.com> | 7 | * Viresh Kumar <vireshk@kernel.org> |
| 8 | * | 8 | * |
| 9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
| 10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cb63a7b522ef..fcff8f865341 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -2330,6 +2330,8 @@ | |||
| 2330 | #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea | 2330 | #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea |
| 2331 | #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb | 2331 | #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb |
| 2332 | 2332 | ||
| 2333 | #define PCI_VENDOR_ID_CAVIUM 0x177d | ||
| 2334 | |||
| 2333 | #define PCI_VENDOR_ID_BELKIN 0x1799 | 2335 | #define PCI_VENDOR_ID_BELKIN 0x1799 |
| 2334 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f | 2336 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f |
| 2335 | 2337 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1b82d44b0a02..2027809433b3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -300,6 +300,11 @@ struct pmu { | |||
| 300 | * Free pmu-private AUX data structures | 300 | * Free pmu-private AUX data structures |
| 301 | */ | 301 | */ |
| 302 | void (*free_aux) (void *aux); /* optional */ | 302 | void (*free_aux) (void *aux); /* optional */ |
| 303 | |||
| 304 | /* | ||
| 305 | * Filter events for PMU-specific reasons. | ||
| 306 | */ | ||
| 307 | int (*filter_match) (struct perf_event *event); /* optional */ | ||
| 303 | }; | 308 | }; |
| 304 | 309 | ||
| 305 | /** | 310 | /** |
| @@ -479,7 +484,7 @@ struct perf_event { | |||
| 479 | void *overflow_handler_context; | 484 | void *overflow_handler_context; |
| 480 | 485 | ||
| 481 | #ifdef CONFIG_EVENT_TRACING | 486 | #ifdef CONFIG_EVENT_TRACING |
| 482 | struct ftrace_event_call *tp_event; | 487 | struct trace_event_call *tp_event; |
| 483 | struct event_filter *filter; | 488 | struct event_filter *filter; |
| 484 | #ifdef CONFIG_FUNCTION_TRACER | 489 | #ifdef CONFIG_FUNCTION_TRACER |
| 485 | struct ftrace_ops ftrace_ops; | 490 | struct ftrace_ops ftrace_ops; |
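
The new filter_match() callback lets a PMU driver veto scheduling of an individual event for reasons the core cannot see, for instance a PMU instance that only counts on a subset of CPUs. A hedged sketch of such a hook; the supported-CPU mask and how it gets populated are hypothetical.

#include <linux/cpumask.h>
#include <linux/perf_event.h>
#include <linux/smp.h>

/* CPUs this PMU instance can count on; filling it in is device-specific. */
static struct cpumask example_supported_cpus;

/* Nonzero means "this event may be scheduled here", zero filters it out. */
static int example_pmu_filter_match(struct perf_event *event)
{
        return cpumask_test_cpu(smp_processor_id(), &example_supported_cpus);
}

static struct pmu example_pmu = {
        /* the usual add/del/start/stop/read callbacks are elided */
        .filter_match   = example_pmu_filter_match,
};
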
diff --git a/include/linux/phy.h b/include/linux/phy.h index 685809835b5c..a26c3f84b8dd 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -181,6 +181,9 @@ struct mii_bus { | |||
| 181 | /* PHY addresses to be ignored when probing */ | 181 | /* PHY addresses to be ignored when probing */ |
| 182 | u32 phy_mask; | 182 | u32 phy_mask; |
| 183 | 183 | ||
| 184 | /* PHY addresses to ignore the TA/read failure */ | ||
| 185 | u32 phy_ignore_ta_mask; | ||
| 186 | |||
| 184 | /* | 187 | /* |
| 185 | * Pointer to an array of interrupts, each PHY's | 188 | * Pointer to an array of interrupts, each PHY's |
| 186 | * interrupt at the index matching its address | 189 | * interrupt at the index matching its address |
| @@ -675,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev) | |||
| 675 | } | 678 | } |
| 676 | 679 | ||
| 677 | /** | 680 | /** |
| 681 | * phy_interface_is_rgmii - Convenience function for testing if a PHY interface | ||
| 682 | * is RGMII (all variants) | ||
| 683 | * @phydev: the phy_device struct | ||
| 684 | */ | ||
| 685 | static inline bool phy_interface_is_rgmii(struct phy_device *phydev) | ||
| 686 | { | ||
| 687 | return phydev->interface >= PHY_INTERFACE_MODE_RGMII && | ||
| 688 | phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; | ||
| 689 | } | ||
| 690 | |||
| 691 | /** | ||
| 678 | * phy_write_mmd - Convenience function for writing a register | 692 | * phy_write_mmd - Convenience function for writing a register |
| 679 | * on an MMD on a given PHY. | 693 | * on an MMD on a given PHY. |
| 680 | * @phydev: The phy_device struct | 694 | * @phydev: The phy_device struct |
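
phy_interface_is_rgmii() collapses the four-way check against PHY_INTERFACE_MODE_RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID that PHY drivers kept repeating. A sketch of a typical call site; the register number and value written are placeholders rather than a real device's registers.

#include <linux/phy.h>

/* Apply a fixup that is needed for every RGMII delay variant alike. */
static int example_phy_config_init(struct phy_device *phydev)
{
        if (phy_interface_is_rgmii(phydev)) {
                /* covers RGMII, RGMII_ID, RGMII_RXID and RGMII_TXID;
                   register 0x1f / value 0x0a43 are placeholders */
                return phy_write(phydev, 0x1f, 0x0a43);
        }
        return 0;
}
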
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h new file mode 100644 index 000000000000..50aed92ea89c --- /dev/null +++ b/include/linux/phy/phy-sun4i-usb.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 and | ||
| 6 | * only version 2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef PHY_SUN4I_USB_H_ | ||
| 15 | #define PHY_SUN4I_USB_H_ | ||
| 16 | |||
| 17 | #include "phy.h" | ||
| 18 | |||
| 19 | /** | ||
| 20 | * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect | ||
| 21 | * @phy: reference to a sun4i usb phy | ||
| 22 | * @enabled: whether to enable or disable squelch detect | ||
| 23 | */ | ||
| 24 | void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled); | ||
| 25 | |||
| 26 | #endif | ||
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index a0197fa1b116..8cf05e341cff 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
| @@ -133,6 +133,8 @@ struct phy *devm_phy_get(struct device *dev, const char *string); | |||
| 133 | struct phy *devm_phy_optional_get(struct device *dev, const char *string); | 133 | struct phy *devm_phy_optional_get(struct device *dev, const char *string); |
| 134 | struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, | 134 | struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, |
| 135 | const char *con_id); | 135 | const char *con_id); |
| 136 | struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, | ||
| 137 | int index); | ||
| 136 | void phy_put(struct phy *phy); | 138 | void phy_put(struct phy *phy); |
| 137 | void devm_phy_put(struct device *dev, struct phy *phy); | 139 | void devm_phy_put(struct device *dev, struct phy *phy); |
| 138 | struct phy *of_phy_get(struct device_node *np, const char *con_id); | 140 | struct phy *of_phy_get(struct device_node *np, const char *con_id); |
| @@ -261,6 +263,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev, | |||
| 261 | return ERR_PTR(-ENOSYS); | 263 | return ERR_PTR(-ENOSYS); |
| 262 | } | 264 | } |
| 263 | 265 | ||
| 266 | static inline struct phy *devm_of_phy_get_by_index(struct device *dev, | ||
| 267 | struct device_node *np, | ||
| 268 | int index) | ||
| 269 | { | ||
| 270 | return ERR_PTR(-ENOSYS); | ||
| 271 | } | ||
| 272 | |||
| 264 | static inline void phy_put(struct phy *phy) | 273 | static inline void phy_put(struct phy *phy) |
| 265 | { | 274 | { |
| 266 | } | 275 | } |
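
devm_of_phy_get_by_index() complements devm_of_phy_get() for nodes whose "phys" property lists several PHYs without names, letting a controller driver grab them positionally. A brief sketch; the example helper is invented and the count would normally come from counting the phandles in the property.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Acquire every PHY listed (unnamed) in the node's "phys" property. */
static int example_get_phys(struct platform_device *pdev, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                struct phy *phy;

                phy = devm_of_phy_get_by_index(&pdev->dev,
                                               pdev->dev.of_node, i);
                if (IS_ERR(phy))
                        return PTR_ERR(phy);

                /* typically followed by phy_init() and phy_power_on() */
        }
        return 0;
}
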
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 18eccefea06e..d7e5d608faa7 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h | |||
| @@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select( | |||
| 142 | s = pinctrl_lookup_state(p, name); | 142 | s = pinctrl_lookup_state(p, name); |
| 143 | if (IS_ERR(s)) { | 143 | if (IS_ERR(s)) { |
| 144 | pinctrl_put(p); | 144 | pinctrl_put(p); |
| 145 | return ERR_PTR(PTR_ERR(s)); | 145 | return ERR_CAST(s); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | ret = pinctrl_select_state(p, s); | 148 | ret = pinctrl_select_state(p, s); |
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 66e4697516de..9ba59fcba549 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h | |||
| @@ -127,7 +127,7 @@ struct pinctrl_ops { | |||
| 127 | */ | 127 | */ |
| 128 | struct pinctrl_desc { | 128 | struct pinctrl_desc { |
| 129 | const char *name; | 129 | const char *name; |
| 130 | struct pinctrl_pin_desc const *pins; | 130 | const struct pinctrl_pin_desc *pins; |
| 131 | unsigned int npins; | 131 | unsigned int npins; |
| 132 | const struct pinctrl_ops *pctlops; | 132 | const struct pinctrl_ops *pctlops; |
| 133 | const struct pinmux_ops *pmxops; | 133 | const struct pinmux_ops *pmxops; |
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index 511bda9ed4bf..ace60d775b20 100644 --- a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h | |||
| @@ -56,6 +56,9 @@ struct pinctrl_dev; | |||
| 56 | * depending on whether the GPIO is configured as input or output, | 56 | * depending on whether the GPIO is configured as input or output, |
| 57 | * a direction selector function may be implemented as a backing | 57 | * a direction selector function may be implemented as a backing |
| 58 | * to the GPIO controllers that need pin muxing. | 58 | * to the GPIO controllers that need pin muxing. |
| 59 | * @strict: do not allow simultaneous use of the same pin for GPIO and another | ||
| 60 | * function. Check both gpio_owner and mux_owner strictly before approving | ||
| 61 | * the pin request. | ||
| 59 | */ | 62 | */ |
| 60 | struct pinmux_ops { | 63 | struct pinmux_ops { |
| 61 | int (*request) (struct pinctrl_dev *pctldev, unsigned offset); | 64 | int (*request) (struct pinctrl_dev *pctldev, unsigned offset); |
| @@ -66,7 +69,7 @@ struct pinmux_ops { | |||
| 66 | int (*get_function_groups) (struct pinctrl_dev *pctldev, | 69 | int (*get_function_groups) (struct pinctrl_dev *pctldev, |
| 67 | unsigned selector, | 70 | unsigned selector, |
| 68 | const char * const **groups, | 71 | const char * const **groups, |
| 69 | unsigned * const num_groups); | 72 | unsigned *num_groups); |
| 70 | int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, | 73 | int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, |
| 71 | unsigned group_selector); | 74 | unsigned group_selector); |
| 72 | int (*gpio_request_enable) (struct pinctrl_dev *pctldev, | 75 | int (*gpio_request_enable) (struct pinctrl_dev *pctldev, |
| @@ -79,6 +82,7 @@ struct pinmux_ops { | |||
| 79 | struct pinctrl_gpio_range *range, | 82 | struct pinctrl_gpio_range *range, |
| 80 | unsigned offset, | 83 | unsigned offset, |
| 81 | bool input); | 84 | bool input); |
| 85 | bool strict; | ||
| 82 | }; | 86 | }; |
| 83 | 87 | ||
| 84 | #endif /* CONFIG_PINMUX */ | 88 | #endif /* CONFIG_PINMUX */ |
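
The new .strict flag makes the pinmux core reject, rather than merely warn about, a GPIO request for a pin that a function group already owns (and the reverse). A skeleton pinmux_ops with the flag set; every callback here is a do-nothing stub for illustration.

#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>

static int example_get_functions_count(struct pinctrl_dev *pctldev)
{
        return 0;                       /* stub */
}

static const char *example_get_function_name(struct pinctrl_dev *pctldev,
                                             unsigned selector)
{
        return "example";               /* stub */
}

static int example_get_function_groups(struct pinctrl_dev *pctldev,
                                       unsigned selector,
                                       const char * const **groups,
                                       unsigned *num_groups)
{
        *groups = NULL;                 /* stub */
        *num_groups = 0;
        return 0;
}

static int example_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
                           unsigned group_selector)
{
        return 0;                       /* stub */
}

/*
 * With .strict set, the core refuses to hand a pin to gpio_request_enable()
 * while a function group owns it (and refuses the reverse), instead of only
 * warning about the overlap.
 */
static const struct pinmux_ops example_pmxops = {
        .get_functions_count    = example_get_functions_count,
        .get_function_name      = example_get_function_name,
        .get_function_groups    = example_get_function_groups,
        .set_mux                = example_set_mux,
        .strict                 = true,
};
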
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h deleted file mode 100644 index 471fffebbeb4..000000000000 --- a/include/linux/platform_data/dma-rcar-audmapp.h +++ /dev/null | |||
| @@ -1,34 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * This is for Renesas R-Car Audio-DMAC-peri-peri. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Renesas Electronics Corporation | ||
| 5 | * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | ||
| 6 | * | ||
| 7 | * This file is based on the include/linux/sh_dma.h | ||
| 8 | * | ||
| 9 | * Header for the new SH dmaengine driver | ||
| 10 | * | ||
| 11 | * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify | ||
| 14 | * it under the terms of the GNU General Public License version 2 as | ||
| 15 | * published by the Free Software Foundation. | ||
| 16 | */ | ||
| 17 | #ifndef SH_AUDMAPP_H | ||
| 18 | #define SH_AUDMAPP_H | ||
| 19 | |||
| 20 | #include <linux/dmaengine.h> | ||
| 21 | |||
| 22 | struct audmapp_slave_config { | ||
| 23 | int slave_id; | ||
| 24 | dma_addr_t src; | ||
| 25 | dma_addr_t dst; | ||
| 26 | u32 chcr; | ||
| 27 | }; | ||
| 28 | |||
| 29 | struct audmapp_pdata { | ||
| 30 | struct audmapp_slave_config *slave; | ||
| 31 | int slave_num; | ||
| 32 | }; | ||
| 33 | |||
| 34 | #endif /* SH_AUDMAPP_H */ | ||
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h new file mode 100644 index 000000000000..88b0db7bee74 --- /dev/null +++ b/include/linux/platform_data/gpio-ath79.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* | ||
| 2 | * Atheros AR7XXX/AR9XXX GPIO controller platform data | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Alban Bedel <albeu@free.fr> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H | ||
| 12 | #define __LINUX_PLATFORM_DATA_GPIO_ATH79_H | ||
| 13 | |||
| 14 | struct ath79_gpio_platform_data { | ||
| 15 | unsigned ngpios; | ||
| 16 | bool oe_inverted; | ||
| 17 | }; | ||
| 18 | |||
| 19 | #endif | ||
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h index 9248e3a7e333..5e3ff653900c 100644 --- a/include/linux/platform_data/keyboard-spear.h +++ b/include/linux/platform_data/keyboard-spear.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2010 ST Microelectronics | 2 | * Copyright (C) 2010 ST Microelectronics |
| 3 | * Rajeev Kumar<rajeev-dlh.kumar@st.com> | 3 | * Rajeev Kumar <rajeevkumar.linux@gmail.com> |
| 4 | * | 4 | * |
| 5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
| 6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h index 044a124bfbbc..21b15f6fee25 100644 --- a/include/linux/platform_data/macb.h +++ b/include/linux/platform_data/macb.h | |||
| @@ -8,11 +8,19 @@ | |||
| 8 | #ifndef __MACB_PDATA_H__ | 8 | #ifndef __MACB_PDATA_H__ |
| 9 | #define __MACB_PDATA_H__ | 9 | #define __MACB_PDATA_H__ |
| 10 | 10 | ||
| 11 | /** | ||
| 12 | * struct macb_platform_data - platform data for MACB Ethernet | ||
| 12 | * @phy_mask: phy mask passed when registering the MDIO bus | ||
| 14 | * within the driver | ||
| 15 | * @phy_irq_pin: PHY IRQ | ||
| 16 | * @is_rmii: using RMII interface? | ||
| 17 | * @rev_eth_addr: reverse Ethernet address byte order | ||
| 18 | */ | ||
| 11 | struct macb_platform_data { | 19 | struct macb_platform_data { |
| 12 | u32 phy_mask; | 20 | u32 phy_mask; |
| 13 | int phy_irq_pin; /* PHY IRQ */ | 21 | int phy_irq_pin; |
| 14 | u8 is_rmii; /* using RMII interface? */ | 22 | u8 is_rmii; |
| 15 | u8 rev_eth_addr; /* reverse Ethernet address byte order */ | 23 | u8 rev_eth_addr; |
| 16 | }; | 24 | }; |
| 17 | 25 | ||
| 18 | #endif /* __MACB_PDATA_H__ */ | 26 | #endif /* __MACB_PDATA_H__ */ |
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 75f70f6ac137..e1571efa3f2b 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h | |||
| @@ -43,7 +43,6 @@ struct esdhc_platform_data { | |||
| 43 | enum wp_types wp_type; | 43 | enum wp_types wp_type; |
| 44 | enum cd_types cd_type; | 44 | enum cd_types cd_type; |
| 45 | int max_bus_width; | 45 | int max_bus_width; |
| 46 | unsigned int f_max; | ||
| 47 | bool support_vsel; | 46 | bool support_vsel; |
| 48 | unsigned int delay_line; | 47 | unsigned int delay_line; |
| 49 | }; | 48 | }; |
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h new file mode 100644 index 000000000000..ac91707dabcb --- /dev/null +++ b/include/linux/platform_data/nfcmrvl.h | |||
| @@ -0,0 +1,40 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2015, Marvell International Ltd. | ||
| 3 | * | ||
| 4 | * This software file (the "File") is distributed by Marvell International | ||
| 5 | * Ltd. under the terms of the GNU General Public License Version 2, June 1991 | ||
| 6 | * (the "License"). You may use, redistribute and/or modify this File in | ||
| 7 | * accordance with the terms and conditions of the License, a copy of which | ||
| 8 | * is available on the worldwide web at | ||
| 9 | * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. | ||
| 10 | * | ||
| 11 | * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE | ||
| 12 | * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE | ||
| 13 | * ARE EXPRESSLY DISCLAIMED. The License provides additional details about | ||
| 14 | * this warranty disclaimer. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _NFCMRVL_PTF_H_ | ||
| 18 | #define _NFCMRVL_PTF_H_ | ||
| 19 | |||
| 20 | struct nfcmrvl_platform_data { | ||
| 21 | /* | ||
| 22 | * Generic | ||
| 23 | */ | ||
| 24 | |||
| 25 | /* GPIO that is wired to RESET_N signal */ | ||
| 26 | unsigned int reset_n_io; | ||
| 27 | /* Tell if transport is muxed in HCI one */ | ||
| 28 | unsigned int hci_muxed; | ||
| 29 | |||
| 30 | /* | ||
| 31 | * UART specific | ||
| 32 | */ | ||
| 33 | |||
| 34 | /* Tell if UART needs flow control at init */ | ||
| 35 | unsigned int flow_control; | ||
| 36 | /* Tell if firmware supports break control for power management */ | ||
| 37 | unsigned int break_control; | ||
| 38 | }; | ||
| 39 | |||
| 40 | #endif /* _NFCMRVL_PTF_H_ */ | ||
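A board file would pass this structure to the NFC device; the values below, including the GPIO number, are purely illustrative:

	#include <linux/platform_data/nfcmrvl.h>

	static struct nfcmrvl_platform_data board_nfc_pdata = {
		.reset_n_io	= 16,	/* hypothetical GPIO wired to RESET_N */
		.hci_muxed	= 1,	/* transport is muxed in HCI */
		.flow_control	= 1,	/* UART needs RTS/CTS at init */
		.break_control	= 0,	/* firmware has no break-based power management */
	};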
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h index b023373d9874..d9d400a297bd 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st-nci.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Driver include for the ST21NFCB NFC chip. | 2 | * Driver include for ST NCI NFC chip family. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. | 4 | * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
| @@ -16,14 +16,14 @@ | |||
| 16 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #ifndef _ST21NFCB_NCI_H_ | 19 | #ifndef _ST_NCI_H_ |
| 20 | #define _ST21NFCB_NCI_H_ | 20 | #define _ST_NCI_H_ |
| 21 | 21 | ||
| 22 | #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" | 22 | #define ST_NCI_DRIVER_NAME "st_nci" |
| 23 | 23 | ||
| 24 | struct st21nfcb_nfc_platform_data { | 24 | struct st_nci_nfc_platform_data { |
| 25 | unsigned int gpio_reset; | 25 | unsigned int gpio_reset; |
| 26 | unsigned int irq_polarity; | 26 | unsigned int irq_polarity; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | #endif /* _ST21NFCB_NCI_H_ */ | 29 | #endif /* _ST_NCI_H_ */ |
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h new file mode 100644 index 000000000000..d9d400a297bd --- /dev/null +++ b/include/linux/platform_data/st_nci.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Driver include for ST NCI NFC chip family. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _ST_NCI_H_ | ||
| 20 | #define _ST_NCI_H_ | ||
| 21 | |||
| 22 | #define ST_NCI_DRIVER_NAME "st_nci" | ||
| 23 | |||
| 24 | struct st_nci_nfc_platform_data { | ||
| 25 | unsigned int gpio_reset; | ||
| 26 | unsigned int irq_polarity; | ||
| 27 | }; | ||
| 28 | |||
| 29 | #endif /* _ST_NCI_H_ */ | ||
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h deleted file mode 100644 index dd3ba46c0d90..000000000000 --- a/include/linux/platform_data/usb-rcar-gen2-phy.h +++ /dev/null | |||
| @@ -1,22 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
| 3 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 as | ||
| 7 | * published by the Free Software Foundation. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __USB_RCAR_GEN2_PHY_H | ||
| 11 | #define __USB_RCAR_GEN2_PHY_H | ||
| 12 | |||
| 13 | #include <linux/types.h> | ||
| 14 | |||
| 15 | struct rcar_gen2_phy_platform_data { | ||
| 16 | /* USB channel 0 configuration */ | ||
| 17 | bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */ | ||
| 18 | /* USB channel 2 configuration */ | ||
| 19 | bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */ | ||
| 20 | }; | ||
| 21 | |||
| 22 | #endif | ||
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h deleted file mode 100644 index 31449be3eadb..000000000000 --- a/include/linux/platform_data/video-msm_fb.h +++ /dev/null | |||
| @@ -1,146 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Internal shared definitions for various MSM framebuffer parts. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2007 Google Incorporated | ||
| 5 | * | ||
| 6 | * This software is licensed under the terms of the GNU General Public | ||
| 7 | * License version 2, as published by the Free Software Foundation, and | ||
| 8 | * may be copied, distributed, and modified under those terms. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef _MSM_FB_H_ | ||
| 17 | #define _MSM_FB_H_ | ||
| 18 | |||
| 19 | #include <linux/device.h> | ||
| 20 | |||
| 21 | struct mddi_info; | ||
| 22 | |||
| 23 | struct msm_fb_data { | ||
| 24 | int xres; /* x resolution in pixels */ | ||
| 25 | int yres; /* y resolution in pixels */ | ||
| 26 | int width; /* display width in mm */ | ||
| 27 | int height; /* display height in mm */ | ||
| 28 | unsigned output_format; | ||
| 29 | }; | ||
| 30 | |||
| 31 | struct msmfb_callback { | ||
| 32 | void (*func)(struct msmfb_callback *); | ||
| 33 | }; | ||
| 34 | |||
| 35 | enum { | ||
| 36 | MSM_MDDI_PMDH_INTERFACE, | ||
| 37 | MSM_MDDI_EMDH_INTERFACE, | ||
| 38 | MSM_EBI2_INTERFACE, | ||
| 39 | }; | ||
| 40 | |||
| 41 | #define MSMFB_CAP_PARTIAL_UPDATES (1 << 0) | ||
| 42 | |||
| 43 | struct msm_panel_data { | ||
| 44 | /* turns off the fb memory */ | ||
| 45 | int (*suspend)(struct msm_panel_data *); | ||
| 46 | /* turns on the fb memory */ | ||
| 47 | int (*resume)(struct msm_panel_data *); | ||
| 48 | /* turns off the panel */ | ||
| 49 | int (*blank)(struct msm_panel_data *); | ||
| 50 | /* turns on the panel */ | ||
| 51 | int (*unblank)(struct msm_panel_data *); | ||
| 52 | void (*wait_vsync)(struct msm_panel_data *); | ||
| 53 | void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *); | ||
| 54 | void (*clear_vsync)(struct msm_panel_data *); | ||
| 55 | /* from the enum above */ | ||
| 56 | unsigned interface_type; | ||
| 57 | /* data to be passed to the fb driver */ | ||
| 58 | struct msm_fb_data *fb_data; | ||
| 59 | |||
| 60 | /* capabilities supported by the panel */ | ||
| 61 | uint32_t caps; | ||
| 62 | }; | ||
| 63 | |||
| 64 | struct msm_mddi_client_data { | ||
| 65 | void (*suspend)(struct msm_mddi_client_data *); | ||
| 66 | void (*resume)(struct msm_mddi_client_data *); | ||
| 67 | void (*activate_link)(struct msm_mddi_client_data *); | ||
| 68 | void (*remote_write)(struct msm_mddi_client_data *, uint32_t val, | ||
| 69 | uint32_t reg); | ||
| 70 | uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg); | ||
| 71 | void (*auto_hibernate)(struct msm_mddi_client_data *, int); | ||
| 72 | /* custom data that needs to be passed from the board file to a | ||
| 73 | * particular client */ | ||
| 74 | void *private_client_data; | ||
| 75 | struct resource *fb_resource; | ||
| 76 | /* from the list above */ | ||
| 77 | unsigned interface_type; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct msm_mddi_platform_data { | ||
| 81 | unsigned int clk_rate; | ||
| 82 | void (*power_client)(struct msm_mddi_client_data *, int on); | ||
| 83 | |||
| 84 | /* fixup the mfr name, product id */ | ||
| 85 | void (*fixup)(uint16_t *mfr_name, uint16_t *product_id); | ||
| 86 | |||
| 87 | struct resource *fb_resource; /*optional*/ | ||
| 88 | /* number of clients in the list that follows */ | ||
| 89 | int num_clients; | ||
| 90 | /* array of client information of clients */ | ||
| 91 | struct { | ||
| 92 | unsigned product_id; /* mfr id in top 16 bits, product id | ||
| 93 | * in lower 16 bits | ||
| 94 | */ | ||
| 95 | char *name; /* the device name will be the platform | ||
| 96 | * device name registered for the client, | ||
| 97 | * it should match the name of the associated | ||
| 98 | * driver | ||
| 99 | */ | ||
| 100 | unsigned id; /* id for mddi client device node, will also | ||
| 101 | * be used as device id of panel devices, if | ||
| 102 | * the client device will have multiple panels | ||
| 103 | * space must be left here for them | ||
| 104 | */ | ||
| 105 | void *client_data; /* required private client data */ | ||
| 106 | unsigned int clk_rate; /* optional: if the client requires a | ||
| 107 | * different mddi clk rate | ||
| 108 | */ | ||
| 109 | } client_platform_data[]; | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct mdp_blit_req; | ||
| 113 | struct fb_info; | ||
| 114 | struct mdp_device { | ||
| 115 | struct device dev; | ||
| 116 | void (*dma)(struct mdp_device *mpd, uint32_t addr, | ||
| 117 | uint32_t stride, uint32_t w, uint32_t h, uint32_t x, | ||
| 118 | uint32_t y, struct msmfb_callback *callback, int interface); | ||
| 119 | void (*dma_wait)(struct mdp_device *mdp); | ||
| 120 | int (*blit)(struct mdp_device *mdp, struct fb_info *fb, | ||
| 121 | struct mdp_blit_req *req); | ||
| 122 | void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id); | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct class_interface; | ||
| 126 | int register_mdp_client(struct class_interface *class_intf); | ||
| 127 | |||
| 128 | /**** private client data structs go below this line ***/ | ||
| 129 | |||
| 130 | struct msm_mddi_bridge_platform_data { | ||
| 131 | /* from board file */ | ||
| 132 | int (*init)(struct msm_mddi_bridge_platform_data *, | ||
| 133 | struct msm_mddi_client_data *); | ||
| 134 | int (*uninit)(struct msm_mddi_bridge_platform_data *, | ||
| 135 | struct msm_mddi_client_data *); | ||
| 136 | /* passed to panel for use by the fb driver */ | ||
| 137 | int (*blank)(struct msm_mddi_bridge_platform_data *, | ||
| 138 | struct msm_mddi_client_data *); | ||
| 139 | int (*unblank)(struct msm_mddi_bridge_platform_data *, | ||
| 140 | struct msm_mddi_client_data *); | ||
| 141 | struct msm_fb_data fb_data; | ||
| 142 | }; | ||
| 143 | |||
| 144 | |||
| 145 | |||
| 146 | #endif | ||
diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h new file mode 100644 index 000000000000..3f1d77effd71 --- /dev/null +++ b/include/linux/platform_data/wkup_m3.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | * TI Wakeup M3 remote processor platform data | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014-2015 Texas Instruments, Inc. | ||
| 5 | * | ||
| 6 | * Dave Gerlach <d-gerlach@ti.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H | ||
| 19 | #define _LINUX_PLATFORM_DATA_WKUP_M3_H | ||
| 20 | |||
| 21 | struct platform_device; | ||
| 22 | |||
| 23 | struct wkup_m3_platform_data { | ||
| 24 | const char *reset_name; | ||
| 25 | |||
| 26 | int (*assert_reset)(struct platform_device *pdev, const char *name); | ||
| 27 | int (*deassert_reset)(struct platform_device *pdev, const char *name); | ||
| 28 | }; | ||
| 29 | |||
| 30 | #endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */ | ||
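The two callbacks let the platform keep control of the M3 reset line while the remoteproc driver decides when to toggle it. A sketch of the board side, with empty placeholder hooks standing in for the SoC-specific reset helpers:

	#include <linux/platform_device.h>
	#include <linux/platform_data/wkup_m3.h>

	static int board_wkup_m3_assert_reset(struct platform_device *pdev,
					      const char *name)
	{
		/* forward to the SoC reset framework for the "wkup_m3" line */
		return 0;
	}

	static int board_wkup_m3_deassert_reset(struct platform_device *pdev,
						const char *name)
	{
		return 0;
	}

	static struct wkup_m3_platform_data board_wkup_m3_pdata = {
		.reset_name	= "wkup_m3",
		.assert_reset	= board_wkup_m3_assert_reset,
		.deassert_reset	= board_wkup_m3_deassert_reset,
	};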
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 58f1e75ba105..bba08f44cc97 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev, | |||
| 222 | module_driver(__platform_driver, platform_driver_register, \ | 222 | module_driver(__platform_driver, platform_driver_register, \ |
| 223 | platform_driver_unregister) | 223 | platform_driver_unregister) |
| 224 | 224 | ||
| 225 | /* builtin_platform_driver() - Helper macro for builtin drivers that | ||
| 226 | * don't do anything special in driver init. This eliminates some | ||
| 227 | * boilerplate. Each driver may only use this macro once, and | ||
| 228 | * calling it replaces device_initcall(). Note this is meant to be | ||
| 229 | * a parallel of module_platform_driver() above, but w/o _exit stuff. | ||
| 230 | */ | ||
| 231 | #define builtin_platform_driver(__platform_driver) \ | ||
| 232 | builtin_driver(__platform_driver, platform_driver_register) | ||
| 233 | |||
| 225 | /* module_platform_driver_probe() - Helper macro for drivers that don't do | 234 | /* module_platform_driver_probe() - Helper macro for drivers that don't do |
| 226 | * anything special in module init/exit. This eliminates a lot of | 235 | * anything special in module init/exit. This eliminates a lot of |
| 227 | * boilerplate. Each module may only use this macro once, and | 236 | * boilerplate. Each module may only use this macro once, and |
| @@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \ | |||
| 240 | } \ | 249 | } \ |
| 241 | module_exit(__platform_driver##_exit); | 250 | module_exit(__platform_driver##_exit); |
| 242 | 251 | ||
| 252 | /* builtin_platform_driver_probe() - Helper macro for drivers that don't do | ||
| 253 | * anything special in device init. This eliminates some boilerplate. Each | ||
| 254 | * driver may only use this macro once, and using it replaces device_initcall. | ||
| 255 | * This is meant to be a parallel of module_platform_driver_probe above, but | ||
| 256 | * without the __exit parts. | ||
| 257 | */ | ||
| 258 | #define builtin_platform_driver_probe(__platform_driver, __platform_probe) \ | ||
| 259 | static int __init __platform_driver##_init(void) \ | ||
| 260 | { \ | ||
| 261 | return platform_driver_probe(&(__platform_driver), \ | ||
| 262 | __platform_probe); \ | ||
| 263 | } \ | ||
| 264 | device_initcall(__platform_driver##_init); \ | ||
| 265 | |||
| 243 | #define platform_create_bundle(driver, probe, res, n_res, data, size) \ | 266 | #define platform_create_bundle(driver, probe, res, n_res, data, size) \ |
| 244 | __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) | 267 | __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) |
| 245 | extern struct platform_device *__platform_create_bundle( | 268 | extern struct platform_device *__platform_create_bundle( |
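builtin_platform_driver() plays the same role as module_platform_driver() for code that can never be a module, replacing an open-coded device_initcall(). A sketch with a hypothetical foo driver:

	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		return 0;	/* real setup elided */
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name = "foo",
		},
	};

	/* no remove/exit path: the driver is built in and never unregistered */
	builtin_platform_driver(foo_driver);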
diff --git a/include/linux/pm.h b/include/linux/pm.h index 2d29c64f8fb1..35d599e7250d 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -342,6 +342,18 @@ struct dev_pm_ops { | |||
| 342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) | 342 | #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) |
| 343 | #endif | 343 | #endif |
| 344 | 344 | ||
| 345 | #ifdef CONFIG_PM_SLEEP | ||
| 346 | #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ | ||
| 347 | .suspend_noirq = suspend_fn, \ | ||
| 348 | .resume_noirq = resume_fn, \ | ||
| 349 | .freeze_noirq = suspend_fn, \ | ||
| 350 | .thaw_noirq = resume_fn, \ | ||
| 351 | .poweroff_noirq = suspend_fn, \ | ||
| 352 | .restore_noirq = resume_fn, | ||
| 353 | #else | ||
| 354 | #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) | ||
| 355 | #endif | ||
| 356 | |||
| 345 | #ifdef CONFIG_PM | 357 | #ifdef CONFIG_PM |
| 346 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ | 358 | #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ |
| 347 | .runtime_suspend = suspend_fn, \ | 359 | .runtime_suspend = suspend_fn, \ |
| @@ -529,6 +541,7 @@ enum rpm_request { | |||
| 529 | }; | 541 | }; |
| 530 | 542 | ||
| 531 | struct wakeup_source; | 543 | struct wakeup_source; |
| 544 | struct wake_irq; | ||
| 532 | struct pm_domain_data; | 545 | struct pm_domain_data; |
| 533 | 546 | ||
| 534 | struct pm_subsys_data { | 547 | struct pm_subsys_data { |
| @@ -568,6 +581,7 @@ struct dev_pm_info { | |||
| 568 | unsigned long timer_expires; | 581 | unsigned long timer_expires; |
| 569 | struct work_struct work; | 582 | struct work_struct work; |
| 570 | wait_queue_head_t wait_queue; | 583 | wait_queue_head_t wait_queue; |
| 584 | struct wake_irq *wakeirq; | ||
| 571 | atomic_t usage_count; | 585 | atomic_t usage_count; |
| 572 | atomic_t child_count; | 586 | atomic_t child_count; |
| 573 | unsigned int disable_depth:3; | 587 | unsigned int disable_depth:3; |
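SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in all six noirq-phase callbacks from one suspend/resume pair and compiles away when CONFIG_PM_SLEEP is off. A sketch, assuming hypothetical foo callbacks:

	#include <linux/pm.h>

	static int __maybe_unused foo_suspend_noirq(struct device *dev)
	{
		return 0;
	}

	static int __maybe_unused foo_resume_noirq(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
	};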
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 0b0039634410..25266c600021 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h | |||
| @@ -20,6 +20,16 @@ struct pm_clk_notifier_block { | |||
| 20 | 20 | ||
| 21 | struct clk; | 21 | struct clk; |
| 22 | 22 | ||
| 23 | #ifdef CONFIG_PM | ||
| 24 | extern int pm_clk_runtime_suspend(struct device *dev); | ||
| 25 | extern int pm_clk_runtime_resume(struct device *dev); | ||
| 26 | #define USE_PM_CLK_RUNTIME_OPS \ | ||
| 27 | .runtime_suspend = pm_clk_runtime_suspend, \ | ||
| 28 | .runtime_resume = pm_clk_runtime_resume, | ||
| 29 | #else | ||
| 30 | #define USE_PM_CLK_RUNTIME_OPS | ||
| 31 | #endif | ||
| 32 | |||
| 23 | #ifdef CONFIG_PM_CLK | 33 | #ifdef CONFIG_PM_CLK |
| 24 | static inline bool pm_clk_no_clocks(struct device *dev) | 34 | static inline bool pm_clk_no_clocks(struct device *dev) |
| 25 | { | 35 | { |
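USE_PM_CLK_RUNTIME_OPS wires a driver's runtime PM callbacks straight to the new pm_clk_runtime_suspend()/resume() helpers, so a driver that only needs its clocks gated can avoid writing its own. A sketch of how this might be used; foo is hypothetical:

	#include <linux/platform_device.h>
	#include <linux/pm_clock.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_clk_create(&pdev->dev);
		if (ret)
			return ret;

		/* NULL con_id: take the device's first clock */
		ret = pm_clk_add(&pdev->dev, NULL);
		if (ret)
			pm_clk_destroy(&pdev->dev);

		return ret;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		USE_PM_CLK_RUNTIME_OPS	/* expands to nothing without CONFIG_PM */
	};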
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h new file mode 100644 index 000000000000..cd5b62db9084 --- /dev/null +++ b/include/linux/pm_wakeirq.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | /* | ||
| 2 | * pm_wakeirq.h - Device wakeirq helper functions | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 9 | * kind, whether express or implied; without even the implied warranty | ||
| 10 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef _LINUX_PM_WAKEIRQ_H | ||
| 15 | #define _LINUX_PM_WAKEIRQ_H | ||
| 16 | |||
| 17 | #ifdef CONFIG_PM | ||
| 18 | |||
| 19 | extern int dev_pm_set_wake_irq(struct device *dev, int irq); | ||
| 20 | extern int dev_pm_set_dedicated_wake_irq(struct device *dev, | ||
| 21 | int irq); | ||
| 22 | extern void dev_pm_clear_wake_irq(struct device *dev); | ||
| 23 | extern void dev_pm_enable_wake_irq(struct device *dev); | ||
| 24 | extern void dev_pm_disable_wake_irq(struct device *dev); | ||
| 25 | |||
| 26 | #else /* !CONFIG_PM */ | ||
| 27 | |||
| 28 | static inline int dev_pm_set_wake_irq(struct device *dev, int irq) | ||
| 29 | { | ||
| 30 | return 0; | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) | ||
| 34 | { | ||
| 35 | return 0; | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void dev_pm_clear_wake_irq(struct device *dev) | ||
| 39 | { | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void dev_pm_enable_wake_irq(struct device *dev) | ||
| 43 | { | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline void dev_pm_disable_wake_irq(struct device *dev) | ||
| 47 | { | ||
| 48 | } | ||
| 49 | |||
| 50 | #endif /* CONFIG_PM */ | ||
| 51 | #endif /* _LINUX_PM_WAKEIRQ_H */ | ||
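A driver with a separate wake-up interrupt line can now hand it to the PM core instead of managing enable_irq_wake() itself. A sketch of the probe side, assuming a hypothetical device whose second interrupt is the dedicated wake IRQ:

	#include <linux/platform_device.h>
	#include <linux/pm_wakeup.h>
	#include <linux/pm_wakeirq.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int wakeirq = platform_get_irq(pdev, 1);	/* optional second IRQ */

		device_init_wakeup(&pdev->dev, true);

		if (wakeirq > 0) {
			/* the PM core arms/disarms this IRQ around suspend for us */
			int ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);

			if (ret)
				dev_warn(&pdev->dev, "no wake irq: %d\n", ret);
		}

		return 0;
	}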
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index a0f70808d7f4..a3447932df1f 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
| @@ -28,9 +28,17 @@ | |||
| 28 | 28 | ||
| 29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| 30 | 30 | ||
| 31 | struct wake_irq; | ||
| 32 | |||
| 31 | /** | 33 | /** |
| 32 | * struct wakeup_source - Representation of wakeup sources | 34 | * struct wakeup_source - Representation of wakeup sources |
| 33 | * | 35 | * |
| 36 | * @name: Name of the wakeup source | ||
| 37 | * @entry: Wakeup source list entry | ||
| 38 | * @lock: Wakeup source lock | ||
| 39 | * @wakeirq: Optional device specific wakeirq | ||
| 40 | * @timer: Wakeup timer list | ||
| 41 | * @timer_expires: Wakeup timer expiration | ||
| 34 | * @total_time: Total time this wakeup source has been active. | 42 | * @total_time: Total time this wakeup source has been active. |
| 35 | * @max_time: Maximum time this wakeup source has been continuously active. | 43 | * @max_time: Maximum time this wakeup source has been continuously active. |
| 36 | * @last_time: Monotonic clock when the wakeup source was touched last time. | 44 | * @last_time: Monotonic clock when the wakeup source was touched last time. |
| @@ -47,6 +55,7 @@ struct wakeup_source { | |||
| 47 | const char *name; | 55 | const char *name; |
| 48 | struct list_head entry; | 56 | struct list_head entry; |
| 49 | spinlock_t lock; | 57 | spinlock_t lock; |
| 58 | struct wake_irq *wakeirq; | ||
| 50 | struct timer_list timer; | 59 | struct timer_list timer; |
| 51 | unsigned long timer_expires; | 60 | unsigned long timer_expires; |
| 52 | ktime_t total_time; | 61 | ktime_t total_time; |
diff --git a/include/linux/pmem.h b/include/linux/pmem.h new file mode 100644 index 000000000000..d2114045a6c4 --- /dev/null +++ b/include/linux/pmem.h | |||
| @@ -0,0 +1,152 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2015 Intel Corporation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of version 2 of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | */ | ||
| 13 | #ifndef __PMEM_H__ | ||
| 14 | #define __PMEM_H__ | ||
| 15 | |||
| 16 | #include <linux/io.h> | ||
| 17 | |||
| 18 | #ifdef CONFIG_ARCH_HAS_PMEM_API | ||
| 19 | #include <asm/cacheflush.h> | ||
| 20 | #else | ||
| 21 | static inline void arch_wmb_pmem(void) | ||
| 22 | { | ||
| 23 | BUG(); | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline bool __arch_has_wmb_pmem(void) | ||
| 27 | { | ||
| 28 | return false; | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void __pmem *arch_memremap_pmem(resource_size_t offset, | ||
| 32 | unsigned long size) | ||
| 33 | { | ||
| 34 | return NULL; | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | ||
| 38 | size_t n) | ||
| 39 | { | ||
| 40 | BUG(); | ||
| 41 | } | ||
| 42 | #endif | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Architectures that define ARCH_HAS_PMEM_API must provide | ||
| 46 | * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), | ||
| 47 | * arch_wmb_pmem(), and __arch_has_wmb_pmem(). | ||
| 48 | */ | ||
| 49 | |||
| 50 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) | ||
| 51 | { | ||
| 52 | memcpy(dst, (void __force const *) src, size); | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline void memunmap_pmem(void __pmem *addr) | ||
| 56 | { | ||
| 57 | iounmap((void __force __iomem *) addr); | ||
| 58 | } | ||
| 59 | |||
| 60 | /** | ||
| 61 | * arch_has_wmb_pmem - true if wmb_pmem() ensures durability | ||
| 62 | * | ||
| 63 | * For a given cpu implementation within an architecture it is possible | ||
| 64 | * that wmb_pmem() resolves to a nop. In the case this returns | ||
| 65 | * false, pmem api users are unable to ensure durability and may want to | ||
| 66 | * fall back to a different data consistency model, or otherwise notify | ||
| 67 | * the user. | ||
| 68 | */ | ||
| 69 | static inline bool arch_has_wmb_pmem(void) | ||
| 70 | { | ||
| 71 | if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) | ||
| 72 | return __arch_has_wmb_pmem(); | ||
| 73 | return false; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline bool arch_has_pmem_api(void) | ||
| 77 | { | ||
| 78 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem(); | ||
| 79 | } | ||
| 80 | |||
| 81 | /* | ||
| 82 | * These defaults seek to offer decent performance and minimize the | ||
| 83 | * window between i/o completion and writes being durable on media. | ||
| 84 | * However, it is undefined / architecture specific whether | ||
| 85 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for | ||
| 86 | * making data durable relative to i/o completion. | ||
| 87 | */ | ||
| 88 | static void default_memcpy_to_pmem(void __pmem *dst, const void *src, | ||
| 89 | size_t size) | ||
| 90 | { | ||
| 91 | memcpy((void __force *) dst, src, size); | ||
| 92 | } | ||
| 93 | |||
| 94 | static void __pmem *default_memremap_pmem(resource_size_t offset, | ||
| 95 | unsigned long size) | ||
| 96 | { | ||
| 97 | return (void __pmem __force *)ioremap_wt(offset, size); | ||
| 98 | } | ||
| 99 | |||
| 100 | /** | ||
| 101 | * memremap_pmem - map physical persistent memory for pmem api | ||
| 102 | * @offset: physical address of persistent memory | ||
| 103 | * @size: size of the mapping | ||
| 104 | * | ||
| 105 | * Establish a mapping of the architecture specific memory type expected | ||
| 106 | * by memcpy_to_pmem() and wmb_pmem(). For example, it may be | ||
| 107 | * the case that an uncacheable or writethrough mapping is sufficient, | ||
| 108 | * or a writeback mapping provided memcpy_to_pmem() and | ||
| 109 | * wmb_pmem() arrange for the data to be written through the | ||
| 110 | * cache to persistent media. | ||
| 111 | */ | ||
| 112 | static inline void __pmem *memremap_pmem(resource_size_t offset, | ||
| 113 | unsigned long size) | ||
| 114 | { | ||
| 115 | if (arch_has_pmem_api()) | ||
| 116 | return arch_memremap_pmem(offset, size); | ||
| 117 | return default_memremap_pmem(offset, size); | ||
| 118 | } | ||
| 119 | |||
| 120 | /** | ||
| 121 | * memcpy_to_pmem - copy data to persistent memory | ||
| 122 | * @dst: destination buffer for the copy | ||
| 123 | * @src: source buffer for the copy | ||
| 124 | * @n: length of the copy in bytes | ||
| 125 | * | ||
| 126 | * Perform a memory copy that results in the destination of the copy | ||
| 127 | * being effectively evicted from, or never written to, the processor | ||
| 128 | * cache hierarchy after the copy completes. After memcpy_to_pmem() | ||
| 129 | * data may still reside in cpu or platform buffers, so this operation | ||
| 130 | * must be followed by a wmb_pmem(). | ||
| 131 | */ | ||
| 132 | static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) | ||
| 133 | { | ||
| 134 | if (arch_has_pmem_api()) | ||
| 135 | arch_memcpy_to_pmem(dst, src, n); | ||
| 136 | else | ||
| 137 | default_memcpy_to_pmem(dst, src, n); | ||
| 138 | } | ||
| 139 | |||
| 140 | /** | ||
| 141 | * wmb_pmem - synchronize writes to persistent memory | ||
| 142 | * | ||
| 143 | * After a series of memcpy_to_pmem() operations this drains data from | ||
| 144 | * cpu write buffers and any platform (memory controller) buffers to | ||
| 145 | * ensure that written data is durable on persistent memory media. | ||
| 146 | */ | ||
| 147 | static inline void wmb_pmem(void) | ||
| 148 | { | ||
| 149 | if (arch_has_pmem_api()) | ||
| 150 | arch_wmb_pmem(); | ||
| 151 | } | ||
| 152 | #endif /* __PMEM_H__ */ | ||
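Putting the pieces together, a user of the API maps the region, copies through memcpy_to_pmem(), and then fences with wmb_pmem() before considering the data durable. A minimal sketch; the physical address and length come from the caller and error handling is reduced to the essentials:

	#include <linux/pmem.h>

	static int pmem_write_example(resource_size_t phys, const void *buf, size_t len)
	{
		void __pmem *addr = memremap_pmem(phys, len);

		if (!addr)
			return -ENOMEM;

		memcpy_to_pmem(addr, buf, len);	/* data may still sit in write buffers */
		wmb_pmem();			/* drain buffers: now durable on media */

		memunmap_pmem(addr);
		return 0;
	}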
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index cf112b4075c8..522757ac9cd4 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h | |||
| @@ -215,6 +215,10 @@ struct max17042_platform_data { | |||
| 215 | * the datasheet although it can be changed by board designers. | 215 | * the datasheet although it can be changed by board designers. |
| 216 | */ | 216 | */ |
| 217 | unsigned int r_sns; | 217 | unsigned int r_sns; |
| 218 | int vmin; /* in millivolts */ | ||
| 219 | int vmax; /* in millivolts */ | ||
| 220 | int temp_min; /* in tenths of degree Celsius */ | ||
| 221 | int temp_max; /* in tenths of degree Celsius */ | ||
| 218 | }; | 222 | }; |
| 219 | 223 | ||
| 220 | #endif /* __MAX17042_BATTERY_H_ */ | 224 | #endif /* __MAX17042_BATTERY_H_ */ |
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 75a1dd8dc56e..ef9f1592185d 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h | |||
| @@ -206,6 +206,11 @@ struct power_supply_desc { | |||
| 206 | int (*set_property)(struct power_supply *psy, | 206 | int (*set_property)(struct power_supply *psy, |
| 207 | enum power_supply_property psp, | 207 | enum power_supply_property psp, |
| 208 | const union power_supply_propval *val); | 208 | const union power_supply_propval *val); |
| 209 | /* | ||
| 210 | * property_is_writeable() will be called while the power supply is | ||
| 211 | * being registered. If that happens during device probe, it must not | ||
| 212 | * access the device's internal data (probe has not finished yet). | ||
| 213 | */ | ||
| 209 | int (*property_is_writeable)(struct power_supply *psy, | 214 | int (*property_is_writeable)(struct power_supply *psy, |
| 210 | enum power_supply_property psp); | 215 | enum power_supply_property psp); |
| 211 | void (*external_power_changed)(struct power_supply *psy); | 216 | void (*external_power_changed)(struct power_supply *psy); |
| @@ -237,6 +242,7 @@ struct power_supply { | |||
| 237 | /* private */ | 242 | /* private */ |
| 238 | struct device dev; | 243 | struct device dev; |
| 239 | struct work_struct changed_work; | 244 | struct work_struct changed_work; |
| 245 | struct delayed_work deferred_register_work; | ||
| 240 | spinlock_t changed_lock; | 246 | spinlock_t changed_lock; |
| 241 | bool changed; | 247 | bool changed; |
| 242 | atomic_t use_cnt; | 248 | atomic_t use_cnt; |
| @@ -286,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy); | |||
| 286 | #ifdef CONFIG_OF | 292 | #ifdef CONFIG_OF |
| 287 | extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, | 293 | extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, |
| 288 | const char *property); | 294 | const char *property); |
| 295 | extern struct power_supply *devm_power_supply_get_by_phandle( | ||
| 296 | struct device *dev, const char *property); | ||
| 289 | #else /* !CONFIG_OF */ | 297 | #else /* !CONFIG_OF */ |
| 290 | static inline struct power_supply * | 298 | static inline struct power_supply * |
| 291 | power_supply_get_by_phandle(struct device_node *np, const char *property) | 299 | power_supply_get_by_phandle(struct device_node *np, const char *property) |
| 292 | { return NULL; } | 300 | { return NULL; } |
| 301 | static inline struct power_supply * | ||
| 302 | devm_power_supply_get_by_phandle(struct device *dev, const char *property) | ||
| 303 | { return NULL; } | ||
| 293 | #endif /* CONFIG_OF */ | 304 | #endif /* CONFIG_OF */ |
| 294 | extern void power_supply_changed(struct power_supply *psy); | 305 | extern void power_supply_changed(struct power_supply *psy); |
| 295 | extern int power_supply_am_i_supplied(struct power_supply *psy); | 306 | extern int power_supply_am_i_supplied(struct power_supply *psy); |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 0f1534acaf60..84991f185173 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -293,6 +293,8 @@ struct preempt_notifier { | |||
| 293 | struct preempt_ops *ops; | 293 | struct preempt_ops *ops; |
| 294 | }; | 294 | }; |
| 295 | 295 | ||
| 296 | void preempt_notifier_inc(void); | ||
| 297 | void preempt_notifier_dec(void); | ||
| 296 | void preempt_notifier_register(struct preempt_notifier *notifier); | 298 | void preempt_notifier_register(struct preempt_notifier *notifier); |
| 297 | void preempt_notifier_unregister(struct preempt_notifier *notifier); | 299 | void preempt_notifier_unregister(struct preempt_notifier *notifier); |
| 298 | 300 | ||
diff --git a/include/linux/printk.h b/include/linux/printk.h index 9b30871c9149..a6298b27ac99 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -30,6 +30,8 @@ static inline const char *printk_skip_level(const char *buffer) | |||
| 30 | return buffer; | 30 | return buffer; |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | #define CONSOLE_EXT_LOG_MAX 8192 | ||
| 34 | |||
| 33 | /* printk's without a loglevel use this.. */ | 35 | /* printk's without a loglevel use this.. */ |
| 34 | #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT | 36 | #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT |
| 35 | 37 | ||
| @@ -120,7 +122,7 @@ static inline __printf(1, 2) __cold | |||
| 120 | void early_printk(const char *s, ...) { } | 122 | void early_printk(const char *s, ...) { } |
| 121 | #endif | 123 | #endif |
| 122 | 124 | ||
| 123 | typedef int(*printk_func_t)(const char *fmt, va_list args); | 125 | typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); |
| 124 | 126 | ||
| 125 | #ifdef CONFIG_PRINTK | 127 | #ifdef CONFIG_PRINTK |
| 126 | asmlinkage __printf(5, 0) | 128 | asmlinkage __printf(5, 0) |
| @@ -164,7 +166,7 @@ char *log_buf_addr_get(void); | |||
| 164 | u32 log_buf_len_get(void); | 166 | u32 log_buf_len_get(void); |
| 165 | void log_buf_kexec_setup(void); | 167 | void log_buf_kexec_setup(void); |
| 166 | void __init setup_log_buf(int early); | 168 | void __init setup_log_buf(int early); |
| 167 | void dump_stack_set_arch_desc(const char *fmt, ...); | 169 | __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); |
| 168 | void dump_stack_print_info(const char *log_lvl); | 170 | void dump_stack_print_info(const char *log_lvl); |
| 169 | void show_regs_print_info(const char *log_lvl); | 171 | void show_regs_print_info(const char *log_lvl); |
| 170 | #else | 172 | #else |
| @@ -215,7 +217,7 @@ static inline void setup_log_buf(int early) | |||
| 215 | { | 217 | { |
| 216 | } | 218 | } |
| 217 | 219 | ||
| 218 | static inline void dump_stack_set_arch_desc(const char *fmt, ...) | 220 | static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...) |
| 219 | { | 221 | { |
| 220 | } | 222 | } |
| 221 | 223 | ||
diff --git a/include/linux/property.h b/include/linux/property.h index de8bdf417a35..76ebde9c11d4 100644 --- a/include/linux/property.h +++ b/include/linux/property.h | |||
| @@ -164,4 +164,6 @@ struct property_set { | |||
| 164 | 164 | ||
| 165 | void device_add_property_set(struct device *dev, struct property_set *pset); | 165 | void device_add_property_set(struct device *dev, struct property_set *pset); |
| 166 | 166 | ||
| 167 | bool device_dma_is_coherent(struct device *dev); | ||
| 168 | |||
| 167 | #endif /* _LINUX_PROPERTY_H_ */ | 169 | #endif /* _LINUX_PROPERTY_H_ */ |
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index dab545bb66b3..0485bab061fd 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
| @@ -194,8 +194,9 @@ enum pxa_ssp_type { | |||
| 194 | PXA168_SSP, | 194 | PXA168_SSP, |
| 195 | PXA910_SSP, | 195 | PXA910_SSP, |
| 196 | CE4100_SSP, | 196 | CE4100_SSP, |
| 197 | LPSS_SSP, | ||
| 198 | QUARK_X1000_SSP, | 197 | QUARK_X1000_SSP, |
| 198 | LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ | ||
| 199 | LPSS_BYT_SSP, | ||
| 199 | }; | 200 | }; |
| 200 | 201 | ||
| 201 | struct ssp_device { | 202 | struct ssp_device { |
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index d7a974d5f57c..6e7d5ec65838 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | /* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. |
| 2 | * Copyright (C) 2015 Linaro Ltd. | 2 | * Copyright (C) 2015 Linaro Ltd. |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| @@ -16,6 +16,17 @@ | |||
| 16 | extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); | 16 | extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); |
| 17 | extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); | 17 | extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); |
| 18 | 18 | ||
| 19 | #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 | ||
| 20 | |||
| 21 | struct qcom_scm_hdcp_req { | ||
| 22 | u32 addr; | ||
| 23 | u32 val; | ||
| 24 | }; | ||
| 25 | |||
| 26 | extern bool qcom_scm_hdcp_available(void); | ||
| 27 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, | ||
| 28 | u32 *resp); | ||
| 29 | |||
| 19 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 | 30 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 |
| 20 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 | 31 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 |
| 21 | 32 | ||
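The HDCP call takes an array of register write requests and returns the firmware response. A sketch of the expected call sequence; the addresses and values are placeholders only:

	#include <linux/kernel.h>
	#include <linux/qcom_scm.h>

	static int foo_hdcp_write(void)
	{
		struct qcom_scm_hdcp_req req[2] = {
			{ .addr = 0x0, .val = 0x0 },	/* illustrative register writes */
			{ .addr = 0x4, .val = 0x1 },
		};
		u32 resp;

		if (!qcom_scm_hdcp_available())
			return -ENODEV;

		return qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
	}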
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index fb31765e935a..830c4992088d 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
| 33 | #include <linux/stddef.h> | 33 | #include <linux/stddef.h> |
| 34 | #include <linux/rcupdate.h> | ||
| 34 | 35 | ||
| 35 | struct rb_node { | 36 | struct rb_node { |
| 36 | unsigned long __rb_parent_color; | 37 | unsigned long __rb_parent_color; |
| @@ -73,11 +74,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *); | |||
| 73 | extern struct rb_node *rb_next_postorder(const struct rb_node *); | 74 | extern struct rb_node *rb_next_postorder(const struct rb_node *); |
| 74 | 75 | ||
| 75 | /* Fast replacement of a single node without remove/rebalance/add/rebalance */ | 76 | /* Fast replacement of a single node without remove/rebalance/add/rebalance */ |
| 76 | extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, | 77 | extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, |
| 77 | struct rb_root *root); | 78 | struct rb_root *root); |
| 78 | 79 | ||
| 79 | static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, | 80 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
| 80 | struct rb_node ** rb_link) | 81 | struct rb_node **rb_link) |
| 81 | { | 82 | { |
| 82 | node->__rb_parent_color = (unsigned long)parent; | 83 | node->__rb_parent_color = (unsigned long)parent; |
| 83 | node->rb_left = node->rb_right = NULL; | 84 | node->rb_left = node->rb_right = NULL; |
| @@ -85,6 +86,15 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, | |||
| 85 | *rb_link = node; | 86 | *rb_link = node; |
| 86 | } | 87 | } |
| 87 | 88 | ||
| 89 | static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, | ||
| 90 | struct rb_node **rb_link) | ||
| 91 | { | ||
| 92 | node->__rb_parent_color = (unsigned long)parent; | ||
| 93 | node->rb_left = node->rb_right = NULL; | ||
| 94 | |||
| 95 | rcu_assign_pointer(*rb_link, node); | ||
| 96 | } | ||
| 97 | |||
| 88 | #define rb_entry_safe(ptr, type, member) \ | 98 | #define rb_entry_safe(ptr, type, member) \ |
| 89 | ({ typeof(ptr) ____ptr = (ptr); \ | 99 | ({ typeof(ptr) ____ptr = (ptr); \ |
| 90 | ____ptr ? rb_entry(____ptr, type, member) : NULL; \ | 100 | ____ptr ? rb_entry(____ptr, type, member) : NULL; \ |
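rb_link_node_rcu() differs from rb_link_node() only in publishing the new node with rcu_assign_pointer(), so a reader traversing the tree under RCU never sees it half-initialised. A sketch of the writer side, assuming writers are serialised by an external lock:

	#include <linux/rbtree.h>

	struct foo {
		struct rb_node node;
		unsigned long key;
	};

	static void foo_insert(struct rb_root *root, struct foo *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct foo *this = rb_entry(*link, struct foo, node);

			parent = *link;
			if (new->key < this->key)
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}

		/* publish only after the node's fields above are fully set up */
		rb_link_node_rcu(&new->node, parent, link);
		rb_insert_color(&new->node, root);
	}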
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 378c5ee75f78..14d7b831b63a 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h | |||
| @@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, | |||
| 123 | { | 123 | { |
| 124 | if (parent) { | 124 | if (parent) { |
| 125 | if (parent->rb_left == old) | 125 | if (parent->rb_left == old) |
| 126 | parent->rb_left = new; | 126 | WRITE_ONCE(parent->rb_left, new); |
| 127 | else | 127 | else |
| 128 | parent->rb_right = new; | 128 | WRITE_ONCE(parent->rb_right, new); |
| 129 | } else | 129 | } else |
| 130 | root->rb_node = new; | 130 | WRITE_ONCE(root->rb_node, new); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, | 133 | extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, |
| @@ -137,7 +137,8 @@ static __always_inline struct rb_node * | |||
| 137 | __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | 137 | __rb_erase_augmented(struct rb_node *node, struct rb_root *root, |
| 138 | const struct rb_augment_callbacks *augment) | 138 | const struct rb_augment_callbacks *augment) |
| 139 | { | 139 | { |
| 140 | struct rb_node *child = node->rb_right, *tmp = node->rb_left; | 140 | struct rb_node *child = node->rb_right; |
| 141 | struct rb_node *tmp = node->rb_left; | ||
| 141 | struct rb_node *parent, *rebalance; | 142 | struct rb_node *parent, *rebalance; |
| 142 | unsigned long pc; | 143 | unsigned long pc; |
| 143 | 144 | ||
| @@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | |||
| 167 | tmp = parent; | 168 | tmp = parent; |
| 168 | } else { | 169 | } else { |
| 169 | struct rb_node *successor = child, *child2; | 170 | struct rb_node *successor = child, *child2; |
| 171 | |||
| 170 | tmp = child->rb_left; | 172 | tmp = child->rb_left; |
| 171 | if (!tmp) { | 173 | if (!tmp) { |
| 172 | /* | 174 | /* |
| @@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | |||
| 180 | */ | 182 | */ |
| 181 | parent = successor; | 183 | parent = successor; |
| 182 | child2 = successor->rb_right; | 184 | child2 = successor->rb_right; |
| 185 | |||
| 183 | augment->copy(node, successor); | 186 | augment->copy(node, successor); |
| 184 | } else { | 187 | } else { |
| 185 | /* | 188 | /* |
| @@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | |||
| 201 | successor = tmp; | 204 | successor = tmp; |
| 202 | tmp = tmp->rb_left; | 205 | tmp = tmp->rb_left; |
| 203 | } while (tmp); | 206 | } while (tmp); |
| 204 | parent->rb_left = child2 = successor->rb_right; | 207 | child2 = successor->rb_right; |
| 205 | successor->rb_right = child; | 208 | WRITE_ONCE(parent->rb_left, child2); |
| 209 | WRITE_ONCE(successor->rb_right, child); | ||
| 206 | rb_set_parent(child, successor); | 210 | rb_set_parent(child, successor); |
| 211 | |||
| 207 | augment->copy(node, successor); | 212 | augment->copy(node, successor); |
| 208 | augment->propagate(parent, successor); | 213 | augment->propagate(parent, successor); |
| 209 | } | 214 | } |
| 210 | 215 | ||
| 211 | successor->rb_left = tmp = node->rb_left; | 216 | tmp = node->rb_left; |
| 217 | WRITE_ONCE(successor->rb_left, tmp); | ||
| 212 | rb_set_parent(tmp, successor); | 218 | rb_set_parent(tmp, successor); |
| 213 | 219 | ||
| 214 | pc = node->__rb_parent_color; | 220 | pc = node->__rb_parent_color; |
| 215 | tmp = __rb_parent(pc); | 221 | tmp = __rb_parent(pc); |
| 216 | __rb_change_child(node, successor, tmp, root); | 222 | __rb_change_child(node, successor, tmp, root); |
| 223 | |||
| 217 | if (child2) { | 224 | if (child2) { |
| 218 | successor->__rb_parent_color = pc; | 225 | successor->__rb_parent_color = pc; |
| 219 | rb_set_parent_color(child2, parent, RB_BLACK); | 226 | rb_set_parent_color(child2, parent, RB_BLACK); |
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h new file mode 100644 index 000000000000..4f3432c61d12 --- /dev/null +++ b/include/linux/rbtree_latch.h | |||
| @@ -0,0 +1,212 @@ | |||
| 1 | /* | ||
| 2 | * Latched RB-trees | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org> | ||
| 5 | * | ||
| 6 | * Since RB-trees have non-atomic modifications they're not immediately suited | ||
| 7 | * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for | ||
| 8 | * lockless lookups, we cannot guarantee they return a correct result. | ||
| 9 | * | ||
| 10 | * The simplest solution is a seqlock + RB-tree, this will allow lockless | ||
| 11 | * lookups; but has the constraint (inherent to the seqlock) that read sides | ||
| 12 | * cannot nest in write sides. | ||
| 13 | * | ||
| 14 | * If we need to allow unconditional lookups (say as required for NMI context | ||
| 15 | * usage) we need a more complex setup; this data structure provides this by | ||
| 16 | * employing the latch technique -- see @raw_write_seqcount_latch -- to | ||
| 17 | * implement a latched RB-tree which does allow for unconditional lookups by | ||
| 18 | * virtue of always having (at least) one stable copy of the tree. | ||
| 19 | * | ||
| 20 | * However, while we have the guarantee that there is at all times one stable | ||
| 21 | * copy, this does not guarantee an iteration will not observe modifications. | ||
| 22 | * What might have been a stable copy at the start of the iteration, need not | ||
| 23 | * remain so for the duration of the iteration. | ||
| 24 | * | ||
| 25 | * Therefore, this does require a lockless RB-tree iteration to be non-fatal; | ||
| 26 | * see the comment in lib/rbtree.c. Note however that we only require the first | ||
| 27 | * condition -- not seeing partial stores -- because the latch thing isolates | ||
| 28 | * us from loops. If we were to interrupt a modification the lookup would be | ||
| 29 | * pointed at the stable tree and complete while the modification was halted. | ||
| 30 | */ | ||
| 31 | |||
| 32 | #ifndef RB_TREE_LATCH_H | ||
| 33 | #define RB_TREE_LATCH_H | ||
| 34 | |||
| 35 | #include <linux/rbtree.h> | ||
| 36 | #include <linux/seqlock.h> | ||
| 37 | |||
| 38 | struct latch_tree_node { | ||
| 39 | struct rb_node node[2]; | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct latch_tree_root { | ||
| 43 | seqcount_t seq; | ||
| 44 | struct rb_root tree[2]; | ||
| 45 | }; | ||
| 46 | |||
| 47 | /** | ||
| 48 | * latch_tree_ops - operators to define the tree order | ||
| 49 | * @less: used for insertion; provides the (partial) order between two elements. | ||
| 50 | * @comp: used for lookups; provides the order between the search key and an element. | ||
| 51 | * | ||
| 52 | * The operators are related like: | ||
| 53 | * | ||
| 54 | * comp(a->key,b) < 0 := less(a,b) | ||
| 55 | * comp(a->key,b) > 0 := less(b,a) | ||
| 56 | * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) | ||
| 57 | * | ||
| 58 | * If these operators define a partial order on the elements we make no | ||
| 59 | * guarantee on which of the elements matching the key is found. See | ||
| 60 | * latch_tree_find(). | ||
| 61 | */ | ||
| 62 | struct latch_tree_ops { | ||
| 63 | bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b); | ||
| 64 | int (*comp)(void *key, struct latch_tree_node *b); | ||
| 65 | }; | ||
| 66 | |||
| 67 | static __always_inline struct latch_tree_node * | ||
| 68 | __lt_from_rb(struct rb_node *node, int idx) | ||
| 69 | { | ||
| 70 | return container_of(node, struct latch_tree_node, node[idx]); | ||
| 71 | } | ||
| 72 | |||
| 73 | static __always_inline void | ||
| 74 | __lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx, | ||
| 75 | bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b)) | ||
| 76 | { | ||
| 77 | struct rb_root *root = <r->tree[idx]; | ||
| 78 | struct rb_node **link = &root->rb_node; | ||
| 79 | struct rb_node *node = <n->node[idx]; | ||
| 80 | struct rb_node *parent = NULL; | ||
| 81 | struct latch_tree_node *ltp; | ||
| 82 | |||
| 83 | while (*link) { | ||
| 84 | parent = *link; | ||
| 85 | ltp = __lt_from_rb(parent, idx); | ||
| 86 | |||
| 87 | if (less(ltn, ltp)) | ||
| 88 | link = &parent->rb_left; | ||
| 89 | else | ||
| 90 | link = &parent->rb_right; | ||
| 91 | } | ||
| 92 | |||
| 93 | rb_link_node_rcu(node, parent, link); | ||
| 94 | rb_insert_color(node, root); | ||
| 95 | } | ||
| 96 | |||
| 97 | static __always_inline void | ||
| 98 | __lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx) | ||
| 99 | { | ||
| 100 | rb_erase(<n->node[idx], <r->tree[idx]); | ||
| 101 | } | ||
| 102 | |||
| 103 | static __always_inline struct latch_tree_node * | ||
| 104 | __lt_find(void *key, struct latch_tree_root *ltr, int idx, | ||
| 105 | int (*comp)(void *key, struct latch_tree_node *node)) | ||
| 106 | { | ||
| 107 | struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node); | ||
| 108 | struct latch_tree_node *ltn; | ||
| 109 | int c; | ||
| 110 | |||
| 111 | while (node) { | ||
| 112 | ltn = __lt_from_rb(node, idx); | ||
| 113 | c = comp(key, ltn); | ||
| 114 | |||
| 115 | if (c < 0) | ||
| 116 | node = rcu_dereference_raw(node->rb_left); | ||
| 117 | else if (c > 0) | ||
| 118 | node = rcu_dereference_raw(node->rb_right); | ||
| 119 | else | ||
| 120 | return ltn; | ||
| 121 | } | ||
| 122 | |||
| 123 | return NULL; | ||
| 124 | } | ||
| 125 | |||
| 126 | /** | ||
| 127 | * latch_tree_insert() - insert @node into the trees @root | ||
| 128 | * @node: nodes to insert | ||
| 129 | * @root: trees to insert @node into | ||
| 130 | * @ops: operators defining the node order | ||
| 131 | * | ||
| 132 | * It inserts @node into @root in an ordered fashion such that we can always | ||
| 133 | * observe one complete tree. See the comment for raw_write_seqcount_latch(). | ||
| 134 | * | ||
| 135 | * The inserts use rcu_assign_pointer() to publish the element such that the | ||
| 136 | * tree structure is stored before we can observe the new @node. | ||
| 137 | * | ||
| 138 | * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be | ||
| 139 | * serialized. | ||
| 140 | */ | ||
| 141 | static __always_inline void | ||
| 142 | latch_tree_insert(struct latch_tree_node *node, | ||
| 143 | struct latch_tree_root *root, | ||
| 144 | const struct latch_tree_ops *ops) | ||
| 145 | { | ||
| 146 | raw_write_seqcount_latch(&root->seq); | ||
| 147 | __lt_insert(node, root, 0, ops->less); | ||
| 148 | raw_write_seqcount_latch(&root->seq); | ||
| 149 | __lt_insert(node, root, 1, ops->less); | ||
| 150 | } | ||
| 151 | |||
| 152 | /** | ||
| 153 | * latch_tree_erase() - removes @node from the trees @root | ||
| 154 | * @node: nodes to remove | ||
| 155 | * @root: trees to remove @node from | ||
| 156 | * @ops: operators defining the node order | ||
| 157 | * | ||
| 158 | * Removes @node from the trees @root in an ordered fashion such that we can | ||
| 159 | * always observe one complete tree. See the comment for | ||
| 160 | * raw_write_seqcount_latch(). | ||
| 161 | * | ||
| 162 | * It is assumed that @node will observe one RCU quiescent state before being | ||
| 163 | * reused of freed. | ||
| 164 | * | ||
| 165 | * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be | ||
| 166 | * serialized. | ||
| 167 | */ | ||
| 168 | static __always_inline void | ||
| 169 | latch_tree_erase(struct latch_tree_node *node, | ||
| 170 | struct latch_tree_root *root, | ||
| 171 | const struct latch_tree_ops *ops) | ||
| 172 | { | ||
| 173 | raw_write_seqcount_latch(&root->seq); | ||
| 174 | __lt_erase(node, root, 0); | ||
| 175 | raw_write_seqcount_latch(&root->seq); | ||
| 176 | __lt_erase(node, root, 1); | ||
| 177 | } | ||
| 178 | |||
| 179 | /** | ||
| 180 | * latch_tree_find() - find the node matching @key in the trees @root | ||
| 181 | * @key: search key | ||
| 182 | * @root: trees to search for @key | ||
| 183 | * @ops: operators defining the node order | ||
| 184 | * | ||
| 185 | * Does a lockless lookup in the trees @root for the node matching @key. | ||
| 186 | * | ||
| 187 | * It is assumed that this is called while holding the appropriate RCU read | ||
| 188 | * side lock. | ||
| 189 | * | ||
| 190 | * If the operators define a partial order on the elements (there are multiple | ||
| 191 | * elements which have the same key value) it is undefined which of these | ||
| 192 | * elements will be found. Nor is it possible to iterate the tree to find | ||
| 193 | * further elements with the same key value. | ||
| 194 | * | ||
| 195 | * Returns: a pointer to the node matching @key or NULL. | ||
| 196 | */ | ||
| 197 | static __always_inline struct latch_tree_node * | ||
| 198 | latch_tree_find(void *key, struct latch_tree_root *root, | ||
| 199 | const struct latch_tree_ops *ops) | ||
| 200 | { | ||
| 201 | struct latch_tree_node *node; | ||
| 202 | unsigned int seq; | ||
| 203 | |||
| 204 | do { | ||
| 205 | seq = raw_read_seqcount_latch(&root->seq); | ||
| 206 | node = __lt_find(key, root, seq & 1, ops->comp); | ||
| 207 | } while (read_seqcount_retry(&root->seq, seq)); | ||
| 208 | |||
| 209 | return node; | ||
| 210 | } | ||
| 211 | |||
| 212 | #endif /* RB_TREE_LATCH_H */ | ||
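For illustration, a minimal sketch (not part of this patch) of how a caller might use the API declared above. struct my_item, my_tree_ops, my_tree_root and the helper names are hypothetical; only latch_tree_insert(), latch_tree_find() and the two ops callbacks come from this header.

	/* Hypothetical user of rbtree_latch.h: items keyed by an unsigned long. */
	struct my_item {
		unsigned long key;
		struct latch_tree_node ltn;
	};

	static bool my_less(struct latch_tree_node *a, struct latch_tree_node *b)
	{
		return container_of(a, struct my_item, ltn)->key <
		       container_of(b, struct my_item, ltn)->key;
	}

	static int my_comp(void *key, struct latch_tree_node *n)
	{
		unsigned long k = (unsigned long)key;
		unsigned long nk = container_of(n, struct my_item, ltn)->key;

		if (k < nk)
			return -1;
		if (k > nk)
			return 1;
		return 0;
	}

	static const struct latch_tree_ops my_tree_ops = {
		.less = my_less,
		.comp = my_comp,
	};

	static struct latch_tree_root my_tree_root;

	/* Writer side: must be serialized externally, e.g. under a mutex. */
	static void my_item_add(struct my_item *item)
	{
		latch_tree_insert(&item->ltn, &my_tree_root, &my_tree_ops);
	}

	/* Reader side: lockless, needs only an RCU read-side critical section. */
	static struct my_item *my_item_find(unsigned long key)
	{
		struct latch_tree_node *n;

		rcu_read_lock();
		n = latch_tree_find((void *)key, &my_tree_root, &my_tree_ops);
		rcu_read_unlock();

		return n ? container_of(n, struct my_item, ltn) : NULL;
	}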
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 33a056bb886f..4cf5f51b4c9c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -633,21 +633,6 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 633 | #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) | 633 | #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) |
| 634 | 634 | ||
| 635 | /** | 635 | /** |
| 636 | * lockless_dereference() - safely load a pointer for later dereference | ||
| 637 | * @p: The pointer to load | ||
| 638 | * | ||
| 639 | * Similar to rcu_dereference(), but for situations where the pointed-to | ||
| 640 | * object's lifetime is managed by something other than RCU. That | ||
| 641 | * "something other" might be reference counting or simple immortality. | ||
| 642 | */ | ||
| 643 | #define lockless_dereference(p) \ | ||
| 644 | ({ \ | ||
| 645 | typeof(p) _________p1 = READ_ONCE(p); \ | ||
| 646 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ | ||
| 647 | (_________p1); \ | ||
| 648 | }) | ||
| 649 | |||
| 650 | /** | ||
| 651 | * rcu_assign_pointer() - assign to RCU-protected pointer | 636 | * rcu_assign_pointer() - assign to RCU-protected pointer |
| 652 | * @p: pointer to assign to | 637 | * @p: pointer to assign to |
| 653 | * @v: value to assign (publish) | 638 | * @v: value to assign (publish) |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 116655d92269..59c55ea0f0b5 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
| @@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg, | |||
| 433 | unsigned int mask, unsigned int val, | 433 | unsigned int mask, unsigned int val, |
| 434 | bool *change); | 434 | bool *change); |
| 435 | int regmap_get_val_bytes(struct regmap *map); | 435 | int regmap_get_val_bytes(struct regmap *map); |
| 436 | int regmap_get_max_register(struct regmap *map); | ||
| 437 | int regmap_get_reg_stride(struct regmap *map); | ||
| 436 | int regmap_async_complete(struct regmap *map); | 438 | int regmap_async_complete(struct regmap *map); |
| 437 | bool regmap_can_raw_write(struct regmap *map); | 439 | bool regmap_can_raw_write(struct regmap *map); |
| 438 | 440 | ||
| @@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map) | |||
| 676 | return -EINVAL; | 678 | return -EINVAL; |
| 677 | } | 679 | } |
| 678 | 680 | ||
| 681 | static inline int regmap_get_max_register(struct regmap *map) | ||
| 682 | { | ||
| 683 | WARN_ONCE(1, "regmap API is disabled"); | ||
| 684 | return -EINVAL; | ||
| 685 | } | ||
| 686 | |||
| 687 | static inline int regmap_get_reg_stride(struct regmap *map) | ||
| 688 | { | ||
| 689 | WARN_ONCE(1, "regmap API is disabled"); | ||
| 690 | return -EINVAL; | ||
| 691 | } | ||
| 692 | |||
| 679 | static inline int regcache_sync(struct regmap *map) | 693 | static inline int regcache_sync(struct regmap *map) |
| 680 | { | 694 | { |
| 681 | WARN_ONCE(1, "regmap API is disabled"); | 695 | WARN_ONCE(1, "regmap API is disabled"); |
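A brief, hedged illustration of what the two new regmap accessors enable: a debug helper that walks every register of a map without hard-coding its geometry. my_dump_regs() and its caller are hypothetical; regmap_read() is the existing regmap API.

	static void my_dump_regs(struct regmap *map)
	{
		int max_reg = regmap_get_max_register(map);
		int stride = regmap_get_reg_stride(map);
		unsigned int reg, val;

		if (max_reg < 0 || stride < 1)
			return;

		for (reg = 0; reg <= (unsigned int)max_reg; reg += stride) {
			if (!regmap_read(map, reg, &val))
				pr_info("0x%x = 0x%x\n", reg, val);
		}
	}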
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index fffa688ac3a7..4db9fbe4889d 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
| @@ -91,6 +91,7 @@ struct regulator_linear_range { | |||
| 91 | * @set_current_limit: Configure a limit for a current-limited regulator. | 91 | * @set_current_limit: Configure a limit for a current-limited regulator. |
| 92 | * The driver should select the current closest to max_uA. | 92 | * The driver should select the current closest to max_uA. |
| 93 | * @get_current_limit: Get the configured limit for a current-limited regulator. | 93 | * @get_current_limit: Get the configured limit for a current-limited regulator. |
| 94 | * @set_input_current_limit: Configure an input limit. | ||
| 94 | * | 95 | * |
| 95 | * @set_mode: Set the configured operating mode for the regulator. | 96 | * @set_mode: Set the configured operating mode for the regulator. |
| 96 | * @get_mode: Get the configured operating mode for the regulator. | 97 | * @get_mode: Get the configured operating mode for the regulator. |
| @@ -111,6 +112,7 @@ struct regulator_linear_range { | |||
| 111 | * to stabilise after being set to a new value, in microseconds. | 112 | * to stabilise after being set to a new value, in microseconds. |
| 112 | * The function provides the from and to voltage selector, the | 113 | * The function provides the from and to voltage selector, the |
| 113 | * function should return the worst case. | 114 | * function should return the worst case. |
| 115 | * @set_soft_start: Enable soft start for the regulator. | ||
| 114 | * | 116 | * |
| 115 | * @set_suspend_voltage: Set the voltage for the regulator when the system | 117 | * @set_suspend_voltage: Set the voltage for the regulator when the system |
| 116 | * is suspended. | 118 | * is suspended. |
| @@ -121,6 +123,9 @@ struct regulator_linear_range { | |||
| 121 | * @set_suspend_mode: Set the operating mode for the regulator when the | 123 | * @set_suspend_mode: Set the operating mode for the regulator when the |
| 122 | * system is suspended. | 124 | * system is suspended. |
| 123 | * | 125 | * |
| 126 | * @set_pull_down: Configure the regulator to pull down when the regulator | ||
| 127 | * is disabled. | ||
| 128 | * | ||
| 124 | * This struct describes regulator operations which can be implemented by | 129 | * This struct describes regulator operations which can be implemented by |
| 125 | * regulator chip drivers. | 130 | * regulator chip drivers. |
| 126 | */ | 131 | */ |
| @@ -142,6 +147,8 @@ struct regulator_ops { | |||
| 142 | int min_uA, int max_uA); | 147 | int min_uA, int max_uA); |
| 143 | int (*get_current_limit) (struct regulator_dev *); | 148 | int (*get_current_limit) (struct regulator_dev *); |
| 144 | 149 | ||
| 150 | int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); | ||
| 151 | |||
| 145 | /* enable/disable regulator */ | 152 | /* enable/disable regulator */ |
| 146 | int (*enable) (struct regulator_dev *); | 153 | int (*enable) (struct regulator_dev *); |
| 147 | int (*disable) (struct regulator_dev *); | 154 | int (*disable) (struct regulator_dev *); |
| @@ -158,6 +165,8 @@ struct regulator_ops { | |||
| 158 | unsigned int old_selector, | 165 | unsigned int old_selector, |
| 159 | unsigned int new_selector); | 166 | unsigned int new_selector); |
| 160 | 167 | ||
| 168 | int (*set_soft_start) (struct regulator_dev *); | ||
| 169 | |||
| 161 | /* report regulator status ... most other accessors report | 170 | /* report regulator status ... most other accessors report |
| 162 | * control inputs, this reports results of combining inputs | 171 | * control inputs, this reports results of combining inputs |
| 163 | * from Linux (and other sources) with the actual load. | 172 | * from Linux (and other sources) with the actual load. |
| @@ -187,6 +196,8 @@ struct regulator_ops { | |||
| 187 | 196 | ||
| 188 | /* set regulator suspend operating mode (defined in consumer.h) */ | 197 | /* set regulator suspend operating mode (defined in consumer.h) */ |
| 189 | int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); | 198 | int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); |
| 199 | |||
| 200 | int (*set_pull_down) (struct regulator_dev *); | ||
| 190 | }; | 201 | }; |
| 191 | 202 | ||
| 192 | /* | 203 | /* |
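A sketch, not taken from any driver in this series, of how a regmap-based regulator driver might wire up the new callbacks; the MYREG_* register and bit names are invented for illustration.

	#define MYREG_CTRL		0x00		/* hypothetical control register */
	#define MYREG_SOFT_START	BIT(4)		/* hypothetical soft-start enable bit */
	#define MYREG_PULL_DOWN		BIT(3)		/* hypothetical pull-down enable bit */

	static int my_set_soft_start(struct regulator_dev *rdev)
	{
		return regmap_update_bits(rdev->regmap, MYREG_CTRL,
					  MYREG_SOFT_START, MYREG_SOFT_START);
	}

	static int my_set_pull_down(struct regulator_dev *rdev)
	{
		return regmap_update_bits(rdev->regmap, MYREG_CTRL,
					  MYREG_PULL_DOWN, MYREG_PULL_DOWN);
	}

	static const struct regulator_ops my_regulator_ops = {
		.enable		= regulator_enable_regmap,
		.disable	= regulator_disable_regmap,
		.is_enabled	= regulator_is_enabled_regmap,
		.set_soft_start	= my_set_soft_start,
		.set_pull_down	= my_set_pull_down,
	};

The core is expected to invoke these callbacks when the corresponding machine constraints (see the machine.h changes below) request soft start or pull down.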
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index b07562e082c4..b11be1260129 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
| @@ -75,6 +75,8 @@ struct regulator_state { | |||
| 75 | * | 75 | * |
| 76 | * @min_uA: Smallest current consumers may set. | 76 | * @min_uA: Smallest current consumers may set. |
| 77 | * @max_uA: Largest current consumers may set. | 77 | * @max_uA: Largest current consumers may set. |
| 78 | * @ilim_uA: Maximum input current. | ||
| 79 | * @system_load: Load that isn't captured by any consumer requests. | ||
| 78 | * | 80 | * |
| 79 | * @valid_modes_mask: Mask of modes which may be configured by consumers. | 81 | * @valid_modes_mask: Mask of modes which may be configured by consumers. |
| 80 | * @valid_ops_mask: Operations which may be performed by consumers. | 82 | * @valid_ops_mask: Operations which may be performed by consumers. |
| @@ -86,6 +88,8 @@ struct regulator_state { | |||
| 86 | * applied. | 88 | * applied. |
| 87 | * @apply_uV: Apply the voltage constraint when initialising. | 89 | * @apply_uV: Apply the voltage constraint when initialising. |
| 88 | * @ramp_disable: Disable ramp delay when initialising or when setting voltage. | 90 | * @ramp_disable: Disable ramp delay when initialising or when setting voltage. |
| 91 | * @soft_start: Enable soft start so that voltage ramps slowly. | ||
| 92 | * @pull_down: Enable pull down when regulator is disabled. | ||
| 89 | * | 93 | * |
| 90 | * @input_uV: Input voltage for regulator when supplied by another regulator. | 94 | * @input_uV: Input voltage for regulator when supplied by another regulator. |
| 91 | * | 95 | * |
| @@ -111,6 +115,9 @@ struct regulation_constraints { | |||
| 111 | /* current output range (inclusive) - for current control */ | 115 | /* current output range (inclusive) - for current control */ |
| 112 | int min_uA; | 116 | int min_uA; |
| 113 | int max_uA; | 117 | int max_uA; |
| 118 | int ilim_uA; | ||
| 119 | |||
| 120 | int system_load; | ||
| 114 | 121 | ||
| 115 | /* valid regulator operating modes for this machine */ | 122 | /* valid regulator operating modes for this machine */ |
| 116 | unsigned int valid_modes_mask; | 123 | unsigned int valid_modes_mask; |
| @@ -138,6 +145,8 @@ struct regulation_constraints { | |||
| 138 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ | 145 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ |
| 139 | unsigned apply_uV:1; /* apply uV constraint if min == max */ | 146 | unsigned apply_uV:1; /* apply uV constraint if min == max */ |
| 140 | unsigned ramp_disable:1; /* disable ramp delay */ | 147 | unsigned ramp_disable:1; /* disable ramp delay */ |
| 148 | unsigned soft_start:1; /* ramp voltage slowly */ | ||
| 149 | unsigned pull_down:1; /* pull down resistor when regulator off */ | ||
| 141 | }; | 150 | }; |
| 142 | 151 | ||
| 143 | /** | 152 | /** |
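For illustration, a hedged example of board/platform init data exercising the new constraint fields; all numeric values are invented.

	static struct regulator_init_data my_buck_init_data = {
		.constraints = {
			.min_uV		= 900000,
			.max_uV		= 1350000,
			.ilim_uA	= 3000000,	/* cap the input current at 3 A */
			.system_load	= 50000,	/* 50 mA drawn outside any consumer */
			.soft_start	= 1,		/* ramp the output slowly on enable */
			.pull_down	= 1,		/* discharge the rail when disabled */
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		},
	};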
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h index f8acc052e353..f6a8a16a0d4d 100644 --- a/include/linux/regulator/max8973-regulator.h +++ b/include/linux/regulator/max8973-regulator.h | |||
| @@ -58,6 +58,9 @@ | |||
| 58 | * control signal from EN input pin. If it is false then | 58 | * control signal from EN input pin. If it is false then |
| 59 | * voltage output will be enabled/disabled through EN bit of | 59 | * voltage output will be enabled/disabled through EN bit of |
| 60 | * device register. | 60 | * device register. |
| 61 | * @enable_gpio: Enable GPIO. If EN pin is controlled through GPIO from host | ||
| 62 | * then GPIO number can be provided. If no GPIO controlled then | ||
| 63 | * it should be -1. | ||
| 61 | * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic. | 64 | * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic. |
| 62 | * @dvs_def_state: Default state of dvs. 1 if it is high else 0. | 65 | * @dvs_def_state: Default state of dvs. 1 if it is high else 0. |
| 63 | */ | 66 | */ |
| @@ -65,6 +68,7 @@ struct max8973_regulator_platform_data { | |||
| 65 | struct regulator_init_data *reg_init_data; | 68 | struct regulator_init_data *reg_init_data; |
| 66 | unsigned long control_flags; | 69 | unsigned long control_flags; |
| 67 | bool enable_ext_control; | 70 | bool enable_ext_control; |
| 71 | int enable_gpio; | ||
| 68 | int dvs_gpio; | 72 | int dvs_gpio; |
| 69 | unsigned dvs_def_state:1; | 73 | unsigned dvs_def_state:1; |
| 70 | }; | 74 | }; |
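Correspondingly, a small hypothetical platform-data instance showing the -1 convention for the new enable_gpio field (reusing my_buck_init_data from the sketch above):

	static struct max8973_regulator_platform_data my_max8973_pdata = {
		.reg_init_data		= &my_buck_init_data,
		.enable_ext_control	= false,
		.enable_gpio		= -1,	/* EN pin is not driven by a host GPIO */
		.dvs_gpio		= -1,	/* DVS pin tied to fixed logic */
	};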
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 78b8a9b9d40a..9c4e1384f636 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h | |||
| @@ -36,11 +36,11 @@ | |||
| 36 | #define REMOTEPROC_H | 36 | #define REMOTEPROC_H |
| 37 | 37 | ||
| 38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
| 39 | #include <linux/klist.h> | ||
| 40 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
| 41 | #include <linux/virtio.h> | 40 | #include <linux/virtio.h> |
| 42 | #include <linux/completion.h> | 41 | #include <linux/completion.h> |
| 43 | #include <linux/idr.h> | 42 | #include <linux/idr.h> |
| 43 | #include <linux/of.h> | ||
| 44 | 44 | ||
| 45 | /** | 45 | /** |
| 46 | * struct resource_table - firmware resource table header | 46 | * struct resource_table - firmware resource table header |
| @@ -330,11 +330,13 @@ struct rproc; | |||
| 330 | * @start: power on the device and boot it | 330 | * @start: power on the device and boot it |
| 331 | * @stop: power off the device | 331 | * @stop: power off the device |
| 332 | * @kick: kick a virtqueue (virtqueue id given as a parameter) | 332 | * @kick: kick a virtqueue (virtqueue id given as a parameter) |
| 333 | * @da_to_va: optional platform hook to perform address translations | ||
| 333 | */ | 334 | */ |
| 334 | struct rproc_ops { | 335 | struct rproc_ops { |
| 335 | int (*start)(struct rproc *rproc); | 336 | int (*start)(struct rproc *rproc); |
| 336 | int (*stop)(struct rproc *rproc); | 337 | int (*stop)(struct rproc *rproc); |
| 337 | void (*kick)(struct rproc *rproc, int vqid); | 338 | void (*kick)(struct rproc *rproc, int vqid); |
| 339 | void * (*da_to_va)(struct rproc *rproc, u64 da, int len); | ||
| 338 | }; | 340 | }; |
| 339 | 341 | ||
| 340 | /** | 342 | /** |
| @@ -375,7 +377,7 @@ enum rproc_crash_type { | |||
| 375 | 377 | ||
| 376 | /** | 378 | /** |
| 377 | * struct rproc - represents a physical remote processor device | 379 | * struct rproc - represents a physical remote processor device |
| 378 | * @node: klist node of this rproc object | 380 | * @node: list node of this rproc object |
| 379 | * @domain: iommu domain | 381 | * @domain: iommu domain |
| 380 | * @name: human readable name of the rproc | 382 | * @name: human readable name of the rproc |
| 381 | * @firmware: name of firmware file to be loaded | 383 | * @firmware: name of firmware file to be loaded |
| @@ -407,7 +409,7 @@ enum rproc_crash_type { | |||
| 407 | * @has_iommu: flag to indicate if remote processor is behind an MMU | 409 | * @has_iommu: flag to indicate if remote processor is behind an MMU |
| 408 | */ | 410 | */ |
| 409 | struct rproc { | 411 | struct rproc { |
| 410 | struct klist_node node; | 412 | struct list_head node; |
| 411 | struct iommu_domain *domain; | 413 | struct iommu_domain *domain; |
| 412 | const char *name; | 414 | const char *name; |
| 413 | const char *firmware; | 415 | const char *firmware; |
| @@ -481,6 +483,7 @@ struct rproc_vdev { | |||
| 481 | u32 rsc_offset; | 483 | u32 rsc_offset; |
| 482 | }; | 484 | }; |
| 483 | 485 | ||
| 486 | struct rproc *rproc_get_by_phandle(phandle phandle); | ||
| 484 | struct rproc *rproc_alloc(struct device *dev, const char *name, | 487 | struct rproc *rproc_alloc(struct device *dev, const char *name, |
| 485 | const struct rproc_ops *ops, | 488 | const struct rproc_ops *ops, |
| 486 | const char *firmware, int len); | 489 | const char *firmware, int len); |
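A hedged sketch of the new lookup helper in use: a driver that resolves a remote processor from a device-tree phandle and boots it. The "rproc" property name and my_boot_peer() are hypothetical; rproc_boot() is the existing remoteproc API.

	static int my_boot_peer(struct device *dev)
	{
		struct device_node *np = dev->of_node;
		struct rproc *rproc;
		u32 ph;

		if (of_property_read_u32(np, "rproc", &ph))
			return -EINVAL;

		rproc = rproc_get_by_phandle(ph);
		if (!rproc)
			return -EPROBE_DEFER;	/* the rproc may not have registered yet */

		return rproc_boot(rproc);
	}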
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h new file mode 100644 index 000000000000..bb4af7b5eb36 --- /dev/null +++ b/include/linux/reset/bcm63xx_pmb.h | |||
| @@ -0,0 +1,88 @@ | |||
| 1 | /* | ||
| 2 | * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset) | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015, Broadcom Corporation | ||
| 5 | * Author: Florian Fainelli <f.fainelli@gmail.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License as | ||
| 9 | * published by the Free Software Foundation version 2. | ||
| 10 | * | ||
| 11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 12 | * kind, whether express or implied; without even the implied warranty | ||
| 13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | ||
| 16 | #ifndef __BCM63XX_PMB_H | ||
| 17 | #define __BCM63XX_PMB_H | ||
| 18 | |||
| 19 | #include <linux/io.h> | ||
| 20 | #include <linux/types.h> | ||
| 21 | #include <linux/delay.h> | ||
| 22 | #include <linux/err.h> | ||
| 23 | |||
| 24 | /* PMB Master controller register */ | ||
| 25 | #define PMB_CTRL 0x00 | ||
| 26 | #define PMC_PMBM_START (1 << 31) | ||
| 27 | #define PMC_PMBM_TIMEOUT (1 << 30) | ||
| 28 | #define PMC_PMBM_SLAVE_ERR (1 << 29) | ||
| 29 | #define PMC_PMBM_BUSY (1 << 28) | ||
| 30 | #define PMC_PMBM_READ (0 << 20) | ||
| 31 | #define PMC_PMBM_WRITE (1 << 20) | ||
| 32 | #define PMB_WR_DATA 0x04 | ||
| 33 | #define PMB_TIMEOUT 0x08 | ||
| 34 | #define PMB_RD_DATA 0x0C | ||
| 35 | |||
| 36 | #define PMB_BUS_ID_SHIFT 8 | ||
| 37 | |||
| 38 | /* Perform the low-level PMB master operation, shared between reads and | ||
| 39 | * writes. | ||
| 40 | */ | ||
| 41 | static inline int __bpcm_do_op(void __iomem *master, unsigned int addr, | ||
| 42 | u32 off, u32 op) | ||
| 43 | { | ||
| 44 | unsigned int timeout = 1000; | ||
| 45 | u32 cmd; | ||
| 46 | |||
| 47 | cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off); | ||
| 48 | writel(cmd, master + PMB_CTRL); | ||
| 49 | do { | ||
| 50 | cmd = readl(master + PMB_CTRL); | ||
| 51 | if (!(cmd & PMC_PMBM_START)) | ||
| 52 | return 0; | ||
| 53 | |||
| 54 | if (cmd & PMC_PMBM_SLAVE_ERR) | ||
| 55 | return -EIO; | ||
| 56 | |||
| 57 | if (cmd & PMC_PMBM_TIMEOUT) | ||
| 58 | return -ETIMEDOUT; | ||
| 59 | |||
| 60 | udelay(1); | ||
| 61 | } while (timeout-- > 0); | ||
| 62 | |||
| 63 | return -ETIMEDOUT; | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline int bpcm_rd(void __iomem *master, unsigned int addr, | ||
| 67 | u32 off, u32 *val) | ||
| 68 | { | ||
| 69 | int ret = 0; | ||
| 70 | |||
| 71 | ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ); | ||
| 72 | *val = readl(master + PMB_RD_DATA); | ||
| 73 | |||
| 74 | return ret; | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int bpcm_wr(void __iomem *master, unsigned int addr, | ||
| 78 | u32 off, u32 val) | ||
| 79 | { | ||
| 80 | int ret = 0; | ||
| 81 | |||
| 82 | writel(val, master + PMB_WR_DATA); | ||
| 83 | ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE); | ||
| 84 | |||
| 85 | return ret; | ||
| 86 | } | ||
| 87 | |||
| 88 | #endif /* __BCM63XX_PMB_H */ | ||
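For illustration, a small hedged helper built on bpcm_rd()/bpcm_wr() above: a read-modify-write of one BPCM register. my_pmb_set_bits() and its parameters are hypothetical.

	static int my_pmb_set_bits(void __iomem *master, unsigned int addr,
				   u32 off, u32 bits)
	{
		u32 val;
		int ret;

		ret = bpcm_rd(master, addr, off, &val);
		if (ret)
			return ret;

		return bpcm_wr(master, addr, off, val | bits);
	}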
diff --git a/include/linux/rio.h b/include/linux/rio.h index 6bda06f21930..cde976e86b48 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
| @@ -298,7 +298,7 @@ struct rio_id_table { | |||
| 298 | * struct rio_net - RIO network info | 298 | * struct rio_net - RIO network info |
| 299 | * @node: Node in global list of RIO networks | 299 | * @node: Node in global list of RIO networks |
| 300 | * @devices: List of devices in this network | 300 | * @devices: List of devices in this network |
| 301 | * @switches: List of switches in this netowrk | 301 | * @switches: List of switches in this network |
| 302 | * @mports: List of master ports accessing this network | 302 | * @mports: List of master ports accessing this network |
| 303 | * @hport: Default port for accessing this network | 303 | * @hport: Default port for accessing this network |
| 304 | * @id: RIO network ID | 304 | * @id: RIO network ID |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 8dcf6825fa88..3359f0422c6b 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
| @@ -24,6 +24,14 @@ extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); | |||
| 24 | ktime_t rtc_tm_to_ktime(struct rtc_time tm); | 24 | ktime_t rtc_tm_to_ktime(struct rtc_time tm); |
| 25 | struct rtc_time rtc_ktime_to_tm(ktime_t kt); | 25 | struct rtc_time rtc_ktime_to_tm(ktime_t kt); |
| 26 | 26 | ||
| 27 | /* | ||
| 28 | * rtc_tm_sub - Return the difference in seconds. | ||
| 29 | */ | ||
| 30 | static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) | ||
| 31 | { | ||
| 32 | return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); | ||
| 33 | } | ||
| 34 | |||
| 27 | /** | 35 | /** |
| 28 | * Deprecated. Use rtc_time64_to_tm(). | 36 | * Deprecated. Use rtc_time64_to_tm(). |
| 29 | */ | 37 | */ |
| @@ -101,8 +109,7 @@ struct rtc_timer { | |||
| 101 | /* flags */ | 109 | /* flags */ |
| 102 | #define RTC_DEV_BUSY 0 | 110 | #define RTC_DEV_BUSY 0 |
| 103 | 111 | ||
| 104 | struct rtc_device | 112 | struct rtc_device { |
| 105 | { | ||
| 106 | struct device dev; | 113 | struct device dev; |
| 107 | struct module *owner; | 114 | struct module *owner; |
| 108 | 115 | ||
| @@ -161,7 +168,6 @@ extern void devm_rtc_device_unregister(struct device *dev, | |||
| 161 | 168 | ||
| 162 | extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); | 169 | extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); |
| 163 | extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); | 170 | extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); |
| 164 | extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); | ||
| 165 | extern int rtc_set_ntp_time(struct timespec64 now); | 171 | extern int rtc_set_ntp_time(struct timespec64 now); |
| 166 | int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); | 172 | int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); |
| 167 | extern int rtc_read_alarm(struct rtc_device *rtc, | 173 | extern int rtc_read_alarm(struct rtc_device *rtc, |
| @@ -198,10 +204,10 @@ int rtc_register(rtc_task_t *task); | |||
| 198 | int rtc_unregister(rtc_task_t *task); | 204 | int rtc_unregister(rtc_task_t *task); |
| 199 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); | 205 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); |
| 200 | 206 | ||
| 201 | void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data); | 207 | void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); |
| 202 | int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer, | 208 | int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, |
| 203 | ktime_t expires, ktime_t period); | 209 | ktime_t expires, ktime_t period); |
| 204 | int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer); | 210 | void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); |
| 205 | void rtc_timer_do_work(struct work_struct *work); | 211 | void rtc_timer_do_work(struct work_struct *work); |
| 206 | 212 | ||
| 207 | static inline bool is_leap_year(unsigned int year) | 213 | static inline bool is_leap_year(unsigned int year) |
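A short hedged example of the new rtc_tm_sub() helper: computing how many seconds remain until an alarm time, given a struct rtc_device. my_seconds_until() is a hypothetical caller.

	static time64_t my_seconds_until(struct rtc_device *rtc, struct rtc_time *alarm_tm)
	{
		struct rtc_time now;
		int err;

		err = rtc_read_time(rtc, &now);
		if (err)
			return err;

		return rtc_tm_sub(alarm_tm, &now);
	}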
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h index 2c92e1c8e055..aefd997262e4 100644 --- a/include/linux/rtc/sirfsoc_rtciobrg.h +++ b/include/linux/rtc/sirfsoc_rtciobrg.h | |||
| @@ -9,10 +9,14 @@ | |||
| 9 | #ifndef _SIRFSOC_RTC_IOBRG_H_ | 9 | #ifndef _SIRFSOC_RTC_IOBRG_H_ |
| 10 | #define _SIRFSOC_RTC_IOBRG_H_ | 10 | #define _SIRFSOC_RTC_IOBRG_H_ |
| 11 | 11 | ||
| 12 | struct regmap_config; | ||
| 13 | |||
| 12 | extern void sirfsoc_rtc_iobrg_besyncing(void); | 14 | extern void sirfsoc_rtc_iobrg_besyncing(void); |
| 13 | 15 | ||
| 14 | extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); | 16 | extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); |
| 15 | 17 | ||
| 16 | extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); | 18 | extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); |
| 19 | struct regmap *devm_regmap_init_iobg(struct device *dev, | ||
| 20 | const struct regmap_config *config); | ||
| 17 | 21 | ||
| 18 | #endif | 22 | #endif |
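A hedged sketch of the new devm_regmap_init_iobg() in a probe path; the regmap_config values and my_rtc_probe() are illustrative only.

	static const struct regmap_config my_iobg_regmap_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
	};

	static int my_rtc_probe(struct platform_device *pdev)
	{
		struct regmap *map;

		map = devm_regmap_init_iobg(&pdev->dev, &my_iobg_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* registers behind the RTC IO bridge are now reachable via regmap */
		return 0;
	}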
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 7b8e260c4a27..39adaa9529eb 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
| @@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) | |||
| 79 | 79 | ||
| 80 | struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); | 80 | struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); |
| 81 | 81 | ||
| 82 | #ifdef CONFIG_NET_CLS_ACT | 82 | #ifdef CONFIG_NET_INGRESS |
| 83 | void net_inc_ingress_queue(void); | 83 | void net_inc_ingress_queue(void); |
| 84 | void net_dec_ingress_queue(void); | 84 | void net_dec_ingress_queue(void); |
| 85 | #else | ||
| 86 | static inline void net_inc_ingress_queue(void) | ||
| 87 | { | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void net_dec_ingress_queue(void) | ||
| 91 | { | ||
| 92 | } | ||
| 93 | #endif | 85 | #endif |
| 94 | 86 | ||
| 95 | extern void rtnetlink_init(void); | 87 | extern void rtnetlink_init(void); |
| @@ -122,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm, | |||
| 122 | 114 | ||
| 123 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 115 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 124 | struct net_device *dev, u16 mode, | 116 | struct net_device *dev, u16 mode, |
| 125 | u32 flags, u32 mask, int nlflags); | 117 | u32 flags, u32 mask, int nlflags, |
| 118 | u32 filter_mask, | ||
| 119 | int (*vlan_fill)(struct sk_buff *skb, | ||
| 120 | struct net_device *dev, | ||
| 121 | u32 filter_mask)); | ||
| 126 | #endif /* __LINUX_RTNETLINK_H */ | 122 | #endif /* __LINUX_RTNETLINK_H */ |
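To show the widened ndo_dflt_bridge_getlink() signature in context, a hedged sketch of a driver's .ndo_bridge_getlink implementation that has no per-VLAN data and therefore passes NULL for the new vlan_fill callback; my_ndo_bridge_getlink() is hypothetical.

	static int my_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
					 struct net_device *dev, u32 filter_mask,
					 int nlflags)
	{
		return ndo_dflt_bridge_getlink(skb, pid, seq, dev, BRIDGE_MODE_VEB,
					       0, 0, nlflags, filter_mask, NULL);
	}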
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index a0edb992c9c3..9b1ef0c820a7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
| @@ -2,13 +2,39 @@ | |||
| 2 | #define _LINUX_SCATTERLIST_H | 2 | #define _LINUX_SCATTERLIST_H |
| 3 | 3 | ||
| 4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
| 5 | #include <linux/types.h> | ||
| 5 | #include <linux/bug.h> | 6 | #include <linux/bug.h> |
| 6 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
| 7 | |||
| 8 | #include <asm/types.h> | ||
| 9 | #include <asm/scatterlist.h> | ||
| 10 | #include <asm/io.h> | 8 | #include <asm/io.h> |
| 11 | 9 | ||
| 10 | struct scatterlist { | ||
| 11 | #ifdef CONFIG_DEBUG_SG | ||
| 12 | unsigned long sg_magic; | ||
| 13 | #endif | ||
| 14 | unsigned long page_link; | ||
| 15 | unsigned int offset; | ||
| 16 | unsigned int length; | ||
| 17 | dma_addr_t dma_address; | ||
| 18 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
| 19 | unsigned int dma_length; | ||
| 20 | #endif | ||
| 21 | }; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * These macros should be used after a dma_map_sg call has been done | ||
| 25 | * to get bus addresses of each of the SG entries and their lengths. | ||
| 26 | * You should only work with the number of sg entries dma_map_sg | ||
| 27 | * returns, or alternatively stop on the first sg_dma_len(sg) which | ||
| 28 | * is 0. | ||
| 29 | */ | ||
| 30 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
| 31 | |||
| 32 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
| 33 | #define sg_dma_len(sg) ((sg)->dma_length) | ||
| 34 | #else | ||
| 35 | #define sg_dma_len(sg) ((sg)->length) | ||
| 36 | #endif | ||
| 37 | |||
| 12 | struct sg_table { | 38 | struct sg_table { |
| 13 | struct scatterlist *sgl; /* the list */ | 39 | struct scatterlist *sgl; /* the list */ |
| 14 | unsigned int nents; /* number of mapped entries */ | 40 | unsigned int nents; /* number of mapped entries */ |
| @@ -18,10 +44,9 @@ struct sg_table { | |||
| 18 | /* | 44 | /* |
| 19 | * Notes on SG table design. | 45 | * Notes on SG table design. |
| 20 | * | 46 | * |
| 21 | * Architectures must provide an unsigned long page_link field in the | 47 | * We use the unsigned long page_link field in the scatterlist struct to place |
| 22 | * scatterlist struct. We use that to place the page pointer AND encode | 48 | * the page pointer AND encode information about the sg table as well. The two |
| 23 | * information about the sg table as well. The two lower bits are reserved | 49 | * lower bits are reserved for this information. |
| 24 | * for this information. | ||
| 25 | * | 50 | * |
| 26 | * If bit 0 is set, then the page_link contains a pointer to the next sg | 51 | * If bit 0 is set, then the page_link contains a pointer to the next sg |
| 27 | * table list. Otherwise the next entry is at sg + 1. | 52 | * table list. Otherwise the next entry is at sg + 1. |
| @@ -240,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
| 240 | unsigned long offset, unsigned long size, | 265 | unsigned long offset, unsigned long size, |
| 241 | gfp_t gfp_mask); | 266 | gfp_t gfp_mask); |
| 242 | 267 | ||
| 268 | size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, | ||
| 269 | size_t buflen, off_t skip, bool to_buffer); | ||
| 270 | |||
| 243 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | 271 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, |
| 244 | void *buf, size_t buflen); | 272 | const void *buf, size_t buflen); |
| 245 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | 273 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
| 246 | void *buf, size_t buflen); | 274 | void *buf, size_t buflen); |
| 247 | 275 | ||
| 248 | size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, | 276 | size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, |
| 249 | void *buf, size_t buflen, off_t skip); | 277 | const void *buf, size_t buflen, off_t skip); |
| 250 | size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, | 278 | size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
| 251 | void *buf, size_t buflen, off_t skip); | 279 | void *buf, size_t buflen, off_t skip); |
| 252 | 280 | ||
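A hedged sketch of the usage pattern the sg_dma_address()/sg_dma_len() comment above prescribes: map a table, then walk only the entries dma_map_sg() actually returned. my_map_and_program() is hypothetical.

	static int my_map_and_program(struct device *dev, struct sg_table *sgt)
	{
		struct scatterlist *sg;
		int i, nents;

		nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
		if (!nents)
			return -ENOMEM;

		for_each_sg(sgt->sgl, sg, nents, i) {
			dma_addr_t addr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			/* program one hardware descriptor with addr/len */
			pr_debug("desc: %pad + %u\n", &addr, len);
		}

		/* later: dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); */
		return 0;
	}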
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6633e83e608a..04b5ada460b4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -58,6 +58,7 @@ struct sched_param { | |||
| 58 | #include <linux/uidgid.h> | 58 | #include <linux/uidgid.h> |
| 59 | #include <linux/gfp.h> | 59 | #include <linux/gfp.h> |
| 60 | #include <linux/magic.h> | 60 | #include <linux/magic.h> |
| 61 | #include <linux/cgroup-defs.h> | ||
| 61 | 62 | ||
| 62 | #include <asm/processor.h> | 63 | #include <asm/processor.h> |
| 63 | 64 | ||
| @@ -191,8 +192,6 @@ struct task_group; | |||
| 191 | #ifdef CONFIG_SCHED_DEBUG | 192 | #ifdef CONFIG_SCHED_DEBUG |
| 192 | extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); | 193 | extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); |
| 193 | extern void proc_sched_set_task(struct task_struct *p); | 194 | extern void proc_sched_set_task(struct task_struct *p); |
| 194 | extern void | ||
| 195 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); | ||
| 196 | #endif | 195 | #endif |
| 197 | 196 | ||
| 198 | /* | 197 | /* |
| @@ -755,18 +754,6 @@ struct signal_struct { | |||
| 755 | unsigned audit_tty_log_passwd; | 754 | unsigned audit_tty_log_passwd; |
| 756 | struct tty_audit_buf *tty_audit_buf; | 755 | struct tty_audit_buf *tty_audit_buf; |
| 757 | #endif | 756 | #endif |
| 758 | #ifdef CONFIG_CGROUPS | ||
| 759 | /* | ||
| 760 | * group_rwsem prevents new tasks from entering the threadgroup and | ||
| 761 | * member tasks from exiting,a more specifically, setting of | ||
| 762 | * PF_EXITING. fork and exit paths are protected with this rwsem | ||
| 763 | * using threadgroup_change_begin/end(). Users which require | ||
| 764 | * threadgroup to remain stable should use threadgroup_[un]lock() | ||
| 765 | * which also takes care of exec path. Currently, cgroup is the | ||
| 766 | * only user. | ||
| 767 | */ | ||
| 768 | struct rw_semaphore group_rwsem; | ||
| 769 | #endif | ||
| 770 | 757 | ||
| 771 | oom_flags_t oom_flags; | 758 | oom_flags_t oom_flags; |
| 772 | short oom_score_adj; /* OOM kill score adjustment */ | 759 | short oom_score_adj; /* OOM kill score adjustment */ |
| @@ -849,7 +836,7 @@ extern struct user_struct root_user; | |||
| 849 | struct backing_dev_info; | 836 | struct backing_dev_info; |
| 850 | struct reclaim_state; | 837 | struct reclaim_state; |
| 851 | 838 | ||
| 852 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 839 | #ifdef CONFIG_SCHED_INFO |
| 853 | struct sched_info { | 840 | struct sched_info { |
| 854 | /* cumulative counters */ | 841 | /* cumulative counters */ |
| 855 | unsigned long pcount; /* # of times run on this cpu */ | 842 | unsigned long pcount; /* # of times run on this cpu */ |
| @@ -859,7 +846,7 @@ struct sched_info { | |||
| 859 | unsigned long long last_arrival,/* when we last ran on a cpu */ | 846 | unsigned long long last_arrival,/* when we last ran on a cpu */ |
| 860 | last_queued; /* when we were last queued to run */ | 847 | last_queued; /* when we were last queued to run */ |
| 861 | }; | 848 | }; |
| 862 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ | 849 | #endif /* CONFIG_SCHED_INFO */ |
| 863 | 850 | ||
| 864 | #ifdef CONFIG_TASK_DELAY_ACCT | 851 | #ifdef CONFIG_TASK_DELAY_ACCT |
| 865 | struct task_delay_info { | 852 | struct task_delay_info { |
| @@ -1408,7 +1395,7 @@ struct task_struct { | |||
| 1408 | int rcu_tasks_idle_cpu; | 1395 | int rcu_tasks_idle_cpu; |
| 1409 | #endif /* #ifdef CONFIG_TASKS_RCU */ | 1396 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
| 1410 | 1397 | ||
| 1411 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1398 | #ifdef CONFIG_SCHED_INFO |
| 1412 | struct sched_info sched_info; | 1399 | struct sched_info sched_info; |
| 1413 | #endif | 1400 | #endif |
| 1414 | 1401 | ||
| @@ -1535,8 +1522,6 @@ struct task_struct { | |||
| 1535 | /* hung task detection */ | 1522 | /* hung task detection */ |
| 1536 | unsigned long last_switch_count; | 1523 | unsigned long last_switch_count; |
| 1537 | #endif | 1524 | #endif |
| 1538 | /* CPU-specific state of this task */ | ||
| 1539 | struct thread_struct thread; | ||
| 1540 | /* filesystem information */ | 1525 | /* filesystem information */ |
| 1541 | struct fs_struct *fs; | 1526 | struct fs_struct *fs; |
| 1542 | /* open file information */ | 1527 | /* open file information */ |
| @@ -1791,8 +1776,22 @@ struct task_struct { | |||
| 1791 | unsigned long task_state_change; | 1776 | unsigned long task_state_change; |
| 1792 | #endif | 1777 | #endif |
| 1793 | int pagefault_disabled; | 1778 | int pagefault_disabled; |
| 1779 | /* CPU-specific state of this task */ | ||
| 1780 | struct thread_struct thread; | ||
| 1781 | /* | ||
| 1782 | * WARNING: on x86, 'thread_struct' contains a variable-sized | ||
| 1783 | * structure. It *MUST* be at the end of 'task_struct'. | ||
| 1784 | * | ||
| 1785 | * Do not put anything below here! | ||
| 1786 | */ | ||
| 1794 | }; | 1787 | }; |
| 1795 | 1788 | ||
| 1789 | #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT | ||
| 1790 | extern int arch_task_struct_size __read_mostly; | ||
| 1791 | #else | ||
| 1792 | # define arch_task_struct_size (sizeof(struct task_struct)) | ||
| 1793 | #endif | ||
| 1794 | |||
| 1796 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1795 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
| 1797 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) | 1796 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
| 1798 | 1797 | ||
| @@ -2432,7 +2431,6 @@ extern void sched_dead(struct task_struct *p); | |||
| 2432 | 2431 | ||
| 2433 | extern void proc_caches_init(void); | 2432 | extern void proc_caches_init(void); |
| 2434 | extern void flush_signals(struct task_struct *); | 2433 | extern void flush_signals(struct task_struct *); |
| 2435 | extern void __flush_signals(struct task_struct *); | ||
| 2436 | extern void ignore_signals(struct task_struct *); | 2434 | extern void ignore_signals(struct task_struct *); |
| 2437 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 2435 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
| 2438 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 2436 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
| @@ -2556,8 +2554,22 @@ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); | |||
| 2556 | /* Remove the current tasks stale references to the old mm_struct */ | 2554 | /* Remove the current tasks stale references to the old mm_struct */ |
| 2557 | extern void mm_release(struct task_struct *, struct mm_struct *); | 2555 | extern void mm_release(struct task_struct *, struct mm_struct *); |
| 2558 | 2556 | ||
| 2557 | #ifdef CONFIG_HAVE_COPY_THREAD_TLS | ||
| 2558 | extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, | ||
| 2559 | struct task_struct *, unsigned long); | ||
| 2560 | #else | ||
| 2559 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | 2561 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
| 2560 | struct task_struct *); | 2562 | struct task_struct *); |
| 2563 | |||
| 2564 | /* Architectures that haven't opted into copy_thread_tls get the tls argument | ||
| 2565 | * via pt_regs, so ignore the tls argument passed via C. */ | ||
| 2566 | static inline int copy_thread_tls( | ||
| 2567 | unsigned long clone_flags, unsigned long sp, unsigned long arg, | ||
| 2568 | struct task_struct *p, unsigned long tls) | ||
| 2569 | { | ||
| 2570 | return copy_thread(clone_flags, sp, arg, p); | ||
| 2571 | } | ||
| 2572 | #endif | ||
| 2561 | extern void flush_thread(void); | 2573 | extern void flush_thread(void); |
| 2562 | extern void exit_thread(void); | 2574 | extern void exit_thread(void); |
| 2563 | 2575 | ||
| @@ -2576,6 +2588,7 @@ extern int do_execveat(int, struct filename *, | |||
| 2576 | const char __user * const __user *, | 2588 | const char __user * const __user *, |
| 2577 | const char __user * const __user *, | 2589 | const char __user * const __user *, |
| 2578 | int); | 2590 | int); |
| 2591 | extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); | ||
| 2579 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); | 2592 | extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); |
| 2580 | struct task_struct *fork_idle(int); | 2593 | struct task_struct *fork_idle(int); |
| 2581 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 2594 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
| @@ -2710,53 +2723,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk, | |||
| 2710 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); | 2723 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); |
| 2711 | } | 2724 | } |
| 2712 | 2725 | ||
| 2713 | #ifdef CONFIG_CGROUPS | ||
| 2714 | static inline void threadgroup_change_begin(struct task_struct *tsk) | ||
| 2715 | { | ||
| 2716 | down_read(&tsk->signal->group_rwsem); | ||
| 2717 | } | ||
| 2718 | static inline void threadgroup_change_end(struct task_struct *tsk) | ||
| 2719 | { | ||
| 2720 | up_read(&tsk->signal->group_rwsem); | ||
| 2721 | } | ||
| 2722 | |||
| 2723 | /** | 2726 | /** |
| 2724 | * threadgroup_lock - lock threadgroup | 2727 | * threadgroup_change_begin - mark the beginning of changes to a threadgroup |
| 2725 | * @tsk: member task of the threadgroup to lock | 2728 | * @tsk: task causing the changes |
| 2726 | * | ||
| 2727 | * Lock the threadgroup @tsk belongs to. No new task is allowed to enter | ||
| 2728 | * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or | ||
| 2729 | * change ->group_leader/pid. This is useful for cases where the threadgroup | ||
| 2730 | * needs to stay stable across blockable operations. | ||
| 2731 | * | ||
| 2732 | * fork and exit paths explicitly call threadgroup_change_{begin|end}() for | ||
| 2733 | * synchronization. While held, no new task will be added to threadgroup | ||
| 2734 | * and no existing live task will have its PF_EXITING set. | ||
| 2735 | * | 2729 | * |
| 2736 | * de_thread() does threadgroup_change_{begin|end}() when a non-leader | 2730 | * All operations which modify a threadgroup - a new thread joining the |
| 2737 | * sub-thread becomes a new leader. | 2731 | * group, death of a member thread (the assertion of PF_EXITING) and |
| 2732 | * exec(2) dethreading the process and replacing the leader - are wrapped | ||
| 2733 | * by threadgroup_change_{begin|end}(). This is to provide a place which | ||
| 2734 | * subsystems needing threadgroup stability can hook into for | ||
| 2735 | * synchronization. | ||
| 2738 | */ | 2736 | */ |
| 2739 | static inline void threadgroup_lock(struct task_struct *tsk) | 2737 | static inline void threadgroup_change_begin(struct task_struct *tsk) |
| 2740 | { | 2738 | { |
| 2741 | down_write(&tsk->signal->group_rwsem); | 2739 | might_sleep(); |
| 2740 | cgroup_threadgroup_change_begin(tsk); | ||
| 2742 | } | 2741 | } |
| 2743 | 2742 | ||
| 2744 | /** | 2743 | /** |
| 2745 | * threadgroup_unlock - unlock threadgroup | 2744 | * threadgroup_change_end - mark the end of changes to a threadgroup |
| 2746 | * @tsk: member task of the threadgroup to unlock | 2745 | * @tsk: task causing the changes |
| 2747 | * | 2746 | * |
| 2748 | * Reverse threadgroup_lock(). | 2747 | * See threadgroup_change_begin(). |
| 2749 | */ | 2748 | */ |
| 2750 | static inline void threadgroup_unlock(struct task_struct *tsk) | 2749 | static inline void threadgroup_change_end(struct task_struct *tsk) |
| 2751 | { | 2750 | { |
| 2752 | up_write(&tsk->signal->group_rwsem); | 2751 | cgroup_threadgroup_change_end(tsk); |
| 2753 | } | 2752 | } |
| 2754 | #else | ||
| 2755 | static inline void threadgroup_change_begin(struct task_struct *tsk) {} | ||
| 2756 | static inline void threadgroup_change_end(struct task_struct *tsk) {} | ||
| 2757 | static inline void threadgroup_lock(struct task_struct *tsk) {} | ||
| 2758 | static inline void threadgroup_unlock(struct task_struct *tsk) {} | ||
| 2759 | #endif | ||
| 2760 | 2753 | ||
| 2761 | #ifndef __HAVE_THREAD_FUNCTIONS | 2754 | #ifndef __HAVE_THREAD_FUNCTIONS |
| 2762 | 2755 | ||
diff --git a/include/linux/scif.h b/include/linux/scif.h new file mode 100644 index 000000000000..44f4f3898bbe --- /dev/null +++ b/include/linux/scif.h | |||
| @@ -0,0 +1,993 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 5 | * redistributing this file, you may do so under either license. | ||
| 6 | * | ||
| 7 | * GPL LICENSE SUMMARY | ||
| 8 | * | ||
| 9 | * Copyright(c) 2014 Intel Corporation. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of version 2 of the GNU General Public License as | ||
| 13 | * published by the Free Software Foundation. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * BSD LICENSE | ||
| 21 | * | ||
| 22 | * Copyright(c) 2014 Intel Corporation. | ||
| 23 | * | ||
| 24 | * Redistribution and use in source and binary forms, with or without | ||
| 25 | * modification, are permitted provided that the following conditions | ||
| 26 | * are met: | ||
| 27 | * | ||
| 28 | * * Redistributions of source code must retain the above copyright | ||
| 29 | * notice, this list of conditions and the following disclaimer. | ||
| 30 | * * Redistributions in binary form must reproduce the above copyright | ||
| 31 | * notice, this list of conditions and the following disclaimer in | ||
| 32 | * the documentation and/or other materials provided with the | ||
| 33 | * distribution. | ||
| 34 | * * Neither the name of Intel Corporation nor the names of its | ||
| 35 | * contributors may be used to endorse or promote products derived | ||
| 36 | * from this software without specific prior written permission. | ||
| 37 | * | ||
| 38 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 39 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 40 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 41 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 42 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 43 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 44 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 45 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 46 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 47 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 48 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 49 | * | ||
| 50 | * Intel SCIF driver. | ||
| 51 | * | ||
| 52 | */ | ||
| 53 | #ifndef __SCIF_H__ | ||
| 54 | #define __SCIF_H__ | ||
| 55 | |||
| 56 | #include <linux/types.h> | ||
| 57 | #include <linux/poll.h> | ||
| 58 | #include <linux/scif_ioctl.h> | ||
| 59 | |||
| 60 | #define SCIF_ACCEPT_SYNC 1 | ||
| 61 | #define SCIF_SEND_BLOCK 1 | ||
| 62 | #define SCIF_RECV_BLOCK 1 | ||
| 63 | |||
| 64 | enum { | ||
| 65 | SCIF_PROT_READ = (1 << 0), | ||
| 66 | SCIF_PROT_WRITE = (1 << 1) | ||
| 67 | }; | ||
| 68 | |||
| 69 | enum { | ||
| 70 | SCIF_MAP_FIXED = 0x10, | ||
| 71 | SCIF_MAP_KERNEL = 0x20, | ||
| 72 | }; | ||
| 73 | |||
| 74 | enum { | ||
| 75 | SCIF_FENCE_INIT_SELF = (1 << 0), | ||
| 76 | SCIF_FENCE_INIT_PEER = (1 << 1), | ||
| 77 | SCIF_SIGNAL_LOCAL = (1 << 4), | ||
| 78 | SCIF_SIGNAL_REMOTE = (1 << 5) | ||
| 79 | }; | ||
| 80 | |||
| 81 | enum { | ||
| 82 | SCIF_RMA_USECPU = (1 << 0), | ||
| 83 | SCIF_RMA_USECACHE = (1 << 1), | ||
| 84 | SCIF_RMA_SYNC = (1 << 2), | ||
| 85 | SCIF_RMA_ORDERED = (1 << 3) | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* End of SCIF Admin Reserved Ports */ | ||
| 89 | #define SCIF_ADMIN_PORT_END 1024 | ||
| 90 | |||
| 91 | /* End of SCIF Reserved Ports */ | ||
| 92 | #define SCIF_PORT_RSVD 1088 | ||
| 93 | |||
| 94 | typedef struct scif_endpt *scif_epd_t; | ||
| 95 | |||
| 96 | #define SCIF_OPEN_FAILED ((scif_epd_t)-1) | ||
| 97 | #define SCIF_REGISTER_FAILED ((off_t)-1) | ||
| 98 | #define SCIF_MMAP_FAILED ((void *)-1) | ||
| 99 | |||
| 100 | /** | ||
| 101 | * scif_open() - Create an endpoint | ||
| 102 | * | ||
| 103 | * Return: | ||
| 104 | * Upon successful completion, scif_open() returns an endpoint descriptor to | ||
| 105 | * be used in subsequent SCIF functions calls to refer to that endpoint; | ||
| 106 | * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is | ||
| 107 | * returned and errno is set to indicate the error; in kernel mode a NULL | ||
| 108 | * scif_epd_t is returned. | ||
| 109 | * | ||
| 110 | * Errors: | ||
| 111 | * ENOMEM - Insufficient kernel memory was available | ||
| 112 | */ | ||
| 113 | scif_epd_t scif_open(void); | ||
| 114 | |||
| 115 | /** | ||
| 116 | * scif_bind() - Bind an endpoint to a port | ||
| 117 | * @epd: endpoint descriptor | ||
| 118 | * @pn: port number | ||
| 119 | * | ||
| 120 | * scif_bind() binds endpoint epd to port pn, where pn is a port number on the | ||
| 121 | * local node. If pn is zero, a port number greater than or equal to | ||
| 122 | * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to | ||
| 123 | * exactly one local port. Ports less than 1024, when requested, can only be | ||
| 124 | * bound by system (or root) processes or by processes executed by privileged users. | ||
| 125 | * | ||
| 126 | * Return: | ||
| 127 | * Upon successful completion, scif_bind() returns the port number to which epd | ||
| 128 | * is bound; otherwise in user mode -1 is returned and errno is set to | ||
| 129 | * indicate the error; in kernel mode the negative of one of the following | ||
| 130 | * errors is returned. | ||
| 131 | * | ||
| 132 | * Errors: | ||
| 133 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 134 | * EINVAL - the endpoint or the port is already bound | ||
| 135 | * EISCONN - The endpoint is already connected | ||
| 136 | * ENOSPC - No port number available for assignment | ||
| 137 | * EACCES - The port requested is protected and the user is not the superuser | ||
| 138 | */ | ||
| 139 | int scif_bind(scif_epd_t epd, u16 pn); | ||
| 140 | |||
| 141 | /** | ||
| 142 | * scif_listen() - Listen for connections on an endpoint | ||
| 143 | * @epd: endpoint descriptor | ||
| 144 | * @backlog: maximum pending connection requests | ||
| 145 | * | ||
| 146 | * scif_listen() marks the endpoint epd as a listening endpoint - that is, as | ||
| 147 | * an endpoint that will be used to accept incoming connection requests. Once | ||
| 148 | * so marked, the endpoint is said to be in the listening state and may not be | ||
| 149 | * used as the endpoint of a connection. | ||
| 150 | * | ||
| 151 | * The endpoint, epd, must have been bound to a port. | ||
| 152 | * | ||
| 153 | * The backlog argument defines the maximum length to which the queue of | ||
| 154 | * pending connections for epd may grow. If a connection request arrives when | ||
| 155 | * the queue is full, the client may receive an error with an indication that | ||
| 156 | * the connection was refused. | ||
| 157 | * | ||
| 158 | * Return: | ||
| 159 | * Upon successful completion, scif_listen() returns 0; otherwise in user mode | ||
| 160 | * -1 is returned and errno is set to indicate the error; in kernel mode the | ||
| 161 | * negative of one of the following errors is returned. | ||
| 162 | * | ||
| 163 | * Errors: | ||
| 164 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 165 | * EINVAL - the endpoint is not bound to a port | ||
| 166 | * EISCONN - The endpoint is already connected or listening | ||
| 167 | */ | ||
| 168 | int scif_listen(scif_epd_t epd, int backlog); | ||
| 169 | |||
| 170 | /** | ||
| 171 | * scif_connect() - Initiate a connection on a port | ||
| 172 | * @epd: endpoint descriptor | ||
| 173 | * @dst: global id of port to which to connect | ||
| 174 | * | ||
| 175 | * The scif_connect() function requests the connection of endpoint epd to remote | ||
| 176 | * port dst. If the connection is successful, a peer endpoint, bound to dst, is | ||
| 177 | * created on node dst.node. On successful return, the connection is complete. | ||
| 178 | * | ||
| 179 | * If the endpoint epd has not already been bound to a port, scif_connect() | ||
| 180 | * will bind it to an unused local port. | ||
| 181 | * | ||
| 182 | * A connection is terminated when an endpoint of the connection is closed, | ||
| 183 | * either explicitly by scif_close(), or when a process that owns one of the | ||
| 184 | * endpoints of the connection is terminated. | ||
| 185 | * | ||
| 186 | * In user space, scif_connect() supports an asynchronous connection mode | ||
| 187 | * if the application has set the O_NONBLOCK flag on the endpoint via the | ||
| 188 | * fcntl() system call. Setting this flag results in the calling process | ||
| 189 | * not waiting during scif_connect(). | ||
| 190 | * | ||
| 191 | * Return: | ||
| 192 | * Upon successful completion, scif_connect() returns the port ID to which the | ||
| 193 | * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is | ||
| 194 | * set to indicate the error; in kernel mode the negative of one of the | ||
| 195 | * following errors is returned. | ||
| 196 | * | ||
| 197 | * Errors: | ||
| 198 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 199 | * ECONNREFUSED - The destination was not listening for connections or refused | ||
| 200 | * the connection request | ||
| 201 | * EINVAL - dst.port is not a valid port ID | ||
| 202 | * EISCONN - The endpoint is already connected | ||
| 203 | * ENOMEM - No buffer space is available | ||
| 204 | * ENODEV - The destination node does not exist, or the node is lost, or it | ||
| 205 | * existed but is not currently in the network since it may have crashed | ||
| 206 | * ENOSPC - No port number available for assignment | ||
| 207 | * EOPNOTSUPP - The endpoint is listening and cannot be connected | ||
| 208 | */ | ||
| 209 | int scif_connect(scif_epd_t epd, struct scif_port_id *dst); | ||
| 210 | |||
| 211 | /** | ||
| 212 | * scif_accept() - Accept a connection on an endpoint | ||
| 213 | * @epd: endpoint descriptor | ||
| 214 | * @peer: global id of port to which connected | ||
| 215 | * @newepd: new connected endpoint descriptor | ||
| 216 | * @flags: flags | ||
| 217 | * | ||
| 218 | * The scif_accept() call extracts the first connection request from the queue | ||
| 219 | * of pending connections for the port on which epd is listening. scif_accept() | ||
| 220 | * creates a new endpoint, bound to the same port as epd, and allocates a new | ||
| 221 | * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new | ||
| 222 | * endpoint is connected to the endpoint through which the connection was | ||
| 223 | * requested. epd is unaffected by this call, and remains in the listening | ||
| 224 | * state. | ||
| 225 | * | ||
| 226 | * On successful return, peer holds the global port identifier (node id and | ||
| 227 | * local port number) of the port which requested the connection. | ||
| 228 | * | ||
| 229 | * A connection is terminated when an endpoint of the connection is closed, | ||
| 230 | * either explicitly by scif_close(), or when a process that owns one of the | ||
| 231 | * endpoints of the connection is terminated. | ||
| 232 | * | ||
| 233 | * The number of connections that can (subsequently) be accepted on epd is only | ||
| 234 | * limited by system resources (memory). | ||
| 235 | * | ||
| 236 | * The flags argument is formed by OR'ing together zero or more of the | ||
| 237 | * following values. | ||
| 238 | * SCIF_ACCEPT_SYNC - block until a connection request is presented. If | ||
| 239 | * SCIF_ACCEPT_SYNC is not in flags, and no pending | ||
| 240 | * connections are present on the queue, scif_accept() | ||
| 241 | * fails with an EAGAIN error | ||
| 242 | * | ||
| 243 | * In user mode, the select() and poll() functions can be used to determine | ||
| 244 | * when there is a connection request. In kernel mode, the scif_poll() | ||
| 245 | * function may be used for this purpose. A readable event will be delivered | ||
| 246 | * when a connection is requested. | ||
| 247 | * | ||
| 248 | * Return: | ||
| 249 | * Upon successful completion, scif_accept() returns 0; otherwise in user mode | ||
| 250 | * -1 is returned and errno is set to indicate the error; in kernel mode the | ||
| 251 | * negative of one of the following errors is returned. | ||
| 252 | * | ||
| 253 | * Errors: | ||
| 254 | * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be | ||
| 255 | * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete | ||
| 256 | * its connection request | ||
| 257 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 258 | * EINTR - Interrupted function | ||
| 259 | * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is | ||
| 260 | * NULL, or newepd is NULL | ||
| 261 | * ENODEV - The requesting node is lost, or existed but is not currently in the | ||
| 262 | * network since it may have crashed | ||
| 263 | * ENOMEM - Not enough space | ||
| 264 | * ENOENT - Secondary part of epd registration failed | ||
| 265 | */ | ||
| 266 | int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t | ||
| 267 | *newepd, int flags); | ||
| 268 | |||
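A corresponding listening side might look like the hypothetical sketch below; the port number and backlog are illustrative, and scif_listen() is documented elsewhere in this header.

    /* Minimal sketch: accept one connection on a well-known port. */
    static int example_accept(scif_epd_t *newepd)
    {
            struct scif_port_id peer;
            scif_epd_t lepd;
            int ret;

            lepd = scif_open();
            if (!lepd)
                    return -ENOMEM;

            ret = scif_bind(lepd, 2000);    /* example port only */
            if (ret < 0)
                    goto out;

            ret = scif_listen(lepd, 16);    /* backlog of 16 */
            if (ret < 0)
                    goto out;

            /* Block until a connection request arrives. */
            ret = scif_accept(lepd, &peer, newepd, SCIF_ACCEPT_SYNC);
    out:
            scif_close(lepd);               /* *newepd, if any, stays connected */
            return ret;
    }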
| 269 | /** | ||
| 270 | * scif_close() - Close an endpoint | ||
| 271 | * @epd: endpoint descriptor | ||
| 272 | * | ||
| 273 | * scif_close() closes an endpoint and performs necessary teardown of | ||
| 274 | * facilities associated with that endpoint. | ||
| 275 | * | ||
| 276 | * If epd is a listening endpoint then it will no longer accept connection | ||
| 277 | * requests on the port to which it is bound. Any pending connection requests | ||
| 278 | * are rejected. | ||
| 279 | * | ||
| 280 | * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs | ||
| 281 | * which are in-process through epd or its peer endpoint will complete before | ||
| 282 | * scif_close() returns. Registered windows of the local and peer endpoints are | ||
| 283 | * released as if scif_unregister() was called against each window. | ||
| 284 | * | ||
| 285 | * Closing a SCIF endpoint does not affect local registered memory mapped by | ||
| 286 | * a SCIF endpoint on a remote node. The local memory remains mapped by the peer | ||
| 287 | * SCIF endpoint until explicitly removed by the peer calling munmap(..). | ||
| 288 | * | ||
| 289 | * If the peer endpoint's receive queue is not empty at the time that epd is | ||
| 290 | * closed, then the peer endpoint can be passed as the endpoint parameter to | ||
| 291 | * scif_recv() until the receive queue is empty. | ||
| 292 | * | ||
| 293 | * epd is freed and may no longer be accessed. | ||
| 294 | * | ||
| 295 | * Return: | ||
| 296 | * Upon successful completion, scif_close() returns 0; otherwise in user mode | ||
| 297 | * -1 is returned and errno is set to indicate the error; in kernel mode the | ||
| 298 | * negative of one of the following errors is returned. | ||
| 299 | * | ||
| 300 | * Errors: | ||
| 301 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 302 | */ | ||
| 303 | int scif_close(scif_epd_t epd); | ||
| 304 | |||
| 305 | /** | ||
| 306 | * scif_send() - Send a message | ||
| 307 | * @epd: endpoint descriptor | ||
| 308 | * @msg: message buffer address | ||
| 309 | * @len: message length | ||
| 310 | * @flags: blocking mode flags | ||
| 311 | * | ||
| 312 | * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data | ||
| 313 | * are copied from memory starting at address msg. On successful execution the | ||
| 314 | * return value of scif_send() is the number of bytes that were sent, and is | ||
| 315 | * zero if no bytes were sent because len was zero. scif_send() may be called | ||
| 316 | * only when the endpoint is in a connected state. | ||
| 317 | * | ||
| 318 | * If a scif_send() call is non-blocking, then it sends only those bytes which | ||
| 319 | * can be sent without waiting, up to a maximum of len bytes. | ||
| 320 | * | ||
| 321 | * If a scif_send() call is blocking, then it normally returns after sending | ||
| 322 | * all len bytes. If a blocking call is interrupted or the connection is | ||
| 323 | * reset, the call is considered successful if some bytes were sent or len is | ||
| 324 | * zero, otherwise the call is considered unsuccessful. | ||
| 325 | * | ||
| 326 | * In user mode, the select() and poll() functions can be used to determine | ||
| 327 | * when the send queue is not full. In kernel mode, the scif_poll() function | ||
| 328 | * may be used for this purpose. | ||
| 329 | * | ||
| 330 | * It is recommended that scif_send()/scif_recv() only be used for short | ||
| 331 | * control-type message communication between SCIF endpoints. The SCIF RMA | ||
| 332 | * APIs are expected to provide better performance for transfer sizes of | ||
| 333 | * 1024 bytes or longer for the current MIC hardware and software | ||
| 334 | * implementation. | ||
| 335 | * | ||
| 336 | * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK | ||
| 337 | * is passed as the flags argument. | ||
| 338 | * | ||
| 339 | * Return: | ||
| 340 | * Upon successful completion, scif_send() returns the number of bytes sent; | ||
| 341 | * otherwise in user mode -1 is returned and errno is set to indicate the | ||
| 342 | * error; in kernel mode the negative of one of the following errors is | ||
| 343 | * returned. | ||
| 344 | * | ||
| 345 | * Errors: | ||
| 346 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 347 | * ECONNRESET - Connection reset by peer | ||
| 348 | * EFAULT - An invalid address was specified for a parameter | ||
| 349 | * EINVAL - flags is invalid, or len is negative | ||
| 350 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 351 | * network since it may have crashed | ||
| 352 | * ENOMEM - Not enough space | ||
| 353 | * ENOTCONN - The endpoint is not connected | ||
| 354 | */ | ||
| 355 | int scif_send(scif_epd_t epd, void *msg, int len, int flags); | ||
| 356 | |||
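For a short control message, a blocking kernel-mode send might be sketched as below; the command word and the decision to treat a short send as an error are illustrative.

    /* Sketch: send a 4-byte control word, blocking until it is all sent. */
    static int example_send(scif_epd_t epd)
    {
            u32 cmd = 0x1;          /* made-up command word */
            int ret;

            ret = scif_send(epd, &cmd, sizeof(cmd), SCIF_SEND_BLOCK);
            if (ret < 0)
                    return ret;     /* e.g. -ECONNRESET, -ENOTCONN */
            /* With SCIF_SEND_BLOCK a short send can only happen if interrupted. */
            return ret == sizeof(cmd) ? 0 : -EINTR;
    }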
| 357 | /** | ||
| 358 | * scif_recv() - Receive a message | ||
| 359 | * @epd: endpoint descriptor | ||
| 360 | * @msg: message buffer address | ||
| 361 | * @len: message buffer length | ||
| 362 | * @flags: blocking mode flags | ||
| 363 | * | ||
| 364 | * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of | ||
| 365 | * data are copied to memory starting at address msg. On successful execution | ||
| 366 | * the return value of scif_recv() is the number of bytes that were received, | ||
| 367 | * and is zero if no bytes were received because len was zero. scif_recv() may | ||
| 368 | * be called only when the endpoint is in a connected state. | ||
| 369 | * | ||
| 370 | * If a scif_recv() call is non-blocking, then it receives only those bytes | ||
| 371 | * which can be received without waiting, up to a maximum of len bytes. | ||
| 372 | * | ||
| 373 | * If a scif_recv() call is blocking, then it normally returns after receiving | ||
| 374 | * all len bytes. If the blocking call was interrupted due to a disconnection, | ||
| 375 | * subsequent calls to scif_recv() will copy all bytes received up to the point | ||
| 376 | * of disconnection. | ||
| 377 | * | ||
| 378 | * In user mode, the select() and poll() functions can be used to determine | ||
| 379 | * when data is available to be received. In kernel mode, the scif_poll() | ||
| 380 | * function may be used for this purpose. | ||
| 381 | * | ||
| 382 | * It is recommended that scif_send()/scif_recv() only be used for short | ||
| 383 | * control-type message communication between SCIF endpoints. The SCIF RMA | ||
| 384 | * APIs are expected to provide better performance for transfer sizes of | ||
| 385 | * 1024 bytes or longer for the current MIC hardware and software | ||
| 386 | * implementation. | ||
| 387 | * | ||
| 388 | * scif_recv() will block until the entire message is received if | ||
| 389 | * SCIF_RECV_BLOCK is passed as the flags argument. | ||
| 390 | * | ||
| 391 | * Return: | ||
| 392 | * Upon successful completion, scif_recv() returns the number of bytes | ||
| 393 | * received; otherwise in user mode -1 is returned and errno is set to | ||
| 394 | * indicate the error; in kernel mode the negative of one of the following | ||
| 395 | * errors is returned. | ||
| 396 | * | ||
| 397 | * Errors: | ||
| 398 | * EAGAIN - The destination node is returning from a low power state | ||
| 399 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 400 | * ECONNRESET - Connection reset by peer | ||
| 401 | * EFAULT - An invalid address was specified for a parameter | ||
| 402 | * EINVAL - flags is invalid, or len is negative | ||
| 403 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 404 | * network since it may have crashed | ||
| 405 | * ENOMEM - Not enough space | ||
| 406 | * ENOTCONN - The endpoint is not connected | ||
| 407 | */ | ||
| 408 | int scif_recv(scif_epd_t epd, void *msg, int len, int flags); | ||
| 409 | |||
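The receiving side of the control message above could be sketched as follows; the choice of -EPIPE for a short receive after a disconnection is illustrative, not mandated by the API.

    /* Sketch: receive the 4-byte control word sent above, blocking. */
    static int example_recv(scif_epd_t epd, u32 *cmd)
    {
            int ret;

            ret = scif_recv(epd, cmd, sizeof(*cmd), SCIF_RECV_BLOCK);
            if (ret < 0)
                    return ret;
            return ret == sizeof(*cmd) ? 0 : -EPIPE;   /* peer closed early */
    }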
| 410 | /** | ||
| 411 | * scif_register() - Mark a memory region for remote access. | ||
| 412 | * @epd: endpoint descriptor | ||
| 413 | * @addr: starting virtual address | ||
| 414 | * @len: length of range | ||
| 415 | * @offset: offset of window | ||
| 416 | * @prot_flags: read/write protection flags | ||
| 417 | * @map_flags: mapping flags | ||
| 418 | * | ||
| 419 | * The scif_register() function opens a window, a range of whole pages of the | ||
| 420 | * registered address space of the endpoint epd, starting at offset po and | ||
| 421 | * continuing for len bytes. The value of po, further described below, is a | ||
| 422 | * function of the parameters offset and len, and the value of map_flags. Each | ||
| 423 | * page of the window represents the physical memory page which backs the | ||
| 424 | * corresponding page of the range of virtual address pages starting at addr | ||
| 425 | * and continuing for len bytes. addr and len are constrained to be multiples | ||
| 426 | * of the page size. A successful scif_register() call returns po. | ||
| 427 | * | ||
| 428 | * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset | ||
| 429 | * exactly, and offset is constrained to be a multiple of the page size. The | ||
| 430 | * mapping established by scif_register() will not replace any existing | ||
| 431 | * registration; an error is returned if any page within the range [offset, | ||
| 432 | * offset + len - 1] intersects an existing window. | ||
| 433 | * | ||
| 434 | * When SCIF_MAP_FIXED is not set, the implementation uses offset in an | ||
| 435 | * implementation-defined manner to arrive at po. The po value so chosen will | ||
| 436 | * be an area of the registered address space that the implementation deems | ||
| 437 | * suitable for a mapping of len bytes. An offset value of 0 is interpreted as | ||
| 438 | * granting the implementation complete freedom in selecting po, subject to | ||
| 439 | * constraints described below. A non-zero value of offset is taken to be a | ||
| 440 | * suggestion of an offset near which the mapping should be placed. When the | ||
| 441 | * implementation selects a value for po, it does not replace any extant | ||
| 442 | * window. In all cases, po will be a multiple of the page size. | ||
| 443 | * | ||
| 444 | * The physical pages which are so represented by a window are available for | ||
| 445 | * access in calls to mmap(), scif_readfrom(), scif_writeto(), | ||
| 446 | * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the | ||
| 447 | * physical pages represented by the window will not be reused by the memory | ||
| 448 | * subsystem for any other purpose. Note that the same physical page may be | ||
| 449 | * represented by multiple windows. | ||
| 450 | * | ||
| 451 | * Subsequent operations which change the memory pages to which virtual | ||
| 452 | * addresses are mapped (such as mmap(), munmap()) have no effect on | ||
| 453 | * existing windows. | ||
| 454 | * | ||
| 455 | * If the process will fork(), it is recommended that the registered | ||
| 456 | * virtual address range be marked with MADV_DONTFORK. Doing so will prevent | ||
| 457 | * problems due to copy-on-write semantics. | ||
| 458 | * | ||
| 459 | * The prot_flags argument is formed by OR'ing together one or more of the | ||
| 460 | * following values. | ||
| 461 | * SCIF_PROT_READ - allow read operations from the window | ||
| 462 | * SCIF_PROT_WRITE - allow write operations to the window | ||
| 463 | * | ||
| 464 | * The map_flags argument can be set to SCIF_MAP_FIXED, which causes offset to | ||
| 465 | * be interpreted as a fixed offset as described above. | ||
| 466 | * | ||
| 467 | * Return: | ||
| 468 | * Upon successful completion, scif_register() returns the offset at which the | ||
| 469 | * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that | ||
| 470 | * is (off_t)-1) is returned and errno is set to indicate the error; in | ||
| 471 | * kernel mode the negative of one of the following errors is returned. | ||
| 472 | * | ||
| 473 | * Errors: | ||
| 474 | * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range | ||
| 475 | * [offset, offset + len - 1] are already registered | ||
| 476 | * EAGAIN - The mapping could not be performed due to lack of resources | ||
| 477 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 478 | * ECONNRESET - Connection reset by peer | ||
| 479 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
| 480 | * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is | ||
| 481 | * set in flags, and offset is not a multiple of the page size, or addr is not a | ||
| 482 | * multiple of the page size, or len is not a multiple of the page size, or is | ||
| 483 | * 0, or offset is negative | ||
| 484 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 485 | * network since it may have crashed | ||
| 486 | * ENOMEM - Not enough space | ||
| 487 | * ENOTCONN - The endpoint is not connected | ||
| 488 | */ | ||
| 489 | off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, | ||
| 490 | int prot_flags, int map_flags); | ||
| 491 | |||
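A minimal kernel-mode sketch of opening a one-page window at an implementation-chosen offset is shown below; the example_register() name is made up, and using __get_free_page() is only one way to obtain a page-aligned, page-sized buffer.

    /* Sketch: expose one page for remote read/write access. */
    static off_t example_register(scif_epd_t epd, void **buf)
    {
            off_t win;

            *buf = (void *)__get_free_page(GFP_KERNEL);
            if (!*buf)
                    return -ENOMEM;

            win = scif_register(epd, *buf, PAGE_SIZE, 0,
                                SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
            if (win < 0)
                    free_page((unsigned long)*buf);
            return win;     /* po: offset of the new window, or -errno */
    }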
| 492 | /** | ||
| 493 | * scif_unregister() - Release a memory region marked for remote access. | ||
| 494 | * @epd: endpoint descriptor | ||
| 495 | * @offset: start of range to unregister | ||
| 496 | * @len: length of range to unregister | ||
| 497 | * | ||
| 498 | * The scif_unregister() function closes those previously registered windows | ||
| 499 | * which are entirely within the range [offset, offset + len - 1]. It is an | ||
| 500 | * error to specify a range which intersects only a subrange of a window. | ||
| 501 | * | ||
| 502 | * On a successful return, pages within the window may no longer be specified | ||
| 503 | * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(), | ||
| 504 | * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window, | ||
| 505 | * however, continues to exist until all previous references against it are | ||
| 506 | * removed. A window is referenced if there is a mapping to it created by | ||
| 507 | * mmap(), or if scif_get_pages() was called against the window | ||
| 508 | * (and the pages have not been returned via scif_put_pages()). A window is | ||
| 509 | * also referenced while an RMA, in which some range of the window is a source | ||
| 510 | * or destination, is in progress. Finally a window is referenced while some | ||
| 511 | * offset in that window was specified to scif_fence_signal(), and the RMAs | ||
| 512 | * marked by that call to scif_fence_signal() have not completed. While a | ||
| 513 | * window is in this state, its registered address space pages are not | ||
| 514 | * available for use in a new registered window. | ||
| 515 | * | ||
| 516 | * When all such references to the window have been removed, its references to | ||
| 517 | * all the physical pages which it represents are removed. Similarly, the | ||
| 518 | * registered address space pages of the window become available for | ||
| 519 | * registration in a new window. | ||
| 520 | * | ||
| 521 | * Return: | ||
| 522 | * Upon successful completion, scif_unregister() returns 0; otherwise in user | ||
| 523 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 524 | * the negative of one of the following errors is returned. In the event of an | ||
| 525 | * error, no windows are unregistered. | ||
| 526 | * | ||
| 527 | * Errors: | ||
| 528 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 529 | * ECONNRESET - Connection reset by peer | ||
| 530 | * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a | ||
| 531 | * window, or offset is negative | ||
| 532 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 533 | * network since it may have crashed | ||
| 534 | * ENOTCONN - The endpoint is not connected | ||
| 535 | * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the | ||
| 536 | * registered address space of epd | ||
| 537 | */ | ||
| 538 | int scif_unregister(scif_epd_t epd, off_t offset, size_t len); | ||
| 539 | |||
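Tearing down the window from the previous sketch could look like the following; it assumes the window is no longer referenced by any mapping, RMA or fence, so the backing page can be freed immediately afterwards.

    /* Sketch: close the one-page window created above. */
    static void example_unregister(scif_epd_t epd, off_t win, void *buf)
    {
            int ret;

            ret = scif_unregister(epd, win, PAGE_SIZE);
            if (ret < 0)
                    pr_warn("scif_unregister failed: %d\n", ret);
            free_page((unsigned long)buf);
    }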
| 540 | /** | ||
| 541 | * scif_readfrom() - Copy from a remote address space | ||
| 542 | * @epd: endpoint descriptor | ||
| 543 | * @loffset: offset in local registered address space to | ||
| 544 | * which to copy | ||
| 545 | * @len: length of range to copy | ||
| 546 | * @roffset: offset in remote registered address space | ||
| 547 | * from which to copy | ||
| 548 | * @rma_flags: transfer mode flags | ||
| 549 | * | ||
| 550 | * scif_readfrom() copies len bytes from the remote registered address space of | ||
| 551 | * the peer of endpoint epd, starting at the offset roffset to the local | ||
| 552 | * registered address space of epd, starting at the offset loffset. | ||
| 553 | * | ||
| 554 | * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, | ||
| 555 | * roffset + len - 1] must be within some registered window or windows of the | ||
| 556 | * local and remote nodes. A range may intersect multiple registered windows, | ||
| 557 | * but only if those windows are contiguous in the registered address space. | ||
| 558 | * | ||
| 559 | * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using | ||
| 560 | * programmed read/writes. Otherwise the data is copied using DMA. If rma_- | ||
| 561 | * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the | ||
| 562 | * transfer is complete. Otherwise, the transfer may be performed asynchron- | ||
| 563 | * ously. The order in which any two asynchronous RMA operations complete | ||
| 564 | * is non-deterministic. The synchronization functions, scif_fence_mark()/ | ||
| 565 | * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to | ||
| 566 | * the completion of asynchronous RMA operations on the same endpoint. | ||
| 567 | * | ||
| 568 | * The DMA transfer of individual bytes is not guaranteed to complete in | ||
| 569 | * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last | ||
| 570 | * cacheline or partial cacheline of the source range will become visible on | ||
| 571 | * the destination node after all other transferred data in the source | ||
| 572 | * range has become visible on the destination node. | ||
| 573 | * | ||
| 574 | * The optimal DMA performance will likely be realized if both | ||
| 575 | * loffset and roffset are cacheline aligned (are a multiple of 64). Lower | ||
| 576 | * performance will likely be realized if loffset and roffset are not | ||
| 577 | * cacheline aligned but are separated by some multiple of 64. The lowest level | ||
| 578 | * of performance is likely if loffset and roffset are not separated by a | ||
| 579 | * multiple of 64. | ||
| 580 | * | ||
| 581 | * The rma_flags argument is formed by ORing together zero or more of the | ||
| 582 | * following values. | ||
| 583 | * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA | ||
| 584 | * engine. | ||
| 585 | * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the | ||
| 586 | * transfer has completed. Passing this flag results in the | ||
| 587 | * current implementation busy waiting and consuming CPU cycles | ||
| 588 | * while the DMA transfer is in progress for best performance by | ||
| 589 | * avoiding the interrupt latency. | ||
| 590 | * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of | ||
| 591 | * the source range becomes visible on the destination node | ||
| 592 | * after all other transferred data in the source range has | ||
| 593 | * become visible on the destination | ||
| 594 | * | ||
| 595 | * Return: | ||
| 596 | * Upon successful completion, scif_readfrom() returns 0; otherwise in user | ||
| 597 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 598 | * the negative of one of the following errors is returned. | ||
| 599 | * | ||
| 600 | * Errors: | ||
| 601 | * EACCES - Attempt to write to a read-only range | ||
| 602 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 603 | * ECONNRESET - Connection reset by peer | ||
| 604 | * EINVAL - rma_flags is invalid | ||
| 605 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 606 | * network since it may have crashed | ||
| 607 | * ENOTCONN - The endpoint is not connected | ||
| 608 | * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered | ||
| 609 | * address space of epd, or the range [roffset, roffset + len - 1] is invalid | ||
| 610 | * for the registered address space of the peer of epd, or loffset or roffset | ||
| 611 | * is negative | ||
| 612 | */ | ||
| 613 | int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t | ||
| 614 | roffset, int rma_flags); | ||
| 615 | |||
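Assuming local and remote windows set up with scif_register() as above, a synchronous window-to-window pull might be sketched as below; scif_writeto(), documented next, takes the same arguments with the transfer direction reversed.

    /* Sketch: synchronously pull one page from the peer's window at r_win
     * into the local window at l_win, with the last cacheline ordered.
     */
    static int example_readfrom(scif_epd_t epd, off_t l_win, off_t r_win)
    {
            return scif_readfrom(epd, l_win, PAGE_SIZE, r_win,
                                 SCIF_RMA_SYNC | SCIF_RMA_ORDERED);
    }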
| 616 | /** | ||
| 617 | * scif_writeto() - Copy to a remote address space | ||
| 618 | * @epd: endpoint descriptor | ||
| 619 | * @loffset: offset in local registered address space | ||
| 620 | * from which to copy | ||
| 621 | * @len: length of range to copy | ||
| 622 | * @roffset: offset in remote registered address space to | ||
| 623 | * which to copy | ||
| 624 | * @rma_flags: transfer mode flags | ||
| 625 | * | ||
| 626 | * scif_writeto() copies len bytes from the local registered address space of | ||
| 627 | * epd, starting at the offset loffset to the remote registered address space | ||
| 628 | * of the peer of endpoint epd, starting at the offset roffset. | ||
| 629 | * | ||
| 630 | * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, | ||
| 631 | * roffset + len - 1] must be within some registered window or windows of the | ||
| 632 | * local and remote nodes. A range may intersect multiple registered windows, | ||
| 633 | * but only if those windows are contiguous in the registered address space. | ||
| 634 | * | ||
| 635 | * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using | ||
| 636 | * programmed read/writes. Otherwise the data is copied using DMA. If rma_- | ||
| 637 | * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the | ||
| 638 | * transfer is complete. Otherwise, the transfer may be performed asynchron- | ||
| 639 | * ously. The order in which any two asynchronous RMA operations complete | ||
| 640 | * is non-deterministic. The synchronization functions, scif_fence_mark()/ | ||
| 641 | * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to | ||
| 642 | * the completion of asynchronous RMA operations on the same endpoint. | ||
| 643 | * | ||
| 644 | * The DMA transfer of individual bytes is not guaranteed to complete in | ||
| 645 | * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last | ||
| 646 | * cacheline or partial cacheline of the source range will become visible on | ||
| 647 | * the destination node after all other transferred data in the source | ||
| 648 | * range has become visible on the destination node. | ||
| 649 | * | ||
| 650 | * The optimal DMA performance will likely be realized if both | ||
| 651 | * loffset and roffset are cacheline aligned (are a multiple of 64). Lower | ||
| 652 | * performance will likely be realized if loffset and roffset are not cacheline | ||
| 653 | * aligned but are separated by some multiple of 64. The lowest level of | ||
| 654 | * performance is likely if loffset and roffset are not separated by a multiple | ||
| 655 | * of 64. | ||
| 656 | * | ||
| 657 | * The rma_flags argument is formed by ORing together zero or more of the | ||
| 658 | * following values. | ||
| 659 | * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA | ||
| 660 | * engine. | ||
| 661 | * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the | ||
| 662 | * transfer has completed. Passing this flag results in the | ||
| 663 | * current implementation busy waiting and consuming CPU cycles | ||
| 664 | * while the DMA transfer is in progress for best performance by | ||
| 665 | * avoiding the interrupt latency. | ||
| 666 | * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of | ||
| 667 | * the source range becomes visible on the destination node | ||
| 668 | * after all other transferred data in the source range has | ||
| 669 | * become visible on the destination | ||
| 670 | * | ||
| 671 | * Return: | ||
| 672 | * Upon successful completion, scif_writeto() returns 0; otherwise in user | ||
| 673 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 674 | * the negative of one of the following errors is returned. | ||
| 675 | * | ||
| 676 | * Errors: | ||
| 677 | * EACCES - Attempt to write to a read-only range | ||
| 678 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 679 | * ECONNRESET - Connection reset by peer | ||
| 680 | * EINVAL - rma_flags is invalid | ||
| 681 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 682 | * network since it may have crashed | ||
| 683 | * ENOTCONN - The endpoint is not connected | ||
| 684 | * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered | ||
| 685 | * address space of epd, or the range [roffset, roffset + len - 1] is invalid | ||
| 686 | * for the registered address space of the peer of epd, or loffset or roffset | ||
| 687 | * is negative | ||
| 688 | */ | ||
| 689 | int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t | ||
| 690 | roffset, int rma_flags); | ||
| 691 | |||
| 692 | /** | ||
| 693 | * scif_vreadfrom() - Copy from a remote address space | ||
| 694 | * @epd: endpoint descriptor | ||
| 695 | * @addr: address to which to copy | ||
| 696 | * @len: length of range to copy | ||
| 697 | * @roffset: offset in remote registered address space | ||
| 698 | * from which to copy | ||
| 699 | * @rma_flags: transfer mode flags | ||
| 700 | * | ||
| 701 | * scif_vreadfrom() copies len bytes from the remote registered address | ||
| 702 | * space of the peer of endpoint epd, starting at the offset roffset, to local | ||
| 703 | * memory, starting at addr. | ||
| 704 | * | ||
| 705 | * The specified range [roffset, roffset + len - 1] must be within some | ||
| 706 | * registered window or windows of the remote nodes. The range may | ||
| 707 | * intersect multiple registered windows, but only if those windows are | ||
| 708 | * contiguous in the registered address space. | ||
| 709 | * | ||
| 710 | * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using | ||
| 711 | * programmed read/writes. Otherwise the data is copied using DMA. If rma_- | ||
| 712 | * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the | ||
| 713 | * transfer is complete. Otherwise, the transfer may be performed asynchron- | ||
| 714 | * ously. The order in which any two asynchronous RMA operations complete | ||
| 715 | * is non-deterministic. The synchronization functions, scif_fence_mark()/ | ||
| 716 | * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to | ||
| 717 | * the completion of asynchronous RMA operations on the same endpoint. | ||
| 718 | * | ||
| 719 | * The DMA transfer of individual bytes is not guaranteed to complete in | ||
| 720 | * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last | ||
| 721 | * cacheline or partial cacheline of the source range will become visible on | ||
| 722 | * the destination node after all other transferred data in the source | ||
| 723 | * range has become visible on the destination node. | ||
| 724 | * | ||
| 725 | * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back | ||
| 726 | * the specified local memory range may remain in a pinned state even after | ||
| 727 | * the specified transfer completes. This may reduce overhead if some or all of | ||
| 728 | * the same virtual address range is referenced in a subsequent call of | ||
| 729 | * scif_vreadfrom() or scif_vwriteto(). | ||
| 730 | * | ||
| 731 | * The optimal DMA performance will likely be realized if both | ||
| 732 | * addr and roffset are cacheline aligned (are a multiple of 64). Lower | ||
| 733 | * performance will likely be realized if addr and roffset are not | ||
| 734 | * cacheline aligned but are separated by some multiple of 64. The lowest level | ||
| 735 | * of performance is likely if addr and roffset are not separated by a | ||
| 736 | * multiple of 64. | ||
| 737 | * | ||
| 738 | * The rma_flags argument is formed by ORing together zero or more of the | ||
| 739 | * following values. | ||
| 740 | * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA | ||
| 741 | * engine. | ||
| 742 | * SCIF_RMA_USECACHE - enable registration caching | ||
| 743 | * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the | ||
| 744 | * transfer has completed. Passing this flag results in the | ||
| 745 | * current implementation busy waiting and consuming CPU cycles | ||
| 746 | * while the DMA transfer is in progress for best performance by | ||
| 747 | * avoiding the interrupt latency. | ||
| 748 | * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of | ||
| 749 | * the source range becomes visible on the destination node | ||
| 750 | * after all other transferred data in the source range has | ||
| 751 | * become visible on the destination | ||
| 752 | * | ||
| 753 | * Return: | ||
| 754 | * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user | ||
| 755 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 756 | * the negative of one of the following errors is returned. | ||
| 757 | * | ||
| 758 | * Errors: | ||
| 759 | * EACCES - Attempt to write to a read-only range | ||
| 760 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 761 | * ECONNRESET - Connection reset by peer | ||
| 762 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
| 763 | * EINVAL - rma_flags is invalid | ||
| 764 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 765 | * network since it may have crashed | ||
| 766 | * ENOTCONN - The endpoint is not connected | ||
| 767 | * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the | ||
| 768 | * registered address space of epd | ||
| 769 | */ | ||
| 770 | int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, | ||
| 771 | int rma_flags); | ||
| 772 | |||
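When the local side is an ordinary (unregistered) buffer, a cached, synchronous pull might be sketched as below; the buffer and length are illustrative, and scif_vwriteto(), documented next, is the mirror-image call.

    /* Sketch: DMA one page from the peer's window at r_win straight into an
     * unregistered local buffer, keeping the pages pinned so a later
     * scif_vreadfrom()/scif_vwriteto() on the same range avoids re-pinning.
     */
    static int example_vreadfrom(scif_epd_t epd, void *buf, off_t r_win)
    {
            return scif_vreadfrom(epd, buf, PAGE_SIZE, r_win,
                                  SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
    }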
| 773 | /** | ||
| 774 | * scif_vwriteto() - Copy to a remote address space | ||
| 775 | * @epd: endpoint descriptor | ||
| 776 | * @addr: address from which to copy | ||
| 777 | * @len: length of range to copy | ||
| 778 | * @roffset: offset in remote registered address space to | ||
| 779 | * which to copy | ||
| 780 | * @rma_flags: transfer mode flags | ||
| 781 | * | ||
| 782 | * scif_vwriteto() copies len bytes from the local memory, starting at addr, to | ||
| 783 | * the remote registered address space of the peer of endpoint epd, starting at | ||
| 784 | * the offset roffset. | ||
| 785 | * | ||
| 786 | * The specified range [roffset, roffset + len - 1] must be within some | ||
| 787 | * registered window or windows of the remote nodes. The range may intersect | ||
| 788 | * multiple registered windows, but only if those windows are contiguous in the | ||
| 789 | * registered address space. | ||
| 790 | * | ||
| 791 | * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using | ||
| 792 | * programmed read/writes. Otherwise the data is copied using DMA. If rma_- | ||
| 793 | * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the | ||
| 794 | * transfer is complete. Otherwise, the transfer may be performed asynchron- | ||
| 795 | * ously. The order in which any two asynchronous RMA operations complete | ||
| 796 | * is non-deterministic. The synchronization functions, scif_fence_mark()/ | ||
| 797 | * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to | ||
| 798 | * the completion of asynchronous RMA operations on the same endpoint. | ||
| 799 | * | ||
| 800 | * The DMA transfer of individual bytes is not guaranteed to complete in | ||
| 801 | * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last | ||
| 802 | * cacheline or partial cacheline of the source range will become visible on | ||
| 803 | * the destination node after all other transferred data in the source | ||
| 804 | * range has become visible on the destination node. | ||
| 805 | * | ||
| 806 | * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back | ||
| 807 | * the specified local memory range may remain in a pinned state even after | ||
| 808 | * the specified transfer completes. This may reduce overhead if some or all of | ||
| 809 | * the same virtual address range is referenced in a subsequent call of | ||
| 810 | * scif_vreadfrom() or scif_vwriteto(). | ||
| 811 | * | ||
| 812 | * The optimal DMA performance will likely be realized if both | ||
| 813 | * addr and offset are cacheline aligned (are a multiple of 64). Lower | ||
| 814 | * performance will likely be realized if addr and offset are not cacheline | ||
| 815 | * aligned but are separated by some multiple of 64. The lowest level of | ||
| 816 | * performance is likely if addr and offset are not separated by a multiple of | ||
| 817 | * 64. | ||
| 818 | * | ||
| 819 | * The rma_flags argument is formed by ORing together zero or more of the | ||
| 820 | * following values. | ||
| 821 | * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA | ||
| 822 | * engine. | ||
| 823 | * SCIF_RMA_USECACHE - allow registration caching | ||
| 824 | * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the | ||
| 825 | * transfer has completed. Passing this flag results in the | ||
| 826 | * current implementation busy waiting and consuming CPU cycles | ||
| 827 | * while the DMA transfer is in progress for best performance by | ||
| 828 | * avoiding the interrupt latency. | ||
| 829 | * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of | ||
| 830 | * the source range becomes visible on the destination node | ||
| 831 | * after all other transferred data in the source range has | ||
| 832 | * become visible on the destination | ||
| 833 | * | ||
| 834 | * Return: | ||
| 835 | * Upon successful completion, scif_vwriteto() returns 0; otherwise in user | ||
| 836 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 837 | * the negative of one of the following errors is returned. | ||
| 838 | * | ||
| 839 | * Errors: | ||
| 840 | * EACCES - Attempt to write to a read-only range | ||
| 841 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 842 | * ECONNRESET - Connection reset by peer | ||
| 843 | * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid | ||
| 844 | * EINVAL - rma_flags is invalid | ||
| 845 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 846 | * network since it may have crashed | ||
| 847 | * ENOTCONN - The endpoint is not connected | ||
| 848 | * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the | ||
| 849 | * registered address space of epd | ||
| 850 | */ | ||
| 851 | int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset, | ||
| 852 | int rma_flags); | ||
| 853 | |||
| 854 | /** | ||
| 855 | * scif_fence_mark() - Mark previously issued RMAs | ||
| 856 | * @epd: endpoint descriptor | ||
| 857 | * @flags: control flags | ||
| 858 | * @mark: marked value returned as output. | ||
| 859 | * | ||
| 860 | * scif_fence_mark() returns after marking the current set of all uncompleted | ||
| 861 | * RMAs initiated through the endpoint epd or the current set of all | ||
| 862 | * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are | ||
| 863 | * marked with a value returned at mark. The application may subsequently call | ||
| 864 | * scif_fence_wait(), passing the value returned at mark, to await completion | ||
| 865 | * of all RMAs so marked. | ||
| 866 | * | ||
| 867 | * The flags argument has exactly one of the following values. | ||
| 868 | * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint | ||
| 869 | * epd are marked | ||
| 870 | * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer | ||
| 871 | * of endpoint epd are marked | ||
| 872 | * | ||
| 873 | * Return: | ||
| 874 | * Upon successful completion, scif_fence_mark() returns 0; otherwise in user | ||
| 875 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 876 | * the negative of one of the following errors is returned. | ||
| 877 | * | ||
| 878 | * Errors: | ||
| 879 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 880 | * ECONNRESET - Connection reset by peer | ||
| 881 | * EINVAL - flags is invalid | ||
| 882 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 883 | * network since it may have crashed | ||
| 884 | * ENOTCONN - The endpoint is not connected | ||
| 885 | * ENOMEM - Insufficient kernel memory was available | ||
| 886 | */ | ||
| 887 | int scif_fence_mark(scif_epd_t epd, int flags, int *mark); | ||
| 888 | |||
| 889 | /** | ||
| 890 | * scif_fence_wait() - Wait for completion of marked RMAs | ||
| 891 | * @epd: endpoint descriptor | ||
| 892 | * @mark: mark request | ||
| 893 | * | ||
| 894 | * scif_fence_wait() returns after all RMAs marked with mark have completed. | ||
| 895 | * The value passed in mark must have been obtained in a previous call to | ||
| 896 | * scif_fence_mark(). | ||
| 897 | * | ||
| 898 | * Return: | ||
| 899 | * Upon successful completion, scif_fence_wait() returns 0; otherwise in user | ||
| 900 | * mode -1 is returned and errno is set to indicate the error; in kernel mode | ||
| 901 | * the negative of one of the following errors is returned. | ||
| 902 | * | ||
| 903 | * Errors: | ||
| 904 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 905 | * ECONNRESET - Connection reset by peer | ||
| 906 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 907 | * network since it may have crashed | ||
| 908 | * ENOTCONN - The endpoint is not connected | ||
| 909 | * ENOMEM - Insufficient kernel memory was available | ||
| 910 | */ | ||
| 911 | int scif_fence_wait(scif_epd_t epd, int mark); | ||
| 912 | |||
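Putting scif_fence_mark() and scif_fence_wait() together, an asynchronous RMA followed by a completion fence could be sketched as below; the window offsets are assumed to come from earlier scif_register() calls.

    /* Sketch: issue an asynchronous RMA, then fence on its completion. */
    static int example_fence(scif_epd_t epd, off_t l_win, off_t r_win)
    {
            int mark, ret;

            ret = scif_writeto(epd, l_win, PAGE_SIZE, r_win, 0);   /* async DMA */
            if (ret < 0)
                    return ret;

            ret = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
            if (ret < 0)
                    return ret;

            return scif_fence_wait(epd, mark);  /* returns when marked RMAs are done */
    }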
| 913 | /** | ||
| 914 | * scif_fence_signal() - Request a memory update on completion of RMAs | ||
| 915 | * @epd: endpoint descriptor | ||
| 916 | * @loff: local offset | ||
| 917 | * @lval: local value to write to loff | ||
| 918 | * @roff: remote offset | ||
| 919 | * @rval: remote value to write to roff | ||
| 920 | * @flags: flags | ||
| 921 | * | ||
| 922 | * scif_fence_signal() returns after marking the current set of all uncompleted | ||
| 923 | * RMAs initiated through the endpoint epd or marking the current set of all | ||
| 924 | * uncompleted RMAs initiated through the peer of endpoint epd. | ||
| 925 | * | ||
| 926 | * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the | ||
| 927 | * marked set, lval is written to memory at the address corresponding to offset | ||
| 928 | * loff in the local registered address space of epd. loff must be within a | ||
| 929 | * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion | ||
| 930 | * of the RMAs in the marked set, rval is written to memory at the address | ||
| 931 | * corresponding to offset roff in the remote registered address space of epd. | ||
| 932 | * roff must be within a remote registered window of the peer of epd. Note | ||
| 933 | * that any specified offset must be DWORD (4 byte / 32 bit) aligned. | ||
| 934 | * | ||
| 935 | * The flags argument is formed by OR'ing together the following. | ||
| 936 | * Exactly one of the following values. | ||
| 937 | * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint | ||
| 938 | * epd are marked | ||
| 939 | * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer | ||
| 940 | * of endpoint epd are marked | ||
| 941 | * One or more of the following values. | ||
| 942 | * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to | ||
| 943 | * memory at the address corresponding to offset loff in the local | ||
| 944 | * registered address space of epd. | ||
| 945 | * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to | ||
| 946 | * memory at the address corresponding to offset roff in the remote | ||
| 947 | * registered address space of epd. | ||
| 948 | * | ||
| 949 | * Return: | ||
| 950 | * Upon successful completion, scif_fence_signal() returns 0; otherwise in | ||
| 951 | * user mode -1 is returned and errno is set to indicate the error; in kernel | ||
| 952 | * mode the negative of one of the following errors is returned. | ||
| 953 | * | ||
| 954 | * Errors: | ||
| 955 | * EBADF, ENOTTY - epd is not a valid endpoint descriptor | ||
| 956 | * ECONNRESET - Connection reset by peer | ||
| 957 | * EINVAL - flags is invalid, or loff or roff are not DWORD aligned | ||
| 958 | * ENODEV - The remote node is lost, or existed but is not currently in the | ||
| 959 | * network since it may have crashed | ||
| 960 | * ENOTCONN - The endpoint is not connected | ||
| 961 | * ENXIO - loff is invalid for the registered address space of epd, or roff is | ||
| 962 | * invalid for the registered address space of the peer of epd | ||
| 963 | */ | ||
| 964 | int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, | ||
| 965 | u64 rval, int flags); | ||
| 966 | |||
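A hypothetical use of scif_fence_signal() is sketched below: the locally initiated RMAs are marked, and a 64-bit completion flag is written into the local registered address space once they finish. The offset l_flag_off (DWORD aligned, inside a registered window) and the flag value are illustrative.

    /* Sketch: request a local completion flag after locally initiated RMAs. */
    static int example_signal(scif_epd_t epd, off_t l_flag_off)
    {
            return scif_fence_signal(epd, l_flag_off, 1ULL, 0, 0,
                                     SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);
    }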
| 967 | /** | ||
| 968 | * scif_get_node_ids() - Return information about online nodes | ||
| 969 | * @nodes: array in which to return online node IDs | ||
| 970 | * @len: number of entries in the nodes array | ||
| 971 | * @self: address to place the node ID of the local node | ||
| 972 | * | ||
| 973 | * scif_get_node_ids() fills in the nodes array with up to len node IDs of the | ||
| 974 | * nodes in the SCIF network. If there is not enough space in nodes, as | ||
| 975 | * indicated by the len parameter, only len node IDs are returned in nodes. The | ||
| 976 | * return value of scif_get_node_ids() is the total number of nodes currently in | ||
| 977 | * the SCIF network. By checking the return value against the len parameter, | ||
| 978 | * the user may determine if enough space for nodes was allocated. | ||
| 979 | * | ||
| 980 | * The node ID of the local node is returned at self. | ||
| 981 | * | ||
| 982 | * Return: | ||
| 983 | * Upon successful completion, scif_get_node_ids() returns the actual number of | ||
| 984 | * online nodes in the SCIF network including 'self'; otherwise in user mode | ||
| 985 | * -1 is returned and errno is set to indicate the error; in kernel mode no | ||
| 986 | * errors are returned. | ||
| 987 | * | ||
| 988 | * Errors: | ||
| 989 | * EFAULT - Bad address | ||
| 990 | */ | ||
| 991 | int scif_get_node_ids(u16 *nodes, int len, u16 *self); | ||
| 992 | |||
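Finally, discovering the online nodes might be sketched as below; MAX_SCIF_NODES is a made-up bound used only to size the example array, and the return value is compared against it to detect truncation.

    /* Sketch: list online node IDs and report truncation if any. */
    #define MAX_SCIF_NODES 16
    static int example_node_ids(void)
    {
            u16 nodes[MAX_SCIF_NODES], self;
            int total;

            total = scif_get_node_ids(nodes, MAX_SCIF_NODES, &self);
            if (total > MAX_SCIF_NODES)
                    pr_info("only %d of %d node IDs returned\n",
                            MAX_SCIF_NODES, total);
            return total;
    }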
| 993 | #endif /* __SCIF_H__ */ | ||
diff --git a/include/linux/security.h b/include/linux/security.h index 52febde52479..79d85ddf8093 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
| 28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
| 29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
| 30 | #include <linux/mm.h> | ||
| 30 | 31 | ||
| 31 | struct linux_binprm; | 32 | struct linux_binprm; |
| 32 | struct cred; | 33 | struct cred; |
| @@ -53,9 +54,6 @@ struct xattr; | |||
| 53 | struct xfrm_sec_ctx; | 54 | struct xfrm_sec_ctx; |
| 54 | struct mm_struct; | 55 | struct mm_struct; |
| 55 | 56 | ||
| 56 | /* Maximum number of letters for an LSM name string */ | ||
| 57 | #define SECURITY_NAME_MAX 10 | ||
| 58 | |||
| 59 | /* If capable should audit the security request */ | 57 | /* If capable should audit the security request */ |
| 60 | #define SECURITY_CAP_NOAUDIT 0 | 58 | #define SECURITY_CAP_NOAUDIT 0 |
| 61 | #define SECURITY_CAP_AUDIT 1 | 59 | #define SECURITY_CAP_AUDIT 1 |
| @@ -68,10 +66,7 @@ struct audit_krule; | |||
| 68 | struct user_namespace; | 66 | struct user_namespace; |
| 69 | struct timezone; | 67 | struct timezone; |
| 70 | 68 | ||
| 71 | /* | 69 | /* These functions are in security/commoncap.c */ |
| 72 | * These functions are in security/capability.c and are used | ||
| 73 | * as the default capabilities functions | ||
| 74 | */ | ||
| 75 | extern int cap_capable(const struct cred *cred, struct user_namespace *ns, | 70 | extern int cap_capable(const struct cred *cred, struct user_namespace *ns, |
| 76 | int cap, int audit); | 71 | int cap, int audit); |
| 77 | extern int cap_settime(const struct timespec *ts, const struct timezone *tz); | 72 | extern int cap_settime(const struct timespec *ts, const struct timezone *tz); |
| @@ -113,10 +108,6 @@ struct xfrm_state; | |||
| 113 | struct xfrm_user_sec_ctx; | 108 | struct xfrm_user_sec_ctx; |
| 114 | struct seq_file; | 109 | struct seq_file; |
| 115 | 110 | ||
| 116 | extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); | ||
| 117 | |||
| 118 | void reset_security_ops(void); | ||
| 119 | |||
| 120 | #ifdef CONFIG_MMU | 111 | #ifdef CONFIG_MMU |
| 121 | extern unsigned long mmap_min_addr; | 112 | extern unsigned long mmap_min_addr; |
| 122 | extern unsigned long dac_mmap_min_addr; | 113 | extern unsigned long dac_mmap_min_addr; |
| @@ -187,1583 +178,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
| 187 | opts->num_mnt_opts = 0; | 178 | opts->num_mnt_opts = 0; |
| 188 | } | 179 | } |
| 189 | 180 | ||
| 190 | /** | ||
| 191 | * struct security_operations - main security structure | ||
| 192 | * | ||
| 193 | * Security module identifier. | ||
| 194 | * | ||
| 195 | * @name: | ||
| 196 | * A string that acts as a unique identifier for the LSM with max number | ||
| 197 | * of characters = SECURITY_NAME_MAX. | ||
| 198 | * | ||
| 199 | * Security hooks for program execution operations. | ||
| 200 | * | ||
| 201 | * @bprm_set_creds: | ||
| 202 | * Save security information in the bprm->security field, typically based | ||
| 203 | * on information about the bprm->file, for later use by the apply_creds | ||
| 204 | * hook. This hook may also optionally check permissions (e.g. for | ||
| 205 | * transitions between security domains). | ||
| 206 | * This hook may be called multiple times during a single execve, e.g. for | ||
| 207 | * interpreters. The hook can tell whether it has already been called by | ||
| 208 | * checking to see if @bprm->security is non-NULL. If so, then the hook | ||
| 209 | * may decide either to retain the security information saved earlier or | ||
| 210 | * to replace it. | ||
| 211 | * @bprm contains the linux_binprm structure. | ||
| 212 | * Return 0 if the hook is successful and permission is granted. | ||
| 213 | * @bprm_check_security: | ||
| 214 | * This hook mediates the point when a search for a binary handler will | ||
| 215 | * begin. It allows a check the @bprm->security value which is set in the | ||
| 216 | * preceding set_creds call. The primary difference from set_creds is | ||
| 217 | * that the argv list and envp list are reliably available in @bprm. This | ||
| 218 | * hook may be called multiple times during a single execve; and in each | ||
| 219 | * pass set_creds is called first. | ||
| 220 | * @bprm contains the linux_binprm structure. | ||
| 221 | * Return 0 if the hook is successful and permission is granted. | ||
| 222 | * @bprm_committing_creds: | ||
| 223 | * Prepare to install the new security attributes of a process being | ||
| 224 | * transformed by an execve operation, based on the old credentials | ||
| 225 | * pointed to by @current->cred and the information set in @bprm->cred by | ||
| 226 | * the bprm_set_creds hook. @bprm points to the linux_binprm structure. | ||
| 227 | * This hook is a good place to perform state changes on the process such | ||
| 228 | * as closing open file descriptors to which access will no longer be | ||
| 229 | * granted when the attributes are changed. This is called immediately | ||
| 230 | * before commit_creds(). | ||
| 231 | * @bprm_committed_creds: | ||
| 232 | * Tidy up after the installation of the new security attributes of a | ||
| 233 | * process being transformed by an execve operation. The new credentials | ||
| 234 | * have, by this point, been set to @current->cred. @bprm points to the | ||
| 235 | * linux_binprm structure. This hook is a good place to perform state | ||
| 236 | * changes on the process such as clearing out non-inheritable signal | ||
| 237 | * state. This is called immediately after commit_creds(). | ||
| 238 | * @bprm_secureexec: | ||
| 239 | * Return a boolean value (0 or 1) indicating whether a "secure exec" | ||
| 240 | * is required. The flag is passed in the auxiliary table | ||
| 241 | * on the initial stack to the ELF interpreter to indicate whether libc | ||
| 242 | * should enable secure mode. | ||
| 243 | * @bprm contains the linux_binprm structure. | ||
| 244 | * | ||
| 245 | * Security hooks for filesystem operations. | ||
| 246 | * | ||
| 247 | * @sb_alloc_security: | ||
| 248 | * Allocate and attach a security structure to the sb->s_security field. | ||
| 249 | * The s_security field is initialized to NULL when the structure is | ||
| 250 | * allocated. | ||
| 251 | * @sb contains the super_block structure to be modified. | ||
| 252 | * Return 0 if operation was successful. | ||
| 253 | * @sb_free_security: | ||
| 254 | * Deallocate and clear the sb->s_security field. | ||
| 255 | * @sb contains the super_block structure to be modified. | ||
| 256 | * @sb_statfs: | ||
| 257 | * Check permission before obtaining filesystem statistics for the @mnt | ||
| 258 | * mountpoint. | ||
| 259 | * @dentry is a handle on the superblock for the filesystem. | ||
| 260 | * Return 0 if permission is granted. | ||
| 261 | * @sb_mount: | ||
| 262 | * Check permission before an object specified by @dev_name is mounted on | ||
| 263 | * the mount point named by @nd. For an ordinary mount, @dev_name | ||
| 264 | * identifies a device if the file system type requires a device. For a | ||
| 265 | * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a | ||
| 266 | * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the | ||
| 267 | * pathname of the object being mounted. | ||
| 268 | * @dev_name contains the name for object being mounted. | ||
| 269 | * @path contains the path for mount point object. | ||
| 270 | * @type contains the filesystem type. | ||
| 271 | * @flags contains the mount flags. | ||
| 272 | * @data contains the filesystem-specific data. | ||
| 273 | * Return 0 if permission is granted. | ||
| 274 | * @sb_copy_data: | ||
| 275 | * Allow mount option data to be copied prior to parsing by the filesystem, | ||
| 276 | * so that the security module can extract security-specific mount | ||
| 277 | * options cleanly (a filesystem may modify the data e.g. with strsep()). | ||
| 278 | * This also allows the original mount data to be stripped of security- | ||
| 279 | * specific options to avoid having to make filesystems aware of them. | ||
| 280 | * @type the type of filesystem being mounted. | ||
| 281 | * @orig the original mount data copied from userspace. | ||
| 282 | * @copy copied data which will be passed to the security module. | ||
| 283 | * Returns 0 if the copy was successful. | ||
| 284 | * @sb_remount: | ||
| 285 | * Extracts security system specific mount options and verifies no changes | ||
| 286 | * are being made to those options. | ||
| 287 | * @sb superblock being remounted | ||
| 288 | * @data contains the filesystem-specific data. | ||
| 289 | * Return 0 if permission is granted. | ||
| 290 | * @sb_umount: | ||
| 291 | * Check permission before the @mnt file system is unmounted. | ||
| 292 | * @mnt contains the mounted file system. | ||
| 293 | * @flags contains the unmount flags, e.g. MNT_FORCE. | ||
| 294 | * Return 0 if permission is granted. | ||
| 295 | * @sb_pivotroot: | ||
| 296 | * Check permission before pivoting the root filesystem. | ||
| 297 | * @old_path contains the path for the new location of the current root (put_old). | ||
| 298 | * @new_path contains the path for the new root (new_root). | ||
| 299 | * Return 0 if permission is granted. | ||
| 300 | * @sb_set_mnt_opts: | ||
| 301 | * Set the security relevant mount options used for a superblock | ||
| 302 | * @sb the superblock to set security mount options for | ||
| 303 | * @opts binary data structure containing all lsm mount data | ||
| 304 | * @sb_clone_mnt_opts: | ||
| 305 | * Copy all security options from a given superblock to another | ||
| 306 | * @oldsb old superblock which contain information to clone | ||
| 307 | * @newsb new superblock which needs filled in | ||
| 308 | * @sb_parse_opts_str: | ||
| 309 | * Parse a string of security data filling in the opts structure | ||
| 310 | * @options string containing all mount options known by the LSM | ||
| 311 | * @opts binary data structure usable by the LSM | ||
| 312 | * @dentry_init_security: | ||
| 313 | * Compute a context for a dentry as the inode is not yet available | ||
| 314 | * since NFSv4 has no label backed by an EA anyway. | ||
| 315 | * @dentry dentry to use in calculating the context. | ||
| 316 | * @mode mode used to determine resource type. | ||
| 317 | * @name name of the last path component used to create file | ||
| 318 | * @ctx pointer to place the pointer to the resulting context in. | ||
| 319 | * @ctxlen point to place the length of the resulting context. | ||
| 320 | * | ||
| 321 | * | ||
| 322 | * Security hooks for inode operations. | ||
| 323 | * | ||
| 324 | * @inode_alloc_security: | ||
| 325 | * Allocate and attach a security structure to @inode->i_security. The | ||
| 326 | * i_security field is initialized to NULL when the inode structure is | ||
| 327 | * allocated. | ||
| 328 | * @inode contains the inode structure. | ||
| 329 | * Return 0 if operation was successful. | ||
| 330 | * @inode_free_security: | ||
| 331 | * @inode contains the inode structure. | ||
| 332 | * Deallocate the inode security structure and set @inode->i_security to | ||
| 333 | * NULL. | ||
| 334 | * @inode_init_security: | ||
| 335 | * Obtain the security attribute name suffix and value to set on a newly | ||
| 336 | * created inode and set up the incore security field for the new inode. | ||
| 337 | * This hook is called by the fs code as part of the inode creation | ||
| 338 | * transaction and provides for atomic labeling of the inode, unlike | ||
| 339 | * the post_create/mkdir/... hooks called by the VFS. The hook function | ||
| 340 | * is expected to allocate the name and value via kmalloc, with the caller | ||
| 341 | * being responsible for calling kfree after using them. | ||
| 342 | * If the security module does not use security attributes or does | ||
| 343 | * not wish to put a security attribute on this particular inode, | ||
| 344 | * then it should return -EOPNOTSUPP to skip this processing. | ||
| 345 | * @inode contains the inode structure of the newly created inode. | ||
| 346 | * @dir contains the inode structure of the parent directory. | ||
| 347 | * @qstr contains the last path component of the new object | ||
| 348 | * @name will be set to the allocated name suffix (e.g. selinux). | ||
| 349 | * @value will be set to the allocated attribute value. | ||
| 350 | * @len will be set to the length of the value. | ||
| 351 | * Returns 0 if @name and @value have been successfully set, | ||
| 352 | * -EOPNOTSUPP if no security attribute is needed, or | ||
| 353 | * -ENOMEM on memory allocation failure. | ||
| 354 | * @inode_create: | ||
| 355 | * Check permission to create a regular file. | ||
| 356 | * @dir contains inode structure of the parent of the new file. | ||
| 357 | * @dentry contains the dentry structure for the file to be created. | ||
| 358 | * @mode contains the file mode of the file to be created. | ||
| 359 | * Return 0 if permission is granted. | ||
| 360 | * @inode_link: | ||
| 361 | * Check permission before creating a new hard link to a file. | ||
| 362 | * @old_dentry contains the dentry structure for an existing link to the file. | ||
| 363 | * @dir contains the inode structure of the parent directory of the new link. | ||
| 364 | * @new_dentry contains the dentry structure for the new link. | ||
| 365 | * Return 0 if permission is granted. | ||
| 366 | * @path_link: | ||
| 367 | * Check permission before creating a new hard link to a file. | ||
| 368 | * @old_dentry contains the dentry structure for an existing link | ||
| 369 | * to the file. | ||
| 370 | * @new_dir contains the path structure of the parent directory of | ||
| 371 | * the new link. | ||
| 372 | * @new_dentry contains the dentry structure for the new link. | ||
| 373 | * Return 0 if permission is granted. | ||
| 374 | * @inode_unlink: | ||
| 375 | * Check the permission to remove a hard link to a file. | ||
| 376 | * @dir contains the inode structure of parent directory of the file. | ||
| 377 | * @dentry contains the dentry structure for file to be unlinked. | ||
| 378 | * Return 0 if permission is granted. | ||
| 379 | * @path_unlink: | ||
| 380 | * Check the permission to remove a hard link to a file. | ||
| 381 | * @dir contains the path structure of parent directory of the file. | ||
| 382 | * @dentry contains the dentry structure for file to be unlinked. | ||
| 383 | * Return 0 if permission is granted. | ||
| 384 | * @inode_symlink: | ||
| 385 | * Check the permission to create a symbolic link to a file. | ||
| 386 | * @dir contains the inode structure of parent directory of the symbolic link. | ||
| 387 | * @dentry contains the dentry structure of the symbolic link. | ||
| 388 | * @old_name contains the pathname of file. | ||
| 389 | * Return 0 if permission is granted. | ||
| 390 | * @path_symlink: | ||
| 391 | * Check the permission to create a symbolic link to a file. | ||
| 392 | * @dir contains the path structure of parent directory of | ||
| 393 | * the symbolic link. | ||
| 394 | * @dentry contains the dentry structure of the symbolic link. | ||
| 395 | * @old_name contains the pathname of file. | ||
| 396 | * Return 0 if permission is granted. | ||
| 397 | * @inode_mkdir: | ||
| 398 | * Check permissions to create a new directory in the existing directory | ||
| 399 | * associated with inode structure @dir. | ||
| 400 | * @dir contains the inode structure of parent of the directory to be created. | ||
| 401 | * @dentry contains the dentry structure of new directory. | ||
| 402 | * @mode contains the mode of new directory. | ||
| 403 | * Return 0 if permission is granted. | ||
| 404 | * @path_mkdir: | ||
| 405 | * Check permissions to create a new directory in the existing directory | ||
| 406 | * associated with path structure @path. | ||
| 407 | * @dir contains the path structure of parent of the directory | ||
| 408 | * to be created. | ||
| 409 | * @dentry contains the dentry structure of new directory. | ||
| 410 | * @mode contains the mode of new directory. | ||
| 411 | * Return 0 if permission is granted. | ||
| 412 | * @inode_rmdir: | ||
| 413 | * Check the permission to remove a directory. | ||
| 414 | * @dir contains the inode structure of parent of the directory to be removed. | ||
| 415 | * @dentry contains the dentry structure of directory to be removed. | ||
| 416 | * Return 0 if permission is granted. | ||
| 417 | * @path_rmdir: | ||
| 418 | * Check the permission to remove a directory. | ||
| 419 | * @dir contains the path structure of parent of the directory to be | ||
| 420 | * removed. | ||
| 421 | * @dentry contains the dentry structure of directory to be removed. | ||
| 422 | * Return 0 if permission is granted. | ||
| 423 | * @inode_mknod: | ||
| 424 | * Check permissions when creating a special file (or a socket or a fifo | ||
| 425 | * file created via the mknod system call). Note that if mknod operation | ||
| 426 | * is being done for a regular file, then the create hook will be called | ||
| 427 | * and not this hook. | ||
| 428 | * @dir contains the inode structure of parent of the new file. | ||
| 429 | * @dentry contains the dentry structure of the new file. | ||
| 430 | * @mode contains the mode of the new file. | ||
| 431 | * @dev contains the device number. | ||
| 432 | * Return 0 if permission is granted. | ||
| 433 | * @path_mknod: | ||
| 434 | * Check permissions when creating a file. Note that this hook is called | ||
| 435 | * even if mknod operation is being done for a regular file. | ||
| 436 | * @dir contains the path structure of parent of the new file. | ||
| 437 | * @dentry contains the dentry structure of the new file. | ||
| 438 | * @mode contains the mode of the new file. | ||
| 439 | * @dev contains the undecoded device number. Use new_decode_dev() to get | ||
| 440 | * the decoded device number. | ||
| 441 | * Return 0 if permission is granted. | ||
| 442 | * @inode_rename: | ||
| 443 | * Check for permission to rename a file or directory. | ||
| 444 | * @old_dir contains the inode structure for parent of the old link. | ||
| 445 | * @old_dentry contains the dentry structure of the old link. | ||
| 446 | * @new_dir contains the inode structure for parent of the new link. | ||
| 447 | * @new_dentry contains the dentry structure of the new link. | ||
| 448 | * Return 0 if permission is granted. | ||
| 449 | * @path_rename: | ||
| 450 | * Check for permission to rename a file or directory. | ||
| 451 | * @old_dir contains the path structure for parent of the old link. | ||
| 452 | * @old_dentry contains the dentry structure of the old link. | ||
| 453 | * @new_dir contains the path structure for parent of the new link. | ||
| 454 | * @new_dentry contains the dentry structure of the new link. | ||
| 455 | * Return 0 if permission is granted. | ||
| 456 | * @path_chmod: | ||
| 457 | * Check for permission to change the DAC permissions of a file or directory. | ||
| 458 | * @dentry contains the dentry structure. | ||
| 459 | * @mnt contains the vfsmnt structure. | ||
| 460 | * @mode contains DAC's mode. | ||
| 461 | * Return 0 if permission is granted. | ||
| 462 | * @path_chown: | ||
| 463 | * Check for permission to change owner/group of a file or directory. | ||
| 464 | * @path contains the path structure. | ||
| 465 | * @uid contains new owner's ID. | ||
| 466 | * @gid contains new group's ID. | ||
| 467 | * Return 0 if permission is granted. | ||
| 468 | * @path_chroot: | ||
| 469 | * Check for permission to change root directory. | ||
| 470 | * @path contains the path structure. | ||
| 471 | * Return 0 if permission is granted. | ||
| 472 | * @inode_readlink: | ||
| 473 | * Check the permission to read the symbolic link. | ||
| 474 | * @dentry contains the dentry structure for the file link. | ||
| 475 | * Return 0 if permission is granted. | ||
| 476 | * @inode_follow_link: | ||
| 477 | * Check permission to follow a symbolic link when looking up a pathname. | ||
| 478 | * @dentry contains the dentry structure for the link. | ||
| 479 | * @inode contains the inode, which itself is not stable in RCU-walk | ||
| 480 | * @rcu indicates whether we are in RCU-walk mode. | ||
| 481 | * Return 0 if permission is granted. | ||
| 482 | * @inode_permission: | ||
| 483 | * Check permission before accessing an inode. This hook is called by the | ||
| 484 | * existing Linux permission function, so a security module can use it to | ||
| 485 | * provide additional checking for existing Linux permission checks. | ||
| 486 | * Notice that this hook is called when a file is opened (as well as many | ||
| 487 | * other operations), whereas the file_security_ops permission hook is | ||
| 488 | * called when the actual read/write operations are performed. | ||
| 489 | * @inode contains the inode structure to check. | ||
| 490 | * @mask contains the permission mask. | ||
| 491 | * Return 0 if permission is granted. | ||
| 492 | * @inode_setattr: | ||
| 493 | * Check permission before setting file attributes. Note that the kernel | ||
| 494 | * call to notify_change is performed from several locations, whenever | ||
| 495 | * file attributes change (such as when a file is truncated, chown/chmod | ||
| 496 | * operations, transferring disk quotas, etc). | ||
| 497 | * @dentry contains the dentry structure for the file. | ||
| 498 | * @attr is the iattr structure containing the new file attributes. | ||
| 499 | * Return 0 if permission is granted. | ||
| 500 | * @path_truncate: | ||
| 501 | * Check permission before truncating a file. | ||
| 502 | * @path contains the path structure for the file. | ||
| 503 | * Return 0 if permission is granted. | ||
| 504 | * @inode_getattr: | ||
| 505 | * Check permission before obtaining file attributes. | ||
| 506 | * @mnt is the vfsmount where the dentry was looked up | ||
| 507 | * @dentry contains the dentry structure for the file. | ||
| 508 | * Return 0 if permission is granted. | ||
| 509 | * @inode_setxattr: | ||
| 510 | * Check permission before setting the extended attributes | ||
| 511 | * @value identified by @name for @dentry. | ||
| 512 | * Return 0 if permission is granted. | ||
| 513 | * @inode_post_setxattr: | ||
| 514 | * Update inode security field after successful setxattr operation. | ||
| 515 | * @value identified by @name for @dentry. | ||
| 516 | * @inode_getxattr: | ||
| 517 | * Check permission before obtaining the extended attributes | ||
| 518 | * identified by @name for @dentry. | ||
| 519 | * Return 0 if permission is granted. | ||
| 520 | * @inode_listxattr: | ||
| 521 | * Check permission before obtaining the list of extended attribute | ||
| 522 | * names for @dentry. | ||
| 523 | * Return 0 if permission is granted. | ||
| 524 | * @inode_removexattr: | ||
| 525 | * Check permission before removing the extended attribute | ||
| 526 | * identified by @name for @dentry. | ||
| 527 | * Return 0 if permission is granted. | ||
| 528 | * @inode_getsecurity: | ||
| 529 | * Retrieve a copy of the extended attribute representation of the | ||
| 530 | * security label associated with @name for @inode via @buffer. Note that | ||
| 531 | * @name is the remainder of the attribute name after the security prefix | ||
| 532 | * has been removed. @alloc is used to specify whether the call should | ||
| 533 | * return a value via the buffer or just the value length. | ||
| 534 | * Return size of buffer on success. | ||
| 535 | * @inode_setsecurity: | ||
| 536 | * Set the security label associated with @name for @inode from the | ||
| 537 | * extended attribute value @value. @size indicates the size of the | ||
| 538 | * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. | ||
| 539 | * Note that @name is the remainder of the attribute name after the | ||
| 540 | * security. prefix has been removed. | ||
| 541 | * Return 0 on success. | ||
| 542 | * @inode_listsecurity: | ||
| 543 | * Copy the extended attribute names for the security labels | ||
| 544 | * associated with @inode into @buffer. The maximum size of @buffer | ||
| 545 | * is specified by @buffer_size. @buffer may be NULL to request | ||
| 546 | * the size of the buffer required. | ||
| 547 | * Returns number of bytes used/required on success. | ||
| 548 | * @inode_need_killpriv: | ||
| 549 | * Called when an inode has been changed. | ||
| 550 | * @dentry is the dentry being changed. | ||
| 551 | * Return <0 on error to abort the inode change operation. | ||
| 552 | * Return 0 if inode_killpriv does not need to be called. | ||
| 553 | * Return >0 if inode_killpriv does need to be called. | ||
| 554 | * @inode_killpriv: | ||
| 555 | * The setuid bit is being removed. Remove similar security labels. | ||
| 556 | * Called with the dentry->d_inode->i_mutex held. | ||
| 557 | * @dentry is the dentry being changed. | ||
| 558 | * Return 0 on success. If error is returned, then the operation | ||
| 559 | * causing setuid bit removal is failed. | ||
| 560 | * @inode_getsecid: | ||
| 561 | * Get the secid associated with the inode. | ||
| 562 | * @inode contains a pointer to the inode. | ||
| 563 | * @secid contains a pointer to the location where result will be saved. | ||
| 564 | * In case of failure, @secid will be set to zero. | ||
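A rough sketch of the @inode_init_security contract described above: a hook either hands back a kmalloc'd name/value pair or opts out with -EOPNOTSUPP. Hypothetical module code, not part of this header; the function name and the "example" suffix are invented, and the usual kernel headers are assumed.

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Illustrative stub: supply a label for the new inode, or skip labeling. */
    static int example_inode_init_security(struct inode *inode, struct inode *dir,
                                           const struct qstr *qstr,
                                           const char **name, void **value,
                                           size_t *len)
    {
            if (IS_PRIVATE(dir))
                    return -EOPNOTSUPP;             /* don't label this inode */

            *name = kstrdup("example", GFP_NOFS);   /* suffix after "security." */
            *value = kstrdup("unlabeled", GFP_NOFS);
            if (!*name || !*value) {
                    kfree(*name);
                    kfree(*value);
                    return -ENOMEM;
            }
            *len = strlen(*value) + 1;              /* caller kfree()s name and value */
            return 0;
    }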
| 565 | * | ||
| 566 | * Security hooks for file operations | ||
| 567 | * | ||
| 568 | * @file_permission: | ||
| 569 | * Check file permissions before accessing an open file. This hook is | ||
| 570 | * called by various operations that read or write files. A security | ||
| 571 | * module can use this hook to perform additional checking on these | ||
| 572 | * operations, e.g. to revalidate permissions on use to support privilege | ||
| 573 | * bracketing or policy changes. Notice that this hook is used when the | ||
| 574 | * actual read/write operations are performed, whereas the | ||
| 575 | * inode_security_ops hook is called when a file is opened (as well as | ||
| 576 | * many other operations). | ||
| 577 | * Caveat: Although this hook can be used to revalidate permissions for | ||
| 578 | * various system call operations that read or write files, it does not | ||
| 579 | * address the revalidation of permissions for memory-mapped files. | ||
| 580 | * Security modules must handle this separately if they need such | ||
| 581 | * revalidation. | ||
| 582 | * @file contains the file structure being accessed. | ||
| 583 | * @mask contains the requested permissions. | ||
| 584 | * Return 0 if permission is granted. | ||
| 585 | * @file_alloc_security: | ||
| 586 | * Allocate and attach a security structure to the file->f_security field. | ||
| 587 | * The security field is initialized to NULL when the structure is first | ||
| 588 | * created. | ||
| 589 | * @file contains the file structure to secure. | ||
| 590 | * Return 0 if the hook is successful and permission is granted. | ||
| 591 | * @file_free_security: | ||
| 592 | * Deallocate and free any security structures stored in file->f_security. | ||
| 593 | * @file contains the file structure being modified. | ||
| 594 | * @file_ioctl: | ||
| 595 | * @file contains the file structure. | ||
| 596 | * @cmd contains the operation to perform. | ||
| 597 | * @arg contains the operational arguments. | ||
| 598 | * Check permission for an ioctl operation on @file. Note that @arg | ||
| 599 | * sometimes represents a user space pointer; in other cases, it may be a | ||
| 600 | * simple integer value. When @arg represents a user space pointer, it | ||
| 601 | * should never be used by the security module. | ||
| 602 | * Return 0 if permission is granted. | ||
| 603 | * @mmap_addr: | ||
| 604 | * Check permissions for a mmap operation at @addr. | ||
| 605 | * @addr contains virtual address that will be used for the operation. | ||
| 606 | * Return 0 if permission is granted. | ||
| 607 | * @mmap_file: | ||
| 608 | * Check permissions for a mmap operation. The @file may be NULL, e.g. | ||
| 609 | * if mapping anonymous memory. | ||
| 610 | * @file contains the file structure for file to map (may be NULL). | ||
| 611 | * @reqprot contains the protection requested by the application. | ||
| 612 | * @prot contains the protection that will be applied by the kernel. | ||
| 613 | * @flags contains the operational flags. | ||
| 614 | * Return 0 if permission is granted. | ||
| 615 | * @file_mprotect: | ||
| 616 | * Check permissions before changing memory access permissions. | ||
| 617 | * @vma contains the memory region to modify. | ||
| 618 | * @reqprot contains the protection requested by the application. | ||
| 619 | * @prot contains the protection that will be applied by the kernel. | ||
| 620 | * Return 0 if permission is granted. | ||
| 621 | * @file_lock: | ||
| 622 | * Check permission before performing file locking operations. | ||
| 623 | * Note: this hook mediates both flock and fcntl style locks. | ||
| 624 | * @file contains the file structure. | ||
| 625 | * @cmd contains the posix-translated lock operation to perform | ||
| 626 | * (e.g. F_RDLCK, F_WRLCK). | ||
| 627 | * Return 0 if permission is granted. | ||
| 628 | * @file_fcntl: | ||
| 629 | * Check permission before allowing the file operation specified by @cmd | ||
| 630 | * from being performed on the file @file. Note that @arg sometimes | ||
| 631 | * represents a user space pointer; in other cases, it may be a simple | ||
| 632 | * integer value. When @arg represents a user space pointer, it should | ||
| 633 | * never be used by the security module. | ||
| 634 | * @file contains the file structure. | ||
| 635 | * @cmd contains the operation to be performed. | ||
| 636 | * @arg contains the operational arguments. | ||
| 637 | * Return 0 if permission is granted. | ||
| 638 | * @file_set_fowner: | ||
| 639 | * Save owner security information (typically from current->security) in | ||
| 640 | * file->f_security for later use by the send_sigiotask hook. | ||
| 641 | * @file contains the file structure to update. | ||
| 642 | * Return 0 on success. | ||
| 643 | * @file_send_sigiotask: | ||
| 644 | * Check permission for the file owner @fown to send SIGIO or SIGURG to the | ||
| 645 | * process @tsk. Note that this hook is sometimes called from interrupt. | ||
| 646 | * Note that the fown_struct, @fown, is never outside the context of a | ||
| 647 | * struct file, so the file structure (and associated security information) | ||
| 648 | * can always be obtained: | ||
| 649 | * container_of(fown, struct file, f_owner) | ||
| 650 | * @tsk contains the structure of task receiving signal. | ||
| 651 | * @fown contains the file owner information. | ||
| 652 | * @sig is the signal that will be sent. When 0, kernel sends SIGIO. | ||
| 653 | * Return 0 if permission is granted. | ||
| 654 | * @file_receive: | ||
| 655 | * This hook allows security modules to control the ability of a process | ||
| 656 | * to receive an open file descriptor via socket IPC. | ||
| 657 | * @file contains the file structure being received. | ||
| 658 | * Return 0 if permission is granted. | ||
| 659 | * @file_open: | ||
| 660 | * Save open-time permission checking state for later use upon | ||
| 661 | * file_permission, and recheck access if anything has changed | ||
| 662 | * since inode_permission. | ||
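A minimal sketch of the container_of() recipe given for @file_send_sigiotask above (hypothetical module code; the grant-everything decision is illustrative only, and the hook must not sleep since it can run from interrupt context).

    #include <linux/fs.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Illustrative stub: recover the struct file that embeds @fown. */
    static int example_file_send_sigiotask(struct task_struct *tsk,
                                           struct fown_struct *fown, int sig)
    {
            struct file *file = container_of(fown, struct file, f_owner);

            if (!sig)
                    sig = SIGIO;    /* 0 means the kernel is sending SIGIO */

            /* A real module would compare file->f_security with @tsk's label. */
            pr_debug("SIGIO-style signal %d for %pD\n", sig, file);
            return 0;
    }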
| 663 | * | ||
| 664 | * Security hooks for task operations. | ||
| 665 | * | ||
| 666 | * @task_create: | ||
| 667 | * Check permission before creating a child process. See the clone(2) | ||
| 668 | * manual page for definitions of the @clone_flags. | ||
| 669 | * @clone_flags contains the flags indicating what should be shared. | ||
| 670 | * Return 0 if permission is granted. | ||
| 671 | * @task_free: | ||
| 672 | * @task task being freed | ||
| 673 | * Handle release of task-related resources. (Note that this can be called | ||
| 674 | * from interrupt context.) | ||
| 675 | * @cred_alloc_blank: | ||
| 676 | * @cred points to the credentials. | ||
| 677 | * @gfp indicates the atomicity of any memory allocations. | ||
| 678 | * Only allocate sufficient memory and attach to @cred such that | ||
| 679 | * cred_transfer() will not get ENOMEM. | ||
| 680 | * @cred_free: | ||
| 681 | * @cred points to the credentials. | ||
| 682 | * Deallocate and clear the cred->security field in a set of credentials. | ||
| 683 | * @cred_prepare: | ||
| 684 | * @new points to the new credentials. | ||
| 685 | * @old points to the original credentials. | ||
| 686 | * @gfp indicates the atomicity of any memory allocations. | ||
| 687 | * Prepare a new set of credentials by copying the data from the old set. | ||
| 688 | * @cred_transfer: | ||
| 689 | * @new points to the new credentials. | ||
| 690 | * @old points to the original credentials. | ||
| 691 | * Transfer data from original creds to new creds | ||
| 692 | * @kernel_act_as: | ||
| 693 | * Set the credentials for a kernel service to act as (subjective context). | ||
| 694 | * @new points to the credentials to be modified. | ||
| 695 | * @secid specifies the security ID to be set | ||
| 696 | * The current task must be the one that nominated @secid. | ||
| 697 | * Return 0 if successful. | ||
| 698 | * @kernel_create_files_as: | ||
| 699 | * Set the file creation context in a set of credentials to be the same as | ||
| 700 | * the objective context of the specified inode. | ||
| 701 | * @new points to the credentials to be modified. | ||
| 702 | * @inode points to the inode to use as a reference. | ||
| 703 | * The current task must be the one that nominated @inode. | ||
| 704 | * Return 0 if successful. | ||
| 705 | * @kernel_fw_from_file: | ||
| 706 | * Load firmware from userspace (not called for built-in firmware). | ||
| 707 | * @file contains the file structure pointing to the file containing | ||
| 708 | * the firmware to load. This argument will be NULL if the firmware | ||
| 709 | * was loaded via the uevent-triggered blob-based interface exposed | ||
| 710 | * by CONFIG_FW_LOADER_USER_HELPER. | ||
| 711 | * @buf pointer to buffer containing firmware contents. | ||
| 712 | * @size length of the firmware contents. | ||
| 713 | * Return 0 if permission is granted. | ||
| 714 | * @kernel_module_request: | ||
| 715 | * Ability to trigger the kernel to automatically upcall to userspace for | ||
| 716 | * userspace to load a kernel module with the given name. | ||
| 717 | * @kmod_name name of the module requested by the kernel | ||
| 718 | * Return 0 if successful. | ||
| 719 | * @kernel_module_from_file: | ||
| 720 | * Load a kernel module from userspace. | ||
| 721 | * @file contains the file structure pointing to the file containing | ||
| 722 | * the kernel module to load. If the module is being loaded from a blob, | ||
| 723 | * this argument will be NULL. | ||
| 724 | * Return 0 if permission is granted. | ||
| 725 | * @task_fix_setuid: | ||
| 726 | * Update the module's state after setting one or more of the user | ||
| 727 | * identity attributes of the current process. The @flags parameter | ||
| 728 | * indicates which of the set*uid system calls invoked this hook. | ||
| 729 | * @new is the set of credentials that will be installed. Modifications | ||
| 730 | * should be made to this rather than to @current->cred. | ||
| 731 | * @old is the set of credentials that are being replaced. | ||
| 732 | * @flags contains one of the LSM_SETID_* values. | ||
| 733 | * Return 0 on success. | ||
| 734 | * @task_setpgid: | ||
| 735 | * Check permission before setting the process group identifier of the | ||
| 736 | * process @p to @pgid. | ||
| 737 | * @p contains the task_struct for process being modified. | ||
| 738 | * @pgid contains the new pgid. | ||
| 739 | * Return 0 if permission is granted. | ||
| 740 | * @task_getpgid: | ||
| 741 | * Check permission before getting the process group identifier of the | ||
| 742 | * process @p. | ||
| 743 | * @p contains the task_struct for the process. | ||
| 744 | * Return 0 if permission is granted. | ||
| 745 | * @task_getsid: | ||
| 746 | * Check permission before getting the session identifier of the process | ||
| 747 | * @p. | ||
| 748 | * @p contains the task_struct for the process. | ||
| 749 | * Return 0 if permission is granted. | ||
| 750 | * @task_getsecid: | ||
| 751 | * Retrieve the security identifier of the process @p. | ||
| 752 | * @p contains the task_struct for the process; the result is placed in @secid. | ||
| 753 | * In case of failure, @secid will be set to zero. | ||
| 754 | * | ||
| 755 | * @task_setnice: | ||
| 756 | * Check permission before setting the nice value of @p to @nice. | ||
| 757 | * @p contains the task_struct of process. | ||
| 758 | * @nice contains the new nice value. | ||
| 759 | * Return 0 if permission is granted. | ||
| 760 | * @task_setioprio: | ||
| 761 | * Check permission before setting the ioprio value of @p to @ioprio. | ||
| 762 | * @p contains the task_struct of process. | ||
| 763 | * @ioprio contains the new ioprio value | ||
| 764 | * Return 0 if permission is granted. | ||
| 765 | * @task_getioprio: | ||
| 766 | * Check permission before getting the ioprio value of @p. | ||
| 767 | * @p contains the task_struct of process. | ||
| 768 | * Return 0 if permission is granted. | ||
| 769 | * @task_setrlimit: | ||
| 770 | * Check permission before setting the resource limits of the current | ||
| 771 | * process for @resource to @new_rlim. The old resource limit values can | ||
| 772 | * be examined by dereferencing (current->signal->rlim + resource). | ||
| 773 | * @resource contains the resource whose limit is being set. | ||
| 774 | * @new_rlim contains the new limits for @resource. | ||
| 775 | * Return 0 if permission is granted. | ||
| 776 | * @task_setscheduler: | ||
| 777 | * Check permission before setting scheduling policy and/or parameters of | ||
| 778 | * process @p based on @policy and @lp. | ||
| 779 | * @p contains the task_struct for process. | ||
| 780 | * @policy contains the scheduling policy. | ||
| 781 | * @lp contains the scheduling parameters. | ||
| 782 | * Return 0 if permission is granted. | ||
| 783 | * @task_getscheduler: | ||
| 784 | * Check permission before obtaining scheduling information for process | ||
| 785 | * @p. | ||
| 786 | * @p contains the task_struct for process. | ||
| 787 | * Return 0 if permission is granted. | ||
| 788 | * @task_movememory: | ||
| 789 | * Check permission before moving memory owned by process @p. | ||
| 790 | * @p contains the task_struct for process. | ||
| 791 | * Return 0 if permission is granted. | ||
| 792 | * @task_kill: | ||
| 793 | * Check permission before sending signal @sig to @p. @info can be NULL, | ||
| 794 | * the constant 1, or a pointer to a siginfo structure. If @info is 1 or | ||
| 795 | * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming | ||
| 796 | * from the kernel and should typically be permitted. | ||
| 797 | * SIGIO signals are handled separately by the send_sigiotask hook in | ||
| 798 | * file_security_ops. | ||
| 799 | * @p contains the task_struct for process. | ||
| 800 | * @info contains the signal information. | ||
| 801 | * @sig contains the signal value. | ||
| 802 | * @secid contains the sid of the process where the signal originated | ||
| 803 | * Return 0 if permission is granted. | ||
| 804 | * @task_wait: | ||
| 805 | * Check permission before allowing a process to reap a child process @p | ||
| 806 | * and collect its status information. | ||
| 807 | * @p contains the task_struct for process. | ||
| 808 | * Return 0 if permission is granted. | ||
| 809 | * @task_prctl: | ||
| 810 | * Check permission before performing a process control operation on the | ||
| 811 | * current process. | ||
| 812 | * @option contains the operation. | ||
| 813 | * @arg2 contains an argument. | ||
| 814 | * @arg3 contains an argument. | ||
| 815 | * @arg4 contains an argument. | ||
| 816 | * @arg5 contains an argument. | ||
| 817 | * Return -ENOSYS if no-one wanted to handle this op, any other value to | ||
| 818 | * cause prctl() to return immediately with that value. | ||
| 819 | * @task_to_inode: | ||
| 820 | * Set the security attributes for an inode based on an associated task's | ||
| 821 | * security attributes, e.g. for /proc/pid inodes. | ||
| 822 | * @p contains the task_struct for the task. | ||
| 823 | * @inode contains the inode structure for the inode. | ||
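A sketch of the default @task_prctl behaviour spelled out above (hypothetical module code): declining every option with -ENOSYS lets prctl() proceed as if the hook were absent.

    #include <linux/errno.h>

    /* Illustrative stub: handle no prctl() options at all.  Any other return
     * value would be passed straight back to the prctl() caller. */
    static int example_task_prctl(int option, unsigned long arg2,
                                  unsigned long arg3, unsigned long arg4,
                                  unsigned long arg5)
    {
            return -ENOSYS;
    }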
| 824 | * | ||
| 825 | * Security hooks for Netlink messaging. | ||
| 826 | * | ||
| 827 | * @netlink_send: | ||
| 828 | * Save security information for a netlink message so that permission | ||
| 829 | * checking can be performed when the message is processed. The security | ||
| 830 | * information can be saved using the eff_cap field of the | ||
| 831 | * netlink_skb_parms structure. Also may be used to provide fine | ||
| 832 | * grained control over message transmission. | ||
| 833 | * @sk associated sock of task sending the message. | ||
| 834 | * @skb contains the sk_buff structure for the netlink message. | ||
| 835 | * Return 0 if the information was successfully saved and message | ||
| 836 | * is allowed to be transmitted. | ||
| 837 | * | ||
| 838 | * Security hooks for Unix domain networking. | ||
| 839 | * | ||
| 840 | * @unix_stream_connect: | ||
| 841 | * Check permissions before establishing a Unix domain stream connection | ||
| 842 | * between @sock and @other. | ||
| 843 | * @sock contains the sock structure. | ||
| 844 | * @other contains the peer sock structure. | ||
| 845 | * @newsk contains the new sock structure. | ||
| 846 | * Return 0 if permission is granted. | ||
| 847 | * @unix_may_send: | ||
| 848 | * Check permissions before connecting or sending datagrams from @sock to | ||
| 849 | * @other. | ||
| 850 | * @sock contains the socket structure. | ||
| 851 | * @other contains the peer socket structure. | ||
| 852 | * Return 0 if permission is granted. | ||
| 853 | * | ||
| 854 | * The @unix_stream_connect and @unix_may_send hooks were necessary because | ||
| 855 | * Linux provides an alternative to the conventional file name space for Unix | ||
| 856 | * domain sockets. Whereas binding and connecting to sockets in the file name | ||
| 857 | * space is mediated by the typical file permissions (and caught by the mknod | ||
| 858 | * and permission hooks in inode_security_ops), binding and connecting to | ||
| 859 | * sockets in the abstract name space is completely unmediated. Sufficient | ||
| 860 | * control of Unix domain sockets in the abstract name space isn't possible | ||
| 861 | * using only the socket layer hooks, since we need to know the actual target | ||
| 862 | * socket, which is not looked up until we are inside the af_unix code. | ||
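To make the abstract-namespace point concrete, here is a sketch of the @unix_may_send shape (hypothetical module code; the always-allow decision is illustrative): by the time this hook runs, af_unix has already resolved both endpoints, so the target socket is available even for abstract-namespace addresses.

    #include <linux/net.h>

    /* Illustrative stub: both endpoints are resolved, so their security
     * state can be compared directly. */
    static int example_unix_may_send(struct socket *sock, struct socket *other)
    {
            /* A real module would compare sock->sk->sk_security with
             * other->sk->sk_security here. */
            return 0;       /* permission granted */
    }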
| 863 | * | ||
| 864 | * Security hooks for socket operations. | ||
| 865 | * | ||
| 866 | * @socket_create: | ||
| 867 | * Check permissions prior to creating a new socket. | ||
| 868 | * @family contains the requested protocol family. | ||
| 869 | * @type contains the requested communications type. | ||
| 870 | * @protocol contains the requested protocol. | ||
| 871 | * @kern set to 1 if a kernel socket. | ||
| 872 | * Return 0 if permission is granted. | ||
| 873 | * @socket_post_create: | ||
| 874 | * This hook allows a module to update or allocate a per-socket security | ||
| 875 | * structure. Note that the security field was not added directly to the | ||
| 876 | * socket structure, but rather, the socket security information is stored | ||
| 877 | * in the associated inode. Typically, the inode alloc_security hook will | ||
| 878 | * allocate and attach security information to | ||
| 879 | * sock->inode->i_security. This hook may be used to update the | ||
| 880 | * sock->inode->i_security field with additional information that wasn't | ||
| 881 | * available when the inode was allocated. | ||
| 882 | * @sock contains the newly created socket structure. | ||
| 883 | * @family contains the requested protocol family. | ||
| 884 | * @type contains the requested communications type. | ||
| 885 | * @protocol contains the requested protocol. | ||
| 886 | * @kern set to 1 if a kernel socket. | ||
| 887 | * @socket_bind: | ||
| 888 | * Check permission before socket protocol layer bind operation is | ||
| 889 | * performed and the socket @sock is bound to the address specified in the | ||
| 890 | * @address parameter. | ||
| 891 | * @sock contains the socket structure. | ||
| 892 | * @address contains the address to bind to. | ||
| 893 | * @addrlen contains the length of address. | ||
| 894 | * Return 0 if permission is granted. | ||
| 895 | * @socket_connect: | ||
| 896 | * Check permission before socket protocol layer connect operation | ||
| 897 | * attempts to connect socket @sock to a remote address, @address. | ||
| 898 | * @sock contains the socket structure. | ||
| 899 | * @address contains the address of remote endpoint. | ||
| 900 | * @addrlen contains the length of address. | ||
| 901 | * Return 0 if permission is granted. | ||
| 902 | * @socket_listen: | ||
| 903 | * Check permission before socket protocol layer listen operation. | ||
| 904 | * @sock contains the socket structure. | ||
| 905 | * @backlog contains the maximum length for the pending connection queue. | ||
| 906 | * Return 0 if permission is granted. | ||
| 907 | * @socket_accept: | ||
| 908 | * Check permission before accepting a new connection. Note that the new | ||
| 909 | * socket, @newsock, has been created and some information copied to it, | ||
| 910 | * but the accept operation has not actually been performed. | ||
| 911 | * @sock contains the listening socket structure. | ||
| 912 | * @newsock contains the newly created server socket for connection. | ||
| 913 | * Return 0 if permission is granted. | ||
| 914 | * @socket_sendmsg: | ||
| 915 | * Check permission before transmitting a message to another socket. | ||
| 916 | * @sock contains the socket structure. | ||
| 917 | * @msg contains the message to be transmitted. | ||
| 918 | * @size contains the size of message. | ||
| 919 | * Return 0 if permission is granted. | ||
| 920 | * @socket_recvmsg: | ||
| 921 | * Check permission before receiving a message from a socket. | ||
| 922 | * @sock contains the socket structure. | ||
| 923 | * @msg contains the message structure. | ||
| 924 | * @size contains the size of message structure. | ||
| 925 | * @flags contains the operational flags. | ||
| 926 | * Return 0 if permission is granted. | ||
| 927 | * @socket_getsockname: | ||
| 928 | * Check permission before the local address (name) of the socket object | ||
| 929 | * @sock is retrieved. | ||
| 930 | * @sock contains the socket structure. | ||
| 931 | * Return 0 if permission is granted. | ||
| 932 | * @socket_getpeername: | ||
| 933 | * Check permission before the remote address (name) of a socket object | ||
| 934 | * @sock is retrieved. | ||
| 935 | * @sock contains the socket structure. | ||
| 936 | * Return 0 if permission is granted. | ||
| 937 | * @socket_getsockopt: | ||
| 938 | * Check permissions before retrieving the options associated with socket | ||
| 939 | * @sock. | ||
| 940 | * @sock contains the socket structure. | ||
| 941 | * @level contains the protocol level to retrieve option from. | ||
| 942 | * @optname contains the name of option to retrieve. | ||
| 943 | * Return 0 if permission is granted. | ||
| 944 | * @socket_setsockopt: | ||
| 945 | * Check permissions before setting the options associated with socket | ||
| 946 | * @sock. | ||
| 947 | * @sock contains the socket structure. | ||
| 948 | * @level contains the protocol level to set options for. | ||
| 949 | * @optname contains the name of the option to set. | ||
| 950 | * Return 0 if permission is granted. | ||
| 951 | * @socket_shutdown: | ||
| 952 | * Check permission before all or part of a connection on the socket | ||
| 953 | * @sock is shut down. | ||
| 954 | * @sock contains the socket structure. | ||
| 955 | * @how contains the flag indicating how future sends and receives are handled. | ||
| 956 | * Return 0 if permission is granted. | ||
| 957 | * @socket_sock_rcv_skb: | ||
| 958 | * Check permissions on incoming network packets. This hook is distinct | ||
| 959 | * from Netfilter's IP input hooks since it is the first time that the | ||
| 960 | * incoming sk_buff @skb has been associated with a particular socket, @sk. | ||
| 961 | * Must not sleep inside this hook because some callers hold spinlocks. | ||
| 962 | * @sk contains the sock (not socket) associated with the incoming sk_buff. | ||
| 963 | * @skb contains the incoming network data. | ||
| 964 | * @socket_getpeersec_stream: | ||
| 965 | * This hook allows the security module to provide peer socket security | ||
| 966 | * state for unix or connected tcp sockets to userspace via getsockopt | ||
| 967 | * SO_PEERSEC. For tcp sockets this can be meaningful if the | ||
| 968 | * socket is associated with an ipsec SA. | ||
| 969 | * @sock is the local socket. | ||
| 970 | * @optval userspace memory where the security state is to be copied. | ||
| 971 | * @optlen userspace int where the module should copy the actual length | ||
| 972 | * of the security state. | ||
| 973 | * @len as input is the maximum length to copy to userspace provided | ||
| 974 | * by the caller. | ||
| 975 | * Return 0 if all is well, otherwise, typical getsockopt return | ||
| 976 | * values. | ||
| 977 | * @socket_getpeersec_dgram: | ||
| 978 | * This hook allows the security module to provide peer socket security | ||
| 979 | * state for udp sockets on a per-packet basis to userspace via | ||
| 980 | * getsockopt SO_PEERSEC. The application must first have indicated | ||
| 981 | * the IP_PASSSEC option via setsockopt. It can then retrieve the | ||
| 982 | * security state returned by this hook for a packet via the SCM_SECURITY | ||
| 983 | * ancillary message type. | ||
| 984 | * @skb is the skbuff for the packet being queried | ||
| 985 | * @secdata is a pointer to a buffer in which to copy the security data | ||
| 986 | * @seclen is the maximum length for @secdata | ||
| 987 | * Return 0 on success, error on failure. | ||
| 988 | * @sk_alloc_security: | ||
| 989 | * Allocate and attach a security structure to the sk->sk_security field, | ||
| 990 | * which is used to copy security attributes between local stream sockets. | ||
| 991 | * @sk_free_security: | ||
| 992 | * Deallocate security structure. | ||
| 993 | * @sk_clone_security: | ||
| 994 | * Clone/copy security structure. | ||
| 995 | * @sk_getsecid: | ||
| 996 | * Retrieve the LSM-specific secid for the sock to enable caching of network | ||
| 997 | * authorizations. | ||
| 998 | * @sock_graft: | ||
| 999 | * Sets the socket's isec sid to the sock's sid. | ||
| 1000 | * @inet_conn_request: | ||
| 1001 | * Sets the openreq's sid to socket's sid with MLS portion taken from peer sid. | ||
| 1002 | * @inet_csk_clone: | ||
| 1003 | * Sets the new child socket's sid to the openreq sid. | ||
| 1004 | * @inet_conn_established: | ||
| 1005 | * Sets the connection's peersid to the secmark on skb. | ||
| 1006 | * @secmark_relabel_packet: | ||
| 1007 | * Check whether the process should be allowed to relabel packets to the given secid. | ||
| 1008 | * @security_secmark_refcount_inc: | ||
| 1009 | * Tells the LSM to increment the number of secmark labeling rules loaded. | ||
| 1010 | * @security_secmark_refcount_dec: | ||
| 1011 | * Tells the LSM to decrement the number of secmark labeling rules loaded. | ||
| 1012 | * @req_classify_flow: | ||
| 1013 | * Sets the flow's sid to the openreq sid. | ||
| 1014 | * @tun_dev_alloc_security: | ||
| 1015 | * This hook allows a module to allocate a security structure for a TUN | ||
| 1016 | * device. | ||
| 1017 | * @security pointer to a security structure pointer. | ||
| 1018 | * Return 0 on success, negative values on failure. | ||
| 1019 | * @tun_dev_free_security: | ||
| 1020 | * This hook allows a module to free the security structure for a TUN | ||
| 1021 | * device. | ||
| 1022 | * @security pointer to the TUN device's security structure | ||
| 1023 | * @tun_dev_create: | ||
| 1024 | * Check permissions prior to creating a new TUN device. | ||
| 1025 | * @tun_dev_attach_queue: | ||
| 1026 | * Check permissions prior to attaching to a TUN device queue. | ||
| 1027 | * @security pointer to the TUN device's security structure. | ||
| 1028 | * @tun_dev_attach: | ||
| 1029 | * This hook can be used by the module to update any security state | ||
| 1030 | * associated with the TUN device's sock structure. | ||
| 1031 | * @sk contains the existing sock structure. | ||
| 1032 | * @security pointer to the TUN device's security structure. | ||
| 1033 | * @tun_dev_open: | ||
| 1034 | * This hook can be used by the module to update any security state | ||
| 1035 | * associated with the TUN device's security structure. | ||
| 1036 | * @security pointer to the TUN device's security structure. | ||
| 1037 | * @skb_owned_by: | ||
| 1038 | * This hook sets the packet's owning sock. | ||
| 1039 | * @skb is the packet. | ||
| 1040 | * @sk the sock which owns the packet. | ||
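A sketch of a @socket_create check (hypothetical module code; the policy of requiring CAP_NET_RAW for userspace packet sockets is invented for illustration).

    #include <linux/capability.h>
    #include <linux/errno.h>
    #include <linux/socket.h>

    /* Illustrative stub: kernel-internal sockets (@kern == 1) bypass the
     * check; userspace PF_PACKET sockets need CAP_NET_RAW. */
    static int example_socket_create(int family, int type, int protocol, int kern)
    {
            if (!kern && family == PF_PACKET && !capable(CAP_NET_RAW))
                    return -EACCES;
            return 0;
    }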
| 1041 | * | ||
| 1042 | * Security hooks for XFRM operations. | ||
| 1043 | * | ||
| 1044 | * @xfrm_policy_alloc_security: | ||
| 1045 | * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy | ||
| 1046 | * Database used by the XFRM system. | ||
| 1047 | * @sec_ctx contains the security context information being provided by | ||
| 1048 | * the user-level policy update program (e.g., setkey). | ||
| 1049 | * Allocate a security structure to the xp->security field; the security | ||
| 1050 | * field is initialized to NULL when the xfrm_policy is allocated. | ||
| 1051 | * Return 0 if operation was successful (memory to allocate, legal context). | ||
| 1052 | * @gfp is to specify the context for the allocation. | ||
| 1053 | * @xfrm_policy_clone_security: | ||
| 1054 | * @old_ctx contains an existing xfrm_sec_ctx. | ||
| 1055 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. | ||
| 1056 | * Allocate a security structure in new_ctxp that contains the | ||
| 1057 | * information from the old_ctx structure. | ||
| 1058 | * Return 0 if operation was successful (memory to allocate). | ||
| 1059 | * @xfrm_policy_free_security: | ||
| 1060 | * @ctx contains the xfrm_sec_ctx | ||
| 1061 | * Deallocate xp->security. | ||
| 1062 | * @xfrm_policy_delete_security: | ||
| 1063 | * @ctx contains the xfrm_sec_ctx. | ||
| 1064 | * Authorize deletion of xp->security. | ||
| 1065 | * @xfrm_state_alloc: | ||
| 1066 | * @x contains the xfrm_state being added to the Security Association | ||
| 1067 | * Database by the XFRM system. | ||
| 1068 | * @sec_ctx contains the security context information being provided by | ||
| 1069 | * the user-level SA generation program (e.g., setkey or racoon). | ||
| 1070 | * Allocate a security structure to the x->security field; the security | ||
| 1071 | * field is initialized to NULL when the xfrm_state is allocated. Set the | ||
| 1072 | * context to correspond to sec_ctx. Return 0 if operation was successful | ||
| 1073 | * (memory to allocate, legal context). | ||
| 1074 | * @xfrm_state_alloc_acquire: | ||
| 1075 | * @x contains the xfrm_state being added to the Security Association | ||
| 1076 | * Database by the XFRM system. | ||
| 1077 | * @polsec contains the policy's security context. | ||
| 1078 | * @secid contains the secid from which to take the mls portion of the | ||
| 1079 | * context. | ||
| 1080 | * Allocate a security structure to the x->security field; the security | ||
| 1081 | * field is initialized to NULL when the xfrm_state is allocated. Set the | ||
| 1082 | * context to correspond to secid. Return 0 if operation was successful | ||
| 1083 | * (memory to allocate, legal context). | ||
| 1084 | * @xfrm_state_free_security: | ||
| 1085 | * @x contains the xfrm_state. | ||
| 1086 | * Deallocate x->security. | ||
| 1087 | * @xfrm_state_delete_security: | ||
| 1088 | * @x contains the xfrm_state. | ||
| 1089 | * Authorize deletion of x->security. | ||
| 1090 | * @xfrm_policy_lookup: | ||
| 1091 | * @ctx contains the xfrm_sec_ctx for which the access control is being | ||
| 1092 | * checked. | ||
| 1093 | * @fl_secid contains the flow security label that is used to authorize | ||
| 1094 | * access to the policy xp. | ||
| 1095 | * @dir contains the direction of the flow (input or output). | ||
| 1096 | * Check permission when a flow selects a xfrm_policy for processing | ||
| 1097 | * XFRMs on a packet. The hook is called when selecting either a | ||
| 1098 | * per-socket policy or a generic xfrm policy. | ||
| 1099 | * Return 0 if permission is granted, -ESRCH otherwise, or -errno | ||
| 1100 | * on other errors. | ||
| 1101 | * @xfrm_state_pol_flow_match: | ||
| 1102 | * @x contains the state to match. | ||
| 1103 | * @xp contains the policy to check for a match. | ||
| 1104 | * @fl contains the flow to check for a match. | ||
| 1105 | * Return 1 if there is a match. | ||
| 1106 | * @xfrm_decode_session: | ||
| 1107 | * @skb points to skb to decode. | ||
| 1108 | * @secid points to the flow key secid to set. | ||
| 1109 | * @ckall says if all xfrms used should be checked for same secid. | ||
| 1110 | * Return 0 if ckall is zero or all xfrms used have the same secid. | ||
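A sketch of @xfrm_decode_session for a module that attaches no security context to XFRM state (hypothetical module code).

    #include <linux/skbuff.h>

    /* Illustrative stub: report "no secid" for every packet.  When @ckall is
     * set, a real module would walk the skb's sec_path and fail if the xfrms
     * carried differing secids. */
    static int example_xfrm_decode_session(struct sk_buff *skb, u32 *secid,
                                           int ckall)
    {
            *secid = 0;
            return 0;
    }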
| 1111 | * | ||
| 1112 | * Security hooks affecting all Key Management operations | ||
| 1113 | * | ||
| 1114 | * @key_alloc: | ||
| 1115 | * Permit allocation of a key and assign security data. Note that key does | ||
| 1116 | * not have a serial number assigned at this point. | ||
| 1117 | * @key points to the key. | ||
| 1118 | * @flags is the allocation flags | ||
| 1119 | * Return 0 if permission is granted, -ve error otherwise. | ||
| 1120 | * @key_free: | ||
| 1121 | * Notification of destruction; free security data. | ||
| 1122 | * @key points to the key. | ||
| 1123 | * No return value. | ||
| 1124 | * @key_permission: | ||
| 1125 | * See whether a specific operational right is granted to a process on a | ||
| 1126 | * key. | ||
| 1127 | * @key_ref refers to the key (key pointer + possession attribute bit). | ||
| 1128 | * @cred points to the credentials to provide the context against which to | ||
| 1129 | * evaluate the security data on the key. | ||
| 1130 | * @perm describes the combination of permissions required of this key. | ||
| 1131 | * Return 0 if permission is granted, -ve error otherwise. | ||
| 1132 | * @key_getsecurity: | ||
| 1133 | * Get a textual representation of the security context attached to a key | ||
| 1134 | * for the purposes of honouring KEYCTL_GETSECURITY. This function | ||
| 1135 | * allocates the storage for the NUL-terminated string and the caller | ||
| 1136 | * should free it. | ||
| 1137 | * @key points to the key to be queried. | ||
| 1138 | * @_buffer points to a pointer that should be set to point to the | ||
| 1139 | * resulting string (or left NULL if there is no label or an error occurs). | ||
| 1140 | * Return the length of the string (including terminating NUL) or -ve if | ||
| 1141 | * an error. | ||
| 1142 | * May also return 0 (and a NULL buffer pointer) if there is no label. | ||
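A sketch of the @key_permission shape (hypothetical module code; granting every request is only sensible for a module that does not label keys).

    #include <linux/cred.h>
    #include <linux/key.h>

    /* Illustrative stub: no per-key security data, so every combination of
     * permissions requested in @perm is granted. */
    static int example_key_permission(key_ref_t key_ref, const struct cred *cred,
                                      unsigned perm)
    {
            return 0;
    }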
| 1143 | * | ||
| 1144 | * Security hooks affecting all System V IPC operations. | ||
| 1145 | * | ||
| 1146 | * @ipc_permission: | ||
| 1147 | * Check permissions for access to IPC | ||
| 1148 | * @ipcp contains the kernel IPC permission structure | ||
| 1149 | * @flag contains the desired (requested) permission set | ||
| 1150 | * Return 0 if permission is granted. | ||
| 1151 | * @ipc_getsecid: | ||
| 1152 | * Get the secid associated with the ipc object. | ||
| 1153 | * @ipcp contains the kernel IPC permission structure. | ||
| 1154 | * @secid contains a pointer to the location where result will be saved. | ||
| 1155 | * In case of failure, @secid will be set to zero. | ||
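A sketch of @ipc_permission (hypothetical module code; the owner-only rule for write-style access is invented for illustration, and the assumption that @flag uses the S_I*UGO bits passed to ipcperms() should be checked against the callers).

    #include <linux/cred.h>
    #include <linux/errno.h>
    #include <linux/ipc.h>
    #include <linux/stat.h>
    #include <linux/uidgid.h>

    /* Illustrative stub: deny write-style access to IPC objects the caller
     * does not own; read-style access is left to the normal DAC checks. */
    static int example_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
    {
            if ((flag & S_IWUGO) && !uid_eq(current_euid(), ipcp->uid))
                    return -EACCES;
            return 0;
    }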
| 1156 | * | ||
| 1157 | * Security hooks for individual messages held in System V IPC message queues | ||
| 1158 | * @msg_msg_alloc_security: | ||
| 1159 | * Allocate and attach a security structure to the msg->security field. | ||
| 1160 | * The security field is initialized to NULL when the structure is first | ||
| 1161 | * created. | ||
| 1162 | * @msg contains the message structure to be modified. | ||
| 1163 | * Return 0 if operation was successful and permission is granted. | ||
| 1164 | * @msg_msg_free_security: | ||
| 1165 | * Deallocate the security structure for this message. | ||
| 1166 | * @msg contains the message structure to be modified. | ||
| 1167 | * | ||
| 1168 | * Security hooks for System V IPC Message Queues | ||
| 1169 | * | ||
| 1170 | * @msg_queue_alloc_security: | ||
| 1171 | * Allocate and attach a security structure to the | ||
| 1172 | * msq->q_perm.security field. The security field is initialized to | ||
| 1173 | * NULL when the structure is first created. | ||
| 1174 | * @msq contains the message queue structure to be modified. | ||
| 1175 | * Return 0 if operation was successful and permission is granted. | ||
| 1176 | * @msg_queue_free_security: | ||
| 1177 | * Deallocate security structure for this message queue. | ||
| 1178 | * @msq contains the message queue structure to be modified. | ||
| 1179 | * @msg_queue_associate: | ||
| 1180 | * Check permission when a message queue is requested through the | ||
| 1181 | * msgget system call. This hook is only called when returning the | ||
| 1182 | * message queue identifier for an existing message queue, not when a | ||
| 1183 | * new message queue is created. | ||
| 1184 | * @msq contains the message queue to act upon. | ||
| 1185 | * @msqflg contains the operation control flags. | ||
| 1186 | * Return 0 if permission is granted. | ||
| 1187 | * @msg_queue_msgctl: | ||
| 1188 | * Check permission when a message control operation specified by @cmd | ||
| 1189 | * is to be performed on the message queue @msq. | ||
| 1190 | * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. | ||
| 1191 | * @msq contains the message queue to act upon. May be NULL. | ||
| 1192 | * @cmd contains the operation to be performed. | ||
| 1193 | * Return 0 if permission is granted. | ||
| 1194 | * @msg_queue_msgsnd: | ||
| 1195 | * Check permission before a message, @msg, is enqueued on the message | ||
| 1196 | * queue, @msq. | ||
| 1197 | * @msq contains the message queue to send message to. | ||
| 1198 | * @msg contains the message to be enqueued. | ||
| 1199 | * @msqflg contains operational flags. | ||
| 1200 | * Return 0 if permission is granted. | ||
| 1201 | * @msg_queue_msgrcv: | ||
| 1202 | * Check permission before a message, @msg, is removed from the message | ||
| 1203 | * queue, @msq. The @target task structure contains a pointer to the | ||
| 1204 | * process that will be receiving the message (not equal to the current | ||
| 1205 | * process when inline receives are being performed). | ||
| 1206 | * @msq contains the message queue to retrieve message from. | ||
| 1207 | * @msg contains the message destination. | ||
| 1208 | * @target contains the task structure for recipient process. | ||
| 1209 | * @type contains the type of message requested. | ||
| 1210 | * @mode contains the operational flags. | ||
| 1211 | * Return 0 if permission is granted. | ||
| 1212 | * | ||
| 1213 | * Security hooks for System V Shared Memory Segments | ||
| 1214 | * | ||
| 1215 | * @shm_alloc_security: | ||
| 1216 | * Allocate and attach a security structure to the shp->shm_perm.security | ||
| 1217 | * field. The security field is initialized to NULL when the structure is | ||
| 1218 | * first created. | ||
| 1219 | * @shp contains the shared memory structure to be modified. | ||
| 1220 | * Return 0 if operation was successful and permission is granted. | ||
| 1221 | * @shm_free_security: | ||
| 1222 | * Deallocate the security struct for this memory segment. | ||
| 1223 | * @shp contains the shared memory structure to be modified. | ||
| 1224 | * @shm_associate: | ||
| 1225 | * Check permission when a shared memory region is requested through the | ||
| 1226 | * shmget system call. This hook is only called when returning the shared | ||
| 1227 | * memory region identifier for an existing region, not when a new shared | ||
| 1228 | * memory region is created. | ||
| 1229 | * @shp contains the shared memory structure to be modified. | ||
| 1230 | * @shmflg contains the operation control flags. | ||
| 1231 | * Return 0 if permission is granted. | ||
| 1232 | * @shm_shmctl: | ||
| 1233 | * Check permission when a shared memory control operation specified by | ||
| 1234 | * @cmd is to be performed on the shared memory region @shp. | ||
| 1235 | * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO. | ||
| 1236 | * @shp contains shared memory structure to be modified. | ||
| 1237 | * @cmd contains the operation to be performed. | ||
| 1238 | * Return 0 if permission is granted. | ||
| 1239 | * @shm_shmat: | ||
| 1240 | * Check permissions prior to allowing the shmat system call to attach the | ||
| 1241 | * shared memory segment @shp to the data segment of the calling process. | ||
| 1242 | * The attaching address is specified by @shmaddr. | ||
| 1243 | * @shp contains the shared memory structure to be modified. | ||
| 1244 | * @shmaddr contains the address to attach memory region to. | ||
| 1245 | * @shmflg contains the operational flags. | ||
| 1246 | * Return 0 if permission is granted. | ||
| 1247 | * | ||
| 1248 | * Security hooks for System V Semaphores | ||
| 1249 | * | ||
| 1250 | * @sem_alloc_security: | ||
| 1251 | * Allocate and attach a security structure to the sma->sem_perm.security | ||
| 1252 | * field. The security field is initialized to NULL when the structure is | ||
| 1253 | * first created. | ||
| 1254 | * @sma contains the semaphore structure | ||
| 1255 | * Return 0 if operation was successful and permission is granted. | ||
| 1256 | * @sem_free_security: | ||
| 1257 | * Deallocate security struct for this semaphore. | ||
| 1258 | * @sma contains the semaphore structure. | ||
| 1259 | * @sem_associate: | ||
| 1260 | * Check permission when a semaphore is requested through the semget | ||
| 1261 | * system call. This hook is only called when returning the semaphore | ||
| 1262 | * identifier for an existing semaphore, not when a new one must be | ||
| 1263 | * created. | ||
| 1264 | * @sma contains the semaphore structure. | ||
| 1265 | * @semflg contains the operation control flags. | ||
| 1266 | * Return 0 if permission is granted. | ||
| 1267 | * @sem_semctl: | ||
| 1268 | * Check permission when a semaphore operation specified by @cmd is to be | ||
| 1269 | * performed on the semaphore @sma. The @sma may be NULL, e.g. for | ||
| 1270 | * IPC_INFO or SEM_INFO. | ||
| 1271 | * @sma contains the semaphore structure. May be NULL. | ||
| 1272 | * @cmd contains the operation to be performed. | ||
| 1273 | * Return 0 if permission is granted. | ||
| 1274 | * @sem_semop: | ||
| 1275 | * Check permissions before performing operations on members of the | ||
| 1276 | * semaphore set @sma. If the @alter flag is nonzero, the semaphore set | ||
| 1277 | * may be modified. | ||
| 1278 | * @sma contains the semaphore structure. | ||
| 1279 | * @sops contains the operations to perform. | ||
| 1280 | * @nsops contains the number of operations to perform. | ||
| 1281 | * @alter contains the flag indicating whether changes are to be made. | ||
| 1282 | * Return 0 if permission is granted. | ||
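A sketch of @sem_semop (hypothetical module code) showing how the @alter flag is typically used to pick between read-style and write-style checks.

    #include <linux/sem.h>

    /* Illustrative stub: @alter != 0 means the operations may modify the
     * semaphore set; a restrictive module would demand write-style access
     * for that case and read-style access otherwise. */
    static int example_sem_semop(struct sem_array *sma, struct sembuf *sops,
                                 unsigned nsops, int alter)
    {
            return 0;       /* permission granted either way in this sketch */
    }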
| 1283 | * | ||
| 1284 | * @binder_set_context_mgr: | ||
| 1285 | * Check whether @mgr is allowed to be the binder context manager. | ||
| 1286 | * @mgr contains the task_struct for the task being registered. | ||
| 1287 | * Return 0 if permission is granted. | ||
| 1288 | * @binder_transaction: | ||
| 1289 | * Check whether @from is allowed to invoke a binder transaction call | ||
| 1290 | * to @to. | ||
| 1291 | * @from contains the task_struct for the sending task. | ||
| 1292 | * @to contains the task_struct for the receiving task. | ||
| 1293 | * @binder_transfer_binder: | ||
| 1294 | * Check whether @from is allowed to transfer a binder reference to @to. | ||
| 1295 | * @from contains the task_struct for the sending task. | ||
| 1296 | * @to contains the task_struct for the receiving task. | ||
| 1297 | * @binder_transfer_file: | ||
| 1298 | * Check whether @from is allowed to transfer @file to @to. | ||
| 1299 | * @from contains the task_struct for the sending task. | ||
| 1300 | * @file contains the struct file being transferred. | ||
| 1301 | * @to contains the task_struct for the receiving task. | ||
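A sketch of the binder hooks' common shape, using @binder_transaction (hypothetical module code; the always-allow decision is illustrative).

    #include <linux/sched.h>

    /* Illustrative stub: a real module would compare the security labels of
     * the sending and receiving tasks before allowing the transaction. */
    static int example_binder_transaction(struct task_struct *from,
                                          struct task_struct *to)
    {
            return 0;       /* permission granted */
    }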
| 1302 | * | ||
| 1303 | * @ptrace_access_check: | ||
| 1304 | * Check permission before allowing the current process to trace the | ||
| 1305 | * @child process. | ||
| 1306 | * Security modules may also want to perform a process tracing check | ||
| 1307 | * during an execve in the bprm_set_creds hook of binprm_security_ops | ||
| 1308 | * (formerly the set_security/apply_creds hooks) if the process is | ||
| 1309 | * being traced and its security attributes would be changed by the | ||
| 1310 | * execve. | ||
| 1311 | * @child contains the task_struct structure for the target process. | ||
| 1312 | * @mode contains the PTRACE_MODE flags indicating the form of access. | ||
| 1313 | * Return 0 if permission is granted. | ||
| 1314 | * @ptrace_traceme: | ||
| 1315 | * Check that the @parent process has sufficient permission to trace the | ||
| 1316 | * current process before allowing the current process to present itself | ||
| 1317 | * to the @parent process for tracing. | ||
| 1318 | * @parent contains the task_struct structure for debugger process. | ||
| 1319 | * Return 0 if permission is granted. | ||
| 1320 | * @capget: | ||
| 1321 | * Get the @effective, @inheritable, and @permitted capability sets for | ||
| 1322 | * the @target process. The hook may also perform permission checking to | ||
| 1323 | * determine if the current process is allowed to see the capability sets | ||
| 1324 | * of the @target process. | ||
| 1325 | * @target contains the task_struct structure for target process. | ||
| 1326 | * @effective contains the effective capability set. | ||
| 1327 | * @inheritable contains the inheritable capability set. | ||
| 1328 | * @permitted contains the permitted capability set. | ||
| 1329 | * Return 0 if the capability sets were successfully obtained. | ||
| 1330 | * @capset: | ||
| 1331 | * Set the @effective, @inheritable, and @permitted capability sets for | ||
| 1332 | * the current process. | ||
| 1333 | * @new contains the new credentials structure for target process. | ||
| 1334 | * @old contains the current credentials structure for target process. | ||
| 1335 | * @effective contains the effective capability set. | ||
| 1336 | * @inheritable contains the inheritable capability set. | ||
| 1337 | * @permitted contains the permitted capability set. | ||
| 1338 | * Return 0 and update @new if permission is granted. | ||
| 1339 | * @capable: | ||
| 1340 | * Check whether the @tsk process has the @cap capability in the indicated | ||
| 1341 | * credentials. | ||
| 1342 | * @cred contains the credentials to use. | ||
| 1343 | * @ns contains the user namespace we want the capability in | ||
| 1344 | * @cap contains the capability <include/linux/capability.h>. | ||
| 1345 | * @audit: Whether to write an audit message or not | ||
| 1346 | * Return 0 if the capability is granted for @tsk. | ||
| 1347 | * @syslog: | ||
| 1348 | * Check permission before accessing the kernel message ring or changing | ||
| 1349 | * logging to the console. | ||
| 1350 | * See the syslog(2) manual page for an explanation of the @type values. | ||
| 1351 | * @type contains the type of action. | ||
| 1352 | * @from_file indicates the context of action (if it came from /proc). | ||
| 1353 | * Return 0 if permission is granted. | ||
| 1354 | * @settime: | ||
| 1355 | * Check permission to change the system time. | ||
| 1356 | * struct timespec and timezone are defined in include/linux/time.h | ||
| 1357 | * @ts contains new time | ||
| 1358 | * @tz contains new timezone | ||
| 1359 | * Return 0 if permission is granted. | ||
| 1360 | * @vm_enough_memory: | ||
| 1361 | * Check permissions for allocating a new virtual mapping. | ||
| 1362 | * @mm contains the mm struct it is being added to. | ||
| 1363 | * @pages contains the number of pages. | ||
| 1364 | * Return 0 if permission is granted. | ||
| 1365 | * | ||
| 1366 | * @ismaclabel: | ||
| 1367 | * Check if the extended attribute specified by @name | ||
| 1368 | * represents a MAC label. Returns 1 if name is a MAC | ||
| 1369 | * attribute, otherwise returns 0. | ||
| 1370 | * @name full extended attribute name to check against | ||
| 1371 | * LSM as a MAC label. | ||
| 1372 | * | ||
| 1373 | * @secid_to_secctx: | ||
| 1374 | * Convert secid to security context. If secdata is NULL the length of | ||
| 1375 | * the result will be returned in seclen, but no secdata will be returned. | ||
| 1376 | * This does mean that the length could change between calls to check the | ||
| 1377 | * length and the next call which actually allocates and returns the secdata. | ||
| 1378 | * @secid contains the security ID. | ||
| 1379 | * @secdata contains the pointer that stores the converted security context. | ||
| 1380 | * @seclen pointer which contains the length of the data | ||
| 1381 | * @secctx_to_secid: | ||
| 1382 | * Convert security context to secid. | ||
| 1383 | * @secid contains the pointer to the generated security ID. | ||
| 1384 | * @secdata contains the security context. | ||
| 1385 | * | ||
| 1386 | * @release_secctx: | ||
| 1387 | * Release the security context. | ||
| 1388 | * @secdata contains the security context. | ||
| 1389 | * @seclen contains the length of the security context. | ||
| 1390 | * | ||
| 1391 | * Security hooks for Audit | ||
| 1392 | * | ||
| 1393 | * @audit_rule_init: | ||
| 1394 | * Allocate and initialize an LSM audit rule structure. | ||
| 1395 | * @field contains the required Audit action. Field flags are defined in include/linux/audit.h. | ||
| 1396 | * @op contains the operator the rule uses. | ||
| 1397 | * @rulestr contains the context where the rule will be applied to. | ||
| 1398 | * @lsmrule contains a pointer to receive the result. | ||
| 1399 | * Return 0 if @lsmrule has been successfully set, | ||
| 1400 | * -EINVAL in case of an invalid rule. | ||
| 1401 | * | ||
| 1402 | * @audit_rule_known: | ||
| 1403 | * Specifies whether given @rule contains any fields related to current LSM. | ||
| 1404 | * @rule contains the audit rule of interest. | ||
| 1405 | * Return 1 in case of relation found, 0 otherwise. | ||
| 1406 | * | ||
| 1407 | * @audit_rule_match: | ||
| 1408 | * Determine if given @secid matches a rule previously approved | ||
| 1409 | * by @audit_rule_known. | ||
| 1410 | * @secid contains the security id in question. | ||
| 1411 | * @field contains the field which relates to current LSM. | ||
| 1412 | * @op contains the operator that will be used for matching. | ||
| 1413 | * @rule points to the audit rule that will be checked against. | ||
| 1414 | * @actx points to the audit context associated with the check. | ||
| 1415 | * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. | ||
| 1416 | * | ||
| 1417 | * @audit_rule_free: | ||
| 1418 | * Deallocate the LSM audit rule structure previously allocated by | ||
| 1419 | * audit_rule_init. | ||
| 1420 | * @rule contains the allocated rule | ||
| 1421 | * | ||
| 1422 | * @inode_notifysecctx: | ||
| 1423 | * Notify the security module of what the security context of an inode | ||
| 1424 | * should be. Initializes the incore security context managed by the | ||
| 1425 | * security module for this inode. Example usage: NFS client invokes | ||
| 1426 | * this hook to initialize the security context in its incore inode to the | ||
| 1427 | * value provided by the server for the file when the server returned the | ||
| 1428 | * file's attributes to the client. | ||
| 1429 | * | ||
| 1430 | * Must be called with inode->i_mutex locked. | ||
| 1431 | * | ||
| 1432 | * @inode we wish to set the security context of. | ||
| 1433 | * @ctx contains the string which we wish to set in the inode. | ||
| 1434 | * @ctxlen contains the length of @ctx. | ||
| 1435 | * | ||
| 1436 | * @inode_setsecctx: | ||
| 1437 | * Change the security context of an inode. Updates the | ||
| 1438 | * incore security context managed by the security module and invokes the | ||
| 1439 | * fs code as needed (via __vfs_setxattr_noperm) to update any backing | ||
| 1440 | * xattrs that represent the context. Example usage: NFS server invokes | ||
| 1441 | * this hook to change the security context in its incore inode and on the | ||
| 1442 | * backing filesystem to a value provided by the client on a SETATTR | ||
| 1443 | * operation. | ||
| 1444 | * | ||
| 1445 | * Must be called with inode->i_mutex locked. | ||
| 1446 | * | ||
| 1447 | * @dentry contains the inode we wish to set the security context of. | ||
| 1448 | * @ctx contains the string which we wish to set in the inode. | ||
| 1449 | * @ctxlen contains the length of @ctx. | ||
| 1450 | * | ||
| 1451 | * @inode_getsecctx: | ||
| 1452 | * On success, returns 0 and fills out @ctx and @ctxlen with the security | ||
| 1453 | * context for the given @inode. | ||
| 1454 | * | ||
| 1455 | * @inode we wish to get the security context of. | ||
| 1456 | * @ctx is a pointer in which to place the allocated security context. | ||
| 1457 | * @ctxlen points to the place to put the length of @ctx. | ||
| 1458 | * This is the main security structure. | ||
| 1459 | */ | ||
| 1460 | struct security_operations { | ||
| 1461 | char name[SECURITY_NAME_MAX + 1]; | ||
| 1462 | |||
| 1463 | int (*binder_set_context_mgr) (struct task_struct *mgr); | ||
| 1464 | int (*binder_transaction) (struct task_struct *from, | ||
| 1465 | struct task_struct *to); | ||
| 1466 | int (*binder_transfer_binder) (struct task_struct *from, | ||
| 1467 | struct task_struct *to); | ||
| 1468 | int (*binder_transfer_file) (struct task_struct *from, | ||
| 1469 | struct task_struct *to, struct file *file); | ||
| 1470 | |||
| 1471 | int (*ptrace_access_check) (struct task_struct *child, unsigned int mode); | ||
| 1472 | int (*ptrace_traceme) (struct task_struct *parent); | ||
| 1473 | int (*capget) (struct task_struct *target, | ||
| 1474 | kernel_cap_t *effective, | ||
| 1475 | kernel_cap_t *inheritable, kernel_cap_t *permitted); | ||
| 1476 | int (*capset) (struct cred *new, | ||
| 1477 | const struct cred *old, | ||
| 1478 | const kernel_cap_t *effective, | ||
| 1479 | const kernel_cap_t *inheritable, | ||
| 1480 | const kernel_cap_t *permitted); | ||
| 1481 | int (*capable) (const struct cred *cred, struct user_namespace *ns, | ||
| 1482 | int cap, int audit); | ||
| 1483 | int (*quotactl) (int cmds, int type, int id, struct super_block *sb); | ||
| 1484 | int (*quota_on) (struct dentry *dentry); | ||
| 1485 | int (*syslog) (int type); | ||
| 1486 | int (*settime) (const struct timespec *ts, const struct timezone *tz); | ||
| 1487 | int (*vm_enough_memory) (struct mm_struct *mm, long pages); | ||
| 1488 | |||
| 1489 | int (*bprm_set_creds) (struct linux_binprm *bprm); | ||
| 1490 | int (*bprm_check_security) (struct linux_binprm *bprm); | ||
| 1491 | int (*bprm_secureexec) (struct linux_binprm *bprm); | ||
| 1492 | void (*bprm_committing_creds) (struct linux_binprm *bprm); | ||
| 1493 | void (*bprm_committed_creds) (struct linux_binprm *bprm); | ||
| 1494 | |||
| 1495 | int (*sb_alloc_security) (struct super_block *sb); | ||
| 1496 | void (*sb_free_security) (struct super_block *sb); | ||
| 1497 | int (*sb_copy_data) (char *orig, char *copy); | ||
| 1498 | int (*sb_remount) (struct super_block *sb, void *data); | ||
| 1499 | int (*sb_kern_mount) (struct super_block *sb, int flags, void *data); | ||
| 1500 | int (*sb_show_options) (struct seq_file *m, struct super_block *sb); | ||
| 1501 | int (*sb_statfs) (struct dentry *dentry); | ||
| 1502 | int (*sb_mount) (const char *dev_name, struct path *path, | ||
| 1503 | const char *type, unsigned long flags, void *data); | ||
| 1504 | int (*sb_umount) (struct vfsmount *mnt, int flags); | ||
| 1505 | int (*sb_pivotroot) (struct path *old_path, | ||
| 1506 | struct path *new_path); | ||
| 1507 | int (*sb_set_mnt_opts) (struct super_block *sb, | ||
| 1508 | struct security_mnt_opts *opts, | ||
| 1509 | unsigned long kern_flags, | ||
| 1510 | unsigned long *set_kern_flags); | ||
| 1511 | int (*sb_clone_mnt_opts) (const struct super_block *oldsb, | ||
| 1512 | struct super_block *newsb); | ||
| 1513 | int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); | ||
| 1514 | int (*dentry_init_security) (struct dentry *dentry, int mode, | ||
| 1515 | struct qstr *name, void **ctx, | ||
| 1516 | u32 *ctxlen); | ||
| 1517 | |||
| 1518 | |||
| 1519 | #ifdef CONFIG_SECURITY_PATH | ||
| 1520 | int (*path_unlink) (struct path *dir, struct dentry *dentry); | ||
| 1521 | int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode); | ||
| 1522 | int (*path_rmdir) (struct path *dir, struct dentry *dentry); | ||
| 1523 | int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode, | ||
| 1524 | unsigned int dev); | ||
| 1525 | int (*path_truncate) (struct path *path); | ||
| 1526 | int (*path_symlink) (struct path *dir, struct dentry *dentry, | ||
| 1527 | const char *old_name); | ||
| 1528 | int (*path_link) (struct dentry *old_dentry, struct path *new_dir, | ||
| 1529 | struct dentry *new_dentry); | ||
| 1530 | int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, | ||
| 1531 | struct path *new_dir, struct dentry *new_dentry); | ||
| 1532 | int (*path_chmod) (struct path *path, umode_t mode); | ||
| 1533 | int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid); | ||
| 1534 | int (*path_chroot) (struct path *path); | ||
| 1535 | #endif | ||
| 1536 | |||
| 1537 | int (*inode_alloc_security) (struct inode *inode); | ||
| 1538 | void (*inode_free_security) (struct inode *inode); | ||
| 1539 | int (*inode_init_security) (struct inode *inode, struct inode *dir, | ||
| 1540 | const struct qstr *qstr, const char **name, | ||
| 1541 | void **value, size_t *len); | ||
| 1542 | int (*inode_create) (struct inode *dir, | ||
| 1543 | struct dentry *dentry, umode_t mode); | ||
| 1544 | int (*inode_link) (struct dentry *old_dentry, | ||
| 1545 | struct inode *dir, struct dentry *new_dentry); | ||
| 1546 | int (*inode_unlink) (struct inode *dir, struct dentry *dentry); | ||
| 1547 | int (*inode_symlink) (struct inode *dir, | ||
| 1548 | struct dentry *dentry, const char *old_name); | ||
| 1549 | int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode); | ||
| 1550 | int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); | ||
| 1551 | int (*inode_mknod) (struct inode *dir, struct dentry *dentry, | ||
| 1552 | umode_t mode, dev_t dev); | ||
| 1553 | int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, | ||
| 1554 | struct inode *new_dir, struct dentry *new_dentry); | ||
| 1555 | int (*inode_readlink) (struct dentry *dentry); | ||
| 1556 | int (*inode_follow_link) (struct dentry *dentry, struct inode *inode, | ||
| 1557 | bool rcu); | ||
| 1558 | int (*inode_permission) (struct inode *inode, int mask); | ||
| 1559 | int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); | ||
| 1560 | int (*inode_getattr) (const struct path *path); | ||
| 1561 | int (*inode_setxattr) (struct dentry *dentry, const char *name, | ||
| 1562 | const void *value, size_t size, int flags); | ||
| 1563 | void (*inode_post_setxattr) (struct dentry *dentry, const char *name, | ||
| 1564 | const void *value, size_t size, int flags); | ||
| 1565 | int (*inode_getxattr) (struct dentry *dentry, const char *name); | ||
| 1566 | int (*inode_listxattr) (struct dentry *dentry); | ||
| 1567 | int (*inode_removexattr) (struct dentry *dentry, const char *name); | ||
| 1568 | int (*inode_need_killpriv) (struct dentry *dentry); | ||
| 1569 | int (*inode_killpriv) (struct dentry *dentry); | ||
| 1570 | int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc); | ||
| 1571 | int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags); | ||
| 1572 | int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size); | ||
| 1573 | void (*inode_getsecid) (const struct inode *inode, u32 *secid); | ||
| 1574 | |||
| 1575 | int (*file_permission) (struct file *file, int mask); | ||
| 1576 | int (*file_alloc_security) (struct file *file); | ||
| 1577 | void (*file_free_security) (struct file *file); | ||
| 1578 | int (*file_ioctl) (struct file *file, unsigned int cmd, | ||
| 1579 | unsigned long arg); | ||
| 1580 | int (*mmap_addr) (unsigned long addr); | ||
| 1581 | int (*mmap_file) (struct file *file, | ||
| 1582 | unsigned long reqprot, unsigned long prot, | ||
| 1583 | unsigned long flags); | ||
| 1584 | int (*file_mprotect) (struct vm_area_struct *vma, | ||
| 1585 | unsigned long reqprot, | ||
| 1586 | unsigned long prot); | ||
| 1587 | int (*file_lock) (struct file *file, unsigned int cmd); | ||
| 1588 | int (*file_fcntl) (struct file *file, unsigned int cmd, | ||
| 1589 | unsigned long arg); | ||
| 1590 | void (*file_set_fowner) (struct file *file); | ||
| 1591 | int (*file_send_sigiotask) (struct task_struct *tsk, | ||
| 1592 | struct fown_struct *fown, int sig); | ||
| 1593 | int (*file_receive) (struct file *file); | ||
| 1594 | int (*file_open) (struct file *file, const struct cred *cred); | ||
| 1595 | |||
| 1596 | int (*task_create) (unsigned long clone_flags); | ||
| 1597 | void (*task_free) (struct task_struct *task); | ||
| 1598 | int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp); | ||
| 1599 | void (*cred_free) (struct cred *cred); | ||
| 1600 | int (*cred_prepare)(struct cred *new, const struct cred *old, | ||
| 1601 | gfp_t gfp); | ||
| 1602 | void (*cred_transfer)(struct cred *new, const struct cred *old); | ||
| 1603 | int (*kernel_act_as)(struct cred *new, u32 secid); | ||
| 1604 | int (*kernel_create_files_as)(struct cred *new, struct inode *inode); | ||
| 1605 | int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size); | ||
| 1606 | int (*kernel_module_request)(char *kmod_name); | ||
| 1607 | int (*kernel_module_from_file)(struct file *file); | ||
| 1608 | int (*task_fix_setuid) (struct cred *new, const struct cred *old, | ||
| 1609 | int flags); | ||
| 1610 | int (*task_setpgid) (struct task_struct *p, pid_t pgid); | ||
| 1611 | int (*task_getpgid) (struct task_struct *p); | ||
| 1612 | int (*task_getsid) (struct task_struct *p); | ||
| 1613 | void (*task_getsecid) (struct task_struct *p, u32 *secid); | ||
| 1614 | int (*task_setnice) (struct task_struct *p, int nice); | ||
| 1615 | int (*task_setioprio) (struct task_struct *p, int ioprio); | ||
| 1616 | int (*task_getioprio) (struct task_struct *p); | ||
| 1617 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, | ||
| 1618 | struct rlimit *new_rlim); | ||
| 1619 | int (*task_setscheduler) (struct task_struct *p); | ||
| 1620 | int (*task_getscheduler) (struct task_struct *p); | ||
| 1621 | int (*task_movememory) (struct task_struct *p); | ||
| 1622 | int (*task_kill) (struct task_struct *p, | ||
| 1623 | struct siginfo *info, int sig, u32 secid); | ||
| 1624 | int (*task_wait) (struct task_struct *p); | ||
| 1625 | int (*task_prctl) (int option, unsigned long arg2, | ||
| 1626 | unsigned long arg3, unsigned long arg4, | ||
| 1627 | unsigned long arg5); | ||
| 1628 | void (*task_to_inode) (struct task_struct *p, struct inode *inode); | ||
| 1629 | |||
| 1630 | int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); | ||
| 1631 | void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid); | ||
| 1632 | |||
| 1633 | int (*msg_msg_alloc_security) (struct msg_msg *msg); | ||
| 1634 | void (*msg_msg_free_security) (struct msg_msg *msg); | ||
| 1635 | |||
| 1636 | int (*msg_queue_alloc_security) (struct msg_queue *msq); | ||
| 1637 | void (*msg_queue_free_security) (struct msg_queue *msq); | ||
| 1638 | int (*msg_queue_associate) (struct msg_queue *msq, int msqflg); | ||
| 1639 | int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd); | ||
| 1640 | int (*msg_queue_msgsnd) (struct msg_queue *msq, | ||
| 1641 | struct msg_msg *msg, int msqflg); | ||
| 1642 | int (*msg_queue_msgrcv) (struct msg_queue *msq, | ||
| 1643 | struct msg_msg *msg, | ||
| 1644 | struct task_struct *target, | ||
| 1645 | long type, int mode); | ||
| 1646 | |||
| 1647 | int (*shm_alloc_security) (struct shmid_kernel *shp); | ||
| 1648 | void (*shm_free_security) (struct shmid_kernel *shp); | ||
| 1649 | int (*shm_associate) (struct shmid_kernel *shp, int shmflg); | ||
| 1650 | int (*shm_shmctl) (struct shmid_kernel *shp, int cmd); | ||
| 1651 | int (*shm_shmat) (struct shmid_kernel *shp, | ||
| 1652 | char __user *shmaddr, int shmflg); | ||
| 1653 | |||
| 1654 | int (*sem_alloc_security) (struct sem_array *sma); | ||
| 1655 | void (*sem_free_security) (struct sem_array *sma); | ||
| 1656 | int (*sem_associate) (struct sem_array *sma, int semflg); | ||
| 1657 | int (*sem_semctl) (struct sem_array *sma, int cmd); | ||
| 1658 | int (*sem_semop) (struct sem_array *sma, | ||
| 1659 | struct sembuf *sops, unsigned nsops, int alter); | ||
| 1660 | |||
| 1661 | int (*netlink_send) (struct sock *sk, struct sk_buff *skb); | ||
| 1662 | |||
| 1663 | void (*d_instantiate) (struct dentry *dentry, struct inode *inode); | ||
| 1664 | |||
| 1665 | int (*getprocattr) (struct task_struct *p, char *name, char **value); | ||
| 1666 | int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size); | ||
| 1667 | int (*ismaclabel) (const char *name); | ||
| 1668 | int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen); | ||
| 1669 | int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); | ||
| 1670 | void (*release_secctx) (char *secdata, u32 seclen); | ||
| 1671 | |||
| 1672 | int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); | ||
| 1673 | int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); | ||
| 1674 | int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); | ||
| 1675 | |||
| 1676 | #ifdef CONFIG_SECURITY_NETWORK | ||
| 1677 | int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk); | ||
| 1678 | int (*unix_may_send) (struct socket *sock, struct socket *other); | ||
| 1679 | |||
| 1680 | int (*socket_create) (int family, int type, int protocol, int kern); | ||
| 1681 | int (*socket_post_create) (struct socket *sock, int family, | ||
| 1682 | int type, int protocol, int kern); | ||
| 1683 | int (*socket_bind) (struct socket *sock, | ||
| 1684 | struct sockaddr *address, int addrlen); | ||
| 1685 | int (*socket_connect) (struct socket *sock, | ||
| 1686 | struct sockaddr *address, int addrlen); | ||
| 1687 | int (*socket_listen) (struct socket *sock, int backlog); | ||
| 1688 | int (*socket_accept) (struct socket *sock, struct socket *newsock); | ||
| 1689 | int (*socket_sendmsg) (struct socket *sock, | ||
| 1690 | struct msghdr *msg, int size); | ||
| 1691 | int (*socket_recvmsg) (struct socket *sock, | ||
| 1692 | struct msghdr *msg, int size, int flags); | ||
| 1693 | int (*socket_getsockname) (struct socket *sock); | ||
| 1694 | int (*socket_getpeername) (struct socket *sock); | ||
| 1695 | int (*socket_getsockopt) (struct socket *sock, int level, int optname); | ||
| 1696 | int (*socket_setsockopt) (struct socket *sock, int level, int optname); | ||
| 1697 | int (*socket_shutdown) (struct socket *sock, int how); | ||
| 1698 | int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb); | ||
| 1699 | int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); | ||
| 1700 | int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); | ||
| 1701 | int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); | ||
| 1702 | void (*sk_free_security) (struct sock *sk); | ||
| 1703 | void (*sk_clone_security) (const struct sock *sk, struct sock *newsk); | ||
| 1704 | void (*sk_getsecid) (struct sock *sk, u32 *secid); | ||
| 1705 | void (*sock_graft) (struct sock *sk, struct socket *parent); | ||
| 1706 | int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb, | ||
| 1707 | struct request_sock *req); | ||
| 1708 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); | ||
| 1709 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); | ||
| 1710 | int (*secmark_relabel_packet) (u32 secid); | ||
| 1711 | void (*secmark_refcount_inc) (void); | ||
| 1712 | void (*secmark_refcount_dec) (void); | ||
| 1713 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); | ||
| 1714 | int (*tun_dev_alloc_security) (void **security); | ||
| 1715 | void (*tun_dev_free_security) (void *security); | ||
| 1716 | int (*tun_dev_create) (void); | ||
| 1717 | int (*tun_dev_attach_queue) (void *security); | ||
| 1718 | int (*tun_dev_attach) (struct sock *sk, void *security); | ||
| 1719 | int (*tun_dev_open) (void *security); | ||
| 1720 | #endif /* CONFIG_SECURITY_NETWORK */ | ||
| 1721 | |||
| 1722 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | ||
| 1723 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, | ||
| 1724 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); | ||
| 1725 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); | ||
| 1726 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); | ||
| 1727 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); | ||
| 1728 | int (*xfrm_state_alloc) (struct xfrm_state *x, | ||
| 1729 | struct xfrm_user_sec_ctx *sec_ctx); | ||
| 1730 | int (*xfrm_state_alloc_acquire) (struct xfrm_state *x, | ||
| 1731 | struct xfrm_sec_ctx *polsec, | ||
| 1732 | u32 secid); | ||
| 1733 | void (*xfrm_state_free_security) (struct xfrm_state *x); | ||
| 1734 | int (*xfrm_state_delete_security) (struct xfrm_state *x); | ||
| 1735 | int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); | ||
| 1736 | int (*xfrm_state_pol_flow_match) (struct xfrm_state *x, | ||
| 1737 | struct xfrm_policy *xp, | ||
| 1738 | const struct flowi *fl); | ||
| 1739 | int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall); | ||
| 1740 | #endif /* CONFIG_SECURITY_NETWORK_XFRM */ | ||
| 1741 | |||
| 1742 | /* key management security hooks */ | ||
| 1743 | #ifdef CONFIG_KEYS | ||
| 1744 | int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags); | ||
| 1745 | void (*key_free) (struct key *key); | ||
| 1746 | int (*key_permission) (key_ref_t key_ref, | ||
| 1747 | const struct cred *cred, | ||
| 1748 | unsigned perm); | ||
| 1749 | int (*key_getsecurity)(struct key *key, char **_buffer); | ||
| 1750 | #endif /* CONFIG_KEYS */ | ||
| 1751 | |||
| 1752 | #ifdef CONFIG_AUDIT | ||
| 1753 | int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule); | ||
| 1754 | int (*audit_rule_known) (struct audit_krule *krule); | ||
| 1755 | int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule, | ||
| 1756 | struct audit_context *actx); | ||
| 1757 | void (*audit_rule_free) (void *lsmrule); | ||
| 1758 | #endif /* CONFIG_AUDIT */ | ||
| 1759 | }; | ||
| 1760 | |||
| 1761 | /* prototypes */ | 181 | /* prototypes */ |
| 1762 | extern int security_init(void); | 182 | extern int security_init(void); |
| 1763 | extern int security_module_enable(struct security_operations *ops); | ||
| 1764 | extern int register_security(struct security_operations *ops); | ||
| 1765 | extern void __init security_fixup_ops(struct security_operations *ops); | ||
| 1766 | |||
| 1767 | 183 | ||
| 1768 | /* Security operations */ | 184 | /* Security operations */ |
| 1769 | int security_binder_set_context_mgr(struct task_struct *mgr); | 185 | int security_binder_set_context_mgr(struct task_struct *mgr); |
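The hunk above removes the monolithic struct security_operations together with its kerneldoc: until this change an LSM implemented its hooks as functions matching these signatures and handed the whole table to the framework at boot, via the security_module_enable()/register_security()/security_fixup_ops() prototypes that are dropped here as well. A minimal, hedged sketch of that (now-removed) registration pattern follows; the "example" module, its single hook, and its trivially permissive policy are hypothetical and not part of this diff:

    /* Hypothetical minimal LSM against the removed interface above. */
    #include <linux/security.h>
    #include <linux/init.h>

    static int example_ptrace_access_check(struct task_struct *child,
                                           unsigned int mode)
    {
            return 0;       /* 0 grants access; real policy goes here */
    }

    static struct security_operations example_ops = {
            .name                   = "example",
            .ptrace_access_check    = example_ptrace_access_check,
            /* hooks left NULL are filled in by security_fixup_ops() */
    };

    static __init int example_lsm_init(void)
    {
            if (!security_module_enable(&example_ops))
                    return 0;       /* another LSM was chosen at boot */
            return register_security(&example_ops);
    }
    security_initcall(example_lsm_init);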
| @@ -2049,7 +465,7 @@ static inline int security_settime(const struct timespec *ts, | |||
| 2049 | 465 | ||
| 2050 | static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) | 466 | static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) |
| 2051 | { | 467 | { |
| 2052 | return cap_vm_enough_memory(mm, pages); | 468 | return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); |
| 2053 | } | 469 | } |
| 2054 | 470 | ||
| 2055 | static inline int security_bprm_set_creds(struct linux_binprm *bprm) | 471 | static inline int security_bprm_set_creds(struct linux_binprm *bprm) |
| @@ -2653,7 +1069,7 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void * | |||
| 2653 | 1069 | ||
| 2654 | static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) | 1070 | static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) |
| 2655 | { | 1071 | { |
| 2656 | return cap_netlink_send(sk, skb); | 1072 | return 0; |
| 2657 | } | 1073 | } |
| 2658 | 1074 | ||
| 2659 | static inline int security_ismaclabel(const char *name) | 1075 | static inline int security_ismaclabel(const char *name) |
| @@ -3221,36 +1637,5 @@ static inline void free_secdata(void *secdata) | |||
| 3221 | { } | 1637 | { } |
| 3222 | #endif /* CONFIG_SECURITY */ | 1638 | #endif /* CONFIG_SECURITY */ |
| 3223 | 1639 | ||
| 3224 | #ifdef CONFIG_SECURITY_YAMA | ||
| 3225 | extern int yama_ptrace_access_check(struct task_struct *child, | ||
| 3226 | unsigned int mode); | ||
| 3227 | extern int yama_ptrace_traceme(struct task_struct *parent); | ||
| 3228 | extern void yama_task_free(struct task_struct *task); | ||
| 3229 | extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, | ||
| 3230 | unsigned long arg4, unsigned long arg5); | ||
| 3231 | #else | ||
| 3232 | static inline int yama_ptrace_access_check(struct task_struct *child, | ||
| 3233 | unsigned int mode) | ||
| 3234 | { | ||
| 3235 | return 0; | ||
| 3236 | } | ||
| 3237 | |||
| 3238 | static inline int yama_ptrace_traceme(struct task_struct *parent) | ||
| 3239 | { | ||
| 3240 | return 0; | ||
| 3241 | } | ||
| 3242 | |||
| 3243 | static inline void yama_task_free(struct task_struct *task) | ||
| 3244 | { | ||
| 3245 | } | ||
| 3246 | |||
| 3247 | static inline int yama_task_prctl(int option, unsigned long arg2, | ||
| 3248 | unsigned long arg3, unsigned long arg4, | ||
| 3249 | unsigned long arg5) | ||
| 3250 | { | ||
| 3251 | return -ENOSYS; | ||
| 3252 | } | ||
| 3253 | #endif /* CONFIG_SECURITY_YAMA */ | ||
| 3254 | |||
| 3255 | #endif /* ! __LINUX_SECURITY_H */ | 1640 | #endif /* ! __LINUX_SECURITY_H */ |
| 3256 | 1641 | ||
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index afbb1fd77c77..912a7c482649 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
| @@ -123,6 +123,7 @@ __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); | |||
| 123 | __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); | 123 | __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); |
| 124 | 124 | ||
| 125 | int seq_path(struct seq_file *, const struct path *, const char *); | 125 | int seq_path(struct seq_file *, const struct path *, const char *); |
| 126 | int seq_file_path(struct seq_file *, struct file *, const char *); | ||
| 126 | int seq_dentry(struct seq_file *, struct dentry *, const char *); | 127 | int seq_dentry(struct seq_file *, struct dentry *, const char *); |
| 127 | int seq_path_root(struct seq_file *m, const struct path *path, | 128 | int seq_path_root(struct seq_file *m, const struct path *path, |
| 128 | const struct path *root, const char *esc); | 129 | const struct path *root, const char *esc); |
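seq_file_path(), declared in the hunk above, prints the path backing a struct file into a seq_file, escaping the characters listed in its third argument. A hedged sketch of a ->show() callback using it; demo_show() and the assumption that m->private holds the file are illustrative only:

    static int demo_show(struct seq_file *m, void *v)
    {
            struct file *file = m->private; /* assumed set up at open time */

            seq_puts(m, "backing file: ");
            seq_file_path(m, file, "\n");   /* escape newlines in the name */
            seq_putc(m, '\n');
            return 0;
    }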
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 486e685a226a..e0582106ef4f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
| 36 | #include <linux/preempt.h> | 36 | #include <linux/preempt.h> |
| 37 | #include <linux/lockdep.h> | 37 | #include <linux/lockdep.h> |
| 38 | #include <linux/compiler.h> | ||
| 38 | #include <asm/processor.h> | 39 | #include <asm/processor.h> |
| 39 | 40 | ||
| 40 | /* | 41 | /* |
| @@ -274,9 +275,87 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) | |||
| 274 | s->sequence++; | 275 | s->sequence++; |
| 275 | } | 276 | } |
| 276 | 277 | ||
| 277 | /* | 278 | static inline int raw_read_seqcount_latch(seqcount_t *s) |
| 279 | { | ||
| 280 | return lockless_dereference(s->sequence); | ||
| 281 | } | ||
| 282 | |||
| 283 | /** | ||
| 278 | * raw_write_seqcount_latch - redirect readers to even/odd copy | 284 | * raw_write_seqcount_latch - redirect readers to even/odd copy |
| 279 | * @s: pointer to seqcount_t | 285 | * @s: pointer to seqcount_t |
| 286 | * | ||
| 287 | * The latch technique is a multiversion concurrency control method that allows | ||
| 288 | * queries during non-atomic modifications. If you can guarantee queries never | ||
| 289 | * interrupt the modification -- e.g. the concurrency is strictly between CPUs | ||
| 290 | * -- you most likely do not need this. | ||
| 291 | * | ||
| 292 | * Where the traditional RCU/lockless data structures rely on atomic | ||
| 293 | * modifications to ensure queries observe either the old or the new state the | ||
| 294 | * latch allows the same for non-atomic updates. The trade-off is doubling the | ||
| 295 | * cost of storage; we have to maintain two copies of the entire data | ||
| 296 | * structure. | ||
| 297 | * | ||
| 298 | * Very simply put: we first modify one copy and then the other. This ensures | ||
| 299 | * there is always one copy in a stable state, ready to give us an answer. | ||
| 300 | * | ||
| 301 | * The basic form is a data structure like: | ||
| 302 | * | ||
| 303 | * struct latch_struct { | ||
| 304 | * seqcount_t seq; | ||
| 305 | * struct data_struct data[2]; | ||
| 306 | * }; | ||
| 307 | * | ||
| 308 | * Where a modification, which is assumed to be externally serialized, does the | ||
| 309 | * following: | ||
| 310 | * | ||
| 311 | * void latch_modify(struct latch_struct *latch, ...) | ||
| 312 | * { | ||
| 313 | * smp_wmb(); <- Ensure that the last data[1] update is visible | ||
| 314 | * latch->seq++; | ||
| 315 | * smp_wmb(); <- Ensure that the seqcount update is visible | ||
| 316 | * | ||
| 317 | * modify(latch->data[0], ...); | ||
| 318 | * | ||
| 319 | * smp_wmb(); <- Ensure that the data[0] update is visible | ||
| 320 | * latch->seq++; | ||
| 321 | * smp_wmb(); <- Ensure that the seqcount update is visible | ||
| 322 | * | ||
| 323 | * modify(latch->data[1], ...); | ||
| 324 | * } | ||
| 325 | * | ||
| 326 | * The query will have a form like: | ||
| 327 | * | ||
| 328 | * struct entry *latch_query(struct latch_struct *latch, ...) | ||
| 329 | * { | ||
| 330 | * struct entry *entry; | ||
| 331 | * unsigned seq, idx; | ||
| 332 | * | ||
| 333 | * do { | ||
| 334 | * seq = lockless_dereference(latch->seq); | ||
| 335 | * | ||
| 336 | * idx = seq & 0x01; | ||
| 337 | * entry = data_query(latch->data[idx], ...); | ||
| 338 | * | ||
| 339 | * smp_rmb(); | ||
| 340 | * } while (seq != latch->seq); | ||
| 341 | * | ||
| 342 | * return entry; | ||
| 343 | * } | ||
| 344 | * | ||
| 345 | * So during the modification, queries are first redirected to data[1]. Then we | ||
| 346 | * modify data[0]. When that is complete, we redirect queries back to data[0] | ||
| 347 | * and we can modify data[1]. | ||
| 348 | * | ||
| 349 | * NOTE: The non-requirement for atomic modifications does _NOT_ include | ||
| 350 | * the publishing of new entries in the case where data is a dynamic | ||
| 351 | * data structure. | ||
| 352 | * | ||
| 353 | * An iteration might start in data[0] and get suspended long enough | ||
| 354 | * to miss an entire modification sequence, once it resumes it might | ||
| 355 | * observe the new entry. | ||
| 356 | * | ||
| 357 | * NOTE: When data is a dynamic data structure, one should use regular RCU | ||
| 358 | * patterns to manage the lifetimes of the objects within. | ||
| 280 | */ | 359 | */ |
| 281 | static inline void raw_write_seqcount_latch(seqcount_t *s) | 360 | static inline void raw_write_seqcount_latch(seqcount_t *s) |
| 282 | { | 361 | { |
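The new raw_read_seqcount_latch()/raw_write_seqcount_latch() pair implements exactly the latch_modify()/latch_query() pattern spelled out in the comment above. A self-contained sketch with a concrete, made-up two-copy structure (real users such as the timekeeping code have the same shape); the writer is assumed to be externally serialized:

    struct ts_latch {               /* hypothetical example structure */
            seqcount_t      seq;
            u64             data[2];
    };

    static void ts_latch_update(struct ts_latch *l, u64 val)
    {
            raw_write_seqcount_latch(&l->seq);      /* readers -> data[1] */
            l->data[0] = val;
            raw_write_seqcount_latch(&l->seq);      /* readers -> data[0] */
            l->data[1] = val;
    }

    static u64 ts_latch_read(struct ts_latch *l)
    {
            unsigned int seq, idx;
            u64 val;

            do {
                    seq = raw_read_seqcount_latch(&l->seq);
                    idx = seq & 0x01;
                    val = l->data[idx];
                    smp_rmb();
            } while (seq != READ_ONCE(l->seq.sequence));

            return val;
    }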
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 78097e7a330a..ba82c07feb95 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #define _LINUX_SERIAL_8250_H | 12 | #define _LINUX_SERIAL_8250_H |
| 13 | 13 | ||
| 14 | #include <linux/serial_core.h> | 14 | #include <linux/serial_core.h> |
| 15 | #include <linux/serial_reg.h> | ||
| 15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
| 16 | 17 | ||
| 17 | /* | 18 | /* |
| @@ -137,6 +138,8 @@ extern int early_serial_setup(struct uart_port *port); | |||
| 137 | 138 | ||
| 138 | extern unsigned int serial8250_early_in(struct uart_port *port, int offset); | 139 | extern unsigned int serial8250_early_in(struct uart_port *port, int offset); |
| 139 | extern void serial8250_early_out(struct uart_port *port, int offset, int value); | 140 | extern void serial8250_early_out(struct uart_port *port, int offset, int value); |
| 141 | extern int early_serial8250_setup(struct earlycon_device *device, | ||
| 142 | const char *options); | ||
| 140 | extern void serial8250_do_set_termios(struct uart_port *port, | 143 | extern void serial8250_do_set_termios(struct uart_port *port, |
| 141 | struct ktermios *termios, struct ktermios *old); | 144 | struct ktermios *termios, struct ktermios *old); |
| 142 | extern int serial8250_do_startup(struct uart_port *port); | 145 | extern int serial8250_do_startup(struct uart_port *port); |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 025dad9dcde4..297d4fa1cfe5 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #define uart_console(port) \ | 35 | #define uart_console(port) \ |
| 36 | ((port)->cons && (port)->cons->index == (port)->line) | 36 | ((port)->cons && (port)->cons->index == (port)->line) |
| 37 | #else | 37 | #else |
| 38 | #define uart_console(port) (0) | 38 | #define uart_console(port) ({ (void)port; 0; }) |
| 39 | #endif | 39 | #endif |
| 40 | 40 | ||
| 41 | struct uart_port; | 41 | struct uart_port; |
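The new ({ (void)port; 0; }) form of uart_console() is a GCC statement expression: when console support is compiled out it still yields the constant 0, but it also evaluates its argument, so a caller whose only reference to a variable is through the macro no longer trips set-but-unused warnings. A small, hypothetical, user-space-compilable illustration of the difference (get_port() is a made-up helper):

    void *get_port(void);                   /* hypothetical helper */

    #define uart_console_old(port)  (0)
    #define uart_console_new(port)  ({ (void)port; 0; })

    int demo(void)
    {
            void *port = get_port();

            /* With uart_console_old(port), which expands to (0), "port"
             * is set but never used and -Wunused-but-set-variable fires.
             * The statement-expression form evaluates (void)port, which
             * counts as a use, while still yielding constant 0. */
            return uart_console_new(port);
    }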
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 6c5e3bb282b0..7c536ac5be05 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef __LINUX_SERIAL_SCI_H | 1 | #ifndef __LINUX_SERIAL_SCI_H |
| 2 | #define __LINUX_SERIAL_SCI_H | 2 | #define __LINUX_SERIAL_SCI_H |
| 3 | 3 | ||
| 4 | #include <linux/bitops.h> | ||
| 4 | #include <linux/serial_core.h> | 5 | #include <linux/serial_core.h> |
| 5 | #include <linux/sh_dma.h> | 6 | #include <linux/sh_dma.h> |
| 6 | 7 | ||
| @@ -10,59 +11,16 @@ | |||
| 10 | 11 | ||
| 11 | #define SCIx_NOT_SUPPORTED (-1) | 12 | #define SCIx_NOT_SUPPORTED (-1) |
| 12 | 13 | ||
| 13 | /* SCSMR (Serial Mode Register) */ | ||
| 14 | #define SCSMR_CHR (1 << 6) /* 7-bit Character Length */ | ||
| 15 | #define SCSMR_PE (1 << 5) /* Parity Enable */ | ||
| 16 | #define SCSMR_ODD (1 << 4) /* Odd Parity */ | ||
| 17 | #define SCSMR_STOP (1 << 3) /* Stop Bit Length */ | ||
| 18 | #define SCSMR_CKS 0x0003 /* Clock Select */ | ||
| 19 | |||
| 20 | /* Serial Control Register (@ = not supported by all parts) */ | 14 | /* Serial Control Register (@ = not supported by all parts) */ |
| 21 | #define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */ | 15 | #define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */ |
| 22 | #define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */ | 16 | #define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */ |
| 23 | #define SCSCR_TE (1 << 5) /* Transmit Enable */ | 17 | #define SCSCR_TE BIT(5) /* Transmit Enable */ |
| 24 | #define SCSCR_RE (1 << 4) /* Receive Enable */ | 18 | #define SCSCR_RE BIT(4) /* Receive Enable */ |
| 25 | #define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */ | 19 | #define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */ |
| 26 | #define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */ | 20 | #define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */ |
| 27 | #define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */ | 21 | #define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */ |
| 28 | #define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */ | 22 | #define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */ |
| 29 | /* SCIFA/SCIFB only */ | 23 | |
| 30 | #define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */ | ||
| 31 | #define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */ | ||
| 32 | |||
| 33 | /* SCxSR (Serial Status Register) on SCI */ | ||
| 34 | #define SCI_TDRE 0x80 /* Transmit Data Register Empty */ | ||
| 35 | #define SCI_RDRF 0x40 /* Receive Data Register Full */ | ||
| 36 | #define SCI_ORER 0x20 /* Overrun Error */ | ||
| 37 | #define SCI_FER 0x10 /* Framing Error */ | ||
| 38 | #define SCI_PER 0x08 /* Parity Error */ | ||
| 39 | #define SCI_TEND 0x04 /* Transmit End */ | ||
| 40 | |||
| 41 | #define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER) | ||
| 42 | |||
| 43 | /* SCxSR (Serial Status Register) on SCIF, HSCIF */ | ||
| 44 | #define SCIF_ER 0x0080 /* Receive Error */ | ||
| 45 | #define SCIF_TEND 0x0040 /* Transmission End */ | ||
| 46 | #define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */ | ||
| 47 | #define SCIF_BRK 0x0010 /* Break Detect */ | ||
| 48 | #define SCIF_FER 0x0008 /* Framing Error */ | ||
| 49 | #define SCIF_PER 0x0004 /* Parity Error */ | ||
| 50 | #define SCIF_RDF 0x0002 /* Receive FIFO Data Full */ | ||
| 51 | #define SCIF_DR 0x0001 /* Receive Data Ready */ | ||
| 52 | |||
| 53 | #define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) | ||
| 54 | |||
| 55 | /* SCFCR (FIFO Control Register) */ | ||
| 56 | #define SCFCR_LOOP (1 << 0) /* Loopback Test */ | ||
| 57 | |||
| 58 | /* SCSPTR (Serial Port Register), optional */ | ||
| 59 | #define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */ | ||
| 60 | #define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */ | ||
| 61 | #define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */ | ||
| 62 | #define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */ | ||
| 63 | |||
| 64 | /* HSSRR HSCIF */ | ||
| 65 | #define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */ | ||
| 66 | 24 | ||
| 67 | enum { | 25 | enum { |
| 68 | SCIx_PROBE_REGTYPE, | 26 | SCIx_PROBE_REGTYPE, |
| @@ -82,28 +40,6 @@ enum { | |||
| 82 | SCIx_NR_REGTYPES, | 40 | SCIx_NR_REGTYPES, |
| 83 | }; | 41 | }; |
| 84 | 42 | ||
| 85 | /* | ||
| 86 | * SCI register subset common for all port types. | ||
| 87 | * Not all registers will exist on all parts. | ||
| 88 | */ | ||
| 89 | enum { | ||
| 90 | SCSMR, /* Serial Mode Register */ | ||
| 91 | SCBRR, /* Bit Rate Register */ | ||
| 92 | SCSCR, /* Serial Control Register */ | ||
| 93 | SCxSR, /* Serial Status Register */ | ||
| 94 | SCFCR, /* FIFO Control Register */ | ||
| 95 | SCFDR, /* FIFO Data Count Register */ | ||
| 96 | SCxTDR, /* Transmit (FIFO) Data Register */ | ||
| 97 | SCxRDR, /* Receive (FIFO) Data Register */ | ||
| 98 | SCLSR, /* Line Status Register */ | ||
| 99 | SCTFDR, /* Transmit FIFO Data Count Register */ | ||
| 100 | SCRFDR, /* Receive FIFO Data Count Register */ | ||
| 101 | SCSPTR, /* Serial Port Register */ | ||
| 102 | HSSRR, /* Sampling Rate Register */ | ||
| 103 | |||
| 104 | SCIx_NR_REGS, | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct device; | 43 | struct device; |
| 108 | 44 | ||
| 109 | struct plat_sci_port_ops { | 45 | struct plat_sci_port_ops { |
| @@ -113,7 +49,7 @@ struct plat_sci_port_ops { | |||
| 113 | /* | 49 | /* |
| 114 | * Port-specific capabilities | 50 | * Port-specific capabilities |
| 115 | */ | 51 | */ |
| 116 | #define SCIx_HAVE_RTSCTS (1 << 0) | 52 | #define SCIx_HAVE_RTSCTS BIT(0) |
| 117 | 53 | ||
| 118 | /* | 54 | /* |
| 119 | * Platform device specific platform_data struct | 55 | * Platform device specific platform_data struct |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f15154a879c7..d6cdd6e87d53 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -34,7 +34,9 @@ | |||
| 34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
| 35 | #include <linux/netdev_features.h> | 35 | #include <linux/netdev_features.h> |
| 36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
| 37 | #include <net/flow_keys.h> | 37 | #include <net/flow_dissector.h> |
| 38 | #include <linux/splice.h> | ||
| 39 | #include <linux/in6.h> | ||
| 38 | 40 | ||
| 39 | /* A. Checksumming of received packets by device. | 41 | /* A. Checksumming of received packets by device. |
| 40 | * | 42 | * |
| @@ -170,13 +172,19 @@ struct nf_bridge_info { | |||
| 170 | BRNF_PROTO_UNCHANGED, | 172 | BRNF_PROTO_UNCHANGED, |
| 171 | BRNF_PROTO_8021Q, | 173 | BRNF_PROTO_8021Q, |
| 172 | BRNF_PROTO_PPPOE | 174 | BRNF_PROTO_PPPOE |
| 173 | } orig_proto; | 175 | } orig_proto:8; |
| 174 | bool pkt_otherhost; | 176 | bool pkt_otherhost; |
| 177 | __u16 frag_max_size; | ||
| 175 | unsigned int mask; | 178 | unsigned int mask; |
| 176 | struct net_device *physindev; | 179 | struct net_device *physindev; |
| 177 | struct net_device *physoutdev; | 180 | union { |
| 178 | char neigh_header[8]; | 181 | struct net_device *physoutdev; |
| 179 | __be32 ipv4_daddr; | 182 | char neigh_header[8]; |
| 183 | }; | ||
| 184 | union { | ||
| 185 | __be32 ipv4_daddr; | ||
| 186 | struct in6_addr ipv6_daddr; | ||
| 187 | }; | ||
| 180 | }; | 188 | }; |
| 181 | #endif | 189 | #endif |
| 182 | 190 | ||
| @@ -859,6 +867,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, | |||
| 859 | int len, int odd, struct sk_buff *skb), | 867 | int len, int odd, struct sk_buff *skb), |
| 860 | void *from, int length); | 868 | void *from, int length); |
| 861 | 869 | ||
| 870 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, | ||
| 871 | int offset, size_t size); | ||
| 872 | |||
| 862 | struct skb_seq_state { | 873 | struct skb_seq_state { |
| 863 | __u32 lower_offset; | 874 | __u32 lower_offset; |
| 864 | __u32 upper_offset; | 875 | __u32 upper_offset; |
| @@ -919,7 +930,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) | |||
| 919 | skb->hash = hash; | 930 | skb->hash = hash; |
| 920 | } | 931 | } |
| 921 | 932 | ||
| 922 | void __skb_get_hash(struct sk_buff *skb); | ||
| 923 | static inline __u32 skb_get_hash(struct sk_buff *skb) | 933 | static inline __u32 skb_get_hash(struct sk_buff *skb) |
| 924 | { | 934 | { |
| 925 | if (!skb->l4_hash && !skb->sw_hash) | 935 | if (!skb->l4_hash && !skb->sw_hash) |
| @@ -928,6 +938,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb) | |||
| 928 | return skb->hash; | 938 | return skb->hash; |
| 929 | } | 939 | } |
| 930 | 940 | ||
| 941 | __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); | ||
| 942 | |||
| 931 | static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) | 943 | static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) |
| 932 | { | 944 | { |
| 933 | return skb->hash; | 945 | return skb->hash; |
| @@ -1935,8 +1947,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, | |||
| 1935 | 1947 | ||
| 1936 | if (skb_transport_header_was_set(skb)) | 1948 | if (skb_transport_header_was_set(skb)) |
| 1937 | return; | 1949 | return; |
| 1938 | else if (skb_flow_dissect(skb, &keys)) | 1950 | else if (skb_flow_dissect_flow_keys(skb, &keys)) |
| 1939 | skb_set_transport_header(skb, keys.thoff); | 1951 | skb_set_transport_header(skb, keys.control.thoff); |
| 1940 | else | 1952 | else |
| 1941 | skb_set_transport_header(skb, offset_hint); | 1953 | skb_set_transport_header(skb, offset_hint); |
| 1942 | } | 1954 | } |
| @@ -2127,10 +2139,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) | |||
| 2127 | kfree_skb(skb); | 2139 | kfree_skb(skb); |
| 2128 | } | 2140 | } |
| 2129 | 2141 | ||
| 2130 | #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) | ||
| 2131 | #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) | ||
| 2132 | #define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE | ||
| 2133 | |||
| 2134 | void *netdev_alloc_frag(unsigned int fragsz); | 2142 | void *netdev_alloc_frag(unsigned int fragsz); |
| 2135 | 2143 | ||
| 2136 | struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, | 2144 | struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, |
| @@ -2185,6 +2193,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
| 2185 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); | 2193 | return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); |
| 2186 | } | 2194 | } |
| 2187 | 2195 | ||
| 2196 | static inline void skb_free_frag(void *addr) | ||
| 2197 | { | ||
| 2198 | __free_page_frag(addr); | ||
| 2199 | } | ||
| 2200 | |||
| 2188 | void *napi_alloc_frag(unsigned int fragsz); | 2201 | void *napi_alloc_frag(unsigned int fragsz); |
| 2189 | struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, | 2202 | struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, |
| 2190 | unsigned int length, gfp_t gfp_mask); | 2203 | unsigned int length, gfp_t gfp_mask); |
| @@ -2692,9 +2705,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); | |||
| 2692 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); | 2705 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); |
| 2693 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, | 2706 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, |
| 2694 | int len, __wsum csum); | 2707 | int len, __wsum csum); |
| 2695 | int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | 2708 | ssize_t skb_socket_splice(struct sock *sk, |
| 2709 | struct pipe_inode_info *pipe, | ||
| 2710 | struct splice_pipe_desc *spd); | ||
| 2711 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, | ||
| 2696 | struct pipe_inode_info *pipe, unsigned int len, | 2712 | struct pipe_inode_info *pipe, unsigned int len, |
| 2697 | unsigned int flags); | 2713 | unsigned int flags, |
| 2714 | ssize_t (*splice_cb)(struct sock *, | ||
| 2715 | struct pipe_inode_info *, | ||
| 2716 | struct splice_pipe_desc *)); | ||
| 2698 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); | 2717 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
| 2699 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); | 2718 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
| 2700 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, | 2719 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
| @@ -2729,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, | |||
| 2729 | __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, | 2748 | __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, |
| 2730 | __wsum csum); | 2749 | __wsum csum); |
| 2731 | 2750 | ||
| 2732 | static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, | 2751 | static inline void * __must_check |
| 2733 | int len, void *data, int hlen, void *buffer) | 2752 | __skb_header_pointer(const struct sk_buff *skb, int offset, |
| 2753 | int len, void *data, int hlen, void *buffer) | ||
| 2734 | { | 2754 | { |
| 2735 | if (hlen - offset >= len) | 2755 | if (hlen - offset >= len) |
| 2736 | return data + offset; | 2756 | return data + offset; |
| @@ -2742,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, | |||
| 2742 | return buffer; | 2762 | return buffer; |
| 2743 | } | 2763 | } |
| 2744 | 2764 | ||
| 2745 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, | 2765 | static inline void * __must_check |
| 2746 | int len, void *buffer) | 2766 | skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) |
| 2747 | { | 2767 | { |
| 2748 | return __skb_header_pointer(skb, offset, len, skb->data, | 2768 | return __skb_header_pointer(skb, offset, len, skb->data, |
| 2749 | skb_headlen(skb), buffer); | 2769 | skb_headlen(skb), buffer); |
| @@ -3050,7 +3070,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, | |||
| 3050 | } | 3070 | } |
| 3051 | } else if (skb->csum_bad) { | 3071 | } else if (skb->csum_bad) { |
| 3052 | /* ip_summed == CHECKSUM_NONE in this case */ | 3072 | /* ip_summed == CHECKSUM_NONE in this case */ |
| 3053 | return 1; | 3073 | return (__force __sum16)1; |
| 3054 | } | 3074 | } |
| 3055 | 3075 | ||
| 3056 | skb->csum = psum; | 3076 | skb->csum = psum; |
| @@ -3298,9 +3318,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) | |||
| 3298 | return skb->queue_mapping != 0; | 3318 | return skb->queue_mapping != 0; |
| 3299 | } | 3319 | } |
| 3300 | 3320 | ||
| 3301 | u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, | ||
| 3302 | unsigned int num_tx_queues); | ||
| 3303 | |||
| 3304 | static inline struct sec_path *skb_sec_path(struct sk_buff *skb) | 3321 | static inline struct sec_path *skb_sec_path(struct sk_buff *skb) |
| 3305 | { | 3322 | { |
| 3306 | #ifdef CONFIG_XFRM | 3323 | #ifdef CONFIG_XFRM |
| @@ -3355,15 +3372,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) | |||
| 3355 | static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) | 3372 | static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) |
| 3356 | { | 3373 | { |
| 3357 | int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - | 3374 | int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - |
| 3358 | skb_transport_offset(skb); | 3375 | skb_transport_offset(skb); |
| 3359 | __u16 csum; | 3376 | __wsum partial; |
| 3360 | 3377 | ||
| 3361 | csum = csum_fold(csum_partial(skb_transport_header(skb), | 3378 | partial = csum_partial(skb_transport_header(skb), plen, skb->csum); |
| 3362 | plen, skb->csum)); | ||
| 3363 | skb->csum = res; | 3379 | skb->csum = res; |
| 3364 | SKB_GSO_CB(skb)->csum_start -= plen; | 3380 | SKB_GSO_CB(skb)->csum_start -= plen; |
| 3365 | 3381 | ||
| 3366 | return csum; | 3382 | return csum_fold(partial); |
| 3367 | } | 3383 | } |
| 3368 | 3384 | ||
| 3369 | static inline bool skb_is_gso(const struct sk_buff *skb) | 3385 | static inline bool skb_is_gso(const struct sk_buff *skb) |
| @@ -3418,10 +3434,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb) | |||
| 3418 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); | 3434 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); |
| 3419 | 3435 | ||
| 3420 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate); | 3436 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate); |
| 3421 | 3437 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, | |
| 3422 | u32 skb_get_poff(const struct sk_buff *skb); | 3438 | unsigned int transport_len, |
| 3423 | u32 __skb_get_poff(const struct sk_buff *skb, void *data, | 3439 | __sum16(*skb_chkf)(struct sk_buff *skb)); |
| 3424 | const struct flow_keys *keys, int hlen); | ||
| 3425 | 3440 | ||
| 3426 | /** | 3441 | /** |
| 3427 | * skb_head_is_locked - Determine if the skb->head is locked down | 3442 | * skb_head_is_locked - Determine if the skb->head is locked down |
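Among the skbuff.h changes above, skb_header_pointer()/__skb_header_pointer() gain __must_check: the helper may return a pointer into the skb's linear data or the caller-supplied stack buffer, or NULL when the requested range is not available, so ignoring the return value and reading the passed-in buffer directly is a bug. A hedged usage sketch; the UDP header and the transport offset handling are illustrative only:

    #include <linux/skbuff.h>
    #include <linux/udp.h>

    static bool demo_get_dport(const struct sk_buff *skb, int thoff,
                               __be16 *dport)
    {
            struct udphdr _uh;
            const struct udphdr *uh;

            /* may point into skb->data or into _uh; must be checked */
            uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
            if (!uh)
                    return false;

            *dport = uh->dest;
            return true;
    }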
diff --git a/include/linux/slab.h b/include/linux/slab.h index ffd24c830151..a99f0e5243e1 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -240,8 +240,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; | |||
| 240 | * belongs to. | 240 | * belongs to. |
| 241 | * 0 = zero alloc | 241 | * 0 = zero alloc |
| 242 | * 1 = 65 .. 96 bytes | 242 | * 1 = 65 .. 96 bytes |
| 243 | * 2 = 120 .. 192 bytes | 243 | * 2 = 129 .. 192 bytes |
| 244 | * n = 2^(n-1) .. 2^n -1 | 244 | * n = 2^(n-1)+1 .. 2^n |
| 245 | */ | 245 | */ |
| 246 | static __always_inline int kmalloc_index(size_t size) | 246 | static __always_inline int kmalloc_index(size_t size) |
| 247 | { | 247 | { |
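Worked example of the corrected mapping (with the default minimum allocation size, so the two odd-sized caches exist): kmalloc_index(64) is 6 (the 33..64 bucket), kmalloc_index(65) through kmalloc_index(96) give 1 (the special 96-byte cache), kmalloc_index(97)..kmalloc_index(128) give 7, kmalloc_index(129)..kmalloc_index(192) give 2 (the special 192-byte cache), and kmalloc_index(193)..kmalloc_index(256) give 8. In other words, bucket n really covers 2^(n-1)+1 .. 2^n bytes, which is what the fixed comment now says.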
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index d600afb21926..da3c593f9845 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h | |||
| @@ -27,6 +27,8 @@ struct smpboot_thread_data; | |||
| 27 | * @pre_unpark: Optional unpark function, called before the thread is | 27 | * @pre_unpark: Optional unpark function, called before the thread is |
| 28 | * unparked (cpu online). This is not guaranteed to be | 28 | * unparked (cpu online). This is not guaranteed to be |
| 29 | * called on the target cpu of the thread. Careful! | 29 | * called on the target cpu of the thread. Careful! |
| 30 | * @cpumask: Internal state. To update which threads are unparked, | ||
| 31 | * call smpboot_update_cpumask_percpu_thread(). | ||
| 30 | * @selfparking: Thread is not parked by the park function. | 32 | * @selfparking: Thread is not parked by the park function. |
| 31 | * @thread_comm: The base name of the thread | 33 | * @thread_comm: The base name of the thread |
| 32 | */ | 34 | */ |
| @@ -41,11 +43,14 @@ struct smp_hotplug_thread { | |||
| 41 | void (*park)(unsigned int cpu); | 43 | void (*park)(unsigned int cpu); |
| 42 | void (*unpark)(unsigned int cpu); | 44 | void (*unpark)(unsigned int cpu); |
| 43 | void (*pre_unpark)(unsigned int cpu); | 45 | void (*pre_unpark)(unsigned int cpu); |
| 46 | cpumask_var_t cpumask; | ||
| 44 | bool selfparking; | 47 | bool selfparking; |
| 45 | const char *thread_comm; | 48 | const char *thread_comm; |
| 46 | }; | 49 | }; |
| 47 | 50 | ||
| 48 | int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); | 51 | int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); |
| 49 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); | 52 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); |
| 53 | int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | ||
| 54 | const struct cpumask *); | ||
| 50 | 55 | ||
| 51 | #endif | 56 | #endif |
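The new @cpumask field is internal state; callers restrict which per-cpu threads run through smpboot_update_cpumask_percpu_thread(). A minimal sketch, assuming the usual thread_should_run()/thread_fn() callbacks of struct smp_hotplug_thread; the my_* names are illustrative, not existing kernel symbols:

/* Sketch: register a per-cpu hotplug thread, then restrict it to CPU 0. */
static DEFINE_PER_CPU(struct task_struct *, my_task);
static DEFINE_PER_CPU(unsigned int, my_pending);

static int my_should_run(unsigned int cpu)
{
        return per_cpu(my_pending, cpu);
}

static void my_thread_fn(unsigned int cpu)
{
        per_cpu(my_pending, cpu) = 0;           /* consume the queued work */
}

static struct smp_hotplug_thread my_threads = {
        .store             = &my_task,
        .thread_should_run = my_should_run,
        .thread_fn         = my_thread_fn,
        .thread_comm       = "my_worker/%u",
};

static int __init my_init(void)
{
        int err = smpboot_register_percpu_thread(&my_threads);

        if (err)
                return err;
        /* unpark the thread on CPU 0 only, as an example */
        return smpboot_update_cpumask_percpu_thread(&my_threads, cpumask_of(0));
}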
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h new file mode 100644 index 000000000000..c5f663bba9c2 --- /dev/null +++ b/include/linux/soc/sunxi/sunxi_sram.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* | ||
| 2 | * Allwinner SoCs SRAM Controller Driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Maxime Ripard | ||
| 5 | * | ||
| 6 | * Author: Maxime Ripard <maxime.ripard@free-electrons.com> | ||
| 7 | * | ||
| 8 | * This file is licensed under the terms of the GNU General Public | ||
| 9 | * License version 2. This program is licensed "as is" without any | ||
| 10 | * warranty of any kind, whether express or implied. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #ifndef _SUNXI_SRAM_H_ | ||
| 14 | #define _SUNXI_SRAM_H_ | ||
| 15 | |||
| 16 | int sunxi_sram_claim(struct device *dev); | ||
| 17 | int sunxi_sram_release(struct device *dev); | ||
| 18 | |||
| 19 | #endif /* _SUNXI_SRAM_H_ */ | ||
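A hedged consumer sketch for the two new entry points; everything except sunxi_sram_claim()/sunxi_sram_release() is illustrative:

/* Sketch: a platform driver claiming its SRAM section at probe time. */
static int my_probe(struct platform_device *pdev)
{
        int ret = sunxi_sram_claim(&pdev->dev);

        if (ret)
                return ret;             /* SRAM section unavailable */
        /* ... set up the device that depends on the SRAM section ... */
        return 0;
}

static int my_remove(struct platform_device *pdev)
{
        sunxi_sram_release(&pdev->dev);
        return 0;
}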
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 083ac388098e..fddebc617469 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h | |||
| @@ -1,7 +1,10 @@ | |||
| 1 | #ifndef __SOCK_DIAG_H__ | 1 | #ifndef __SOCK_DIAG_H__ |
| 2 | #define __SOCK_DIAG_H__ | 2 | #define __SOCK_DIAG_H__ |
| 3 | 3 | ||
| 4 | #include <linux/netlink.h> | ||
| 4 | #include <linux/user_namespace.h> | 5 | #include <linux/user_namespace.h> |
| 6 | #include <net/net_namespace.h> | ||
| 7 | #include <net/sock.h> | ||
| 5 | #include <uapi/linux/sock_diag.h> | 8 | #include <uapi/linux/sock_diag.h> |
| 6 | 9 | ||
| 7 | struct sk_buff; | 10 | struct sk_buff; |
| @@ -11,6 +14,7 @@ struct sock; | |||
| 11 | struct sock_diag_handler { | 14 | struct sock_diag_handler { |
| 12 | __u8 family; | 15 | __u8 family; |
| 13 | int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); | 16 | int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); |
| 17 | int (*get_info)(struct sk_buff *skb, struct sock *sk); | ||
| 14 | }; | 18 | }; |
| 15 | 19 | ||
| 16 | int sock_diag_register(const struct sock_diag_handler *h); | 20 | int sock_diag_register(const struct sock_diag_handler *h); |
| @@ -26,4 +30,42 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); | |||
| 26 | int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, | 30 | int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, |
| 27 | struct sk_buff *skb, int attrtype); | 31 | struct sk_buff *skb, int attrtype); |
| 28 | 32 | ||
| 33 | static inline | ||
| 34 | enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) | ||
| 35 | { | ||
| 36 | switch (sk->sk_family) { | ||
| 37 | case AF_INET: | ||
| 38 | switch (sk->sk_protocol) { | ||
| 39 | case IPPROTO_TCP: | ||
| 40 | return SKNLGRP_INET_TCP_DESTROY; | ||
| 41 | case IPPROTO_UDP: | ||
| 42 | return SKNLGRP_INET_UDP_DESTROY; | ||
| 43 | default: | ||
| 44 | return SKNLGRP_NONE; | ||
| 45 | } | ||
| 46 | case AF_INET6: | ||
| 47 | switch (sk->sk_protocol) { | ||
| 48 | case IPPROTO_TCP: | ||
| 49 | return SKNLGRP_INET6_TCP_DESTROY; | ||
| 50 | case IPPROTO_UDP: | ||
| 51 | return SKNLGRP_INET6_UDP_DESTROY; | ||
| 52 | default: | ||
| 53 | return SKNLGRP_NONE; | ||
| 54 | } | ||
| 55 | default: | ||
| 56 | return SKNLGRP_NONE; | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline | ||
| 61 | bool sock_diag_has_destroy_listeners(const struct sock *sk) | ||
| 62 | { | ||
| 63 | const struct net *n = sock_net(sk); | ||
| 64 | const enum sknetlink_groups group = sock_diag_destroy_group(sk); | ||
| 65 | |||
| 66 | return group != SKNLGRP_NONE && n->diag_nlsk && | ||
| 67 | netlink_has_listeners(n->diag_nlsk, group); | ||
| 68 | } | ||
| 69 | void sock_diag_broadcast_destroy(struct sock *sk); | ||
| 70 | |||
| 29 | #endif | 71 | #endif |
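A short sketch of how the two new helpers are meant to be paired; the wrapper function is illustrative:

/* Sketch: only broadcast a destroy notification when a diag listener is
 * subscribed to the matching per-family/per-protocol netlink group.
 */
static void my_report_destroy(struct sock *sk)
{
        if (sock_diag_has_destroy_listeners(sk))
                sock_diag_broadcast_destroy(sk);
}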
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h index e741e8baad92..85b8ee67e937 100644 --- a/include/linux/spi/cc2520.h +++ b/include/linux/spi/cc2520.h | |||
| @@ -21,7 +21,6 @@ struct cc2520_platform_data { | |||
| 21 | int sfd; | 21 | int sfd; |
| 22 | int reset; | 22 | int reset; |
| 23 | int vreg; | 23 | int vreg; |
| 24 | bool amplified; | ||
| 25 | }; | 24 | }; |
| 26 | 25 | ||
| 27 | #endif | 26 | #endif |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4568a5cc9ab8..c3d1a525bacc 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
| @@ -29,10 +29,13 @@ struct ssb_sprom { | |||
| 29 | u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ | 29 | u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ |
| 30 | u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ | 30 | u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ |
| 31 | u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ | 31 | u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ |
| 32 | u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */ | ||
| 32 | u8 et0phyaddr; /* MII address for enet0 */ | 33 | u8 et0phyaddr; /* MII address for enet0 */ |
| 33 | u8 et1phyaddr; /* MII address for enet1 */ | 34 | u8 et1phyaddr; /* MII address for enet1 */ |
| 35 | u8 et2phyaddr; /* MII address for enet2 */ | ||
| 34 | u8 et0mdcport; /* MDIO for enet0 */ | 36 | u8 et0mdcport; /* MDIO for enet0 */ |
| 35 | u8 et1mdcport; /* MDIO for enet1 */ | 37 | u8 et1mdcport; /* MDIO for enet1 */ |
| 38 | u8 et2mdcport; /* MDIO for enet2 */ | ||
| 36 | u16 dev_id; /* Device ID overriding e.g. PCI ID */ | 39 | u16 dev_id; /* Device ID overriding e.g. PCI ID */ |
| 37 | u16 board_rev; /* Board revision number from SPROM. */ | 40 | u16 board_rev; /* Board revision number from SPROM. */ |
| 38 | u16 board_num; /* Board number from SPROM. */ | 41 | u16 board_num; /* Board number from SPROM. */ |
| @@ -88,11 +91,14 @@ struct ssb_sprom { | |||
| 88 | u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ | 91 | u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ |
| 89 | u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ | 92 | u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ |
| 90 | u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ | 93 | u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ |
| 94 | u32 boardflags; | ||
| 95 | u32 boardflags2; | ||
| 96 | u32 boardflags3; | ||
| 97 | /* TODO: Switch all drivers to new u32 fields and drop below ones */ | ||
| 91 | u16 boardflags_lo; /* Board flags (bits 0-15) */ | 98 | u16 boardflags_lo; /* Board flags (bits 0-15) */ |
| 92 | u16 boardflags_hi; /* Board flags (bits 16-31) */ | 99 | u16 boardflags_hi; /* Board flags (bits 16-31) */ |
| 93 | u16 boardflags2_lo; /* Board flags (bits 32-47) */ | 100 | u16 boardflags2_lo; /* Board flags (bits 32-47) */ |
| 94 | u16 boardflags2_hi; /* Board flags (bits 48-63) */ | 101 | u16 boardflags2_hi; /* Board flags (bits 48-63) */ |
| 95 | /* TODO store board flags in a single u64 */ | ||
| 96 | 102 | ||
| 97 | struct ssb_sprom_core_pwr_info core_pwr_info[4]; | 103 | struct ssb_sprom_core_pwr_info core_pwr_info[4]; |
| 98 | 104 | ||
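Until drivers migrate to the new u32 fields, the legacy 16-bit halves carry the same bits. A hedged sketch of keeping both representations in sync while parsing an SPROM (the helper name is illustrative):

/* Sketch: fill the new u32 field and the legacy 16-bit halves together. */
static void sprom_set_boardflags(struct ssb_sprom *sprom, u32 flags)
{
        sprom->boardflags    = flags;
        sprom->boardflags_lo = flags & 0xffff;          /* bits 0-15  */
        sprom->boardflags_hi = flags >> 16;             /* bits 16-31 */
}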
diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 076af437284d..9c61c7cda936 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h | |||
| @@ -3,7 +3,6 @@ | |||
| 3 | 3 | ||
| 4 | #include <uapi/linux/stddef.h> | 4 | #include <uapi/linux/stddef.h> |
| 5 | 5 | ||
| 6 | |||
| 7 | #undef NULL | 6 | #undef NULL |
| 8 | #define NULL ((void *)0) | 7 | #define NULL ((void *)0) |
| 9 | 8 | ||
| @@ -14,10 +13,9 @@ enum { | |||
| 14 | 13 | ||
| 15 | #undef offsetof | 14 | #undef offsetof |
| 16 | #ifdef __compiler_offsetof | 15 | #ifdef __compiler_offsetof |
| 17 | #define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) | 16 | #define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) |
| 18 | #else | 17 | #else |
| 19 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) | 18 | #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) |
| 20 | #endif | ||
| 21 | #endif | 19 | #endif |
| 22 | 20 | ||
| 23 | /** | 21 | /** |
| @@ -28,3 +26,5 @@ enum { | |||
| 28 | */ | 26 | */ |
| 29 | #define offsetofend(TYPE, MEMBER) \ | 27 | #define offsetofend(TYPE, MEMBER) \ |
| 30 | (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) | 28 | (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) |
| 29 | |||
| 30 | #endif | ||
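A small worked example of offsetofend(), which now sits before the restored closing #endif:

/* Worked example: offsetofend() points just past the named member. */
struct demo {
        char tag;       /* offset 0, size 1 */
        int  value;     /* offset 4 on common ABIs (after padding) */
};
/* offsetofend(struct demo, tag)   == 0 + sizeof(char) == 1
 * offsetofend(struct demo, value) == offsetof(struct demo, value) + sizeof(int)
 *                                 == 8 on common 32/64-bit ABIs
 */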
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7f484a239f53..c735f5c91eea 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
| @@ -99,6 +99,7 @@ struct plat_stmmacenet_data { | |||
| 99 | int phy_addr; | 99 | int phy_addr; |
| 100 | int interface; | 100 | int interface; |
| 101 | struct stmmac_mdio_bus_data *mdio_bus_data; | 101 | struct stmmac_mdio_bus_data *mdio_bus_data; |
| 102 | struct device_node *phy_node; | ||
| 102 | struct stmmac_dma_cfg *dma_cfg; | 103 | struct stmmac_dma_cfg *dma_cfg; |
| 103 | int clk_csr; | 104 | int clk_csr; |
| 104 | int has_gmac; | 105 | int has_gmac; |
diff --git a/include/linux/string.h b/include/linux/string.h index e40099e585c9..a8d90db9c4b0 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -111,6 +111,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t); | |||
| 111 | extern void * memchr(const void *,int,__kernel_size_t); | 111 | extern void * memchr(const void *,int,__kernel_size_t); |
| 112 | #endif | 112 | #endif |
| 113 | void *memchr_inv(const void *s, int c, size_t n); | 113 | void *memchr_inv(const void *s, int c, size_t n); |
| 114 | char *strreplace(char *s, char old, char new); | ||
| 114 | 115 | ||
| 115 | extern void kfree_const(const void *x); | 116 | extern void kfree_const(const void *x); |
| 116 | 117 | ||
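A hedged usage sketch for the newly exported strreplace(); the wrapper is illustrative:

/* Sketch: rewrite characters in place, e.g. to make a name safe as a
 * single path component before exposing it in sysfs/debugfs.
 */
static void sanitize_name(char *name)
{
        strreplace(name, '/', '!');     /* "eth0/stats" becomes "eth0!stats" */
}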
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 2ca67b55e0fe..8df43c9f11dc 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h | |||
| @@ -37,7 +37,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); | |||
| 37 | void xprt_free_bc_request(struct rpc_rqst *req); | 37 | void xprt_free_bc_request(struct rpc_rqst *req); |
| 38 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); | 38 | int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); |
| 39 | void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); | 39 | void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); |
| 40 | int bc_send(struct rpc_rqst *req); | ||
| 41 | 40 | ||
| 42 | /* | 41 | /* |
| 43 | * Determine if a shared backchannel is in use | 42 | * Determine if a shared backchannel is in use |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 598ba80ec30c..131032f15cc1 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -56,6 +56,7 @@ struct rpc_clnt { | |||
| 56 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ | 56 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ |
| 57 | const struct rpc_timeout *cl_timeout; /* Timeout strategy */ | 57 | const struct rpc_timeout *cl_timeout; /* Timeout strategy */ |
| 58 | 58 | ||
| 59 | atomic_t cl_swapper; /* swapfile count */ | ||
| 59 | int cl_nodelen; /* nodename length */ | 60 | int cl_nodelen; /* nodename length */ |
| 60 | char cl_nodename[UNX_MAXNODENAME+1]; | 61 | char cl_nodename[UNX_MAXNODENAME+1]; |
| 61 | struct rpc_pipe_dir_head cl_pipedir_objects; | 62 | struct rpc_pipe_dir_head cl_pipedir_objects; |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 5f1e6bd4c316..d703f0ef37d8 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -205,8 +205,7 @@ struct rpc_wait_queue { | |||
| 205 | */ | 205 | */ |
| 206 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *); | 206 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *); |
| 207 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *); | 207 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *); |
| 208 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | 208 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); |
| 209 | const struct rpc_call_ops *ops); | ||
| 210 | void rpc_put_task(struct rpc_task *); | 209 | void rpc_put_task(struct rpc_task *); |
| 211 | void rpc_put_task_async(struct rpc_task *); | 210 | void rpc_put_task_async(struct rpc_task *); |
| 212 | void rpc_exit_task(struct rpc_task *); | 211 | void rpc_exit_task(struct rpc_task *); |
| @@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, | |||
| 269 | } | 268 | } |
| 270 | #endif | 269 | #endif |
| 271 | 270 | ||
| 271 | #if IS_ENABLED(CONFIG_SUNRPC_SWAP) | ||
| 272 | int rpc_clnt_swap_activate(struct rpc_clnt *clnt); | ||
| 273 | void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt); | ||
| 274 | #else | ||
| 275 | static inline int | ||
| 276 | rpc_clnt_swap_activate(struct rpc_clnt *clnt) | ||
| 277 | { | ||
| 278 | return -EINVAL; | ||
| 279 | } | ||
| 280 | |||
| 281 | static inline void | ||
| 282 | rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) | ||
| 283 | { | ||
| 284 | } | ||
| 285 | #endif /* CONFIG_SUNRPC_SWAP */ | ||
| 286 | |||
| 272 | #endif /* _LINUX_SUNRPC_SCHED_H_ */ | 287 | #endif /* _LINUX_SUNRPC_SCHED_H_ */ |
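A hedged caller sketch for the new swap hooks; with CONFIG_SUNRPC_SWAP disabled, activation fails with -EINVAL as the stub above shows. The surrounding functions are illustrative:

/* Sketch: enable/disable swapping over an RPC client. */
static int my_enable_swap(struct rpc_clnt *clnt)
{
        int err = rpc_clnt_swap_activate(clnt);

        if (err)
                return err;             /* -EINVAL if SUNRPC_SWAP is not built */
        /* ... mark the backing file as a swapfile ... */
        return 0;
}

static void my_disable_swap(struct rpc_clnt *clnt)
{
        rpc_clnt_swap_deactivate(clnt);
}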
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index df8edf8ec914..cb94ee4181d4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
| @@ -172,6 +172,13 @@ struct svcxprt_rdma { | |||
| 172 | #define RDMAXPRT_SQ_PENDING 2 | 172 | #define RDMAXPRT_SQ_PENDING 2 |
| 173 | #define RDMAXPRT_CONN_PENDING 3 | 173 | #define RDMAXPRT_CONN_PENDING 3 |
| 174 | 174 | ||
| 175 | #define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */ | ||
| 176 | #if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT) | ||
| 177 | #define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD | ||
| 178 | #else | ||
| 179 | #define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT) | ||
| 180 | #endif | ||
| 181 | |||
| 175 | #define RPCRDMA_LISTEN_BACKLOG 10 | 182 | #define RPCRDMA_LISTEN_BACKLOG 10 |
| 176 | /* The default ORD value is based on two outstanding full-size writes with a | 183 | /* The default ORD value is based on two outstanding full-size writes with a |
| 177 | * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ | 184 | * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ |
| @@ -182,10 +189,9 @@ struct svcxprt_rdma { | |||
| 182 | 189 | ||
| 183 | /* svc_rdma_marshal.c */ | 190 | /* svc_rdma_marshal.c */ |
| 184 | extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); | 191 | extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); |
| 185 | extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *); | ||
| 186 | extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, | 192 | extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, |
| 187 | struct rpcrdma_msg *, | 193 | struct rpcrdma_msg *, |
| 188 | enum rpcrdma_errcode, u32 *); | 194 | enum rpcrdma_errcode, __be32 *); |
| 189 | extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); | 195 | extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); |
| 190 | extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); | 196 | extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); |
| 191 | extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, | 197 | extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, |
| @@ -212,7 +218,6 @@ extern int svc_rdma_sendto(struct svc_rqst *); | |||
| 212 | extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); | 218 | extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); |
| 213 | extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, | 219 | extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, |
| 214 | enum rpcrdma_errcode); | 220 | enum rpcrdma_errcode); |
| 215 | struct page *svc_rdma_get_page(void); | ||
| 216 | extern int svc_rdma_post_recv(struct svcxprt_rdma *); | 221 | extern int svc_rdma_post_recv(struct svcxprt_rdma *); |
| 217 | extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); | 222 | extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); |
| 218 | extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); | 223 | extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 8b93ef53df3c..0fb9acbb4780 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -133,6 +133,9 @@ struct rpc_xprt_ops { | |||
| 133 | void (*close)(struct rpc_xprt *xprt); | 133 | void (*close)(struct rpc_xprt *xprt); |
| 134 | void (*destroy)(struct rpc_xprt *xprt); | 134 | void (*destroy)(struct rpc_xprt *xprt); |
| 135 | void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); | 135 | void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); |
| 136 | int (*enable_swap)(struct rpc_xprt *xprt); | ||
| 137 | void (*disable_swap)(struct rpc_xprt *xprt); | ||
| 138 | void (*inject_disconnect)(struct rpc_xprt *xprt); | ||
| 136 | }; | 139 | }; |
| 137 | 140 | ||
| 138 | /* | 141 | /* |
| @@ -180,7 +183,7 @@ struct rpc_xprt { | |||
| 180 | atomic_t num_reqs; /* total slots */ | 183 | atomic_t num_reqs; /* total slots */ |
| 181 | unsigned long state; /* transport state */ | 184 | unsigned long state; /* transport state */ |
| 182 | unsigned char resvport : 1; /* use a reserved port */ | 185 | unsigned char resvport : 1; /* use a reserved port */ |
| 183 | unsigned int swapper; /* we're swapping over this | 186 | atomic_t swapper; /* we're swapping over this |
| 184 | transport */ | 187 | transport */ |
| 185 | unsigned int bind_index; /* bind function index */ | 188 | unsigned int bind_index; /* bind function index */ |
| 186 | 189 | ||
| @@ -212,7 +215,8 @@ struct rpc_xprt { | |||
| 212 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | 215 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
| 213 | struct svc_serv *bc_serv; /* The RPC service which will */ | 216 | struct svc_serv *bc_serv; /* The RPC service which will */ |
| 214 | /* process the callback */ | 217 | /* process the callback */ |
| 215 | unsigned int bc_alloc_count; /* Total number of preallocs */ | 218 | int bc_alloc_count; /* Total number of preallocs */ |
| 219 | atomic_t bc_free_slots; | ||
| 216 | spinlock_t bc_pa_lock; /* Protects the preallocated | 220 | spinlock_t bc_pa_lock; /* Protects the preallocated |
| 217 | * items */ | 221 | * items */ |
| 218 | struct list_head bc_pa_list; /* List of preallocated | 222 | struct list_head bc_pa_list; /* List of preallocated |
| @@ -241,6 +245,7 @@ struct rpc_xprt { | |||
| 241 | const char *address_strings[RPC_DISPLAY_MAX]; | 245 | const char *address_strings[RPC_DISPLAY_MAX]; |
| 242 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 246 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
| 243 | struct dentry *debugfs; /* debugfs directory */ | 247 | struct dentry *debugfs; /* debugfs directory */ |
| 248 | atomic_t inject_disconnect; | ||
| 244 | #endif | 249 | #endif |
| 245 | }; | 250 | }; |
| 246 | 251 | ||
| @@ -327,6 +332,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 * | |||
| 327 | return p + xprt->tsh_size; | 332 | return p + xprt->tsh_size; |
| 328 | } | 333 | } |
| 329 | 334 | ||
| 335 | static inline int | ||
| 336 | xprt_enable_swap(struct rpc_xprt *xprt) | ||
| 337 | { | ||
| 338 | return xprt->ops->enable_swap(xprt); | ||
| 339 | } | ||
| 340 | |||
| 341 | static inline void | ||
| 342 | xprt_disable_swap(struct rpc_xprt *xprt) | ||
| 343 | { | ||
| 344 | xprt->ops->disable_swap(xprt); | ||
| 345 | } | ||
| 346 | |||
| 330 | /* | 347 | /* |
| 331 | * Transport switch helper functions | 348 | * Transport switch helper functions |
| 332 | */ | 349 | */ |
| @@ -345,7 +362,6 @@ void xprt_release_rqst_cong(struct rpc_task *task); | |||
| 345 | void xprt_disconnect_done(struct rpc_xprt *xprt); | 362 | void xprt_disconnect_done(struct rpc_xprt *xprt); |
| 346 | void xprt_force_disconnect(struct rpc_xprt *xprt); | 363 | void xprt_force_disconnect(struct rpc_xprt *xprt); |
| 347 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); | 364 | void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); |
| 348 | int xs_swapper(struct rpc_xprt *xprt, int enable); | ||
| 349 | 365 | ||
| 350 | bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); | 366 | bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); |
| 351 | void xprt_unlock_connect(struct rpc_xprt *, void *); | 367 | void xprt_unlock_connect(struct rpc_xprt *, void *); |
| @@ -431,6 +447,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) | |||
| 431 | return test_and_set_bit(XPRT_BINDING, &xprt->state); | 447 | return test_and_set_bit(XPRT_BINDING, &xprt->state); |
| 432 | } | 448 | } |
| 433 | 449 | ||
| 450 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
| 451 | extern unsigned int rpc_inject_disconnect; | ||
| 452 | static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) | ||
| 453 | { | ||
| 454 | if (!rpc_inject_disconnect) | ||
| 455 | return; | ||
| 456 | if (atomic_dec_return(&xprt->inject_disconnect)) | ||
| 457 | return; | ||
| 458 | atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); | ||
| 459 | xprt->ops->inject_disconnect(xprt); | ||
| 460 | } | ||
| 461 | #else | ||
| 462 | static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) | ||
| 463 | { | ||
| 464 | } | ||
| 465 | #endif | ||
| 466 | |||
| 434 | #endif /* __KERNEL__*/ | 467 | #endif /* __KERNEL__*/ |
| 435 | 468 | ||
| 436 | #endif /* _LINUX_SUNRPC_XPRT_H */ | 469 | #endif /* _LINUX_SUNRPC_XPRT_H */ |
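Transports now expose enable_swap/disable_swap/inject_disconnect callbacks, and xprt_inject_disconnect() fires the last one every rpc_inject_disconnect-th call when debugging is enabled. A hedged sketch of a transport wiring the new ops; the callback bodies and the xs_*_sketch names are illustrative:

/* Sketch: filling the three new rpc_xprt_ops callbacks. */
static int xs_enable_swap_sketch(struct rpc_xprt *xprt)
{
        /* pin whatever the socket needs to make progress under memory pressure */
        return 0;
}

static void xs_disable_swap_sketch(struct rpc_xprt *xprt)
{
        /* release the resources pinned by enable_swap */
}

static void xs_inject_disconnect_sketch(struct rpc_xprt *xprt)
{
        xprt_force_disconnect(xprt);    /* declared earlier in this header */
}

static struct rpc_xprt_ops my_xprt_ops = {
        /* ... existing connect/send_request/close/destroy callbacks ... */
        .enable_swap       = xs_enable_swap_sketch,
        .disable_swap      = xs_disable_swap_sketch,
        .inject_disconnect = xs_inject_disconnect_sketch,
};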
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index c984c85981ea..b17613052cc3 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h | |||
| @@ -56,7 +56,8 @@ | |||
| 56 | 56 | ||
| 57 | #define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ | 57 | #define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ |
| 58 | 58 | ||
| 59 | /* memory registration strategies */ | 59 | /* Memory registration strategies, by number. |
| 60 | * This is part of a kernel / user space API. Do not remove. */ | ||
| 60 | enum rpcrdma_memreg { | 61 | enum rpcrdma_memreg { |
| 61 | RPCRDMA_BOUNCEBUFFERS = 0, | 62 | RPCRDMA_BOUNCEBUFFERS = 0, |
| 62 | RPCRDMA_REGISTER, | 63 | RPCRDMA_REGISTER, |
diff --git a/include/linux/swap.h b/include/linux/swap.h index cee108cbe2d5..38874729dc5f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -377,7 +377,6 @@ extern void end_swap_bio_write(struct bio *bio, int err); | |||
| 377 | extern int __swap_writepage(struct page *page, struct writeback_control *wbc, | 377 | extern int __swap_writepage(struct page *page, struct writeback_control *wbc, |
| 378 | void (*end_write_func)(struct bio *, int)); | 378 | void (*end_write_func)(struct bio *, int)); |
| 379 | extern int swap_set_page_dirty(struct page *page); | 379 | extern int swap_set_page_dirty(struct page *page); |
| 380 | extern void end_swap_bio_read(struct bio *bio, int err); | ||
| 381 | 380 | ||
| 382 | int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, | 381 | int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, |
| 383 | unsigned long nr_pages, sector_t start_block); | 382 | unsigned long nr_pages, sector_t start_block); |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 76d1e38aabe1..b45c45b8c829 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -111,14 +111,14 @@ union bpf_attr; | |||
| 111 | #define __SC_STR_ADECL(t, a) #a | 111 | #define __SC_STR_ADECL(t, a) #a |
| 112 | #define __SC_STR_TDECL(t, a) #t | 112 | #define __SC_STR_TDECL(t, a) #t |
| 113 | 113 | ||
| 114 | extern struct ftrace_event_class event_class_syscall_enter; | 114 | extern struct trace_event_class event_class_syscall_enter; |
| 115 | extern struct ftrace_event_class event_class_syscall_exit; | 115 | extern struct trace_event_class event_class_syscall_exit; |
| 116 | extern struct trace_event_functions enter_syscall_print_funcs; | 116 | extern struct trace_event_functions enter_syscall_print_funcs; |
| 117 | extern struct trace_event_functions exit_syscall_print_funcs; | 117 | extern struct trace_event_functions exit_syscall_print_funcs; |
| 118 | 118 | ||
| 119 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ | 119 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ |
| 120 | static struct syscall_metadata __syscall_meta_##sname; \ | 120 | static struct syscall_metadata __syscall_meta_##sname; \ |
| 121 | static struct ftrace_event_call __used \ | 121 | static struct trace_event_call __used \ |
| 122 | event_enter_##sname = { \ | 122 | event_enter_##sname = { \ |
| 123 | .class = &event_class_syscall_enter, \ | 123 | .class = &event_class_syscall_enter, \ |
| 124 | { \ | 124 | { \ |
| @@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 128 | .data = (void *)&__syscall_meta_##sname,\ | 128 | .data = (void *)&__syscall_meta_##sname,\ |
| 129 | .flags = TRACE_EVENT_FL_CAP_ANY, \ | 129 | .flags = TRACE_EVENT_FL_CAP_ANY, \ |
| 130 | }; \ | 130 | }; \ |
| 131 | static struct ftrace_event_call __used \ | 131 | static struct trace_event_call __used \ |
| 132 | __attribute__((section("_ftrace_events"))) \ | 132 | __attribute__((section("_ftrace_events"))) \ |
| 133 | *__event_enter_##sname = &event_enter_##sname; | 133 | *__event_enter_##sname = &event_enter_##sname; |
| 134 | 134 | ||
| 135 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ | 135 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ |
| 136 | static struct syscall_metadata __syscall_meta_##sname; \ | 136 | static struct syscall_metadata __syscall_meta_##sname; \ |
| 137 | static struct ftrace_event_call __used \ | 137 | static struct trace_event_call __used \ |
| 138 | event_exit_##sname = { \ | 138 | event_exit_##sname = { \ |
| 139 | .class = &event_class_syscall_exit, \ | 139 | .class = &event_class_syscall_exit, \ |
| 140 | { \ | 140 | { \ |
| @@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 144 | .data = (void *)&__syscall_meta_##sname,\ | 144 | .data = (void *)&__syscall_meta_##sname,\ |
| 145 | .flags = TRACE_EVENT_FL_CAP_ANY, \ | 145 | .flags = TRACE_EVENT_FL_CAP_ANY, \ |
| 146 | }; \ | 146 | }; \ |
| 147 | static struct ftrace_event_call __used \ | 147 | static struct trace_event_call __used \ |
| 148 | __attribute__((section("_ftrace_events"))) \ | 148 | __attribute__((section("_ftrace_events"))) \ |
| 149 | *__event_exit_##sname = &event_exit_##sname; | 149 | *__event_exit_##sname = &event_exit_##sname; |
| 150 | 150 | ||
| @@ -827,15 +827,15 @@ asmlinkage long sys_syncfs(int fd); | |||
| 827 | asmlinkage long sys_fork(void); | 827 | asmlinkage long sys_fork(void); |
| 828 | asmlinkage long sys_vfork(void); | 828 | asmlinkage long sys_vfork(void); |
| 829 | #ifdef CONFIG_CLONE_BACKWARDS | 829 | #ifdef CONFIG_CLONE_BACKWARDS |
| 830 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, | 830 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long, |
| 831 | int __user *); | 831 | int __user *); |
| 832 | #else | 832 | #else |
| 833 | #ifdef CONFIG_CLONE_BACKWARDS3 | 833 | #ifdef CONFIG_CLONE_BACKWARDS3 |
| 834 | asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, | 834 | asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, |
| 835 | int __user *, int); | 835 | int __user *, unsigned long); |
| 836 | #else | 836 | #else |
| 837 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, | 837 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, |
| 838 | int __user *, int); | 838 | int __user *, unsigned long); |
| 839 | #endif | 839 | #endif |
| 840 | #endif | 840 | #endif |
| 841 | 841 | ||
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 795d5fea5697..fa7bc29925c9 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
| @@ -188,6 +188,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, | |||
| 188 | void unregister_sysctl_table(struct ctl_table_header * table); | 188 | void unregister_sysctl_table(struct ctl_table_header * table); |
| 189 | 189 | ||
| 190 | extern int sysctl_init(void); | 190 | extern int sysctl_init(void); |
| 191 | |||
| 192 | extern struct ctl_table sysctl_mount_point[]; | ||
| 193 | |||
| 191 | #else /* CONFIG_SYSCTL */ | 194 | #else /* CONFIG_SYSCTL */ |
| 192 | static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) | 195 | static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) |
| 193 | { | 196 | { |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 99382c0df17e..9f65758311a4 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
| @@ -210,6 +210,10 @@ int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, | |||
| 210 | int __must_check sysfs_move_dir_ns(struct kobject *kobj, | 210 | int __must_check sysfs_move_dir_ns(struct kobject *kobj, |
| 211 | struct kobject *new_parent_kobj, | 211 | struct kobject *new_parent_kobj, |
| 212 | const void *new_ns); | 212 | const void *new_ns); |
| 213 | int __must_check sysfs_create_mount_point(struct kobject *parent_kobj, | ||
| 214 | const char *name); | ||
| 215 | void sysfs_remove_mount_point(struct kobject *parent_kobj, | ||
| 216 | const char *name); | ||
| 213 | 217 | ||
| 214 | int __must_check sysfs_create_file_ns(struct kobject *kobj, | 218 | int __must_check sysfs_create_file_ns(struct kobject *kobj, |
| 215 | const struct attribute *attr, | 219 | const struct attribute *attr, |
| @@ -298,6 +302,17 @@ static inline int sysfs_move_dir_ns(struct kobject *kobj, | |||
| 298 | return 0; | 302 | return 0; |
| 299 | } | 303 | } |
| 300 | 304 | ||
| 305 | static inline int sysfs_create_mount_point(struct kobject *parent_kobj, | ||
| 306 | const char *name) | ||
| 307 | { | ||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | static inline void sysfs_remove_mount_point(struct kobject *parent_kobj, | ||
| 312 | const char *name) | ||
| 313 | { | ||
| 314 | } | ||
| 315 | |||
| 301 | static inline int sysfs_create_file_ns(struct kobject *kobj, | 316 | static inline int sysfs_create_file_ns(struct kobject *kobj, |
| 302 | const struct attribute *attr, | 317 | const struct attribute *attr, |
| 303 | const void *ns) | 318 | const void *ns) |
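A hedged sketch for the new mount-point helpers, assuming fs_kobj (declared in <linux/fs.h>) as the parent; the "my_fs" name is illustrative:

/* Sketch: create a permanent, empty mount point under /sys/fs. */
static int __init my_fs_init(void)
{
        return sysfs_create_mount_point(fs_kobj, "my_fs");
}

static void __exit my_fs_exit(void)
{
        sysfs_remove_mount_point(fs_kobj, "my_fs");
}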
diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 4b7b875a7ce1..c3a7f0cc3a27 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h | |||
| @@ -47,12 +47,12 @@ | |||
| 47 | #define SYSLOG_FROM_READER 0 | 47 | #define SYSLOG_FROM_READER 0 |
| 48 | #define SYSLOG_FROM_PROC 1 | 48 | #define SYSLOG_FROM_PROC 1 |
| 49 | 49 | ||
| 50 | int do_syslog(int type, char __user *buf, int count, bool from_file); | 50 | int do_syslog(int type, char __user *buf, int count, int source); |
| 51 | 51 | ||
| 52 | #ifdef CONFIG_PRINTK | 52 | #ifdef CONFIG_PRINTK |
| 53 | int check_syslog_permissions(int type, bool from_file); | 53 | int check_syslog_permissions(int type, int source); |
| 54 | #else | 54 | #else |
| 55 | static inline int check_syslog_permissions(int type, bool from_file) | 55 | static inline int check_syslog_permissions(int type, int source) |
| 56 | { | 56 | { |
| 57 | return 0; | 57 | return 0; |
| 58 | } | 58 | } |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index e8bbf403618f..48c3696e8645 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -149,11 +149,16 @@ struct tcp_sock { | |||
| 149 | * sum(delta(rcv_nxt)), or how many bytes | 149 | * sum(delta(rcv_nxt)), or how many bytes |
| 150 | * were acked. | 150 | * were acked. |
| 151 | */ | 151 | */ |
| 152 | u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn | ||
| 153 | * total number of segments in. | ||
| 154 | */ | ||
| 152 | u32 rcv_nxt; /* What we want to receive next */ | 155 | u32 rcv_nxt; /* What we want to receive next */ |
| 153 | u32 copied_seq; /* Head of yet unread data */ | 156 | u32 copied_seq; /* Head of yet unread data */ |
| 154 | u32 rcv_wup; /* rcv_nxt on last window update sent */ | 157 | u32 rcv_wup; /* rcv_nxt on last window update sent */ |
| 155 | u32 snd_nxt; /* Next sequence we send */ | 158 | u32 snd_nxt; /* Next sequence we send */ |
| 156 | 159 | u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut | |
| 160 | * The total number of segments sent. | ||
| 161 | */ | ||
| 157 | u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked | 162 | u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked |
| 158 | * sum(delta(snd_una)), or how many bytes | 163 | * sum(delta(snd_una)), or how many bytes |
| 159 | * were acked. | 164 | * were acked. |
| @@ -201,6 +206,7 @@ struct tcp_sock { | |||
| 201 | syn_fastopen:1, /* SYN includes Fast Open option */ | 206 | syn_fastopen:1, /* SYN includes Fast Open option */ |
| 202 | syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ | 207 | syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ |
| 203 | syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ | 208 | syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ |
| 209 | save_syn:1, /* Save headers of SYN packet */ | ||
| 204 | is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ | 210 | is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ |
| 205 | u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ | 211 | u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ |
| 206 | 212 | ||
| @@ -328,6 +334,7 @@ struct tcp_sock { | |||
| 328 | * socket. Used to retransmit SYNACKs etc. | 334 | * socket. Used to retransmit SYNACKs etc. |
| 329 | */ | 335 | */ |
| 330 | struct request_sock *fastopen_rsk; | 336 | struct request_sock *fastopen_rsk; |
| 337 | u32 *saved_syn; | ||
| 331 | }; | 338 | }; |
| 332 | 339 | ||
| 333 | enum tsq_flags { | 340 | enum tsq_flags { |
| @@ -395,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog) | |||
| 395 | return 0; | 402 | return 0; |
| 396 | } | 403 | } |
| 397 | 404 | ||
| 405 | static inline void tcp_saved_syn_free(struct tcp_sock *tp) | ||
| 406 | { | ||
| 407 | kfree(tp->saved_syn); | ||
| 408 | tp->saved_syn = NULL; | ||
| 409 | } | ||
| 410 | |||
| 398 | #endif /* _LINUX_TCP_H */ | 411 | #endif /* _LINUX_TCP_H */ |
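A brief sketch of how the saved SYN headers are released once consumed; the calling context is illustrative:

/* Sketch: free tp->saved_syn after it has been read out (or on destroy). */
static void my_consume_saved_syn(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->saved_syn) {
                /* ... copy the saved headers out to the requester ... */
                tcp_saved_syn_free(tp);
        }
}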
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 5eac316490ea..037e9df2f610 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -40,6 +40,9 @@ | |||
| 40 | /* No upper/lower limit requirement */ | 40 | /* No upper/lower limit requirement */ |
| 41 | #define THERMAL_NO_LIMIT ((u32)~0) | 41 | #define THERMAL_NO_LIMIT ((u32)~0) |
| 42 | 42 | ||
| 43 | /* Default weight of a bound cooling device */ | ||
| 44 | #define THERMAL_WEIGHT_DEFAULT 0 | ||
| 45 | |||
| 43 | /* Unit conversion macros */ | 46 | /* Unit conversion macros */ |
| 44 | #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ | 47 | #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ |
| 45 | ((long)t-2732+5)/10 : ((long)t-2732-5)/10) | 48 | ((long)t-2732+5)/10 : ((long)t-2732-5)/10) |
| @@ -56,10 +59,13 @@ | |||
| 56 | #define DEFAULT_THERMAL_GOVERNOR "fair_share" | 59 | #define DEFAULT_THERMAL_GOVERNOR "fair_share" |
| 57 | #elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) | 60 | #elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) |
| 58 | #define DEFAULT_THERMAL_GOVERNOR "user_space" | 61 | #define DEFAULT_THERMAL_GOVERNOR "user_space" |
| 62 | #elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) | ||
| 63 | #define DEFAULT_THERMAL_GOVERNOR "power_allocator" | ||
| 59 | #endif | 64 | #endif |
| 60 | 65 | ||
| 61 | struct thermal_zone_device; | 66 | struct thermal_zone_device; |
| 62 | struct thermal_cooling_device; | 67 | struct thermal_cooling_device; |
| 68 | struct thermal_instance; | ||
| 63 | 69 | ||
| 64 | enum thermal_device_mode { | 70 | enum thermal_device_mode { |
| 65 | THERMAL_DEVICE_DISABLED = 0, | 71 | THERMAL_DEVICE_DISABLED = 0, |
| @@ -113,6 +119,12 @@ struct thermal_cooling_device_ops { | |||
| 113 | int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); | 119 | int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); |
| 114 | int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); | 120 | int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); |
| 115 | int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); | 121 | int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); |
| 122 | int (*get_requested_power)(struct thermal_cooling_device *, | ||
| 123 | struct thermal_zone_device *, u32 *); | ||
| 124 | int (*state2power)(struct thermal_cooling_device *, | ||
| 125 | struct thermal_zone_device *, unsigned long, u32 *); | ||
| 126 | int (*power2state)(struct thermal_cooling_device *, | ||
| 127 | struct thermal_zone_device *, u32, unsigned long *); | ||
| 116 | }; | 128 | }; |
| 117 | 129 | ||
| 118 | struct thermal_cooling_device { | 130 | struct thermal_cooling_device { |
| @@ -144,8 +156,7 @@ struct thermal_attr { | |||
| 144 | * @devdata: private pointer for device private data | 156 | * @devdata: private pointer for device private data |
| 145 | * @trips: number of trip points the thermal zone supports | 157 | * @trips: number of trip points the thermal zone supports |
| 146 | * @passive_delay: number of milliseconds to wait between polls when | 158 | * @passive_delay: number of milliseconds to wait between polls when |
| 147 | * performing passive cooling. Currenty only used by the | 159 | * performing passive cooling. |
| 148 | * step-wise governor | ||
| 149 | * @polling_delay: number of milliseconds to wait between polls when | 160 | * @polling_delay: number of milliseconds to wait between polls when |
| 150 | * checking whether trip points have been crossed (0 for | 161 | * checking whether trip points have been crossed (0 for |
| 151 | * interrupt driven systems) | 162 | * interrupt driven systems) |
| @@ -155,13 +166,13 @@ struct thermal_attr { | |||
| 155 | * @last_temperature: previous temperature read | 166 | * @last_temperature: previous temperature read |
| 156 | * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION | 167 | * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION |
| 157 | * @passive: 1 if you've crossed a passive trip point, 0 otherwise. | 168 | * @passive: 1 if you've crossed a passive trip point, 0 otherwise. |
| 158 | * Currenty only used by the step-wise governor. | ||
| 159 | * @forced_passive: If > 0, temperature at which to switch on all ACPI | 169 | * @forced_passive: If > 0, temperature at which to switch on all ACPI |
| 160 | * processor cooling devices. Currently only used by the | 170 | * processor cooling devices. Currently only used by the |
| 161 | * step-wise governor. | 171 | * step-wise governor. |
| 162 | * @ops: operations this &thermal_zone_device supports | 172 | * @ops: operations this &thermal_zone_device supports |
| 163 | * @tzp: thermal zone parameters | 173 | * @tzp: thermal zone parameters |
| 164 | * @governor: pointer to the governor for this thermal zone | 174 | * @governor: pointer to the governor for this thermal zone |
| 175 | * @governor_data: private pointer for governor data | ||
| 165 | * @thermal_instances: list of &struct thermal_instance of this thermal zone | 176 | * @thermal_instances: list of &struct thermal_instance of this thermal zone |
| 166 | * @idr: &struct idr to generate unique id for this zone's cooling | 177 | * @idr: &struct idr to generate unique id for this zone's cooling |
| 167 | * devices | 178 | * devices |
| @@ -186,8 +197,9 @@ struct thermal_zone_device { | |||
| 186 | int passive; | 197 | int passive; |
| 187 | unsigned int forced_passive; | 198 | unsigned int forced_passive; |
| 188 | struct thermal_zone_device_ops *ops; | 199 | struct thermal_zone_device_ops *ops; |
| 189 | const struct thermal_zone_params *tzp; | 200 | struct thermal_zone_params *tzp; |
| 190 | struct thermal_governor *governor; | 201 | struct thermal_governor *governor; |
| 202 | void *governor_data; | ||
| 191 | struct list_head thermal_instances; | 203 | struct list_head thermal_instances; |
| 192 | struct idr idr; | 204 | struct idr idr; |
| 193 | struct mutex lock; | 205 | struct mutex lock; |
| @@ -198,12 +210,19 @@ struct thermal_zone_device { | |||
| 198 | /** | 210 | /** |
| 199 | * struct thermal_governor - structure that holds thermal governor information | 211 | * struct thermal_governor - structure that holds thermal governor information |
| 200 | * @name: name of the governor | 212 | * @name: name of the governor |
| 213 | * @bind_to_tz: callback called when binding to a thermal zone. If it | ||
| 214 | * returns 0, the governor is bound to the thermal zone, | ||
| 215 | * otherwise it fails. | ||
| 216 | * @unbind_from_tz: callback called when a governor is unbound from a | ||
| 217 | * thermal zone. | ||
| 201 | * @throttle: callback called for every trip point even if temperature is | 218 | * @throttle: callback called for every trip point even if temperature is |
| 202 | * below the trip point temperature | 219 | * below the trip point temperature |
| 203 | * @governor_list: node in thermal_governor_list (in thermal_core.c) | 220 | * @governor_list: node in thermal_governor_list (in thermal_core.c) |
| 204 | */ | 221 | */ |
| 205 | struct thermal_governor { | 222 | struct thermal_governor { |
| 206 | char name[THERMAL_NAME_LENGTH]; | 223 | char name[THERMAL_NAME_LENGTH]; |
| 224 | int (*bind_to_tz)(struct thermal_zone_device *tz); | ||
| 225 | void (*unbind_from_tz)(struct thermal_zone_device *tz); | ||
| 207 | int (*throttle)(struct thermal_zone_device *tz, int trip); | 226 | int (*throttle)(struct thermal_zone_device *tz, int trip); |
| 208 | struct list_head governor_list; | 227 | struct list_head governor_list; |
| 209 | }; | 228 | }; |
| @@ -214,9 +233,12 @@ struct thermal_bind_params { | |||
| 214 | 233 | ||
| 215 | /* | 234 | /* |
| 216 | * This is a measure of 'how effectively these devices can | 235 | * This is a measure of 'how effectively these devices can |
| 217 | * cool 'this' thermal zone. The shall be determined by platform | 236 | * cool 'this' thermal zone. It shall be determined by |
| 218 | * characterization. This is on a 'percentage' scale. | 237 | * platform characterization. This value is relative to the |
| 219 | * See Documentation/thermal/sysfs-api.txt for more information. | 238 | * rest of the weights so a cooling device whose weight is |
| 239 | * double that of another cooling device is twice as | ||
| 240 | * effective. See Documentation/thermal/sysfs-api.txt for more | ||
| 241 | * information. | ||
| 220 | */ | 242 | */ |
| 221 | int weight; | 243 | int weight; |
| 222 | 244 | ||
| @@ -253,6 +275,44 @@ struct thermal_zone_params { | |||
| 253 | 275 | ||
| 254 | int num_tbps; /* Number of tbp entries */ | 276 | int num_tbps; /* Number of tbp entries */ |
| 255 | struct thermal_bind_params *tbp; | 277 | struct thermal_bind_params *tbp; |
| 278 | |||
| 279 | /* | ||
| 280 | * Sustainable power (heat) that this thermal zone can dissipate in | ||
| 281 | * mW | ||
| 282 | */ | ||
| 283 | u32 sustainable_power; | ||
| 284 | |||
| 285 | /* | ||
| 286 | * Proportional parameter of the PID controller when | ||
| 287 | * overshooting (i.e., when temperature is below the target) | ||
| 288 | */ | ||
| 289 | s32 k_po; | ||
| 290 | |||
| 291 | /* | ||
| 292 | * Proportional parameter of the PID controller when | ||
| 293 | * undershooting | ||
| 294 | */ | ||
| 295 | s32 k_pu; | ||
| 296 | |||
| 297 | /* Integral parameter of the PID controller */ | ||
| 298 | s32 k_i; | ||
| 299 | |||
| 300 | /* Derivative parameter of the PID controller */ | ||
| 301 | s32 k_d; | ||
| 302 | |||
| 303 | /* threshold below which the error is no longer accumulated */ | ||
| 304 | s32 integral_cutoff; | ||
| 305 | |||
| 306 | /* | ||
| 307 | * @slope: slope of a linear temperature adjustment curve. | ||
| 308 | * Used by thermal zone drivers. | ||
| 309 | */ | ||
| 310 | int slope; | ||
| 311 | /* | ||
| 312 | * @offset: offset of a linear temperature adjustment curve. | ||
| 313 | * Used by thermal zone drivers (default 0). | ||
| 314 | */ | ||
| 315 | int offset; | ||
| 256 | }; | 316 | }; |
| 257 | 317 | ||
| 258 | struct thermal_genl_event { | 318 | struct thermal_genl_event { |
| @@ -316,14 +376,25 @@ void thermal_zone_of_sensor_unregister(struct device *dev, | |||
| 316 | #endif | 376 | #endif |
| 317 | 377 | ||
| 318 | #if IS_ENABLED(CONFIG_THERMAL) | 378 | #if IS_ENABLED(CONFIG_THERMAL) |
| 379 | static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | ||
| 380 | { | ||
| 381 | return cdev->ops->get_requested_power && cdev->ops->state2power && | ||
| 382 | cdev->ops->power2state; | ||
| 383 | } | ||
| 384 | |||
| 385 | int power_actor_get_max_power(struct thermal_cooling_device *, | ||
| 386 | struct thermal_zone_device *tz, u32 *max_power); | ||
| 387 | int power_actor_set_power(struct thermal_cooling_device *, | ||
| 388 | struct thermal_instance *, u32); | ||
| 319 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, | 389 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, |
| 320 | void *, struct thermal_zone_device_ops *, | 390 | void *, struct thermal_zone_device_ops *, |
| 321 | const struct thermal_zone_params *, int, int); | 391 | struct thermal_zone_params *, int, int); |
| 322 | void thermal_zone_device_unregister(struct thermal_zone_device *); | 392 | void thermal_zone_device_unregister(struct thermal_zone_device *); |
| 323 | 393 | ||
| 324 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, | 394 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, |
| 325 | struct thermal_cooling_device *, | 395 | struct thermal_cooling_device *, |
| 326 | unsigned long, unsigned long); | 396 | unsigned long, unsigned long, |
| 397 | unsigned int); | ||
| 327 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, | 398 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, |
| 328 | struct thermal_cooling_device *); | 399 | struct thermal_cooling_device *); |
| 329 | void thermal_zone_device_update(struct thermal_zone_device *); | 400 | void thermal_zone_device_update(struct thermal_zone_device *); |
| @@ -343,6 +414,14 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, | |||
| 343 | void thermal_cdev_update(struct thermal_cooling_device *); | 414 | void thermal_cdev_update(struct thermal_cooling_device *); |
| 344 | void thermal_notify_framework(struct thermal_zone_device *, int); | 415 | void thermal_notify_framework(struct thermal_zone_device *, int); |
| 345 | #else | 416 | #else |
| 417 | static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) | ||
| 418 | { return false; } | ||
| 419 | static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, | ||
| 420 | struct thermal_zone_device *tz, u32 *max_power) | ||
| 421 | { return 0; } | ||
| 422 | static inline int power_actor_set_power(struct thermal_cooling_device *cdev, | ||
| 423 | struct thermal_instance *tz, u32 power) | ||
| 424 | { return 0; } | ||
| 346 | static inline struct thermal_zone_device *thermal_zone_device_register( | 425 | static inline struct thermal_zone_device *thermal_zone_device_register( |
| 347 | const char *type, int trips, int mask, void *devdata, | 426 | const char *type, int trips, int mask, void *devdata, |
| 348 | struct thermal_zone_device_ops *ops, | 427 | struct thermal_zone_device_ops *ops, |
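A cooling device becomes a "power actor" for the new power_allocator governor once it implements all three power callbacks, which is exactly what cdev_is_power_actor() checks. A hedged sketch with the signatures taken from struct thermal_cooling_device_ops above; the my_* functions and the returned values are illustrative:

/* Sketch: minimal power-actor-capable cooling device ops. */
static int my_get_requested_power(struct thermal_cooling_device *cdev,
                                  struct thermal_zone_device *tz, u32 *power)
{
        *power = 0;     /* current power draw, in mW */
        return 0;
}

static int my_state2power(struct thermal_cooling_device *cdev,
                          struct thermal_zone_device *tz,
                          unsigned long state, u32 *power)
{
        *power = 0;     /* maximum power (mW) drawn in @state */
        return 0;
}

static int my_power2state(struct thermal_cooling_device *cdev,
                          struct thermal_zone_device *tz,
                          u32 power, unsigned long *state)
{
        *state = 0;     /* deepest state that stays within @power */
        return 0;
}

static struct thermal_cooling_device_ops my_cdev_ops = {
        /* ... get_max_state/get_cur_state/set_cur_state as before ... */
        .get_requested_power = my_get_requested_power,
        .state2power         = my_state2power,
        .power2state         = my_power2state,
};
/* cdev_is_power_actor(cdev) is then true, so power_actor_get_max_power()
 * and power_actor_set_power() can drive this device.
 */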
diff --git a/include/linux/tick.h b/include/linux/tick.h index 4191b5623a28..edbfc9a5293e 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -13,8 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 14 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 15 | extern void __init tick_init(void); | 15 | extern void __init tick_init(void); |
| 16 | extern void tick_freeze(void); | ||
| 17 | extern void tick_unfreeze(void); | ||
| 18 | /* Should be core only, but ARM BL switcher requires it */ | 16 | /* Should be core only, but ARM BL switcher requires it */ |
| 19 | extern void tick_suspend_local(void); | 17 | extern void tick_suspend_local(void); |
| 20 | /* Should be core only, but XEN resume magic and ARM BL switcher require it */ | 18 | /* Should be core only, but XEN resume magic and ARM BL switcher require it */ |
| @@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void); | |||
| 23 | extern void tick_cleanup_dead_cpu(int cpu); | 21 | extern void tick_cleanup_dead_cpu(int cpu); |
| 24 | #else /* CONFIG_GENERIC_CLOCKEVENTS */ | 22 | #else /* CONFIG_GENERIC_CLOCKEVENTS */ |
| 25 | static inline void tick_init(void) { } | 23 | static inline void tick_init(void) { } |
| 26 | static inline void tick_freeze(void) { } | ||
| 27 | static inline void tick_unfreeze(void) { } | ||
| 28 | static inline void tick_suspend_local(void) { } | 24 | static inline void tick_suspend_local(void) { } |
| 29 | static inline void tick_resume_local(void) { } | 25 | static inline void tick_resume_local(void) { } |
| 30 | static inline void tick_handover_do_timer(void) { } | 26 | static inline void tick_handover_do_timer(void) { } |
| 31 | static inline void tick_cleanup_dead_cpu(int cpu) { } | 27 | static inline void tick_cleanup_dead_cpu(int cpu) { } |
| 32 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ | 28 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ |
| 33 | 29 | ||
| 30 | #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND) | ||
| 31 | extern void tick_freeze(void); | ||
| 32 | extern void tick_unfreeze(void); | ||
| 33 | #else | ||
| 34 | static inline void tick_freeze(void) { } | ||
| 35 | static inline void tick_unfreeze(void) { } | ||
| 36 | #endif | ||
| 37 | |||
| 34 | #ifdef CONFIG_TICK_ONESHOT | 38 | #ifdef CONFIG_TICK_ONESHOT |
| 35 | extern void tick_irq_enter(void); | 39 | extern void tick_irq_enter(void); |
| 36 | # ifndef arch_needs_cpu | 40 | # ifndef arch_needs_cpu |
| @@ -63,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); | |||
| 63 | static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } | 67 | static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } |
| 64 | #endif /* BROADCAST */ | 68 | #endif /* BROADCAST */ |
| 65 | 69 | ||
| 66 | #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) | 70 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 67 | extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); | 71 | extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); |
| 68 | #else | 72 | #else |
| 69 | static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } | 73 | static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) |
| 74 | { | ||
| 75 | return 0; | ||
| 76 | } | ||
| 70 | #endif | 77 | #endif |
| 71 | 78 | ||
| 72 | static inline void tick_broadcast_enable(void) | 79 | static inline void tick_broadcast_enable(void) |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 3aa72e648650..6e191e4e6ab6 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
| @@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts) | |||
| 145 | } | 145 | } |
| 146 | #endif | 146 | #endif |
| 147 | 147 | ||
| 148 | #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) | ||
| 149 | #define ktime_get_real_ts64(ts) getnstimeofday64(ts) | 148 | #define ktime_get_real_ts64(ts) getnstimeofday64(ts) |
| 150 | 149 | ||
| 151 | /* | 150 | /* |
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h index f9ecf63d47f1..1063c850dbab 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/trace_events.h | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | 1 | ||
| 2 | #ifndef _LINUX_FTRACE_EVENT_H | 2 | #ifndef _LINUX_TRACE_EVENT_H |
| 3 | #define _LINUX_FTRACE_EVENT_H | 3 | #define _LINUX_TRACE_EVENT_H |
| 4 | 4 | ||
| 5 | #include <linux/ring_buffer.h> | 5 | #include <linux/ring_buffer.h> |
| 6 | #include <linux/trace_seq.h> | 6 | #include <linux/trace_seq.h> |
| @@ -25,35 +25,35 @@ struct trace_print_flags_u64 { | |||
| 25 | const char *name; | 25 | const char *name; |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | 28 | const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, |
| 29 | unsigned long flags, | 29 | unsigned long flags, |
| 30 | const struct trace_print_flags *flag_array); | 30 | const struct trace_print_flags *flag_array); |
| 31 | 31 | ||
| 32 | const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | 32 | const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, |
| 33 | const struct trace_print_flags *symbol_array); | 33 | const struct trace_print_flags *symbol_array); |
| 34 | 34 | ||
| 35 | #if BITS_PER_LONG == 32 | 35 | #if BITS_PER_LONG == 32 |
| 36 | const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, | 36 | const char *trace_print_symbols_seq_u64(struct trace_seq *p, |
| 37 | unsigned long long val, | 37 | unsigned long long val, |
| 38 | const struct trace_print_flags_u64 | 38 | const struct trace_print_flags_u64 |
| 39 | *symbol_array); | 39 | *symbol_array); |
| 40 | #endif | 40 | #endif |
| 41 | 41 | ||
| 42 | const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, | 42 | const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, |
| 43 | unsigned int bitmask_size); | 43 | unsigned int bitmask_size); |
| 44 | 44 | ||
| 45 | const char *ftrace_print_hex_seq(struct trace_seq *p, | 45 | const char *trace_print_hex_seq(struct trace_seq *p, |
| 46 | const unsigned char *buf, int len); | 46 | const unsigned char *buf, int len); |
| 47 | 47 | ||
| 48 | const char *ftrace_print_array_seq(struct trace_seq *p, | 48 | const char *trace_print_array_seq(struct trace_seq *p, |
| 49 | const void *buf, int count, | 49 | const void *buf, int count, |
| 50 | size_t el_size); | 50 | size_t el_size); |
| 51 | 51 | ||
| 52 | struct trace_iterator; | 52 | struct trace_iterator; |
| 53 | struct trace_event; | 53 | struct trace_event; |
| 54 | 54 | ||
| 55 | int ftrace_raw_output_prep(struct trace_iterator *iter, | 55 | int trace_raw_output_prep(struct trace_iterator *iter, |
| 56 | struct trace_event *event); | 56 | struct trace_event *event); |
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * The trace entry - the most basic unit of tracing. This is what | 59 | * The trace entry - the most basic unit of tracing. This is what |
| @@ -68,7 +68,7 @@ struct trace_entry { | |||
| 68 | int pid; | 68 | int pid; |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | #define FTRACE_MAX_EVENT \ | 71 | #define TRACE_EVENT_TYPE_MAX \ |
| 72 | ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) | 72 | ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) |
| 73 | 73 | ||
| 74 | /* | 74 | /* |
| @@ -132,8 +132,8 @@ struct trace_event { | |||
| 132 | struct trace_event_functions *funcs; | 132 | struct trace_event_functions *funcs; |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | extern int register_ftrace_event(struct trace_event *event); | 135 | extern int register_trace_event(struct trace_event *event); |
| 136 | extern int unregister_ftrace_event(struct trace_event *event); | 136 | extern int unregister_trace_event(struct trace_event *event); |
| 137 | 137 | ||
| 138 | /* Return values for print_line callback */ | 138 | /* Return values for print_line callback */ |
| 139 | enum print_line_t { | 139 | enum print_line_t { |
| @@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s) | |||
| 157 | void tracing_generic_entry_update(struct trace_entry *entry, | 157 | void tracing_generic_entry_update(struct trace_entry *entry, |
| 158 | unsigned long flags, | 158 | unsigned long flags, |
| 159 | int pc); | 159 | int pc); |
| 160 | struct ftrace_event_file; | 160 | struct trace_event_file; |
| 161 | 161 | ||
| 162 | struct ring_buffer_event * | 162 | struct ring_buffer_event * |
| 163 | trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, | 163 | trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, |
| 164 | struct ftrace_event_file *ftrace_file, | 164 | struct trace_event_file *trace_file, |
| 165 | int type, unsigned long len, | 165 | int type, unsigned long len, |
| 166 | unsigned long flags, int pc); | 166 | unsigned long flags, int pc); |
| 167 | struct ring_buffer_event * | 167 | struct ring_buffer_event * |
| @@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer, | |||
| 183 | 183 | ||
| 184 | void tracing_record_cmdline(struct task_struct *tsk); | 184 | void tracing_record_cmdline(struct task_struct *tsk); |
| 185 | 185 | ||
| 186 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); | 186 | int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); |
| 187 | 187 | ||
| 188 | struct event_filter; | 188 | struct event_filter; |
| 189 | 189 | ||
| @@ -200,50 +200,39 @@ enum trace_reg { | |||
| 200 | #endif | 200 | #endif |
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | struct ftrace_event_call; | 203 | struct trace_event_call; |
| 204 | 204 | ||
| 205 | struct ftrace_event_class { | 205 | struct trace_event_class { |
| 206 | const char *system; | 206 | const char *system; |
| 207 | void *probe; | 207 | void *probe; |
| 208 | #ifdef CONFIG_PERF_EVENTS | 208 | #ifdef CONFIG_PERF_EVENTS |
| 209 | void *perf_probe; | 209 | void *perf_probe; |
| 210 | #endif | 210 | #endif |
| 211 | int (*reg)(struct ftrace_event_call *event, | 211 | int (*reg)(struct trace_event_call *event, |
| 212 | enum trace_reg type, void *data); | 212 | enum trace_reg type, void *data); |
| 213 | int (*define_fields)(struct ftrace_event_call *); | 213 | int (*define_fields)(struct trace_event_call *); |
| 214 | struct list_head *(*get_fields)(struct ftrace_event_call *); | 214 | struct list_head *(*get_fields)(struct trace_event_call *); |
| 215 | struct list_head fields; | 215 | struct list_head fields; |
| 216 | int (*raw_init)(struct ftrace_event_call *); | 216 | int (*raw_init)(struct trace_event_call *); |
| 217 | }; | 217 | }; |
| 218 | 218 | ||
| 219 | extern int ftrace_event_reg(struct ftrace_event_call *event, | 219 | extern int trace_event_reg(struct trace_event_call *event, |
| 220 | enum trace_reg type, void *data); | 220 | enum trace_reg type, void *data); |
| 221 | 221 | ||
| 222 | int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event, | 222 | struct trace_event_buffer { |
| 223 | char *fmt, ...); | ||
| 224 | |||
| 225 | int ftrace_event_define_field(struct ftrace_event_call *call, | ||
| 226 | char *type, int len, char *item, int offset, | ||
| 227 | int field_size, int sign, int filter); | ||
| 228 | |||
| 229 | struct ftrace_event_buffer { | ||
| 230 | struct ring_buffer *buffer; | 223 | struct ring_buffer *buffer; |
| 231 | struct ring_buffer_event *event; | 224 | struct ring_buffer_event *event; |
| 232 | struct ftrace_event_file *ftrace_file; | 225 | struct trace_event_file *trace_file; |
| 233 | void *entry; | 226 | void *entry; |
| 234 | unsigned long flags; | 227 | unsigned long flags; |
| 235 | int pc; | 228 | int pc; |
| 236 | }; | 229 | }; |
| 237 | 230 | ||
| 238 | void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | 231 | void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, |
| 239 | struct ftrace_event_file *ftrace_file, | 232 | struct trace_event_file *trace_file, |
| 240 | unsigned long len); | 233 | unsigned long len); |
| 241 | 234 | ||
| 242 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer); | 235 | void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); |
| 243 | |||
| 244 | int ftrace_event_define_field(struct ftrace_event_call *call, | ||
| 245 | char *type, int len, char *item, int offset, | ||
| 246 | int field_size, int sign, int filter); | ||
| 247 | 236 | ||
| 248 | enum { | 237 | enum { |
| 249 | TRACE_EVENT_FL_FILTERED_BIT, | 238 | TRACE_EVENT_FL_FILTERED_BIT, |
| @@ -261,11 +250,11 @@ enum { | |||
| 261 | * FILTERED - The event has a filter attached | 250 | * FILTERED - The event has a filter attached |
| 262 | * CAP_ANY - Any user can enable for perf | 251 | * CAP_ANY - Any user can enable for perf |
| 263 | * NO_SET_FILTER - Set when filter has error and is to be ignored | 252 | * NO_SET_FILTER - Set when filter has error and is to be ignored |
| 264 | * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file | 253 | * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file |
| 265 | * WAS_ENABLED - Set and stays set when an event was ever enabled | 254 | * WAS_ENABLED - Set and stays set when an event was ever enabled |
| 266 | * (used for module unloading, if a module event is enabled, | 255 | * (used for module unloading, if a module event is enabled, |
| 267 | * it is best to clear the buffers that used it). | 256 | * it is best to clear the buffers that used it). |
| 268 | * USE_CALL_FILTER - For ftrace internal events, don't use file filter | 257 | * USE_CALL_FILTER - For trace internal events, don't use file filter |
| 269 | * TRACEPOINT - Event is a tracepoint | 258 | * TRACEPOINT - Event is a tracepoint |
| 270 | * KPROBE - Event is a kprobe | 259 | * KPROBE - Event is a kprobe |
| 271 | */ | 260 | */ |
| @@ -280,9 +269,9 @@ enum { | |||
| 280 | TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), | 269 | TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), |
| 281 | }; | 270 | }; |
| 282 | 271 | ||
| 283 | struct ftrace_event_call { | 272 | struct trace_event_call { |
| 284 | struct list_head list; | 273 | struct list_head list; |
| 285 | struct ftrace_event_class *class; | 274 | struct trace_event_class *class; |
| 286 | union { | 275 | union { |
| 287 | char *name; | 276 | char *name; |
| 288 | /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ | 277 | /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ |
| @@ -297,7 +286,7 @@ struct ftrace_event_call { | |||
| 297 | * bit 0: filter_active | 286 | * bit 0: filter_active |
| 298 | * bit 1: allow trace by non root (cap any) | 287 | * bit 1: allow trace by non root (cap any) |
| 299 | * bit 2: failed to apply filter | 288 | * bit 2: failed to apply filter |
| 300 | * bit 3: ftrace internal event (do not enable) | 289 | * bit 3: trace internal event (do not enable) |
| 301 | * bit 4: Event was enabled by module | 290 | * bit 4: Event was enabled by module |
| 302 | * bit 5: use call filter rather than file filter | 291 | * bit 5: use call filter rather than file filter |
| 303 | * bit 6: Event is a tracepoint | 292 | * bit 6: Event is a tracepoint |
| @@ -309,13 +298,13 @@ struct ftrace_event_call { | |||
| 309 | struct hlist_head __percpu *perf_events; | 298 | struct hlist_head __percpu *perf_events; |
| 310 | struct bpf_prog *prog; | 299 | struct bpf_prog *prog; |
| 311 | 300 | ||
| 312 | int (*perf_perm)(struct ftrace_event_call *, | 301 | int (*perf_perm)(struct trace_event_call *, |
| 313 | struct perf_event *); | 302 | struct perf_event *); |
| 314 | #endif | 303 | #endif |
| 315 | }; | 304 | }; |
| 316 | 305 | ||
| 317 | static inline const char * | 306 | static inline const char * |
| 318 | ftrace_event_name(struct ftrace_event_call *call) | 307 | trace_event_name(struct trace_event_call *call) |
| 319 | { | 308 | { |
| 320 | if (call->flags & TRACE_EVENT_FL_TRACEPOINT) | 309 | if (call->flags & TRACE_EVENT_FL_TRACEPOINT) |
| 321 | return call->tp ? call->tp->name : NULL; | 310 | return call->tp ? call->tp->name : NULL; |
| @@ -324,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call) | |||
| 324 | } | 313 | } |
| 325 | 314 | ||
| 326 | struct trace_array; | 315 | struct trace_array; |
| 327 | struct ftrace_subsystem_dir; | 316 | struct trace_subsystem_dir; |
| 328 | 317 | ||
| 329 | enum { | 318 | enum { |
| 330 | FTRACE_EVENT_FL_ENABLED_BIT, | 319 | EVENT_FILE_FL_ENABLED_BIT, |
| 331 | FTRACE_EVENT_FL_RECORDED_CMD_BIT, | 320 | EVENT_FILE_FL_RECORDED_CMD_BIT, |
| 332 | FTRACE_EVENT_FL_FILTERED_BIT, | 321 | EVENT_FILE_FL_FILTERED_BIT, |
| 333 | FTRACE_EVENT_FL_NO_SET_FILTER_BIT, | 322 | EVENT_FILE_FL_NO_SET_FILTER_BIT, |
| 334 | FTRACE_EVENT_FL_SOFT_MODE_BIT, | 323 | EVENT_FILE_FL_SOFT_MODE_BIT, |
| 335 | FTRACE_EVENT_FL_SOFT_DISABLED_BIT, | 324 | EVENT_FILE_FL_SOFT_DISABLED_BIT, |
| 336 | FTRACE_EVENT_FL_TRIGGER_MODE_BIT, | 325 | EVENT_FILE_FL_TRIGGER_MODE_BIT, |
| 337 | FTRACE_EVENT_FL_TRIGGER_COND_BIT, | 326 | EVENT_FILE_FL_TRIGGER_COND_BIT, |
| 338 | }; | 327 | }; |
| 339 | 328 | ||
| 340 | /* | 329 | /* |
| 341 | * Ftrace event file flags: | 330 | * Event file flags: |
| 342 | * ENABLED - The event is enabled | 331 | * ENABLED - The event is enabled |
| 343 | * RECORDED_CMD - The comms should be recorded at sched_switch | 332 | * RECORDED_CMD - The comms should be recorded at sched_switch |
| 344 | * FILTERED - The event has a filter attached | 333 | * FILTERED - The event has a filter attached |
| @@ -350,23 +339,23 @@ enum { | |||
| 350 | * TRIGGER_COND - When set, one or more triggers has an associated filter | 339 | * TRIGGER_COND - When set, one or more triggers has an associated filter |
| 351 | */ | 340 | */ |
| 352 | enum { | 341 | enum { |
| 353 | FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), | 342 | EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), |
| 354 | FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), | 343 | EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT), |
| 355 | FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT), | 344 | EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT), |
| 356 | FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT), | 345 | EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT), |
| 357 | FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), | 346 | EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT), |
| 358 | FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), | 347 | EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), |
| 359 | FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT), | 348 | EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), |
| 360 | FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT), | 349 | EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), |
| 361 | }; | 350 | }; |
| 362 | 351 | ||
| 363 | struct ftrace_event_file { | 352 | struct trace_event_file { |
| 364 | struct list_head list; | 353 | struct list_head list; |
| 365 | struct ftrace_event_call *event_call; | 354 | struct trace_event_call *event_call; |
| 366 | struct event_filter *filter; | 355 | struct event_filter *filter; |
| 367 | struct dentry *dir; | 356 | struct dentry *dir; |
| 368 | struct trace_array *tr; | 357 | struct trace_array *tr; |
| 369 | struct ftrace_subsystem_dir *system; | 358 | struct trace_subsystem_dir *system; |
| 370 | struct list_head triggers; | 359 | struct list_head triggers; |
| 371 | 360 | ||
| 372 | /* | 361 | /* |
| @@ -399,7 +388,7 @@ struct ftrace_event_file { | |||
| 399 | early_initcall(trace_init_flags_##name); | 388 | early_initcall(trace_init_flags_##name); |
| 400 | 389 | ||
| 401 | #define __TRACE_EVENT_PERF_PERM(name, expr...) \ | 390 | #define __TRACE_EVENT_PERF_PERM(name, expr...) \ |
| 402 | static int perf_perm_##name(struct ftrace_event_call *tp_event, \ | 391 | static int perf_perm_##name(struct trace_event_call *tp_event, \ |
| 403 | struct perf_event *p_event) \ | 392 | struct perf_event *p_event) \ |
| 404 | { \ | 393 | { \ |
| 405 | return ({ expr; }); \ | 394 | return ({ expr; }); \ |
| @@ -425,19 +414,19 @@ enum event_trigger_type { | |||
| 425 | 414 | ||
| 426 | extern int filter_match_preds(struct event_filter *filter, void *rec); | 415 | extern int filter_match_preds(struct event_filter *filter, void *rec); |
| 427 | 416 | ||
| 428 | extern int filter_check_discard(struct ftrace_event_file *file, void *rec, | 417 | extern int filter_check_discard(struct trace_event_file *file, void *rec, |
| 429 | struct ring_buffer *buffer, | 418 | struct ring_buffer *buffer, |
| 430 | struct ring_buffer_event *event); | 419 | struct ring_buffer_event *event); |
| 431 | extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec, | 420 | extern int call_filter_check_discard(struct trace_event_call *call, void *rec, |
| 432 | struct ring_buffer *buffer, | 421 | struct ring_buffer *buffer, |
| 433 | struct ring_buffer_event *event); | 422 | struct ring_buffer_event *event); |
| 434 | extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file, | 423 | extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, |
| 435 | void *rec); | 424 | void *rec); |
| 436 | extern void event_triggers_post_call(struct ftrace_event_file *file, | 425 | extern void event_triggers_post_call(struct trace_event_file *file, |
| 437 | enum event_trigger_type tt); | 426 | enum event_trigger_type tt); |
| 438 | 427 | ||
| 439 | /** | 428 | /** |
| 440 | * ftrace_trigger_soft_disabled - do triggers and test if soft disabled | 429 | * trace_trigger_soft_disabled - do triggers and test if soft disabled |
| 441 | * @file: The file pointer of the event to test | 430 | * @file: The file pointer of the event to test |
| 442 | * | 431 | * |
| 443 | * If any triggers without filters are attached to this event, they | 432 | * If any triggers without filters are attached to this event, they |
| @@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file, | |||
| 446 | * otherwise false. | 435 | * otherwise false. |
| 447 | */ | 436 | */ |
| 448 | static inline bool | 437 | static inline bool |
| 449 | ftrace_trigger_soft_disabled(struct ftrace_event_file *file) | 438 | trace_trigger_soft_disabled(struct trace_event_file *file) |
| 450 | { | 439 | { |
| 451 | unsigned long eflags = file->flags; | 440 | unsigned long eflags = file->flags; |
| 452 | 441 | ||
| 453 | if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) { | 442 | if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { |
| 454 | if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE) | 443 | if (eflags & EVENT_FILE_FL_TRIGGER_MODE) |
| 455 | event_triggers_call(file, NULL); | 444 | event_triggers_call(file, NULL); |
| 456 | if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED) | 445 | if (eflags & EVENT_FILE_FL_SOFT_DISABLED) |
| 457 | return true; | 446 | return true; |
| 458 | } | 447 | } |
| 459 | return false; | 448 | return false; |
| @@ -473,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file) | |||
| 473 | * Returns true if the event is discarded, false otherwise. | 462 | * Returns true if the event is discarded, false otherwise. |
| 474 | */ | 463 | */ |
| 475 | static inline bool | 464 | static inline bool |
| 476 | __event_trigger_test_discard(struct ftrace_event_file *file, | 465 | __event_trigger_test_discard(struct trace_event_file *file, |
| 477 | struct ring_buffer *buffer, | 466 | struct ring_buffer *buffer, |
| 478 | struct ring_buffer_event *event, | 467 | struct ring_buffer_event *event, |
| 479 | void *entry, | 468 | void *entry, |
| @@ -481,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file, | |||
| 481 | { | 470 | { |
| 482 | unsigned long eflags = file->flags; | 471 | unsigned long eflags = file->flags; |
| 483 | 472 | ||
| 484 | if (eflags & FTRACE_EVENT_FL_TRIGGER_COND) | 473 | if (eflags & EVENT_FILE_FL_TRIGGER_COND) |
| 485 | *tt = event_triggers_call(file, entry); | 474 | *tt = event_triggers_call(file, entry); |
| 486 | 475 | ||
| 487 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags)) | 476 | if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags)) |
| 488 | ring_buffer_discard_commit(buffer, event); | 477 | ring_buffer_discard_commit(buffer, event); |
| 489 | else if (!filter_check_discard(file, entry, buffer, event)) | 478 | else if (!filter_check_discard(file, entry, buffer, event)) |
| 490 | return false; | 479 | return false; |
| @@ -506,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file, | |||
| 506 | * if the event is soft disabled and should be discarded. | 495 | * if the event is soft disabled and should be discarded. |
| 507 | */ | 496 | */ |
| 508 | static inline void | 497 | static inline void |
| 509 | event_trigger_unlock_commit(struct ftrace_event_file *file, | 498 | event_trigger_unlock_commit(struct trace_event_file *file, |
| 510 | struct ring_buffer *buffer, | 499 | struct ring_buffer *buffer, |
| 511 | struct ring_buffer_event *event, | 500 | struct ring_buffer_event *event, |
| 512 | void *entry, unsigned long irq_flags, int pc) | 501 | void *entry, unsigned long irq_flags, int pc) |
| @@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file, | |||
| 537 | * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). | 526 | * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit(). |
| 538 | */ | 527 | */ |
| 539 | static inline void | 528 | static inline void |
| 540 | event_trigger_unlock_commit_regs(struct ftrace_event_file *file, | 529 | event_trigger_unlock_commit_regs(struct trace_event_file *file, |
| 541 | struct ring_buffer *buffer, | 530 | struct ring_buffer *buffer, |
| 542 | struct ring_buffer_event *event, | 531 | struct ring_buffer_event *event, |
| 543 | void *entry, unsigned long irq_flags, int pc, | 532 | void *entry, unsigned long irq_flags, int pc, |
| @@ -570,12 +559,12 @@ enum { | |||
| 570 | FILTER_TRACE_FN, | 559 | FILTER_TRACE_FN, |
| 571 | }; | 560 | }; |
| 572 | 561 | ||
| 573 | extern int trace_event_raw_init(struct ftrace_event_call *call); | 562 | extern int trace_event_raw_init(struct trace_event_call *call); |
| 574 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, | 563 | extern int trace_define_field(struct trace_event_call *call, const char *type, |
| 575 | const char *name, int offset, int size, | 564 | const char *name, int offset, int size, |
| 576 | int is_signed, int filter_type); | 565 | int is_signed, int filter_type); |
| 577 | extern int trace_add_event_call(struct ftrace_event_call *call); | 566 | extern int trace_add_event_call(struct trace_event_call *call); |
| 578 | extern int trace_remove_event_call(struct ftrace_event_call *call); | 567 | extern int trace_remove_event_call(struct trace_event_call *call); |
| 579 | 568 | ||
| 580 | #define is_signed_type(type) (((type)(-1)) < (type)1) | 569 | #define is_signed_type(type) (((type)(-1)) < (type)1) |
| 581 | 570 | ||
| @@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, | |||
| 624 | } | 613 | } |
| 625 | #endif | 614 | #endif |
| 626 | 615 | ||
| 627 | #endif /* _LINUX_FTRACE_EVENT_H */ | 616 | #endif /* _LINUX_TRACE_EVENT_H */ |
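The rename above covers the helpers an event probe uses to reserve and commit a ring-buffer record. A minimal sketch of how such a probe reads with the new names; the entry layout and probe signature are made up for illustration (in practice this boilerplate is emitted by the TRACE_EVENT() macros), and only the reserve/commit/soft-disable signatures come from this header:

```c
/* #include of the renamed header is assumed; path may differ per tree. */

/* Hypothetical fixed-size event record; only 'ent' is required. */
struct my_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static void my_event_probe(struct trace_event_file *trace_file,
			   unsigned long value)
{
	struct trace_event_buffer fbuffer;
	struct my_entry *entry;

	/* run unconditional triggers and bail out if soft-disabled */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	trace_event_buffer_commit(&fbuffer);
}
```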
diff --git a/include/linux/tty.h b/include/linux/tty.h index d76631f615c2..ad6c8913aa3e 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -422,7 +422,7 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) | |||
| 422 | 422 | ||
| 423 | extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, | 423 | extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, |
| 424 | const char *routine); | 424 | const char *routine); |
| 425 | extern char *tty_name(struct tty_struct *tty, char *buf); | 425 | extern const char *tty_name(const struct tty_struct *tty); |
| 426 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); | 426 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); |
| 427 | extern int tty_check_change(struct tty_struct *tty); | 427 | extern int tty_check_change(struct tty_struct *tty); |
| 428 | extern void __stop_tty(struct tty_struct *tty); | 428 | extern void __stop_tty(struct tty_struct *tty); |
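With the scratch-buffer argument gone and the return value const-qualified, callers can log the tty name directly. A small illustrative caller (the function and message are invented):

```c
#include <linux/tty.h>
#include <linux/printk.h>

static void my_report_flow_stop(struct tty_struct *tty)
{
	/* tty_name() now returns a const string; no local buffer needed */
	pr_warn("flow stopped on %s\n", tty_name(tty));
}
```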
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 4b4439e75f45..df89c9bcba7d 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h | |||
| @@ -68,11 +68,12 @@ struct u64_stats_sync { | |||
| 68 | }; | 68 | }; |
| 69 | 69 | ||
| 70 | 70 | ||
| 71 | static inline void u64_stats_init(struct u64_stats_sync *syncp) | ||
| 72 | { | ||
| 71 | #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) | 73 | #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) |
| 72 | # define u64_stats_init(syncp) seqcount_init(syncp.seq) | 74 | seqcount_init(&syncp->seq); |
| 73 | #else | ||
| 74 | # define u64_stats_init(syncp) do { } while (0) | ||
| 75 | #endif | 75 | #endif |
| 76 | } | ||
| 76 | 77 | ||
| 77 | static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) | 78 | static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) |
| 78 | { | 79 | { |
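Turning u64_stats_init() into a real inline (rather than a macro that only expanded on 32-bit SMP) means the argument is now a properly typed pointer on every configuration. A minimal sketch of the usual pattern, with an illustrative stats structure:

```c
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void my_stats_setup(struct my_stats *s)
{
	u64_stats_init(&s->syncp);	/* type-checked on all configs now */
}

static void my_stats_add_rx(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}
```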
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h new file mode 100644 index 000000000000..388f6e08b9d4 --- /dev/null +++ b/include/linux/ulpi/driver.h | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | #ifndef __LINUX_ULPI_DRIVER_H | ||
| 2 | #define __LINUX_ULPI_DRIVER_H | ||
| 3 | |||
| 4 | #include <linux/mod_devicetable.h> | ||
| 5 | |||
| 6 | #include <linux/device.h> | ||
| 7 | |||
| 8 | struct ulpi_ops; | ||
| 9 | |||
| 10 | /** | ||
| 11 | * struct ulpi - describes ULPI PHY device | ||
| 12 | * @id: vendor and product ids for ULPI device | ||
| 13 | * @ops: I/O access | ||
| 14 | * @dev: device interface | ||
| 15 | */ | ||
| 16 | struct ulpi { | ||
| 17 | struct ulpi_device_id id; | ||
| 18 | struct ulpi_ops *ops; | ||
| 19 | struct device dev; | ||
| 20 | }; | ||
| 21 | |||
| 22 | #define to_ulpi_dev(d) container_of(d, struct ulpi, dev) | ||
| 23 | |||
| 24 | static inline void ulpi_set_drvdata(struct ulpi *ulpi, void *data) | ||
| 25 | { | ||
| 26 | dev_set_drvdata(&ulpi->dev, data); | ||
| 27 | } | ||
| 28 | |||
| 29 | static inline void *ulpi_get_drvdata(struct ulpi *ulpi) | ||
| 30 | { | ||
| 31 | return dev_get_drvdata(&ulpi->dev); | ||
| 32 | } | ||
| 33 | |||
| 34 | /** | ||
| 35 | * struct ulpi_driver - describes a ULPI PHY driver | ||
| 36 | * @id_table: array of device identifiers supported by this driver | ||
| 37 | * @probe: binds this driver to ULPI device | ||
| 38 | * @remove: unbinds this driver from ULPI device | ||
| 39 | * @driver: the name and owner members must be initialized by the drivers | ||
| 40 | */ | ||
| 41 | struct ulpi_driver { | ||
| 42 | const struct ulpi_device_id *id_table; | ||
| 43 | int (*probe)(struct ulpi *ulpi); | ||
| 44 | void (*remove)(struct ulpi *ulpi); | ||
| 45 | struct device_driver driver; | ||
| 46 | }; | ||
| 47 | |||
| 48 | #define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver) | ||
| 49 | |||
| 50 | int ulpi_register_driver(struct ulpi_driver *drv); | ||
| 51 | void ulpi_unregister_driver(struct ulpi_driver *drv); | ||
| 52 | |||
| 53 | #define module_ulpi_driver(__ulpi_driver) \ | ||
| 54 | module_driver(__ulpi_driver, ulpi_register_driver, \ | ||
| 55 | ulpi_unregister_driver) | ||
| 56 | |||
| 57 | int ulpi_read(struct ulpi *ulpi, u8 addr); | ||
| 58 | int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val); | ||
| 59 | |||
| 60 | #endif /* __LINUX_ULPI_DRIVER_H */ | ||
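A hypothetical PHY driver skeleton against this new interface; the vendor/product IDs and driver name are placeholders, and ULPI_VENDOR_ID_LOW comes from the companion regs.h added below:

```c
#include <linux/module.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>

static int my_phy_probe(struct ulpi *ulpi)
{
	int ret = ulpi_read(ulpi, ULPI_VENDOR_ID_LOW);

	if (ret < 0)
		return ret;

	ulpi_set_drvdata(ulpi, NULL);	/* would normally stash driver state */
	return 0;
}

static void my_phy_remove(struct ulpi *ulpi)
{
	/* undo whatever probe set up */
}

static const struct ulpi_device_id my_phy_ids[] = {
	{ 0x0424, 0x0006 },		/* placeholder vendor, product */
	{ },
};

static struct ulpi_driver my_phy_driver = {
	.id_table	= my_phy_ids,
	.probe		= my_phy_probe,
	.remove		= my_phy_remove,
	.driver		= {
		.name	= "my-ulpi-phy",
		.owner	= THIS_MODULE,	/* required per the kerneldoc above */
	},
};
module_ulpi_driver(my_phy_driver);

MODULE_LICENSE("GPL");
```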
diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h new file mode 100644 index 000000000000..4de8ab491038 --- /dev/null +++ b/include/linux/ulpi/interface.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | #ifndef __LINUX_ULPI_INTERFACE_H | ||
| 2 | #define __LINUX_ULPI_INTERFACE_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | struct ulpi; | ||
| 7 | |||
| 8 | /** | ||
| 9 | * struct ulpi_ops - ULPI register access | ||
| 10 | * @dev: the interface provider | ||
| 11 | * @read: read operation for ULPI register access | ||
| 12 | * @write: write operation for ULPI register access | ||
| 13 | */ | ||
| 14 | struct ulpi_ops { | ||
| 15 | struct device *dev; | ||
| 16 | int (*read)(struct ulpi_ops *ops, u8 addr); | ||
| 17 | int (*write)(struct ulpi_ops *ops, u8 addr, u8 val); | ||
| 18 | }; | ||
| 19 | |||
| 20 | struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *); | ||
| 21 | void ulpi_unregister_interface(struct ulpi *); | ||
| 22 | |||
| 23 | #endif /* __LINUX_ULPI_INTERFACE_H */ | ||
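On the controller side, the glue supplies the read/write callbacks and registers them. A sketch under assumptions: the register-access bodies are placeholders for the controller's ULPI viewport code, and ERR_PTR()-style error reporting from ulpi_register_interface() is assumed:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ulpi/interface.h>

static int my_ulpi_read(struct ulpi_ops *ops, u8 addr)
{
	/* issue a ULPI viewport read on the controller behind ops->dev */
	return 0;
}

static int my_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val)
{
	/* issue the matching viewport write */
	return 0;
}

static struct ulpi_ops my_ulpi_ops = {
	.read	= my_ulpi_read,
	.write	= my_ulpi_write,
};

static int my_controller_register_phy(struct device *dev, struct ulpi **out)
{
	struct ulpi *ulpi = ulpi_register_interface(dev, &my_ulpi_ops);

	if (IS_ERR(ulpi))		/* assumed: ERR_PTR on failure */
		return PTR_ERR(ulpi);

	*out = ulpi;
	return 0;
}
```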
diff --git a/include/linux/ulpi/regs.h b/include/linux/ulpi/regs.h new file mode 100644 index 000000000000..b5b8b8804560 --- /dev/null +++ b/include/linux/ulpi/regs.h | |||
| @@ -0,0 +1,130 @@ | |||
| 1 | #ifndef __LINUX_ULPI_REGS_H | ||
| 2 | #define __LINUX_ULPI_REGS_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Macros for Set and Clear | ||
| 6 | * See ULPI 1.1 specification to find the registers with Set and Clear offsets | ||
| 7 | */ | ||
| 8 | #define ULPI_SET(a) (a + 1) | ||
| 9 | #define ULPI_CLR(a) (a + 2) | ||
| 10 | |||
| 11 | /* | ||
| 12 | * Register Map | ||
| 13 | */ | ||
| 14 | #define ULPI_VENDOR_ID_LOW 0x00 | ||
| 15 | #define ULPI_VENDOR_ID_HIGH 0x01 | ||
| 16 | #define ULPI_PRODUCT_ID_LOW 0x02 | ||
| 17 | #define ULPI_PRODUCT_ID_HIGH 0x03 | ||
| 18 | #define ULPI_FUNC_CTRL 0x04 | ||
| 19 | #define ULPI_IFC_CTRL 0x07 | ||
| 20 | #define ULPI_OTG_CTRL 0x0a | ||
| 21 | #define ULPI_USB_INT_EN_RISE 0x0d | ||
| 22 | #define ULPI_USB_INT_EN_FALL 0x10 | ||
| 23 | #define ULPI_USB_INT_STS 0x13 | ||
| 24 | #define ULPI_USB_INT_LATCH 0x14 | ||
| 25 | #define ULPI_DEBUG 0x15 | ||
| 26 | #define ULPI_SCRATCH 0x16 | ||
| 27 | /* Optional Carkit Registers */ | ||
| 28 | #define ULPI_CARKIT_CTRL 0x19 | ||
| 29 | #define ULPI_CARKIT_INT_DELAY 0x1c | ||
| 30 | #define ULPI_CARKIT_INT_EN 0x1d | ||
| 31 | #define ULPI_CARKIT_INT_STS 0x20 | ||
| 32 | #define ULPI_CARKIT_INT_LATCH 0x21 | ||
| 33 | #define ULPI_CARKIT_PLS_CTRL 0x22 | ||
| 34 | /* Other Optional Registers */ | ||
| 35 | #define ULPI_TX_POS_WIDTH 0x25 | ||
| 36 | #define ULPI_TX_NEG_WIDTH 0x26 | ||
| 37 | #define ULPI_POLARITY_RECOVERY 0x27 | ||
| 38 | /* Access Extended Register Set */ | ||
| 39 | #define ULPI_ACCESS_EXTENDED 0x2f | ||
| 40 | /* Vendor Specific */ | ||
| 41 | #define ULPI_VENDOR_SPECIFIC 0x30 | ||
| 42 | /* Extended Registers */ | ||
| 43 | #define ULPI_EXT_VENDOR_SPECIFIC 0x80 | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Register Bits | ||
| 47 | */ | ||
| 48 | |||
| 49 | /* Function Control */ | ||
| 50 | #define ULPI_FUNC_CTRL_XCVRSEL BIT(0) | ||
| 51 | #define ULPI_FUNC_CTRL_XCVRSEL_MASK 0x3 | ||
| 52 | #define ULPI_FUNC_CTRL_HIGH_SPEED 0x0 | ||
| 53 | #define ULPI_FUNC_CTRL_FULL_SPEED 0x1 | ||
| 54 | #define ULPI_FUNC_CTRL_LOW_SPEED 0x2 | ||
| 55 | #define ULPI_FUNC_CTRL_FS4LS 0x3 | ||
| 56 | #define ULPI_FUNC_CTRL_TERMSELECT BIT(2) | ||
| 57 | #define ULPI_FUNC_CTRL_OPMODE BIT(3) | ||
| 58 | #define ULPI_FUNC_CTRL_OPMODE_MASK (0x3 << 3) | ||
| 59 | #define ULPI_FUNC_CTRL_OPMODE_NORMAL (0x0 << 3) | ||
| 60 | #define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (0x1 << 3) | ||
| 61 | #define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (0x2 << 3) | ||
| 62 | #define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (0x3 << 3) | ||
| 63 | #define ULPI_FUNC_CTRL_RESET BIT(5) | ||
| 64 | #define ULPI_FUNC_CTRL_SUSPENDM BIT(6) | ||
| 65 | |||
| 66 | /* Interface Control */ | ||
| 67 | #define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE BIT(0) | ||
| 68 | #define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE BIT(1) | ||
| 69 | #define ULPI_IFC_CTRL_CARKITMODE BIT(2) | ||
| 70 | #define ULPI_IFC_CTRL_CLOCKSUSPENDM BIT(3) | ||
| 71 | #define ULPI_IFC_CTRL_AUTORESUME BIT(4) | ||
| 72 | #define ULPI_IFC_CTRL_EXTERNAL_VBUS BIT(5) | ||
| 73 | #define ULPI_IFC_CTRL_PASSTHRU BIT(6) | ||
| 74 | #define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE BIT(7) | ||
| 75 | |||
| 76 | /* OTG Control */ | ||
| 77 | #define ULPI_OTG_CTRL_ID_PULLUP BIT(0) | ||
| 78 | #define ULPI_OTG_CTRL_DP_PULLDOWN BIT(1) | ||
| 79 | #define ULPI_OTG_CTRL_DM_PULLDOWN BIT(2) | ||
| 80 | #define ULPI_OTG_CTRL_DISCHRGVBUS BIT(3) | ||
| 81 | #define ULPI_OTG_CTRL_CHRGVBUS BIT(4) | ||
| 82 | #define ULPI_OTG_CTRL_DRVVBUS BIT(5) | ||
| 83 | #define ULPI_OTG_CTRL_DRVVBUS_EXT BIT(6) | ||
| 84 | #define ULPI_OTG_CTRL_EXTVBUSIND BIT(7) | ||
| 85 | |||
| 86 | /* USB Interrupt Enable Rising, | ||
| 87 | * USB Interrupt Enable Falling, | ||
| 88 | * USB Interrupt Status and | ||
| 89 | * USB Interrupt Latch | ||
| 90 | */ | ||
| 91 | #define ULPI_INT_HOST_DISCONNECT BIT(0) | ||
| 92 | #define ULPI_INT_VBUS_VALID BIT(1) | ||
| 93 | #define ULPI_INT_SESS_VALID BIT(2) | ||
| 94 | #define ULPI_INT_SESS_END BIT(3) | ||
| 95 | #define ULPI_INT_IDGRD BIT(4) | ||
| 96 | |||
| 97 | /* Debug */ | ||
| 98 | #define ULPI_DEBUG_LINESTATE0 BIT(0) | ||
| 99 | #define ULPI_DEBUG_LINESTATE1 BIT(1) | ||
| 100 | |||
| 101 | /* Carkit Control */ | ||
| 102 | #define ULPI_CARKIT_CTRL_CARKITPWR BIT(0) | ||
| 103 | #define ULPI_CARKIT_CTRL_IDGNDDRV BIT(1) | ||
| 104 | #define ULPI_CARKIT_CTRL_TXDEN BIT(2) | ||
| 105 | #define ULPI_CARKIT_CTRL_RXDEN BIT(3) | ||
| 106 | #define ULPI_CARKIT_CTRL_SPKLEFTEN BIT(4) | ||
| 107 | #define ULPI_CARKIT_CTRL_SPKRIGHTEN BIT(5) | ||
| 108 | #define ULPI_CARKIT_CTRL_MICEN BIT(6) | ||
| 109 | |||
| 110 | /* Carkit Interrupt Enable */ | ||
| 111 | #define ULPI_CARKIT_INT_EN_IDFLOAT_RISE BIT(0) | ||
| 112 | #define ULPI_CARKIT_INT_EN_IDFLOAT_FALL BIT(1) | ||
| 113 | #define ULPI_CARKIT_INT_EN_CARINTDET BIT(2) | ||
| 114 | #define ULPI_CARKIT_INT_EN_DP_RISE BIT(3) | ||
| 115 | #define ULPI_CARKIT_INT_EN_DP_FALL BIT(4) | ||
| 116 | |||
| 117 | /* Carkit Interrupt Status and | ||
| 118 | * Carkit Interrupt Latch | ||
| 119 | */ | ||
| 120 | #define ULPI_CARKIT_INT_IDFLOAT BIT(0) | ||
| 121 | #define ULPI_CARKIT_INT_CARINTDET BIT(1) | ||
| 122 | #define ULPI_CARKIT_INT_DP BIT(2) | ||
| 123 | |||
| 124 | /* Carkit Pulse Control*/ | ||
| 125 | #define ULPI_CARKIT_PLS_CTRL_TXPLSEN BIT(0) | ||
| 126 | #define ULPI_CARKIT_PLS_CTRL_RXPLSEN BIT(1) | ||
| 127 | #define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN BIT(2) | ||
| 128 | #define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN BIT(3) | ||
| 129 | |||
| 130 | #endif /* __LINUX_ULPI_REGS_H */ | ||
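The set/clear offsets let a driver flip individual bits without a read-modify-write cycle. An illustrative VBUS toggle using ulpi_write() from the new driver.h (the function names are invented):

```c
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>

static int my_vbus_on(struct ulpi *ulpi)
{
	/* write-1-to-set shadow of OTG_CTRL */
	return ulpi_write(ulpi, ULPI_SET(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DRVVBUS);
}

static int my_vbus_off(struct ulpi *ulpi)
{
	/* write-1-to-clear shadow of OTG_CTRL */
	return ulpi_write(ulpi, ULPI_CLR(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DRVVBUS);
}
```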
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 7c9b484735c5..1f6526c76ee8 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
| @@ -80,6 +80,9 @@ | |||
| 80 | #define CDC_NCM_TIMER_INTERVAL_MIN 5UL | 80 | #define CDC_NCM_TIMER_INTERVAL_MIN 5UL |
| 81 | #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) | 81 | #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) |
| 82 | 82 | ||
| 83 | /* Driver flags */ | ||
| 84 | #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ | ||
| 85 | |||
| 83 | #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ | 86 | #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ |
| 84 | (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) | 87 | (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) |
| 85 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) | 88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) |
| @@ -103,9 +106,11 @@ struct cdc_ncm_ctx { | |||
| 103 | 106 | ||
| 104 | spinlock_t mtx; | 107 | spinlock_t mtx; |
| 105 | atomic_t stop; | 108 | atomic_t stop; |
| 109 | int drvflags; | ||
| 106 | 110 | ||
| 107 | u32 timer_interval; | 111 | u32 timer_interval; |
| 108 | u32 max_ndp_size; | 112 | u32 max_ndp_size; |
| 113 | struct usb_cdc_ncm_ndp16 *delayed_ndp16; | ||
| 109 | 114 | ||
| 110 | u32 tx_timer_pending; | 115 | u32 tx_timer_pending; |
| 111 | u32 tx_curr_frame_num; | 116 | u32 tx_curr_frame_num; |
| @@ -133,7 +138,7 @@ struct cdc_ncm_ctx { | |||
| 133 | }; | 138 | }; |
| 134 | 139 | ||
| 135 | u8 cdc_ncm_select_altsetting(struct usb_interface *intf); | 140 | u8 cdc_ncm_select_altsetting(struct usb_interface *intf); |
| 136 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); | 141 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); |
| 137 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); | 142 | void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); |
| 138 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); | 143 | struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); |
| 139 | int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); | 144 | int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); |
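The new drvflags argument lets a minidriver request the NDP-at-end quirk at bind time. A hypothetical usbnet bind callback for hardware whose firmware expects the NDP at the tail of each NTB:

```c
#include <linux/usb/cdc_ncm.h>

static int my_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
	/* place the NDP at the end of the frame for this device */
	return cdc_ncm_bind_common(dev, intf, cdc_ncm_select_altsetting(intf),
				   CDC_NCM_FLAG_NDP_TO_END);
}
```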
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 68b1e836dff1..c9aa7792de10 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -622,8 +622,6 @@ extern struct list_head usb_bus_list; | |||
| 622 | extern struct mutex usb_bus_list_lock; | 622 | extern struct mutex usb_bus_list_lock; |
| 623 | extern wait_queue_head_t usb_kill_urb_queue; | 623 | extern wait_queue_head_t usb_kill_urb_queue; |
| 624 | 624 | ||
| 625 | extern int usb_find_interface_driver(struct usb_device *dev, | ||
| 626 | struct usb_interface *interface); | ||
| 627 | 625 | ||
| 628 | #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) | 626 | #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) |
| 629 | 627 | ||
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index 7dbecf9a4656..e55a1504266e 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #ifndef __ASM_ARCH_MSM_HSUSB_H | 18 | #ifndef __ASM_ARCH_MSM_HSUSB_H |
| 19 | #define __ASM_ARCH_MSM_HSUSB_H | 19 | #define __ASM_ARCH_MSM_HSUSB_H |
| 20 | 20 | ||
| 21 | #include <linux/extcon.h> | ||
| 21 | #include <linux/types.h> | 22 | #include <linux/types.h> |
| 22 | #include <linux/usb/otg.h> | 23 | #include <linux/usb/otg.h> |
| 23 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
| @@ -120,6 +121,17 @@ struct msm_otg_platform_data { | |||
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | /** | 123 | /** |
| 124 | * struct msm_usb_cable - structure for external connector cable | ||
| 125 | * state tracking | ||
| 126 | * @nb: holds the event notification callback | ||
| 127 | * @conn: used for notification registration | ||
| 128 | */ | ||
| 129 | struct msm_usb_cable { | ||
| 130 | struct notifier_block nb; | ||
| 131 | struct extcon_specific_cable_nb conn; | ||
| 132 | }; | ||
| 133 | |||
| 134 | /** | ||
| 123 | * struct msm_otg: OTG driver data. Shared by HCD and DCD. | 135 | * struct msm_otg: OTG driver data. Shared by HCD and DCD. |
| 124 | * @otg: USB OTG Transceiver structure. | 136 | * @otg: USB OTG Transceiver structure. |
| 125 | * @pdata: otg device platform data. | 137 | * @pdata: otg device platform data. |
| @@ -138,6 +150,11 @@ struct msm_otg_platform_data { | |||
| 138 | * @chg_type: The type of charger attached. | 150 | * @chg_type: The type of charger attached. |
| 139 | * @dcd_retires: The retry count used to track Data contact | 151 | * @dcd_retires: The retry count used to track Data contact |
| 140 | * detection process. | 152 | * detection process. |
| 153 | * @manual_pullup: true if VBUS is not routed to USB controller/phy | ||
| 154 | * and controller driver therefore enables pull-up explicitly before | ||
| 155 | * starting controller using usbcmd run/stop bit. | ||
| 156 | * @vbus: VBUS signal state tracking, using extcon framework | ||
| 157 | * @id: ID signal state tracking, using extcon framework | ||
| 141 | */ | 158 | */ |
| 142 | struct msm_otg { | 159 | struct msm_otg { |
| 143 | struct usb_phy phy; | 160 | struct usb_phy phy; |
| @@ -166,6 +183,11 @@ struct msm_otg { | |||
| 166 | struct reset_control *phy_rst; | 183 | struct reset_control *phy_rst; |
| 167 | struct reset_control *link_rst; | 184 | struct reset_control *link_rst; |
| 168 | int vdd_levels[3]; | 185 | int vdd_levels[3]; |
| 186 | |||
| 187 | bool manual_pullup; | ||
| 188 | |||
| 189 | struct msm_usb_cable vbus; | ||
| 190 | struct msm_usb_cable id; | ||
| 169 | }; | 191 | }; |
| 170 | 192 | ||
| 171 | #endif | 193 | #endif |
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index a29f6030afb1..e159b39f67a2 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | 21 | ||
| 22 | #define USB_AHBBURST (MSM_USB_BASE + 0x0090) | 22 | #define USB_AHBBURST (MSM_USB_BASE + 0x0090) |
| 23 | #define USB_AHBMODE (MSM_USB_BASE + 0x0098) | 23 | #define USB_AHBMODE (MSM_USB_BASE + 0x0098) |
| 24 | #define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0) | ||
| 25 | |||
| 24 | #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ | 26 | #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ |
| 25 | 27 | ||
| 26 | #define USB_USBCMD (MSM_USB_BASE + 0x0140) | 28 | #define USB_USBCMD (MSM_USB_BASE + 0x0140) |
| @@ -30,6 +32,9 @@ | |||
| 30 | #define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) | 32 | #define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) |
| 31 | #define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) | 33 | #define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) |
| 32 | 34 | ||
| 35 | #define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7) | ||
| 36 | #define USBCMD_SESS_VLD_CTRL BIT(25) | ||
| 37 | |||
| 33 | #define USBCMD_RESET 2 | 38 | #define USBCMD_RESET 2 |
| 34 | #define USB_USBINTR (MSM_USB_BASE + 0x0148) | 39 | #define USB_USBINTR (MSM_USB_BASE + 0x0148) |
| 35 | 40 | ||
| @@ -50,6 +55,10 @@ | |||
| 50 | #define ULPI_PWR_CLK_MNG_REG 0x88 | 55 | #define ULPI_PWR_CLK_MNG_REG 0x88 |
| 51 | #define OTG_COMP_DISABLE BIT(0) | 56 | #define OTG_COMP_DISABLE BIT(0) |
| 52 | 57 | ||
| 58 | #define ULPI_MISC_A 0x96 | ||
| 59 | #define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1) | ||
| 60 | #define ULPI_MISC_A_VBUSVLDEXT BIT(0) | ||
| 61 | |||
| 53 | #define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ | 62 | #define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ |
| 54 | #define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ | 63 | #define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ |
| 55 | #define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ | 64 | #define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ |
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h index 148b8fa5b1a2..725120224472 100644 --- a/include/linux/usb/net2280.h +++ b/include/linux/usb/net2280.h | |||
| @@ -168,6 +168,9 @@ struct net2280_regs { | |||
| 168 | #define ENDPOINT_B_INTERRUPT 2 | 168 | #define ENDPOINT_B_INTERRUPT 2 |
| 169 | #define ENDPOINT_A_INTERRUPT 1 | 169 | #define ENDPOINT_A_INTERRUPT 1 |
| 170 | #define ENDPOINT_0_INTERRUPT 0 | 170 | #define ENDPOINT_0_INTERRUPT 0 |
| 171 | #define USB3380_IRQSTAT0_EP_INTR_MASK_IN (0xF << 17) | ||
| 172 | #define USB3380_IRQSTAT0_EP_INTR_MASK_OUT (0xF << 1) | ||
| 173 | |||
| 171 | u32 irqstat1; | 174 | u32 irqstat1; |
| 172 | #define POWER_STATE_CHANGE_INTERRUPT 27 | 175 | #define POWER_STATE_CHANGE_INTERRUPT 27 |
| 173 | #define PCI_ARBITER_TIMEOUT_INTERRUPT 26 | 176 | #define PCI_ARBITER_TIMEOUT_INTERRUPT 26 |
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index bc91b5d380fd..e39f251cf861 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h | |||
| @@ -205,6 +205,8 @@ extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index); | |||
| 205 | extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index); | 205 | extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index); |
| 206 | extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, | 206 | extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, |
| 207 | const char *phandle, u8 index); | 207 | const char *phandle, u8 index); |
| 208 | extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, | ||
| 209 | struct device_node *node, struct notifier_block *nb); | ||
| 208 | extern void usb_put_phy(struct usb_phy *); | 210 | extern void usb_put_phy(struct usb_phy *); |
| 209 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); | 211 | extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); |
| 210 | extern int usb_bind_phy(const char *dev_name, u8 index, | 212 | extern int usb_bind_phy(const char *dev_name, u8 index, |
| @@ -238,6 +240,12 @@ static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, | |||
| 238 | return ERR_PTR(-ENXIO); | 240 | return ERR_PTR(-ENXIO); |
| 239 | } | 241 | } |
| 240 | 242 | ||
| 243 | static inline struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, | ||
| 244 | struct device_node *node, struct notifier_block *nb) | ||
| 245 | { | ||
| 246 | return ERR_PTR(-ENXIO); | ||
| 247 | } | ||
| 248 | |||
| 241 | static inline void usb_put_phy(struct usb_phy *x) | 249 | static inline void usb_put_phy(struct usb_phy *x) |
| 242 | { | 250 | { |
| 243 | } | 251 | } |
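The new node-based lookup helps when the phandle does not live in the caller's own device node. A sketch of a controller probe using it; the "usb-phy" property name is an assumption for illustration, and passing a NULL notifier is assumed to be permitted:

```c
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static int my_probe(struct platform_device *pdev)
{
	struct device_node *phy_node;
	struct usb_phy *phy;

	phy_node = of_parse_phandle(pdev->dev.of_node, "usb-phy", 0);
	if (!phy_node)
		return -ENODEV;

	phy = devm_usb_get_phy_by_node(&pdev->dev, phy_node, NULL);
	of_node_put(phy_node);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* ... use phy ... */
	return 0;
}
```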
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index f06529c14141..3dd5a781da99 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
| @@ -169,8 +169,7 @@ struct renesas_usbhs_driver_param { | |||
| 169 | #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ | 169 | #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ |
| 170 | }; | 170 | }; |
| 171 | 171 | ||
| 172 | #define USBHS_TYPE_R8A7790 1 | 172 | #define USBHS_TYPE_RCAR_GEN2 1 |
| 173 | #define USBHS_TYPE_R8A7791 2 | ||
| 174 | 173 | ||
| 175 | /* | 174 | /* |
| 176 | * option: | 175 | * option: |
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 5c295c26ad37..5f07407a367a 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | #define __LINUX_USB_ULPI_H | 12 | #define __LINUX_USB_ULPI_H |
| 13 | 13 | ||
| 14 | #include <linux/usb/otg.h> | 14 | #include <linux/usb/otg.h> |
| 15 | #include <linux/ulpi/regs.h> | ||
| 16 | |||
| 15 | /*-------------------------------------------------------------------------*/ | 17 | /*-------------------------------------------------------------------------*/ |
| 16 | 18 | ||
| 17 | /* | 19 | /* |
| @@ -49,138 +51,6 @@ | |||
| 49 | 51 | ||
| 50 | /*-------------------------------------------------------------------------*/ | 52 | /*-------------------------------------------------------------------------*/ |
| 51 | 53 | ||
| 52 | /* | ||
| 53 | * Macros for Set and Clear | ||
| 54 | * See ULPI 1.1 specification to find the registers with Set and Clear offsets | ||
| 55 | */ | ||
| 56 | #define ULPI_SET(a) (a + 1) | ||
| 57 | #define ULPI_CLR(a) (a + 2) | ||
| 58 | |||
| 59 | /*-------------------------------------------------------------------------*/ | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Register Map | ||
| 63 | */ | ||
| 64 | #define ULPI_VENDOR_ID_LOW 0x00 | ||
| 65 | #define ULPI_VENDOR_ID_HIGH 0x01 | ||
| 66 | #define ULPI_PRODUCT_ID_LOW 0x02 | ||
| 67 | #define ULPI_PRODUCT_ID_HIGH 0x03 | ||
| 68 | #define ULPI_FUNC_CTRL 0x04 | ||
| 69 | #define ULPI_IFC_CTRL 0x07 | ||
| 70 | #define ULPI_OTG_CTRL 0x0a | ||
| 71 | #define ULPI_USB_INT_EN_RISE 0x0d | ||
| 72 | #define ULPI_USB_INT_EN_FALL 0x10 | ||
| 73 | #define ULPI_USB_INT_STS 0x13 | ||
| 74 | #define ULPI_USB_INT_LATCH 0x14 | ||
| 75 | #define ULPI_DEBUG 0x15 | ||
| 76 | #define ULPI_SCRATCH 0x16 | ||
| 77 | /* Optional Carkit Registers */ | ||
| 78 | #define ULPI_CARCIT_CTRL 0x19 | ||
| 79 | #define ULPI_CARCIT_INT_DELAY 0x1c | ||
| 80 | #define ULPI_CARCIT_INT_EN 0x1d | ||
| 81 | #define ULPI_CARCIT_INT_STS 0x20 | ||
| 82 | #define ULPI_CARCIT_INT_LATCH 0x21 | ||
| 83 | #define ULPI_CARCIT_PLS_CTRL 0x22 | ||
| 84 | /* Other Optional Registers */ | ||
| 85 | #define ULPI_TX_POS_WIDTH 0x25 | ||
| 86 | #define ULPI_TX_NEG_WIDTH 0x26 | ||
| 87 | #define ULPI_POLARITY_RECOVERY 0x27 | ||
| 88 | /* Access Extended Register Set */ | ||
| 89 | #define ULPI_ACCESS_EXTENDED 0x2f | ||
| 90 | /* Vendor Specific */ | ||
| 91 | #define ULPI_VENDOR_SPECIFIC 0x30 | ||
| 92 | /* Extended Registers */ | ||
| 93 | #define ULPI_EXT_VENDOR_SPECIFIC 0x80 | ||
| 94 | |||
| 95 | /*-------------------------------------------------------------------------*/ | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Register Bits | ||
| 99 | */ | ||
| 100 | |||
| 101 | /* Function Control */ | ||
| 102 | #define ULPI_FUNC_CTRL_XCVRSEL (1 << 0) | ||
| 103 | #define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0) | ||
| 104 | #define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0) | ||
| 105 | #define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0) | ||
| 106 | #define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0) | ||
| 107 | #define ULPI_FUNC_CTRL_FS4LS (3 << 0) | ||
| 108 | #define ULPI_FUNC_CTRL_TERMSELECT (1 << 2) | ||
| 109 | #define ULPI_FUNC_CTRL_OPMODE (1 << 3) | ||
| 110 | #define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3) | ||
| 111 | #define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3) | ||
| 112 | #define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3) | ||
| 113 | #define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3) | ||
| 114 | #define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3) | ||
| 115 | #define ULPI_FUNC_CTRL_RESET (1 << 5) | ||
| 116 | #define ULPI_FUNC_CTRL_SUSPENDM (1 << 6) | ||
| 117 | |||
| 118 | /* Interface Control */ | ||
| 119 | #define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0) | ||
| 120 | #define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1) | ||
| 121 | #define ULPI_IFC_CTRL_CARKITMODE (1 << 2) | ||
| 122 | #define ULPI_IFC_CTRL_CLOCKSUSPENDM (1 << 3) | ||
| 123 | #define ULPI_IFC_CTRL_AUTORESUME (1 << 4) | ||
| 124 | #define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5) | ||
| 125 | #define ULPI_IFC_CTRL_PASSTHRU (1 << 6) | ||
| 126 | #define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7) | ||
| 127 | |||
| 128 | /* OTG Control */ | ||
| 129 | #define ULPI_OTG_CTRL_ID_PULLUP (1 << 0) | ||
| 130 | #define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1) | ||
| 131 | #define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2) | ||
| 132 | #define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3) | ||
| 133 | #define ULPI_OTG_CTRL_CHRGVBUS (1 << 4) | ||
| 134 | #define ULPI_OTG_CTRL_DRVVBUS (1 << 5) | ||
| 135 | #define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6) | ||
| 136 | #define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7) | ||
| 137 | |||
| 138 | /* USB Interrupt Enable Rising, | ||
| 139 | * USB Interrupt Enable Falling, | ||
| 140 | * USB Interrupt Status and | ||
| 141 | * USB Interrupt Latch | ||
| 142 | */ | ||
| 143 | #define ULPI_INT_HOST_DISCONNECT (1 << 0) | ||
| 144 | #define ULPI_INT_VBUS_VALID (1 << 1) | ||
| 145 | #define ULPI_INT_SESS_VALID (1 << 2) | ||
| 146 | #define ULPI_INT_SESS_END (1 << 3) | ||
| 147 | #define ULPI_INT_IDGRD (1 << 4) | ||
| 148 | |||
| 149 | /* Debug */ | ||
| 150 | #define ULPI_DEBUG_LINESTATE0 (1 << 0) | ||
| 151 | #define ULPI_DEBUG_LINESTATE1 (1 << 1) | ||
| 152 | |||
| 153 | /* Carkit Control */ | ||
| 154 | #define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0) | ||
| 155 | #define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1) | ||
| 156 | #define ULPI_CARKIT_CTRL_TXDEN (1 << 2) | ||
| 157 | #define ULPI_CARKIT_CTRL_RXDEN (1 << 3) | ||
| 158 | #define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4) | ||
| 159 | #define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5) | ||
| 160 | #define ULPI_CARKIT_CTRL_MICEN (1 << 6) | ||
| 161 | |||
| 162 | /* Carkit Interrupt Enable */ | ||
| 163 | #define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0) | ||
| 164 | #define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1) | ||
| 165 | #define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2) | ||
| 166 | #define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3) | ||
| 167 | #define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4) | ||
| 168 | |||
| 169 | /* Carkit Interrupt Status and | ||
| 170 | * Carkit Interrupt Latch | ||
| 171 | */ | ||
| 172 | #define ULPI_CARKIT_INT_IDFLOAT (1 << 0) | ||
| 173 | #define ULPI_CARKIT_INT_CARINTDET (1 << 1) | ||
| 174 | #define ULPI_CARKIT_INT_DP (1 << 2) | ||
| 175 | |||
| 176 | /* Carkit Pulse Control*/ | ||
| 177 | #define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0) | ||
| 178 | #define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1) | ||
| 179 | #define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2) | ||
| 180 | #define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3) | ||
| 181 | |||
| 182 | /*-------------------------------------------------------------------------*/ | ||
| 183 | |||
| 184 | #if IS_ENABLED(CONFIG_USB_ULPI) | 54 | #if IS_ENABLED(CONFIG_USB_ULPI) |
| 185 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, | 55 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, |
| 186 | unsigned int flags); | 56 | unsigned int flags); |
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h index f92eb635b9d3..11525d8d89a7 100644 --- a/include/linux/usb/usb338x.h +++ b/include/linux/usb/usb338x.h | |||
| @@ -43,6 +43,10 @@ | |||
| 43 | #define IN_ENDPOINT_TYPE 12 | 43 | #define IN_ENDPOINT_TYPE 12 |
| 44 | #define OUT_ENDPOINT_ENABLE 10 | 44 | #define OUT_ENDPOINT_ENABLE 10 |
| 45 | #define OUT_ENDPOINT_TYPE 8 | 45 | #define OUT_ENDPOINT_TYPE 8 |
| 46 | #define USB3380_EP_CFG_MASK_IN ((0x3 << IN_ENDPOINT_TYPE) | \ | ||
| 47 | BIT(IN_ENDPOINT_ENABLE)) | ||
| 48 | #define USB3380_EP_CFG_MASK_OUT ((0x3 << OUT_ENDPOINT_TYPE) | \ | ||
| 49 | BIT(OUT_ENDPOINT_ENABLE)) | ||
| 46 | 50 | ||
| 47 | struct usb338x_usb_ext_regs { | 51 | struct usb338x_usb_ext_regs { |
| 48 | u32 usbclass; | 52 | u32 usbclass; |
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h index 51865d05b267..ce63a2c3a612 100644 --- a/include/linux/virtio_byteorder.h +++ b/include/linux/virtio_byteorder.h | |||
| @@ -3,17 +3,21 @@ | |||
| 3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
| 4 | #include <uapi/linux/virtio_types.h> | 4 | #include <uapi/linux/virtio_types.h> |
| 5 | 5 | ||
| 6 | /* | 6 | static inline bool virtio_legacy_is_little_endian(void) |
| 7 | * Low-level memory accessors for handling virtio in modern little endian and in | 7 | { |
| 8 | * compatibility native endian format. | 8 | #ifdef __LITTLE_ENDIAN |
| 9 | */ | 9 | return true; |
| 10 | #else | ||
| 11 | return false; | ||
| 12 | #endif | ||
| 13 | } | ||
| 10 | 14 | ||
| 11 | static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) | 15 | static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) |
| 12 | { | 16 | { |
| 13 | if (little_endian) | 17 | if (little_endian) |
| 14 | return le16_to_cpu((__force __le16)val); | 18 | return le16_to_cpu((__force __le16)val); |
| 15 | else | 19 | else |
| 16 | return (__force u16)val; | 20 | return be16_to_cpu((__force __be16)val); |
| 17 | } | 21 | } |
| 18 | 22 | ||
| 19 | static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) | 23 | static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) |
| @@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) | |||
| 21 | if (little_endian) | 25 | if (little_endian) |
| 22 | return (__force __virtio16)cpu_to_le16(val); | 26 | return (__force __virtio16)cpu_to_le16(val); |
| 23 | else | 27 | else |
| 24 | return (__force __virtio16)val; | 28 | return (__force __virtio16)cpu_to_be16(val); |
| 25 | } | 29 | } |
| 26 | 30 | ||
| 27 | static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) | 31 | static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) |
| @@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) | |||
| 29 | if (little_endian) | 33 | if (little_endian) |
| 30 | return le32_to_cpu((__force __le32)val); | 34 | return le32_to_cpu((__force __le32)val); |
| 31 | else | 35 | else |
| 32 | return (__force u32)val; | 36 | return be32_to_cpu((__force __be32)val); |
| 33 | } | 37 | } |
| 34 | 38 | ||
| 35 | static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) | 39 | static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) |
| @@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) | |||
| 37 | if (little_endian) | 41 | if (little_endian) |
| 38 | return (__force __virtio32)cpu_to_le32(val); | 42 | return (__force __virtio32)cpu_to_le32(val); |
| 39 | else | 43 | else |
| 40 | return (__force __virtio32)val; | 44 | return (__force __virtio32)cpu_to_be32(val); |
| 41 | } | 45 | } |
| 42 | 46 | ||
| 43 | static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) | 47 | static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) |
| @@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) | |||
| 45 | if (little_endian) | 49 | if (little_endian) |
| 46 | return le64_to_cpu((__force __le64)val); | 50 | return le64_to_cpu((__force __le64)val); |
| 47 | else | 51 | else |
| 48 | return (__force u64)val; | 52 | return be64_to_cpu((__force __be64)val); |
| 49 | } | 53 | } |
| 50 | 54 | ||
| 51 | static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) | 55 | static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) |
| @@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) | |||
| 53 | if (little_endian) | 57 | if (little_endian) |
| 54 | return (__force __virtio64)cpu_to_le64(val); | 58 | return (__force __virtio64)cpu_to_le64(val); |
| 55 | else | 59 | else |
| 56 | return (__force __virtio64)val; | 60 | return (__force __virtio64)cpu_to_be64(val); |
| 57 | } | 61 | } |
| 58 | 62 | ||
| 59 | #endif /* _LINUX_VIRTIO_BYTEORDER */ | 63 | #endif /* _LINUX_VIRTIO_BYTEORDER */ |
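Functionally, this change only shows up when little_endian is false on a little-endian host (e.g. vhost serving a cross-endian legacy guest): the field is now converted to big-endian instead of being passed through in native order. A tiny illustration with an arbitrary value:

```c
#include <linux/virtio_byteorder.h>

static u16 my_roundtrip_be16(void)
{
	/* little_endian == false now means "big-endian on the wire" */
	__virtio16 wire = __cpu_to_virtio16(false, 0x1234);

	/* on a little-endian host, wire is byte-swapped in memory;
	 * the round trip still yields 0x1234 on every host */
	return __virtio16_to_cpu(false, wire);
}
```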
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 1e306f727edc..e5ce8ab0b8b0 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) | |||
| 205 | return 0; | 205 | return 0; |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | static inline bool virtio_is_little_endian(struct virtio_device *vdev) | ||
| 209 | { | ||
| 210 | return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || | ||
| 211 | virtio_legacy_is_little_endian(); | ||
| 212 | } | ||
| 213 | |||
| 208 | /* Memory accessors */ | 214 | /* Memory accessors */ |
| 209 | static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) | 215 | static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) |
| 210 | { | 216 | { |
| 211 | return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 217 | return __virtio16_to_cpu(virtio_is_little_endian(vdev), val); |
| 212 | } | 218 | } |
| 213 | 219 | ||
| 214 | static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) | 220 | static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) |
| 215 | { | 221 | { |
| 216 | return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 222 | return __cpu_to_virtio16(virtio_is_little_endian(vdev), val); |
| 217 | } | 223 | } |
| 218 | 224 | ||
| 219 | static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) | 225 | static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) |
| 220 | { | 226 | { |
| 221 | return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 227 | return __virtio32_to_cpu(virtio_is_little_endian(vdev), val); |
| 222 | } | 228 | } |
| 223 | 229 | ||
| 224 | static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) | 230 | static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) |
| 225 | { | 231 | { |
| 226 | return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 232 | return __cpu_to_virtio32(virtio_is_little_endian(vdev), val); |
| 227 | } | 233 | } |
| 228 | 234 | ||
| 229 | static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) | 235 | static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) |
| 230 | { | 236 | { |
| 231 | return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 237 | return __virtio64_to_cpu(virtio_is_little_endian(vdev), val); |
| 232 | } | 238 | } |
| 233 | 239 | ||
| 234 | static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | 240 | static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) |
| 235 | { | 241 | { |
| 236 | return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); | 242 | return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); |
| 237 | } | 243 | } |
| 238 | 244 | ||
| 239 | /* Config space accessors. */ | 245 | /* Config space accessors. */ |
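With virtio_is_little_endian() in place, every virtioNN accessor honours both VIRTIO_F_VERSION_1 (always little-endian) and the legacy-endianness fallback in a single spot. A hedged sketch of a driver that keeps using the accessors unchanged; struct my_hdr and my_parse_hdr() are invented for illustration:

    /* Hypothetical header layout; field names and order are illustrative. */
    struct my_hdr {
        __virtio32 len;     /* stored in device byte order */
        __virtio16 flags;
    };

    static void my_parse_hdr(struct virtio_device *vdev,
                             const struct my_hdr *h, u32 *len, u16 *flags)
    {
        /* Both conversions now route through virtio_is_little_endian(vdev). */
        *len   = virtio32_to_cpu(vdev, h->len);
        *flags = virtio16_to_cpu(vdev, h->flags);
    }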
diff --git a/include/linux/vme.h b/include/linux/vme.h index 79242e9c06b8..c0131358f351 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h | |||
| @@ -120,6 +120,8 @@ void vme_free_consistent(struct vme_resource *, size_t, void *, | |||
| 120 | dma_addr_t); | 120 | dma_addr_t); |
| 121 | 121 | ||
| 122 | size_t vme_get_size(struct vme_resource *); | 122 | size_t vme_get_size(struct vme_resource *); |
| 123 | int vme_check_window(u32 aspace, unsigned long long vme_base, | ||
| 124 | unsigned long long size); | ||
| 123 | 125 | ||
| 124 | struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); | 126 | struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); |
| 125 | int vme_slave_set(struct vme_resource *, int, unsigned long long, | 127 | int vme_slave_set(struct vme_resource *, int, unsigned long long, |
diff --git a/include/linux/vringh.h b/include/linux/vringh.h index a3fa537e717a..bc6c28d04263 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h | |||
| @@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh) | |||
| 226 | vrh->notify(vrh); | 226 | vrh->notify(vrh); |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | static inline bool vringh_is_little_endian(const struct vringh *vrh) | ||
| 230 | { | ||
| 231 | return vrh->little_endian || | ||
| 232 | virtio_legacy_is_little_endian(); | ||
| 233 | } | ||
| 234 | |||
| 229 | static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) | 235 | static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) |
| 230 | { | 236 | { |
| 231 | return __virtio16_to_cpu(vrh->little_endian, val); | 237 | return __virtio16_to_cpu(vringh_is_little_endian(vrh), val); |
| 232 | } | 238 | } |
| 233 | 239 | ||
| 234 | static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) | 240 | static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) |
| 235 | { | 241 | { |
| 236 | return __cpu_to_virtio16(vrh->little_endian, val); | 242 | return __cpu_to_virtio16(vringh_is_little_endian(vrh), val); |
| 237 | } | 243 | } |
| 238 | 244 | ||
| 239 | static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) | 245 | static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) |
| 240 | { | 246 | { |
| 241 | return __virtio32_to_cpu(vrh->little_endian, val); | 247 | return __virtio32_to_cpu(vringh_is_little_endian(vrh), val); |
| 242 | } | 248 | } |
| 243 | 249 | ||
| 244 | static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) | 250 | static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) |
| 245 | { | 251 | { |
| 246 | return __cpu_to_virtio32(vrh->little_endian, val); | 252 | return __cpu_to_virtio32(vringh_is_little_endian(vrh), val); |
| 247 | } | 253 | } |
| 248 | 254 | ||
| 249 | static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) | 255 | static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) |
| 250 | { | 256 | { |
| 251 | return __virtio64_to_cpu(vrh->little_endian, val); | 257 | return __virtio64_to_cpu(vringh_is_little_endian(vrh), val); |
| 252 | } | 258 | } |
| 253 | 259 | ||
| 254 | static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) | 260 | static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) |
| 255 | { | 261 | { |
| 256 | return __cpu_to_virtio64(vrh->little_endian, val); | 262 | return __cpu_to_virtio64(vringh_is_little_endian(vrh), val); |
| 257 | } | 263 | } |
| 258 | #endif /* _LINUX_VRINGH_H */ | 264 | #endif /* _LINUX_VRINGH_H */ |
diff --git a/include/linux/wait.h b/include/linux/wait.h index d69ac4ecc88b..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -358,6 +358,19 @@ do { \ | |||
| 358 | __ret; \ | 358 | __ret; \ |
| 359 | }) | 359 | }) |
| 360 | 360 | ||
| 361 | #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \ | ||
| 362 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ | ||
| 363 | cmd1; schedule(); cmd2) | ||
| 364 | /* | ||
| 365 | * Just like wait_event_cmd(), except it sets the exclusive flag | ||
| 366 | */ | ||
| 367 | #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \ | ||
| 368 | do { \ | ||
| 369 | if (condition) \ | ||
| 370 | break; \ | ||
| 371 | __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \ | ||
| 372 | } while (0) | ||
| 373 | |||
| 361 | #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ | 374 | #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ |
| 362 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ | 375 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
| 363 | cmd1; schedule(); cmd2) | 376 | cmd1; schedule(); cmd2) |
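wait_event_exclusive_cmd() follows the wait_event_cmd() pattern but queues the waiter exclusively, so a wake_up() wakes at most one such waiter (after any non-exclusive ones). The typical use is to drop a lock across the sleep and retake it before the condition is rechecked; a hedged sketch with invented names (cache->lock, cache->wait, free_slots):

    spin_lock_irq(&cache->lock);
    wait_event_exclusive_cmd(cache->wait,
                             atomic_read(&cache->free_slots) > 0,
                             /* cmd1: runs before schedule() */
                             spin_unlock_irq(&cache->lock),
                             /* cmd2: runs after wakeup, before rechecking */
                             spin_lock_irq(&cache->lock));
    /* cache->lock is held again here; only one exclusive waiter proceeds. */
    spin_unlock_irq(&cache->lock);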
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index a746bf5216f8..f47feada5b42 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h | |||
| @@ -65,6 +65,8 @@ struct watchdog_ops { | |||
| 65 | * @driver-data:Pointer to the driver's private data. | 65 | * @driver-data:Pointer to the driver's private data. |
| 66 | * @lock: Lock for watchdog core internal use only. | 66 | * @lock: Lock for watchdog core internal use only. |
| 67 | * @status: Field that contains the device's internal status bits. | 67 | * @status: Field that contains the device's internal status bits. |
| 68 | * @deferred: entry in wtd_deferred_reg_list which is used to | ||
| 69 | * register early-initialized watchdogs. | ||
| 68 | * | 70 | * |
| 69 | * The watchdog_device structure contains all information about a | 71 | * The watchdog_device structure contains all information about a |
| 70 | * watchdog timer device. | 72 | * watchdog timer device. |
| @@ -95,6 +97,7 @@ struct watchdog_device { | |||
| 95 | #define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ | 97 | #define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ |
| 96 | #define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ | 98 | #define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ |
| 97 | #define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ | 99 | #define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ |
| 100 | struct list_head deferred; | ||
| 98 | }; | 101 | }; |
| 99 | 102 | ||
| 100 | #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) | 103 | #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index deee212af8e0..738b30b39b68 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -424,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); | |||
| 424 | void free_workqueue_attrs(struct workqueue_attrs *attrs); | 424 | void free_workqueue_attrs(struct workqueue_attrs *attrs); |
| 425 | int apply_workqueue_attrs(struct workqueue_struct *wq, | 425 | int apply_workqueue_attrs(struct workqueue_struct *wq, |
| 426 | const struct workqueue_attrs *attrs); | 426 | const struct workqueue_attrs *attrs); |
| 427 | int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); | ||
| 427 | 428 | ||
| 428 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, | 429 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, |
| 429 | struct work_struct *work); | 430 | struct work_struct *work); |
| @@ -434,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 434 | 435 | ||
| 435 | extern void flush_workqueue(struct workqueue_struct *wq); | 436 | extern void flush_workqueue(struct workqueue_struct *wq); |
| 436 | extern void drain_workqueue(struct workqueue_struct *wq); | 437 | extern void drain_workqueue(struct workqueue_struct *wq); |
| 437 | extern void flush_scheduled_work(void); | ||
| 438 | 438 | ||
| 439 | extern int schedule_on_each_cpu(work_func_t func); | 439 | extern int schedule_on_each_cpu(work_func_t func); |
| 440 | 440 | ||
| @@ -531,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work) | |||
| 531 | } | 531 | } |
| 532 | 532 | ||
| 533 | /** | 533 | /** |
| 534 | * flush_scheduled_work - ensure that any scheduled work has run to completion. | ||
| 535 | * | ||
| 536 | * Forces execution of the kernel-global workqueue and blocks until its | ||
| 537 | * completion. | ||
| 538 | * | ||
| 539 | * Think twice before calling this function! It's very easy to get into | ||
| 540 | * trouble if you don't take great care. Either of the following situations | ||
| 541 | * will lead to deadlock: | ||
| 542 | * | ||
| 543 | * One of the work items currently on the workqueue needs to acquire | ||
| 544 | * a lock held by your code or its caller. | ||
| 545 | * | ||
| 546 | * Your code is running in the context of a work routine. | ||
| 547 | * | ||
| 548 | * They will be detected by lockdep when they occur, but the first might not | ||
| 549 | * occur very often. It depends on what work items are on the workqueue and | ||
| 550 | * what locks they need, which you have no control over. | ||
| 551 | * | ||
| 552 | * In most situations flushing the entire workqueue is overkill; you merely | ||
| 553 | * need to know that a particular work item isn't queued and isn't running. | ||
| 554 | * In such cases you should use cancel_delayed_work_sync() or | ||
| 555 | * cancel_work_sync() instead. | ||
| 556 | */ | ||
| 557 | static inline void flush_scheduled_work(void) | ||
| 558 | { | ||
| 559 | flush_workqueue(system_wq); | ||
| 560 | } | ||
| 561 | |||
| 562 | /** | ||
| 534 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 563 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
| 535 | * @cpu: cpu to use | 564 | * @cpu: cpu to use |
| 536 | * @dwork: job to be done | 565 | * @dwork: job to be done |
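As the new kernel-doc stresses, flushing the whole system workqueue is rarely needed and can deadlock; cancelling or flushing only the work items a driver owns is the safer pattern. A hedged sketch of a teardown path (struct my_dev and its fields are invented):

    static void my_remove(struct my_dev *dev)
    {
        /* Wait for or cancel only this driver's items instead of
         * calling flush_scheduled_work() on the global workqueue. */
        cancel_delayed_work_sync(&dev->poll_work);
        cancel_work_sync(&dev->irq_work);
    }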
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index b2dd371ec0ca..b333c945e571 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
| 8 | #include <linux/workqueue.h> | 8 | #include <linux/workqueue.h> |
| 9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
| 10 | #include <linux/flex_proportions.h> | ||
| 11 | #include <linux/backing-dev-defs.h> | ||
| 10 | 12 | ||
| 11 | DECLARE_PER_CPU(int, dirty_throttle_leaks); | 13 | DECLARE_PER_CPU(int, dirty_throttle_leaks); |
| 12 | 14 | ||
| @@ -84,18 +86,95 @@ struct writeback_control { | |||
| 84 | unsigned for_reclaim:1; /* Invoked from the page allocator */ | 86 | unsigned for_reclaim:1; /* Invoked from the page allocator */ |
| 85 | unsigned range_cyclic:1; /* range_start is cyclic */ | 87 | unsigned range_cyclic:1; /* range_start is cyclic */ |
| 86 | unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ | 88 | unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ |
| 89 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 90 | struct bdi_writeback *wb; /* wb this writeback is issued under */ | ||
| 91 | struct inode *inode; /* inode being written out */ | ||
| 92 | |||
| 93 | /* foreign inode detection, see wbc_detach_inode() */ | ||
| 94 | int wb_id; /* current wb id */ | ||
| 95 | int wb_lcand_id; /* last foreign candidate wb id */ | ||
| 96 | int wb_tcand_id; /* this foreign candidate wb id */ | ||
| 97 | size_t wb_bytes; /* bytes written by current wb */ | ||
| 98 | size_t wb_lcand_bytes; /* bytes written by last candidate */ | ||
| 99 | size_t wb_tcand_bytes; /* bytes written by this candidate */ | ||
| 100 | #endif | ||
| 87 | }; | 101 | }; |
| 88 | 102 | ||
| 89 | /* | 103 | /* |
| 104 | * A wb_domain represents a domain that wb's (bdi_writeback's) belong to | ||
| 105 | * and are measured against each other in. There always is one global | ||
| 106 | * domain, global_wb_domain, that every wb in the system is a member of. | ||
| 107 | * This allows measuring the relative bandwidth of each wb to distribute | ||
| 108 | * dirtyable memory accordingly. | ||
| 109 | */ | ||
| 110 | struct wb_domain { | ||
| 111 | spinlock_t lock; | ||
| 112 | |||
| 113 | /* | ||
| 114 | * Scale the writeback cache size proportional to the relative | ||
| 115 | * writeout speed. | ||
| 116 | * | ||
| 117 | * We do this by keeping a floating proportion between BDIs, based | ||
| 118 | * on page writeback completions [end_page_writeback()]. Those | ||
| 119 | * devices that write out pages fastest will get the larger share, | ||
| 120 | * while the slower will get a smaller share. | ||
| 121 | * | ||
| 122 | * We use page writeout completions because we are interested in | ||
| 123 | * getting rid of dirty pages. Having them written out is the | ||
| 124 | * primary goal. | ||
| 125 | * | ||
| 126 | * We introduce a concept of time, a period over which we measure | ||
| 127 | * these events, because demand can/will vary over time. The length | ||
| 128 | * of this period itself is measured in page writeback completions. | ||
| 129 | */ | ||
| 130 | struct fprop_global completions; | ||
| 131 | struct timer_list period_timer; /* timer for aging of completions */ | ||
| 132 | unsigned long period_time; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * The dirtyable memory and dirty threshold could be suddenly | ||
| 136 | * knocked down by a large amount (e.g. on the startup of KVM in a | ||
| 137 | * swapless system). This may throw the system into deep dirty | ||
| 138 | * exceeded state and throttle heavy/light dirtiers alike. To | ||
| 139 | * retain good responsiveness, maintain global_dirty_limit for | ||
| 140 | * tracking slowly down to the knocked down dirty threshold. | ||
| 141 | * | ||
| 142 | * Both fields are protected by ->lock. | ||
| 143 | */ | ||
| 144 | unsigned long dirty_limit_tstamp; | ||
| 145 | unsigned long dirty_limit; | ||
| 146 | }; | ||
| 147 | |||
| 148 | /** | ||
| 149 | * wb_domain_size_changed - memory available to a wb_domain has changed | ||
| 150 | * @dom: wb_domain of interest | ||
| 151 | * | ||
| 152 | * This function should be called when the amount of memory available to | ||
| 153 | * @dom has changed. It resets @dom's dirty limit parameters to prevent | ||
| 154 | * the past values which don't match the current configuration from skewing | ||
| 155 | * dirty throttling. Without this, when memory size of a wb_domain is | ||
| 156 | * greatly reduced, the dirty throttling logic may allow too many pages to | ||
| 157 | * be dirtied leading to consecutive unnecessary OOMs and may get stuck in | ||
| 158 | * that situation. | ||
| 159 | */ | ||
| 160 | static inline void wb_domain_size_changed(struct wb_domain *dom) | ||
| 161 | { | ||
| 162 | spin_lock(&dom->lock); | ||
| 163 | dom->dirty_limit_tstamp = jiffies; | ||
| 164 | dom->dirty_limit = 0; | ||
| 165 | spin_unlock(&dom->lock); | ||
| 166 | } | ||
| 167 | |||
| 168 | /* | ||
| 90 | * fs/fs-writeback.c | 169 | * fs/fs-writeback.c |
| 91 | */ | 170 | */ |
| 92 | struct bdi_writeback; | 171 | struct bdi_writeback; |
| 93 | void writeback_inodes_sb(struct super_block *, enum wb_reason reason); | 172 | void writeback_inodes_sb(struct super_block *, enum wb_reason reason); |
| 94 | void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, | 173 | void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, |
| 95 | enum wb_reason reason); | 174 | enum wb_reason reason); |
| 96 | int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason); | 175 | bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason); |
| 97 | int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, | 176 | bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, |
| 98 | enum wb_reason reason); | 177 | enum wb_reason reason); |
| 99 | void sync_inodes_sb(struct super_block *); | 178 | void sync_inodes_sb(struct super_block *); |
| 100 | void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); | 179 | void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); |
| 101 | void inode_wait_for_writeback(struct inode *inode); | 180 | void inode_wait_for_writeback(struct inode *inode); |
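A wb_domain groups the writeback completion proportions and the smoothed dirty limit for one set of bdi_writeback's; global_wb_domain covers every wb, and cgroup writeback can add further domains on top. A hedged lifecycle sketch; only wb_domain_init() and wb_domain_size_changed() come from this header, the surrounding functions are invented:

    static struct wb_domain my_domain;

    static int my_domain_setup(void)
    {
        /* Initialises the lock, completion proportions and aging timer. */
        return wb_domain_init(&my_domain, GFP_KERNEL);
    }

    static void my_dirtyable_memory_changed(void)
    {
        /* Reset stale dirty-limit state, e.g. after memory shrinks abruptly. */
        wb_domain_size_changed(&my_domain);
    }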
| @@ -107,6 +186,123 @@ static inline void wait_on_inode(struct inode *inode) | |||
| 107 | wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); | 186 | wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); |
| 108 | } | 187 | } |
| 109 | 188 | ||
| 189 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 190 | |||
| 191 | #include <linux/cgroup.h> | ||
| 192 | #include <linux/bio.h> | ||
| 193 | |||
| 194 | void __inode_attach_wb(struct inode *inode, struct page *page); | ||
| 195 | void wbc_attach_and_unlock_inode(struct writeback_control *wbc, | ||
| 196 | struct inode *inode) | ||
| 197 | __releases(&inode->i_lock); | ||
| 198 | void wbc_detach_inode(struct writeback_control *wbc); | ||
| 199 | void wbc_account_io(struct writeback_control *wbc, struct page *page, | ||
| 200 | size_t bytes); | ||
| 201 | |||
| 202 | /** | ||
| 203 | * inode_attach_wb - associate an inode with its wb | ||
| 204 | * @inode: inode of interest | ||
| 205 | * @page: page being dirtied (may be NULL) | ||
| 206 | * | ||
| 207 | * If @inode doesn't have its wb, associate it with the wb matching the | ||
| 208 | * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o | ||
| 209 | * @inode->i_lock. | ||
| 210 | */ | ||
| 211 | static inline void inode_attach_wb(struct inode *inode, struct page *page) | ||
| 212 | { | ||
| 213 | if (!inode->i_wb) | ||
| 214 | __inode_attach_wb(inode, page); | ||
| 215 | } | ||
| 216 | |||
| 217 | /** | ||
| 218 | * inode_detach_wb - disassociate an inode from its wb | ||
| 219 | * @inode: inode of interest | ||
| 220 | * | ||
| 221 | * @inode is being freed. Detach from its wb. | ||
| 222 | */ | ||
| 223 | static inline void inode_detach_wb(struct inode *inode) | ||
| 224 | { | ||
| 225 | if (inode->i_wb) { | ||
| 226 | wb_put(inode->i_wb); | ||
| 227 | inode->i_wb = NULL; | ||
| 228 | } | ||
| 229 | } | ||
| 230 | |||
| 231 | /** | ||
| 232 | * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite | ||
| 233 | * @wbc: writeback_control of interest | ||
| 234 | * @inode: target inode | ||
| 235 | * | ||
| 236 | * This function is to be used by __filemap_fdatawrite_range(), which is an | ||
| 237 | * alternative entry point into writeback code, and first ensures @inode is | ||
| 238 | * associated with a bdi_writeback and attaches it to @wbc. | ||
| 239 | */ | ||
| 240 | static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, | ||
| 241 | struct inode *inode) | ||
| 242 | { | ||
| 243 | spin_lock(&inode->i_lock); | ||
| 244 | inode_attach_wb(inode, NULL); | ||
| 245 | wbc_attach_and_unlock_inode(wbc, inode); | ||
| 246 | } | ||
| 247 | |||
| 248 | /** | ||
| 249 | * wbc_init_bio - writeback specific initialization of bio | ||
| 250 | * @wbc: writeback_control for the writeback in progress | ||
| 251 | * @bio: bio to be initialized | ||
| 252 | * | ||
| 253 | * @bio is a part of the writeback in progress controlled by @wbc. Perform | ||
| 254 | * writeback specific initialization. This is used to apply the cgroup | ||
| 255 | * writeback context. | ||
| 256 | */ | ||
| 257 | static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) | ||
| 258 | { | ||
| 259 | /* | ||
| 260 | * pageout() path doesn't attach @wbc to the inode being written | ||
| 261 | * out. This is intentional as we don't want the function to block | ||
| 262 | * behind a slow cgroup. Ultimately, we want pageout() to kick off | ||
| 263 | * regular writeback instead of writing things out itself. | ||
| 264 | */ | ||
| 265 | if (wbc->wb) | ||
| 266 | bio_associate_blkcg(bio, wbc->wb->blkcg_css); | ||
| 267 | } | ||
| 268 | |||
| 269 | #else /* CONFIG_CGROUP_WRITEBACK */ | ||
| 270 | |||
| 271 | static inline void inode_attach_wb(struct inode *inode, struct page *page) | ||
| 272 | { | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline void inode_detach_wb(struct inode *inode) | ||
| 276 | { | ||
| 277 | } | ||
| 278 | |||
| 279 | static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc, | ||
| 280 | struct inode *inode) | ||
| 281 | __releases(&inode->i_lock) | ||
| 282 | { | ||
| 283 | spin_unlock(&inode->i_lock); | ||
| 284 | } | ||
| 285 | |||
| 286 | static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, | ||
| 287 | struct inode *inode) | ||
| 288 | { | ||
| 289 | } | ||
| 290 | |||
| 291 | static inline void wbc_detach_inode(struct writeback_control *wbc) | ||
| 292 | { | ||
| 293 | } | ||
| 294 | |||
| 295 | static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) | ||
| 296 | { | ||
| 297 | } | ||
| 298 | |||
| 299 | static inline void wbc_account_io(struct writeback_control *wbc, | ||
| 300 | struct page *page, size_t bytes) | ||
| 301 | { | ||
| 302 | } | ||
| 303 | |||
| 304 | #endif /* CONFIG_CGROUP_WRITEBACK */ | ||
| 305 | |||
| 110 | /* | 306 | /* |
| 111 | * mm/page-writeback.c | 307 | * mm/page-writeback.c |
| 112 | */ | 308 | */ |
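Taken together, the cgroup-writeback helpers above let a writeback entry point bind a wbc to the inode's wb, tag each bio with the right blkcg, and record per-wb byte counts for foreign-inode detection. A hedged sketch of such a path; my_fdatawrite() and my_write_one_page() are invented, only the wbc_* calls are declared above:

    static int my_fdatawrite(struct inode *inode, struct writeback_control *wbc)
    {
        int ret;

        /* Ensure the inode has a wb and attach it to this wbc. */
        wbc_attach_fdatawrite_inode(wbc, inode);

        /* The per-page path is expected to call wbc_init_bio() when it
         * allocates a bio and wbc_account_io() for each page written. */
        ret = my_write_one_page(inode, wbc);

        /* Finish foreign-inode detection for this writeback pass. */
        wbc_detach_inode(wbc);
        return ret;
    }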
| @@ -120,8 +316,12 @@ static inline void laptop_sync_completion(void) { } | |||
| 120 | #endif | 316 | #endif |
| 121 | void throttle_vm_writeout(gfp_t gfp_mask); | 317 | void throttle_vm_writeout(gfp_t gfp_mask); |
| 122 | bool zone_dirty_ok(struct zone *zone); | 318 | bool zone_dirty_ok(struct zone *zone); |
| 319 | int wb_domain_init(struct wb_domain *dom, gfp_t gfp); | ||
| 320 | #ifdef CONFIG_CGROUP_WRITEBACK | ||
| 321 | void wb_domain_exit(struct wb_domain *dom); | ||
| 322 | #endif | ||
| 123 | 323 | ||
| 124 | extern unsigned long global_dirty_limit; | 324 | extern struct wb_domain global_wb_domain; |
| 125 | 325 | ||
| 126 | /* These are exported to sysctl. */ | 326 | /* These are exported to sysctl. */ |
| 127 | extern int dirty_background_ratio; | 327 | extern int dirty_background_ratio; |
| @@ -155,19 +355,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int, | |||
| 155 | void __user *, size_t *, loff_t *); | 355 | void __user *, size_t *, loff_t *); |
| 156 | 356 | ||
| 157 | void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); | 357 | void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); |
| 158 | unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, | 358 | unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); |
| 159 | unsigned long dirty); | ||
| 160 | |||
| 161 | void __bdi_update_bandwidth(struct backing_dev_info *bdi, | ||
| 162 | unsigned long thresh, | ||
| 163 | unsigned long bg_thresh, | ||
| 164 | unsigned long dirty, | ||
| 165 | unsigned long bdi_thresh, | ||
| 166 | unsigned long bdi_dirty, | ||
| 167 | unsigned long start_time); | ||
| 168 | 359 | ||
| 360 | void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); | ||
| 169 | void page_writeback_init(void); | 361 | void page_writeback_init(void); |
| 170 | void balance_dirty_pages_ratelimited(struct address_space *mapping); | 362 | void balance_dirty_pages_ratelimited(struct address_space *mapping); |
| 363 | bool wb_over_bg_thresh(struct bdi_writeback *wb); | ||
| 171 | 364 | ||
| 172 | typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, | 365 | typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, |
| 173 | void *data); | 366 | void *data); |
diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 56529b34dc63..d30eff3d84d5 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h | |||
| @@ -81,7 +81,8 @@ struct zpool_driver { | |||
| 81 | atomic_t refcount; | 81 | atomic_t refcount; |
| 82 | struct list_head list; | 82 | struct list_head list; |
| 83 | 83 | ||
| 84 | void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops); | 84 | void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops, |
| 85 | struct zpool *zpool); | ||
| 85 | void (*destroy)(void *pool); | 86 | void (*destroy)(void *pool); |
| 86 | 87 | ||
| 87 | int (*malloc)(void *pool, size_t size, gfp_t gfp, | 88 | int (*malloc)(void *pool, size_t size, gfp_t gfp, |
| @@ -102,6 +103,4 @@ void zpool_register_driver(struct zpool_driver *driver); | |||
| 102 | 103 | ||
| 103 | int zpool_unregister_driver(struct zpool_driver *driver); | 104 | int zpool_unregister_driver(struct zpool_driver *driver); |
| 104 | 105 | ||
| 105 | int zpool_evict(void *pool, unsigned long handle); | ||
| 106 | |||
| 107 | #endif | 106 | #endif |
