From a822bea7962b500b0bcab41bf3500f7c40ae56b5 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 6 Jun 2008 01:34:00 -0400 Subject: Input: serio - mark serio_register_driver() __must_check Also remove extra declaration of serio_register_driver(). Signed-off-by: Dmitry Torokhov --- include/linux/serio.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/serio.h b/include/linux/serio.h index e72716cca577..25641d9e0ea8 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -87,11 +87,10 @@ void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); -static inline int serio_register_driver(struct serio_driver *drv) +static inline int __must_check serio_register_driver(struct serio_driver *drv) { return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME); } -int serio_register_driver(struct serio_driver *drv); void serio_unregister_driver(struct serio_driver *drv); static inline int serio_write(struct serio *serio, unsigned char data) -- cgit v1.2.2 From 3f07af494dfa6de43137dae430431c9fbf929c0c Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Fri, 25 Jul 2008 22:25:13 -0400 Subject: of: adapt of_find_i2c_driver() to be usable by SPI also SPI has a similar problem to I2C in that it needs to determine an appropriate modalias value for each device node. This patch adapts the of_i2c of_find_i2c_driver() function to be usable by of_spi also. Signed-off-by: Grant Likely --- include/linux/of.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index 59a61bdc98b6..79886ade070f 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); #endif /* _LINUX_OF_H */ -- cgit v1.2.2 From dc87c98e8f635a718f1abb2c3e15fc77c0001651 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 15 May 2008 16:50:22 -0600 Subject: spi: split up spi_new_device() to allow two stage registration. spi_new_device() allocates and registers an spi device all in one swoop. If the driver needs to add extra data to the spi_device before it is registered, then this causes problems. This is needed for OF device tree support so that the SPI device tree helper can add a pointer to the device node after the device is allocated, but before the device is registered. OF aware SPI devices can then retrieve data out of the device node to populate a platform data structure. This patch splits the allocation and registration portions of code out of spi_new_device() and creates two new functions: spi_alloc_device() and spi_add_device(). spi_new_device() is modified to use the new functions for allocation and registration. None of the existing users of spi_new_device() should be affected by this change. Drivers using the new API can forego the use of spi_board_info structure to describe the device layout and populate data into the spi_device structure directly. This change is in preparation for adding an OF device tree parser to generate spi_devices based on data in the device tree.
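As a usage illustration (not part of the patch itself), a minimal sketch of the resulting two-stage flow, assuming the usual spi_device fields (modalias, chip_select, max_speed_hz) and an invented my_add_touchscreen() helper; the chip values are made up:

/*
 * Hedged sketch only: helper name, chip values and error handling are
 * illustrative assumptions based on the API described above.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spi/spi.h>

static int my_add_touchscreen(struct spi_master *master)
{
	struct spi_device *spi;
	int ret;

	spi = spi_alloc_device(master);	/* stage 1: allocate, not yet registered */
	if (!spi)
		return -ENOMEM;

	/*
	 * Fill in what spi_board_info would normally have provided; an
	 * OF-aware caller could also attach its device-tree data here.
	 */
	strlcpy(spi->modalias, "ads7846", sizeof(spi->modalias));
	spi->chip_select = 0;
	spi->max_speed_hz = 125000;

	ret = spi_add_device(spi);	/* stage 2: register with the SPI core */
	if (ret)
		spi_dev_put(spi);	/* drop the reference taken at allocation */
	return ret;
}

spi_new_device() itself is reworked to perform exactly these two steps with values taken from spi_board_info, which is why existing callers are unaffected.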
Signed-off-by: Grant Likely Acked-by: David Brownell --- include/linux/spi/spi.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a9cc29d46653..4be01bb44377 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -778,7 +778,19 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_master(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two + * stage registration sequence for each spi_device. This gives the caller + * some more control over the spi_device structure before it is registered, + * but requires that caller to initialize fields that would otherwise + * be defined using the board info. */ +extern struct spi_device * +spi_alloc_device(struct spi_master *master); + +extern int +spi_add_device(struct spi_device *spi); + extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); -- cgit v1.2.2 From 284b01897340974000bcc84de87a4e1becc8a83d Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Fri, 16 May 2008 11:37:09 -0600 Subject: spi: Add OF binding support for SPI busses This patch adds support for populating an SPI bus based on data in the OF device tree. This is useful for powerpc platforms which use the device tree instead of discrete code for describing platform layout. Signed-off-by: Grant Likely --- include/linux/of_spi.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 include/linux/of_spi.h (limited to 'include/linux') diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h new file mode 100644 index 000000000000..5f71ee8c0868 --- /dev/null +++ b/include/linux/of_spi.h @@ -0,0 +1,18 @@ +/* + * OpenFirmware SPI support routines + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * Support routines for deriving SPI device attachments from the device + * tree. + */ + +#ifndef __LINUX_OF_SPI_H +#define __LINUX_OF_SPI_H + +#include +#include + +extern void of_register_spi_devices(struct spi_master *master, + struct device_node *np); + +#endif /* __LINUX_OF_SPI */ -- cgit v1.2.2 From 3bc9f79ee1ddc913be0a6d3592036683ef8a3148 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 25 Jul 2008 14:57:58 +0200 Subject: iommu: add iommu_num_pages helper function Calculating the number of pages from given address and length numbers is a task required in multiple IOMMU implementations. So implement this as a generic function into the IOMMU helper code. 
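To make the new helper's contract concrete, a hedged sketch of the page-count arithmetic such a function encapsulates (the real body lives in the IOMMU helper library, not in this header-only diff; the function name here is illustrative):

/*
 * Illustrative only: count how many pages the byte range [addr, addr + len)
 * touches, including partially used first and last pages.
 */
#include <linux/mm.h>	/* PAGE_SIZE, PAGE_SHIFT, PAGE_MASK */

static unsigned long example_iommu_num_pages(unsigned long addr,
					     unsigned long len)
{
	/* offset of addr within its page, plus the mapping length */
	unsigned long size = (addr & ~PAGE_MASK) + len;

	/* round up to whole pages */
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

For example, a 16-byte mapping that starts 8 bytes before a page boundary spans two pages even though it is much smaller than one page.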
Signed-off-by: Joerg Roedel Cc: iommu@lists.linux-foundation.org Cc: bhavna.sarathy@amd.com Cc: robert.richter@amd.com Cc: FUJITA Tomonori Signed-off-by: Ingo Molnar --- include/linux/iommu-helper.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index c975caf75385..f8598f583944 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -8,3 +8,4 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long align_mask); extern void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr); +extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len); -- cgit v1.2.2 From b8d317d10cca76cabe6b03ebfeb23cc99118b731 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Thu, 24 Jul 2008 18:21:29 -0700 Subject: cpumask: make cpumask_of_cpu_map generic If an arch doesn't define cpumask_of_cpu_map, create a generic statically-initialized one for them. This allows removal of the buggy cpumask_of_cpu() macro (&cpumask_of_cpu() gives address of out-of-scope var). An arch with NR_CPUS of 4096 probably wants to allocate this itself based on the actual number of CPUs, since otherwise they're using 2MB of rodata (1024 cpus means 128k). That's what CONFIG_HAVE_CPUMASK_OF_CPU_MAP is for (only x86/64 does so at the moment). In future as we support more CPUs, we'll need to resort to a get_cpu_map()/put_cpu_map() allocation scheme. Signed-off-by: Mike Travis Signed-off-by: Rusty Russell Cc: Andrew Morton Cc: Jack Steiner Signed-off-by: Ingo Molnar --- include/linux/cpumask.h | 41 +++-------------------------------------- 1 file changed, 3 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1b5c98e7fef7..8fa3b6d4a320 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -62,15 +62,7 @@ * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set - *ifdef CONFIG_HAS_CPUMASK_OF_CPU - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v - * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *else - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v - * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *endif + * (can be used as an lvalue) * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask @@ -274,36 +266,9 @@ static inline void __cpus_shift_left(cpumask_t *dstp, } -#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -extern cpumask_t *cpumask_of_cpu_map; +/* cpumask_of_cpu_map[] is in kernel/cpu.c */ +extern const cpumask_t *cpumask_of_cpu_map; #define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) -#define cpumask_of_cpu_ptr(v, cpu) \ - const cpumask_t *v = &cpumask_of_cpu(cpu) -#define cpumask_of_cpu_ptr_declare(v) \ - const cpumask_t *v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - v = &cpumask_of_cpu(cpu) -#else -#define cpumask_of_cpu(cpu) \ -({ \ - typeof(_unused_cpumask_arg_) m; \ - if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(cpu); \ - } else { \ - cpus_clear(m); \ - cpu_set((cpu), m); \ - } \ - m; \ -}) -#define cpumask_of_cpu_ptr(v, cpu) \ - cpumask_t _##v = cpumask_of_cpu(cpu); \ - const cpumask_t *v = &_##v 
-#define cpumask_of_cpu_ptr_declare(v) \ - cpumask_t _##v; \ - const cpumask_t *v = &_##v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - _##v = cpumask_of_cpu(cpu) -#endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) -- cgit v1.2.2 From fdd2a7e2dac56a3384068802be46b822f2aed703 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 26 Jul 2008 13:25:25 -0300 Subject: V4L/DVB (8500a): videotext.h: whitespace cleanup Signed-off-by: Mauro Carvalho Chehab --- include/linux/videotext.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/videotext.h b/include/linux/videotext.h index 018f92047ff8..3e68c8d1c7f7 100644 --- a/include/linux/videotext.h +++ b/include/linux/videotext.h @@ -45,10 +45,10 @@ #define VTXIOCCLRCACHE_OLD 0x710b /* clear cache on VTX-interface (if avail.) */ #define VTXIOCSETVIRT_OLD 0x710c /* turn on virtual mode (this disables TV-display) */ -/* +/* * Definitions for VTXIOCGETINFO */ - + #define SAA5243 0 #define SAA5246 1 #define SAA5249 2 @@ -57,10 +57,10 @@ typedef struct { int version_major, version_minor; /* version of driver; if version_major changes, driver */ - /* is not backward compatible!!! CHECK THIS!!! */ + /* is not backward compatible!!! CHECK THIS!!! */ int numpages; /* number of page-buffers of vtx-chipset */ int cct_type; /* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or - * SAA5249) */ + * SAA5249) */ } vtx_info_t; @@ -81,7 +81,7 @@ vtx_info_t; #define PGMASK_HOUR (HR_TEN | HR_UNIT) #define PGMASK_MINUTE (MIN_TEN | MIN_UNIT) -typedef struct +typedef struct { int page; /* number of requested page (hexadecimal) */ int hour; /* requested hour (hexadecimal) */ @@ -98,11 +98,11 @@ vtx_pagereq_t; /* * Definitions for VTXIOC{GETSTAT,PUTSTAT} */ - + #define VTX_PAGESIZE (40 * 24) #define VTX_VIRTUALSIZE (40 * 49) -typedef struct +typedef struct { int pagenum; /* number of page (hexadecimal) */ int hour; /* hour (hexadecimal) */ @@ -121,5 +121,5 @@ typedef struct unsigned hamming : 1; /* hamming-error occurred */ } vtx_pageinfo_t; - + #endif /* _VTX_H */ -- cgit v1.2.2 From 9993e51c0c47ec69dce1f26c2321af6bb9165e9e Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 26 Jul 2008 13:53:46 -0300 Subject: V4L/DVB (8502): videodev2.h: CodingStyle cleanups Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 378 ++++++++++++++++++++-------------------------- 1 file changed, 166 insertions(+), 212 deletions(-) (limited to 'include/linux') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 2e66a95e8d32..cc0c8952323b 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -91,8 +91,8 @@ */ /* Four-character-code (FOURCC) */ -#define v4l2_fourcc(a,b,c,d)\ - (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24)) +#define v4l2_fourcc(a, b, c, d)\ + ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) /* * E N U M S @@ -226,8 +226,7 @@ struct v4l2_fract { /* * D R I V E R C A P A B I L I T I E S */ -struct v4l2_capability -{ +struct v4l2_capability { __u8 driver[16]; /* i.e. "bttv" */ __u8 card[32]; /* i.e. 
"Hauppauge WinTV" */ __u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */ @@ -259,8 +258,7 @@ struct v4l2_capability /* * V I D E O I M A G E F O R M A T */ -struct v4l2_pix_format -{ +struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; @@ -272,68 +270,67 @@ struct v4l2_pix_format }; /* Pixel format FOURCC depth Description */ -#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */ -#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */ -#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R','G','B','O') /* 16 RGB-5-5-5 */ -#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R','G','B','P') /* 16 RGB-5-6-5 */ -#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R','G','B','Q') /* 16 RGB-5-5-5 BE */ -#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R','G','B','R') /* 16 RGB-5-6-5 BE */ -#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B','G','R','3') /* 24 BGR-8-8-8 */ -#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R','G','B','3') /* 24 RGB-8-8-8 */ -#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B','G','R','4') /* 32 BGR-8-8-8-8 */ -#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R','G','B','4') /* 32 RGB-8-8-8-8 */ -#define V4L2_PIX_FMT_GREY v4l2_fourcc('G','R','E','Y') /* 8 Greyscale */ -#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y','1','6',' ') /* 16 Greyscale */ -#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P','A','L','8') /* 8 8-bit palette */ -#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y','V','U','9') /* 9 YVU 4:1:0 */ -#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y','V','1','2') /* 12 YVU 4:2:0 */ -#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y','U','Y','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U','Y','V','Y') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4','2','2','P') /* 16 YVU422 planar */ -#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4','1','1','P') /* 16 YVU411 planar */ -#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y','4','1','P') /* 12 YUV 4:1:1 */ -#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y','4','4','4') /* 16 xxxxyyyy uuuuvvvv */ -#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y','U','V','O') /* 16 YUV-5-5-5 */ -#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y','U','V','P') /* 16 YUV-5-6-5 */ -#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y','U','V','4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 
'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ /* two planes -- one Y, one Cr + Cb interleaved */ -#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N','V','1','2') /* 12 Y/CbCr 4:2:0 */ -#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N','V','2','1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ /* The following formats are not defined in the V4L2 specification */ -#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y','U','V','9') /* 9 YUV 4:1:0 */ -#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */ -#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ -#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */ +#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */ /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ -#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ -#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */ -#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ -#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M','J','P','G') /* Motion-JPEG */ -#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J','P','E','G') /* JFIF JPEG */ -#define V4L2_PIX_FMT_DV v4l2_fourcc('d','v','s','d') /* 1394 */ -#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M','P','E','G') /* MPEG-1/2/4 */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */ /* Vendor-specific formats */ -#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W','N','V','A') /* Winnov hw compress */ -#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S','9','1','0') /* SN9C10x compression */ -#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */ -#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */ -#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */ -#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */ -#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */ -#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ /* * F O R M A T E N U M E R A T I O N */ -struct v4l2_fmtdesc -{ +struct v4l2_fmtdesc { __u32 index; /* Format number */ enum v4l2_buf_type type; /* buffer type */ __u32 flags; @@ -349,21 +346,18 @@ struct v4l2_fmtdesc /* * F R A M E S I Z E E N U M E R A T I O N */ -enum v4l2_frmsizetypes -{ +enum v4l2_frmsizetypes { V4L2_FRMSIZE_TYPE_DISCRETE = 1, V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, V4L2_FRMSIZE_TYPE_STEPWISE = 3, }; -struct v4l2_frmsize_discrete -{ +struct v4l2_frmsize_discrete { __u32 width; /* Frame width [pixel] */ __u32 height; /* Frame height [pixel] */ }; -struct v4l2_frmsize_stepwise -{ +struct v4l2_frmsize_stepwise { __u32 min_width; /* Minimum frame width [pixel] */ __u32 max_width; /* Maximum frame width [pixel] */ __u32 step_width; /* Frame width step size [pixel] */ @@ -372,8 +366,7 @@ struct v4l2_frmsize_stepwise __u32 step_height; /* Frame height step size [pixel] */ }; -struct v4l2_frmsizeenum -{ +struct v4l2_frmsizeenum { __u32 index; /* Frame size number */ __u32 pixel_format; /* Pixel format */ __u32 type; /* Frame size type the device supports. 
*/ @@ -389,22 +382,19 @@ struct v4l2_frmsizeenum /* * F R A M E R A T E E N U M E R A T I O N */ -enum v4l2_frmivaltypes -{ +enum v4l2_frmivaltypes { V4L2_FRMIVAL_TYPE_DISCRETE = 1, V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, V4L2_FRMIVAL_TYPE_STEPWISE = 3, }; -struct v4l2_frmival_stepwise -{ +struct v4l2_frmival_stepwise { struct v4l2_fract min; /* Minimum frame interval [s] */ struct v4l2_fract max; /* Maximum frame interval [s] */ struct v4l2_fract step; /* Frame interval step size [s] */ }; -struct v4l2_frmivalenum -{ +struct v4l2_frmivalenum { __u32 index; /* Frame format index */ __u32 pixel_format; /* Pixel format */ __u32 width; /* Frame width */ @@ -423,8 +413,7 @@ struct v4l2_frmivalenum /* * T I M E C O D E */ -struct v4l2_timecode -{ +struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; @@ -449,8 +438,7 @@ struct v4l2_timecode #define V4L2_TC_USERBITS_8BITCHARS 0x0008 /* The above is based on SMPTE timecodes */ -struct v4l2_jpegcompression -{ +struct v4l2_jpegcompression { int quality; int APPn; /* Number of APP segment to be written, @@ -482,16 +470,14 @@ struct v4l2_jpegcompression /* * M E M O R Y - M A P P I N G B U F F E R S */ -struct v4l2_requestbuffers -{ +struct v4l2_requestbuffers { __u32 count; enum v4l2_buf_type type; enum v4l2_memory memory; __u32 reserved[2]; }; -struct v4l2_buffer -{ +struct v4l2_buffer { __u32 index; enum v4l2_buf_type type; __u32 bytesused; @@ -525,13 +511,12 @@ struct v4l2_buffer /* * O V E R L A Y P R E V I E W */ -struct v4l2_framebuffer -{ +struct v4l2_framebuffer { __u32 capability; __u32 flags; /* FIXME: in theory we should pass something like PCI device + memory * region + offset instead of some physical address */ - void* base; + void *base; struct v4l2_pix_format fmt; }; /* Flags for the 'capability' field. 
Read only */ @@ -550,14 +535,12 @@ struct v4l2_framebuffer #define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 #define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020 -struct v4l2_clip -{ +struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip __user *next; }; -struct v4l2_window -{ +struct v4l2_window { struct v4l2_rect w; enum v4l2_field field; __u32 chromakey; @@ -570,8 +553,7 @@ struct v4l2_window /* * C A P T U R E P A R A M E T E R S */ -struct v4l2_captureparm -{ +struct v4l2_captureparm { __u32 capability; /* Supported modes */ __u32 capturemode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in .1us units */ @@ -584,8 +566,7 @@ struct v4l2_captureparm #define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */ #define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */ -struct v4l2_outputparm -{ +struct v4l2_outputparm { __u32 capability; /* Supported modes */ __u32 outputmode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in seconds */ @@ -702,8 +683,7 @@ typedef __u64 v4l2_std_id; #define V4L2_STD_ALL (V4L2_STD_525_60 |\ V4L2_STD_625_50) -struct v4l2_standard -{ +struct v4l2_standard { __u32 index; v4l2_std_id id; __u8 name[24]; @@ -715,8 +695,7 @@ struct v4l2_standard /* * V I D E O I N P U T S */ -struct v4l2_input -{ +struct v4l2_input { __u32 index; /* Which input */ __u8 name[32]; /* Label */ __u32 type; /* Type of input */ @@ -753,8 +732,7 @@ struct v4l2_input /* * V I D E O O U T P U T S */ -struct v4l2_output -{ +struct v4l2_output { __u32 index; /* Which output */ __u8 name[32]; /* Label */ __u32 type; /* Type of output */ @@ -771,14 +749,12 @@ struct v4l2_output /* * C O N T R O L S */ -struct v4l2_control -{ +struct v4l2_control { __u32 id; __s32 value; }; -struct v4l2_ext_control -{ +struct v4l2_ext_control { __u32 id; __u32 reserved2[2]; union { @@ -788,8 +764,7 @@ struct v4l2_ext_control }; } __attribute__ ((packed)); -struct v4l2_ext_controls -{ +struct v4l2_ext_controls { __u32 ctrl_class; __u32 count; __u32 error_idx; @@ -807,8 +782,7 @@ struct v4l2_ext_controls #define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ -struct v4l2_queryctrl -{ +struct v4l2_queryctrl { __u32 id; enum v4l2_ctrl_type type; __u8 name[32]; /* Whatever */ @@ -821,8 +795,7 @@ struct v4l2_queryctrl }; /* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */ -struct v4l2_querymenu -{ +struct v4l2_querymenu { __u32 id; __u32 index; __u8 name[32]; /* Whatever */ @@ -1104,8 +1077,7 @@ enum v4l2_exposure_auto_type { /* * T U N I N G */ -struct v4l2_tuner -{ +struct v4l2_tuner { __u32 index; __u8 name[32]; enum v4l2_tuner_type type; @@ -1119,8 +1091,7 @@ struct v4l2_tuner __u32 reserved[4]; }; -struct v4l2_modulator -{ +struct v4l2_modulator { __u32 index; __u8 name[32]; __u32 capability; @@ -1153,8 +1124,7 @@ struct v4l2_modulator #define V4L2_TUNER_MODE_LANG1 0x0003 #define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 -struct v4l2_frequency -{ +struct v4l2_frequency { __u32 tuner; enum v4l2_tuner_type type; __u32 frequency; @@ -1172,8 +1142,7 @@ struct v4l2_hw_freq_seek { /* * A U D I O */ -struct v4l2_audio -{ +struct v4l2_audio { __u32 index; __u8 name[32]; __u32 capability; @@ -1188,8 +1157,7 @@ struct v4l2_audio /* Flags for the 'mode' field */ #define V4L2_AUDMODE_AVL 0x00001 -struct v4l2_audioout -{ +struct v4l2_audioout { __u32 index; __u8 name[32]; __u32 capability; @@ -1253,8 +1221,7 @@ struct v4l2_encoder_cmd { */ /* Raw VBI */ -struct v4l2_vbi_format -{ +struct 
v4l2_vbi_format { __u32 sampling_rate; /* in 1 Hz */ __u32 offset; __u32 samples_per_line; @@ -1266,8 +1233,8 @@ struct v4l2_vbi_format }; /* VBI flags */ -#define V4L2_VBI_UNSYNC (1<< 0) -#define V4L2_VBI_INTERLACED (1<< 1) +#define V4L2_VBI_UNSYNC (1 << 0) +#define V4L2_VBI_INTERLACED (1 << 1) /* Sliced VBI * @@ -1276,8 +1243,7 @@ struct v4l2_vbi_format * notice in the definitive implementation. */ -struct v4l2_sliced_vbi_format -{ +struct v4l2_sliced_vbi_format { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1301,8 +1267,7 @@ struct v4l2_sliced_vbi_format #define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) #define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) -struct v4l2_sliced_vbi_cap -{ +struct v4l2_sliced_vbi_cap { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1313,8 +1278,7 @@ struct v4l2_sliced_vbi_cap __u32 reserved[3]; /* must be 0 */ }; -struct v4l2_sliced_vbi_data -{ +struct v4l2_sliced_vbi_data { __u32 id; __u32 field; /* 0: first field, 1: second field */ __u32 line; /* 1-23 */ @@ -1328,27 +1292,23 @@ struct v4l2_sliced_vbi_data /* Stream data format */ -struct v4l2_format -{ +struct v4l2_format { enum v4l2_buf_type type; - union - { - struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE - struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY - struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE - struct v4l2_sliced_vbi_format sliced; // V4L2_BUF_TYPE_SLICED_VBI_CAPTURE - __u8 raw_data[200]; // user-defined + union { + struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */ + struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */ + struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */ + struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ + __u8 raw_data[200]; /* user-defined */ } fmt; }; /* Stream type-dependent parameters */ -struct v4l2_streamparm -{ +struct v4l2_streamparm { enum v4l2_buf_type type; - union - { + union { struct v4l2_captureparm capture; struct v4l2_outputparm output; __u8 raw_data[200]; /* user-defined */ @@ -1386,92 +1346,86 @@ struct v4l2_chip_ident { * I O C T L C O D E S F O R V I D E O D E V I C E S * */ -#define VIDIOC_QUERYCAP _IOR ('V', 0, struct v4l2_capability) -#define VIDIOC_RESERVED _IO ('V', 1) -#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) -#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) -#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) -#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) -#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) -#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) -#define VIDIOC_S_FBUF _IOW ('V', 11, struct v4l2_framebuffer) -#define VIDIOC_OVERLAY _IOW ('V', 14, int) -#define VIDIOC_QBUF _IOWR ('V', 15, struct v4l2_buffer) -#define VIDIOC_DQBUF _IOWR ('V', 17, struct v4l2_buffer) -#define VIDIOC_STREAMON _IOW ('V', 18, int) -#define VIDIOC_STREAMOFF _IOW ('V', 19, int) -#define VIDIOC_G_PARM _IOWR ('V', 21, struct v4l2_streamparm) -#define VIDIOC_S_PARM _IOWR ('V', 22, struct v4l2_streamparm) -#define VIDIOC_G_STD _IOR ('V', 23, v4l2_std_id) -#define VIDIOC_S_STD _IOW ('V', 24, v4l2_std_id) -#define VIDIOC_ENUMSTD _IOWR ('V', 25, struct v4l2_standard) -#define VIDIOC_ENUMINPUT _IOWR ('V', 26, struct 
v4l2_input) -#define VIDIOC_G_CTRL _IOWR ('V', 27, struct v4l2_control) -#define VIDIOC_S_CTRL _IOWR ('V', 28, struct v4l2_control) -#define VIDIOC_G_TUNER _IOWR ('V', 29, struct v4l2_tuner) -#define VIDIOC_S_TUNER _IOW ('V', 30, struct v4l2_tuner) -#define VIDIOC_G_AUDIO _IOR ('V', 33, struct v4l2_audio) -#define VIDIOC_S_AUDIO _IOW ('V', 34, struct v4l2_audio) -#define VIDIOC_QUERYCTRL _IOWR ('V', 36, struct v4l2_queryctrl) -#define VIDIOC_QUERYMENU _IOWR ('V', 37, struct v4l2_querymenu) -#define VIDIOC_G_INPUT _IOR ('V', 38, int) -#define VIDIOC_S_INPUT _IOWR ('V', 39, int) -#define VIDIOC_G_OUTPUT _IOR ('V', 46, int) -#define VIDIOC_S_OUTPUT _IOWR ('V', 47, int) -#define VIDIOC_ENUMOUTPUT _IOWR ('V', 48, struct v4l2_output) -#define VIDIOC_G_AUDOUT _IOR ('V', 49, struct v4l2_audioout) -#define VIDIOC_S_AUDOUT _IOW ('V', 50, struct v4l2_audioout) -#define VIDIOC_G_MODULATOR _IOWR ('V', 54, struct v4l2_modulator) -#define VIDIOC_S_MODULATOR _IOW ('V', 55, struct v4l2_modulator) -#define VIDIOC_G_FREQUENCY _IOWR ('V', 56, struct v4l2_frequency) -#define VIDIOC_S_FREQUENCY _IOW ('V', 57, struct v4l2_frequency) -#define VIDIOC_CROPCAP _IOWR ('V', 58, struct v4l2_cropcap) -#define VIDIOC_G_CROP _IOWR ('V', 59, struct v4l2_crop) -#define VIDIOC_S_CROP _IOW ('V', 60, struct v4l2_crop) -#define VIDIOC_G_JPEGCOMP _IOR ('V', 61, struct v4l2_jpegcompression) -#define VIDIOC_S_JPEGCOMP _IOW ('V', 62, struct v4l2_jpegcompression) -#define VIDIOC_QUERYSTD _IOR ('V', 63, v4l2_std_id) -#define VIDIOC_TRY_FMT _IOWR ('V', 64, struct v4l2_format) -#define VIDIOC_ENUMAUDIO _IOWR ('V', 65, struct v4l2_audio) -#define VIDIOC_ENUMAUDOUT _IOWR ('V', 66, struct v4l2_audioout) -#define VIDIOC_G_PRIORITY _IOR ('V', 67, enum v4l2_priority) -#define VIDIOC_S_PRIORITY _IOW ('V', 68, enum v4l2_priority) -#define VIDIOC_G_SLICED_VBI_CAP _IOWR ('V', 69, struct v4l2_sliced_vbi_cap) -#define VIDIOC_LOG_STATUS _IO ('V', 70) -#define VIDIOC_G_EXT_CTRLS _IOWR ('V', 71, struct v4l2_ext_controls) -#define VIDIOC_S_EXT_CTRLS _IOWR ('V', 72, struct v4l2_ext_controls) -#define VIDIOC_TRY_EXT_CTRLS _IOWR ('V', 73, struct v4l2_ext_controls) +#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) +#define VIDIOC_RESERVED _IO('V', 1) +#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) +#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) +#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) +#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers) +#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer) +#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer) +#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer) +#define VIDIOC_OVERLAY _IOW('V', 14, int) +#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer) +#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer) +#define VIDIOC_STREAMON _IOW('V', 18, int) +#define VIDIOC_STREAMOFF _IOW('V', 19, int) +#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm) +#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm) +#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input) +#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control) +#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control) +#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner) +#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner) +#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio) +#define 
VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio) +#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl) +#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu) +#define VIDIOC_G_INPUT _IOR('V', 38, int) +#define VIDIOC_S_INPUT _IOWR('V', 39, int) +#define VIDIOC_G_OUTPUT _IOR('V', 46, int) +#define VIDIOC_S_OUTPUT _IOWR('V', 47, int) +#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output) +#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout) +#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout) +#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator) +#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator) +#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency) +#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency) +#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap) +#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop) +#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) +#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) +#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) +#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) +#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority) +#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority) +#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap) +#define VIDIOC_LOG_STATUS _IO('V', 70) +#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls) +#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls) +#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls) #if 1 -#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum) -#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum) -#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx) -#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd) -#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd) +#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum) +#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum) +#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) +#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) +#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) /* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ -#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register) -#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register) +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register) -#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident) +#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) #endif -#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ -#define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int) -#define VIDIOC_S_PARM_OLD _IOW ('V', 22, struct v4l2_streamparm) -#define VIDIOC_S_CTRL_OLD _IOW ('V', 28, struct v4l2_control) -#define VIDIOC_G_AUDIO_OLD _IOWR ('V', 33, struct v4l2_audio) -#define VIDIOC_G_AUDOUT_OLD _IOWR ('V', 49, struct v4l2_audioout) -#define VIDIOC_CROPCAP_OLD _IOR ('V', 
58, struct v4l2_cropcap) +#define VIDIOC_OVERLAY_OLD _IOWR('V', 14, int) +#define VIDIOC_S_PARM_OLD _IOW('V', 22, struct v4l2_streamparm) +#define VIDIOC_S_CTRL_OLD _IOW('V', 28, struct v4l2_control) +#define VIDIOC_G_AUDIO_OLD _IOWR('V', 33, struct v4l2_audio) +#define VIDIOC_G_AUDOUT_OLD _IOWR('V', 49, struct v4l2_audioout) +#define VIDIOC_CROPCAP_OLD _IOR('V', 58, struct v4l2_cropcap) #endif #define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ #endif /* __LINUX_VIDEODEV2_H */ - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ -- cgit v1.2.2 From 1250ac6d4ab716dafe0ac245fd31cd3a7cbc0a98 Mon Sep 17 00:00:00 2001 From: Jean-Francois Moine Date: Sat, 26 Jul 2008 08:02:47 -0300 Subject: V4L/DVB (8518): gspca: Remove the remaining frame decoding functions from the subdrivers. SPCA505 and SPCA508 added in the pixel formats. Decode functions and associated resources removed in spca505, 506 and 508. The decode routines are now found in the V4L library. Signed-off-by: Jean-Francois Moine Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index cc0c8952323b..7d9ac046389e 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -324,6 +324,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ #define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S','5','0','5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S','5','0','8') /* YUVY per line */ #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ -- cgit v1.2.2 From c1d7f4f1648cb8efd87f1b9560c40af2297e7c05 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 26 Jul 2008 08:33:47 -0300 Subject: V4L/DVB (8524): videodev: copy the VID_TYPE defines to videodev.h The VID_TYPE defines are V4L1 specific, so copy them back to videodev.h. In videodev2.h ensure that they are not used in the kernel (you need to include videodev.h instead) and mark them are deprecated. 
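Because the defines remain visible outside the kernel, here is a hedged sketch of the classic V4L1 consumer they exist for (the helper name and error handling are illustrative, not part of the patch):

/*
 * Userspace-side illustration: the V4L1 VIDIOCGCAP ioctl reports a
 * VID_TYPE_* bitmask in the "type" field of struct video_capability.
 */
#include <sys/ioctl.h>
#include <linux/videodev.h>

static int has_tuner(int fd)
{
	struct video_capability cap;

	if (ioctl(fd, VIDIOCGCAP, &cap) < 0)
		return 0;	/* not a V4L1 device, or the query failed */

	return (cap.type & VID_TYPE_TUNER) != 0;
}

V4L2 applications query capabilities through VIDIOC_QUERYCAP instead, which is why the defines are fenced off from the kernel and marked as deprecated in videodev2.h.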
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev.h | 15 +++++++++++++++ include/linux/videodev2.h | 6 ++++++ 2 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 9385a566aed8..15a653d41132 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -17,6 +17,21 @@ #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) +#define VID_TYPE_CAPTURE 1 /* Can capture */ +#define VID_TYPE_TUNER 2 /* Can tune */ +#define VID_TYPE_TELETEXT 4 /* Does teletext */ +#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */ +#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */ +#define VID_TYPE_CLIPPING 32 /* Can clip */ +#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */ +#define VID_TYPE_SCALES 128 /* Scalable */ +#define VID_TYPE_MONOCHROME 256 /* Monochrome only */ +#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */ +#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */ +#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ +#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ +#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ + struct video_capability { char name[32]; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 7d9ac046389e..f7195351a1e7 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -71,6 +71,11 @@ */ #define VIDEO_MAX_FRAME 32 +#ifndef __KERNEL__ + +/* These defines are V4L1 specific and should not be used with the V4L2 API! + They will be removed from this header in the future. */ + #define VID_TYPE_CAPTURE 1 /* Can capture */ #define VID_TYPE_TUNER 2 /* Can tune */ #define VID_TYPE_TELETEXT 4 /* Does teletext */ @@ -85,6 +90,7 @@ #define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ #define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ #define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ +#endif /* * M I S C E L L A N E O U S -- cgit v1.2.2 From 9fa0f6db3a201bef49f28e69f80802559a38586b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sun, 27 Jul 2008 08:55:17 -0300 Subject: V4L/DVB (8522): videodev2: Fix merge conflict Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index f7195351a1e7..e466bd54a50e 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -330,8 +330,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ #define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ -#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S','5','0','5') /* YYUV per line */ -#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S','5','0','8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ -- cgit v1.2.2 From 5c2aed622571ac7c3c6ec182d6d3c318e4b45c8b Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 28 Feb 2008 11:33:03 -0500 
Subject: stop_machine: add ALL_CPUS option -allow stop_mahcine_run() to call a function on all cpus. Calling stop_machine_run() with a 'ALL_CPUS' invokes this new behavior. stop_machine_run() proceeds as normal until the calling cpu has invoked 'fn'. Then, we tell all the other cpus to call 'fn'. Signed-off-by: Jason Baron Signed-off-by: Mathieu Desnoyers Signed-off-by: Rusty Russell CC: Adrian Bunk CC: Andi Kleen CC: Alexey Dobriyan CC: Christoph Hellwig CC: mingo@elte.hu CC: akpm@osdl.org --- include/linux/stop_machine.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 5bfc553bdb21..18af011c13af 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -8,11 +8,17 @@ #include #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) + +#define ALL_CPUS ~0U + /** * stop_machine_run: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() - * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS. + * @cpu: if @cpu == n, run @fn() on cpu n + * if @cpu == NR_CPUS, run @fn() on any cpu + * if @cpu == ALL_CPUS, run @fn() first on the calling cpu, and then + * concurrently on all the other cpus * * Description: This causes a thread to be scheduled on every other cpu, * each of which disables interrupts, and finally interrupts are disabled -- cgit v1.2.2 From ffdb5976c47609c862917d4c186ecbb5706d2dda Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 28 Jul 2008 12:16:28 -0500 Subject: Simplify stop_machine stop_machine creates a kthread which creates kernel threads. We can create those threads directly and simplify things a little. Some care must be taken with CPU hotunplug, which has special needs, but that code seems more robust than it was in the past. Signed-off-by: Rusty Russell Acked-by: Christian Borntraeger --- include/linux/stop_machine.h | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 18af011c13af..36c2c7284eb3 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -17,13 +17,12 @@ * @data: the data ptr for the @fn() * @cpu: if @cpu == n, run @fn() on cpu n * if @cpu == NR_CPUS, run @fn() on any cpu - * if @cpu == ALL_CPUS, run @fn() first on the calling cpu, and then - * concurrently on all the other cpus + * if @cpu == ALL_CPUS, run @fn() on every online CPU. * - * Description: This causes a thread to be scheduled on every other cpu, - * each of which disables interrupts, and finally interrupts are disabled - * on the current CPU. The result is that noone is holding a spinlock - * or inside any other preempt-disabled region when @fn() runs. + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that noone is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ @@ -35,13 +34,10 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); * @data: the data ptr for the @fn * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. * - * Description: This is a special version of the above, which returns the - * thread which has run @fn(): kthread_stop will return the return value - * of @fn(). 
Used by hotplug cpu. + * Description: This is a special version of the above, which assumes cpus + * won't come or go while it's being called. Used by hotplug cpu. */ -struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu); - +int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); #else static inline int stop_machine_run(int (*fn)(void *), void *data, -- cgit v1.2.2 From eeec4fad963490821348a331cca6102ae1c4a7a3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 28 Jul 2008 12:16:30 -0500 Subject: stop_machine(): stop_machine_run() changed to use cpu mask Instead of a "cpu" arg with magic values NR_CPUS (any cpu) and ~0 (all cpus), pass a cpumask_t. Allow NULL for the common case (where we don't care which CPU the function is run on): temporary cpumask_t's are usually considered bad for stack space. This deprecates stop_machine_run, to be removed soon when all the callers are dead. Signed-off-by: Rusty Russell --- include/linux/stop_machine.h | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 36c2c7284eb3..f1cb0ba6d715 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -5,19 +5,19 @@ (and more). So the "read" side to such a lock is anything which diables preeempt. */ #include +#include #include #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) +/* Deprecated, but useful for transition. */ #define ALL_CPUS ~0U /** - * stop_machine_run: freeze the machine on all CPUs and run this function + * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() - * @cpu: if @cpu == n, run @fn() on cpu n - * if @cpu == NR_CPUS, run @fn() on any cpu - * if @cpu == ALL_CPUS, run @fn() on every online CPU. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * * Description: This causes a thread to be scheduled on every cpu, * each of which disables interrupts. The result is that noone is @@ -26,22 +26,22 @@ * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); +int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); /** - * __stop_machine_run: freeze the machine on all CPUs and run this function + * __stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn - * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * * Description: This is a special version of the above, which assumes cpus * won't come or go while it's being called. Used by hotplug cpu. */ -int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); +int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); #else -static inline int stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu) +static inline int stop_machine(int (*fn)(void *), void *data, + const cpumask_t *cpus) { int ret; local_irq_disable(); @@ -50,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data, return ret; } #endif /* CONFIG_SMP */ + +static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data, + unsigned int cpu) +{ + /* If they don't care which cpu fn runs on, just pick one. 
*/ + if (cpu == NR_CPUS) + return stop_machine(fn, data, NULL); + else if (cpu == ~0U) + return stop_machine(fn, data, &cpu_possible_map); + else { + cpumask_t cpus = cpumask_of_cpu(cpu); + return stop_machine(fn, data, &cpus); + } +} #endif /* _LINUX_STOP_MACHINE */ -- cgit v1.2.2 From 306cfd630a4d121cf4e08b894d8b4c4cf106e57e Mon Sep 17 00:00:00 2001 From: Adrian McMenamin Date: Sun, 15 Jun 2008 20:48:09 +0100 Subject: maple: tidy maple_driver code by removing redundant connect/disconnect The connect and disconnect functions are unnecessary - everything they do can be accomplished in the initial probe - so remove them. Signed-off-by: Adrian McMenamin Signed-off-by: Paul Mundt --- include/linux/maple.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/maple.h b/include/linux/maple.h index d31e36ebb436..523a286bb477 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -61,8 +61,6 @@ struct maple_device { struct maple_driver { unsigned long function; - int (*connect) (struct maple_device * dev); - void (*disconnect) (struct maple_device * dev); struct device_driver drv; }; -- cgit v1.2.2 From 7f71ac9374fec066e428892a68db158946cee1fb Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 28 Jul 2008 18:29:09 +0200 Subject: mfd: Coding style fixes Fix some coding style fixes in the mfd core driver. Signed-off-by: Ben Dooks Signed-off-by: Samuel Ortiz --- include/linux/mfd/core.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bb3dd0545928..b7cbb9968339 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -1,5 +1,3 @@ -#ifndef MFD_CORE_H -#define MFD_CORE_H /* * drivers/mfd/mfd-core.h * @@ -13,6 +11,9 @@ * */ +#ifndef MFD_CORE_H +#define MFD_CORE_H + #include /* @@ -38,17 +39,15 @@ struct mfd_cell { const struct resource *resources; }; -static inline struct mfd_cell * -mfd_get_cell(struct platform_device *pdev) +static inline struct mfd_cell *mfd_get_cell(struct platform_device *pdev) { return (struct mfd_cell *)pdev->dev.platform_data; } -extern int mfd_add_devices( - struct platform_device *parent, - const struct mfd_cell *cells, int n_devs, - struct resource *mem_base, - int irq_base); +extern int mfd_add_devices(struct platform_device *parent, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); extern void mfd_remove_devices(struct platform_device *parent); -- cgit v1.2.2 From e56b3bc7942982ac2589c942fb345e38bc7a341a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 28 Jul 2008 11:32:33 -0700 Subject: cpu masks: optimize and clean up cpumask_of_cpu() Clean up and optimize cpumask_of_cpu(), by sharing all the zero words. Instead of stupidly generating all possible i=0...NR_CPUS 2^i patterns creating a huge array of constant bitmasks, realize that the zero words can be shared. In other words, on a 64-bit architecture, we only ever need 64 of these arrays - with a different bit set in one single world (with enough zero words around it so that we can create any bitmask by just offsetting in that big array). And then we just put enough zeroes around it that we can point every single cpumask to be one of those things. So when we have 4k CPU's, instead of having 4k arrays (of 4k bits each, with one bit set in each array - 2MB memory total), we have exactly 64 arrays instead, each 8k bits in size (64kB total). 
And then we just point cpumask(n) to the right position (which we can calculate dynamically). Once we have the right arrays, getting "cpumask(n)" ends up being: static inline const cpumask_t *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; p -= cpu / BITS_PER_LONG; return (const cpumask_t *)p; } This brings other advantages and simplifications as well: - we are not wasting memory that is just filled with a single bit in various different places - we don't need all those games to re-create the arrays in some dense format, because they're already going to be dense enough. if we compile a kernel for up to 4k CPU's, "wasting" that 64kB of memory is a non-issue (especially since by doing this "overlapping" trick we probably get better cache behaviour anyway). [ mingo@elte.hu: Converted Linus's mails into a commit. See: http://lkml.org/lkml/2008/7/27/156 http://lkml.org/lkml/2008/7/28/320 Also applied a family filter - which also has the side-effect of leaving out the bits where Linus calls me an idio... Oh, never mind ;-) ] Signed-off-by: Ingo Molnar Cc: Rusty Russell Cc: Andrew Morton Cc: Al Viro Cc: Mike Travis Signed-off-by: Ingo Molnar --- include/linux/cpumask.h | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 8fa3b6d4a320..96d0509fb8d8 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -265,10 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. + */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return (const cpumask_t *)p; +} + +/* + * In cases where we take the address of the cpumask immediately, + * gcc optimizes it out (it's a constant) and there's no huge stack + * variable created: + */ +#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); }) -/* cpumask_of_cpu_map[] is in kernel/cpu.c */ -extern const cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) -- cgit v1.2.2 From 5fde244d39b88625ac578d83e6625138714de031 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 23 Jul 2008 10:32:24 +0800 Subject: PCI: disable ASPM per ACPI FADT setting The ACPI FADT table includes an ASPM control bit. If the bit is set, do not enable ASPM since it may indicate that the platform doesn't actually support the feature. 
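A hedged sketch of how the new hook is meant to be used, assuming ACPICA's ACPI_FADT_NO_ASPM name for the FADT boot flag; the actual call site is ACPI platform code outside include/linux and is not part of this header diff:

/*
 * Illustrative only: if the firmware says the OS must not touch ASPM
 * controls, ask the PCIe layer not to enable ASPM at all.
 */
#include <linux/acpi.h>		/* acpi_gbl_FADT (assumed available here) */
#include <linux/pci-aspm.h>	/* pcie_no_aspm(), declared below */

static void example_check_fadt_aspm(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
		pcie_no_aspm();
}

When the ASPM code is not built in, the empty inline stub added alongside the declaration makes such a call a no-op.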
Tested-by: Jack Howarth Signed-off-by: Shaohua Li Signed-off-by: Jesse Barnes --- include/linux/pci-aspm.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index a1a1e618e996..91ba0b338b47 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pcie_no_aspm(void); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { @@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } + +static inline void pcie_no_aspm(void) +{ +} #endif #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ -- cgit v1.2.2 From 149e16372a2066c5474d8a8db9b252afd57eb427 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 23 Jul 2008 10:32:31 +0800 Subject: PCI: disable ASPM on pre-1.1 PCIe devices Disable ASPM on pre-1.1 PCIe devices, as many of them don't implement it correctly. Tested-by: Jack Howarth Signed-off-by: Shaohua Li Signed-off-by: Jesse Barnes --- include/linux/pci_regs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 19958b929905..450684f7eaac 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -374,6 +374,7 @@ #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCTL 8 /* Device Control */ -- cgit v1.2.2 From 979b1791e5b8f8b556faeec4c48339e7ed63af9f Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Thu, 24 Jul 2008 17:18:38 +0100 Subject: PCI: add D3 power state avoidance quirk Libata has some hacks to deal with certain controllers going silly in D3 state. The right way to handle this is to keep a PCI device flag for such devices. That can then be generalised for non-ATA devices with power problems. Signed-off-by: Alan Cox Signed-off-by: Jesse Barnes --- include/linux/pci.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index 1d296d31abe0..825be3878f68 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -124,6 +124,8 @@ enum pci_dev_flags { * generation too. */ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, }; typedef unsigned short __bitwise pci_bus_flags_t; -- cgit v1.2.2 From 56edb58be157a06dc147a988af3588059556d392 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 29 Jul 2008 01:23:32 +0200 Subject: mfd: add platform_data to mfd_cell Adding platform_data to mfd_cell allows passing of platform data directly to the platform_device created for each cell and thus reuse of existing drivers.
On the other hand, it can be used as a hook to the mfd_cell itself, removing the need for the mfd_get_cell() method. Signed-off-by: Mike Rapoport Acked-by: Dmitry Baryshkov Signed-off-by: Samuel Ortiz --- include/linux/mfd/core.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index b7cbb9968339..ea45d4a5a2ac 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -29,7 +29,13 @@ struct mfd_cell { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - void *driver_data; /* driver-specific data */ + /* driver-specific data for MFD-aware "cell" drivers */ + void *driver_data; + + /* platform_data can be used to either pass data to "generic" + driver or as a hook to mfd_cell for the "cell" drivers */ + void *platform_data; + size_t data_size; /* * This resources can be specified relatievly to the parent device. @@ -39,11 +45,6 @@ struct mfd_cell { const struct resource *resources; }; -static inline struct mfd_cell *mfd_get_cell(struct platform_device *pdev) -{ - return (struct mfd_cell *)pdev->dev.platform_data; -} - extern int mfd_add_devices(struct platform_device *parent, const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base); -- cgit v1.2.2 From 6beeac76f5f96590fb751af5e138fbc3f62e8460 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 28 Jul 2008 15:46:22 -0700 Subject: mmu-notifiers: add list_del_init_rcu() Introduce list_del_init_rcu() and document it. Signed-off-by: Andrea Arcangeli Acked-by: Linus Torvalds Cc: "Paul E. McKenney" Cc: Ingo Molnar Cc: Christoph Lameter Cc: Jack Steiner Cc: Robin Holt Cc: Nick Piggin Cc: Peter Zijlstra Cc: Kanoj Sarcar Cc: Roland Dreier Cc: Steve Wise Cc: Avi Kivity Cc: Hugh Dickins Cc: Rusty Russell Cc: Anthony Liguori Cc: Chris Wright Cc: Marcelo Tosatti Cc: Eric Dumazet Cc: "Paul E. McKenney" Cc: Izik Eidus Cc: Anthony Liguori Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rculist.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index b0f39be08b6c..eb4443c7e05b 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -97,6 +97,34 @@ static inline void list_del_rcu(struct list_head *entry) entry->prev = LIST_POISON2; } +/** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu().
+ */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + /** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced -- cgit v1.2.2 From 7906d00cd1f687268f0a3599442d113767795ae6 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 28 Jul 2008 15:46:26 -0700 Subject: mmu-notifiers: add mm_take_all_locks() operation mm_take_all_locks holds off reclaim from an entire mm_struct. This allows mmu notifiers to register into the mm at any time with the guarantee that no mmu operation is in progress on the mm. This operation locks against the VM for all pte/vma/mm related operations that could ever happen on a certain mm. This includes vmtruncate, try_to_unmap, and all page faults. The caller must take the mmap_sem in write mode before calling mm_take_all_locks(). The caller isn't allowed to release the mmap_sem until mm_drop_all_locks() returns. mmap_sem in write mode is required in order to block all operations that could modify pagetables and free pages without need of altering the vma layout (for example populate_range() with nonlinear vmas). It's also needed in write mode to avoid new anon_vmas being associated with existing vmas. A single task can't take more than one mm_take_all_locks() in a row or it would deadlock. mm_take_all_locks() and mm_drop_all_locks are expensive operations that may have to take thousands of locks. mm_take_all_locks() can fail if it's interrupted by signals. When mmu_notifier_register returns, we must be sure that the driver is notified if some task is in the middle of a vmtruncate for the 'mm' where the mmu notifier was registered (mmu_notifier_invalidate_range_start/end is run around the vmtruncation but mmu_notifier_register can run after mmu_notifier_invalidate_range_start and before mmu_notifier_invalidate_range_end). Same problem for rmap paths. And we have to remove page pinning to avoid replicating the tlb_gather logic inside KVM (and GRU doesn't work well with page pinning regardless of needing tlb_gather), so without mm_take_all_locks when vmtruncate frees the page, kvm would have no way to notice that it mapped into sptes a page that is going into the freelist without a chance of any further mmu_notifier notification. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Andrea Arcangeli Acked-by: Linus Torvalds Cc: Christoph Lameter Cc: Jack Steiner Cc: Robin Holt Cc: Nick Piggin Cc: Peter Zijlstra Cc: Kanoj Sarcar Cc: Roland Dreier Cc: Steve Wise Cc: Avi Kivity Cc: Hugh Dickins Cc: Rusty Russell Cc: Anthony Liguori Cc: Chris Wright Cc: Marcelo Tosatti Cc: Eric Dumazet Cc: "Paul E. McKenney" Cc: Izik Eidus Cc: Anthony Liguori Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 3 +++ include/linux/pagemap.h | 1 + include/linux/rmap.h | 8 ++++++++ 3 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 6e695eaab4ce..866a3dbe5c75 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1104,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + #ifdef CONFIG_PROC_FS /* From fs/proc/base.c.
callers must _not_ hold the mm's exe_file_lock */ extern void added_exe_file_vma(struct mm_struct *mm); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a81d81890422..a39b38ccdc97 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -20,6 +20,7 @@ */ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ static inline void mapping_set_error(struct address_space *mapping, int error) { diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1383692ac5bd..69407f85e10b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,14 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ + /* + * NOTE: the LSB of the head.next is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * head must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ struct list_head head; /* List of private "related" vmas */ }; -- cgit v1.2.2 From cddb8a5c14aa89810b40495d94d3d2a0faee6619 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 28 Jul 2008 15:46:29 -0700 Subject: mmu-notifiers: core With KVM/GRU/XPMEM there isn't just the primary CPU MMU pointing to pages. There are secondary MMUs (with secondary sptes and secondary tlbs) too. sptes in the kvm case are shadow pagetables, but when I say spte in mmu-notifier context, I mean "secondary pte". In the GRU case there's no actual secondary pte and there's only a secondary tlb because the GRU secondary MMU has no knowledge about sptes and every secondary tlb miss event in the MMU always generates a page fault that has to be resolved by the CPU (this is not the case of KVM, where a secondary tlb miss will walk sptes in hardware and it will refill the secondary tlb transparently to software if the corresponding spte is present). The same way zap_page_range has to invalidate the pte before freeing the page, the spte (and secondary tlb) must also be invalidated before any page is freed and reused. Currently we take a page_count pin on every page mapped by sptes, but that means the pages can't be swapped whenever they're mapped by any spte because they're part of the guest working set. Furthermore a spte unmap event can immediately lead to a page being freed when the pin is released (so requiring the same complex and relatively slow tlb_gather smp safe logic we have in zap_page_range and that can be avoided completely if the spte unmap event doesn't require an unpin of the page previously mapped in the secondary MMU). The mmu notifiers allow kvm/GRU/XPMEM to attach to the tsk->mm and know when the VM is swapping or freeing or doing anything on the primary MMU so that the secondary MMU code can drop sptes before the pages are freed, avoiding all page pinning and allowing 100% reliable swapping of guest physical address space. Furthermore it avoids the code that tears down the mappings of the secondary MMU, to implement a logic like tlb_gather in zap_page_range that would require many IPIs to flush other cpu tlbs, for each fixed number of sptes unmapped.
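Schematically, and only as an illustration (the mygart name and its helpers are invented), a secondary-MMU driver plugs into the ops table this patch introduces (the full header is quoted further down) along these lines:

/* Illustrative sketch only; mygart_unmap_range()/mygart_unmap_all() are hypothetical. */
#include <linux/mmu_notifier.h>
#include <linux/kernel.h>

struct mygart {
	struct mmu_notifier	mn;	/* embedded, like kvm does in the example below */
	/* ... secondary-MMU hardware state ... */
};

static void mygart_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start, unsigned long end)
{
	struct mygart *gart = container_of(mn, struct mygart, mn);

	/* drop every secondary pte/tlb entry covering [start, end) */
	mygart_unmap_range(gart, start, end);
}

static void mygart_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* the mm is going away: freeze the secondary MMU */
	mygart_unmap_all(container_of(mn, struct mygart, mn));
}

static const struct mmu_notifier_ops mygart_mmu_ops = {
	.release		= mygart_release,
	.invalidate_range_start	= mygart_invalidate_range_start,
	/* .invalidate_range_end, .invalidate_page, .clear_flush_young as needed */
};

static int mygart_attach(struct mygart *gart, struct mm_struct *mm)
{
	gart->mn.ops = &mygart_mmu_ops;
	return mmu_notifier_register(&gart->mn, mm);	/* may return -EINTR */
}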
To give an example: if what happens on the primary MMU is a protection downgrade (from writeable to wrprotect) the secondary MMU mappings will be invalidated, and the next secondary-mmu-page-fault will call get_user_pages and trigger a do_wp_page through get_user_pages if it called get_user_pages with write=1, and it'll re-establish an updated spte or secondary-tlb-mapping on the copied page. Or it will set up a readonly spte or readonly tlb mapping if it's a guest-read, if it calls get_user_pages with write=0. This is just an example. This allows mapping any page pointed to by any pte (and in turn visible in the primary CPU MMU), into a secondary MMU (be it a pure tlb like GRU, or a full MMU with both sptes and secondary-tlb like the shadow-pagetable layer with kvm), or a remote DMA in software like XPMEM (hence the need to schedule in XPMEM code to send the invalidate to the remote node, while there is no need to schedule in kvm/gru as it's an immediate event like invalidating a primary-mmu pte). At least for KVM without this patch it's impossible to swap guests reliably. And having this feature and removing the page pin allows several other optimizations that simplify life considerably. Dependencies: 1) mm_take_all_locks() to register the mmu notifier when the whole VM isn't doing anything with "mm". This allows mmu notifier users to keep track of whether the VM is in the middle of the invalidate_range_begin/end critical section with an atomic counter increased in range_begin and decreased in range_end. No secondary MMU page fault is allowed to map any spte or secondary tlb reference, while the VM is in the middle of range_begin/end as any page returned by get_user_pages in that critical section could later immediately be freed without any further ->invalidate_page notification (invalidate_range_begin/end works on ranges and ->invalidate_page isn't called immediately before freeing the page). To stop all page freeing and pagetable overwrites the mmap_sem must be taken in write mode and all other anon_vma/i_mmap locks must be taken too. 2) It'd be a waste to add branches in the VM if nobody could possibly run KVM/GRU/XPMEM on the kernel, so mmu notifiers will only be enabled if CONFIG_KVM=m/y. In the current kernel kvm won't yet take advantage of mmu notifiers, but this already allows compiling a KVM external module against a kernel with mmu notifiers enabled, and from the next pull from kvm.git we'll start using them. And GRU/XPMEM will also be able to continue the development by enabling KVM=m in their config, until they submit all GRU/XPMEM GPLv2 code to the mainline kernel. Then they can also enable MMU_NOTIFIERS in the same way KVM does it (even if KVM=n). This guarantees nobody selects MMU_NOTIFIER=y if KVM and GRU and XPMEM are all =n. The mmu_notifier_register call can fail because mm_take_all_locks may be interrupted by a signal and return -EINTR. Because mmu_notifier_register is used when a driver starts up, a failure can be gracefully handled. Here is an example of the change applied to kvm to register the mmu notifiers. Usually when a driver starts up other allocations are required anyway and -ENOMEM failure paths exist already.
struct kvm *kvm_arch_create_vm(void) { struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); + int err; if (!kvm) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops; + err = mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm); + if (err) { + kfree(kvm); + return ERR_PTR(err); + } + return kvm; } mmu_notifier_unregister returns void and it's reliable. The patch also adds a few needed but missing includes that would prevent kernel to compile after these changes on non-x86 archs (x86 didn't need them by luck). [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: fix mm/filemap_xip.c build] [akpm@linux-foundation.org: fix mm/mmu_notifier.c build] Signed-off-by: Andrea Arcangeli Signed-off-by: Nick Piggin Signed-off-by: Christoph Lameter Cc: Jack Steiner Cc: Robin Holt Cc: Nick Piggin Cc: Peter Zijlstra Cc: Kanoj Sarcar Cc: Roland Dreier Cc: Steve Wise Cc: Avi Kivity Cc: Hugh Dickins Cc: Rusty Russell Cc: Anthony Liguori Cc: Chris Wright Cc: Marcelo Tosatti Cc: Eric Dumazet Cc: "Paul E. McKenney" Cc: Izik Eidus Cc: Anthony Liguori Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 4 + include/linux/mmu_notifier.h | 279 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 include/linux/mmu_notifier.h (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 746f975b58ef..386edbe2cb4e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -253,6 +254,9 @@ struct mm_struct { struct file *exe_file; unsigned long num_exe_file_vmas; #endif +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif }; #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 000000000000..b77486d152cd --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,279 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include +#include +#include + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. 
+ * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. 
+ */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). + * 3. No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. + * ptep_clear_flush is impleemnted as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. 
+ */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ -- cgit v1.2.2 From 8ab22b9abb5c55413802e4adc9aa6223324547c3 Mon Sep 17 00:00:00 2001 From: Hisashi Hifumi Date: Mon, 28 Jul 2008 15:46:36 -0700 Subject: vfs: pagecache usage optimization for pagesize!=blocksize When we read some part of a file through the pagecache, if there is a pagecache page of the corresponding index but this page is not uptodate, read IO is issued and this page becomes uptodate. I think this is good for the pagesize == blocksize environment, but there is room for improvement in the pagesize != blocksize environment. In this case a page can have multiple buffers and, even if the page is not uptodate, some buffers can be uptodate. So I suggest that when all buffers which correspond to the part of a file that we want to read are uptodate, we use this pagecache and copy data from it to the user buffer even if the page is not uptodate. This can reduce read IO and improve system throughput. I wrote a benchmark program and got the following results with it. This benchmark does the following: 1: mount and open a test file. 2: create a 512MB file. 3: close the file and umount. 4: mount and open the test file again. 5: pwrite randomly 300000 times on the test file. The offset is aligned to the IO size (1024 bytes). 6: measure the time of preading randomly 100000 times on the test file. The result was: 2.6.26: 330 sec; 2.6.26-patched: 226 sec (Arch: i386, Filesystem: ext3, Blocksize: 1024 bytes, Memory: 1GB). On ext3/4, a file is written through buffers/blocks. So random read/write mixed workloads or random read after random write workloads are optimized with this patch in a pagesize != blocksize environment. This test result shows that.
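For context (an illustration, not part of the quoted patch; the myfs name is invented), a buffer-backed filesystem opts in to this optimization by wiring the helper declared in the buffer_head.h hunk further below into its address_space_operations:

static const struct address_space_operations myfs_aops = {
	/* ... the filesystem's usual readpage/writepage/etc. hooks ... */
	.is_partially_uptodate	= block_is_partially_uptodate,
};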
The benchmark program is as follows: #include #include #include #include #include #include #include #include #include #define LEN 1024 #define LOOP 1024*512 /* 512MB */ main(void) { unsigned long i, offset, filesize; int fd; char buf[LEN]; time_t t1, t2; if (mount("/dev/sda1", "/root/test1/", "ext3", 0, 0) < 0) { perror("cannot mount\n"); exit(1); } memset(buf, 0, LEN); fd = open("/root/test1/testfile", O_CREAT|O_RDWR|O_TRUNC); if (fd < 0) { perror("cannot open file\n"); exit(1); } for (i = 0; i < LOOP; i++) write(fd, buf, LEN); close(fd); if (umount("/root/test1/") < 0) { perror("cannot umount\n"); exit(1); } if (mount("/dev/sda1", "/root/test1/", "ext3", 0, 0) < 0) { perror("cannot mount\n"); exit(1); } fd = open("/root/test1/testfile", O_RDWR); if (fd < 0) { perror("cannot open file\n"); exit(1); } filesize = LEN * LOOP; for (i = 0; i < 300000; i++){ offset = (random() % filesize) & (~(LEN - 1)); pwrite(fd, buf, LEN, offset); } printf("start test\n"); time(&t1); for (i = 0; i < 100000; i++){ offset = (random() % filesize) & (~(LEN - 1)); pread(fd, buf, LEN, offset); } time(&t2); printf("%ld sec\n", t2-t1); close(fd); if (umount("/root/test1/") < 0) { perror("cannot umount\n"); exit(1); } } Signed-off-by: Hisashi Hifumi Cc: Nick Piggin Cc: Christoph Hellwig Cc: Jan Kara Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/buffer_head.h | 2 ++ include/linux/fs.h | 44 +++++++++++++++++++++++--------------------- 2 files changed, 25 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 82aa36c53ea7..50cfe8ceb478 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -205,6 +205,8 @@ void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, + unsigned long from); int block_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8252b045e624..580b513668fe 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -443,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i) return i->count; } +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -484,6 +505,8 @@ struct address_space_operations { int (*migratepage) (struct address_space *, struct page *, struct page *); int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); }; /* @@ -1198,27 +1221,6 @@ struct block_device_operations { struct module *owner; }; -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. 
- * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user * buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ -- cgit v1.2.2 From 424f525a1241351da947fb48a938128ddd774511 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Tue, 29 Jul 2008 01:30:26 +0200 Subject: mfd: accept pure device as a parent, not only platform_device Signed-off-by: Dmitry Baryshkov Signed-off-by: Samuel Ortiz --- include/linux/mfd/core.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ea45d4a5a2ac..49ef857cdb2d 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -45,11 +45,11 @@ struct mfd_cell { const struct resource *resources; }; -extern int mfd_add_devices(struct platform_device *parent, +extern int mfd_add_devices(struct device *parent, int id, const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base); -extern void mfd_remove_devices(struct platform_device *parent); +extern void mfd_remove_devices(struct device *parent); #endif -- cgit v1.2.2
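Taken together, the mfd changes above (coding style cleanup, per-cell platform_data, and a plain struct device parent plus an id argument) change how a multi-function chip driver registers its sub-devices. A minimal sketch follows; the mychip names and values are invented for illustration, the .name field is assumed from the full mfd_cell definition (it is not visible in the hunks quoted here), and only mfd_cell, mfd_add_devices and mfd_remove_devices come from the patches above:

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

struct mychip_codec_pdata {			/* invented sub-device pdata */
	int sample_rate;
};

static struct mychip_codec_pdata mychip_codec_pdata = {
	.sample_rate	= 44100,
};

static const struct mfd_cell mychip_cells[] = {
	{
		.name		= "mychip-codec",	/* assumed field */
		.platform_data	= &mychip_codec_pdata,	/* reused by an existing platform driver */
		.data_size	= sizeof(mychip_codec_pdata),
	},
};

static int mychip_probe(struct platform_device *pdev)
{
	/* parent is now a plain struct device, and an instance id is passed */
	return mfd_add_devices(&pdev->dev, pdev->id,
			       mychip_cells, ARRAY_SIZE(mychip_cells),
			       NULL, 0);
}

static int mychip_remove(struct platform_device *pdev)
{
	mfd_remove_devices(&pdev->dev);
	return 0;
}

A cell driver that still wants the whole mfd_cell rather than plain platform data can simply point .platform_data at the cell itself, which is the "hook" described in the platform_data commit above.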