path: root/include
Diffstat (limited to 'include')
-rw-r--r--  include/acpi/actbl1.h | 27
-rw-r--r--  include/asm-alpha/scatterlist.h | 5
-rw-r--r--  include/asm-arm/dma-mapping.h | 10
-rw-r--r--  include/asm-arm/scatterlist.h | 5
-rw-r--r--  include/asm-avr32/dma-mapping.h | 7
-rw-r--r--  include/asm-avr32/scatterlist.h | 5
-rw-r--r--  include/asm-blackfin/scatterlist.h | 6
-rw-r--r--  include/asm-cris/scatterlist.h | 5
-rw-r--r--  include/asm-frv/scatterlist.h | 13
-rw-r--r--  include/asm-h8300/scatterlist.h | 5
-rw-r--r--  include/asm-ia64/scatterlist.h | 5
-rw-r--r--  include/asm-m32r/scatterlist.h | 5
-rw-r--r--  include/asm-m68k/scatterlist.h | 5
-rw-r--r--  include/asm-m68knommu/module.h | 12
-rw-r--r--  include/asm-m68knommu/scatterlist.h | 6
-rw-r--r--  include/asm-m68knommu/uaccess.h | 4
-rw-r--r--  include/asm-mips/gt64120.h | 5
-rw-r--r--  include/asm-mips/i8253.h | 6
-rw-r--r--  include/asm-mips/scatterlist.h | 5
-rw-r--r--  include/asm-mips/sibyte/sb1250.h | 2
-rw-r--r--  include/asm-parisc/scatterlist.h | 7
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 10
-rw-r--r--  include/asm-powerpc/mpc52xx.h | 9
-rw-r--r--  include/asm-powerpc/scatterlist.h | 5
-rw-r--r--  include/asm-ppc/system.h | 1
-rw-r--r--  include/asm-s390/cpu.h | 25
-rw-r--r--  include/asm-s390/mmu_context.h | 50
-rw-r--r--  include/asm-s390/page.h | 4
-rw-r--r--  include/asm-s390/pgalloc.h | 250
-rw-r--r--  include/asm-s390/pgtable.h | 429
-rw-r--r--  include/asm-s390/processor.h | 20
-rw-r--r--  include/asm-s390/scatterlist.h | 5
-rw-r--r--  include/asm-s390/tlb.h | 129
-rw-r--r--  include/asm-s390/tlbflush.h | 152
-rw-r--r--  include/asm-sh/dma-mapping.h | 12
-rw-r--r--  include/asm-sh/scatterlist.h | 5
-rw-r--r--  include/asm-sh64/dma-mapping.h | 12
-rw-r--r--  include/asm-sh64/scatterlist.h | 5
-rw-r--r--  include/asm-sparc/scatterlist.h | 5
-rw-r--r--  include/asm-sparc64/scatterlist.h | 5
-rw-r--r--  include/asm-v850/scatterlist.h | 5
-rw-r--r--  include/asm-x86/bootparam.h | 9
-rw-r--r--  include/asm-x86/cacheflush.h | 1
-rw-r--r--  include/asm-x86/device.h | 3
-rw-r--r--  include/asm-x86/dma-mapping_32.h | 4
-rw-r--r--  include/asm-x86/scatterlist_32.h | 5
-rw-r--r--  include/asm-x86/scatterlist_64.h | 5
-rw-r--r--  include/asm-xtensa/scatterlist.h | 5
-rw-r--r--  include/linux/capability.h | 6
-rw-r--r--  include/linux/dmar.h | 86
-rw-r--r--  include/linux/efi.h | 2
-rw-r--r--  include/linux/efs_fs.h | 6
-rw-r--r--  include/linux/exportfs.h | 141
-rw-r--r--  include/linux/ext2_fs.h | 1
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/i8042.h | 35
-rw-r--r--  include/linux/ide.h | 2
-rw-r--r--  include/linux/linkage.h | 6
-rw-r--r--  include/linux/memory.h | 31
-rw-r--r--  include/linux/net.h | 4
-rw-r--r--  include/linux/netdevice.h | 7
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/reiserfs_fs.h | 12
-rw-r--r--  include/linux/scatterlist.h | 202
-rw-r--r--  include/linux/skbuff.h | 15
-rw-r--r--  include/linux/socket.h | 1
-rw-r--r--  include/linux/videodev.h | 42
-rw-r--r--  include/linux/videodev2.h | 92
-rw-r--r--  include/media/v4l2-dev.h | 5
-rw-r--r--  include/net/bluetooth/hci.h | 604
-rw-r--r--  include/net/bluetooth/hci_core.h | 13
-rw-r--r--  include/net/bluetooth/l2cap.h | 37
-rw-r--r--  include/sound/version.h | 2
74 files changed, 1554 insertions, 1123 deletions
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 4e5d3ca53a8e..a1b1b2ee3e51 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -257,7 +257,8 @@ struct acpi_table_dbgp {
257struct acpi_table_dmar { 257struct acpi_table_dmar {
258 struct acpi_table_header header; /* Common ACPI table header */ 258 struct acpi_table_header header; /* Common ACPI table header */
259 u8 width; /* Host Address Width */ 259 u8 width; /* Host Address Width */
260 u8 reserved[11]; 260 u8 flags;
261 u8 reserved[10];
261}; 262};
262 263
263/* DMAR subtable header */ 264/* DMAR subtable header */
@@ -265,8 +266,6 @@ struct acpi_table_dmar {
265struct acpi_dmar_header { 266struct acpi_dmar_header {
266 u16 type; 267 u16 type;
267 u16 length; 268 u16 length;
268 u8 flags;
269 u8 reserved[3];
270}; 269};
271 270
272/* Values for subtable type in struct acpi_dmar_header */ 271/* Values for subtable type in struct acpi_dmar_header */
@@ -274,13 +273,15 @@ struct acpi_dmar_header {
274enum acpi_dmar_type { 273enum acpi_dmar_type {
275 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, 274 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
276 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, 275 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
277 ACPI_DMAR_TYPE_RESERVED = 2 /* 2 and greater are reserved */ 276 ACPI_DMAR_TYPE_ATSR = 2,
277 ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */
278}; 278};
279 279
280struct acpi_dmar_device_scope { 280struct acpi_dmar_device_scope {
281 u8 entry_type; 281 u8 entry_type;
282 u8 length; 282 u8 length;
283 u8 segment; 283 u16 reserved;
284 u8 enumeration_id;
284 u8 bus; 285 u8 bus;
285}; 286};
286 287
@@ -290,7 +291,14 @@ enum acpi_dmar_scope_type {
290 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, 291 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
291 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, 292 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
292 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, 293 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
293 ACPI_DMAR_SCOPE_TYPE_RESERVED = 3 /* 3 and greater are reserved */ 294 ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
295 ACPI_DMAR_SCOPE_TYPE_HPET = 4,
296 ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */
297};
298
299struct acpi_dmar_pci_path {
300 u8 dev;
301 u8 fn;
294}; 302};
295 303
296/* 304/*
@@ -301,6 +309,9 @@ enum acpi_dmar_scope_type {
301 309
302struct acpi_dmar_hardware_unit { 310struct acpi_dmar_hardware_unit {
303 struct acpi_dmar_header header; 311 struct acpi_dmar_header header;
312 u8 flags;
313 u8 reserved;
314 u16 segment;
304 u64 address; /* Register Base Address */ 315 u64 address; /* Register Base Address */
305}; 316};
306 317
@@ -312,7 +323,9 @@ struct acpi_dmar_hardware_unit {
312 323
313struct acpi_dmar_reserved_memory { 324struct acpi_dmar_reserved_memory {
314 struct acpi_dmar_header header; 325 struct acpi_dmar_header header;
315 u64 address; /* 4_k aligned base address */ 326 u16 reserved;
327 u16 segment;
328 u64 base_address; /* 4_k aligned base address */
316 u64 end_address; /* 4_k aligned limit address */ 329 u64 end_address; /* 4_k aligned limit address */
317}; 330};
318 331
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
index 917365405e83..440747ca6349 100644
--- a/include/asm-alpha/scatterlist.h
+++ b/include/asm-alpha/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
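The struct page * member becomes an opaque unsigned long page_link (plus an optional sg_magic cookie under CONFIG_DEBUG_SG), and the same substitution repeats for every architecture below. Callers are expected to go through the accessors in <linux/scatterlist.h> instead of touching the fields; a rough sketch of the intended usage, where buf and len are placeholders:

static void sg_accessor_example(void *buf, unsigned int len)
{
	struct scatterlist sg;
	struct page *page;
	void *va;

	sg_init_one(&sg, buf, len);	/* stores virt_to_page(buf) in page_link */
	page = sg_page(&sg);		/* was: sg.page */
	va = sg_virt(&sg);		/* was: page_address(sg.page) + sg.offset */
}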
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 1eb8aac43228..e99406a7bece 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -5,7 +5,7 @@
5 5
6#include <linux/mm.h> /* need struct page */ 6#include <linux/mm.h> /* need struct page */
7 7
8#include <asm/scatterlist.h> 8#include <linux/scatterlist.h>
9 9
10/* 10/*
11 * DMA-consistent mapping functions. These allocate/free a region of 11 * DMA-consistent mapping functions. These allocate/free a region of
@@ -274,8 +274,8 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
274 for (i = 0; i < nents; i++, sg++) { 274 for (i = 0; i < nents; i++, sg++) {
275 char *virt; 275 char *virt;
276 276
277 sg->dma_address = page_to_dma(dev, sg->page) + sg->offset; 277 sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
278 virt = page_address(sg->page) + sg->offset; 278 virt = sg_virt(sg);
279 279
280 if (!arch_is_coherent()) 280 if (!arch_is_coherent())
281 dma_cache_maint(virt, sg->length, dir); 281 dma_cache_maint(virt, sg->length, dir);
@@ -371,7 +371,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
371 int i; 371 int i;
372 372
373 for (i = 0; i < nents; i++, sg++) { 373 for (i = 0; i < nents; i++, sg++) {
374 char *virt = page_address(sg->page) + sg->offset; 374 char *virt = sg_virt(sg);
375 if (!arch_is_coherent()) 375 if (!arch_is_coherent())
376 dma_cache_maint(virt, sg->length, dir); 376 dma_cache_maint(virt, sg->length, dir);
377 } 377 }
@@ -384,7 +384,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
384 int i; 384 int i;
385 385
386 for (i = 0; i < nents; i++, sg++) { 386 for (i = 0; i < nents; i++, sg++) {
387 char *virt = page_address(sg->page) + sg->offset; 387 char *virt = sg_virt(sg);
388 if (!arch_is_coherent()) 388 if (!arch_is_coherent())
389 dma_cache_maint(virt, sg->length, dir); 389 dma_cache_maint(virt, sg->length, dir);
390 } 390 }
diff --git a/include/asm-arm/scatterlist.h b/include/asm-arm/scatterlist.h
index de2f65eb42ed..ca0a37d03400 100644
--- a/include/asm-arm/scatterlist.h
+++ b/include/asm-arm/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; /* buffer page */ 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; /* buffer offset */ 12 unsigned int offset; /* buffer offset */
10 dma_addr_t dma_address; /* dma address */ 13 dma_addr_t dma_address; /* dma address */
11 unsigned int length; /* length */ 14 unsigned int length; /* length */
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 81e342636ac4..a7131630c057 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -217,8 +217,8 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
217 for (i = 0; i < nents; i++) { 217 for (i = 0; i < nents; i++) {
218 char *virt; 218 char *virt;
219 219
220 sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset; 220 sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
221 virt = page_address(sg[i].page) + sg[i].offset; 221 virt = sg_virt(&sg[i]);
222 dma_cache_sync(dev, virt, sg[i].length, direction); 222 dma_cache_sync(dev, virt, sg[i].length, direction);
223 } 223 }
224 224
@@ -327,8 +327,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
327 int i; 327 int i;
328 328
329 for (i = 0; i < nents; i++) { 329 for (i = 0; i < nents; i++) {
330 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 330 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
331 sg[i].length, direction);
332 } 331 }
333} 332}
334 333
diff --git a/include/asm-avr32/scatterlist.h b/include/asm-avr32/scatterlist.h
index c6d5ce3b3a25..377320e3bd17 100644
--- a/include/asm-avr32/scatterlist.h
+++ b/include/asm-avr32/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-blackfin/scatterlist.h b/include/asm-blackfin/scatterlist.h
index 60e07b92044c..04f448711cd0 100644
--- a/include/asm-blackfin/scatterlist.h
+++ b/include/asm-blackfin/scatterlist.h
@@ -4,7 +4,10 @@
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
@@ -17,7 +20,6 @@ struct scatterlist {
17 * returns, or alternatively stop on the first sg_dma_len(sg) which 20 * returns, or alternatively stop on the first sg_dma_len(sg) which
18 * is 0. 21 * is 0.
19 */ 22 */
20#define sg_address(sg) (page_address((sg)->page) + (sg)->offset)
21#define sg_dma_address(sg) ((sg)->dma_address) 23#define sg_dma_address(sg) ((sg)->dma_address)
22#define sg_dma_len(sg) ((sg)->length) 24#define sg_dma_len(sg) ((sg)->length)
23 25
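The blackfin-only sg_address() macro can go away because the generic sg_virt() from <linux/scatterlist.h> yields the same page_address(page) + offset value; a remaining caller converts mechanically, as in this sketch:

static void *sg_kernel_address(struct scatterlist *sg)
{
	/* what the removed sg_address(sg) expanded to:
	 *	page_address((sg)->page) + (sg)->offset
	 */
	return sg_virt(sg);	/* generic equivalent */
}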
diff --git a/include/asm-cris/scatterlist.h b/include/asm-cris/scatterlist.h
index 4bdc44c4ac3d..faff53ad1f96 100644
--- a/include/asm-cris/scatterlist.h
+++ b/include/asm-cris/scatterlist.h
@@ -2,11 +2,14 @@
2#define __ASM_CRIS_SCATTERLIST_H 2#define __ASM_CRIS_SCATTERLIST_H
3 3
4struct scatterlist { 4struct scatterlist {
5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
5 char * address; /* Location data is to be transferred to */ 8 char * address; /* Location data is to be transferred to */
6 unsigned int length; 9 unsigned int length;
7 10
8 /* The following is i386 highmem junk - not used by us */ 11 /* The following is i386 highmem junk - not used by us */
9 struct page * page; /* Location for highmem page, if any */ 12 unsigned long page_link;
10 unsigned int offset;/* for highmem, page offset */ 13 unsigned int offset;/* for highmem, page offset */
11 14
12}; 15};
diff --git a/include/asm-frv/scatterlist.h b/include/asm-frv/scatterlist.h
index 8e827fa853f1..99ba76edc42a 100644
--- a/include/asm-frv/scatterlist.h
+++ b/include/asm-frv/scatterlist.h
@@ -4,25 +4,28 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6/* 6/*
7 * Drivers must set either ->address or (preferred) ->page and ->offset 7 * Drivers must set either ->address or (preferred) page and ->offset
8 * to indicate where data must be transferred to/from. 8 * to indicate where data must be transferred to/from.
9 * 9 *
10 * Using ->page is recommended since it handles highmem data as well as 10 * Using page is recommended since it handles highmem data as well as
11 * low mem. ->address is restricted to data which has a virtual mapping, and 11 * low mem. ->address is restricted to data which has a virtual mapping, and
12 * it will go away in the future. Updating to ->page can be automated very 12 * it will go away in the future. Updating to page can be automated very
13 * easily -- something like 13 * easily -- something like
14 * 14 *
15 * sg->address = some_ptr; 15 * sg->address = some_ptr;
16 * 16 *
17 * can be rewritten as 17 * can be rewritten as
18 * 18 *
19 * sg->page = virt_to_page(some_ptr); 19 * sg_set_page(virt_to_page(some_ptr));
20 * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK; 20 * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
21 * 21 *
22 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens 22 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
23 */ 23 */
24struct scatterlist { 24struct scatterlist {
25 struct page *page; /* Location for highmem page, if any */ 25#ifdef CONFIG_DEBUG_SG
26 unsigned long sg_magic;
27#endif
28 unsigned long page_link;
26 unsigned int offset; /* for highmem, page offset */ 29 unsigned int offset; /* for highmem, page offset */
27 30
28 dma_addr_t dma_address; 31 dma_addr_t dma_address;
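Written out, the conversion recipe the updated comment describes looks roughly like this; sg is a struct scatterlist pointer, some_ptr a kernel-virtual buffer, and sg_set_page() is shown in the two-argument form the comment implies (its exact signature has varied between kernel versions):

	/* old style, direct field access:
	 *	sg->page   = virt_to_page(some_ptr);
	 *	sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
	 */
	sg_set_page(sg, virt_to_page(some_ptr));
	sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;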
diff --git a/include/asm-h8300/scatterlist.h b/include/asm-h8300/scatterlist.h
index 985fdf54eaca..d3ecdd87ac90 100644
--- a/include/asm-h8300/scatterlist.h
+++ b/include/asm-h8300/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index 7d5234d50312..d6f57874041d 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -9,7 +9,10 @@
9#include <asm/types.h> 9#include <asm/types.h>
10 10
11struct scatterlist { 11struct scatterlist {
12 struct page *page; 12#ifdef CONFIG_DEBUG_SG
13 unsigned long sg_magic;
14#endif
15 unsigned long page_link;
13 unsigned int offset; 16 unsigned int offset;
14 unsigned int length; /* buffer length */ 17 unsigned int length; /* buffer length */
15 18
diff --git a/include/asm-m32r/scatterlist.h b/include/asm-m32r/scatterlist.h
index 352415ff5eb9..1ed372c73d0b 100644
--- a/include/asm-m32r/scatterlist.h
+++ b/include/asm-m32r/scatterlist.h
@@ -4,9 +4,12 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
7 char * address; /* Location data is to be transferred to, NULL for 10 char * address; /* Location data is to be transferred to, NULL for
8 * highmem page */ 11 * highmem page */
9 struct page * page; /* Location for highmem page, if any */ 12 unsigned long page_link;
10 unsigned int offset;/* for highmem, page offset */ 13 unsigned int offset;/* for highmem, page offset */
11 14
12 dma_addr_t dma_address; 15 dma_addr_t dma_address;
diff --git a/include/asm-m68k/scatterlist.h b/include/asm-m68k/scatterlist.h
index 24887a2d9c7b..d3a7a0edfeca 100644
--- a/include/asm-m68k/scatterlist.h
+++ b/include/asm-m68k/scatterlist.h
@@ -4,7 +4,10 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 unsigned int length; 12 unsigned int length;
10 13
diff --git a/include/asm-m68knommu/module.h b/include/asm-m68knommu/module.h
index 57e95cc01ad5..2e45ab50b232 100644
--- a/include/asm-m68knommu/module.h
+++ b/include/asm-m68knommu/module.h
@@ -1 +1,11 @@
1#include <asm-m68k/module.h> 1#ifndef ASM_M68KNOMMU_MODULE_H
2#define ASM_M68KNOMMU_MODULE_H
3
4struct mod_arch_specific {
5};
6
7#define Elf_Shdr Elf32_Shdr
8#define Elf_Sym Elf32_Sym
9#define Elf_Ehdr Elf32_Ehdr
10
11#endif /* ASM_M68KNOMMU_MODULE_H */
diff --git a/include/asm-m68knommu/scatterlist.h b/include/asm-m68knommu/scatterlist.h
index 4da79d3d3f34..afc4788b0d2c 100644
--- a/include/asm-m68knommu/scatterlist.h
+++ b/include/asm-m68knommu/scatterlist.h
@@ -5,13 +5,15 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 dma_addr_t dma_address; 13 dma_addr_t dma_address;
11 unsigned int length; 14 unsigned int length;
12}; 15};
13 16
14#define sg_address(sg) (page_address((sg)->page) + (sg)->offset)
15#define sg_dma_address(sg) ((sg)->dma_address) 17#define sg_dma_address(sg) ((sg)->dma_address)
16#define sg_dma_len(sg) ((sg)->length) 18#define sg_dma_len(sg) ((sg)->length)
17 19
diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h
index 9ed9169a8849..68bbe9b312f1 100644
--- a/include/asm-m68knommu/uaccess.h
+++ b/include/asm-m68knommu/uaccess.h
@@ -170,10 +170,12 @@ static inline long strnlen_user(const char *src, long n)
170 */ 170 */
171 171
172static inline unsigned long 172static inline unsigned long
173clear_user(void *to, unsigned long n) 173__clear_user(void *to, unsigned long n)
174{ 174{
175 memset(to, 0, n); 175 memset(to, 0, n);
176 return 0; 176 return 0;
177} 177}
178 178
179#define clear_user(to,n) __clear_user(to,n)
180
179#endif /* _M68KNOMMU_UACCESS_H */ 181#endif /* _M68KNOMMU_UACCESS_H */
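With no MMU there is nothing to verify, so __clear_user() is a plain memset to zero and clear_user() simply aliases it; the split mirrors MMU architectures, where the double-underscore variant skips the access_ok() check. A typical caller, sketched with placeholder names ubuf, copied and len:

	/* zero the unwritten tail of a user buffer; sketch only */
	if (copied < len) {
		if (clear_user(ubuf + copied, len - copied))
			return -EFAULT;
	}

clear_user() returns the number of bytes left uncleared, so any nonzero result is treated as a fault.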
diff --git a/include/asm-mips/gt64120.h b/include/asm-mips/gt64120.h
index 4bf8e28f8850..e64b41093c49 100644
--- a/include/asm-mips/gt64120.h
+++ b/include/asm-mips/gt64120.h
@@ -21,6 +21,8 @@
21#ifndef _ASM_GT64120_H 21#ifndef _ASM_GT64120_H
22#define _ASM_GT64120_H 22#define _ASM_GT64120_H
23 23
24#include <linux/clocksource.h>
25
24#include <asm/addrspace.h> 26#include <asm/addrspace.h>
25#include <asm/byteorder.h> 27#include <asm/byteorder.h>
26 28
@@ -572,4 +574,7 @@
572#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs)) 574#define GT_READ(ofs) le32_to_cpu(__GT_READ(ofs))
573#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data)) 575#define GT_WRITE(ofs, data) __GT_WRITE(ofs, cpu_to_le32(data))
574 576
577extern void gt641xx_set_base_clock(unsigned int clock);
578extern int gt641xx_timer0_state(void);
579
575#endif /* _ASM_GT64120_H */ 580#endif /* _ASM_GT64120_H */
diff --git a/include/asm-mips/i8253.h b/include/asm-mips/i8253.h
index 8f689d7df6b1..affb32ce4af9 100644
--- a/include/asm-mips/i8253.h
+++ b/include/asm-mips/i8253.h
@@ -2,8 +2,8 @@
2 * Machine specific IO port address definition for generic. 2 * Machine specific IO port address definition for generic.
3 * Written by Osamu Tomita <tomita@cinet.co.jp> 3 * Written by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5#ifndef _MACH_IO_PORTS_H 5#ifndef __ASM_I8253_H
6#define _MACH_IO_PORTS_H 6#define __ASM_I8253_H
7 7
8/* i8253A PIT registers */ 8/* i8253A PIT registers */
9#define PIT_MODE 0x43 9#define PIT_MODE 0x43
@@ -27,4 +27,4 @@
27 27
28extern void setup_pit_timer(void); 28extern void setup_pit_timer(void);
29 29
30#endif /* !_MACH_IO_PORTS_H */ 30#endif /* __ASM_I8253_H */
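Beyond the guard rename, the header keeps the classic i8253 port numbers. Programming channel 0 as a periodic rate generator, as setup_pit_timer() ultimately does, follows the standard sequence; this is an illustrative sketch, with PIT_CH0 and LATCH assumed to be defined alongside PIT_MODE rather than quoted from the MIPS implementation:

	outb_p(0x34, PIT_MODE);		/* ch 0, LSB then MSB, mode 2, binary */
	outb_p(LATCH & 0xff, PIT_CH0);	/* divisor low byte */
	outb_p(LATCH >> 8, PIT_CH0);	/* divisor high byte */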
diff --git a/include/asm-mips/scatterlist.h b/include/asm-mips/scatterlist.h
index 7af104c95b20..83d69fe17c9f 100644
--- a/include/asm-mips/scatterlist.h
+++ b/include/asm-mips/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page * page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h
index 494aa65dcfbd..0dad844a3b5b 100644
--- a/include/asm-mips/sibyte/sb1250.h
+++ b/include/asm-mips/sibyte/sb1250.h
@@ -45,13 +45,11 @@ extern unsigned int soc_type;
45extern unsigned int periph_rev; 45extern unsigned int periph_rev;
46extern unsigned int zbbus_mhz; 46extern unsigned int zbbus_mhz;
47 47
48extern void sb1250_hpt_setup(void);
49extern void sb1250_time_init(void); 48extern void sb1250_time_init(void);
50extern void sb1250_mask_irq(int cpu, int irq); 49extern void sb1250_mask_irq(int cpu, int irq);
51extern void sb1250_unmask_irq(int cpu, int irq); 50extern void sb1250_unmask_irq(int cpu, int irq);
52extern void sb1250_smp_finish(void); 51extern void sb1250_smp_finish(void);
53 52
54extern void bcm1480_hpt_setup(void);
55extern void bcm1480_time_init(void); 53extern void bcm1480_time_init(void);
56extern void bcm1480_mask_irq(int cpu, int irq); 54extern void bcm1480_mask_irq(int cpu, int irq);
57extern void bcm1480_unmask_irq(int cpu, int irq); 55extern void bcm1480_unmask_irq(int cpu, int irq);
diff --git a/include/asm-parisc/scatterlist.h b/include/asm-parisc/scatterlist.h
index e7211c748446..62269b31ebf4 100644
--- a/include/asm-parisc/scatterlist.h
+++ b/include/asm-parisc/scatterlist.h
@@ -5,7 +5,10 @@
5#include <asm/types.h> 5#include <asm/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
@@ -15,7 +18,7 @@ struct scatterlist {
15 __u32 iova_length; /* bytes mapped */ 18 __u32 iova_length; /* bytes mapped */
16}; 19};
17 20
18#define sg_virt_addr(sg) ((unsigned long)(page_address(sg->page) + sg->offset)) 21#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
19#define sg_dma_address(sg) ((sg)->iova) 22#define sg_dma_address(sg) ((sg)->iova)
20#define sg_dma_len(sg) ((sg)->iova_length) 23#define sg_dma_len(sg) ((sg)->iova_length)
21 24
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 65be95dd03a5..ff52013c0e2d 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -285,9 +285,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
285 BUG_ON(direction == DMA_NONE); 285 BUG_ON(direction == DMA_NONE);
286 286
287 for_each_sg(sgl, sg, nents, i) { 287 for_each_sg(sgl, sg, nents, i) {
288 BUG_ON(!sg->page); 288 BUG_ON(!sg_page(sg));
289 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 289 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
290 sg->dma_address = page_to_bus(sg->page) + sg->offset; 290 sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
291 } 291 }
292 292
293 return nents; 293 return nents;
@@ -328,7 +328,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
328 BUG_ON(direction == DMA_NONE); 328 BUG_ON(direction == DMA_NONE);
329 329
330 for_each_sg(sgl, sg, nents, i) 330 for_each_sg(sgl, sg, nents, i)
331 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 331 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
332} 332}
333 333
334static inline void dma_sync_sg_for_device(struct device *dev, 334static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +341,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
341 BUG_ON(direction == DMA_NONE); 341 BUG_ON(direction == DMA_NONE);
342 342
343 for_each_sg(sgl, sg, nents, i) 343 for_each_sg(sgl, sg, nents, i)
344 __dma_sync_page(sg->page, sg->offset, sg->length, direction); 344 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
345} 345}
346 346
347static inline int dma_mapping_error(dma_addr_t dma_addr) 347static inline int dma_mapping_error(dma_addr_t dma_addr)
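The loops now iterate with for_each_sg() and fetch the page through sg_page() rather than indexing the array and reading sg->page directly, which stays correct once scatterlist entries may be chained. The same pattern applies on the driver side when programming hardware from a mapped list; dev_desc_program() below is a made-up placeholder:

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dev_desc_program(sg_dma_address(sg), sg_dma_len(sg));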
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
index 568135fe52ea..fcb2ebbfddbc 100644
--- a/include/asm-powerpc/mpc52xx.h
+++ b/include/asm-powerpc/mpc52xx.h
@@ -20,6 +20,11 @@
20 20
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22 22
23/* Variants of the 5200(B) */
24#define MPC5200_SVR 0x80110010
25#define MPC5200_SVR_MASK 0xfffffff0
26#define MPC5200B_SVR 0x80110020
27#define MPC5200B_SVR_MASK 0xfffffff0
23 28
24/* ======================================================================== */ 29/* ======================================================================== */
25/* Structures mapping of some unit register set */ 30/* Structures mapping of some unit register set */
@@ -244,6 +249,7 @@ struct mpc52xx_cdm {
244#ifndef __ASSEMBLY__ 249#ifndef __ASSEMBLY__
245 250
246extern void __iomem * mpc52xx_find_and_map(const char *); 251extern void __iomem * mpc52xx_find_and_map(const char *);
252extern void __iomem * mpc52xx_find_and_map_path(const char *path);
247extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); 253extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
248extern void mpc5200_setup_xlb_arbiter(void); 254extern void mpc5200_setup_xlb_arbiter(void);
249extern void mpc52xx_declare_of_platform_devices(void); 255extern void mpc52xx_declare_of_platform_devices(void);
@@ -253,6 +259,9 @@ extern unsigned int mpc52xx_get_irq(void);
253 259
254extern int __init mpc52xx_add_bridge(struct device_node *node); 260extern int __init mpc52xx_add_bridge(struct device_node *node);
255 261
262extern void __init mpc52xx_map_wdt(void);
263extern void mpc52xx_restart(char *cmd);
264
256#endif /* __ASSEMBLY__ */ 265#endif /* __ASSEMBLY__ */
257 266
258#ifdef CONFIG_PM 267#ifdef CONFIG_PM
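The new SVR constants let platform code tell an original MPC5200 from the MPC5200B by masking the System Version Register; roughly, assuming the usual mfspr(SPRN_SVR) accessor:

	u32 svr = mfspr(SPRN_SVR);

	if ((svr & MPC5200B_SVR_MASK) == MPC5200B_SVR)
		; /* MPC5200B-specific setup */
	else if ((svr & MPC5200_SVR_MASK) == MPC5200_SVR)
		; /* original MPC5200 */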
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index b075f619c3b7..fcf7d55afe45 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/dma.h> 14#include <asm/dma.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page *page; 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset; 21 unsigned int offset;
19 unsigned int length; 22 unsigned int length;
20 23
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index cc45780421ca..51df94c73846 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -33,6 +33,7 @@
33 33
34#define set_mb(var, value) do { var = value; mb(); } while (0) 34#define set_mb(var, value) do { var = value; mb(); } while (0)
35 35
36#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
36#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
37#define smp_mb() mb() 38#define smp_mb() mb()
38#define smp_rmb() rmb() 39#define smp_rmb() rmb()
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
new file mode 100644
index 000000000000..352dde194f3c
--- /dev/null
+++ b/include/asm-s390/cpu.h
@@ -0,0 +1,25 @@
1/*
2 * include/asm-s390/cpu.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CPU_H_
9#define _ASM_S390_CPU_H_
10
11#include <linux/types.h>
12#include <linux/percpu.h>
13#include <linux/spinlock.h>
14
15struct s390_idle_data {
16 spinlock_t lock;
17 unsigned int in_idle;
18 unsigned long long idle_count;
19 unsigned long long idle_enter;
20 unsigned long long idle_time;
21};
22
23DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
24
25#endif /* _ASM_S390_CPU_H_ */
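s390_idle_data is updated from the idle loop, so a reader (for instance a sysfs attribute) takes the per-cpu lock before sampling the counters. A sketch of the read side, not copied from the kernel source:

	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long count;

	spin_lock_irq(&idle->lock);
	count = idle->idle_count;	/* consistent snapshot under the lock */
	spin_unlock_irq(&idle->lock);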
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b06314..05b842126b99 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
21 21
22#ifndef __s390x__ 22#ifndef __s390x__
23#define LCTL_OPCODE "lctl" 23#define LCTL_OPCODE "lctl"
24#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
25#else 24#else
26#define LCTL_OPCODE "lctlg" 25#define LCTL_OPCODE "lctlg"
27#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
28#endif 26#endif
29 27
30static inline void enter_lazy_tlb(struct mm_struct *mm, 28static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
31 struct task_struct *tsk)
32{ 29{
30 pgd_t *pgd = mm->pgd;
31 unsigned long asce_bits;
32
33 /* Calculate asce bits from the first pgd table entry. */
34 asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
35#ifdef CONFIG_64BIT
36 asce_bits |= _ASCE_TYPE_REGION3;
37#endif
38 S390_lowcore.user_asce = asce_bits | __pa(pgd);
39 if (switch_amode) {
40 /* Load primary space page table origin. */
41 pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
42 S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
43 asm volatile(LCTL_OPCODE" 1,1,%0\n"
44 : : "m" (S390_lowcore.user_exec_asce) );
45 } else
46 /* Load home space page table origin. */
47 asm volatile(LCTL_OPCODE" 13,13,%0"
48 : : "m" (S390_lowcore.user_asce) );
33} 49}
34 50
35static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 51static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
36 struct task_struct *tsk) 52 struct task_struct *tsk)
37{ 53{
38 pgd_t *shadow_pgd = get_shadow_pgd(next->pgd); 54 if (unlikely(prev == next))
39 55 return;
40 if (prev != next) {
41 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
42 PGTABLE_BITS;
43 if (shadow_pgd) {
44 /* Load primary/secondary space page table origin. */
45 S390_lowcore.user_exec_asce =
46 (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
47 asm volatile(LCTL_OPCODE" 1,1,%0\n"
48 LCTL_OPCODE" 7,7,%1"
49 : : "m" (S390_lowcore.user_exec_asce),
50 "m" (S390_lowcore.user_asce) );
51 } else if (switch_amode) {
52 /* Load primary space page table origin. */
53 asm volatile(LCTL_OPCODE" 1,1,%0"
54 : : "m" (S390_lowcore.user_asce) );
55 } else
56 /* Load home space page table origin. */
57 asm volatile(LCTL_OPCODE" 13,13,%0"
58 : : "m" (S390_lowcore.user_asce) );
59 }
60 cpu_set(smp_processor_id(), next->cpu_vm_mask); 56 cpu_set(smp_processor_id(), next->cpu_vm_mask);
57 update_mm(next, tsk);
61} 58}
62 59
60#define enter_lazy_tlb(mm,tsk) do { } while (0)
63#define deactivate_mm(tsk,mm) do { } while (0) 61#define deactivate_mm(tsk,mm) do { } while (0)
64 62
65static inline void activate_mm(struct mm_struct *prev, 63static inline void activate_mm(struct mm_struct *prev,
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index ceec3826a67c..584d0ee3c7f6 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -82,6 +82,7 @@ typedef struct { unsigned long pte; } pte_t;
82#ifndef __s390x__ 82#ifndef __s390x__
83 83
84typedef struct { unsigned long pmd; } pmd_t; 84typedef struct { unsigned long pmd; } pmd_t;
85typedef struct { unsigned long pud; } pud_t;
85typedef struct { 86typedef struct {
86 unsigned long pgd0; 87 unsigned long pgd0;
87 unsigned long pgd1; 88 unsigned long pgd1;
@@ -90,6 +91,7 @@ typedef struct {
90 } pgd_t; 91 } pgd_t;
91 92
92#define pmd_val(x) ((x).pmd) 93#define pmd_val(x) ((x).pmd)
94#define pud_val(x) ((x).pud)
93#define pgd_val(x) ((x).pgd0) 95#define pgd_val(x) ((x).pgd0)
94 96
95#else /* __s390x__ */ 97#else /* __s390x__ */
@@ -98,10 +100,12 @@ typedef struct {
98 unsigned long pmd0; 100 unsigned long pmd0;
99 unsigned long pmd1; 101 unsigned long pmd1;
100 } pmd_t; 102 } pmd_t;
103typedef struct { unsigned long pud; } pud_t;
101typedef struct { unsigned long pgd; } pgd_t; 104typedef struct { unsigned long pgd; } pgd_t;
102 105
103#define pmd_val(x) ((x).pmd0) 106#define pmd_val(x) ((x).pmd0)
104#define pmd_val1(x) ((x).pmd1) 107#define pmd_val1(x) ((x).pmd1)
108#define pud_val(x) ((x).pud)
105#define pgd_val(x) ((x).pgd) 109#define pgd_val(x) ((x).pgd)
106 110
107#endif /* __s390x__ */ 111#endif /* __s390x__ */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index e45d3c9a4b7e..709dd1740956 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,140 +19,115 @@
19 19
20#define check_pgt_cache() do {} while (0) 20#define check_pgt_cache() do {} while (0)
21 21
22/* 22unsigned long *crst_table_alloc(struct mm_struct *, int);
23 * Page allocation orders. 23void crst_table_free(unsigned long *);
24 */
25#ifndef __s390x__
26# define PTE_ALLOC_ORDER 0
27# define PMD_ALLOC_ORDER 0
28# define PGD_ALLOC_ORDER 1
29#else /* __s390x__ */
30# define PTE_ALLOC_ORDER 0
31# define PMD_ALLOC_ORDER 2
32# define PGD_ALLOC_ORDER 2
33#endif /* __s390x__ */
34 24
35/* 25unsigned long *page_table_alloc(int);
36 * Allocate and free page tables. The xxx_kernel() versions are 26void page_table_free(unsigned long *);
37 * used to allocate a kernel page table - this turns on ASN bits
38 * if any.
39 */
40 27
41static inline pgd_t *pgd_alloc(struct mm_struct *mm) 28static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
42{ 29{
43 pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER); 30 *s = val;
44 int i; 31 n = (n / 256) - 1;
45 32 asm volatile(
46 if (!pgd) 33#ifdef CONFIG_64BIT
47 return NULL; 34 " mvc 8(248,%0),0(%0)\n"
48 if (s390_noexec) {
49 pgd_t *shadow_pgd = (pgd_t *)
50 __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
51 struct page *page = virt_to_page(pgd);
52
53 if (!shadow_pgd) {
54 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
55 return NULL;
56 }
57 page->lru.next = (void *) shadow_pgd;
58 }
59 for (i = 0; i < PTRS_PER_PGD; i++)
60#ifndef __s390x__
61 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
62#else 35#else
63 pgd_clear(pgd + i); 36 " mvc 4(252,%0),0(%0)\n"
64#endif 37#endif
65 return pgd; 38 "0: mvc 256(256,%0),0(%0)\n"
39 " la %0,256(%0)\n"
40 " brct %1,0b\n"
41 : "+a" (s), "+d" (n));
66} 42}
67 43
68static inline void pgd_free(pgd_t *pgd) 44static inline void crst_table_init(unsigned long *crst, unsigned long entry)
69{ 45{
70 pgd_t *shadow_pgd = get_shadow_pgd(pgd); 46 clear_table(crst, entry, sizeof(unsigned long)*2048);
71 47 crst = get_shadow_table(crst);
72 if (shadow_pgd) 48 if (crst)
73 free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER); 49 clear_table(crst, entry, sizeof(unsigned long)*2048);
74 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
75} 50}
76 51
77#ifndef __s390x__ 52#ifndef __s390x__
78/* 53
79 * page middle directory allocation/free routines. 54static inline unsigned long pgd_entry_type(struct mm_struct *mm)
80 * We use pmd cache only on s390x, so these are dummy routines. This
81 * code never triggers because the pgd will always be present.
82 */
83#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
84#define pmd_free(x) do { } while (0)
85#define __pmd_free_tlb(tlb,x) do { } while (0)
86#define pgd_populate(mm, pmd, pte) BUG()
87#define pgd_populate_kernel(mm, pmd, pte) BUG()
88#else /* __s390x__ */
89static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
90{ 55{
91 pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER); 56 return _SEGMENT_ENTRY_EMPTY;
92 int i;
93
94 if (!pmd)
95 return NULL;
96 if (s390_noexec) {
97 pmd_t *shadow_pmd = (pmd_t *)
98 __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
99 struct page *page = virt_to_page(pmd);
100
101 if (!shadow_pmd) {
102 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
103 return NULL;
104 }
105 page->lru.next = (void *) shadow_pmd;
106 }
107 for (i=0; i < PTRS_PER_PMD; i++)
108 pmd_clear(pmd + i);
109 return pmd;
110} 57}
111 58
112static inline void pmd_free (pmd_t *pmd) 59#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
60#define pud_free(x) do { } while (0)
61
62#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
63#define pmd_free(x) do { } while (0)
64
65#define pgd_populate(mm, pgd, pud) BUG()
66#define pgd_populate_kernel(mm, pgd, pud) BUG()
67
68#define pud_populate(mm, pud, pmd) BUG()
69#define pud_populate_kernel(mm, pud, pmd) BUG()
70
71#else /* __s390x__ */
72
73static inline unsigned long pgd_entry_type(struct mm_struct *mm)
113{ 74{
114 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 75 return _REGION3_ENTRY_EMPTY;
76}
77
78#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
79#define pud_free(x) do { } while (0)
115 80
116 if (shadow_pmd) 81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
117 free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER); 82{
118 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); 83 unsigned long *crst = crst_table_alloc(mm, s390_noexec);
84 if (crst)
85 crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
86 return (pmd_t *) crst;
119} 87}
88#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
120 89
121#define __pmd_free_tlb(tlb,pmd) \ 90#define pgd_populate(mm, pgd, pud) BUG()
122 do { \ 91#define pgd_populate_kernel(mm, pgd, pud) BUG()
123 tlb_flush_mmu(tlb, 0, 0); \
124 pmd_free(pmd); \
125 } while (0)
126 92
127static inline void 93static inline void pud_populate_kernel(struct mm_struct *mm,
128pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 94 pud_t *pud, pmd_t *pmd)
129{ 95{
130 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); 96 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
131} 97}
132 98
133static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 99static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
134{ 100{
135 pgd_t *shadow_pgd = get_shadow_pgd(pgd); 101 pud_t *shadow_pud = get_shadow_table(pud);
136 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 102 pmd_t *shadow_pmd = get_shadow_table(pmd);
137 103
138 if (shadow_pgd && shadow_pmd) 104 if (shadow_pud && shadow_pmd)
139 pgd_populate_kernel(mm, shadow_pgd, shadow_pmd); 105 pud_populate_kernel(mm, shadow_pud, shadow_pmd);
140 pgd_populate_kernel(mm, pgd, pmd); 106 pud_populate_kernel(mm, pud, pmd);
141} 107}
142 108
143#endif /* __s390x__ */ 109#endif /* __s390x__ */
144 110
111static inline pgd_t *pgd_alloc(struct mm_struct *mm)
112{
113 unsigned long *crst = crst_table_alloc(mm, s390_noexec);
114 if (crst)
115 crst_table_init(crst, pgd_entry_type(mm));
116 return (pgd_t *) crst;
117}
118#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
119
145static inline void 120static inline void
146pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 121pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
147{ 122{
148#ifndef __s390x__ 123#ifndef __s390x__
149 pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte); 124 pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
150 pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256); 125 pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
151 pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512); 126 pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
152 pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768); 127 pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
153#else /* __s390x__ */ 128#else /* __s390x__ */
154 pmd_val(*pmd) = _PMD_ENTRY + __pa(pte); 129 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
155 pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256); 130 pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
156#endif /* __s390x__ */ 131#endif /* __s390x__ */
157} 132}
158 133
@@ -160,7 +135,7 @@ static inline void
160pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) 135pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
161{ 136{
162 pte_t *pte = (pte_t *)page_to_phys(page); 137 pte_t *pte = (pte_t *)page_to_phys(page);
163 pmd_t *shadow_pmd = get_shadow_pmd(pmd); 138 pmd_t *shadow_pmd = get_shadow_table(pmd);
164 pte_t *shadow_pte = get_shadow_pte(pte); 139 pte_t *shadow_pte = get_shadow_pte(pte);
165 140
166 pmd_populate_kernel(mm, pmd, pte); 141 pmd_populate_kernel(mm, pmd, pte);
@@ -171,67 +146,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
171/* 146/*
172 * page table entry allocation/free routines. 147 * page table entry allocation/free routines.
173 */ 148 */
174static inline pte_t * 149#define pte_alloc_one_kernel(mm, vmaddr) \
175pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) 150 ((pte_t *) page_table_alloc(s390_noexec))
176{ 151#define pte_alloc_one(mm, vmaddr) \
177 pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); 152 virt_to_page(page_table_alloc(s390_noexec))
178 int i; 153
179 154#define pte_free_kernel(pte) \
180 if (!pte) 155 page_table_free((unsigned long *) pte)
181 return NULL; 156#define pte_free(pte) \
182 if (s390_noexec) { 157 page_table_free((unsigned long *) page_to_phys((struct page *) pte))
183 pte_t *shadow_pte = (pte_t *)
184 __get_free_page(GFP_KERNEL|__GFP_REPEAT);
185 struct page *page = virt_to_page(pte);
186
187 if (!shadow_pte) {
188 free_page((unsigned long) pte);
189 return NULL;
190 }
191 page->lru.next = (void *) shadow_pte;
192 }
193 for (i=0; i < PTRS_PER_PTE; i++) {
194 pte_clear(mm, vmaddr, pte + i);
195 vmaddr += PAGE_SIZE;
196 }
197 return pte;
198}
199
200static inline struct page *
201pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
202{
203 pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
204 if (pte)
205 return virt_to_page(pte);
206 return NULL;
207}
208
209static inline void pte_free_kernel(pte_t *pte)
210{
211 pte_t *shadow_pte = get_shadow_pte(pte);
212
213 if (shadow_pte)
214 free_page((unsigned long) shadow_pte);
215 free_page((unsigned long) pte);
216}
217
218static inline void pte_free(struct page *pte)
219{
220 struct page *shadow_page = get_shadow_page(pte);
221
222 if (shadow_page)
223 __free_page(shadow_page);
224 __free_page(pte);
225}
226
227#define __pte_free_tlb(tlb, pte) \
228({ \
229 struct mmu_gather *__tlb = (tlb); \
230 struct page *__pte = (pte); \
231 struct page *shadow_page = get_shadow_page(__pte); \
232 if (shadow_page) \
233 tlb_remove_page(__tlb, shadow_page); \
234 tlb_remove_page(__tlb, __pte); \
235})
236 158
237#endif /* _S390_PGALLOC_H */ 159#endif /* _S390_PGALLOC_H */
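The new clear_table() fills a table with a repeating entry value by mvc propagation: the first entry is stored, the first 256-byte block is replicated from it, and each following 256-byte block is copied from the one before. Ignoring the inline assembly and the shadow-table handling in crst_table_init(), the effect is equivalent to this portable sketch, where n is a byte count:

static inline void clear_table_equiv(unsigned long *s, unsigned long val,
				     size_t n)
{
	size_t i;

	for (i = 0; i < n / sizeof(unsigned long); i++)
		s[i] = val;
}

crst_table_init() applies it to the full 2048-entry region or segment table and, when a noexec shadow table exists, to the shadow as well.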
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc31..f2cc25b74adf 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -13,8 +13,6 @@
13#ifndef _ASM_S390_PGTABLE_H 13#ifndef _ASM_S390_PGTABLE_H
14#define _ASM_S390_PGTABLE_H 14#define _ASM_S390_PGTABLE_H
15 15
16#include <asm-generic/4level-fixup.h>
17
18/* 16/*
19 * The Linux memory management assumes a three-level page table setup. For 17 * The Linux memory management assumes a three-level page table setup. For
20 * s390 31 bit we "fold" the mid level into the top-level page table, so 18 * s390 31 bit we "fold" the mid level into the top-level page table, so
@@ -35,9 +33,6 @@
35#include <asm/bug.h> 33#include <asm/bug.h>
36#include <asm/processor.h> 34#include <asm/processor.h>
37 35
38struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
39struct mm_struct;
40
41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 36extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
42extern void paging_init(void); 37extern void paging_init(void);
43extern void vmem_map_init(void); 38extern void vmem_map_init(void);
@@ -63,14 +58,18 @@ extern char empty_zero_page[PAGE_SIZE];
63 */ 58 */
64#ifndef __s390x__ 59#ifndef __s390x__
65# define PMD_SHIFT 22 60# define PMD_SHIFT 22
61# define PUD_SHIFT 22
66# define PGDIR_SHIFT 22 62# define PGDIR_SHIFT 22
67#else /* __s390x__ */ 63#else /* __s390x__ */
68# define PMD_SHIFT 21 64# define PMD_SHIFT 21
65# define PUD_SHIFT 31
69# define PGDIR_SHIFT 31 66# define PGDIR_SHIFT 31
70#endif /* __s390x__ */ 67#endif /* __s390x__ */
71 68
72#define PMD_SIZE (1UL << PMD_SHIFT) 69#define PMD_SIZE (1UL << PMD_SHIFT)
73#define PMD_MASK (~(PMD_SIZE-1)) 70#define PMD_MASK (~(PMD_SIZE-1))
71#define PUD_SIZE (1UL << PUD_SHIFT)
72#define PUD_MASK (~(PUD_SIZE-1))
74#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 73#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
75#define PGDIR_MASK (~(PGDIR_SIZE-1)) 74#define PGDIR_MASK (~(PGDIR_SIZE-1))
76 75
@@ -83,10 +82,12 @@ extern char empty_zero_page[PAGE_SIZE];
83#ifndef __s390x__ 82#ifndef __s390x__
84# define PTRS_PER_PTE 1024 83# define PTRS_PER_PTE 1024
85# define PTRS_PER_PMD 1 84# define PTRS_PER_PMD 1
85# define PTRS_PER_PUD 1
86# define PTRS_PER_PGD 512 86# define PTRS_PER_PGD 512
87#else /* __s390x__ */ 87#else /* __s390x__ */
88# define PTRS_PER_PTE 512 88# define PTRS_PER_PTE 512
89# define PTRS_PER_PMD 1024 89# define PTRS_PER_PMD 1024
90# define PTRS_PER_PUD 1
90# define PTRS_PER_PGD 2048 91# define PTRS_PER_PGD 2048
91#endif /* __s390x__ */ 92#endif /* __s390x__ */
92 93
@@ -96,6 +97,8 @@ extern char empty_zero_page[PAGE_SIZE];
96 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) 97 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
97#define pmd_ERROR(e) \ 98#define pmd_ERROR(e) \
98 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) 99 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
100#define pud_ERROR(e) \
101 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
99#define pgd_ERROR(e) \ 102#define pgd_ERROR(e) \
100 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) 103 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
101 104
@@ -195,7 +198,7 @@ extern unsigned long vmalloc_end;
195 * I Segment-Invalid Bit: Segment is not available for address-translation 198 * I Segment-Invalid Bit: Segment is not available for address-translation
196 * TT Type 01 199 * TT Type 01
197 * TF 200 * TF
198 * TL Table lenght 201 * TL Table length
199 * 202 *
200 * The 64 bit regiontable origin of S390 has following format: 203 * The 64 bit regiontable origin of S390 has following format:
201 * | region table origon | DTTL 204 * | region table origon | DTTL
@@ -221,6 +224,8 @@ extern unsigned long vmalloc_end;
221/* Hardware bits in the page table entry */ 224/* Hardware bits in the page table entry */
222#define _PAGE_RO 0x200 /* HW read-only bit */ 225#define _PAGE_RO 0x200 /* HW read-only bit */
223#define _PAGE_INVALID 0x400 /* HW invalid bit */ 226#define _PAGE_INVALID 0x400 /* HW invalid bit */
227
228/* Software bits in the page table entry */
224#define _PAGE_SWT 0x001 /* SW pte type bit t */ 229#define _PAGE_SWT 0x001 /* SW pte type bit t */
225#define _PAGE_SWX 0x002 /* SW pte type bit x */ 230#define _PAGE_SWX 0x002 /* SW pte type bit x */
226 231
@@ -264,60 +269,75 @@ extern unsigned long vmalloc_end;
264 269
265#ifndef __s390x__ 270#ifndef __s390x__
266 271
267/* Bits in the segment table entry */ 272/* Bits in the segment table address-space-control-element */
268#define _PAGE_TABLE_LEN 0xf /* only full page-tables */ 273#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
269#define _PAGE_TABLE_COM 0x10 /* common page-table */ 274#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
270#define _PAGE_TABLE_INV 0x20 /* invalid page-table */ 275#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
271#define _SEG_PRESENT 0x001 /* Software (overlap with PTL) */ 276#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
272 277#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
273/* Bits int the storage key */
274#define _PAGE_CHANGED 0x02 /* HW changed bit */
275#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
276
277#define _USER_SEG_TABLE_LEN 0x7f /* user-segment-table up to 2 GB */
278#define _KERNEL_SEG_TABLE_LEN 0x7f /* kernel-segment-table up to 2 GB */
279
280/*
281 * User and Kernel pagetables are identical
282 */
283#define _PAGE_TABLE _PAGE_TABLE_LEN
284#define _KERNPG_TABLE _PAGE_TABLE_LEN
285
286/*
287 * The Kernel segment-tables includes the User segment-table
288 */
289 278
290#define _SEGMENT_TABLE (_USER_SEG_TABLE_LEN|0x80000000|0x100) 279/* Bits in the segment table entry */
291#define _KERNSEG_TABLE _KERNEL_SEG_TABLE_LEN 280#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
281#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
282#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
283#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
292 284
293#define USER_STD_MASK 0x00000080UL 285#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
286#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
294 287
295#else /* __s390x__ */ 288#else /* __s390x__ */
296 289
290/* Bits in the segment/region table address-space-control-element */
291#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
292#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
293#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
294#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
295#define _ASCE_REAL_SPACE 0x20 /* real space control */
296#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
297#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
298#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
299#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
300#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
301#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
302
303/* Bits in the region table entry */
304#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
305#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
306#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
307#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
308#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
309#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
310#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
311
312#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
313#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
314#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
315#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
316#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
317#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
318
297/* Bits in the segment table entry */ 319/* Bits in the segment table entry */
298#define _PMD_ENTRY_INV 0x20 /* invalid segment table entry */ 320#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
299#define _PMD_ENTRY 0x00 321#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
322#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
323
324#define _SEGMENT_ENTRY (0)
325#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
300 326
301/* Bits in the region third table entry */ 327#endif /* __s390x__ */
302#define _PGD_ENTRY_INV 0x20 /* invalid region table entry */
303#define _PGD_ENTRY 0x07
304 328
305/* 329/*
306 * User and kernel page directory 330 * A user page table pointer has the space-switch-event bit, the
331 * private-space-control bit and the storage-alteration-event-control
332 * bit set. A kernel page table pointer doesn't need them.
307 */ 333 */
308#define _REGION_THIRD 0x4 334#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
309#define _REGION_THIRD_LEN 0x3 335 _ASCE_ALT_EVENT)
310#define _REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
311#define _KERN_REGION_TABLE (_REGION_THIRD|_REGION_THIRD_LEN)
312
313#define USER_STD_MASK 0x0000000000000080UL
314 336
315/* Bits in the storage key */ 337/* Bits int the storage key */
316#define _PAGE_CHANGED 0x02 /* HW changed bit */ 338#define _PAGE_CHANGED 0x02 /* HW changed bit */
317#define _PAGE_REFERENCED 0x04 /* HW referenced bit */ 339#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
318 340
319#endif /* __s390x__ */
320
321/* 341/*
322 * Page protection definitions. 342 * Page protection definitions.
323 */ 343 */
@@ -358,65 +378,38 @@ extern unsigned long vmalloc_end;
358#define __S111 PAGE_EX_RW 378#define __S111 PAGE_EX_RW
359 379
360#ifndef __s390x__ 380#ifndef __s390x__
361# define PMD_SHADOW_SHIFT 1 381# define PxD_SHADOW_SHIFT 1
362# define PGD_SHADOW_SHIFT 1
363#else /* __s390x__ */ 382#else /* __s390x__ */
364# define PMD_SHADOW_SHIFT 2 383# define PxD_SHADOW_SHIFT 2
365# define PGD_SHADOW_SHIFT 2
366#endif /* __s390x__ */ 384#endif /* __s390x__ */
367 385
368static inline struct page *get_shadow_page(struct page *page) 386static inline struct page *get_shadow_page(struct page *page)
369{ 387{
370 if (s390_noexec && !list_empty(&page->lru)) 388 if (s390_noexec && page->index)
371 return virt_to_page(page->lru.next); 389 return virt_to_page((void *)(addr_t) page->index);
372 return NULL;
373}
374
375static inline pte_t *get_shadow_pte(pte_t *ptep)
376{
377 unsigned long pteptr = (unsigned long) (ptep);
378
379 if (s390_noexec) {
380 unsigned long offset = pteptr & (PAGE_SIZE - 1);
381 void *addr = (void *) (pteptr ^ offset);
382 struct page *page = virt_to_page(addr);
383 if (!list_empty(&page->lru))
384 return (pte_t *) ((unsigned long) page->lru.next |
385 offset);
386 }
387 return NULL; 390 return NULL;
388} 391}
389 392
390static inline pmd_t *get_shadow_pmd(pmd_t *pmdp) 393static inline void *get_shadow_pte(void *table)
391{ 394{
392 unsigned long pmdptr = (unsigned long) (pmdp); 395 unsigned long addr, offset;
396 struct page *page;
393 397
394 if (s390_noexec) { 398 addr = (unsigned long) table;
395 unsigned long offset = pmdptr & 399 offset = addr & (PAGE_SIZE - 1);
396 ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1); 400 page = virt_to_page((void *)(addr ^ offset));
397 void *addr = (void *) (pmdptr ^ offset); 401 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
398 struct page *page = virt_to_page(addr);
399 if (!list_empty(&page->lru))
400 return (pmd_t *) ((unsigned long) page->lru.next |
401 offset);
402 }
403 return NULL;
404} 402}
405 403
406static inline pgd_t *get_shadow_pgd(pgd_t *pgdp) 404static inline void *get_shadow_table(void *table)
407{ 405{
408 unsigned long pgdptr = (unsigned long) (pgdp); 406 unsigned long addr, offset;
407 struct page *page;
409 408
410 if (s390_noexec) { 409 addr = (unsigned long) table;
411 unsigned long offset = pgdptr & 410 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
412 ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1); 411 page = virt_to_page((void *)(addr ^ offset));
413 void *addr = (void *) (pgdptr ^ offset); 412 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
414 struct page *page = virt_to_page(addr);
415 if (!list_empty(&page->lru))
416 return (pgd_t *) ((unsigned long) page->lru.next |
417 offset);
418 }
419 return NULL;
420} 413}
421 414
422/* 415/*
@@ -424,7 +417,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
424 * within a page table are directly modified. Thus, the following 417 * within a page table are directly modified. Thus, the following
425 * hook is made available. 418 * hook is made available.
426 */ 419 */
427static inline void set_pte(pte_t *pteptr, pte_t pteval) 420static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
421 pte_t *pteptr, pte_t pteval)
428{ 422{
429 pte_t *shadow_pte = get_shadow_pte(pteptr); 423 pte_t *shadow_pte = get_shadow_pte(pteptr);
430 424
@@ -437,7 +431,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
437 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; 431 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
438 } 432 }
439} 433}
440#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
441 434
442/* 435/*
443 * pgd/pmd/pte query functions 436 * pgd/pmd/pte query functions
@@ -448,47 +441,50 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
448static inline int pgd_none(pgd_t pgd) { return 0; } 441static inline int pgd_none(pgd_t pgd) { return 0; }
449static inline int pgd_bad(pgd_t pgd) { return 0; } 442static inline int pgd_bad(pgd_t pgd) { return 0; }
450 443
451static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; } 444static inline int pud_present(pud_t pud) { return 1; }
452static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; } 445static inline int pud_none(pud_t pud) { return 0; }
453static inline int pmd_bad(pmd_t pmd) 446static inline int pud_bad(pud_t pud) { return 0; }
454{
455 return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
456}
457 447
458#else /* __s390x__ */ 448#else /* __s390x__ */
459 449
460static inline int pgd_present(pgd_t pgd) 450static inline int pgd_present(pgd_t pgd) { return 1; }
451static inline int pgd_none(pgd_t pgd) { return 0; }
452static inline int pgd_bad(pgd_t pgd) { return 0; }
453
454static inline int pud_present(pud_t pud)
461{ 455{
462 return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY; 456 return pud_val(pud) & _REGION_ENTRY_ORIGIN;
463} 457}
464 458
465static inline int pgd_none(pgd_t pgd) 459static inline int pud_none(pud_t pud)
466{ 460{
467 return pgd_val(pgd) & _PGD_ENTRY_INV; 461 return pud_val(pud) & _REGION_ENTRY_INV;
468} 462}
469 463
470static inline int pgd_bad(pgd_t pgd) 464static inline int pud_bad(pud_t pud)
471{ 465{
472 return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY; 466 unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
467 return (pud_val(pud) & mask) != _REGION3_ENTRY;
473} 468}
474 469
470#endif /* __s390x__ */
471
475static inline int pmd_present(pmd_t pmd) 472static inline int pmd_present(pmd_t pmd)
476{ 473{
477 return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY; 474 return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
478} 475}
479 476
480static inline int pmd_none(pmd_t pmd) 477static inline int pmd_none(pmd_t pmd)
481{ 478{
482 return pmd_val(pmd) & _PMD_ENTRY_INV; 479 return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
483} 480}
484 481
485static inline int pmd_bad(pmd_t pmd) 482static inline int pmd_bad(pmd_t pmd)
486{ 483{
487 return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY; 484 unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
485 return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
488} 486}
489 487
490#endif /* __s390x__ */
491
492static inline int pte_none(pte_t pte) 488static inline int pte_none(pte_t pte)
493{ 489{
494 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); 490 return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -508,7 +504,8 @@ static inline int pte_file(pte_t pte)
508 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; 504 return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
509} 505}
510 506
511#define pte_same(a,b) (pte_val(a) == pte_val(b)) 507#define __HAVE_ARCH_PTE_SAME
508#define pte_same(a,b) (pte_val(a) == pte_val(b))
512 509
513/* 510/*
514 * query functions pte_write/pte_dirty/pte_young only work if 511 * query functions pte_write/pte_dirty/pte_young only work if
@@ -543,58 +540,52 @@ static inline int pte_young(pte_t pte)
543 540
544#ifndef __s390x__ 541#ifndef __s390x__
545 542
546static inline void pgd_clear(pgd_t * pgdp) { } 543#define pgd_clear(pgd) do { } while (0)
544#define pud_clear(pud) do { } while (0)
547 545
548static inline void pmd_clear_kernel(pmd_t * pmdp) 546static inline void pmd_clear_kernel(pmd_t * pmdp)
549{ 547{
550 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 548 pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
551 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; 549 pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
552 pmd_val(pmdp[2]) = _PAGE_TABLE_INV; 550 pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
553 pmd_val(pmdp[3]) = _PAGE_TABLE_INV; 551 pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
554}
555
556static inline void pmd_clear(pmd_t * pmdp)
557{
558 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
559
560 pmd_clear_kernel(pmdp);
561 if (shadow_pmd)
562 pmd_clear_kernel(shadow_pmd);
563} 552}
564 553
565#else /* __s390x__ */ 554#else /* __s390x__ */
566 555
567static inline void pgd_clear_kernel(pgd_t * pgdp) 556#define pgd_clear(pgd) do { } while (0)
557
558static inline void pud_clear_kernel(pud_t *pud)
568{ 559{
569 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 560 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
570} 561}
571 562
572static inline void pgd_clear(pgd_t * pgdp) 563static inline void pud_clear(pud_t * pud)
573{ 564{
574 pgd_t *shadow_pgd = get_shadow_pgd(pgdp); 565 pud_t *shadow = get_shadow_table(pud);
575 566
576 pgd_clear_kernel(pgdp); 567 pud_clear_kernel(pud);
577 if (shadow_pgd) 568 if (shadow)
578 pgd_clear_kernel(shadow_pgd); 569 pud_clear_kernel(shadow);
579} 570}
580 571
581static inline void pmd_clear_kernel(pmd_t * pmdp) 572static inline void pmd_clear_kernel(pmd_t * pmdp)
582{ 573{
583 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 574 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
584 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 575 pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
585} 576}
586 577
578#endif /* __s390x__ */
579
587static inline void pmd_clear(pmd_t * pmdp) 580static inline void pmd_clear(pmd_t * pmdp)
588{ 581{
589 pmd_t *shadow_pmd = get_shadow_pmd(pmdp); 582 pmd_t *shadow_pmd = get_shadow_table(pmdp);
590 583
591 pmd_clear_kernel(pmdp); 584 pmd_clear_kernel(pmdp);
592 if (shadow_pmd) 585 if (shadow_pmd)
593 pmd_clear_kernel(shadow_pmd); 586 pmd_clear_kernel(shadow_pmd);
594} 587}
595 588
596#endif /* __s390x__ */
597
598static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 589static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
599{ 590{
600 pte_t *shadow_pte = get_shadow_pte(ptep); 591 pte_t *shadow_pte = get_shadow_pte(ptep);
@@ -663,24 +654,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
663 return pte; 654 return pte;
664} 655}
665 656
666static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 657#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
658static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
659 unsigned long addr, pte_t *ptep)
667{ 660{
668 return 0; 661 return 0;
669} 662}
670 663
671static inline int 664#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
672ptep_clear_flush_young(struct vm_area_struct *vma, 665static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
673 unsigned long address, pte_t *ptep) 666 unsigned long address, pte_t *ptep)
674{ 667{
675 /* No need to flush TLB; bits are in storage key */ 668 /* No need to flush TLB; bits are in storage key */
676 return ptep_test_and_clear_young(vma, address, ptep); 669 return 0;
677}
678
679static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
680{
681 pte_t pte = *ptep;
682 pte_clear(mm, addr, ptep);
683 return pte;
684} 670}
685 671
686static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 672static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +695,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
709 __ptep_ipte(address, ptep); 695 __ptep_ipte(address, ptep);
710} 696}
711 697
698/*
699 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
700 * both clear the TLB for the unmapped pte. The reason is that
701 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
702 * to modify an active pte. The sequence is
703 * 1) ptep_get_and_clear
704 * 2) set_pte_at
705 * 3) flush_tlb_range
 706 * On s390 the tlb needs to be flushed together with the modification of the pte
 707 * if the pte is active. The only way this can be implemented is to
708 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
709 * is a nop.
710 */
711#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
712#define ptep_get_and_clear(__mm, __address, __ptep) \
713({ \
714 pte_t __pte = *(__ptep); \
715 if (atomic_read(&(__mm)->mm_users) > 1 || \
716 (__mm) != current->active_mm) \
717 ptep_invalidate(__address, __ptep); \
718 else \
719 pte_clear((__mm), (__address), (__ptep)); \
720 __pte; \
721})
722
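For orientation, this is the common-code sequence the comment refers to, in the shape of mm/mprotect.c's change_pte_range(); a simplified sketch only, assuming the usual 2.6.24-era helpers, with the function name invented for illustration:

static void example_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pgprot_t newprot)
{
	pte_t *pte = pte_offset_map(pmd, addr);

	do {
		if (pte_present(*pte)) {
			/* 1) clear the pte - on s390 this also purges the
			 *    TLB entry if the mm is in active use */
			pte_t ptent = ptep_get_and_clear(mm, addr, pte);
			/* 2) store the modified pte */
			set_pte_at(mm, addr, pte, pte_modify(ptent, newprot));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	/* 3) the caller finishes with flush_tlb_range(), which can be a
	 *    conditional no-op here because step 1 already flushed */
}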
723#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
712static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 724static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
713 unsigned long address, pte_t *ptep) 725 unsigned long address, pte_t *ptep)
714{ 726{
@@ -717,12 +729,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
717 return pte; 729 return pte;
718} 730}
719 731
720static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 732/*
733 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
734 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
735 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
736 * cannot be accessed while the batched unmap is running. In this case
737 * full==1 and a simple pte_clear is enough. See tlb.h.
738 */
739#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
740static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
741 unsigned long addr,
742 pte_t *ptep, int full)
721{ 743{
722 pte_t old_pte = *ptep; 744 pte_t pte = *ptep;
723 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 745
746 if (full)
747 pte_clear(mm, addr, ptep);
748 else
749 ptep_invalidate(addr, ptep);
750 return pte;
724} 751}
725 752
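A sketch of the batched-unmap caller this optimization targets (zap_pte_range style; the function name and the simplification to a single, page-backed pte are illustrative only):

static void example_zap_one_pte(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t ptent = *ptep;

	if (!pte_present(ptent))
		return;
	/* tlb->fullmm is set when tlb_gather_mmu() could flush up front,
	 * so the per-pte invalidate can be skipped (full == 1) */
	ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
	tlb_remove_page(tlb, pte_page(ptent));
}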
753#define __HAVE_ARCH_PTEP_SET_WRPROTECT
754#define ptep_set_wrprotect(__mm, __addr, __ptep) \
755({ \
756 pte_t __pte = *(__ptep); \
757 if (pte_write(__pte)) { \
758 if (atomic_read(&(__mm)->mm_users) > 1 || \
759 (__mm) != current->active_mm) \
760 ptep_invalidate(__addr, __ptep); \
761 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
762 } \
763})
764
765#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
726#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 766#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
727({ \ 767({ \
728 int __changed = !pte_same(*(__ptep), __entry); \ 768 int __changed = !pte_same(*(__ptep), __entry); \
@@ -740,11 +780,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
740 * should therefore only be called if it is not mapped in any 780 * should therefore only be called if it is not mapped in any
741 * address space. 781 * address space.
742 */ 782 */
783#define __HAVE_ARCH_PAGE_TEST_DIRTY
743static inline int page_test_dirty(struct page *page) 784static inline int page_test_dirty(struct page *page)
744{ 785{
745 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; 786 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
746} 787}
747 788
789#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
748static inline void page_clear_dirty(struct page *page) 790static inline void page_clear_dirty(struct page *page)
749{ 791{
750 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); 792 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
@@ -753,6 +795,7 @@ static inline void page_clear_dirty(struct page *page)
753/* 795/*
754 * Test and clear referenced bit in storage key. 796 * Test and clear referenced bit in storage key.
755 */ 797 */
798#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
756static inline int page_test_and_clear_young(struct page *page) 799static inline int page_test_and_clear_young(struct page *page)
757{ 800{
758 unsigned long physpage = page_to_phys(page); 801 unsigned long physpage = page_to_phys(page);
@@ -784,63 +827,48 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
784 return mk_pte_phys(physpage, pgprot); 827 return mk_pte_phys(physpage, pgprot);
785} 828}
786 829
787static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) 830#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
788{ 831#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
789 unsigned long physpage = __pa((pfn) << PAGE_SHIFT); 832#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
790 833#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
791 return mk_pte_phys(physpage, pgprot);
792}
793
794#ifdef __s390x__
795
796static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
797{
798 unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
799
800 return __pmd(physpage + pgprot_val(pgprot));
801}
802
803#endif /* __s390x__ */
804
805#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
806#define pte_page(x) pfn_to_page(pte_pfn(x))
807 834
808#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) 835#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
836#define pgd_offset_k(address) pgd_offset(&init_mm, address)
809 837
810#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) 838#ifndef __s390x__
811 839
812#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK) 840#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 841#define pud_deref(pud) ({ BUG(); 0UL; })
 842#define pgd_deref(pgd) ({ BUG(); 0UL; })
813 843
814#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) 844#define pud_offset(pgd, address) ((pud_t *) pgd)
845#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
815 846
816/* to find an entry in a page-table-directory */ 847#else /* __s390x__ */
817#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
818#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
819 848
820/* to find an entry in a kernel page-table-directory */ 849#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
821#define pgd_offset_k(address) pgd_offset(&init_mm, address) 850#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
851#define pgd_deref(pgd) ({ BUG(); 0UL; })
822 852
823#ifndef __s390x__ 853#define pud_offset(pgd, address) ((pud_t *) pgd)
824 854
825/* Find an entry in the second-level page table.. */ 855static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
826static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
827{ 856{
828 return (pmd_t *) dir; 857 pmd_t *pmd = (pmd_t *) pud_deref(*pud);
858 return pmd + pmd_index(address);
829} 859}
830 860
831#else /* __s390x__ */ 861#endif /* __s390x__ */
832 862
833/* Find an entry in the second-level page table.. */ 863#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
834#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 864#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
835#define pmd_offset(dir,addr) \ 865#define pte_page(x) pfn_to_page(pte_pfn(x))
836 ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
837 866
838#endif /* __s390x__ */ 867#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
839 868
840/* Find an entry in the third-level page table.. */ 869/* Find an entry in the lowest level page table.. */
841#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 870#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
842#define pte_offset_kernel(pmd, address) \ 871#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
843 ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
844#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 872#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
845#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) 873#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
846#define pte_unmap(pte) do { } while (0) 874#define pte_unmap(pte) do { } while (0)
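Taken together, the macros above give the usual software walk through the pgd/pud/pmd/pte levels. A sketch of a lookup for a mapped kernel address (assuming no large pages; the function name is invented for illustration):

static pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}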
@@ -930,17 +958,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
930#define __HAVE_ARCH_MEMMAP_INIT 958#define __HAVE_ARCH_MEMMAP_INIT
931extern void memmap_init(unsigned long, int, unsigned long, unsigned long); 959extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
932 960
933#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
934#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
935#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
936#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
937#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
938#define __HAVE_ARCH_PTEP_SET_WRPROTECT
939#define __HAVE_ARCH_PTE_SAME
940#define __HAVE_ARCH_PAGE_TEST_DIRTY
941#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
942#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
943#include <asm-generic/pgtable.h> 961#include <asm-generic/pgtable.h>
944 962
945#endif /* _S390_PAGE_H */ 963#endif /* _S390_PAGE_H */
946
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 3b972d4c6b29..21d40a19355e 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -93,7 +93,6 @@ struct thread_struct {
93 s390_fp_regs fp_regs; 93 s390_fp_regs fp_regs;
94 unsigned int acrs[NUM_ACRS]; 94 unsigned int acrs[NUM_ACRS];
95 unsigned long ksp; /* kernel stack pointer */ 95 unsigned long ksp; /* kernel stack pointer */
96 unsigned long user_seg; /* HSTD */
97 mm_segment_t mm_segment; 96 mm_segment_t mm_segment;
98 unsigned long prot_addr; /* address of protection-excep. */ 97 unsigned long prot_addr; /* address of protection-excep. */
99 unsigned int error_code; /* error-code of last prog-excep. */ 98 unsigned int error_code; /* error-code of last prog-excep. */
@@ -128,22 +127,9 @@ struct stack_frame {
128 127
129#define ARCH_MIN_TASKALIGN 8 128#define ARCH_MIN_TASKALIGN 8
130 129
131#ifndef __s390x__ 130#define INIT_THREAD { \
132# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE 131 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
133#else /* __s390x__ */ 132}
134# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _REGION_TABLE
135#endif /* __s390x__ */
136
137#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
138 {0},{0},{0},{0},{0},{0}}}, \
139 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, \
140 sizeof(init_stack) + (unsigned long) &init_stack, \
141 __SWAPPER_PG_DIR, \
142 {0}, \
143 0,0,0, \
144 (per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
145 0, 0 \
146}
147 133
148/* 134/*
149 * Do necessary setup to start up a new thread. 135 * Do necessary setup to start up a new thread.
diff --git a/include/asm-s390/scatterlist.h b/include/asm-s390/scatterlist.h
index a43b3afc5e2d..29ec8e28c8df 100644
--- a/include/asm-s390/scatterlist.h
+++ b/include/asm-s390/scatterlist.h
@@ -2,7 +2,10 @@
2#define _ASMS390_SCATTERLIST_H 2#define _ASMS390_SCATTERLIST_H
3 3
4struct scatterlist { 4struct scatterlist {
5 struct page *page; 5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
8 unsigned long page_link;
6 unsigned int offset; 9 unsigned int offset;
7 unsigned int length; 10 unsigned int length;
8}; 11};
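The struct page pointer now lives in page_link, whose low bits are reserved for the chaining and end-of-list markers, so users must go through the sg_page()/sg_phys()/sg_virt() accessors from <linux/scatterlist.h> instead of touching a page member directly. A hedged sketch of a per-entry mapping step after the conversion, mirroring the dma-mapping hunks further down (the function name is illustrative):

static dma_addr_t example_map_one_sg(struct device *dev,
				     struct scatterlist *sg,
				     enum dma_data_direction dir)
{
	BUG_ON(!sg_page(sg));		/* decodes page_link, masks the markers */

	dma_cache_sync(dev, sg_virt(sg), sg->length, dir);
	return sg_phys(sg);		/* page_to_phys(sg_page(sg)) + offset */
}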
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 51bd957b85bd..618693cfc10f 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -2,19 +2,130 @@
2#define _S390_TLB_H 2#define _S390_TLB_H
3 3
4/* 4/*
5 * s390 doesn't need any special per-pte or 5 * TLB flushing on s390 is complicated. The following requirement
6 * per-vma handling.. 6 * from the principles of operation is the most arduous:
7 *
8 * "A valid table entry must not be changed while it is attached
9 * to any CPU and may be used for translation by that CPU except to
10 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
11 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
12 * table entry, or (3) make a change by means of a COMPARE AND SWAP
13 * AND PURGE instruction that purges the TLB."
14 *
15 * The modification of a pte of an active mm struct therefore is
16 * a two step process: i) invalidate the pte, ii) store the new pte.
17 * This is true for the page protection bit as well.
18 * The only possible optimization is to flush at the beginning of
19 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
20 *
 21 * Pages used for the page tables are a different story. FIXME: more
7 */ 22 */
8#define tlb_start_vma(tlb, vma) do { } while (0) 23
9#define tlb_end_vma(tlb, vma) do { } while (0) 24#include <linux/mm.h>
10#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) 25#include <linux/swap.h>
26#include <asm/processor.h>
27#include <asm/pgalloc.h>
28#include <asm/smp.h>
29#include <asm/tlbflush.h>
30
31#ifndef CONFIG_SMP
32#define TLB_NR_PTRS 1
33#else
34#define TLB_NR_PTRS 508
35#endif
36
37struct mmu_gather {
38 struct mm_struct *mm;
39 unsigned int fullmm;
40 unsigned int nr_ptes;
41 unsigned int nr_pmds;
42 void *array[TLB_NR_PTRS];
43};
44
45DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
46
47static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
48 unsigned int full_mm_flush)
49{
50 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
51
52 tlb->mm = mm;
53 tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
54 (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
55 tlb->nr_ptes = 0;
56 tlb->nr_pmds = TLB_NR_PTRS;
57 if (tlb->fullmm)
58 __tlb_flush_mm(mm);
59 return tlb;
60}
61
62static inline void tlb_flush_mmu(struct mmu_gather *tlb,
63 unsigned long start, unsigned long end)
64{
65 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
66 __tlb_flush_mm(tlb->mm);
67 while (tlb->nr_ptes > 0)
68 pte_free(tlb->array[--tlb->nr_ptes]);
69 while (tlb->nr_pmds < TLB_NR_PTRS)
70 pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]);
71}
72
73static inline void tlb_finish_mmu(struct mmu_gather *tlb,
74 unsigned long start, unsigned long end)
75{
76 tlb_flush_mmu(tlb, start, end);
77
78 /* keep the page table cache within bounds */
79 check_pgt_cache();
80
81 put_cpu_var(mmu_gathers);
82}
11 83
12/* 84/*
13 * .. because we flush the whole mm when it 85 * Release the page cache reference for a pte removed by
 14 * fills up. 86 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 87 * has already been flushed, so just do free_page_and_swap_cache.
15 */ 88 */
16#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 89static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
90{
91 free_page_and_swap_cache(page);
92}
17 93
18#include <asm-generic/tlb.h> 94/*
95 * pte_free_tlb frees a pte table and clears the CRSTE for the
96 * page table from the tlb.
97 */
98static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
99{
100 if (!tlb->fullmm) {
101 tlb->array[tlb->nr_ptes++] = page;
102 if (tlb->nr_ptes >= tlb->nr_pmds)
103 tlb_flush_mmu(tlb, 0, 0);
104 } else
105 pte_free(page);
106}
19 107
108/*
109 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
110 * segment table entry from the tlb.
111 */
112static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
113{
114#ifdef __s390x__
115 if (!tlb->fullmm) {
116 tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
117 if (tlb->nr_ptes >= tlb->nr_pmds)
118 tlb_flush_mmu(tlb, 0, 0);
119 } else
120 pmd_free(pmd);
20#endif 121#endif
122}
123
124#define pud_free_tlb(tlb, pud) do { } while (0)
125
126#define tlb_start_vma(tlb, vma) do { } while (0)
127#define tlb_end_vma(tlb, vma) do { } while (0)
128#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
129#define tlb_migrate_finish(mm) do { } while (0)
130
131#endif /* _S390_TLB_H */
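A sketch of how the generic mm code drives this mmu_gather cycle during unmap (mm/memory.c style, heavily condensed; the function name is invented for illustration):

static void example_unmap_cycle(struct mm_struct *mm)
{
	struct mmu_gather *tlb;

	tlb = tlb_gather_mmu(mm, 0);	/* may flush up front if mm is idle */
	/*
	 * ... per-pte work: ptep_get_and_clear_full() + tlb_remove_page();
	 * freed page tables are queued through pte_free_tlb()/pmd_free_tlb()
	 * and sit in tlb->array until a flush forces them out ...
	 */
	tlb_finish_mmu(tlb, 0, 0);	/* final flush, then free the tables */
}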
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 6de2632a3e4f..a69bd2490d52 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -6,68 +6,19 @@
6#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
7 7
8/* 8/*
9 * TLB flushing: 9 * Flush all tlb entries on the local cpu.
10 *
11 * - flush_tlb() flushes the current mm struct TLBs
12 * - flush_tlb_all() flushes all processes TLBs
13 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
14 * - flush_tlb_page(vma, vmaddr) flushes one page
15 * - flush_tlb_range(vma, start, end) flushes a range of pages
16 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
17 */
18
19/*
20 * S/390 has three ways of flushing TLBs
21 * 'ptlb' does a flush of the local processor
22 * 'csp' flushes the TLBs on all PUs of a SMP
23 * 'ipte' invalidates a pte in a page table and flushes that out of
24 * the TLBs of all PUs of a SMP
25 */
26
27#define local_flush_tlb() \
28do { asm volatile("ptlb": : :"memory"); } while (0)
29
30#ifndef CONFIG_SMP
31
32/*
33 * We always need to flush, since s390 does not flush tlb
34 * on each context switch
35 */ 10 */
36 11static inline void __tlb_flush_local(void)
37static inline void flush_tlb(void)
38{ 12{
39 local_flush_tlb(); 13 asm volatile("ptlb" : : : "memory");
40} 14}
41static inline void flush_tlb_all(void)
42{
43 local_flush_tlb();
44}
45static inline void flush_tlb_mm(struct mm_struct *mm)
46{
47 local_flush_tlb();
48}
49static inline void flush_tlb_page(struct vm_area_struct *vma,
50 unsigned long addr)
51{
52 local_flush_tlb();
53}
54static inline void flush_tlb_range(struct vm_area_struct *vma,
55 unsigned long start, unsigned long end)
56{
57 local_flush_tlb();
58}
59
60#define flush_tlb_kernel_range(start, end) \
61 local_flush_tlb();
62
63#else
64 15
65#include <asm/smp.h> 16/*
66 17 * Flush all tlb entries on all cpus.
67extern void smp_ptlb_all(void); 18 */
68 19static inline void __tlb_flush_global(void)
69static inline void global_flush_tlb(void)
70{ 20{
21 extern void smp_ptlb_all(void);
71 register unsigned long reg2 asm("2"); 22 register unsigned long reg2 asm("2");
72 register unsigned long reg3 asm("3"); 23 register unsigned long reg3 asm("3");
73 register unsigned long reg4 asm("4"); 24 register unsigned long reg4 asm("4");
@@ -89,66 +40,75 @@ static inline void global_flush_tlb(void)
89} 40}
90 41
91/* 42/*
92 * We only have to do global flush of tlb if process run since last 43 * Flush all tlb entries of a page table on all cpus.
93 * flush on any other pu than current.
94 * If we have threads (mm->count > 1) we always do a global flush,
95 * since the process runs on more than one processor at the same time.
96 */ 44 */
45static inline void __tlb_flush_idte(pgd_t *pgd)
46{
47 asm volatile(
48 " .insn rrf,0xb98e0000,0,%0,%1,0"
49 : : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
50}
97 51
98static inline void __flush_tlb_mm(struct mm_struct * mm) 52static inline void __tlb_flush_mm(struct mm_struct * mm)
99{ 53{
100 cpumask_t local_cpumask; 54 cpumask_t local_cpumask;
101 55
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 56 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 57 return;
58 /*
59 * If the machine has IDTE we prefer to do a per mm flush
60 * on all cpus instead of doing a local flush if the mm
61 * only ran on the local cpu.
62 */
104 if (MACHINE_HAS_IDTE) { 63 if (MACHINE_HAS_IDTE) {
105 pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd); 64 pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
106 65
107 if (shadow_pgd) { 66 if (shadow_pgd)
108 asm volatile( 67 __tlb_flush_idte(shadow_pgd);
109 " .insn rrf,0xb98e0000,0,%0,%1,0" 68 __tlb_flush_idte(mm->pgd);
110 : : "a" (2048),
111 "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
112 }
113 asm volatile(
114 " .insn rrf,0xb98e0000,0,%0,%1,0"
115 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
116 return; 69 return;
117 } 70 }
118 preempt_disable(); 71 preempt_disable();
72 /*
73 * If the process only ran on the local cpu, do a local flush.
74 */
119 local_cpumask = cpumask_of_cpu(smp_processor_id()); 75 local_cpumask = cpumask_of_cpu(smp_processor_id());
120 if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) 76 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
121 local_flush_tlb(); 77 __tlb_flush_local();
122 else 78 else
123 global_flush_tlb(); 79 __tlb_flush_global();
124 preempt_enable(); 80 preempt_enable();
125} 81}
126 82
127static inline void flush_tlb(void) 83static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
128{
129 __flush_tlb_mm(current->mm);
130}
131static inline void flush_tlb_all(void)
132{
133 global_flush_tlb();
134}
135static inline void flush_tlb_mm(struct mm_struct *mm)
136{
137 __flush_tlb_mm(mm);
138}
139static inline void flush_tlb_page(struct vm_area_struct *vma,
140 unsigned long addr)
141{
142 __flush_tlb_mm(vma->vm_mm);
143}
144static inline void flush_tlb_range(struct vm_area_struct *vma,
145 unsigned long start, unsigned long end)
146{ 84{
147 __flush_tlb_mm(vma->vm_mm); 85 if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
86 __tlb_flush_mm(mm);
148} 87}
149 88
150#define flush_tlb_kernel_range(start, end) global_flush_tlb() 89/*
90 * TLB flushing:
91 * flush_tlb() - flushes the current mm struct TLBs
92 * flush_tlb_all() - flushes all processes TLBs
93 * flush_tlb_mm(mm) - flushes the specified mm context TLB's
94 * flush_tlb_page(vma, vmaddr) - flushes one page
95 * flush_tlb_range(vma, start, end) - flushes a range of pages
96 * flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
97 */
151 98
152#endif 99/*
100 * flush_tlb_mm goes together with ptep_set_wrprotect for the
101 * copy_page_range operation and flush_tlb_range is related to
102 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
103 * ptep_get_and_clear do not flush the TLBs directly if the mm has
104 * only one user. At the end of the update the flush_tlb_mm and
105 * flush_tlb_range functions need to do the flush.
106 */
107#define flush_tlb() do { } while (0)
108#define flush_tlb_all() do { } while (0)
109#define flush_tlb_mm(mm) __tlb_flush_mm_cond(mm)
110#define flush_tlb_page(vma, addr) do { } while (0)
 111#define flush_tlb_range(vma, start, end) __tlb_flush_mm_cond((vma)->vm_mm)
112#define flush_tlb_kernel_range(start, end) __tlb_flush_mm(&init_mm)
153 113
154#endif /* _S390_TLBFLUSH_H */ 114#endif /* _S390_TLBFLUSH_H */
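The deferred-flush contract described above looks roughly like this on the copy_page_range()/dup_mmap() side; a condensed sketch with an invented function name, assuming the usual fork-time helpers:

static void example_wrprotect_range(struct mm_struct *src_mm,
				    unsigned long addr, unsigned long end,
				    pte_t *pte)
{
	for (; addr != end; addr += PAGE_SIZE, pte++)
		/* no per-pte flush while src_mm has a single user */
		ptep_set_wrprotect(src_mm, addr, pte);

	/* one deferred flush at the end; expands to __tlb_flush_mm_cond() */
	flush_tlb_mm(src_mm);
}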
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 84fefdaa01a5..fcea067f7a9c 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -2,7 +2,7 @@
2#define __ASM_SH_DMA_MAPPING_H 2#define __ASM_SH_DMA_MAPPING_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <asm/cacheflush.h> 6#include <asm/cacheflush.h>
7#include <asm/io.h> 7#include <asm/io.h>
8 8
@@ -85,10 +85,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
85 85
86 for (i = 0; i < nents; i++) { 86 for (i = 0; i < nents; i++) {
87#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 87#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
88 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 88 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
89 sg[i].length, dir);
90#endif 89#endif
91 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 90 sg[i].dma_address = sg_phys(&sg[i]);
92 } 91 }
93 92
94 return nents; 93 return nents;
@@ -138,10 +137,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
138 137
139 for (i = 0; i < nelems; i++) { 138 for (i = 0; i < nelems; i++) {
140#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 139#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
141 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 140 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
142 sg[i].length, dir);
143#endif 141#endif
144 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 142 sg[i].dma_address = sg_phys(&sg[i]);
145 } 143 }
146} 144}
147 145
diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h
index b9ae53c38365..a7d0d1856a99 100644
--- a/include/asm-sh/scatterlist.h
+++ b/include/asm-sh/scatterlist.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page * page; /* Location for highmem page, if any */ 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset;/* for highmem, page offset */ 11 unsigned int offset;/* for highmem, page offset */
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index e661857f98dc..1438b763a5ea 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -2,7 +2,7 @@
2#define __ASM_SH_DMA_MAPPING_H 2#define __ASM_SH_DMA_MAPPING_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <asm/io.h> 6#include <asm/io.h>
7 7
8struct pci_dev; 8struct pci_dev;
@@ -71,10 +71,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
71 71
72 for (i = 0; i < nents; i++) { 72 for (i = 0; i < nents; i++) {
73#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 73#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
74 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 74 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
75 sg[i].length, dir);
76#endif 75#endif
77 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 76 sg[i].dma_address = sg_phys(&sg[i]);
78 } 77 }
79 78
80 return nents; 79 return nents;
@@ -124,10 +123,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
124 123
125 for (i = 0; i < nelems; i++) { 124 for (i = 0; i < nelems; i++) {
126#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) 125#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
127 dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, 126 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
128 sg[i].length, dir);
129#endif 127#endif
130 sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; 128 sg[i].dma_address = sg_phys(&sg[i]);
131 } 129 }
132} 130}
133 131
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
index 1c723f2d7a95..5109251970e7 100644
--- a/include/asm-sh64/scatterlist.h
+++ b/include/asm-sh64/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/types.h> 14#include <asm/types.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page * page; /* Location for highmem page, if any */ 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset;/* for highmem, page offset */ 21 unsigned int offset;/* for highmem, page offset */
19 dma_addr_t dma_address; 22 dma_addr_t dma_address;
20 unsigned int length; 23 unsigned int length;
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index 4055af90ad7e..e08d3d775b08 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -5,7 +5,10 @@
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7struct scatterlist { 7struct scatterlist {
8 struct page *page; 8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
9 unsigned int offset; 12 unsigned int offset;
10 13
11 unsigned int length; 14 unsigned int length;
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 703c5bbe6c8c..6df23f070b1a 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -6,7 +6,10 @@
6#include <asm/types.h> 6#include <asm/types.h>
7 7
8struct scatterlist { 8struct scatterlist {
9 struct page *page; 9#ifdef CONFIG_DEBUG_SG
10 unsigned long sg_magic;
11#endif
12 unsigned long page_link;
10 unsigned int offset; 13 unsigned int offset;
11 14
12 unsigned int length; 15 unsigned int length;
diff --git a/include/asm-v850/scatterlist.h b/include/asm-v850/scatterlist.h
index 56f402920db9..02d27b3fb061 100644
--- a/include/asm-v850/scatterlist.h
+++ b/include/asm-v850/scatterlist.h
@@ -17,7 +17,10 @@
17#include <asm/types.h> 17#include <asm/types.h>
18 18
19struct scatterlist { 19struct scatterlist {
20 struct page *page; 20#ifdef CONFIG_DEBUG_SG
21 unsigned long sg_magic;
22#endif
23 unsigned long page_link;
21 unsigned offset; 24 unsigned offset;
22 dma_addr_t dma_address; 25 dma_addr_t dma_address;
23 unsigned length; 26 unsigned length;
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index ef67b59dbdb9..dc031cf44633 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -28,8 +28,9 @@ struct setup_header {
28 u16 kernel_version; 28 u16 kernel_version;
29 u8 type_of_loader; 29 u8 type_of_loader;
30 u8 loadflags; 30 u8 loadflags;
31#define LOADED_HIGH 0x01 31#define LOADED_HIGH (1<<0)
32#define CAN_USE_HEAP 0x80 32#define KEEP_SEGMENTS (1<<6)
33#define CAN_USE_HEAP (1<<7)
33 u16 setup_move_size; 34 u16 setup_move_size;
34 u32 code32_start; 35 u32 code32_start;
35 u32 ramdisk_image; 36 u32 ramdisk_image;
@@ -41,6 +42,10 @@ struct setup_header {
41 u32 initrd_addr_max; 42 u32 initrd_addr_max;
42 u32 kernel_alignment; 43 u32 kernel_alignment;
43 u8 relocatable_kernel; 44 u8 relocatable_kernel;
45 u8 _pad2[3];
46 u32 cmdline_size;
47 u32 hardware_subarch;
48 u64 hardware_subarch_data;
44} __attribute__((packed)); 49} __attribute__((packed));
45 50
46struct sys_desc_table { 51struct sys_desc_table {
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index b3d43de44c59..9411a2d3f19c 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -27,6 +27,7 @@
27void global_flush_tlb(void); 27void global_flush_tlb(void);
28int change_page_attr(struct page *page, int numpages, pgprot_t prot); 28int change_page_attr(struct page *page, int numpages, pgprot_t prot);
29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); 29int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
30void clflush_cache_range(void *addr, int size);
30 31
31#ifdef CONFIG_DEBUG_PAGEALLOC 32#ifdef CONFIG_DEBUG_PAGEALLOC
32/* internal debugging function */ 33/* internal debugging function */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index d9ee5e52e91b..87a715367a1b 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
6 void *acpi_handle; 6 void *acpi_handle;
7#endif 7#endif
8#ifdef CONFIG_DMAR
9 void *iommu; /* hook for IOMMU specific extension */
10#endif
8}; 11};
9 12
10#endif /* _ASM_X86_DEVICE_H */ 13#endif /* _ASM_X86_DEVICE_H */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index 6a2d26cb5da6..55f01bd9e556 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -45,9 +45,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
45 WARN_ON(nents == 0 || sglist[0].length == 0); 45 WARN_ON(nents == 0 || sglist[0].length == 0);
46 46
47 for_each_sg(sglist, sg, nents, i) { 47 for_each_sg(sglist, sg, nents, i) {
48 BUG_ON(!sg->page); 48 BUG_ON(!sg_page(sg));
49 49
50 sg->dma_address = page_to_phys(sg->page) + sg->offset; 50 sg->dma_address = sg_phys(sg);
51 } 51 }
52 52
53 flush_write_buffers(); 53 flush_write_buffers();
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index bd5164aa8f63..0e7d997a34be 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 dma_addr_t dma_address; 12 dma_addr_t dma_address;
10 unsigned int length; 13 unsigned int length;
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index ef3986ba4b79..1847c72befeb 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -4,7 +4,10 @@
4#include <asm/types.h> 4#include <asm/types.h>
5 5
6struct scatterlist { 6struct scatterlist {
7 struct page *page; 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
8 unsigned int offset; 11 unsigned int offset;
9 unsigned int length; 12 unsigned int length;
10 dma_addr_t dma_address; 13 dma_addr_t dma_address;
diff --git a/include/asm-xtensa/scatterlist.h b/include/asm-xtensa/scatterlist.h
index ca337a294290..810080bb0a2b 100644
--- a/include/asm-xtensa/scatterlist.h
+++ b/include/asm-xtensa/scatterlist.h
@@ -14,7 +14,10 @@
14#include <asm/types.h> 14#include <asm/types.h>
15 15
16struct scatterlist { 16struct scatterlist {
17 struct page *page; 17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
18 unsigned int offset; 21 unsigned int offset;
19 dma_addr_t dma_address; 22 dma_addr_t dma_address;
20 unsigned int length; 23 unsigned int length;
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7a8d7ade28a0..bb017edffd56 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -56,10 +56,8 @@ typedef struct __user_cap_data_struct {
56 56
57struct vfs_cap_data { 57struct vfs_cap_data {
58 __u32 magic_etc; /* Little endian */ 58 __u32 magic_etc; /* Little endian */
59 struct { 59 __u32 permitted; /* Little endian */
60 __u32 permitted; /* Little endian */ 60 __u32 inheritable; /* Little endian */
61 __u32 inheritable; /* Little endian */
62 } data[1];
63}; 61};
64 62
65#ifdef __KERNEL__ 63#ifdef __KERNEL__
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
new file mode 100644
index 000000000000..ffb6439cb5e6
--- /dev/null
+++ b/include/linux/dmar.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
19 */
20
21#ifndef __DMAR_H__
22#define __DMAR_H__
23
24#include <linux/acpi.h>
25#include <linux/types.h>
26#include <linux/msi.h>
27
28#ifdef CONFIG_DMAR
29struct intel_iommu;
30
31extern char *dmar_get_fault_reason(u8 fault_reason);
32
33/* Can't use the common MSI interrupt functions
34 * since DMAR is not a pci device
35 */
36extern void dmar_msi_unmask(unsigned int irq);
37extern void dmar_msi_mask(unsigned int irq);
38extern void dmar_msi_read(int irq, struct msi_msg *msg);
39extern void dmar_msi_write(int irq, struct msi_msg *msg);
40extern int dmar_set_interrupt(struct intel_iommu *iommu);
41extern int arch_setup_dmar_msi(unsigned int irq);
42
43/* Intel IOMMU detection and initialization functions */
44extern void detect_intel_iommu(void);
45extern int intel_iommu_init(void);
46
47extern int dmar_table_init(void);
48extern int early_dmar_detect(void);
49
50extern struct list_head dmar_drhd_units;
51extern struct list_head dmar_rmrr_units;
52
53struct dmar_drhd_unit {
54 struct list_head list; /* list of drhd units */
55 u64 reg_base_addr; /* register base address*/
56 struct pci_dev **devices; /* target device array */
57 int devices_cnt; /* target device count */
58 u8 ignored:1; /* ignore drhd */
59 u8 include_all:1;
60 struct intel_iommu *iommu;
61};
62
63struct dmar_rmrr_unit {
64 struct list_head list; /* list of rmrr units */
65 u64 base_address; /* reserved base address*/
66 u64 end_address; /* reserved end address */
67 struct pci_dev **devices; /* target devices */
68 int devices_cnt; /* target device count */
69};
70
71#define for_each_drhd_unit(drhd) \
72 list_for_each_entry(drhd, &dmar_drhd_units, list)
73#define for_each_rmrr_units(rmrr) \
74 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
75#else
76static inline void detect_intel_iommu(void)
77{
78 return;
79}
80static inline int intel_iommu_init(void)
81{
82 return -ENODEV;
83}
84
85#endif /* !CONFIG_DMAR */
86#endif /* __DMAR_H__ */
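Minimal usage sketch for the structures and iterators above (assumes CONFIG_DMAR and that dmar_table_init() has populated the lists; the function name is invented for illustration):

static int __init example_count_usable_drhds(void)
{
	struct dmar_drhd_unit *drhd;
	int count = 0;

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;	/* unit marked to be skipped */
		count++;
	}
	return count;
}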
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0b9579a4cd42..14813b595802 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -298,7 +298,7 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
298 u64 attr); 298 u64 attr);
299extern int __init efi_uart_console_only (void); 299extern int __init efi_uart_console_only (void);
300extern void efi_initialize_iomem_resources(struct resource *code_resource, 300extern void efi_initialize_iomem_resources(struct resource *code_resource,
301 struct resource *data_resource); 301 struct resource *data_resource, struct resource *bss_resource);
302extern unsigned long efi_get_time(void); 302extern unsigned long efi_get_time(void);
303extern int efi_set_rtc_mmss(unsigned long nowtime); 303extern int efi_set_rtc_mmss(unsigned long nowtime);
304extern int is_available_memory(efi_memory_desc_t * md); 304extern int is_available_memory(efi_memory_desc_t * md);
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index 16cb25cbf7c5..dd57fe523e97 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -35,6 +35,7 @@ static inline struct efs_sb_info *SUPER_INFO(struct super_block *sb)
35} 35}
36 36
37struct statfs; 37struct statfs;
38struct fid;
38 39
39extern const struct inode_operations efs_dir_inode_operations; 40extern const struct inode_operations efs_dir_inode_operations;
40extern const struct file_operations efs_dir_operations; 41extern const struct file_operations efs_dir_operations;
@@ -45,7 +46,10 @@ extern efs_block_t efs_map_block(struct inode *, efs_block_t);
45extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); 46extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
46 47
47extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); 48extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
48extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp); 49extern struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid,
50 int fh_len, int fh_type);
51extern struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid,
52 int fh_len, int fh_type);
49extern struct dentry *efs_get_parent(struct dentry *); 53extern struct dentry *efs_get_parent(struct dentry *);
50extern int efs_bmap(struct inode *, int); 54extern int efs_bmap(struct inode *, int);
51 55
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 8872fe8392d6..51d214138814 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -4,9 +4,48 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct dentry; 6struct dentry;
7struct inode;
7struct super_block; 8struct super_block;
8struct vfsmount; 9struct vfsmount;
9 10
11/*
12 * The fileid_type identifies how the file within the filesystem is encoded.
13 * In theory this is freely set and parsed by the filesystem, but we try to
14 * stick to conventions so we can share some generic code and don't confuse
15 * sniffers like ethereal/wireshark.
16 *
17 * The filesystem must not use the value '0' or '0xff'.
18 */
19enum fid_type {
20 /*
21 * The root, or export point, of the filesystem.
22 * (Never actually passed down to the filesystem.
23 */
24 FILEID_ROOT = 0,
25
26 /*
27 * 32bit inode number, 32 bit generation number.
28 */
29 FILEID_INO32_GEN = 1,
30
31 /*
32 * 32bit inode number, 32 bit generation number,
33 * 32 bit parent directory inode number.
34 */
35 FILEID_INO32_GEN_PARENT = 2,
36};
37
38struct fid {
39 union {
40 struct {
41 u32 ino;
42 u32 gen;
43 u32 parent_ino;
44 u32 parent_gen;
45 } i32;
46 __u32 raw[6];
47 };
48};
10 49
11/** 50/**
12 * struct export_operations - for nfsd to communicate with file systems 51 * struct export_operations - for nfsd to communicate with file systems
@@ -15,43 +54,9 @@ struct vfsmount;
15 * @get_name: find the name for a given inode in a given directory 54 * @get_name: find the name for a given inode in a given directory
16 * @get_parent: find the parent of a given directory 55 * @get_parent: find the parent of a given directory
17 * @get_dentry: find a dentry for the inode given a file handle sub-fragment 56 * @get_dentry: find a dentry for the inode given a file handle sub-fragment
18 * @find_exported_dentry:
19 * set by the exporting module to a standard helper function.
20 *
21 * Description:
22 * The export_operations structure provides a means for nfsd to communicate
23 * with a particular exported file system - particularly enabling nfsd and
24 * the filesystem to co-operate when dealing with file handles.
25 *
26 * export_operations contains two basic operation for dealing with file
27 * handles, decode_fh() and encode_fh(), and allows for some other
28 * operations to be defined which standard helper routines use to get
29 * specific information from the filesystem.
30 *
31 * nfsd encodes information use to determine which filesystem a filehandle
32 * applies to in the initial part of the file handle. The remainder, termed
33 * a file handle fragment, is controlled completely by the filesystem. The
34 * standard helper routines assume that this fragment will contain one or
35 * two sub-fragments, one which identifies the file, and one which may be
36 * used to identify the (a) directory containing the file.
37 * 57 *
38 * In some situations, nfsd needs to get a dentry which is connected into a 58 * See Documentation/filesystems/Exporting for details on how to use
39 * specific part of the file tree. To allow for this, it passes the 59 * this interface correctly.
40 * function acceptable() together with a @context which can be used to see
41 * if the dentry is acceptable. As there can be multiple dentrys for a
42 * given file, the filesystem should check each one for acceptability before
43 * looking for the next. As soon as an acceptable one is found, it should
44 * be returned.
45 *
46 * decode_fh:
47 * @decode_fh is given a &struct super_block (@sb), a file handle fragment
48 * (@fh, @fh_len) and an acceptability testing function (@acceptable,
49 * @context). It should return a &struct dentry which refers to the same
50 * file that the file handle fragment refers to, and which passes the
51 * acceptability test. If it cannot, it should return a %NULL pointer if
52 * the file was found but no acceptable &dentries were available, or a
53 * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
54 * %ENOMEM).
55 * 60 *
56 * encode_fh: 61 * encode_fh:
57 * @encode_fh should store in the file handle fragment @fh (using at most 62 * @encode_fh should store in the file handle fragment @fh (using at most
@@ -63,6 +68,21 @@ struct vfsmount;
63 * the filehandle fragment. encode_fh() should return the number of bytes 68 * the filehandle fragment. encode_fh() should return the number of bytes
64 * stored or a negative error code such as %-ENOSPC 69 * stored or a negative error code such as %-ENOSPC
65 * 70 *
71 * fh_to_dentry:
72 * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
73 * fragment (@fh, @fh_len). It should return a &struct dentry which refers
74 * to the same file that the file handle fragment refers to. If it cannot,
75 * it should return a %NULL pointer if the file was found but no acceptable
76 * &dentries were available, or an %ERR_PTR error code indicating why it
77 * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be
78 * returned including, if necessary, a new dentry created with d_alloc_root.
79 * The caller can then find any other extant dentries by following the
80 * d_alias links.
81 *
82 * fh_to_parent:
83 * Same as @fh_to_dentry, except that it returns a pointer to the parent
84 * dentry if it was encoded into the filehandle fragment by @encode_fh.
85 *
66 * get_name: 86 * get_name:
67 * @get_name should find a name for the given @child in the given @parent 87 * @get_name should find a name for the given @child in the given @parent
68 * directory. The name should be stored in the @name (with the 88 * directory. The name should be stored in the @name (with the
@@ -75,52 +95,37 @@ struct vfsmount;
75 * is also a directory. In the event that it cannot be found, or storage 95 * is also a directory. In the event that it cannot be found, or storage
76 * space cannot be allocated, a %ERR_PTR should be returned. 96 * space cannot be allocated, a %ERR_PTR should be returned.
77 * 97 *
78 * get_dentry:
79 * Given a &super_block (@sb) and a pointer to a file-system specific inode
80 * identifier, possibly an inode number, (@inump) get_dentry() should find
81 * the identified inode and return a dentry for that inode. Any suitable
82 * dentry can be returned including, if necessary, a new dentry created with
83 * d_alloc_root. The caller can then find any other extant dentrys by
84 * following the d_alias links. If a new dentry was created using
85 * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
86 * should be d_rehash()ed.
87 *
88 * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
89 * can be returned. The @inump will be whatever was passed to
90 * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
91 *
92 * Locking rules: 98 * Locking rules:
93 * get_parent is called with child->d_inode->i_mutex down 99 * get_parent is called with child->d_inode->i_mutex down
94 * get_name is not (which is possibly inconsistent) 100 * get_name is not (which is possibly inconsistent)
95 */ 101 */
96 102
97struct export_operations { 103struct export_operations {
98 struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh,
99 int fh_len, int fh_type,
100 int (*acceptable)(void *context, struct dentry *de),
101 void *context);
102 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, 104 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
103 int connectable); 105 int connectable);
106 struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
107 int fh_len, int fh_type);
108 struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
109 int fh_len, int fh_type);
104 int (*get_name)(struct dentry *parent, char *name, 110 int (*get_name)(struct dentry *parent, char *name,
105 struct dentry *child); 111 struct dentry *child);
106 struct dentry * (*get_parent)(struct dentry *child); 112 struct dentry * (*get_parent)(struct dentry *child);
107 struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
108
109 /* This is set by the exporting module to a standard helper */
110 struct dentry * (*find_exported_dentry)(
111 struct super_block *sb, void *obj, void *parent,
112 int (*acceptable)(void *context, struct dentry *de),
113 void *context);
114}; 113};
115 114
116extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj, 115extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
117 void *parent, int (*acceptable)(void *context, struct dentry *de), 116 int *max_len, int connectable);
118 void *context); 117extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
119
120extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
121 int connectable);
122extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh,
123 int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), 118 int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
124 void *context); 119 void *context);
125 120
121/*
122 * Generic helpers for filesystems.
123 */
124extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
125 struct fid *fid, int fh_len, int fh_type,
126 struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
127extern struct dentry *generic_fh_to_parent(struct super_block *sb,
128 struct fid *fid, int fh_len, int fh_type,
129 struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
130
126#endif /* LINUX_EXPORTFS_H */ 131#endif /* LINUX_EXPORTFS_H */
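A sketch of how a filesystem wires up the new interface with the generic helpers, in the style of the in-tree conversions in this series; the example_* names and the lookup stub are invented for illustration:

static struct inode *example_get_inode(struct super_block *sb,
				       u64 ino, u32 generation)
{
	/* filesystem specific: read the inode, verify the generation */
	return NULL;				/* stub */
}

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid,
					   int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    example_get_inode);
}

static const struct export_operations example_export_ops = {
	.fh_to_dentry	= example_fh_to_dentry,
	.get_parent	= NULL,			/* filesystem specific */
};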
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index c77c3bbfe4bb..0f6c86c634fd 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -561,6 +561,7 @@ enum {
561#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1) 561#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
562#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \ 562#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
563 ~EXT2_DIR_ROUND) 563 ~EXT2_DIR_ROUND)
564#define EXT2_MAX_REC_LEN ((1<<16)-1)
564 565
565static inline ext2_fsblk_t 566static inline ext2_fsblk_t
566ext2_group_first_block_no(struct super_block *sb, unsigned long group_no) 567ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 50078bb30a1c..b3ec4a496d64 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -987,7 +987,7 @@ struct super_block {
987 const struct super_operations *s_op; 987 const struct super_operations *s_op;
988 struct dquot_operations *dq_op; 988 struct dquot_operations *dq_op;
989 struct quotactl_ops *s_qcop; 989 struct quotactl_ops *s_qcop;
990 struct export_operations *s_export_op; 990 const struct export_operations *s_export_op;
991 unsigned long s_flags; 991 unsigned long s_flags;
992 unsigned long s_magic; 992 unsigned long s_magic;
993 struct dentry *s_root; 993 struct dentry *s_root;
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
new file mode 100644
index 000000000000..7907a72403ee
--- /dev/null
+++ b/include/linux/i8042.h
@@ -0,0 +1,35 @@
1#ifndef _LINUX_I8042_H
2#define _LINUX_I8042_H
3
4/*
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10
11/*
12 * Standard commands.
13 */
14
15#define I8042_CMD_CTL_RCTR 0x0120
16#define I8042_CMD_CTL_WCTR 0x1060
17#define I8042_CMD_CTL_TEST 0x01aa
18
19#define I8042_CMD_KBD_DISABLE 0x00ad
20#define I8042_CMD_KBD_ENABLE 0x00ae
21#define I8042_CMD_KBD_TEST 0x01ab
22#define I8042_CMD_KBD_LOOP 0x11d2
23
24#define I8042_CMD_AUX_DISABLE 0x00a7
25#define I8042_CMD_AUX_ENABLE 0x00a8
26#define I8042_CMD_AUX_TEST 0x01a9
27#define I8042_CMD_AUX_SEND 0x10d4
28#define I8042_CMD_AUX_LOOP 0x11d3
29
30#define I8042_CMD_MUX_PFX 0x0090
31#define I8042_CMD_MUX_SEND 0x1090
32
33int i8042_command(unsigned char *param, int command);
34
35#endif
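A usage sketch for the newly exported helper; the function name example_read_ctr is hypothetical, and the remark about the constant encoding is an observation from the values above rather than documented API:

#include <linux/i8042.h>

/* The low byte of each I8042_CMD_* constant is the raw 8042 command; the
 * upper nibbles appear to encode how many parameter bytes are written and
 * read back (0x0120: command 0x20, no bytes sent, one byte returned). */
static int example_read_ctr(unsigned char *ctr)
{
	return i8042_command(ctr, I8042_CMD_CTL_RCTR);
}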
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 2e4b8dd03cfe..4ed4777bba67 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -667,7 +667,7 @@ typedef struct hwif_s {
667 u8 straight8; /* Alan's straight 8 check */ 667 u8 straight8; /* Alan's straight 8 check */
668 u8 bus_state; /* power state of the IDE bus */ 668 u8 bus_state; /* power state of the IDE bus */
669 669
670 u16 host_flags; 670 u32 host_flags;
671 671
672 u8 pio_mask; 672 u8 pio_mask;
673 673
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 6c9873f88287..ff203dd02919 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -34,6 +34,12 @@
34 name: 34 name:
35#endif 35#endif
36 36
37#ifndef WEAK
38#define WEAK(name) \
39 .weak name; \
40 name:
41#endif
42
37#define KPROBE_ENTRY(name) \ 43#define KPROBE_ENTRY(name) \
38 .pushsection .kprobes.text, "ax"; \ 44 .pushsection .kprobes.text, "ax"; \
39 ENTRY(name) 45 ENTRY(name)
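As an illustration of the new macro, WEAK(my_hook) — with my_hook a purely hypothetical symbol — expands per the definition above to the assembler directive .weak my_hook followed by the my_hook: label, so an assembly file can provide a default implementation that any regular (strong) definition of the same symbol elsewhere overrides at link time.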
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 654ef5544878..33f0ff0cf634 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -41,18 +41,15 @@ struct memory_block {
41#define MEM_ONLINE (1<<0) /* exposed to userspace */ 41#define MEM_ONLINE (1<<0) /* exposed to userspace */
42#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ 42#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
43#define MEM_OFFLINE (1<<2) /* exposed to userspace */ 43#define MEM_OFFLINE (1<<2) /* exposed to userspace */
44#define MEM_GOING_ONLINE (1<<3)
45#define MEM_CANCEL_ONLINE (1<<4)
46#define MEM_CANCEL_OFFLINE (1<<5)
44 47
45/* 48struct memory_notify {
46 * All of these states are currently kernel-internal for notifying 49 unsigned long start_pfn;
47 * kernel components and architectures. 50 unsigned long nr_pages;
48 * 51 int status_change_nid;
49 * For MEM_MAPPING_INVALID, all notifier chains with priority >0 52};
50 * are called before pfn_to_page() becomes invalid. The priority=0
51 * entry is reserved for the function that actually makes
52 * pfn_to_page() stop working. Any notifiers that want to be called
53 * after that should have priority <0.
54 */
55#define MEM_MAPPING_INVALID (1<<3)
56 53
57struct notifier_block; 54struct notifier_block;
58struct mem_section; 55struct mem_section;
@@ -69,21 +66,31 @@ static inline int register_memory_notifier(struct notifier_block *nb)
69static inline void unregister_memory_notifier(struct notifier_block *nb) 66static inline void unregister_memory_notifier(struct notifier_block *nb)
70{ 67{
71} 68}
69static inline int memory_notify(unsigned long val, void *v)
70{
71 return 0;
72}
72#else 73#else
74extern int register_memory_notifier(struct notifier_block *nb);
75extern void unregister_memory_notifier(struct notifier_block *nb);
73extern int register_new_memory(struct mem_section *); 76extern int register_new_memory(struct mem_section *);
74extern int unregister_memory_section(struct mem_section *); 77extern int unregister_memory_section(struct mem_section *);
75extern int memory_dev_init(void); 78extern int memory_dev_init(void);
76extern int remove_memory_block(unsigned long, struct mem_section *, int); 79extern int remove_memory_block(unsigned long, struct mem_section *, int);
77 80extern int memory_notify(unsigned long val, void *v);
78#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) 81#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
79 82
80 83
81#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 84#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
82 85
86#ifdef CONFIG_MEMORY_HOTPLUG
83#define hotplug_memory_notifier(fn, pri) { \ 87#define hotplug_memory_notifier(fn, pri) { \
84 static struct notifier_block fn##_mem_nb = \ 88 static struct notifier_block fn##_mem_nb = \
85 { .notifier_call = fn, .priority = pri }; \ 89 { .notifier_call = fn, .priority = pri }; \
86 register_memory_notifier(&fn##_mem_nb); \ 90 register_memory_notifier(&fn##_mem_nb); \
87} 91}
92#else
93#define hotplug_memory_notifier(fn, pri) do { } while (0)
94#endif
88 95
89#endif /* _LINUX_MEMORY_H_ */ 96#endif /* _LINUX_MEMORY_H_ */
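A sketch of how a kernel component might consume the new notification states; the callback and its name are illustrative only:

#include <linux/init.h>
#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_callback(struct notifier_block *nb,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* prepare for mn->nr_pages pages coming online at mn->start_pfn */
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		/* undo or release whatever was set up above */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	/* per the #ifdef above, this registers only when memory hotplug is configured */
	hotplug_memory_notifier(example_mem_callback, 0);
	return 0;
}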
diff --git a/include/linux/net.h b/include/linux/net.h
index c136abce7ef6..dd79cdb8c4cf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,6 +313,10 @@ static const struct proto_ops name##_ops = { \
313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ 313#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) 314 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
315 315
316#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \
317 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
318 "-type-" __stringify(type))
319
316#ifdef CONFIG_SYSCTL 320#ifdef CONFIG_SYSCTL
317#include <linux/sysctl.h> 321#include <linux/sysctl.h>
318extern ctl_table net_table[]; 322extern ctl_table net_table[];
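A purely illustrative use of the new alias macro; the numbers are arbitrary and only show the string it produces:

#include <linux/module.h>
#include <linux/net.h>

/* Expands to MODULE_ALIAS("net-pf-31-proto-2-type-5"), so module autoloading
 * can match on the (protocol family, protocol, socket type) triple rather
 * than on family and protocol alone. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(31, 2, 5);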
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6f85db3535e2..4a3f54e358e5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
996 * 996 *
997 * Check individual transmit queue of a device with multiple transmit queues. 997 * Check individual transmit queue of a device with multiple transmit queues.
998 */ 998 */
999static inline int netif_subqueue_stopped(const struct net_device *dev, 999static inline int __netif_subqueue_stopped(const struct net_device *dev,
1000 u16 queue_index) 1000 u16 queue_index)
1001{ 1001{
1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1002#ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
1007#endif 1007#endif
1008} 1008}
1009 1009
1010static inline int netif_subqueue_stopped(const struct net_device *dev,
1011 struct sk_buff *skb)
1012{
1013 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1014}
1010 1015
1011/** 1016/**
1012 * netif_wake_subqueue - allow sending packets on subqueue 1017 * netif_wake_subqueue - allow sending packets on subqueue
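A sketch of the intended call pattern: a caller holding an skb no longer needs to pass a queue index, since the new wrapper derives it from the skb itself. The function name is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_may_transmit(struct net_device *dev, struct sk_buff *skb)
{
	/* equivalent to __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
	 * skb_get_queue_mapping() returns 0 when multiqueue is not configured
	 * (see the skbuff.h hunk below) */
	return !netif_subqueue_stopped(dev, skb);
}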
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 768b93359f90..5d2281f661f7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -141,6 +141,7 @@ struct pci_dev {
141 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 141 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
142 u8 revision; /* PCI revision, low byte of class word */ 142 u8 revision; /* PCI revision, low byte of class word */
143 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 143 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
144 u8 pcie_type; /* PCI-E device/port type */
144 u8 rom_base_reg; /* which config register controls the ROM */ 145 u8 rom_base_reg; /* which config register controls the ROM */
145 u8 pin; /* which interrupt pin this device uses */ 146 u8 pin; /* which interrupt pin this device uses */
146 147
@@ -183,6 +184,7 @@ struct pci_dev {
183 unsigned int msi_enabled:1; 184 unsigned int msi_enabled:1;
184 unsigned int msix_enabled:1; 185 unsigned int msix_enabled:1;
185 unsigned int is_managed:1; 186 unsigned int is_managed:1;
187 unsigned int is_pcie:1;
186 atomic_t enable_cnt; /* pci_enable_device has been called */ 188 atomic_t enable_cnt; /* pci_enable_device has been called */
187 189
188 u32 saved_config_space[16]; /* config space saved at suspend time */ 190 u32 saved_config_space[16]; /* config space saved at suspend time */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index df948b44edad..4e10a074ca56 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1943,6 +1943,7 @@
1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658 1943#define PCI_DEVICE_ID_TIGON3_5720 0x1658
1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659 1944#define PCI_DEVICE_ID_TIGON3_5721 0x1659
1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a 1945#define PCI_DEVICE_ID_TIGON3_5722 0x165a
1946#define PCI_DEVICE_ID_TIGON3_5723 0x165b
1946#define PCI_DEVICE_ID_TIGON3_5705M 0x165d 1947#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
1947#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e 1948#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
1948#define PCI_DEVICE_ID_TIGON3_5714 0x1668 1949#define PCI_DEVICE_ID_TIGON3_5714 0x1668
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 72bfccd3da22..422eab4958a6 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -28,6 +28,8 @@
28#include <linux/reiserfs_fs_sb.h> 28#include <linux/reiserfs_fs_sb.h>
29#endif 29#endif
30 30
31struct fid;
32
31/* 33/*
32 * include/linux/reiser_fs.h 34 * include/linux/reiser_fs.h
33 * 35 *
@@ -1877,12 +1879,10 @@ void reiserfs_delete_inode(struct inode *inode);
1877int reiserfs_write_inode(struct inode *inode, int); 1879int reiserfs_write_inode(struct inode *inode, int);
1878int reiserfs_get_block(struct inode *inode, sector_t block, 1880int reiserfs_get_block(struct inode *inode, sector_t block,
1879 struct buffer_head *bh_result, int create); 1881 struct buffer_head *bh_result, int create);
1880struct dentry *reiserfs_get_dentry(struct super_block *, void *); 1882struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1881struct dentry *reiserfs_decode_fh(struct super_block *sb, __u32 * data, 1883 int fh_len, int fh_type);
1882 int len, int fhtype, 1884struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1883 int (*acceptable) (void *contect, 1885 int fh_len, int fh_type);
1884 struct dentry * de),
1885 void *context);
1886int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, 1886int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
1887 int connectable); 1887 int connectable);
1888 1888
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 2dc7464cce52..42daf5e15265 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -4,47 +4,95 @@
4#include <asm/scatterlist.h> 4#include <asm/scatterlist.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <asm/io.h>
7 8
9/*
10 * Notes on SG table design.
11 *
12 * Architectures must provide an unsigned long page_link field in the
13 * scatterlist struct. We use that to place the page pointer AND encode
14 * information about the sg table as well. The two lower bits are reserved
15 * for this information.
16 *
17 * If bit 0 is set, then the page_link contains a pointer to the next sg
18 * table list. Otherwise the next entry is at sg + 1.
19 *
20 * If bit 1 is set, then this sg entry is the last element in a list.
21 *
22 * See sg_next().
23 *
24 */
25
26#define SG_MAGIC 0x87654321
27
28/**
29 * sg_set_page - Set sg entry to point at given page
30 * @sg: SG entry
31 * @page: The page
32 *
33 * Description:
34 * Use this function to set an sg entry pointing at a page, never assign
35 * the page directly. We encode sg table information in the lower bits
36 * of the page pointer. See sg_page() for looking up the page belonging
37 * to an sg entry.
38 *
39 **/
40static inline void sg_set_page(struct scatterlist *sg, struct page *page)
41{
42 unsigned long page_link = sg->page_link & 0x3;
43
44#ifdef CONFIG_DEBUG_SG
45 BUG_ON(sg->sg_magic != SG_MAGIC);
46#endif
47 sg->page_link = page_link | (unsigned long) page;
48}
49
50#define sg_page(sg) ((struct page *) ((sg)->page_link & ~0x3))
51
52/**
53 * sg_set_buf - Set sg entry to point at given data
54 * @sg: SG entry
55 * @buf: Data
56 * @buflen: Data length
57 *
58 **/
8static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 59static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
9 unsigned int buflen) 60 unsigned int buflen)
10{ 61{
11 sg->page = virt_to_page(buf); 62 sg_set_page(sg, virt_to_page(buf));
12 sg->offset = offset_in_page(buf); 63 sg->offset = offset_in_page(buf);
13 sg->length = buflen; 64 sg->length = buflen;
14} 65}
15 66
16static inline void sg_init_one(struct scatterlist *sg, const void *buf,
17 unsigned int buflen)
18{
19 memset(sg, 0, sizeof(*sg));
20 sg_set_buf(sg, buf, buflen);
21}
22
23/* 67/*
24 * We overload the LSB of the page pointer to indicate whether it's 68 * We overload the LSB of the page pointer to indicate whether it's
25 * a valid sg entry, or whether it points to the start of a new scatterlist. 69 * a valid sg entry, or whether it points to the start of a new scatterlist.
26 * Those low bits are there for everyone! (thanks mason :-) 70 * Those low bits are there for everyone! (thanks mason :-)
27 */ 71 */
28#define sg_is_chain(sg) ((unsigned long) (sg)->page & 0x01) 72#define sg_is_chain(sg) ((sg)->page_link & 0x01)
73#define sg_is_last(sg) ((sg)->page_link & 0x02)
29#define sg_chain_ptr(sg) \ 74#define sg_chain_ptr(sg) \
30 ((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01)) 75 ((struct scatterlist *) ((sg)->page_link & ~0x03))
31 76
32/** 77/**
33 * sg_next - return the next scatterlist entry in a list 78 * sg_next - return the next scatterlist entry in a list
34 * @sg: The current sg entry 79 * @sg: The current sg entry
35 * 80 *
36 * Usually the next entry will be @sg@ + 1, but if this sg element is part 81 * Description:
37 * of a chained scatterlist, it could jump to the start of a new 82 * Usually the next entry will be @sg@ + 1, but if this sg element is part
38 * scatterlist array. 83 * of a chained scatterlist, it could jump to the start of a new
39 * 84 * scatterlist array.
40 * Note that the caller must ensure that there are further entries after
41 * the current entry, this function will NOT return NULL for an end-of-list.
42 * 85 *
43 */ 86 **/
44static inline struct scatterlist *sg_next(struct scatterlist *sg) 87static inline struct scatterlist *sg_next(struct scatterlist *sg)
45{ 88{
46 sg++; 89#ifdef CONFIG_DEBUG_SG
90 BUG_ON(sg->sg_magic != SG_MAGIC);
91#endif
92 if (sg_is_last(sg))
93 return NULL;
47 94
95 sg++;
48 if (unlikely(sg_is_chain(sg))) 96 if (unlikely(sg_is_chain(sg)))
49 sg = sg_chain_ptr(sg); 97 sg = sg_chain_ptr(sg);
50 98
@@ -62,14 +110,15 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
62 * @sgl: First entry in the scatterlist 110 * @sgl: First entry in the scatterlist
63 * @nents: Number of entries in the scatterlist 111 * @nents: Number of entries in the scatterlist
64 * 112 *
65 * Should only be used casually, it (currently) scan the entire list 113 * Description:
66 * to get the last entry. 114 * Should only be used casually, it (currently) scan the entire list
115 * to get the last entry.
67 * 116 *
68 * Note that the @sgl@ pointer passed in need not be the first one, 117 * Note that the @sgl@ pointer passed in need not be the first one,
69 * the important bit is that @nents@ denotes the number of entries that 118 * the important bit is that @nents@ denotes the number of entries that
70 * exist from @sgl@. 119 * exist from @sgl@.
71 * 120 *
72 */ 121 **/
73static inline struct scatterlist *sg_last(struct scatterlist *sgl, 122static inline struct scatterlist *sg_last(struct scatterlist *sgl,
74 unsigned int nents) 123 unsigned int nents)
75{ 124{
@@ -83,6 +132,10 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl,
83 ret = sg; 132 ret = sg;
84 133
85#endif 134#endif
135#ifdef CONFIG_DEBUG_SG
136 BUG_ON(sgl[0].sg_magic != SG_MAGIC);
137 BUG_ON(!sg_is_last(ret));
138#endif
86 return ret; 139 return ret;
87} 140}
88 141
@@ -92,16 +145,111 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl,
92 * @prv_nents: Number of entries in prv 145 * @prv_nents: Number of entries in prv
93 * @sgl: Second scatterlist 146 * @sgl: Second scatterlist
94 * 147 *
95 * Links @prv@ and @sgl@ together, to form a longer scatterlist. 148 * Description:
149 * Links @prv@ and @sgl@ together, to form a longer scatterlist.
96 * 150 *
97 */ 151 **/
98static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, 152static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
99 struct scatterlist *sgl) 153 struct scatterlist *sgl)
100{ 154{
101#ifndef ARCH_HAS_SG_CHAIN 155#ifndef ARCH_HAS_SG_CHAIN
102 BUG(); 156 BUG();
103#endif 157#endif
104 prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01); 158 prv[prv_nents - 1].page_link = (unsigned long) sgl | 0x01;
159}
160
161/**
162 * sg_mark_end - Mark the end of the scatterlist
163 * @sgl: Scatterlist
164 * @nents: Number of entries in sgl
165 *
166 * Description:
167 * Marks the last entry as the termination point for sg_next()
168 *
169 **/
170static inline void sg_mark_end(struct scatterlist *sgl, unsigned int nents)
171{
172 sgl[nents - 1].page_link = 0x02;
173}
174
175static inline void __sg_mark_end(struct scatterlist *sg)
176{
177 sg->page_link |= 0x02;
178}
179
180/**
181 * sg_init_one - Initialize a single entry sg list
182 * @sg: SG entry
183 * @buf: Virtual address for IO
184 * @buflen: IO length
185 *
186 * Notes:
187 * This should not be used on a single entry that is part of a larger
188 * table. Use sg_init_table() for that.
189 *
190 **/
191static inline void sg_init_one(struct scatterlist *sg, const void *buf,
192 unsigned int buflen)
193{
194 memset(sg, 0, sizeof(*sg));
195#ifdef CONFIG_DEBUG_SG
196 sg->sg_magic = SG_MAGIC;
197#endif
198 sg_mark_end(sg, 1);
199 sg_set_buf(sg, buf, buflen);
200}
201
202/**
203 * sg_init_table - Initialize SG table
204 * @sgl: The SG table
205 * @nents: Number of entries in table
206 *
207 * Notes:
208 * If this is part of a chained sg table, sg_mark_end() should be
209 * used only on the last table part.
210 *
211 **/
212static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
213{
214 memset(sgl, 0, sizeof(*sgl) * nents);
215 sg_mark_end(sgl, nents);
216#ifdef CONFIG_DEBUG_SG
217 {
218 int i;
219 for (i = 0; i < nents; i++)
220 sgl[i].sg_magic = SG_MAGIC;
221 }
222#endif
223}
224
225/**
226 * sg_phys - Return physical address of an sg entry
227 * @sg: SG entry
228 *
229 * Description:
230 * This calls page_to_phys() on the page in this sg entry, and adds the
231 * sg offset. The caller must know that it is legal to call page_to_phys()
232 * on the sg page.
233 *
234 **/
235static inline unsigned long sg_phys(struct scatterlist *sg)
236{
237 return page_to_phys(sg_page(sg)) + sg->offset;
238}
239
240/**
241 * sg_virt - Return virtual address of an sg entry
242 * @sg: SG entry
243 *
244 * Description:
245 * This calls page_address() on the page in this sg entry, and adds the
246 * sg offset. The caller must know that the sg page has a valid virtual
247 * mapping.
248 *
249 **/
250static inline void *sg_virt(struct scatterlist *sg)
251{
252 return page_address(sg_page(sg)) + sg->offset;
105} 253}
106 254
107#endif /* _LINUX_SCATTERLIST_H */ 255#endif /* _LINUX_SCATTERLIST_H */
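A minimal usage sketch for the reworked API (not part of the header): initialise a table, point entries at buffers, and walk it with sg_next(), which now returns NULL at the entry marked as last. The helper name and parameters are hypothetical:

#include <linux/scatterlist.h>

static void example_fill_and_walk(struct scatterlist *sgl, unsigned int nents,
				  void **bufs, unsigned int *lens)
{
	struct scatterlist *sg;
	unsigned int i;

	sg_init_table(sgl, nents);	/* zeroes the entries and marks the last one */
	for (i = 0; i < nents; i++)
		sg_set_buf(&sgl[i], bufs[i], lens[i]);

	for (sg = sgl; sg; sg = sg_next(sg)) {
		/* sg_virt()/sg_phys() give each chunk's kernel or physical address */
		void *va = sg_virt(sg);
		(void)va;
	}
}

Chaining works as before except that the link now lives in page_link: sg_chain() turns the last entry of the first array into a pointer to the second array, and sg_next() follows it transparently.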
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f93f22b3d2ff..fd4e12f24270 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -41,8 +41,7 @@
41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ 41#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
42 ~(SMP_CACHE_BYTES - 1)) 42 ~(SMP_CACHE_BYTES - 1))
43#define SKB_WITH_OVERHEAD(X) \ 43#define SKB_WITH_OVERHEAD(X) \
44 (((X) - sizeof(struct skb_shared_info)) & \ 44 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
45 ~(SMP_CACHE_BYTES - 1))
46#define SKB_MAX_ORDER(X, ORDER) \ 45#define SKB_MAX_ORDER(X, ORDER) \
47 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) 46 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
48#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) 47#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -301,8 +300,9 @@ struct sk_buff {
301#endif 300#endif
302 301
303 int iif; 302 int iif;
303#ifdef CONFIG_NETDEVICES_MULTIQUEUE
304 __u16 queue_mapping; 304 __u16 queue_mapping;
305 305#endif
306#ifdef CONFIG_NET_SCHED 306#ifdef CONFIG_NET_SCHED
307 __u16 tc_index; /* traffic control index */ 307 __u16 tc_index; /* traffic control index */
308#ifdef CONFIG_NET_CLS_ACT 308#ifdef CONFIG_NET_CLS_ACT
@@ -1770,6 +1770,15 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1770#endif 1770#endif
1771} 1771}
1772 1772
1773static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1774{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1776 return skb->queue_mapping;
1777#else
1778 return 0;
1779#endif
1780}
1781
1773static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 1782static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1774{ 1783{
1775#ifdef CONFIG_NETDEVICES_MULTIQUEUE 1784#ifdef CONFIG_NETDEVICES_MULTIQUEUE
diff --git a/include/linux/socket.h b/include/linux/socket.h
index f852e1afd65a..c22ef1c1afb8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -291,6 +291,7 @@ struct ucred {
291#define SOL_TIPC 271 291#define SOL_TIPC 271
292#define SOL_RXRPC 272 292#define SOL_RXRPC 272
293#define SOL_PPPOL2TP 273 293#define SOL_PPPOL2TP 273
294#define SOL_BLUETOOTH 274
294 295
295/* IPX options */ 296/* IPX options */
296#define IPX_TYPE 1 297#define IPX_TYPE 1
diff --git a/include/linux/videodev.h b/include/linux/videodev.h
index 8dba97a291f6..52e3d5fd5be4 100644
--- a/include/linux/videodev.h
+++ b/include/linux/videodev.h
@@ -294,48 +294,6 @@ struct video_code
294#define VID_PLAY_RESET 13 294#define VID_PLAY_RESET 13
295#define VID_PLAY_END_MARK 14 295#define VID_PLAY_END_MARK 14
296 296
297
298
299#define VID_HARDWARE_BT848 1
300#define VID_HARDWARE_QCAM_BW 2
301#define VID_HARDWARE_PMS 3
302#define VID_HARDWARE_QCAM_C 4
303#define VID_HARDWARE_PSEUDO 5
304#define VID_HARDWARE_SAA5249 6
305#define VID_HARDWARE_AZTECH 7
306#define VID_HARDWARE_SF16MI 8
307#define VID_HARDWARE_RTRACK 9
308#define VID_HARDWARE_ZOLTRIX 10
309#define VID_HARDWARE_SAA7146 11
310#define VID_HARDWARE_VIDEUM 12 /* Reserved for Winnov videum */
311#define VID_HARDWARE_RTRACK2 13
312#define VID_HARDWARE_PERMEDIA2 14 /* Reserved for Permedia2 */
313#define VID_HARDWARE_RIVA128 15 /* Reserved for RIVA 128 */
314#define VID_HARDWARE_PLANB 16 /* PowerMac motherboard video-in */
315#define VID_HARDWARE_BROADWAY 17 /* Broadway project */
316#define VID_HARDWARE_GEMTEK 18
317#define VID_HARDWARE_TYPHOON 19
318#define VID_HARDWARE_VINO 20 /* SGI Indy Vino */
319#define VID_HARDWARE_CADET 21 /* Cadet radio */
320#define VID_HARDWARE_TRUST 22 /* Trust FM Radio */
321#define VID_HARDWARE_TERRATEC 23 /* TerraTec ActiveRadio */
322#define VID_HARDWARE_CPIA 24
323#define VID_HARDWARE_ZR36120 25 /* Zoran ZR36120/ZR36125 */
324#define VID_HARDWARE_ZR36067 26 /* Zoran ZR36067/36060 */
325#define VID_HARDWARE_OV511 27
326#define VID_HARDWARE_ZR356700 28 /* Zoran 36700 series */
327#define VID_HARDWARE_W9966 29
328#define VID_HARDWARE_SE401 30 /* SE401 USB webcams */
329#define VID_HARDWARE_PWC 31 /* Philips webcams */
330#define VID_HARDWARE_MEYE 32 /* Sony Vaio MotionEye cameras */
331#define VID_HARDWARE_CPIA2 33
332#define VID_HARDWARE_VICAM 34
333#define VID_HARDWARE_SF16FMR2 35
334#define VID_HARDWARE_W9968CF 36
335#define VID_HARDWARE_SAA7114H 37
336#define VID_HARDWARE_SN9C102 38
337#define VID_HARDWARE_ARV 39
338
339#endif /* CONFIG_VIDEO_V4L1_COMPAT */ 297#endif /* CONFIG_VIDEO_V4L1_COMPAT */
340 298
341#endif /* __LINUX_VIDEODEV_H */ 299#endif /* __LINUX_VIDEODEV_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 1f503e94eff1..439474f24e34 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -441,94 +441,6 @@ struct v4l2_timecode
441#define V4L2_TC_USERBITS_8BITCHARS 0x0008 441#define V4L2_TC_USERBITS_8BITCHARS 0x0008
442/* The above is based on SMPTE timecodes */ 442/* The above is based on SMPTE timecodes */
443 443
444#ifdef __KERNEL__
445/*
446 * M P E G C O M P R E S S I O N P A R A M E T E R S
447 *
448 * ### WARNING: This experimental MPEG compression API is obsolete.
449 * ### It is replaced by the MPEG controls API.
450 * ### This old API will disappear in the near future!
451 *
452 */
453enum v4l2_bitrate_mode {
454 V4L2_BITRATE_NONE = 0, /* not specified */
455 V4L2_BITRATE_CBR, /* constant bitrate */
456 V4L2_BITRATE_VBR, /* variable bitrate */
457};
458struct v4l2_bitrate {
459 /* rates are specified in kbit/sec */
460 enum v4l2_bitrate_mode mode;
461 __u32 min;
462 __u32 target; /* use this one for CBR */
463 __u32 max;
464};
465
466enum v4l2_mpeg_streamtype {
467 V4L2_MPEG_SS_1, /* MPEG-1 system stream */
468 V4L2_MPEG_PS_2, /* MPEG-2 program stream */
469 V4L2_MPEG_TS_2, /* MPEG-2 transport stream */
470 V4L2_MPEG_PS_DVD, /* MPEG-2 program stream with DVD header fixups */
471};
472enum v4l2_mpeg_audiotype {
473 V4L2_MPEG_AU_2_I, /* MPEG-2 layer 1 */
474 V4L2_MPEG_AU_2_II, /* MPEG-2 layer 2 */
475 V4L2_MPEG_AU_2_III, /* MPEG-2 layer 3 */
476 V4L2_MPEG_AC3, /* AC3 */
477 V4L2_MPEG_LPCM, /* LPCM */
478};
479enum v4l2_mpeg_videotype {
480 V4L2_MPEG_VI_1, /* MPEG-1 */
481 V4L2_MPEG_VI_2, /* MPEG-2 */
482};
483enum v4l2_mpeg_aspectratio {
484 V4L2_MPEG_ASPECT_SQUARE = 1, /* square pixel */
485 V4L2_MPEG_ASPECT_4_3 = 2, /* 4 : 3 */
486 V4L2_MPEG_ASPECT_16_9 = 3, /* 16 : 9 */
487 V4L2_MPEG_ASPECT_1_221 = 4, /* 1 : 2,21 */
488};
489
490struct v4l2_mpeg_compression {
491 /* general */
492 enum v4l2_mpeg_streamtype st_type;
493 struct v4l2_bitrate st_bitrate;
494
495 /* transport streams */
496 __u16 ts_pid_pmt;
497 __u16 ts_pid_audio;
498 __u16 ts_pid_video;
499 __u16 ts_pid_pcr;
500
501 /* program stream */
502 __u16 ps_size;
503 __u16 reserved_1; /* align */
504
505 /* audio */
506 enum v4l2_mpeg_audiotype au_type;
507 struct v4l2_bitrate au_bitrate;
508 __u32 au_sample_rate;
509 __u8 au_pesid;
510 __u8 reserved_2[3]; /* align */
511
512 /* video */
513 enum v4l2_mpeg_videotype vi_type;
514 enum v4l2_mpeg_aspectratio vi_aspect_ratio;
515 struct v4l2_bitrate vi_bitrate;
516 __u32 vi_frame_rate;
517 __u16 vi_frames_per_gop;
518 __u16 vi_bframes_count;
519 __u8 vi_pesid;
520 __u8 reserved_3[3]; /* align */
521
522 /* misc flags */
523 __u32 closed_gops:1;
524 __u32 pulldown:1;
525 __u32 reserved_4:30; /* align */
526
527 /* I don't expect the above being perfect yet ;) */
528 __u32 reserved_5[8];
529};
530#endif
531
532struct v4l2_jpegcompression 444struct v4l2_jpegcompression
533{ 445{
534 int quality; 446 int quality;
@@ -1420,10 +1332,6 @@ struct v4l2_chip_ident {
1420#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) 1332#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc)
1421#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) 1333#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format)
1422#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) 1334#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format)
1423#ifdef __KERNEL__
1424#define VIDIOC_G_MPEGCOMP _IOR ('V', 6, struct v4l2_mpeg_compression)
1425#define VIDIOC_S_MPEGCOMP _IOW ('V', 7, struct v4l2_mpeg_compression)
1426#endif
1427#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) 1335#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers)
1428#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) 1336#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer)
1429#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) 1337#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer)
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index e75d5e6c4cea..c544c6f90893 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -94,7 +94,6 @@ struct video_device
94 char name[32]; 94 char name[32];
95 int type; /* v4l1 */ 95 int type; /* v4l1 */
96 int type2; /* v4l2 */ 96 int type2; /* v4l2 */
97 int hardware;
98 int minor; 97 int minor;
99 98
100 int debug; /* Activates debug level*/ 99 int debug; /* Activates debug level*/
@@ -272,10 +271,6 @@ struct video_device
272 int (*vidioc_s_crop) (struct file *file, void *fh, 271 int (*vidioc_s_crop) (struct file *file, void *fh,
273 struct v4l2_crop *a); 272 struct v4l2_crop *a);
274 /* Compression ioctls */ 273 /* Compression ioctls */
275 int (*vidioc_g_mpegcomp) (struct file *file, void *fh,
276 struct v4l2_mpeg_compression *a);
277 int (*vidioc_s_mpegcomp) (struct file *file, void *fh,
278 struct v4l2_mpeg_compression *a);
279 int (*vidioc_g_jpegcomp) (struct file *file, void *fh, 274 int (*vidioc_g_jpegcomp) (struct file *file, void *fh,
280 struct v4l2_jpegcompression *a); 275 struct v4l2_jpegcompression *a);
281 int (*vidioc_s_jpegcomp) (struct file *file, void *fh, 276 int (*vidioc_s_jpegcomp) (struct file *file, void *fh,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ebfb96b41106..a8a9eb6af966 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -200,119 +200,18 @@ enum {
200#define HCI_LM_SECURE 0x0020 200#define HCI_LM_SECURE 0x0020
201 201
202/* ----- HCI Commands ---- */ 202/* ----- HCI Commands ---- */
203/* OGF & OCF values */ 203#define HCI_OP_INQUIRY 0x0401
204 204struct hci_cp_inquiry {
205/* Informational Parameters */ 205 __u8 lap[3];
206#define OGF_INFO_PARAM 0x04 206 __u8 length;
207 207 __u8 num_rsp;
208#define OCF_READ_LOCAL_VERSION 0x0001
209struct hci_rp_read_loc_version {
210 __u8 status;
211 __u8 hci_ver;
212 __le16 hci_rev;
213 __u8 lmp_ver;
214 __le16 manufacturer;
215 __le16 lmp_subver;
216} __attribute__ ((packed));
217
218#define OCF_READ_LOCAL_FEATURES 0x0003
219struct hci_rp_read_local_features {
220 __u8 status;
221 __u8 features[8];
222} __attribute__ ((packed));
223
224#define OCF_READ_BUFFER_SIZE 0x0005
225struct hci_rp_read_buffer_size {
226 __u8 status;
227 __le16 acl_mtu;
228 __u8 sco_mtu;
229 __le16 acl_max_pkt;
230 __le16 sco_max_pkt;
231} __attribute__ ((packed));
232
233#define OCF_READ_BD_ADDR 0x0009
234struct hci_rp_read_bd_addr {
235 __u8 status;
236 bdaddr_t bdaddr;
237} __attribute__ ((packed));
238
239/* Host Controller and Baseband */
240#define OGF_HOST_CTL 0x03
241#define OCF_RESET 0x0003
242#define OCF_READ_AUTH_ENABLE 0x001F
243#define OCF_WRITE_AUTH_ENABLE 0x0020
244 #define AUTH_DISABLED 0x00
245 #define AUTH_ENABLED 0x01
246
247#define OCF_READ_ENCRYPT_MODE 0x0021
248#define OCF_WRITE_ENCRYPT_MODE 0x0022
249 #define ENCRYPT_DISABLED 0x00
250 #define ENCRYPT_P2P 0x01
251 #define ENCRYPT_BOTH 0x02
252
253#define OCF_WRITE_CA_TIMEOUT 0x0016
254#define OCF_WRITE_PG_TIMEOUT 0x0018
255
256#define OCF_WRITE_SCAN_ENABLE 0x001A
257 #define SCAN_DISABLED 0x00
258 #define SCAN_INQUIRY 0x01
259 #define SCAN_PAGE 0x02
260
261#define OCF_SET_EVENT_FLT 0x0005
262struct hci_cp_set_event_flt {
263 __u8 flt_type;
264 __u8 cond_type;
265 __u8 condition[0];
266} __attribute__ ((packed));
267
268/* Filter types */
269#define HCI_FLT_CLEAR_ALL 0x00
270#define HCI_FLT_INQ_RESULT 0x01
271#define HCI_FLT_CONN_SETUP 0x02
272
273/* CONN_SETUP Condition types */
274#define HCI_CONN_SETUP_ALLOW_ALL 0x00
275#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
276#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
277
278/* CONN_SETUP Conditions */
279#define HCI_CONN_SETUP_AUTO_OFF 0x01
280#define HCI_CONN_SETUP_AUTO_ON 0x02
281
282#define OCF_READ_CLASS_OF_DEV 0x0023
283struct hci_rp_read_dev_class {
284 __u8 status;
285 __u8 dev_class[3];
286} __attribute__ ((packed));
287
288#define OCF_WRITE_CLASS_OF_DEV 0x0024
289struct hci_cp_write_dev_class {
290 __u8 dev_class[3];
291} __attribute__ ((packed));
292
293#define OCF_READ_VOICE_SETTING 0x0025
294struct hci_rp_read_voice_setting {
295 __u8 status;
296 __le16 voice_setting;
297} __attribute__ ((packed)); 208} __attribute__ ((packed));
298 209
299#define OCF_WRITE_VOICE_SETTING 0x0026 210#define HCI_OP_INQUIRY_CANCEL 0x0402
300struct hci_cp_write_voice_setting {
301 __le16 voice_setting;
302} __attribute__ ((packed));
303 211
304#define OCF_HOST_BUFFER_SIZE 0x0033 212#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
305struct hci_cp_host_buffer_size {
306 __le16 acl_mtu;
307 __u8 sco_mtu;
308 __le16 acl_max_pkt;
309 __le16 sco_max_pkt;
310} __attribute__ ((packed));
311
312/* Link Control */
313#define OGF_LINK_CTL 0x01
314 213
315#define OCF_CREATE_CONN 0x0005 214#define HCI_OP_CREATE_CONN 0x0405
316struct hci_cp_create_conn { 215struct hci_cp_create_conn {
317 bdaddr_t bdaddr; 216 bdaddr_t bdaddr;
318 __le16 pkt_type; 217 __le16 pkt_type;
@@ -322,105 +221,138 @@ struct hci_cp_create_conn {
322 __u8 role_switch; 221 __u8 role_switch;
323} __attribute__ ((packed)); 222} __attribute__ ((packed));
324 223
325#define OCF_CREATE_CONN_CANCEL 0x0008 224#define HCI_OP_DISCONNECT 0x0406
326struct hci_cp_create_conn_cancel {
327 bdaddr_t bdaddr;
328} __attribute__ ((packed));
329
330#define OCF_ACCEPT_CONN_REQ 0x0009
331struct hci_cp_accept_conn_req {
332 bdaddr_t bdaddr;
333 __u8 role;
334} __attribute__ ((packed));
335
336#define OCF_REJECT_CONN_REQ 0x000a
337struct hci_cp_reject_conn_req {
338 bdaddr_t bdaddr;
339 __u8 reason;
340} __attribute__ ((packed));
341
342#define OCF_DISCONNECT 0x0006
343struct hci_cp_disconnect { 225struct hci_cp_disconnect {
344 __le16 handle; 226 __le16 handle;
345 __u8 reason; 227 __u8 reason;
346} __attribute__ ((packed)); 228} __attribute__ ((packed));
347 229
348#define OCF_ADD_SCO 0x0007 230#define HCI_OP_ADD_SCO 0x0407
349struct hci_cp_add_sco { 231struct hci_cp_add_sco {
350 __le16 handle; 232 __le16 handle;
351 __le16 pkt_type; 233 __le16 pkt_type;
352} __attribute__ ((packed)); 234} __attribute__ ((packed));
353 235
354#define OCF_INQUIRY 0x0001 236#define HCI_OP_CREATE_CONN_CANCEL 0x0408
355struct hci_cp_inquiry { 237struct hci_cp_create_conn_cancel {
356 __u8 lap[3]; 238 bdaddr_t bdaddr;
357 __u8 length;
358 __u8 num_rsp;
359} __attribute__ ((packed)); 239} __attribute__ ((packed));
360 240
361#define OCF_INQUIRY_CANCEL 0x0002 241#define HCI_OP_ACCEPT_CONN_REQ 0x0409
242struct hci_cp_accept_conn_req {
243 bdaddr_t bdaddr;
244 __u8 role;
245} __attribute__ ((packed));
362 246
363#define OCF_EXIT_PERIODIC_INQ 0x0004 247#define HCI_OP_REJECT_CONN_REQ 0x040a
248struct hci_cp_reject_conn_req {
249 bdaddr_t bdaddr;
250 __u8 reason;
251} __attribute__ ((packed));
364 252
365#define OCF_LINK_KEY_REPLY 0x000B 253#define HCI_OP_LINK_KEY_REPLY 0x040b
366struct hci_cp_link_key_reply { 254struct hci_cp_link_key_reply {
367 bdaddr_t bdaddr; 255 bdaddr_t bdaddr;
368 __u8 link_key[16]; 256 __u8 link_key[16];
369} __attribute__ ((packed)); 257} __attribute__ ((packed));
370 258
371#define OCF_LINK_KEY_NEG_REPLY 0x000C 259#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
372struct hci_cp_link_key_neg_reply { 260struct hci_cp_link_key_neg_reply {
373 bdaddr_t bdaddr; 261 bdaddr_t bdaddr;
374} __attribute__ ((packed)); 262} __attribute__ ((packed));
375 263
376#define OCF_PIN_CODE_REPLY 0x000D 264#define HCI_OP_PIN_CODE_REPLY 0x040d
377struct hci_cp_pin_code_reply { 265struct hci_cp_pin_code_reply {
378 bdaddr_t bdaddr; 266 bdaddr_t bdaddr;
379 __u8 pin_len; 267 __u8 pin_len;
380 __u8 pin_code[16]; 268 __u8 pin_code[16];
381} __attribute__ ((packed)); 269} __attribute__ ((packed));
382 270
383#define OCF_PIN_CODE_NEG_REPLY 0x000E 271#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e
384struct hci_cp_pin_code_neg_reply { 272struct hci_cp_pin_code_neg_reply {
385 bdaddr_t bdaddr; 273 bdaddr_t bdaddr;
386} __attribute__ ((packed)); 274} __attribute__ ((packed));
387 275
388#define OCF_CHANGE_CONN_PTYPE 0x000F 276#define HCI_OP_CHANGE_CONN_PTYPE 0x040f
389struct hci_cp_change_conn_ptype { 277struct hci_cp_change_conn_ptype {
390 __le16 handle; 278 __le16 handle;
391 __le16 pkt_type; 279 __le16 pkt_type;
392} __attribute__ ((packed)); 280} __attribute__ ((packed));
393 281
394#define OCF_AUTH_REQUESTED 0x0011 282#define HCI_OP_AUTH_REQUESTED 0x0411
395struct hci_cp_auth_requested { 283struct hci_cp_auth_requested {
396 __le16 handle; 284 __le16 handle;
397} __attribute__ ((packed)); 285} __attribute__ ((packed));
398 286
399#define OCF_SET_CONN_ENCRYPT 0x0013 287#define HCI_OP_SET_CONN_ENCRYPT 0x0413
400struct hci_cp_set_conn_encrypt { 288struct hci_cp_set_conn_encrypt {
401 __le16 handle; 289 __le16 handle;
402 __u8 encrypt; 290 __u8 encrypt;
403} __attribute__ ((packed)); 291} __attribute__ ((packed));
404 292
405#define OCF_CHANGE_CONN_LINK_KEY 0x0015 293#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415
406struct hci_cp_change_conn_link_key { 294struct hci_cp_change_conn_link_key {
407 __le16 handle; 295 __le16 handle;
408} __attribute__ ((packed)); 296} __attribute__ ((packed));
409 297
410#define OCF_READ_REMOTE_FEATURES 0x001B 298#define HCI_OP_REMOTE_NAME_REQ 0x0419
299struct hci_cp_remote_name_req {
300 bdaddr_t bdaddr;
301 __u8 pscan_rep_mode;
302 __u8 pscan_mode;
303 __le16 clock_offset;
304} __attribute__ ((packed));
305
306#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a
307struct hci_cp_remote_name_req_cancel {
308 bdaddr_t bdaddr;
309} __attribute__ ((packed));
310
311#define HCI_OP_READ_REMOTE_FEATURES 0x041b
411struct hci_cp_read_remote_features { 312struct hci_cp_read_remote_features {
412 __le16 handle; 313 __le16 handle;
413} __attribute__ ((packed)); 314} __attribute__ ((packed));
414 315
415#define OCF_READ_REMOTE_VERSION 0x001D 316#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c
317struct hci_cp_read_remote_ext_features {
318 __le16 handle;
319 __u8 page;
320} __attribute__ ((packed));
321
322#define HCI_OP_READ_REMOTE_VERSION 0x041d
416struct hci_cp_read_remote_version { 323struct hci_cp_read_remote_version {
417 __le16 handle; 324 __le16 handle;
418} __attribute__ ((packed)); 325} __attribute__ ((packed));
419 326
420/* Link Policy */ 327#define HCI_OP_SETUP_SYNC_CONN 0x0428
421#define OGF_LINK_POLICY 0x02 328struct hci_cp_setup_sync_conn {
329 __le16 handle;
330 __le32 tx_bandwidth;
331 __le32 rx_bandwidth;
332 __le16 max_latency;
333 __le16 voice_setting;
334 __u8 retrans_effort;
335 __le16 pkt_type;
336} __attribute__ ((packed));
422 337
423#define OCF_SNIFF_MODE 0x0003 338#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429
339struct hci_cp_accept_sync_conn_req {
340 bdaddr_t bdaddr;
341 __le32 tx_bandwidth;
342 __le32 rx_bandwidth;
343 __le16 max_latency;
344 __le16 content_format;
345 __u8 retrans_effort;
346 __le16 pkt_type;
347} __attribute__ ((packed));
348
349#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a
350struct hci_cp_reject_sync_conn_req {
351 bdaddr_t bdaddr;
352 __u8 reason;
353} __attribute__ ((packed));
354
355#define HCI_OP_SNIFF_MODE 0x0803
424struct hci_cp_sniff_mode { 356struct hci_cp_sniff_mode {
425 __le16 handle; 357 __le16 handle;
426 __le16 max_interval; 358 __le16 max_interval;
@@ -429,12 +361,12 @@ struct hci_cp_sniff_mode {
429 __le16 timeout; 361 __le16 timeout;
430} __attribute__ ((packed)); 362} __attribute__ ((packed));
431 363
432#define OCF_EXIT_SNIFF_MODE 0x0004 364#define HCI_OP_EXIT_SNIFF_MODE 0x0804
433struct hci_cp_exit_sniff_mode { 365struct hci_cp_exit_sniff_mode {
434 __le16 handle; 366 __le16 handle;
435} __attribute__ ((packed)); 367} __attribute__ ((packed));
436 368
437#define OCF_ROLE_DISCOVERY 0x0009 369#define HCI_OP_ROLE_DISCOVERY 0x0809
438struct hci_cp_role_discovery { 370struct hci_cp_role_discovery {
439 __le16 handle; 371 __le16 handle;
440} __attribute__ ((packed)); 372} __attribute__ ((packed));
@@ -444,7 +376,13 @@ struct hci_rp_role_discovery {
444 __u8 role; 376 __u8 role;
445} __attribute__ ((packed)); 377} __attribute__ ((packed));
446 378
447#define OCF_READ_LINK_POLICY 0x000C 379#define HCI_OP_SWITCH_ROLE 0x080b
380struct hci_cp_switch_role {
381 bdaddr_t bdaddr;
382 __u8 role;
383} __attribute__ ((packed));
384
385#define HCI_OP_READ_LINK_POLICY 0x080c
448struct hci_cp_read_link_policy { 386struct hci_cp_read_link_policy {
449 __le16 handle; 387 __le16 handle;
450} __attribute__ ((packed)); 388} __attribute__ ((packed));
@@ -454,13 +392,7 @@ struct hci_rp_read_link_policy {
454 __le16 policy; 392 __le16 policy;
455} __attribute__ ((packed)); 393} __attribute__ ((packed));
456 394
457#define OCF_SWITCH_ROLE 0x000B 395#define HCI_OP_WRITE_LINK_POLICY 0x080d
458struct hci_cp_switch_role {
459 bdaddr_t bdaddr;
460 __u8 role;
461} __attribute__ ((packed));
462
463#define OCF_WRITE_LINK_POLICY 0x000D
464struct hci_cp_write_link_policy { 396struct hci_cp_write_link_policy {
465 __le16 handle; 397 __le16 handle;
466 __le16 policy; 398 __le16 policy;
@@ -470,7 +402,7 @@ struct hci_rp_write_link_policy {
470 __le16 handle; 402 __le16 handle;
471} __attribute__ ((packed)); 403} __attribute__ ((packed));
472 404
473#define OCF_SNIFF_SUBRATE 0x0011 405#define HCI_OP_SNIFF_SUBRATE 0x0811
474struct hci_cp_sniff_subrate { 406struct hci_cp_sniff_subrate {
475 __le16 handle; 407 __le16 handle;
476 __le16 max_latency; 408 __le16 max_latency;
@@ -478,59 +410,156 @@ struct hci_cp_sniff_subrate {
478 __le16 min_local_timeout; 410 __le16 min_local_timeout;
479} __attribute__ ((packed)); 411} __attribute__ ((packed));
480 412
481/* Status params */ 413#define HCI_OP_SET_EVENT_MASK 0x0c01
482#define OGF_STATUS_PARAM 0x05 414struct hci_cp_set_event_mask {
415 __u8 mask[8];
416} __attribute__ ((packed));
483 417
484/* Testing commands */ 418#define HCI_OP_RESET 0x0c03
485#define OGF_TESTING_CMD 0x3E
486 419
487/* Vendor specific commands */ 420#define HCI_OP_SET_EVENT_FLT 0x0c05
488#define OGF_VENDOR_CMD 0x3F 421struct hci_cp_set_event_flt {
422 __u8 flt_type;
423 __u8 cond_type;
424 __u8 condition[0];
425} __attribute__ ((packed));
489 426
490/* ---- HCI Events ---- */ 427/* Filter types */
491#define HCI_EV_INQUIRY_COMPLETE 0x01 428#define HCI_FLT_CLEAR_ALL 0x00
429#define HCI_FLT_INQ_RESULT 0x01
430#define HCI_FLT_CONN_SETUP 0x02
492 431
493#define HCI_EV_INQUIRY_RESULT 0x02 432/* CONN_SETUP Condition types */
494struct inquiry_info { 433#define HCI_CONN_SETUP_ALLOW_ALL 0x00
495 bdaddr_t bdaddr; 434#define HCI_CONN_SETUP_ALLOW_CLASS 0x01
496 __u8 pscan_rep_mode; 435#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
497 __u8 pscan_period_mode; 436
498 __u8 pscan_mode; 437/* CONN_SETUP Conditions */
438#define HCI_CONN_SETUP_AUTO_OFF 0x01
439#define HCI_CONN_SETUP_AUTO_ON 0x02
440
441#define HCI_OP_WRITE_LOCAL_NAME 0x0c13
442struct hci_cp_write_local_name {
443 __u8 name[248];
444} __attribute__ ((packed));
445
446#define HCI_OP_READ_LOCAL_NAME 0x0c14
447struct hci_rp_read_local_name {
448 __u8 status;
449 __u8 name[248];
450} __attribute__ ((packed));
451
452#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16
453
454#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18
455
456#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
457 #define SCAN_DISABLED 0x00
458 #define SCAN_INQUIRY 0x01
459 #define SCAN_PAGE 0x02
460
461#define HCI_OP_READ_AUTH_ENABLE 0x0c1f
462
463#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20
464 #define AUTH_DISABLED 0x00
465 #define AUTH_ENABLED 0x01
466
467#define HCI_OP_READ_ENCRYPT_MODE 0x0c21
468
469#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22
470 #define ENCRYPT_DISABLED 0x00
471 #define ENCRYPT_P2P 0x01
472 #define ENCRYPT_BOTH 0x02
473
474#define HCI_OP_READ_CLASS_OF_DEV 0x0c23
475struct hci_rp_read_class_of_dev {
476 __u8 status;
499 __u8 dev_class[3]; 477 __u8 dev_class[3];
500 __le16 clock_offset;
501} __attribute__ ((packed)); 478} __attribute__ ((packed));
502 479
503#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 480#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24
504struct inquiry_info_with_rssi { 481struct hci_cp_write_class_of_dev {
505 bdaddr_t bdaddr;
506 __u8 pscan_rep_mode;
507 __u8 pscan_period_mode;
508 __u8 dev_class[3]; 482 __u8 dev_class[3];
509 __le16 clock_offset;
510 __s8 rssi;
511} __attribute__ ((packed)); 483} __attribute__ ((packed));
512struct inquiry_info_with_rssi_and_pscan_mode { 484
485#define HCI_OP_READ_VOICE_SETTING 0x0c25
486struct hci_rp_read_voice_setting {
487 __u8 status;
488 __le16 voice_setting;
489} __attribute__ ((packed));
490
491#define HCI_OP_WRITE_VOICE_SETTING 0x0c26
492struct hci_cp_write_voice_setting {
493 __le16 voice_setting;
494} __attribute__ ((packed));
495
496#define HCI_OP_HOST_BUFFER_SIZE 0x0c33
497struct hci_cp_host_buffer_size {
498 __le16 acl_mtu;
499 __u8 sco_mtu;
500 __le16 acl_max_pkt;
501 __le16 sco_max_pkt;
502} __attribute__ ((packed));
503
504#define HCI_OP_READ_LOCAL_VERSION 0x1001
505struct hci_rp_read_local_version {
506 __u8 status;
507 __u8 hci_ver;
508 __le16 hci_rev;
509 __u8 lmp_ver;
510 __le16 manufacturer;
511 __le16 lmp_subver;
512} __attribute__ ((packed));
513
514#define HCI_OP_READ_LOCAL_COMMANDS 0x1002
515struct hci_rp_read_local_commands {
516 __u8 status;
517 __u8 commands[64];
518} __attribute__ ((packed));
519
520#define HCI_OP_READ_LOCAL_FEATURES 0x1003
521struct hci_rp_read_local_features {
522 __u8 status;
523 __u8 features[8];
524} __attribute__ ((packed));
525
526#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004
527struct hci_rp_read_local_ext_features {
528 __u8 status;
529 __u8 page;
530 __u8 max_page;
531 __u8 features[8];
532} __attribute__ ((packed));
533
534#define HCI_OP_READ_BUFFER_SIZE 0x1005
535struct hci_rp_read_buffer_size {
536 __u8 status;
537 __le16 acl_mtu;
538 __u8 sco_mtu;
539 __le16 acl_max_pkt;
540 __le16 sco_max_pkt;
541} __attribute__ ((packed));
542
543#define HCI_OP_READ_BD_ADDR 0x1009
544struct hci_rp_read_bd_addr {
545 __u8 status;
513 bdaddr_t bdaddr; 546 bdaddr_t bdaddr;
514 __u8 pscan_rep_mode;
515 __u8 pscan_period_mode;
516 __u8 pscan_mode;
517 __u8 dev_class[3];
518 __le16 clock_offset;
519 __s8 rssi;
520} __attribute__ ((packed)); 547} __attribute__ ((packed));
521 548
522#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2F 549/* ---- HCI Events ---- */
523struct extended_inquiry_info { 550#define HCI_EV_INQUIRY_COMPLETE 0x01
551
552#define HCI_EV_INQUIRY_RESULT 0x02
553struct inquiry_info {
524 bdaddr_t bdaddr; 554 bdaddr_t bdaddr;
525 __u8 pscan_rep_mode; 555 __u8 pscan_rep_mode;
526 __u8 pscan_period_mode; 556 __u8 pscan_period_mode;
557 __u8 pscan_mode;
527 __u8 dev_class[3]; 558 __u8 dev_class[3];
528 __le16 clock_offset; 559 __le16 clock_offset;
529 __s8 rssi;
530 __u8 data[240];
531} __attribute__ ((packed)); 560} __attribute__ ((packed));
532 561
533#define HCI_EV_CONN_COMPLETE 0x03 562#define HCI_EV_CONN_COMPLETE 0x03
534struct hci_ev_conn_complete { 563struct hci_ev_conn_complete {
535 __u8 status; 564 __u8 status;
536 __le16 handle; 565 __le16 handle;
@@ -539,40 +568,63 @@ struct hci_ev_conn_complete {
539 __u8 encr_mode; 568 __u8 encr_mode;
540} __attribute__ ((packed)); 569} __attribute__ ((packed));
541 570
542#define HCI_EV_CONN_REQUEST 0x04 571#define HCI_EV_CONN_REQUEST 0x04
543struct hci_ev_conn_request { 572struct hci_ev_conn_request {
544 bdaddr_t bdaddr; 573 bdaddr_t bdaddr;
545 __u8 dev_class[3]; 574 __u8 dev_class[3];
546 __u8 link_type; 575 __u8 link_type;
547} __attribute__ ((packed)); 576} __attribute__ ((packed));
548 577
549#define HCI_EV_DISCONN_COMPLETE 0x05 578#define HCI_EV_DISCONN_COMPLETE 0x05
550struct hci_ev_disconn_complete { 579struct hci_ev_disconn_complete {
551 __u8 status; 580 __u8 status;
552 __le16 handle; 581 __le16 handle;
553 __u8 reason; 582 __u8 reason;
554} __attribute__ ((packed)); 583} __attribute__ ((packed));
555 584
556#define HCI_EV_AUTH_COMPLETE 0x06 585#define HCI_EV_AUTH_COMPLETE 0x06
557struct hci_ev_auth_complete { 586struct hci_ev_auth_complete {
558 __u8 status; 587 __u8 status;
559 __le16 handle; 588 __le16 handle;
560} __attribute__ ((packed)); 589} __attribute__ ((packed));
561 590
562#define HCI_EV_ENCRYPT_CHANGE 0x08 591#define HCI_EV_REMOTE_NAME 0x07
592struct hci_ev_remote_name {
593 __u8 status;
594 bdaddr_t bdaddr;
595 __u8 name[248];
596} __attribute__ ((packed));
597
598#define HCI_EV_ENCRYPT_CHANGE 0x08
563struct hci_ev_encrypt_change { 599struct hci_ev_encrypt_change {
564 __u8 status; 600 __u8 status;
565 __le16 handle; 601 __le16 handle;
566 __u8 encrypt; 602 __u8 encrypt;
567} __attribute__ ((packed)); 603} __attribute__ ((packed));
568 604
569#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE 0x09 605#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09
570struct hci_ev_change_conn_link_key_complete { 606struct hci_ev_change_link_key_complete {
607 __u8 status;
608 __le16 handle;
609} __attribute__ ((packed));
610
611#define HCI_EV_REMOTE_FEATURES 0x0b
612struct hci_ev_remote_features {
613 __u8 status;
614 __le16 handle;
615 __u8 features[8];
616} __attribute__ ((packed));
617
618#define HCI_EV_REMOTE_VERSION 0x0c
619struct hci_ev_remote_version {
571 __u8 status; 620 __u8 status;
572 __le16 handle; 621 __le16 handle;
622 __u8 lmp_ver;
623 __le16 manufacturer;
624 __le16 lmp_subver;
573} __attribute__ ((packed)); 625} __attribute__ ((packed));
574 626
575#define HCI_EV_QOS_SETUP_COMPLETE 0x0D 627#define HCI_EV_QOS_SETUP_COMPLETE 0x0d
576struct hci_qos { 628struct hci_qos {
577 __u8 service_type; 629 __u8 service_type;
578 __u32 token_rate; 630 __u32 token_rate;
@@ -586,33 +638,33 @@ struct hci_ev_qos_setup_complete {
586 struct hci_qos qos; 638 struct hci_qos qos;
587} __attribute__ ((packed)); 639} __attribute__ ((packed));
588 640
589#define HCI_EV_CMD_COMPLETE 0x0E 641#define HCI_EV_CMD_COMPLETE 0x0e
590struct hci_ev_cmd_complete { 642struct hci_ev_cmd_complete {
591 __u8 ncmd; 643 __u8 ncmd;
592 __le16 opcode; 644 __le16 opcode;
593} __attribute__ ((packed)); 645} __attribute__ ((packed));
594 646
595#define HCI_EV_CMD_STATUS 0x0F 647#define HCI_EV_CMD_STATUS 0x0f
596struct hci_ev_cmd_status { 648struct hci_ev_cmd_status {
597 __u8 status; 649 __u8 status;
598 __u8 ncmd; 650 __u8 ncmd;
599 __le16 opcode; 651 __le16 opcode;
600} __attribute__ ((packed)); 652} __attribute__ ((packed));
601 653
602#define HCI_EV_NUM_COMP_PKTS 0x13 654#define HCI_EV_ROLE_CHANGE 0x12
603struct hci_ev_num_comp_pkts {
604 __u8 num_hndl;
605 /* variable length part */
606} __attribute__ ((packed));
607
608#define HCI_EV_ROLE_CHANGE 0x12
609struct hci_ev_role_change { 655struct hci_ev_role_change {
610 __u8 status; 656 __u8 status;
611 bdaddr_t bdaddr; 657 bdaddr_t bdaddr;
612 __u8 role; 658 __u8 role;
613} __attribute__ ((packed)); 659} __attribute__ ((packed));
614 660
615#define HCI_EV_MODE_CHANGE 0x14 661#define HCI_EV_NUM_COMP_PKTS 0x13
662struct hci_ev_num_comp_pkts {
663 __u8 num_hndl;
664 /* variable length part */
665} __attribute__ ((packed));
666
667#define HCI_EV_MODE_CHANGE 0x14
616struct hci_ev_mode_change { 668struct hci_ev_mode_change {
617 __u8 status; 669 __u8 status;
618 __le16 handle; 670 __le16 handle;
@@ -620,53 +672,88 @@ struct hci_ev_mode_change {
620 __le16 interval; 672 __le16 interval;
621} __attribute__ ((packed)); 673} __attribute__ ((packed));
622 674
623#define HCI_EV_PIN_CODE_REQ 0x16 675#define HCI_EV_PIN_CODE_REQ 0x16
624struct hci_ev_pin_code_req { 676struct hci_ev_pin_code_req {
625 bdaddr_t bdaddr; 677 bdaddr_t bdaddr;
626} __attribute__ ((packed)); 678} __attribute__ ((packed));
627 679
628#define HCI_EV_LINK_KEY_REQ 0x17 680#define HCI_EV_LINK_KEY_REQ 0x17
629struct hci_ev_link_key_req { 681struct hci_ev_link_key_req {
630 bdaddr_t bdaddr; 682 bdaddr_t bdaddr;
631} __attribute__ ((packed)); 683} __attribute__ ((packed));
632 684
633#define HCI_EV_LINK_KEY_NOTIFY 0x18 685#define HCI_EV_LINK_KEY_NOTIFY 0x18
634struct hci_ev_link_key_notify { 686struct hci_ev_link_key_notify {
635 bdaddr_t bdaddr; 687 bdaddr_t bdaddr;
636 __u8 link_key[16]; 688 __u8 link_key[16];
637 __u8 key_type; 689 __u8 key_type;
638} __attribute__ ((packed)); 690} __attribute__ ((packed));
639 691
640#define HCI_EV_REMOTE_FEATURES 0x0B 692#define HCI_EV_CLOCK_OFFSET 0x1c
641struct hci_ev_remote_features { 693struct hci_ev_clock_offset {
642 __u8 status; 694 __u8 status;
643 __le16 handle; 695 __le16 handle;
644 __u8 features[8]; 696 __le16 clock_offset;
645} __attribute__ ((packed)); 697} __attribute__ ((packed));
646 698
647#define HCI_EV_REMOTE_VERSION 0x0C 699#define HCI_EV_PSCAN_REP_MODE 0x20
648struct hci_ev_remote_version { 700struct hci_ev_pscan_rep_mode {
701 bdaddr_t bdaddr;
702 __u8 pscan_rep_mode;
703} __attribute__ ((packed));
704
705#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
706struct inquiry_info_with_rssi {
707 bdaddr_t bdaddr;
708 __u8 pscan_rep_mode;
709 __u8 pscan_period_mode;
710 __u8 dev_class[3];
711 __le16 clock_offset;
712 __s8 rssi;
713} __attribute__ ((packed));
714struct inquiry_info_with_rssi_and_pscan_mode {
715 bdaddr_t bdaddr;
716 __u8 pscan_rep_mode;
717 __u8 pscan_period_mode;
718 __u8 pscan_mode;
719 __u8 dev_class[3];
720 __le16 clock_offset;
721 __s8 rssi;
722} __attribute__ ((packed));
723
724#define HCI_EV_REMOTE_EXT_FEATURES 0x23
725struct hci_ev_remote_ext_features {
649 __u8 status; 726 __u8 status;
650 __le16 handle; 727 __le16 handle;
651 __u8 lmp_ver; 728 __u8 page;
652 __le16 manufacturer; 729 __u8 max_page;
653 __le16 lmp_subver; 730 __u8 features[8];
654} __attribute__ ((packed)); 731} __attribute__ ((packed));
655 732
656#define HCI_EV_CLOCK_OFFSET 0x01C 733#define HCI_EV_SYNC_CONN_COMPLETE 0x2c
657struct hci_ev_clock_offset { 734struct hci_ev_sync_conn_complete {
658 __u8 status; 735 __u8 status;
659 __le16 handle; 736 __le16 handle;
660 __le16 clock_offset; 737 bdaddr_t bdaddr;
738 __u8 link_type;
739 __u8 tx_interval;
740 __u8 retrans_window;
741 __le16 rx_pkt_len;
742 __le16 tx_pkt_len;
743 __u8 air_mode;
661} __attribute__ ((packed)); 744} __attribute__ ((packed));
662 745
663#define HCI_EV_PSCAN_REP_MODE 0x20 746#define HCI_EV_SYNC_CONN_CHANGED 0x2d
664struct hci_ev_pscan_rep_mode { 747struct hci_ev_sync_conn_changed {
665 bdaddr_t bdaddr; 748 __u8 status;
666 __u8 pscan_rep_mode; 749 __le16 handle;
750 __u8 tx_interval;
751 __u8 retrans_window;
752 __le16 rx_pkt_len;
753 __le16 tx_pkt_len;
667} __attribute__ ((packed)); 754} __attribute__ ((packed));
668 755
669#define HCI_EV_SNIFF_SUBRATE 0x2E 756#define HCI_EV_SNIFF_SUBRATE 0x2e
670struct hci_ev_sniff_subrate { 757struct hci_ev_sniff_subrate {
671 __u8 status; 758 __u8 status;
672 __le16 handle; 759 __le16 handle;
@@ -676,14 +763,25 @@ struct hci_ev_sniff_subrate {
676 __le16 max_local_timeout; 763 __le16 max_local_timeout;
677} __attribute__ ((packed)); 764} __attribute__ ((packed));
678 765
766#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f
767struct extended_inquiry_info {
768 bdaddr_t bdaddr;
769 __u8 pscan_rep_mode;
770 __u8 pscan_period_mode;
771 __u8 dev_class[3];
772 __le16 clock_offset;
773 __s8 rssi;
774 __u8 data[240];
775} __attribute__ ((packed));
776
679/* Internal events generated by Bluetooth stack */ 777/* Internal events generated by Bluetooth stack */
680#define HCI_EV_STACK_INTERNAL 0xFD 778#define HCI_EV_STACK_INTERNAL 0xfd
681struct hci_ev_stack_internal { 779struct hci_ev_stack_internal {
682 __u16 type; 780 __u16 type;
683 __u8 data[0]; 781 __u8 data[0];
684} __attribute__ ((packed)); 782} __attribute__ ((packed));
685 783
686#define HCI_EV_SI_DEVICE 0x01 784#define HCI_EV_SI_DEVICE 0x01
687struct hci_ev_si_device { 785struct hci_ev_si_device {
688 __u16 event; 786 __u16 event;
689 __u16 dev_id; 787 __u16 dev_id;
@@ -704,40 +802,40 @@ struct hci_ev_si_security {
704#define HCI_SCO_HDR_SIZE 3 802#define HCI_SCO_HDR_SIZE 3
705 803
706struct hci_command_hdr { 804struct hci_command_hdr {
707 __le16 opcode; /* OCF & OGF */ 805 __le16 opcode; /* OCF & OGF */
708 __u8 plen; 806 __u8 plen;
709} __attribute__ ((packed)); 807} __attribute__ ((packed));
710 808
711struct hci_event_hdr { 809struct hci_event_hdr {
712 __u8 evt; 810 __u8 evt;
713 __u8 plen; 811 __u8 plen;
714} __attribute__ ((packed)); 812} __attribute__ ((packed));
715 813
716struct hci_acl_hdr { 814struct hci_acl_hdr {
717 __le16 handle; /* Handle & Flags(PB, BC) */ 815 __le16 handle; /* Handle & Flags(PB, BC) */
718 __le16 dlen; 816 __le16 dlen;
719} __attribute__ ((packed)); 817} __attribute__ ((packed));
720 818
721struct hci_sco_hdr { 819struct hci_sco_hdr {
722 __le16 handle; 820 __le16 handle;
723 __u8 dlen; 821 __u8 dlen;
724} __attribute__ ((packed)); 822} __attribute__ ((packed));
725 823
726#ifdef __KERNEL__ 824#ifdef __KERNEL__
727#include <linux/skbuff.h> 825#include <linux/skbuff.h>
728static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 826static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
729{ 827{
730 return (struct hci_event_hdr *)skb->data; 828 return (struct hci_event_hdr *) skb->data;
731} 829}
732 830
733static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) 831static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
734{ 832{
735 return (struct hci_acl_hdr *)skb->data; 833 return (struct hci_acl_hdr *) skb->data;
736} 834}
737 835
738static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) 836static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
739{ 837{
740 return (struct hci_sco_hdr *)skb->data; 838 return (struct hci_sco_hdr *) skb->data;
741} 839}
742#endif 840#endif
743 841
@@ -771,13 +869,13 @@ struct sockaddr_hci {
771struct hci_filter { 869struct hci_filter {
772 unsigned long type_mask; 870 unsigned long type_mask;
773 unsigned long event_mask[2]; 871 unsigned long event_mask[2];
774 __le16 opcode; 872 __le16 opcode;
775}; 873};
776 874
777struct hci_ufilter { 875struct hci_ufilter {
778 __u32 type_mask; 876 __u32 type_mask;
779 __u32 event_mask[2]; 877 __u32 event_mask[2];
780 __le16 opcode; 878 __le16 opcode;
781}; 879};
782 880
783#define HCI_FLT_TYPE_BITS 31 881#define HCI_FLT_TYPE_BITS 31
@@ -825,15 +923,15 @@ struct hci_dev_info {
825struct hci_conn_info { 923struct hci_conn_info {
826 __u16 handle; 924 __u16 handle;
827 bdaddr_t bdaddr; 925 bdaddr_t bdaddr;
828 __u8 type; 926 __u8 type;
829 __u8 out; 927 __u8 out;
830 __u16 state; 928 __u16 state;
831 __u32 link_mode; 929 __u32 link_mode;
832}; 930};
833 931
834struct hci_dev_req { 932struct hci_dev_req {
835 __u16 dev_id; 933 __u16 dev_id;
836 __u32 dev_opt; 934 __u32 dev_opt;
837}; 935};
838 936
839struct hci_dev_list_req { 937struct hci_dev_list_req {
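The new hci.h layout above ends with the fixed packet headers (hci_command_hdr, hci_event_hdr, hci_acl_hdr, hci_sco_hdr) and the __KERNEL__-only inline accessors, which simply cast skb->data to the matching header type. As a rough, userspace-style sketch of what that event framing means in practice, the standalone program below parses a raw event buffer using the same two-byte header (event code, payload length). It uses uint*_t stand-ins for __u8/__le16, assumes a little-endian host, and is not the kernel's own code path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f

/* Two-byte event header, mirroring struct hci_event_hdr above. */
struct ev_hdr {
	uint8_t evt;	/* event code */
	uint8_t plen;	/* payload length */
} __attribute__ ((packed));

static void handle_event(const uint8_t *buf, size_t len)
{
	struct ev_hdr hdr;

	if (len < sizeof(hdr))
		return;			/* too short to carry a header */
	memcpy(&hdr, buf, sizeof(hdr));

	if (len < sizeof(hdr) + hdr.plen)
		return;			/* truncated payload */

	if (hdr.evt == HCI_EV_EXTENDED_INQUIRY_RESULT)
		printf("extended inquiry result, %u payload byte(s)\n",
		       hdr.plen);
	else
		printf("event 0x%02x, %u payload byte(s)\n",
		       hdr.evt, hdr.plen);
}

int main(void)
{
	/* Toy packet: event 0x2f with a single payload byte. */
	uint8_t pkt[] = { HCI_EV_EXTENDED_INQUIRY_RESULT, 1, 0x00 };

	handle_event(pkt, sizeof(pkt));
	return 0;
}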
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8f67c8a7169b..ea13baa3851b 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -71,7 +71,10 @@ struct hci_dev {
71 __u16 id; 71 __u16 id;
72 __u8 type; 72 __u8 type;
73 bdaddr_t bdaddr; 73 bdaddr_t bdaddr;
74 __u8 dev_name[248];
75 __u8 dev_class[3];
74 __u8 features[8]; 76 __u8 features[8];
77 __u8 commands[64];
75 __u8 hci_ver; 78 __u8 hci_ver;
76 __u16 hci_rev; 79 __u16 hci_rev;
77 __u16 manufacturer; 80 __u16 manufacturer;
@@ -310,10 +313,12 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
310void hci_acl_connect(struct hci_conn *conn); 313void hci_acl_connect(struct hci_conn *conn);
311void hci_acl_disconn(struct hci_conn *conn, __u8 reason); 314void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
312void hci_add_sco(struct hci_conn *conn, __u16 handle); 315void hci_add_sco(struct hci_conn *conn, __u16 handle);
316void hci_setup_sync(struct hci_conn *conn, __u16 handle);
313 317
314struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
315int hci_conn_del(struct hci_conn *conn); 319int hci_conn_del(struct hci_conn *conn);
316void hci_conn_hash_flush(struct hci_dev *hdev); 320void hci_conn_hash_flush(struct hci_dev *hdev);
321void hci_conn_check_pending(struct hci_dev *hdev);
317 322
318struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); 323struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
319int hci_conn_auth(struct hci_conn *conn); 324int hci_conn_auth(struct hci_conn *conn);
@@ -617,11 +622,11 @@ int hci_unregister_cb(struct hci_cb *hcb);
617int hci_register_notifier(struct notifier_block *nb); 622int hci_register_notifier(struct notifier_block *nb);
618int hci_unregister_notifier(struct notifier_block *nb); 623int hci_unregister_notifier(struct notifier_block *nb);
619 624
620int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param); 625int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
621int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); 626int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
622int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 627int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
623 628
624void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf); 629void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
625 630
626void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); 631void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
627 632
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 70e70f5d3dd6..73e115bc12dd 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -29,7 +29,8 @@
29#define L2CAP_DEFAULT_MTU 672 29#define L2CAP_DEFAULT_MTU 672
30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF 30#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
31 31
32#define L2CAP_CONN_TIMEOUT (HZ * 40) 32#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
33#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
33 34
34/* L2CAP socket address */ 35/* L2CAP socket address */
35struct sockaddr_l2 { 36struct sockaddr_l2 {
@@ -148,6 +149,19 @@ struct l2cap_conf_opt {
148 149
149#define L2CAP_CONF_MAX_SIZE 22 150#define L2CAP_CONF_MAX_SIZE 22
150 151
152struct l2cap_conf_rfc {
153 __u8 mode;
154 __u8 txwin_size;
155 __u8 max_transmit;
156 __le16 retrans_timeout;
157 __le16 monitor_timeout;
158 __le16 max_pdu_size;
159} __attribute__ ((packed));
160
161#define L2CAP_MODE_BASIC 0x00
162#define L2CAP_MODE_RETRANS 0x01
163#define L2CAP_MODE_FLOWCTL 0x02
164
151struct l2cap_disconn_req { 165struct l2cap_disconn_req {
152 __le16 dcid; 166 __le16 dcid;
153 __le16 scid; 167 __le16 scid;
@@ -160,7 +174,6 @@ struct l2cap_disconn_rsp {
160 174
161struct l2cap_info_req { 175struct l2cap_info_req {
162 __le16 type; 176 __le16 type;
163 __u8 data[0];
164} __attribute__ ((packed)); 177} __attribute__ ((packed));
165 178
166struct l2cap_info_rsp { 179struct l2cap_info_rsp {
@@ -192,6 +205,13 @@ struct l2cap_conn {
192 205
193 unsigned int mtu; 206 unsigned int mtu;
194 207
208 __u32 feat_mask;
209
210 __u8 info_state;
211 __u8 info_ident;
212
213 struct timer_list info_timer;
214
195 spinlock_t lock; 215 spinlock_t lock;
196 216
197 struct sk_buff *rx_skb; 217 struct sk_buff *rx_skb;
@@ -202,6 +222,9 @@ struct l2cap_conn {
202 struct l2cap_chan_list chan_list; 222 struct l2cap_chan_list chan_list;
203}; 223};
204 224
225#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
226#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x02
227
205/* ----- L2CAP channel and socket info ----- */ 228/* ----- L2CAP channel and socket info ----- */
206#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 229#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
207 230
@@ -221,7 +244,6 @@ struct l2cap_pinfo {
221 __u8 conf_len; 244 __u8 conf_len;
222 __u8 conf_state; 245 __u8 conf_state;
223 __u8 conf_retry; 246 __u8 conf_retry;
224 __u16 conf_mtu;
225 247
226 __u8 ident; 248 __u8 ident;
227 249
@@ -232,10 +254,11 @@ struct l2cap_pinfo {
232 struct sock *prev_c; 254 struct sock *prev_c;
233}; 255};
234 256
235#define L2CAP_CONF_REQ_SENT 0x01 257#define L2CAP_CONF_REQ_SENT 0x01
236#define L2CAP_CONF_INPUT_DONE 0x02 258#define L2CAP_CONF_INPUT_DONE 0x02
237#define L2CAP_CONF_OUTPUT_DONE 0x04 259#define L2CAP_CONF_OUTPUT_DONE 0x04
238#define L2CAP_CONF_MAX_RETRIES 2 260
261#define L2CAP_CONF_MAX_RETRIES 2
239 262
240void l2cap_load(void); 263void l2cap_load(void);
241 264
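The l2cap.h hunk above introduces struct l2cap_conf_rfc, the retransmission-and-flow-control parameters carried inside an L2CAP configuration option (the type/length/value encoding of struct l2cap_conf_opt referenced in the hunk header). The sketch below serializes such an option for L2CAP_MODE_BASIC; it assumes a little-endian host, and the option type value 0x04 and the helper name are assumptions taken from the L2CAP specification, not from this diff.

/* Sketch: serialize an RFC configuration option into the
 * type/length/value form used by L2CAP configuration requests. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define L2CAP_MODE_BASIC	0x00
#define L2CAP_CONF_RFC_TYPE	0x04	/* assumed option type */

struct l2cap_conf_rfc {
	uint8_t  mode;
	uint8_t  txwin_size;
	uint8_t  max_transmit;
	uint16_t retrans_timeout;	/* __le16 on the wire */
	uint16_t monitor_timeout;
	uint16_t max_pdu_size;
} __attribute__ ((packed));

/* Append one option (type, len, value) to a config request payload. */
static size_t add_conf_opt(uint8_t *buf, uint8_t type,
			   const void *val, uint8_t len)
{
	buf[0] = type;
	buf[1] = len;
	memcpy(buf + 2, val, len);
	return 2 + (size_t)len;
}

int main(void)
{
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	uint8_t payload[32];
	size_t used = add_conf_opt(payload, L2CAP_CONF_RFC_TYPE,
				   &rfc, sizeof(rfc));

	printf("config option uses %zu bytes\n", used);
	return 0;
}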
diff --git a/include/sound/version.h b/include/sound/version.h
index 8d4a8dd89237..a2be8ad8894b 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
1/* include/version.h. Generated by alsa/ksync script. */ 1/* include/version.h. Generated by alsa/ksync script. */
2#define CONFIG_SND_VERSION "1.0.15" 2#define CONFIG_SND_VERSION "1.0.15"
3#define CONFIG_SND_DATE " (Tue Oct 16 14:57:44 2007 UTC)" 3#define CONFIG_SND_DATE " (Tue Oct 23 06:09:18 2007 UTC)"