path: root/include
author    David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 10:51:24 -0400
committer David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 10:51:24 -0400
commit    df5179854bca84ac5be500849b12dd33ce03f03f (patch)
tree      78cf16415489e70f34c58f2c7f5c2e63696e9761 /include
parent    0f45aa18e65cf3d768082d7d86054a0d2a20bb18 (diff)
parent    8b22c249e7de453961e4d253b19fc2a0bdd65d53 (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-integrator/smp.h |  19
-rw-r--r--  include/asm-arm/smp.h                 |  14
-rw-r--r--  include/asm-arm/system.h              |   1
-rw-r--r--  include/linux/ata.h                   |   1
-rw-r--r--  include/linux/blkdev.h                |   2
-rw-r--r--  include/linux/chio.h                  | 168
-rw-r--r--  include/linux/dm9000.h                |  36
-rw-r--r--  include/linux/dma-mapping.h           |   5
-rw-r--r--  include/linux/hdlc.h                  |   4
-rw-r--r--  include/linux/if.h                    |   2
-rw-r--r--  include/linux/ip.h                    |  21
-rw-r--r--  include/linux/ipv6.h                  |  13
-rw-r--r--  include/linux/libata.h                |   1
-rw-r--r--  include/linux/major.h                 |   1
-rw-r--r--  include/linux/netlink.h               |  24
-rw-r--r--  include/linux/rtnetlink.h             | 176
-rw-r--r--  include/linux/slab.h                  |   1
-rw-r--r--  include/linux/tcp.h                   |  28
-rw-r--r--  include/linux/wireless.h              | 283
-rw-r--r--  include/linux/xfrm.h                  |   4
-rw-r--r--  include/net/neighbour.h               |   7
-rw-r--r--  include/net/request_sock.h            | 255
-rw-r--r--  include/net/sch_generic.h             | 122
-rw-r--r--  include/net/sock.h                    |   4
-rw-r--r--  include/net/tcp.h                     | 160
-rw-r--r--  include/net/tcp_ecn.h                 |  13
-rw-r--r--  include/net/xfrm.h                    |  24
-rw-r--r--  include/scsi/scsi.h                   |   4
-rw-r--r--  include/scsi/scsi_device.h            |   4
-rw-r--r--  include/scsi/scsi_host.h              |  25
-rw-r--r--  include/scsi/scsi_transport.h         |  38
31 files changed, 1292 insertions(+), 168 deletions(-)
diff --git a/include/asm-arm/arch-integrator/smp.h b/include/asm-arm/arch-integrator/smp.h
new file mode 100644
index 000000000000..0ec7093f7c37
--- /dev/null
+++ b/include/asm-arm/arch-integrator/smp.h
@@ -0,0 +1,19 @@
1#ifndef ASMARM_ARCH_SMP_H
2#define ASMARM_ARCH_SMP_H
3
4#include <linux/config.h>
5
6#include <asm/arch/hardware.h>
7#include <asm/io.h>
8
9#define hard_smp_processor_id() \
10 ({ \
11 unsigned int cpunum; \
12 __asm__("mrc p15, 0, %0, c0, c0, 5" \
13 : "=r" (cpunum)); \
14 cpunum &= 0x0F; \
15 })
16
17extern void secondary_scan_irqs(void);
18
19#endif
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index f21fd8f6bcdd..bd44f894690f 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -55,4 +55,18 @@ extern void smp_cross_call(cpumask_t callmap);
55 */ 55 */
56extern int boot_secondary(unsigned int cpu, struct task_struct *); 56extern int boot_secondary(unsigned int cpu, struct task_struct *);
57 57
58/*
59 * Perform platform specific initialisation of the specified CPU.
60 */
61extern void platform_secondary_init(unsigned int cpu);
62
63/*
64 * Initial data for bringing up a secondary CPU.
65 */
66struct secondary_data {
67 unsigned long pgdir;
68 void *stack;
69};
70extern struct secondary_data secondary_data;
71
58#endif /* ifndef __ASM_ARM_SMP_H */ 72#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index b13a8da4847b..8405eb6558ed 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -104,6 +104,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
104extern void __show_regs(struct pt_regs *); 104extern void __show_regs(struct pt_regs *);
105 105
106extern int cpu_architecture(void); 106extern int cpu_architecture(void);
107extern void cpu_init(void);
107 108
108#define set_cr(x) \ 109#define set_cr(x) \
109 __asm__ __volatile__( \ 110 __asm__ __volatile__( \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f178894edd04..ca5fcadf9981 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -224,6 +224,7 @@ struct ata_taskfile {
224}; 224};
225 225
226#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) 226#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
227#define ata_id_is_sata(id) ((id)[93] == 0)
227#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6)) 228#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
228#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5)) 229#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
229#define ata_id_has_flush(id) ((id)[83] & (1 << 12)) 230#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ef1afc178c0a..4a99b76c5a33 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -544,7 +544,7 @@ extern void blk_end_sync_rq(struct request *rq);
544extern void blk_attempt_remerge(request_queue_t *, struct request *); 544extern void blk_attempt_remerge(request_queue_t *, struct request *);
545extern void __blk_attempt_remerge(request_queue_t *, struct request *); 545extern void __blk_attempt_remerge(request_queue_t *, struct request *);
546extern struct request *blk_get_request(request_queue_t *, int, int); 546extern struct request *blk_get_request(request_queue_t *, int, int);
547extern void blk_insert_request(request_queue_t *, struct request *, int, void *, int); 547extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
548extern void blk_requeue_request(request_queue_t *, struct request *); 548extern void blk_requeue_request(request_queue_t *, struct request *);
549extern void blk_plug_device(request_queue_t *); 549extern void blk_plug_device(request_queue_t *);
550extern int blk_remove_plug(request_queue_t *); 550extern int blk_remove_plug(request_queue_t *);
diff --git a/include/linux/chio.h b/include/linux/chio.h
new file mode 100644
index 000000000000..63035ae67e63
--- /dev/null
+++ b/include/linux/chio.h
@@ -0,0 +1,168 @@
1/*
2 * ioctl interface for the scsi media changer driver
3 */
4
5/* changer element types */
6#define CHET_MT 0 /* media transport element (robot) */
7#define CHET_ST 1 /* storage element (media slots) */
8#define CHET_IE 2 /* import/export element */
9#define CHET_DT 3 /* data transfer element (tape/cdrom/whatever) */
10#define CHET_V1 4 /* vendor specific #1 */
11#define CHET_V2 5 /* vendor specific #2 */
12#define CHET_V3 6 /* vendor specific #3 */
13#define CHET_V4 7 /* vendor specific #4 */
14
15
16/*
17 * CHIOGPARAMS
18 * query changer properties
19 *
20 * CHIOVGPARAMS
21 * query vendor-specific element types
22 *
 23 * accessing elements works by specifying type and unit of the element.
 24 * for example, storage elements are addressed with type = CHET_ST and
25 * unit = 0 .. cp_nslots-1
26 *
27 */
28struct changer_params {
29 int cp_curpicker; /* current transport element */
30 int cp_npickers; /* number of transport elements (CHET_MT) */
31 int cp_nslots; /* number of storage elements (CHET_ST) */
32 int cp_nportals; /* number of import/export elements (CHET_IE) */
33 int cp_ndrives; /* number of data transfer elements (CHET_DT) */
34};
35struct changer_vendor_params {
36 int cvp_n1; /* number of vendor specific elems (CHET_V1) */
37 char cvp_label1[16];
38 int cvp_n2; /* number of vendor specific elems (CHET_V2) */
39 char cvp_label2[16];
40 int cvp_n3; /* number of vendor specific elems (CHET_V3) */
41 char cvp_label3[16];
42 int cvp_n4; /* number of vendor specific elems (CHET_V4) */
43 char cvp_label4[16];
44 int reserved[8];
45};
46
47
48/*
49 * CHIOMOVE
50 * move a medium from one element to another
51 */
52struct changer_move {
53 int cm_fromtype; /* type/unit of source element */
54 int cm_fromunit;
55 int cm_totype; /* type/unit of destination element */
56 int cm_tounit;
57 int cm_flags;
58};
59#define CM_INVERT 1 /* flag: rotate media (for double-sided like MOD) */
60
61
62/*
63 * CHIOEXCHANGE
64 * move one medium from element #1 to element #2,
65 * and another one from element #2 to element #3.
66 * element #1 and #3 are allowed to be identical.
67 */
68struct changer_exchange {
69 int ce_srctype; /* type/unit of element #1 */
70 int ce_srcunit;
71 int ce_fdsttype; /* type/unit of element #2 */
72 int ce_fdstunit;
73 int ce_sdsttype; /* type/unit of element #3 */
74 int ce_sdstunit;
75 int ce_flags;
76};
77#define CE_INVERT1 1
78#define CE_INVERT2 2
79
80
81/*
82 * CHIOPOSITION
83 * move the transport element (robot arm) to a specific element.
84 */
85struct changer_position {
86 int cp_type;
87 int cp_unit;
88 int cp_flags;
89};
90#define CP_INVERT 1
91
92
93/*
94 * CHIOGSTATUS
95 * get element status for all elements of a specific type
96 */
97struct changer_element_status {
98 int ces_type;
99 unsigned char *ces_data;
100};
101#define CESTATUS_FULL 0x01 /* full */
102#define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */
103#define CESTATUS_EXCEPT 0x04 /* error condition */
104#define CESTATUS_ACCESS 0x08 /* access allowed */
105#define CESTATUS_EXENAB 0x10 /* element can export media */
106#define CESTATUS_INENAB 0x20 /* element can import media */
107
108
109/*
110 * CHIOGELEM
 111 * get more detailed status information for a single element
112 */
113struct changer_get_element {
114 int cge_type; /* type/unit */
115 int cge_unit;
116 int cge_status; /* status */
117 int cge_errno; /* errno */
118 int cge_srctype; /* source element of the last move/exchange */
119 int cge_srcunit;
120 int cge_id; /* scsi id (for data transfer elements) */
121 int cge_lun; /* scsi lun (for data transfer elements) */
122 char cge_pvoltag[36]; /* primary volume tag */
123 char cge_avoltag[36]; /* alternate volume tag */
124 int cge_flags;
125};
126/* flags */
127#define CGE_ERRNO 0x01 /* errno available */
128#define CGE_INVERT 0x02 /* media inverted */
129#define CGE_SRC 0x04 /* media src available */
130#define CGE_IDLUN 0x08 /* ID+LUN available */
131#define CGE_PVOLTAG 0x10 /* primary volume tag available */
132#define CGE_AVOLTAG 0x20 /* alternate volume tag available */
133
134
135/*
136 * CHIOSVOLTAG
137 * set volume tag
138 */
139struct changer_set_voltag {
140 int csv_type; /* type/unit */
141 int csv_unit;
142 char csv_voltag[36]; /* volume tag */
143 int csv_flags;
144};
145#define CSV_PVOLTAG 0x01 /* primary volume tag */
146#define CSV_AVOLTAG 0x02 /* alternate volume tag */
147#define CSV_CLEARTAG 0x04 /* clear volume tag */
148
149/* ioctls */
150#define CHIOMOVE _IOW('c', 1,struct changer_move)
151#define CHIOEXCHANGE _IOW('c', 2,struct changer_exchange)
152#define CHIOPOSITION _IOW('c', 3,struct changer_position)
153#define CHIOGPICKER _IOR('c', 4,int) /* not impl. */
154#define CHIOSPICKER _IOW('c', 5,int) /* not impl. */
155#define CHIOGPARAMS _IOR('c', 6,struct changer_params)
156#define CHIOGSTATUS _IOW('c', 8,struct changer_element_status)
157#define CHIOGELEM _IOW('c',16,struct changer_get_element)
158#define CHIOINITELEM _IO('c',17)
159#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag)
160#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params)
161
162/* ---------------------------------------------------------------------- */
163
164/*
165 * Local variables:
166 * c-basic-offset: 8
167 * End:
168 */
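
For orientation, a minimal user-space sketch of driving the new changer ioctls; the /dev/sch0 node name and the slot/drive indices are illustrative assumptions, not something this header defines:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/chio.h>

int main(void)
{
	struct changer_params p;
	struct changer_move mv;
	int fd = open("/dev/sch0", O_RDWR);	/* assumed ch device node */

	if (fd < 0 || ioctl(fd, CHIOGPARAMS, &p) < 0)
		return 1;
	printf("%d slots, %d drives\n", p.cp_nslots, p.cp_ndrives);

	mv.cm_fromtype = CHET_ST;		/* source: storage slot 0 */
	mv.cm_fromunit = 0;
	mv.cm_totype   = CHET_DT;		/* destination: drive 0 */
	mv.cm_tounit   = 0;
	mv.cm_flags    = 0;
	if (ioctl(fd, CHIOMOVE, &mv) < 0)
		perror("CHIOMOVE");

	close(fd);
	return 0;
}
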
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
new file mode 100644
index 000000000000..0008e2ad0c9f
--- /dev/null
+++ b/include/linux/dm9000.h
@@ -0,0 +1,36 @@
1/* include/linux/dm9000.h
2 *
3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Header file for dm9000 platform data
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14#ifndef __DM9000_PLATFORM_DATA
15#define __DM9000_PLATFORM_DATA __FILE__
16
17/* IO control flags */
18
19#define DM9000_PLATF_8BITONLY (0x0001)
20#define DM9000_PLATF_16BITONLY (0x0002)
21#define DM9000_PLATF_32BITONLY (0x0004)
22
 23/* platform data for the platform device structure's platform_data field */
24
25struct dm9000_plat_data {
26 unsigned int flags;
27
28 /* allow replacement IO routines */
29
30 void (*inblk)(void __iomem *reg, void *data, int len);
31 void (*outblk)(void __iomem *reg, void *data, int len);
32 void (*dumpblk)(void __iomem *reg, int len);
33};
34
35#endif /* __DM9000_PLATFORM_DATA */
36
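
A hedged board-file sketch of how a platform might hand this structure to the driver; the addresses, IRQ number and the "dm9000" device name are assumptions made for illustration:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/dm9000.h>

static struct resource dm9000_resources[] = {
	{ .start = 0x20000000, .end = 0x20000003, .flags = IORESOURCE_MEM },
	{ .start = 0x20000004, .end = 0x20000007, .flags = IORESOURCE_MEM },
	{ .start = 42,         .end = 42,         .flags = IORESOURCE_IRQ },
};

static struct dm9000_plat_data dm9000_pdata = {
	.flags = DM9000_PLATF_16BITONLY,	/* bus wired for 16-bit IO only */
	/* inblk/outblk/dumpblk left NULL so the driver can use its defaults */
};

static struct platform_device dm9000_device = {
	.name          = "dm9000",
	.id            = 0,
	.num_resources = ARRAY_SIZE(dm9000_resources),
	.resource      = dm9000_resources,
	.dev           = { .platform_data = &dm9000_pdata },
};
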
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 806c305332c1..2d80cc761a15 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -14,7 +14,12 @@ enum dma_data_direction {
14}; 14};
15 15
16#define DMA_64BIT_MASK 0xffffffffffffffffULL 16#define DMA_64BIT_MASK 0xffffffffffffffffULL
17#define DMA_40BIT_MASK 0x000000ffffffffffULL
18#define DMA_39BIT_MASK 0x0000007fffffffffULL
17#define DMA_32BIT_MASK 0x00000000ffffffffULL 19#define DMA_32BIT_MASK 0x00000000ffffffffULL
20#define DMA_31BIT_MASK 0x000000007fffffffULL
21#define DMA_30BIT_MASK 0x000000003fffffffULL
22#define DMA_29BIT_MASK 0x000000001fffffffULL
18 23
19#include <asm/dma-mapping.h> 24#include <asm/dma-mapping.h>
20 25
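
The new intermediate masks are for devices that can address more than 32 but fewer than 64 bits; a hedged sketch of the usual fallback pattern (the 40-bit capability here is an assumption):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* Try the device's full 40-bit range first, then fall back to 32-bit. */
	if (pci_set_dma_mask(pdev, DMA_40BIT_MASK) &&
	    pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;
	return 0;
}
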
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 503194e62fe1..ed2927ef1ff7 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * 3 *
4 * Copyright (C) 1999-2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -41,6 +41,7 @@
41#define LMI_NONE 1 /* No LMI, all PVCs are static */ 41#define LMI_NONE 1 /* No LMI, all PVCs are static */
42#define LMI_ANSI 2 /* ANSI Annex D */ 42#define LMI_ANSI 2 /* ANSI Annex D */
43#define LMI_CCITT 3 /* ITU-T Annex A */ 43#define LMI_CCITT 3 /* ITU-T Annex A */
44#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */
44 45
45#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ 46#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */
46#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ 47#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */
@@ -89,6 +90,7 @@ typedef struct pvc_device_struct {
89 unsigned int deleted: 1; 90 unsigned int deleted: 1;
90 unsigned int fecn: 1; 91 unsigned int fecn: 1;
91 unsigned int becn: 1; 92 unsigned int becn: 1;
93 unsigned int bandwidth; /* Cisco LMI reporting only */
92 }state; 94 }state;
93}pvc_device; 95}pvc_device;
94 96
diff --git a/include/linux/if.h b/include/linux/if.h
index d73a9d62f208..ce627d9092ef 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -33,7 +33,7 @@
33#define IFF_LOOPBACK 0x8 /* is a loopback net */ 33#define IFF_LOOPBACK 0x8 /* is a loopback net */
34#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */ 34#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */ 35#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
36#define IFF_RUNNING 0x40 /* resources allocated */ 36#define IFF_RUNNING 0x40 /* interface running and carrier ok */
37#define IFF_NOARP 0x80 /* no ARP protocol */ 37#define IFF_NOARP 0x80 /* no ARP protocol */
38#define IFF_PROMISC 0x100 /* receive all packets */ 38#define IFF_PROMISC 0x100 /* receive all packets */
39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/ 39#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 8438c68591f9..31e7cedd9f84 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -81,6 +81,7 @@
81#ifdef __KERNEL__ 81#ifdef __KERNEL__
82#include <linux/config.h> 82#include <linux/config.h>
83#include <linux/types.h> 83#include <linux/types.h>
84#include <net/request_sock.h>
84#include <net/sock.h> 85#include <net/sock.h>
85#include <linux/igmp.h> 86#include <linux/igmp.h>
86#include <net/flow.h> 87#include <net/flow.h>
@@ -107,6 +108,26 @@ struct ip_options {
107 108
108#define optlength(opt) (sizeof(struct ip_options) + opt->optlen) 109#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
109 110
111struct inet_request_sock {
112 struct request_sock req;
113 u32 loc_addr;
114 u32 rmt_addr;
115 u16 rmt_port;
116 u16 snd_wscale : 4,
117 rcv_wscale : 4,
118 tstamp_ok : 1,
119 sack_ok : 1,
120 wscale_ok : 1,
121 ecn_ok : 1,
122 acked : 1;
123 struct ip_options *opt;
124};
125
126static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
127{
128 return (struct inet_request_sock *)sk;
129}
130
110struct ipv6_pinfo; 131struct ipv6_pinfo;
111 132
112struct inet_sock { 133struct inet_sock {
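
The inet_rsk() helper relies on struct inet_request_sock being the first member of the protocol-specific request, so the pointer cast is safe. A hedged sketch of the idiom (the function name and field values are illustrative):

static void example_init_req(struct request_sock *req,
			     u32 saddr, u32 daddr, u16 rport)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->loc_addr = daddr;		/* local address the SYN arrived on */
	ireq->rmt_addr = saddr;		/* remote peer address */
	ireq->rmt_port = rport;
	ireq->tstamp_ok = 0;
	ireq->sack_ok   = 0;
	ireq->wscale_ok = 0;
	ireq->ecn_ok    = 0;
	ireq->acked     = 0;
	ireq->opt       = NULL;
}
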
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ab0d0efbf240..6fcd6a0ade24 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -193,6 +193,19 @@ struct inet6_skb_parm {
193 193
194#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 194#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
195 195
196struct tcp6_request_sock {
197 struct tcp_request_sock req;
198 struct in6_addr loc_addr;
199 struct in6_addr rmt_addr;
200 struct sk_buff *pktopts;
201 int iif;
202};
203
204static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk)
205{
206 return (struct tcp6_request_sock *)sk;
207}
208
196/** 209/**
197 * struct ipv6_pinfo - ipv6 private area 210 * struct ipv6_pinfo - ipv6 private area
198 * 211 *
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b009f801e7c5..6cd9ba63563b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -421,6 +421,7 @@ extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
421extern unsigned int ata_dev_classify(struct ata_taskfile *tf); 421extern unsigned int ata_dev_classify(struct ata_taskfile *tf);
422extern void ata_dev_id_string(u16 *id, unsigned char *s, 422extern void ata_dev_id_string(u16 *id, unsigned char *s,
423 unsigned int ofs, unsigned int len); 423 unsigned int ofs, unsigned int len);
424extern void ata_dev_config(struct ata_port *ap, unsigned int i);
424extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 425extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
425extern void ata_bmdma_start (struct ata_queued_cmd *qc); 426extern void ata_bmdma_start (struct ata_queued_cmd *qc);
426extern void ata_bmdma_stop(struct ata_port *ap); 427extern void ata_bmdma_stop(struct ata_port *ap);
diff --git a/include/linux/major.h b/include/linux/major.h
index 4b62c42b842c..e36a46702d94 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -100,6 +100,7 @@
100#define I2O_MAJOR 80 /* 80->87 */ 100#define I2O_MAJOR 80 /* 80->87 */
101 101
102#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */ 102#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */
103#define SCSI_CHANGER_MAJOR 86
103 104
104#define IDE6_MAJOR 88 105#define IDE6_MAJOR 88
105#define IDE7_MAJOR 89 106#define IDE7_MAJOR 89
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index b2738ac8bc99..e38407a23d04 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,7 +156,7 @@ struct netlink_notify
156}; 156};
157 157
158static __inline__ struct nlmsghdr * 158static __inline__ struct nlmsghdr *
159__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len) 159__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
160{ 160{
161 struct nlmsghdr *nlh; 161 struct nlmsghdr *nlh;
162 int size = NLMSG_LENGTH(len); 162 int size = NLMSG_LENGTH(len);
@@ -164,15 +164,31 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
164 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size)); 164 nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
165 nlh->nlmsg_type = type; 165 nlh->nlmsg_type = type;
166 nlh->nlmsg_len = size; 166 nlh->nlmsg_len = size;
167 nlh->nlmsg_flags = 0; 167 nlh->nlmsg_flags = flags;
168 nlh->nlmsg_pid = pid; 168 nlh->nlmsg_pid = pid;
169 nlh->nlmsg_seq = seq; 169 nlh->nlmsg_seq = seq;
170 return nlh; 170 return nlh;
171} 171}
172 172
173#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
174({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \
175 goto nlmsg_failure; \
176 __nlmsg_put(skb, pid, seq, type, len, flags); })
177
173#define NLMSG_PUT(skb, pid, seq, type, len) \ 178#define NLMSG_PUT(skb, pid, seq, type, len) \
174({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \ 179 NLMSG_NEW(skb, pid, seq, type, len, 0)
175 __nlmsg_put(skb, pid, seq, type, len); }) 180
181#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
182 NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
183 (cb)->nlh->nlmsg_seq, type, len, flags)
184
185#define NLMSG_END(skb, nlh) \
186({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
187 (skb)->len; })
188
189#define NLMSG_CANCEL(skb, nlh) \
190({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
191 -1; })
176 192
177extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 193extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
178 struct nlmsghdr *nlh, 194 struct nlmsghdr *nlh,
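
A hedged sketch of the pattern these macros are designed for: NLMSG_NEW reserves the header (jumping to nlmsg_failure when the skb lacks tailroom), NLMSG_END patches in the final length, and the failure path trims the partial message. The function name and zero-length payload are illustrative:

static int example_fill_msg(struct sk_buff *skb, u32 pid, u32 seq,
			    int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;		/* remember start for unwinding */

	nlh = NLMSG_NEW(skb, pid, seq, type, 0, flags);
	/* ... payload and RTA attributes would be appended here ... */
	return NLMSG_END(skb, nlh);

nlmsg_failure:
	skb_trim(skb, b - skb->data);		/* drop the partial message */
	return -1;
}
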
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 91ac97c20777..e68dbf0bf579 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,6 +89,13 @@ enum {
89 RTM_GETANYCAST = 62, 89 RTM_GETANYCAST = 62,
90#define RTM_GETANYCAST RTM_GETANYCAST 90#define RTM_GETANYCAST RTM_GETANYCAST
91 91
92 RTM_NEWNEIGHTBL = 64,
93#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL
94 RTM_GETNEIGHTBL = 66,
95#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL
96 RTM_SETNEIGHTBL,
97#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL
98
92 __RTM_MAX, 99 __RTM_MAX,
93#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) 100#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
94}; 101};
@@ -493,6 +500,106 @@ struct nda_cacheinfo
493 __u32 ndm_refcnt; 500 __u32 ndm_refcnt;
494}; 501};
495 502
503
504/*****************************************************************
505 * Neighbour tables specific messages.
506 *
507 * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
508 * NLM_F_DUMP flag set. Every neighbour table configuration is
509 * spread over multiple messages to avoid running into message
510 * size limits on systems with many interfaces. The first message
511 * in the sequence transports all not device specific data such as
512 * statistics, configuration, and the default parameter set.
513 * This message is followed by 0..n messages carrying device
514 * specific parameter sets.
515 * Although the ordering should be sufficient, NDTA_NAME can be
516 * used to identify sequences. The initial message can be identified
517 * by checking for NDTA_CONFIG. The device specific messages do
518 * not contain this TLV but have NDTPA_IFINDEX set to the
519 * corresponding interface index.
520 *
521 * To change neighbour table attributes, send RTM_SETNEIGHTBL
522 * with NDTA_NAME set. Changeable attribute include NDTA_THRESH[1-3],
523 * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
524 * otherwise. Device specific parameter sets can be changed by
525 * setting NDTPA_IFINDEX to the interface index of the corresponding
526 * device.
527 ****/
528
529struct ndt_stats
530{
531 __u64 ndts_allocs;
532 __u64 ndts_destroys;
533 __u64 ndts_hash_grows;
534 __u64 ndts_res_failed;
535 __u64 ndts_lookups;
536 __u64 ndts_hits;
537 __u64 ndts_rcv_probes_mcast;
538 __u64 ndts_rcv_probes_ucast;
539 __u64 ndts_periodic_gc_runs;
540 __u64 ndts_forced_gc_runs;
541};
542
543enum {
544 NDTPA_UNSPEC,
545 NDTPA_IFINDEX, /* u32, unchangeable */
546 NDTPA_REFCNT, /* u32, read-only */
547 NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */
548 NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */
549 NDTPA_RETRANS_TIME, /* u64, msecs */
550 NDTPA_GC_STALETIME, /* u64, msecs */
551 NDTPA_DELAY_PROBE_TIME, /* u64, msecs */
552 NDTPA_QUEUE_LEN, /* u32 */
553 NDTPA_APP_PROBES, /* u32 */
554 NDTPA_UCAST_PROBES, /* u32 */
555 NDTPA_MCAST_PROBES, /* u32 */
556 NDTPA_ANYCAST_DELAY, /* u64, msecs */
557 NDTPA_PROXY_DELAY, /* u64, msecs */
558 NDTPA_PROXY_QLEN, /* u32 */
559 NDTPA_LOCKTIME, /* u64, msecs */
560 __NDTPA_MAX
561};
562#define NDTPA_MAX (__NDTPA_MAX - 1)
563
564struct ndtmsg
565{
566 __u8 ndtm_family;
567 __u8 ndtm_pad1;
568 __u16 ndtm_pad2;
569};
570
571struct ndt_config
572{
573 __u16 ndtc_key_len;
574 __u16 ndtc_entry_size;
575 __u32 ndtc_entries;
576 __u32 ndtc_last_flush; /* delta to now in msecs */
577 __u32 ndtc_last_rand; /* delta to now in msecs */
578 __u32 ndtc_hash_rnd;
579 __u32 ndtc_hash_mask;
580 __u32 ndtc_hash_chain_gc;
581 __u32 ndtc_proxy_qlen;
582};
583
584enum {
585 NDTA_UNSPEC,
586 NDTA_NAME, /* char *, unchangeable */
587 NDTA_THRESH1, /* u32 */
588 NDTA_THRESH2, /* u32 */
589 NDTA_THRESH3, /* u32 */
590 NDTA_CONFIG, /* struct ndt_config, read-only */
591 NDTA_PARMS, /* nested TLV NDTPA_* */
592 NDTA_STATS, /* struct ndt_stats, read-only */
593 NDTA_GC_INTERVAL, /* u64, msecs */
594 __NDTA_MAX
595};
596#define NDTA_MAX (__NDTA_MAX - 1)
597
598#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \
599 NLMSG_ALIGN(sizeof(struct ndtmsg))))
600#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))
601
602
496/**** 603/****
497 * General form of address family dependent message. 604 * General form of address family dependent message.
498 ****/ 605 ****/
@@ -789,6 +896,75 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
789({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \ 896({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
790 goto rtattr_failure; \ 897 goto rtattr_failure; \
791 memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); }) 898 memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); })
899
900#define RTA_PUT_U8(skb, attrtype, value) \
901({ u8 _tmp = (value); \
902 RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
903
904#define RTA_PUT_U16(skb, attrtype, value) \
905({ u16 _tmp = (value); \
906 RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
907
908#define RTA_PUT_U32(skb, attrtype, value) \
909({ u32 _tmp = (value); \
910 RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
911
912#define RTA_PUT_U64(skb, attrtype, value) \
913({ u64 _tmp = (value); \
914 RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
915
916#define RTA_PUT_SECS(skb, attrtype, value) \
917 RTA_PUT_U64(skb, attrtype, (value) / HZ)
918
919#define RTA_PUT_MSECS(skb, attrtype, value) \
920 RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
921
922#define RTA_PUT_STRING(skb, attrtype, value) \
923 RTA_PUT(skb, attrtype, strlen(value) + 1, value)
924
925#define RTA_PUT_FLAG(skb, attrtype) \
926 RTA_PUT(skb, attrtype, 0, NULL);
927
928#define RTA_NEST(skb, type) \
929({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \
930 RTA_PUT(skb, type, 0, NULL); \
931 __start; })
932
933#define RTA_NEST_END(skb, start) \
934({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
935 (skb)->len; })
936
937#define RTA_NEST_CANCEL(skb, start) \
938({ if (start) \
939 skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
940 -1; })
941
942#define RTA_GET_U8(rta) \
943({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
944 goto rtattr_failure; \
945 *(u8 *) RTA_DATA(rta); })
946
947#define RTA_GET_U16(rta) \
948({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
949 goto rtattr_failure; \
950 *(u16 *) RTA_DATA(rta); })
951
952#define RTA_GET_U32(rta) \
953({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
954 goto rtattr_failure; \
955 *(u32 *) RTA_DATA(rta); })
956
957#define RTA_GET_U64(rta) \
958({ u64 _tmp; \
959 if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
960 goto rtattr_failure; \
961 memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
962 _tmp; })
963
964#define RTA_GET_FLAG(rta) (!!(rta))
965
966#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
967#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
792 968
793static inline struct rtattr * 969static inline struct rtattr *
794__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen) 970__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
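
A hedged sketch of how the typed RTA_PUT helpers and the nesting macros compose when dumping a neighbour-table parameter set; the attribute values are placeholders, and any overflow unwinds through rtattr_failure:

static int example_fill_parms(struct sk_buff *skb)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);	/* open the nested TLV */
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, 3);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, HZ / 2);
	RTA_NEST_END(skb, nest);		/* fix up the nest length */
	return 0;

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);	/* trim anything partially added */
}
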
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7d66385ae750..76cf7e60216c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,7 @@ extern int kmem_cache_shrink(kmem_cache_t *);
64extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); 64extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
65extern void kmem_cache_free(kmem_cache_t *, void *); 65extern void kmem_cache_free(kmem_cache_t *, void *);
66extern unsigned int kmem_cache_size(kmem_cache_t *); 66extern unsigned int kmem_cache_size(kmem_cache_t *);
67extern const char *kmem_cache_name(kmem_cache_t *);
67extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags); 68extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
68 69
69/* Size description struct for general caches. */ 70/* Size description struct for general caches. */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14a55e3e3a50..97a7c9e03df5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -230,6 +230,17 @@ struct tcp_options_received {
230 __u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ 230 __u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
231}; 231};
232 232
233struct tcp_request_sock {
234 struct inet_request_sock req;
235 __u32 rcv_isn;
236 __u32 snt_isn;
237};
238
239static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
240{
241 return (struct tcp_request_sock *)req;
242}
243
233struct tcp_sock { 244struct tcp_sock {
234 /* inet_sock has to be the first member of tcp_sock */ 245 /* inet_sock has to be the first member of tcp_sock */
235 struct inet_sock inet; 246 struct inet_sock inet;
@@ -368,22 +379,7 @@ struct tcp_sock {
368 379
369 __u32 total_retrans; /* Total retransmits for entire connection */ 380 __u32 total_retrans; /* Total retransmits for entire connection */
370 381
371 /* The syn_wait_lock is necessary only to avoid proc interface having 382 struct request_sock_queue accept_queue; /* FIFO of established children */
372 * to grab the main lock sock while browsing the listening hash
373 * (otherwise it's deadlock prone).
374 * This lock is acquired in read mode only from listening_get_next()
375 * and it's acquired in write mode _only_ from code that is actively
376 * changing the syn_wait_queue. All readers that are holding
377 * the master sock lock don't need to grab this lock in read mode
378 * too as the syn_wait_queue writes are always protected from
379 * the main sock lock.
380 */
381 rwlock_t syn_wait_lock;
382 struct tcp_listen_opt *listen_opt;
383
384 /* FIFO of established children */
385 struct open_request *accept_queue;
386 struct open_request *accept_queue_tail;
387 383
388 unsigned int keepalive_time; /* time before keep alive takes place */ 384 unsigned int keepalive_time; /* time before keep alive takes place */
389 unsigned int keepalive_intvl; /* time interval between keep alive probes */ 385 unsigned int keepalive_intvl; /* time interval between keep alive probes */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2f51f2b6562e..ae485f9c916e 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -1,10 +1,10 @@
1/* 1/*
2 * This file define a set of standard wireless extensions 2 * This file define a set of standard wireless extensions
3 * 3 *
4 * Version : 17 21.6.04 4 * Version : 18 12.3.05
5 * 5 *
6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> 6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
7 * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved. 7 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
8 */ 8 */
9 9
10#ifndef _LINUX_WIRELESS_H 10#ifndef _LINUX_WIRELESS_H
@@ -82,7 +82,7 @@
82 * (there is some stuff that will be added in the future...) 82 * (there is some stuff that will be added in the future...)
83 * I just plan to increment with each new version. 83 * I just plan to increment with each new version.
84 */ 84 */
85#define WIRELESS_EXT 17 85#define WIRELESS_EXT 18
86 86
87/* 87/*
88 * Changes : 88 * Changes :
@@ -182,6 +182,21 @@
182 * - Document (struct iw_quality *)->updated, add new flags (INVALID) 182 * - Document (struct iw_quality *)->updated, add new flags (INVALID)
183 * - Wireless Event capability in struct iw_range 183 * - Wireless Event capability in struct iw_range
184 * - Add support for relative TxPower (yick !) 184 * - Add support for relative TxPower (yick !)
185 *
186 * V17 to V18 (From Jouni Malinen <jkmaline@cc.hut.fi>)
187 * ----------
188 * - Add support for WPA/WPA2
189 * - Add extended encoding configuration (SIOCSIWENCODEEXT and
190 * SIOCGIWENCODEEXT)
191 * - Add SIOCSIWGENIE/SIOCGIWGENIE
192 * - Add SIOCSIWMLME
193 * - Add SIOCSIWPMKSA
194 * - Add struct iw_range bit field for supported encoding capabilities
195 * - Add optional scan request parameters for SIOCSIWSCAN
196 * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA
197 * related parameters (extensible up to 4096 parameter values)
198 * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE,
199 * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND
185 */ 200 */
186 201
187/**************************** CONSTANTS ****************************/ 202/**************************** CONSTANTS ****************************/
@@ -256,6 +271,30 @@
256#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */ 271#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */
257#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */ 272#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */
258 273
 274/* WPA : Generic IEEE 802.11 information element (e.g., for WPA/RSN/WMM).
275 * This ioctl uses struct iw_point and data buffer that includes IE id and len
276 * fields. More than one IE may be included in the request. Setting the generic
277 * IE to empty buffer (len=0) removes the generic IE from the driver. Drivers
278 * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers
279 * are required to report the used IE as a wireless event, e.g., when
280 * associating with an AP. */
281#define SIOCSIWGENIE 0x8B30 /* set generic IE */
282#define SIOCGIWGENIE 0x8B31 /* get generic IE */
283
284/* WPA : IEEE 802.11 MLME requests */
285#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses
286 * struct iw_mlme */
287/* WPA : Authentication mode parameters */
288#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */
289#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */
290
291/* WPA : Extended version of encoding configuration */
292#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */
293#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */
294
295/* WPA2 : PMKSA cache management */
296#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */
297
259/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */ 298/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */
260 299
261/* These 32 ioctl are wireless device private, for 16 commands. 300/* These 32 ioctl are wireless device private, for 16 commands.
@@ -297,6 +336,34 @@
297#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */ 336#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */
298#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */ 337#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */
299#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */ 338#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */
339#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..)
340 * (scan results); This includes id and
341 * length fields. One IWEVGENIE may
342 * contain more than one IE. Scan
343 * results may contain one or more
344 * IWEVGENIE events. */
345#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure
346 * (struct iw_michaelmicfailure)
347 */
348#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request.
349 * The data includes id and length
350 * fields and may contain more than one
351 * IE. This event is required in
352 * Managed mode if the driver
353 * generates its own WPA/RSN IE. This
354 * should be sent just before
355 * IWEVREGISTERED event for the
356 * association. */
357#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association
358 * Response. The data includes id and
359 * length fields and may contain more
360 * than one IE. This may be sent
361 * between IWEVASSOCREQIE and
362 * IWEVREGISTERED events for the
363 * association. */
364#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN
365 * pre-authentication
366 * (struct iw_pmkid_cand) */
300 367
301#define IWEVFIRST 0x8C00 368#define IWEVFIRST 0x8C00
302 369
@@ -432,12 +499,94 @@
432#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */ 499#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */
433#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */ 500#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */
434#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */ 501#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */
502/* struct iw_scan_req scan_type */
503#define IW_SCAN_TYPE_ACTIVE 0
504#define IW_SCAN_TYPE_PASSIVE 1
435/* Maximum size of returned data */ 505/* Maximum size of returned data */
436#define IW_SCAN_MAX_DATA 4096 /* In bytes */ 506#define IW_SCAN_MAX_DATA 4096 /* In bytes */
437 507
438/* Max number of char in custom event - use multiple of them if needed */ 508/* Max number of char in custom event - use multiple of them if needed */
439#define IW_CUSTOM_MAX 256 /* In bytes */ 509#define IW_CUSTOM_MAX 256 /* In bytes */
440 510
511/* Generic information element */
512#define IW_GENERIC_IE_MAX 1024
513
514/* MLME requests (SIOCSIWMLME / struct iw_mlme) */
515#define IW_MLME_DEAUTH 0
516#define IW_MLME_DISASSOC 1
517
518/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */
519#define IW_AUTH_INDEX 0x0FFF
520#define IW_AUTH_FLAGS 0xF000
521/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 4095)
522 * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the
523 * parameter that is being set/get to; value will be read/written to
524 * struct iw_param value field) */
525#define IW_AUTH_WPA_VERSION 0
526#define IW_AUTH_CIPHER_PAIRWISE 1
527#define IW_AUTH_CIPHER_GROUP 2
528#define IW_AUTH_KEY_MGMT 3
529#define IW_AUTH_TKIP_COUNTERMEASURES 4
530#define IW_AUTH_DROP_UNENCRYPTED 5
531#define IW_AUTH_80211_AUTH_ALG 6
532#define IW_AUTH_WPA_ENABLED 7
533#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8
534#define IW_AUTH_ROAMING_CONTROL 9
535#define IW_AUTH_PRIVACY_INVOKED 10
536
537/* IW_AUTH_WPA_VERSION values (bit field) */
538#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001
539#define IW_AUTH_WPA_VERSION_WPA 0x00000002
540#define IW_AUTH_WPA_VERSION_WPA2 0x00000004
541
 542/* IW_AUTH_CIPHER_PAIRWISE and IW_AUTH_CIPHER_GROUP values (bit field) */
543#define IW_AUTH_CIPHER_NONE 0x00000001
544#define IW_AUTH_CIPHER_WEP40 0x00000002
545#define IW_AUTH_CIPHER_TKIP 0x00000004
546#define IW_AUTH_CIPHER_CCMP 0x00000008
547#define IW_AUTH_CIPHER_WEP104 0x00000010
548
549/* IW_AUTH_KEY_MGMT values (bit field) */
550#define IW_AUTH_KEY_MGMT_802_1X 1
551#define IW_AUTH_KEY_MGMT_PSK 2
552
553/* IW_AUTH_80211_AUTH_ALG values (bit field) */
554#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
555#define IW_AUTH_ALG_SHARED_KEY 0x00000002
556#define IW_AUTH_ALG_LEAP 0x00000004
557
558/* IW_AUTH_ROAMING_CONTROL values */
559#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */
560#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming
561 * control */
562
563/* SIOCSIWENCODEEXT definitions */
564#define IW_ENCODE_SEQ_MAX_SIZE 8
565/* struct iw_encode_ext ->alg */
566#define IW_ENCODE_ALG_NONE 0
567#define IW_ENCODE_ALG_WEP 1
568#define IW_ENCODE_ALG_TKIP 2
569#define IW_ENCODE_ALG_CCMP 3
570/* struct iw_encode_ext ->ext_flags */
571#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
572#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
573#define IW_ENCODE_EXT_GROUP_KEY 0x00000004
574#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008
575
576/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */
577#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */
578#define IW_MICFAILURE_GROUP 0x00000004
579#define IW_MICFAILURE_PAIRWISE 0x00000008
580#define IW_MICFAILURE_STAKEY 0x00000010
581#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported)
582 */
583
584/* Bit field values for enc_capa in struct iw_range */
585#define IW_ENC_CAPA_WPA 0x00000001
586#define IW_ENC_CAPA_WPA2 0x00000002
587#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004
588#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008
589
441/* Event capability macros - in (struct iw_range *)->event_capa 590/* Event capability macros - in (struct iw_range *)->event_capa
442 * Because we have more than 32 possible events, we use an array of 591 * Because we have more than 32 possible events, we use an array of
443 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ 592 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
@@ -546,6 +695,132 @@ struct iw_thrspy
546 struct iw_quality high; /* High threshold */ 695 struct iw_quality high; /* High threshold */
547}; 696};
548 697
698/*
699 * Optional data for scan request
700 *
701 * Note: these optional parameters are controlling parameters for the
702 * scanning behavior, these do not apply to getting scan results
703 * (SIOCGIWSCAN). Drivers are expected to keep a local BSS table and
 704 * provide merged results with all BSSes even if the previous scan
705 * request limited scanning to a subset, e.g., by specifying an SSID.
706 * Especially, scan results are required to include an entry for the
707 * current BSS if the driver is in Managed mode and associated with an AP.
708 */
709struct iw_scan_req
710{
711 __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */
712 __u8 essid_len;
713 __u8 num_channels; /* num entries in channel_list;
714 * 0 = scan all allowed channels */
715 __u8 flags; /* reserved as padding; use zero, this may
716 * be used in the future for adding flags
717 * to request different scan behavior */
718 struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or
719 * individual address of a specific BSS */
720
721 /*
722 * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using
723 * the current ESSID. This allows scan requests for specific ESSID
724 * without having to change the current ESSID and potentially breaking
725 * the current association.
726 */
727 __u8 essid[IW_ESSID_MAX_SIZE];
728
729 /*
730 * Optional parameters for changing the default scanning behavior.
731 * These are based on the MLME-SCAN.request from IEEE Std 802.11.
732 * TU is 1.024 ms. If these are set to 0, driver is expected to use
733 * reasonable default values. min_channel_time defines the time that
734 * will be used to wait for the first reply on each channel. If no
735 * replies are received, next channel will be scanned after this. If
736 * replies are received, total time waited on the channel is defined by
737 * max_channel_time.
738 */
739 __u32 min_channel_time; /* in TU */
740 __u32 max_channel_time; /* in TU */
741
742 struct iw_freq channel_list[IW_MAX_FREQUENCIES];
743};
744
745/* ------------------------- WPA SUPPORT ------------------------- */
746
747/*
748 * Extended data structure for get/set encoding (this is used with
749 * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_*
750 * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and
751 * only the data contents changes (key data -> this structure, including
752 * key data).
753 *
754 * If the new key is the first group key, it will be set as the default
755 * TX key. Otherwise, default TX key index is only changed if
756 * IW_ENCODE_EXT_SET_TX_KEY flag is set.
757 *
758 * Key will be changed with SIOCSIWENCODEEXT in all cases except for
759 * special "change TX key index" operation which is indicated by setting
760 * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY.
761 *
762 * tx_seq/rx_seq are only used when respective
763 * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal
764 * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start
765 * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally
766 * used only by an Authenticator (AP or an IBSS station) to get the
767 * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and
768 * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for
769 * debugging/testing.
770 */
771struct iw_encode_ext
772{
773 __u32 ext_flags; /* IW_ENCODE_EXT_* */
774 __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
775 __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
776 struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
777 * (group) keys or unicast address for
778 * individual keys */
779 __u16 alg; /* IW_ENCODE_ALG_* */
780 __u16 key_len;
781 __u8 key[0];
782};
783
784/* SIOCSIWMLME data */
785struct iw_mlme
786{
787 __u16 cmd; /* IW_MLME_* */
788 __u16 reason_code;
789 struct sockaddr addr;
790};
791
792/* SIOCSIWPMKSA data */
793#define IW_PMKSA_ADD 1
794#define IW_PMKSA_REMOVE 2
795#define IW_PMKSA_FLUSH 3
796
797#define IW_PMKID_LEN 16
798
799struct iw_pmksa
800{
801 __u32 cmd; /* IW_PMKSA_* */
802 struct sockaddr bssid;
803 __u8 pmkid[IW_PMKID_LEN];
804};
805
806/* IWEVMICHAELMICFAILURE data */
807struct iw_michaelmicfailure
808{
809 __u32 flags;
810 struct sockaddr src_addr;
811 __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
812};
813
814/* IWEVPMKIDCAND data */
 815#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RSN pre-authentication enabled */
816struct iw_pmkid_cand
817{
818 __u32 flags; /* IW_PMKID_CAND_* */
819 __u32 index; /* the smaller the index, the higher the
820 * priority */
821 struct sockaddr bssid;
822};
823
549/* ------------------------ WIRELESS STATS ------------------------ */ 824/* ------------------------ WIRELESS STATS ------------------------ */
550/* 825/*
551 * Wireless statistics (used for /proc/net/wireless) 826 * Wireless statistics (used for /proc/net/wireless)
@@ -725,6 +1000,8 @@ struct iw_range
725 struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */ 1000 struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
726 /* Note : this frequency list doesn't need to fit channel numbers, 1001 /* Note : this frequency list doesn't need to fit channel numbers,
727 * because each entry contain its channel index */ 1002 * because each entry contain its channel index */
1003
1004 __u32 enc_capa; /* IW_ENC_CAPA_* bit field */
728}; 1005};
729 1006
730/* 1007/*
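
A hedged user-space sketch of driving the new SIOCSIWAUTH request; the interface name, socket family and helper name are assumptions, and error handling is trimmed:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

static int example_set_auth(const char *ifname, int param, __u32 value)
{
	struct iwreq iwr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
	iwr.u.param.flags = param & IW_AUTH_INDEX;	/* which parameter */
	iwr.u.param.value = value;

	ret = ioctl(fd, SIOCSIWAUTH, &iwr);
	close(fd);
	return ret;
}

/* e.g. example_set_auth("wlan0", IW_AUTH_WPA_ENABLED, 1); */
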
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index fd2ef742a9fd..d68391a9b9f3 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -174,6 +174,8 @@ enum xfrm_attr_type_t {
174 XFRMA_ALG_COMP, /* struct xfrm_algo */ 174 XFRMA_ALG_COMP, /* struct xfrm_algo */
175 XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */ 175 XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */
176 XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */ 176 XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */
177 XFRMA_SA,
178 XFRMA_POLICY,
177 __XFRMA_MAX 179 __XFRMA_MAX
178 180
179#define XFRMA_MAX (__XFRMA_MAX - 1) 181#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -257,5 +259,7 @@ struct xfrm_usersa_flush {
257 259
258#define XFRMGRP_ACQUIRE 1 260#define XFRMGRP_ACQUIRE 1
259#define XFRMGRP_EXPIRE 2 261#define XFRMGRP_EXPIRE 2
262#define XFRMGRP_SA 4
263#define XFRMGRP_POLICY 8
260 264
261#endif /* _LINUX_XFRM_H */ 265#endif /* _LINUX_XFRM_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4f33bbc21e7f..89809891e5ab 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -65,11 +65,10 @@ struct neighbour;
65 65
66struct neigh_parms 66struct neigh_parms
67{ 67{
68 struct net_device *dev;
68 struct neigh_parms *next; 69 struct neigh_parms *next;
69 int (*neigh_setup)(struct neighbour *); 70 int (*neigh_setup)(struct neighbour *);
70 struct neigh_table *tbl; 71 struct neigh_table *tbl;
71 int entries;
72 void *priv;
73 72
74 void *sysctl_table; 73 void *sysctl_table;
75 74
@@ -192,7 +191,6 @@ struct neigh_table
192 atomic_t entries; 191 atomic_t entries;
193 rwlock_t lock; 192 rwlock_t lock;
194 unsigned long last_rand; 193 unsigned long last_rand;
195 struct neigh_parms *parms_list;
196 kmem_cache_t *kmem_cachep; 194 kmem_cache_t *kmem_cachep;
197 struct neigh_statistics *stats; 195 struct neigh_statistics *stats;
198 struct neighbour **hash_buckets; 196 struct neighbour **hash_buckets;
@@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
252extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 250extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
253extern void neigh_app_ns(struct neighbour *n); 251extern void neigh_app_ns(struct neighbour *n);
254 252
253extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
254extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
255
255extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); 256extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
256extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); 257extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
257extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); 258extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 000000000000..72fd6f5e86b1
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,255 @@
1/*
2 * NET Generic infrastructure for Network protocols.
3 *
4 * Definitions for request_sock
5 *
6 * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 *
8 * From code originally in include/net/tcp.h
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#ifndef _REQUEST_SOCK_H
16#define _REQUEST_SOCK_H
17
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21
22#include <net/sock.h>
23
24struct request_sock;
25struct sk_buff;
26struct dst_entry;
27struct proto;
28
29struct request_sock_ops {
30 int family;
31 kmem_cache_t *slab;
32 int obj_size;
33 int (*rtx_syn_ack)(struct sock *sk,
34 struct request_sock *req,
35 struct dst_entry *dst);
36 void (*send_ack)(struct sk_buff *skb,
37 struct request_sock *req);
38 void (*send_reset)(struct sk_buff *skb);
39 void (*destructor)(struct request_sock *req);
40};
41
42/* struct request_sock - mini sock to represent a connection request
43 */
44struct request_sock {
45 struct request_sock *dl_next; /* Must be first member! */
46 u16 mss;
47 u8 retrans;
48 u8 __pad;
49 /* The following two fields can be easily recomputed I think -AK */
50 u32 window_clamp; /* window clamp at creation time */
51 u32 rcv_wnd; /* rcv_wnd offered first time */
52 u32 ts_recent;
53 unsigned long expires;
54 struct request_sock_ops *rsk_ops;
55 struct sock *sk;
56};
57
58static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
59{
60 struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
61
62 if (req != NULL)
63 req->rsk_ops = ops;
64
65 return req;
66}
67
68static inline void __reqsk_free(struct request_sock *req)
69{
70 kmem_cache_free(req->rsk_ops->slab, req);
71}
72
73static inline void reqsk_free(struct request_sock *req)
74{
75 req->rsk_ops->destructor(req);
76 __reqsk_free(req);
77}
78
79extern int sysctl_max_syn_backlog;
80
81/** struct listen_sock - listen state
82 *
83 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
84 */
85struct listen_sock {
86 u8 max_qlen_log;
87 /* 3 bytes hole, try to use */
88 int qlen;
89 int qlen_young;
90 int clock_hand;
91 u32 hash_rnd;
92 struct request_sock *syn_table[0];
93};
94
95/** struct request_sock_queue - queue of request_socks
96 *
97 * @rskq_accept_head - FIFO head of established children
98 * @rskq_accept_tail - FIFO tail of established children
99 * @syn_wait_lock - serializer
100 *
101 * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
102 * lock sock while browsing the listening hash (otherwise it's deadlock prone).
103 *
104 * This lock is acquired in read mode only from listening_get_next() seq_file
105 * op and it's acquired in write mode _only_ from code that is actively
106 * changing rskq_accept_head. All readers that are holding the master sock lock
 107 * don't need to grab this lock in read mode too, as rskq_accept_head
 108 * writes are always protected by the main sock lock.
109 */
110struct request_sock_queue {
111 struct request_sock *rskq_accept_head;
112 struct request_sock *rskq_accept_tail;
113 rwlock_t syn_wait_lock;
114 struct listen_sock *listen_opt;
115};
116
117extern int reqsk_queue_alloc(struct request_sock_queue *queue,
118 const int nr_table_entries);
119
120static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
121{
122 struct listen_sock *lopt;
123
124 write_lock_bh(&queue->syn_wait_lock);
125 lopt = queue->listen_opt;
126 queue->listen_opt = NULL;
127 write_unlock_bh(&queue->syn_wait_lock);
128
129 return lopt;
130}
131
132static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
133{
134 kfree(reqsk_queue_yank_listen_sk(queue));
135}
136
137static inline struct request_sock *
138 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
139{
140 struct request_sock *req = queue->rskq_accept_head;
141
 142 queue->rskq_accept_head = NULL;
143 return req;
144}
145
146static inline int reqsk_queue_empty(struct request_sock_queue *queue)
147{
148 return queue->rskq_accept_head == NULL;
149}
150
151static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
152 struct request_sock *req,
153 struct request_sock **prev_req)
154{
155 write_lock(&queue->syn_wait_lock);
156 *prev_req = req->dl_next;
157 write_unlock(&queue->syn_wait_lock);
158}
159
160static inline void reqsk_queue_add(struct request_sock_queue *queue,
161 struct request_sock *req,
162 struct sock *parent,
163 struct sock *child)
164{
165 req->sk = child;
166 sk_acceptq_added(parent);
167
168 if (queue->rskq_accept_head == NULL)
169 queue->rskq_accept_head = req;
170 else
171 queue->rskq_accept_tail->dl_next = req;
172
173 queue->rskq_accept_tail = req;
174 req->dl_next = NULL;
175}
176
177static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
178{
179 struct request_sock *req = queue->rskq_accept_head;
180
181 BUG_TRAP(req != NULL);
182
183 queue->rskq_accept_head = req->dl_next;
184 if (queue->rskq_accept_head == NULL)
185 queue->rskq_accept_tail = NULL;
186
187 return req;
188}
189
190static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
191 struct sock *parent)
192{
193 struct request_sock *req = reqsk_queue_remove(queue);
194 struct sock *child = req->sk;
195
196 BUG_TRAP(child != NULL);
197
198 sk_acceptq_removed(parent);
199 __reqsk_free(req);
200 return child;
201}
202
203static inline int reqsk_queue_removed(struct request_sock_queue *queue,
204 struct request_sock *req)
205{
206 struct listen_sock *lopt = queue->listen_opt;
207
208 if (req->retrans == 0)
209 --lopt->qlen_young;
210
211 return --lopt->qlen;
212}
213
214static inline int reqsk_queue_added(struct request_sock_queue *queue)
215{
216 struct listen_sock *lopt = queue->listen_opt;
217 const int prev_qlen = lopt->qlen;
218
219 lopt->qlen_young++;
220 lopt->qlen++;
221 return prev_qlen;
222}
223
224static inline int reqsk_queue_len(struct request_sock_queue *queue)
225{
226 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
227}
228
229static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
230{
231 return queue->listen_opt->qlen_young;
232}
233
234static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
235{
236 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
237}
238
239static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
240 u32 hash, struct request_sock *req,
241 unsigned timeout)
242{
243 struct listen_sock *lopt = queue->listen_opt;
244
245 req->expires = jiffies + timeout;
246 req->retrans = 0;
247 req->sk = NULL;
248 req->dl_next = lopt->syn_table[hash];
249
250 write_lock(&queue->syn_wait_lock);
251 lopt->syn_table[hash] = req;
252 write_unlock(&queue->syn_wait_lock);
253}
254
255#endif /* _REQUEST_SOCK_H */
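
For orientation only, here is a minimal sketch (not part of the patch) of how a protocol's accept path can be built on the accessors above; the example_* names and the "caller sleeps" behaviour are illustrative assumptions, everything else is taken from the helpers in this header.

#include <net/request_sock.h>
#include <net/sock.h>

/* Queue a completed child under the listening socket. */
static void example_connection_done(struct sock *parent,
				    struct request_sock_queue *queue,
				    struct request_sock *req,
				    struct sock *child)
{
	/* appends req to the accept queue and bumps the parent's backlog */
	reqsk_queue_add(queue, req, parent, child);
}

/* Hand an established child out to accept(). */
static struct sock *example_accept(struct sock *parent,
				   struct request_sock_queue *queue)
{
	if (reqsk_queue_empty(queue))
		return NULL;		/* a real caller would sleep here */

	/* unlinks the head request, frees it and returns the child sock */
	return reqsk_queue_get_child(queue, parent);
}
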
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b518..7b97405e2dbf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
172 kfree(tp); 172 kfree(tp);
173} 173}
174 174
175static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
176 struct sk_buff_head *list)
177{
178 __skb_queue_tail(list, skb);
179 sch->qstats.backlog += skb->len;
180 sch->bstats.bytes += skb->len;
181 sch->bstats.packets++;
182
183 return NET_XMIT_SUCCESS;
184}
185
186static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
187{
188 return __qdisc_enqueue_tail(skb, sch, &sch->q);
189}
190
191static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
192 struct sk_buff_head *list)
193{
194 struct sk_buff *skb = __skb_dequeue(list);
195
196 if (likely(skb != NULL))
197 sch->qstats.backlog -= skb->len;
198
199 return skb;
200}
201
202static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
203{
204 return __qdisc_dequeue_head(sch, &sch->q);
205}
206
207static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
208 struct sk_buff_head *list)
209{
210 struct sk_buff *skb = __skb_dequeue_tail(list);
211
212 if (likely(skb != NULL))
213 sch->qstats.backlog -= skb->len;
214
215 return skb;
216}
217
218static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
219{
220 return __qdisc_dequeue_tail(sch, &sch->q);
221}
222
223static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
224 struct sk_buff_head *list)
225{
226 __skb_queue_head(list, skb);
227 sch->qstats.backlog += skb->len;
228 sch->qstats.requeues++;
229
230 return NET_XMIT_SUCCESS;
231}
232
233static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
234{
235 return __qdisc_requeue(skb, sch, &sch->q);
236}
237
238static inline void __qdisc_reset_queue(struct Qdisc *sch,
239 struct sk_buff_head *list)
240{
241 /*
242 * We do not know the backlog in bytes of this list; it
243 * is up to the caller to correct it.
244 */
245 skb_queue_purge(list);
246}
247
248static inline void qdisc_reset_queue(struct Qdisc *sch)
249{
250 __qdisc_reset_queue(sch, &sch->q);
251 sch->qstats.backlog = 0;
252}
253
254static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
255 struct sk_buff_head *list)
256{
257 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
258
259 if (likely(skb != NULL)) {
260 unsigned int len = skb->len;
261 kfree_skb(skb);
262 return len;
263 }
264
265 return 0;
266}
267
268static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
269{
270 return __qdisc_queue_drop(sch, &sch->q);
271}
272
273static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
274{
275 kfree_skb(skb);
276 sch->qstats.drops++;
277
278 return NET_XMIT_DROP;
279}
280
281static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
282{
283 sch->qstats.drops++;
284
285#ifdef CONFIG_NET_CLS_POLICE
286 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
287 goto drop;
288
289 return NET_XMIT_SUCCESS;
290
291drop:
292#endif
293 kfree_skb(skb);
294 return NET_XMIT_DROP;
295}
296
175#endif 297#endif
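
As a rough illustration (not from the patch), a byte-unaware FIFO qdisc can be written almost entirely with the new helpers above; FIFO_LIMIT and the example_fifo_* names are stand-ins for whatever a real qdisc keeps in its private data and its Qdisc_ops table.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

#define FIFO_LIMIT 128				/* illustrative packet limit */

static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < FIFO_LIMIT))
		return qdisc_enqueue_tail(skb, sch);	/* updates backlog and bstats */

	return qdisc_drop(skb, sch);			/* frees the skb, bumps drops */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);			/* fixes up the backlog */
}

static unsigned int example_fifo_drop(struct Qdisc *sch)
{
	return qdisc_queue_drop(sch);			/* drop from the tail, return its length */
}

static void example_fifo_reset(struct Qdisc *sch)
{
	qdisc_reset_queue(sch);				/* purge the list, zero the backlog */
}
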
diff --git a/include/net/sock.h b/include/net/sock.h
index a9ef3a6a13f3..e593af5b1ecc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk);
484 484
485extern int sk_wait_data(struct sock *sk, long *timeo); 485extern int sk_wait_data(struct sock *sk, long *timeo);
486 486
487struct request_sock_ops;
488
487/* Networking protocol blocks we attach to sockets. 489/* Networking protocol blocks we attach to sockets.
488 * socket layer -> transport layer interface 490 * socket layer -> transport layer interface
489 * transport -> network interface is defined by struct inet_proto 491 * transport -> network interface is defined by struct inet_proto
@@ -547,6 +549,8 @@ struct proto {
547 kmem_cache_t *slab; 549 kmem_cache_t *slab;
548 unsigned int obj_size; 550 unsigned int obj_size;
549 551
552 struct request_sock_ops *rsk_prot;
553
550 struct module *owner; 554 struct module *owner;
551 555
552 char name[32]; 556 char name[32];
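
A short, hedged sketch of what the new rsk_prot member is for: a protocol points its struct proto at a request_sock_ops table. The tcp_request_sock_ops symbol below is assumed to be defined elsewhere by the protocol; only the fields relevant to this hunk are shown.

#include <net/request_sock.h>
#include <net/sock.h>

extern struct request_sock_ops tcp_request_sock_ops;	/* assumed, provided by the protocol */

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.rsk_prot	= &tcp_request_sock_ops,
};
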
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e101..f730935b824a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/percpu.h> 32#include <linux/percpu.h>
33#include <net/checksum.h> 33#include <net/checksum.h>
34#include <net/request_sock.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <net/snmp.h> 36#include <net/snmp.h>
36#include <net/ip.h> 37#include <net/ip.h>
@@ -563,7 +564,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
563#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */ 564
564 565
565/* sysctl variables for tcp */ 566/* sysctl variables for tcp */
566extern int sysctl_max_syn_backlog;
567extern int sysctl_tcp_timestamps; 567extern int sysctl_tcp_timestamps;
568extern int sysctl_tcp_window_scaling; 568extern int sysctl_tcp_window_scaling;
569extern int sysctl_tcp_sack; 569extern int sysctl_tcp_sack;
@@ -613,74 +613,6 @@ extern atomic_t tcp_memory_allocated;
613extern atomic_t tcp_sockets_allocated; 613extern atomic_t tcp_sockets_allocated;
614extern int tcp_memory_pressure; 614extern int tcp_memory_pressure;
615 615
616struct open_request;
617
618struct or_calltable {
619 int family;
620 int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
621 void (*send_ack) (struct sk_buff *skb, struct open_request *req);
622 void (*destructor) (struct open_request *req);
623 void (*send_reset) (struct sk_buff *skb);
624};
625
626struct tcp_v4_open_req {
627 __u32 loc_addr;
628 __u32 rmt_addr;
629 struct ip_options *opt;
630};
631
632#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
633struct tcp_v6_open_req {
634 struct in6_addr loc_addr;
635 struct in6_addr rmt_addr;
636 struct sk_buff *pktopts;
637 int iif;
638};
639#endif
640
641/* this structure is too big */
642struct open_request {
643 struct open_request *dl_next; /* Must be first member! */
644 __u32 rcv_isn;
645 __u32 snt_isn;
646 __u16 rmt_port;
647 __u16 mss;
648 __u8 retrans;
649 __u8 __pad;
650 __u16 snd_wscale : 4,
651 rcv_wscale : 4,
652 tstamp_ok : 1,
653 sack_ok : 1,
654 wscale_ok : 1,
655 ecn_ok : 1,
656 acked : 1;
657 /* The following two fields can be easily recomputed I think -AK */
658 __u32 window_clamp; /* window clamp at creation time */
659 __u32 rcv_wnd; /* rcv_wnd offered first time */
660 __u32 ts_recent;
661 unsigned long expires;
662 struct or_calltable *class;
663 struct sock *sk;
664 union {
665 struct tcp_v4_open_req v4_req;
666#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
667 struct tcp_v6_open_req v6_req;
668#endif
669 } af;
670};
671
672/* SLAB cache for open requests. */
673extern kmem_cache_t *tcp_openreq_cachep;
674
675#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
676#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
677
678static inline void tcp_openreq_free(struct open_request *req)
679{
680 req->class->destructor(req);
681 tcp_openreq_fastfree(req);
682}
683
684#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 616#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
685#define TCP_INET_FAMILY(fam) ((fam) == AF_INET) 617#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
686#else 618#else
@@ -708,7 +640,7 @@ struct tcp_func {
708 640
709 struct sock * (*syn_recv_sock) (struct sock *sk, 641 struct sock * (*syn_recv_sock) (struct sock *sk,
710 struct sk_buff *skb, 642 struct sk_buff *skb,
711 struct open_request *req, 643 struct request_sock *req,
712 struct dst_entry *dst); 644 struct dst_entry *dst);
713 645
714 int (*remember_stamp) (struct sock *sk); 646 int (*remember_stamp) (struct sock *sk);
@@ -852,8 +784,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
852 unsigned len); 784 unsigned len);
853 785
854extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, 786extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
855 struct open_request *req, 787 struct request_sock *req,
856 struct open_request **prev); 788 struct request_sock **prev);
857extern int tcp_child_process(struct sock *parent, 789extern int tcp_child_process(struct sock *parent,
858 struct sock *child, 790 struct sock *child,
859 struct sk_buff *skb); 791 struct sk_buff *skb);
@@ -903,12 +835,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
903 struct sk_buff *skb); 835 struct sk_buff *skb);
904 836
905extern struct sock * tcp_create_openreq_child(struct sock *sk, 837extern struct sock * tcp_create_openreq_child(struct sock *sk,
906 struct open_request *req, 838 struct request_sock *req,
907 struct sk_buff *skb); 839 struct sk_buff *skb);
908 840
909extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, 841extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
910 struct sk_buff *skb, 842 struct sk_buff *skb,
911 struct open_request *req, 843 struct request_sock *req,
912 struct dst_entry *dst); 844 struct dst_entry *dst);
913 845
914extern int tcp_v4_do_rcv(struct sock *sk, 846extern int tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +854,7 @@ extern int tcp_connect(struct sock *sk);
922 854
923extern struct sk_buff * tcp_make_synack(struct sock *sk, 855extern struct sk_buff * tcp_make_synack(struct sock *sk,
924 struct dst_entry *dst, 856 struct dst_entry *dst,
925 struct open_request *req); 857 struct request_sock *req);
926 858
927extern int tcp_disconnect(struct sock *sk, int flags); 859extern int tcp_disconnect(struct sock *sk, int flags);
928 860
@@ -1750,99 +1682,71 @@ static inline int tcp_full_space(const struct sock *sk)
1750 return tcp_win_from_space(sk->sk_rcvbuf); 1682 return tcp_win_from_space(sk->sk_rcvbuf);
1751} 1683}
1752 1684
1753static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req, 1685static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
1754 struct sock *child) 1686 struct sock *child)
1755{ 1687{
1756 struct tcp_sock *tp = tcp_sk(sk); 1688 reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
1757
1758 req->sk = child;
1759 sk_acceptq_added(sk);
1760
1761 if (!tp->accept_queue_tail) {
1762 tp->accept_queue = req;
1763 } else {
1764 tp->accept_queue_tail->dl_next = req;
1765 }
1766 tp->accept_queue_tail = req;
1767 req->dl_next = NULL;
1768} 1689}
1769 1690
1770struct tcp_listen_opt
1771{
1772 u8 max_qlen_log; /* log_2 of maximal queued SYNs */
1773 int qlen;
1774 int qlen_young;
1775 int clock_hand;
1776 u32 hash_rnd;
1777 struct open_request *syn_table[TCP_SYNQ_HSIZE];
1778};
1779
1780static inline void 1691static inline void
1781tcp_synq_removed(struct sock *sk, struct open_request *req) 1692tcp_synq_removed(struct sock *sk, struct request_sock *req)
1782{ 1693{
1783 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt; 1694 if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
1784
1785 if (--lopt->qlen == 0)
1786 tcp_delete_keepalive_timer(sk); 1695 tcp_delete_keepalive_timer(sk);
1787 if (req->retrans == 0)
1788 lopt->qlen_young--;
1789} 1696}
1790 1697
1791static inline void tcp_synq_added(struct sock *sk) 1698static inline void tcp_synq_added(struct sock *sk)
1792{ 1699{
1793 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt; 1700 if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
1794
1795 if (lopt->qlen++ == 0)
1796 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT); 1701 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1797 lopt->qlen_young++;
1798} 1702}
1799 1703
1800static inline int tcp_synq_len(struct sock *sk) 1704static inline int tcp_synq_len(struct sock *sk)
1801{ 1705{
1802 return tcp_sk(sk)->listen_opt->qlen; 1706 return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
1803} 1707}
1804 1708
1805static inline int tcp_synq_young(struct sock *sk) 1709static inline int tcp_synq_young(struct sock *sk)
1806{ 1710{
1807 return tcp_sk(sk)->listen_opt->qlen_young; 1711 return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
1808} 1712}
1809 1713
1810static inline int tcp_synq_is_full(struct sock *sk) 1714static inline int tcp_synq_is_full(struct sock *sk)
1811{ 1715{
1812 return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log; 1716 return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
1813} 1717}
1814 1718
1815static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req, 1719static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
1816 struct open_request **prev) 1720 struct request_sock **prev)
1817{ 1721{
1818 write_lock(&tp->syn_wait_lock); 1722 reqsk_queue_unlink(&tp->accept_queue, req, prev);
1819 *prev = req->dl_next;
1820 write_unlock(&tp->syn_wait_lock);
1821} 1723}
1822 1724
1823static inline void tcp_synq_drop(struct sock *sk, struct open_request *req, 1725static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
1824 struct open_request **prev) 1726 struct request_sock **prev)
1825{ 1727{
1826 tcp_synq_unlink(tcp_sk(sk), req, prev); 1728 tcp_synq_unlink(tcp_sk(sk), req, prev);
1827 tcp_synq_removed(sk, req); 1729 tcp_synq_removed(sk, req);
1828 tcp_openreq_free(req); 1730 reqsk_free(req);
1829} 1731}
1830 1732
1831static __inline__ void tcp_openreq_init(struct open_request *req, 1733static __inline__ void tcp_openreq_init(struct request_sock *req,
1832 struct tcp_options_received *rx_opt, 1734 struct tcp_options_received *rx_opt,
1833 struct sk_buff *skb) 1735 struct sk_buff *skb)
1834{ 1736{
1737 struct inet_request_sock *ireq = inet_rsk(req);
1738
1835 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ 1739 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1836 req->rcv_isn = TCP_SKB_CB(skb)->seq; 1740 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1837 req->mss = rx_opt->mss_clamp; 1741 req->mss = rx_opt->mss_clamp;
1838 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; 1742 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1839 req->tstamp_ok = rx_opt->tstamp_ok; 1743 ireq->tstamp_ok = rx_opt->tstamp_ok;
1840 req->sack_ok = rx_opt->sack_ok; 1744 ireq->sack_ok = rx_opt->sack_ok;
1841 req->snd_wscale = rx_opt->snd_wscale; 1745 ireq->snd_wscale = rx_opt->snd_wscale;
1842 req->wscale_ok = rx_opt->wscale_ok; 1746 ireq->wscale_ok = rx_opt->wscale_ok;
1843 req->acked = 0; 1747 ireq->acked = 0;
1844 req->ecn_ok = 0; 1748 ireq->ecn_ok = 0;
1845 req->rmt_port = skb->h.th->source; 1749 ireq->rmt_port = skb->h.th->source;
1846} 1750}
1847 1751
1848extern void tcp_enter_memory_pressure(void); 1752extern void tcp_enter_memory_pressure(void);
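
To make the open_request to request_sock conversion above concrete, this is a hedged sketch of how field accesses move: generic listen-queue state stays on struct request_sock, while address-family and TCP specific fields are reached through the inet_rsk()/tcp_rsk() casts introduced by this patch. The example_* wrappers are illustrative only.

#include <linux/tcp.h>
#include <net/tcp.h>

static u32 example_initial_seq(struct request_sock *req)
{
	return tcp_rsk(req)->rcv_isn;		/* was req->rcv_isn */
}

static int example_uses_wscale(struct request_sock *req)
{
	return inet_rsk(req)->wscale_ok;	/* was req->wscale_ok */
}

static unsigned long example_timeout(struct request_sock *req)
{
	return req->expires;			/* still on the generic struct */
}
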
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index dc1456389a97..64980ee8c92a 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -2,6 +2,7 @@
2#define _NET_TCP_ECN_H_ 1 2#define _NET_TCP_ECN_H_ 1
3 3
4#include <net/inet_ecn.h> 4#include <net/inet_ecn.h>
5#include <net/request_sock.h>
5 6
6#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) 7#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
7 8
@@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
38} 39}
39 40
40static __inline__ void 41static __inline__ void
41TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th) 42TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
42{ 43{
43 if (req->ecn_ok) 44 if (inet_rsk(req)->ecn_ok)
44 th->ece = 1; 45 th->ece = 1;
45} 46}
46 47
@@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
111} 112}
112 113
113static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, 114static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
114 struct open_request *req) 115 struct request_sock *req)
115{ 116{
116 tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0; 117 tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
117} 118}
118 119
119static __inline__ void 120static __inline__ void
120TCP_ECN_create_request(struct open_request *req, struct tcphdr *th) 121TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
121{ 122{
122 if (sysctl_tcp_ecn && th->ece && th->cwr) 123 if (sysctl_tcp_ecn && th->ece && th->cwr)
123 req->ecn_ok = 1; 124 inet_rsk(req)->ecn_ok = 1;
124} 125}
125 126
126#endif 127#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d675836ba6c3..0e65e02b7a1d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -158,6 +158,20 @@ enum {
158 XFRM_STATE_DEAD 158 XFRM_STATE_DEAD
159}; 159};
160 160
161/* callback structure passed from either netlink or pfkey */
162struct km_event
163{
164 union {
165 u32 hard;
166 u32 proto;
167 u32 byid;
168 } data;
169
170 u32 seq;
171 u32 pid;
172 u32 event;
173};
174
161struct xfrm_type; 175struct xfrm_type;
162struct xfrm_dst; 176struct xfrm_dst;
163struct xfrm_policy_afinfo { 177struct xfrm_policy_afinfo {
@@ -179,6 +193,8 @@ struct xfrm_policy_afinfo {
179 193
180extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); 194extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
181extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); 195extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
196extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
197extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
182 198
183#define XFRM_ACQ_EXPIRES 30 199#define XFRM_ACQ_EXPIRES 30
184 200
@@ -290,11 +306,11 @@ struct xfrm_mgr
290{ 306{
291 struct list_head list; 307 struct list_head list;
292 char *id; 308 char *id;
293 int (*notify)(struct xfrm_state *x, int event); 309 int (*notify)(struct xfrm_state *x, struct km_event *c);
294 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir); 310 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
295 struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir); 311 struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
296 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport); 312 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
297 int (*notify_policy)(struct xfrm_policy *x, int dir, int event); 313 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
298}; 314};
299 315
300extern int xfrm_register_km(struct xfrm_mgr *km); 316extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -656,7 +672,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
656 return 0; 672 return 0;
657} 673}
658 674
659extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir); 675extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
660 676
661static inline void xfrm_sk_free_policy(struct sock *sk) 677static inline void xfrm_sk_free_policy(struct sock *sk)
662{ 678{
@@ -817,7 +833,7 @@ extern int xfrm_state_add(struct xfrm_state *x);
817extern int xfrm_state_update(struct xfrm_state *x); 833extern int xfrm_state_update(struct xfrm_state *x);
818extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family); 834extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
819extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); 835extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
820extern void xfrm_state_delete(struct xfrm_state *x); 836extern int xfrm_state_delete(struct xfrm_state *x);
821extern void xfrm_state_flush(u8 proto); 837extern void xfrm_state_flush(u8 proto);
822extern int xfrm_replay_check(struct xfrm_state *x, u32 seq); 838extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
823extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq); 839extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
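
A hedged sketch of the new notification path added here: a caller fills a km_event and fans it out to registered key managers through km_state_notify(). The example_state_expired function is illustrative, and the zero event code is a placeholder; real callers pass a message type describing what actually happened.

#include <net/xfrm.h>

static void example_state_expired(struct xfrm_state *x, int hard)
{
	struct km_event c;

	c.data.hard = hard;	/* hard vs. soft lifetime expiry */
	c.seq = 0;		/* not tied to a userspace request */
	c.pid = 0;		/* kernel originated */
	c.event = 0;		/* placeholder event code */
	km_state_notify(x, &c);
}
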
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 659ecf48fb4a..1fb233741513 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -41,6 +41,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
41#define FORMAT_UNIT 0x04 41#define FORMAT_UNIT 0x04
42#define READ_BLOCK_LIMITS 0x05 42#define READ_BLOCK_LIMITS 0x05
43#define REASSIGN_BLOCKS 0x07 43#define REASSIGN_BLOCKS 0x07
44#define INITIALIZE_ELEMENT_STATUS 0x07
44#define READ_6 0x08 45#define READ_6 0x08
45#define WRITE_6 0x0a 46#define WRITE_6 0x0a
46#define SEEK_6 0x0b 47#define SEEK_6 0x0b
@@ -65,6 +66,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
65#define READ_10 0x28 66#define READ_10 0x28
66#define WRITE_10 0x2a 67#define WRITE_10 0x2a
67#define SEEK_10 0x2b 68#define SEEK_10 0x2b
69#define POSITION_TO_ELEMENT 0x2b
68#define WRITE_VERIFY 0x2e 70#define WRITE_VERIFY 0x2e
69#define VERIFY 0x2f 71#define VERIFY 0x2f
70#define SEARCH_HIGH 0x30 72#define SEARCH_HIGH 0x30
@@ -97,6 +99,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
97#define PERSISTENT_RESERVE_OUT 0x5f 99#define PERSISTENT_RESERVE_OUT 0x5f
98#define REPORT_LUNS 0xa0 100#define REPORT_LUNS 0xa0
99#define MOVE_MEDIUM 0xa5 101#define MOVE_MEDIUM 0xa5
102#define EXCHANGE_MEDIUM 0xa6
100#define READ_12 0xa8 103#define READ_12 0xa8
101#define WRITE_12 0xaa 104#define WRITE_12 0xaa
102#define WRITE_VERIFY_12 0xae 105#define WRITE_VERIFY_12 0xae
@@ -210,6 +213,7 @@ static inline int scsi_status_is_good(int status)
210#define TYPE_COMM 0x09 /* Communications device */ 213#define TYPE_COMM 0x09 /* Communications device */
211#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */ 214#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
212#define TYPE_RAID 0x0c 215#define TYPE_RAID 0x0c
216#define TYPE_RBC 0x0e
213#define TYPE_NO_LUN 0x7f 217#define TYPE_NO_LUN 0x7f
214 218
215/* 219/*
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c018020d9160..63c91dd85ca1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -154,7 +154,9 @@ struct scsi_target {
154 unsigned int id; /* target id ... replace 154 unsigned int id; /* target id ... replace
155 * scsi_device.id eventually */ 155 * scsi_device.id eventually */
156 unsigned long create:1; /* signal that it needs to be added */ 156 unsigned long create:1; /* signal that it needs to be added */
157 unsigned long starget_data[0]; 157 void *hostdata; /* available to low-level driver */
158 unsigned long starget_data[0]; /* for the transport */
159 /* starget_data must be the last element!!!! */
158} __attribute__((aligned(sizeof(unsigned long)))); 160} __attribute__((aligned(sizeof(unsigned long))));
159 161
160#define to_scsi_target(d) container_of(d, struct scsi_target, dev) 162#define to_scsi_target(d) container_of(d, struct scsi_target, dev)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 1cee1e100943..db9914adeac9 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -10,6 +10,7 @@ struct block_device;
10struct module; 10struct module;
11struct scsi_cmnd; 11struct scsi_cmnd;
12struct scsi_device; 12struct scsi_device;
13struct scsi_target;
13struct Scsi_Host; 14struct Scsi_Host;
14struct scsi_host_cmd_pool; 15struct scsi_host_cmd_pool;
15struct scsi_transport_template; 16struct scsi_transport_template;
@@ -228,6 +229,30 @@ struct scsi_host_template {
228 void (* slave_destroy)(struct scsi_device *); 229 void (* slave_destroy)(struct scsi_device *);
229 230
230 /* 231 /*
232 * Before the mid layer attempts to scan for a new device attached
233 * to a target where no target currently exists, it will call this
234 * entry in your driver. Should your driver need to allocate any
235 * structs or perform any other init items in order to send commands
236 * to a currently unused target, then this is where you can perform
237 * those allocations.
238 *
239 * Return values: 0 on success, non-0 on failure
240 *
241 * Status: OPTIONAL
242 */
243 int (* target_alloc)(struct scsi_target *);
244
245 /*
246 * Immediately prior to deallocating the target structure, and
247 * after all activity to attached scsi devices has ceased, the
248 * midlayer calls this hook so that the driver may deallocate
249 * and drop any references to the target.
250 *
251 * Status: OPTIONAL
252 */
253 void (* target_destroy)(struct scsi_target *);
254
255 /*
231 * fill in this function to allow the queue depth of this host 256 * fill in this function to allow the queue depth of this host
232 * to be changeable (on a per device basis). returns either 257 * to be changeable (on a per device basis). returns either
233 * the current queue depth setting (may be different from what 258 * the current queue depth setting (may be different from what
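
A hedged sketch of a low-level driver using the two new optional hooks together with the hostdata pointer added to struct scsi_target in this patch; struct my_target_info, the allocation strategy and the template name are all illustrative.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct my_target_info {
	int ready;			/* illustrative per-target state */
};

static int my_target_alloc(struct scsi_target *starget)
{
	starget->hostdata = kmalloc(sizeof(struct my_target_info), GFP_KERNEL);
	if (!starget->hostdata)
		return -ENOMEM;
	memset(starget->hostdata, 0, sizeof(struct my_target_info));
	return 0;
}

static void my_target_destroy(struct scsi_target *starget)
{
	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

static struct scsi_host_template my_template = {
	.name		= "example",
	.target_alloc	= my_target_alloc,
	.target_destroy	= my_target_destroy,
};
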
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index 2dcee7a84752..a4f1837a33b1 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -21,6 +21,7 @@
21#define SCSI_TRANSPORT_H 21#define SCSI_TRANSPORT_H
22 22
23#include <linux/transport_class.h> 23#include <linux/transport_class.h>
24#include <scsi/scsi_host.h>
24 25
25struct scsi_transport_template { 26struct scsi_transport_template {
26 /* the attribute containers */ 27 /* the attribute containers */
@@ -32,8 +33,11 @@ struct scsi_transport_template {
32 * space of this size will be left at the end of the 33 * space of this size will be left at the end of the
33 * scsi_* structure */ 34 * scsi_* structure */
34 int device_size; 35 int device_size;
36 int device_private_offset;
35 int target_size; 37 int target_size;
38 int target_private_offset;
36 int host_size; 39 int host_size;
40 /* no private offset for the host; there's an alternative mechanism */
37 41
38 /* 42 /*
39 * True if the transport wants to use a host-based work-queue 43 * True if the transport wants to use a host-based work-queue
@@ -45,4 +49,38 @@ struct scsi_transport_template {
45 dev_to_shost((tc)->dev) 49 dev_to_shost((tc)->dev)
46 50
47 51
52/* Private area maintenance. The driver-requested allocations come
53 * directly after the transport class allocations (if any). The idea
54 * is that you *must* call these only once. The code assumes that the
55 * initial values are the ones the transport-specific code requires. */
56static inline void
57scsi_transport_reserve_target(struct scsi_transport_template * t, int space)
58{
59 BUG_ON(t->target_private_offset != 0);
60 t->target_private_offset = ALIGN(t->target_size, sizeof(void *));
61 t->target_size = t->target_private_offset + space;
62}
63static inline void
64scsi_transport_reserve_device(struct scsi_transport_template * t, int space)
65{
66 BUG_ON(t->device_private_offset != 0);
67 t->device_private_offset = ALIGN(t->device_size, sizeof(void *));
68 t->device_size = t->device_private_offset + space;
69}
70static inline void *
71scsi_transport_target_data(struct scsi_target *starget)
72{
73 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
74 return (u8 *)starget->starget_data
75 + shost->transportt->target_private_offset;
76
77}
78static inline void *
79scsi_transport_device_data(struct scsi_device *sdev)
80{
81 struct Scsi_Host *shost = sdev->host;
82 return (u8 *)sdev->sdev_data
83 + shost->transportt->device_private_offset;
84}
85
48#endif /* SCSI_TRANSPORT_H */ 86#endif /* SCSI_TRANSPORT_H */
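
A hedged sketch of how a transport class might use the reservation helpers above: reserve per-target private space once at template setup time, then locate it later through scsi_transport_target_data(). struct my_transport_target and the my_* function names are illustrative assumptions.

#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

struct my_transport_target {
	int link_state;			/* illustrative per-target field */
};

static void my_transport_setup(struct scsi_transport_template *t)
{
	/* must be called exactly once, before any target is allocated */
	scsi_transport_reserve_target(t, sizeof(struct my_transport_target));
}

static struct my_transport_target *my_target_data(struct scsi_target *starget)
{
	return scsi_transport_target_data(starget);
}
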