Diffstat (limited to 'net')
62 files changed, 4629 insertions, 2498 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index 1eaa3d19d8bf..afd8385c0c9c 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -340,9 +340,10 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) | |||
340 | unsigned int hash, rii_p = 0; | 340 | unsigned int hash, rii_p = 0; |
341 | unsigned long flags; | 341 | unsigned long flags; |
342 | struct rif_cache *entry; | 342 | struct rif_cache *entry; |
343 | 343 | unsigned char saddr0; | |
344 | 344 | ||
345 | spin_lock_irqsave(&rif_lock, flags); | 345 | spin_lock_irqsave(&rif_lock, flags); |
346 | saddr0 = trh->saddr[0]; | ||
346 | 347 | ||
347 | /* | 348 | /* |
348 | * Firstly see if the entry exists | 349 | * Firstly see if the entry exists |
@@ -395,7 +396,6 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", | |||
395 | entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); | 396 | entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); |
396 | memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); | 397 | memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); |
397 | entry->local_ring = 0; | 398 | entry->local_ring = 0; |
398 | trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */ | ||
399 | } | 399 | } |
400 | else | 400 | else |
401 | { | 401 | { |
@@ -422,6 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", | |||
422 | } | 422 | } |
423 | entry->last_used=jiffies; | 423 | entry->last_used=jiffies; |
424 | } | 424 | } |
425 | trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ | ||
425 | spin_unlock_irqrestore(&rif_lock, flags); | 426 | spin_unlock_irqrestore(&rif_lock, flags); |
426 | } | 427 | } |
427 | 428 | ||
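The tr.c change fixes an asymmetry: the routing-indicator byte of trh->saddr was restored only on the branch that added a new cache entry, so frames that merely refreshed an existing entry reached tcpdump with the bit stripped. The new code snapshots the byte once, right after taking rif_lock, and restores it on the common path before unlocking. A minimal sketch of the pattern, with hypothetical names (cache_lock, frame_hdr, addr0) standing in for the token-ring specifics:

static DEFINE_SPINLOCK(cache_lock);		/* hypothetical lock */

static void update_cache(struct frame_hdr *h)	/* hypothetical type */
{
	unsigned long flags;
	unsigned char saved;

	spin_lock_irqsave(&cache_lock, flags);
	saved = h->addr0;	/* snapshot before parsing mutates it */
	/* ... lookup/insert, which may rewrite bits of h->addr0 ... */
	h->addr0 = saved;	/* restored on every path, for observers */
	spin_unlock_irqrestore(&cache_lock, flags);
}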
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4128fc76ac3a..e68700f950a5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -175,39 +175,10 @@ static void pneigh_queue_purge(struct sk_buff_head *list) | |||
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
178 | void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) | 178 | static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) |
179 | { | 179 | { |
180 | int i; | 180 | int i; |
181 | 181 | ||
182 | write_lock_bh(&tbl->lock); | ||
183 | |||
184 | for (i=0; i <= tbl->hash_mask; i++) { | ||
185 | struct neighbour *n, **np; | ||
186 | |||
187 | np = &tbl->hash_buckets[i]; | ||
188 | while ((n = *np) != NULL) { | ||
189 | if (dev && n->dev != dev) { | ||
190 | np = &n->next; | ||
191 | continue; | ||
192 | } | ||
193 | *np = n->next; | ||
194 | write_lock_bh(&n->lock); | ||
195 | n->dead = 1; | ||
196 | neigh_del_timer(n); | ||
197 | write_unlock_bh(&n->lock); | ||
198 | neigh_release(n); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | write_unlock_bh(&tbl->lock); | ||
203 | } | ||
204 | |||
205 | int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | ||
206 | { | ||
207 | int i; | ||
208 | |||
209 | write_lock_bh(&tbl->lock); | ||
210 | |||
211 | for (i = 0; i <= tbl->hash_mask; i++) { | 182 | for (i = 0; i <= tbl->hash_mask; i++) { |
212 | struct neighbour *n, **np = &tbl->hash_buckets[i]; | 183 | struct neighbour *n, **np = &tbl->hash_buckets[i]; |
213 | 184 | ||
@@ -243,7 +214,19 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | |||
243 | neigh_release(n); | 214 | neigh_release(n); |
244 | } | 215 | } |
245 | } | 216 | } |
217 | } | ||
218 | |||
219 | void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) | ||
220 | { | ||
221 | write_lock_bh(&tbl->lock); | ||
222 | neigh_flush_dev(tbl, dev); | ||
223 | write_unlock_bh(&tbl->lock); | ||
224 | } | ||
246 | 225 | ||
226 | int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) | ||
227 | { | ||
228 | write_lock_bh(&tbl->lock); | ||
229 | neigh_flush_dev(tbl, dev); | ||
247 | pneigh_ifdown(tbl, dev); | 230 | pneigh_ifdown(tbl, dev); |
248 | write_unlock_bh(&tbl->lock); | 231 | write_unlock_bh(&tbl->lock); |
249 | 232 | ||
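The first neighbour.c change is deduplication: the bucket-walking teardown loop that neigh_changeaddr() and neigh_ifdown() both carried moves into a static helper, neigh_flush_dev(), which expects the caller to hold tbl->lock. The two public entry points shrink to lock/flush/unlock (plus pneigh_ifdown() in the ifdown case):

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);	/* helper assumes tbl->lock is held */
	write_unlock_bh(&tbl->lock);
}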
@@ -732,6 +715,7 @@ static inline void neigh_add_timer(struct neighbour *n, unsigned long when) | |||
732 | if (unlikely(mod_timer(&n->timer, when))) { | 715 | if (unlikely(mod_timer(&n->timer, when))) { |
733 | printk("NEIGH: BUG, double timer add, state is %x\n", | 716 | printk("NEIGH: BUG, double timer add, state is %x\n", |
734 | n->nud_state); | 717 | n->nud_state); |
718 | dump_stack(); | ||
735 | } | 719 | } |
736 | } | 720 | } |
737 | 721 | ||
@@ -815,10 +799,10 @@ static void neigh_timer_handler(unsigned long arg) | |||
815 | } | 799 | } |
816 | 800 | ||
817 | if (neigh->nud_state & NUD_IN_TIMER) { | 801 | if (neigh->nud_state & NUD_IN_TIMER) { |
818 | neigh_hold(neigh); | ||
819 | if (time_before(next, jiffies + HZ/2)) | 802 | if (time_before(next, jiffies + HZ/2)) |
820 | next = jiffies + HZ/2; | 803 | next = jiffies + HZ/2; |
821 | neigh_add_timer(neigh, next); | 804 | if (!mod_timer(&neigh->timer, next)) |
805 | neigh_hold(neigh); | ||
822 | } | 806 | } |
823 | if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { | 807 | if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { |
824 | struct sk_buff *skb = skb_peek(&neigh->arp_queue); | 808 | struct sk_buff *skb = skb_peek(&neigh->arp_queue); |
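The timer-handler hunk closes a reference-counting race. The old code took neigh_hold() unconditionally and then re-armed through neigh_add_timer(), which can trip the double-add warning when the timer is already pending; the new code re-arms with mod_timer() directly and takes a reference only when the timer was not previously pending, preserving the invariant of exactly one reference per pending timer:

/* mod_timer() returns 0 when the timer was inactive, 1 when it was
 * already pending; only the inactive case creates a new reference
 * holder (generic obj/obj_hold names, same shape as the patch). */
if (!mod_timer(&obj->timer, expires))
	obj_hold(obj);	/* the timer now owns a fresh reference */
/* if it was pending, the timer keeps the reference it already had */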
@@ -1641,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb, | |||
1641 | 1625 | ||
1642 | memset(&ndst, 0, sizeof(ndst)); | 1626 | memset(&ndst, 0, sizeof(ndst)); |
1643 | 1627 | ||
1644 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1628 | for_each_cpu(cpu) { |
1645 | struct neigh_statistics *st; | 1629 | struct neigh_statistics *st; |
1646 | 1630 | ||
1647 | if (!cpu_possible(cpu)) | ||
1648 | continue; | ||
1649 | |||
1650 | st = per_cpu_ptr(tbl->stats, cpu); | 1631 | st = per_cpu_ptr(tbl->stats, cpu); |
1651 | ndst.ndts_allocs += st->allocs; | 1632 | ndst.ndts_allocs += st->allocs; |
1652 | ndst.ndts_destroys += st->destroys; | 1633 | ndst.ndts_destroys += st->destroys; |
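The statistics hunk swaps the open-coded 0..NR_CPUS-1 scan plus cpu_possible() filter for the for_each_cpu() iterator (this kernel generation's name for what was later renamed for_each_possible_cpu()), which walks only the possible-CPU map. The shape of the conversion:

/* Sketch: aggregate per-CPU counters with the iterator helper. */
int cpu;
unsigned long allocs = 0;

for_each_cpu(cpu) {
	struct neigh_statistics *st = per_cpu_ptr(tbl->stats, cpu);

	allocs += st->allocs;
}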
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5f043d346694..7fc3e9e28c34 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -75,7 +75,7 @@ | |||
75 | * By design there should only be *one* "controlling" process. In practice | 75 | * By design there should only be *one* "controlling" process. In practice |
76 | * multiple write accesses gives unpredictable result. Understood by "write" | 76 | * multiple write accesses gives unpredictable result. Understood by "write" |
77 | * to /proc gives result code thats should be read be the "writer". | 77 | * to /proc gives result code thats should be read be the "writer". |
78 | * For pratical use this should be no problem. | 78 | * For practical use this should be no problem. |
79 | * | 79 | * |
80 | * Note when adding devices to a specific CPU there good idea to also assign | 80 | * Note when adding devices to a specific CPU there good idea to also assign |
81 | * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. | 81 | * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. |
@@ -96,7 +96,7 @@ | |||
96 | * New xmit() return, do_div and misc clean up by Stephen Hemminger | 96 | * New xmit() return, do_div and misc clean up by Stephen Hemminger |
97 | * <shemminger@osdl.org> 040923 | 97 | * <shemminger@osdl.org> 040923 |
98 | * | 98 | * |
99 | * Rany Dunlap fixed u64 printk compiler waring | 99 | * Randy Dunlap fixed u64 printk compiler waring |
100 | * | 100 | * |
101 | * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> | 101 | * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> |
102 | * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 | 102 | * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 |
@@ -137,6 +137,7 @@ | |||
137 | #include <linux/ipv6.h> | 137 | #include <linux/ipv6.h> |
138 | #include <linux/udp.h> | 138 | #include <linux/udp.h> |
139 | #include <linux/proc_fs.h> | 139 | #include <linux/proc_fs.h> |
140 | #include <linux/seq_file.h> | ||
140 | #include <linux/wait.h> | 141 | #include <linux/wait.h> |
141 | #include <net/checksum.h> | 142 | #include <net/checksum.h> |
142 | #include <net/ipv6.h> | 143 | #include <net/ipv6.h> |
@@ -151,7 +152,7 @@ | |||
151 | #include <asm/timex.h> | 152 | #include <asm/timex.h> |
152 | 153 | ||
153 | 154 | ||
154 | #define VERSION "pktgen v2.62: Packet Generator for packet performance testing.\n" | 155 | #define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n" |
155 | 156 | ||
156 | /* #define PG_DEBUG(a) a */ | 157 | /* #define PG_DEBUG(a) a */ |
157 | #define PG_DEBUG(a) | 158 | #define PG_DEBUG(a) |
@@ -177,8 +178,8 @@ | |||
177 | #define T_REMDEV (1<<3) /* Remove all devs */ | 178 | #define T_REMDEV (1<<3) /* Remove all devs */ |
178 | 179 | ||
179 | /* Locks */ | 180 | /* Locks */ |
180 | #define thread_lock() spin_lock(&_thread_lock) | 181 | #define thread_lock() down(&pktgen_sem) |
181 | #define thread_unlock() spin_unlock(&_thread_lock) | 182 | #define thread_unlock() up(&pktgen_sem) |
182 | 183 | ||
183 | /* If lock -- can be removed after some work */ | 184 | /* If lock -- can be removed after some work */ |
184 | #define if_lock(t) spin_lock(&(t->if_lock)); | 185 | #define if_lock(t) spin_lock(&(t->if_lock)); |
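Replacing the _thread_lock spinlock with a semaphore matters because code running under thread_lock() now sleeps: GFP_KERNEL allocations and procfs entry creation happen while it is held, and sleeping under a spinlock is illegal. A sketch of the resulting usage (DECLARE_MUTEX declares a count-one semaphore in this kernel generation; process context assumed):

static DECLARE_MUTEX(pktgen_sem);	/* semaphore initialized to 1 */

down(&pktgen_sem);			/* may sleep, so process context only */
/* ... kzalloc(GFP_KERNEL), create_proc_entry(), other sleepers ... */
up(&pktgen_sem);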
@@ -186,7 +187,9 @@ | |||
186 | 187 | ||
187 | /* Used to help with determining the pkts on receive */ | 188 | /* Used to help with determining the pkts on receive */ |
188 | #define PKTGEN_MAGIC 0xbe9be955 | 189 | #define PKTGEN_MAGIC 0xbe9be955 |
189 | #define PG_PROC_DIR "net/pktgen" | 190 | #define PG_PROC_DIR "pktgen" |
191 | #define PGCTRL "pgctrl" | ||
192 | static struct proc_dir_entry *pg_proc_dir = NULL; | ||
190 | 193 | ||
191 | #define MAX_CFLOWS 65536 | 194 | #define MAX_CFLOWS 65536 |
192 | 195 | ||
@@ -202,11 +205,8 @@ struct pktgen_dev { | |||
202 | * Try to keep frequent/infrequent used vars. separated. | 205 | * Try to keep frequent/infrequent used vars. separated. |
203 | */ | 206 | */ |
204 | 207 | ||
205 | char ifname[32]; | 208 | char ifname[IFNAMSIZ]; |
206 | struct proc_dir_entry *proc_ent; | ||
207 | char result[512]; | 209 | char result[512]; |
208 | /* proc file names */ | ||
209 | char fname[80]; | ||
210 | 210 | ||
211 | struct pktgen_thread* pg_thread; /* the owner */ | 211 | struct pktgen_thread* pg_thread; /* the owner */ |
212 | struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */ | 212 | struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */ |
@@ -244,7 +244,7 @@ struct pktgen_dev { | |||
244 | __u32 seq_num; | 244 | __u32 seq_num; |
245 | 245 | ||
246 | int clone_skb; /* Use multiple SKBs during packet gen. If this number | 246 | int clone_skb; /* Use multiple SKBs during packet gen. If this number |
247 | * is greater than 1, then that many coppies of the same | 247 | * is greater than 1, then that many copies of the same |
248 | * packet will be sent before a new packet is allocated. | 248 | * packet will be sent before a new packet is allocated. |
249 | * For instance, if you want to send 1024 identical packets | 249 | * For instance, if you want to send 1024 identical packets |
250 | * before creating a new packet, set clone_skb to 1024. | 250 | * before creating a new packet, set clone_skb to 1024. |
@@ -330,8 +330,6 @@ struct pktgen_thread { | |||
330 | struct pktgen_dev *if_list; /* All device here */ | 330 | struct pktgen_dev *if_list; /* All device here */ |
331 | struct pktgen_thread* next; | 331 | struct pktgen_thread* next; |
332 | char name[32]; | 332 | char name[32]; |
333 | char fname[128]; /* name of proc file */ | ||
334 | struct proc_dir_entry *proc_ent; | ||
335 | char result[512]; | 333 | char result[512]; |
336 | u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ | 334 | u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ |
337 | 335 | ||
@@ -396,7 +394,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type) | |||
396 | 394 | ||
397 | /* End of hacks to deal with 64-bit math on x86 */ | 395 | /* End of hacks to deal with 64-bit math on x86 */ |
398 | 396 | ||
399 | /** Convert to miliseconds */ | 397 | /** Convert to milliseconds */ |
400 | static inline __u64 tv_to_ms(const struct timeval* tv) | 398 | static inline __u64 tv_to_ms(const struct timeval* tv) |
401 | { | 399 | { |
402 | __u64 ms = tv->tv_usec / 1000; | 400 | __u64 ms = tv->tv_usec / 1000; |
@@ -425,7 +423,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base) | |||
425 | { | 423 | { |
426 | __u64 tmp = n; | 424 | __u64 tmp = n; |
427 | /* | 425 | /* |
428 | * How do we know if the architectrure we are running on | 426 | * How do we know if the architecture we are running on |
429 | * supports division with 64 bit base? | 427 | * supports division with 64 bit base? |
430 | * | 428 | * |
431 | */ | 429 | */ |
@@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b) | |||
473 | 471 | ||
474 | static char version[] __initdata = VERSION; | 472 | static char version[] __initdata = VERSION; |
475 | 473 | ||
476 | static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos); | ||
477 | static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos); | ||
478 | static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); | ||
479 | |||
480 | static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); | ||
481 | static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); | ||
482 | static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); | ||
483 | static int create_proc_dir(void); | ||
484 | static int remove_proc_dir(void); | ||
485 | |||
486 | static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i); | 474 | static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i); |
487 | static int pktgen_add_device(struct pktgen_thread* t, const char* ifname); | 475 | static int pktgen_add_device(struct pktgen_thread* t, const char* ifname); |
488 | static struct pktgen_thread* pktgen_find_thread(const char* name); | 476 | static struct pktgen_thread* pktgen_find_thread(const char* name); |
@@ -503,83 +491,41 @@ static int pg_delay_d = 0; | |||
503 | static int pg_clone_skb_d = 0; | 491 | static int pg_clone_skb_d = 0; |
504 | static int debug = 0; | 492 | static int debug = 0; |
505 | 493 | ||
506 | static DEFINE_SPINLOCK(_thread_lock); | 494 | static DECLARE_MUTEX(pktgen_sem); |
507 | static struct pktgen_thread *pktgen_threads = NULL; | 495 | static struct pktgen_thread *pktgen_threads = NULL; |
508 | 496 | ||
509 | static char module_fname[128]; | ||
510 | static struct proc_dir_entry *module_proc_ent = NULL; | ||
511 | |||
512 | static struct notifier_block pktgen_notifier_block = { | 497 | static struct notifier_block pktgen_notifier_block = { |
513 | .notifier_call = pktgen_device_event, | 498 | .notifier_call = pktgen_device_event, |
514 | }; | 499 | }; |
515 | 500 | ||
516 | static struct file_operations pktgen_fops = { | ||
517 | .read = proc_pgctrl_read, | ||
518 | .write = proc_pgctrl_write, | ||
519 | /* .ioctl = pktgen_ioctl, later maybe */ | ||
520 | }; | ||
521 | |||
522 | /* | 501 | /* |
523 | * /proc handling functions | 502 | * /proc handling functions |
524 | * | 503 | * |
525 | */ | 504 | */ |
526 | 505 | ||
527 | static struct proc_dir_entry *pg_proc_dir = NULL; | 506 | static int pgctrl_show(struct seq_file *seq, void *v) |
528 | static int proc_pgctrl_read_eof=0; | ||
529 | |||
530 | static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, | ||
531 | size_t count, loff_t *ppos) | ||
532 | { | 507 | { |
533 | char data[200]; | 508 | seq_puts(seq, VERSION); |
534 | int len = 0; | 509 | return 0; |
535 | |||
536 | if(proc_pgctrl_read_eof) { | ||
537 | proc_pgctrl_read_eof=0; | ||
538 | len = 0; | ||
539 | goto out; | ||
540 | } | ||
541 | |||
542 | sprintf(data, "%s", VERSION); | ||
543 | |||
544 | len = strlen(data); | ||
545 | |||
546 | if(len > count) { | ||
547 | len =-EFAULT; | ||
548 | goto out; | ||
549 | } | ||
550 | |||
551 | if (copy_to_user(buf, data, len)) { | ||
552 | len =-EFAULT; | ||
553 | goto out; | ||
554 | } | ||
555 | |||
556 | *ppos += len; | ||
557 | proc_pgctrl_read_eof=1; /* EOF next call */ | ||
558 | |||
559 | out: | ||
560 | return len; | ||
561 | } | 510 | } |
562 | 511 | ||
563 | static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, | 512 | static ssize_t pgctrl_write(struct file* file,const char __user * buf, |
564 | size_t count, loff_t *ppos) | 513 | size_t count, loff_t *ppos) |
565 | { | 514 | { |
566 | char *data = NULL; | ||
567 | int err = 0; | 515 | int err = 0; |
516 | char data[128]; | ||
568 | 517 | ||
569 | if (!capable(CAP_NET_ADMIN)){ | 518 | if (!capable(CAP_NET_ADMIN)){ |
570 | err = -EPERM; | 519 | err = -EPERM; |
571 | goto out; | 520 | goto out; |
572 | } | 521 | } |
573 | 522 | ||
574 | data = (void*)vmalloc ((unsigned int)count); | 523 | if (count > sizeof(data)) |
524 | count = sizeof(data); | ||
575 | 525 | ||
576 | if(!data) { | ||
577 | err = -ENOMEM; | ||
578 | goto out; | ||
579 | } | ||
580 | if (copy_from_user(data, buf, count)) { | 526 | if (copy_from_user(data, buf, count)) { |
581 | err =-EFAULT; | 527 | err = -EFAULT; |
582 | goto out_free; | 528 | goto out; |
583 | } | 529 | } |
584 | data[count-1] = 0; /* Make string */ | 530 | data[count-1] = 0; /* Make string */ |
585 | 531 | ||
@@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, | |||
594 | 540 | ||
595 | err = count; | 541 | err = count; |
596 | 542 | ||
597 | out_free: | ||
598 | vfree (data); | ||
599 | out: | 543 | out: |
600 | return err; | 544 | return err; |
601 | } | 545 | } |
602 | 546 | ||
603 | static int proc_if_read(char *buf , char **start, off_t offset, | 547 | static int pgctrl_open(struct inode *inode, struct file *file) |
604 | int len, int *eof, void *data) | 548 | { |
549 | return single_open(file, pgctrl_show, PDE(inode)->data); | ||
550 | } | ||
551 | |||
552 | static struct file_operations pktgen_fops = { | ||
553 | .owner = THIS_MODULE, | ||
554 | .open = pgctrl_open, | ||
555 | .read = seq_read, | ||
556 | .llseek = seq_lseek, | ||
557 | .write = pgctrl_write, | ||
558 | .release = single_release, | ||
559 | }; | ||
560 | |||
561 | static int pktgen_if_show(struct seq_file *seq, void *v) | ||
605 | { | 562 | { |
606 | char *p; | ||
607 | int i; | 563 | int i; |
608 | struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); | 564 | struct pktgen_dev *pkt_dev = seq->private; |
609 | __u64 sa; | 565 | __u64 sa; |
610 | __u64 stopped; | 566 | __u64 stopped; |
611 | __u64 now = getCurUs(); | 567 | __u64 now = getCurUs(); |
612 | 568 | ||
613 | p = buf; | 569 | seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", |
614 | p += sprintf(p, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", | 570 | (unsigned long long) pkt_dev->count, |
615 | (unsigned long long) pkt_dev->count, | 571 | pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); |
616 | pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); | ||
617 | 572 | ||
618 | p += sprintf(p, " frags: %d delay: %u clone_skb: %d ifname: %s\n", | 573 | seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n", |
619 | pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); | 574 | pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); |
620 | 575 | ||
621 | p += sprintf(p, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); | 576 | seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); |
622 | 577 | ||
623 | 578 | ||
624 | if(pkt_dev->flags & F_IPV6) { | 579 | if(pkt_dev->flags & F_IPV6) { |
@@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset, | |||
626 | fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); | 581 | fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); |
627 | fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); | 582 | fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); |
628 | fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); | 583 | fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); |
629 | p += sprintf(p, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); | 584 | seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); |
630 | 585 | ||
631 | fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); | 586 | fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); |
632 | fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); | 587 | fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); |
633 | fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); | 588 | fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); |
634 | p += sprintf(p, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); | 589 | seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); |
635 | 590 | ||
636 | } | 591 | } |
637 | else | 592 | else |
638 | p += sprintf(p, " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", | 593 | seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", |
639 | pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); | 594 | pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); |
640 | 595 | ||
641 | p += sprintf(p, " src_mac: "); | 596 | seq_puts(seq, " src_mac: "); |
642 | 597 | ||
643 | if ((pkt_dev->src_mac[0] == 0) && | 598 | if ((pkt_dev->src_mac[0] == 0) && |
644 | (pkt_dev->src_mac[1] == 0) && | 599 | (pkt_dev->src_mac[1] == 0) && |
@@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset, | |||
648 | (pkt_dev->src_mac[5] == 0)) | 603 | (pkt_dev->src_mac[5] == 0)) |
649 | 604 | ||
650 | for (i = 0; i < 6; i++) | 605 | for (i = 0; i < 6; i++) |
651 | p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); | 606 | seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); |
652 | 607 | ||
653 | else | 608 | else |
654 | for (i = 0; i < 6; i++) | 609 | for (i = 0; i < 6; i++) |
655 | p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); | 610 | seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); |
656 | 611 | ||
657 | p += sprintf(p, "dst_mac: "); | 612 | seq_printf(seq, "dst_mac: "); |
658 | for (i = 0; i < 6; i++) | 613 | for (i = 0; i < 6; i++) |
659 | p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); | 614 | seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); |
660 | 615 | ||
661 | p += sprintf(p, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", | 616 | seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", |
662 | pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, | 617 | pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, |
663 | pkt_dev->udp_dst_max); | 618 | pkt_dev->udp_dst_max); |
664 | 619 | ||
665 | p += sprintf(p, " src_mac_count: %d dst_mac_count: %d \n Flags: ", | 620 | seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ", |
666 | pkt_dev->src_mac_count, pkt_dev->dst_mac_count); | 621 | pkt_dev->src_mac_count, pkt_dev->dst_mac_count); |
667 | 622 | ||
668 | 623 | ||
669 | if (pkt_dev->flags & F_IPV6) | 624 | if (pkt_dev->flags & F_IPV6) |
670 | p += sprintf(p, "IPV6 "); | 625 | seq_printf(seq, "IPV6 "); |
671 | 626 | ||
672 | if (pkt_dev->flags & F_IPSRC_RND) | 627 | if (pkt_dev->flags & F_IPSRC_RND) |
673 | p += sprintf(p, "IPSRC_RND "); | 628 | seq_printf(seq, "IPSRC_RND "); |
674 | 629 | ||
675 | if (pkt_dev->flags & F_IPDST_RND) | 630 | if (pkt_dev->flags & F_IPDST_RND) |
676 | p += sprintf(p, "IPDST_RND "); | 631 | seq_printf(seq, "IPDST_RND "); |
677 | 632 | ||
678 | if (pkt_dev->flags & F_TXSIZE_RND) | 633 | if (pkt_dev->flags & F_TXSIZE_RND) |
679 | p += sprintf(p, "TXSIZE_RND "); | 634 | seq_printf(seq, "TXSIZE_RND "); |
680 | 635 | ||
681 | if (pkt_dev->flags & F_UDPSRC_RND) | 636 | if (pkt_dev->flags & F_UDPSRC_RND) |
682 | p += sprintf(p, "UDPSRC_RND "); | 637 | seq_printf(seq, "UDPSRC_RND "); |
683 | 638 | ||
684 | if (pkt_dev->flags & F_UDPDST_RND) | 639 | if (pkt_dev->flags & F_UDPDST_RND) |
685 | p += sprintf(p, "UDPDST_RND "); | 640 | seq_printf(seq, "UDPDST_RND "); |
686 | 641 | ||
687 | if (pkt_dev->flags & F_MACSRC_RND) | 642 | if (pkt_dev->flags & F_MACSRC_RND) |
688 | p += sprintf(p, "MACSRC_RND "); | 643 | seq_printf(seq, "MACSRC_RND "); |
689 | 644 | ||
690 | if (pkt_dev->flags & F_MACDST_RND) | 645 | if (pkt_dev->flags & F_MACDST_RND) |
691 | p += sprintf(p, "MACDST_RND "); | 646 | seq_printf(seq, "MACDST_RND "); |
692 | 647 | ||
693 | 648 | ||
694 | p += sprintf(p, "\n"); | 649 | seq_puts(seq, "\n"); |
695 | 650 | ||
696 | sa = pkt_dev->started_at; | 651 | sa = pkt_dev->started_at; |
697 | stopped = pkt_dev->stopped_at; | 652 | stopped = pkt_dev->stopped_at; |
698 | if (pkt_dev->running) | 653 | if (pkt_dev->running) |
699 | stopped = now; /* not really stopped, more like last-running-at */ | 654 | stopped = now; /* not really stopped, more like last-running-at */ |
700 | 655 | ||
701 | p += sprintf(p, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", | 656 | seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", |
702 | (unsigned long long) pkt_dev->sofar, | 657 | (unsigned long long) pkt_dev->sofar, |
703 | (unsigned long long) pkt_dev->errors, | 658 | (unsigned long long) pkt_dev->errors, |
704 | (unsigned long long) sa, | 659 | (unsigned long long) sa, |
705 | (unsigned long long) stopped, | 660 | (unsigned long long) stopped, |
706 | (unsigned long long) pkt_dev->idle_acc); | 661 | (unsigned long long) pkt_dev->idle_acc); |
707 | 662 | ||
708 | p += sprintf(p, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", | 663 | seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", |
709 | pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset); | 664 | pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, |
665 | pkt_dev->cur_src_mac_offset); | ||
710 | 666 | ||
711 | if(pkt_dev->flags & F_IPV6) { | 667 | if(pkt_dev->flags & F_IPV6) { |
712 | char b1[128], b2[128]; | 668 | char b1[128], b2[128]; |
713 | fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); | 669 | fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); |
714 | fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); | 670 | fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); |
715 | p += sprintf(p, " cur_saddr: %s cur_daddr: %s\n", b2, b1); | 671 | seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); |
716 | } | 672 | } |
717 | else | 673 | else |
718 | p += sprintf(p, " cur_saddr: 0x%x cur_daddr: 0x%x\n", | 674 | seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", |
719 | pkt_dev->cur_saddr, pkt_dev->cur_daddr); | 675 | pkt_dev->cur_saddr, pkt_dev->cur_daddr); |
720 | 676 | ||
721 | 677 | ||
722 | p += sprintf(p, " cur_udp_dst: %d cur_udp_src: %d\n", | 678 | seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", |
723 | pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); | 679 | pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); |
724 | 680 | ||
725 | p += sprintf(p, " flows: %u\n", pkt_dev->nflows); | 681 | seq_printf(seq, " flows: %u\n", pkt_dev->nflows); |
726 | 682 | ||
727 | if (pkt_dev->result[0]) | 683 | if (pkt_dev->result[0]) |
728 | p += sprintf(p, "Result: %s\n", pkt_dev->result); | 684 | seq_printf(seq, "Result: %s\n", pkt_dev->result); |
729 | else | 685 | else |
730 | p += sprintf(p, "Result: Idle\n"); | 686 | seq_printf(seq, "Result: Idle\n"); |
731 | *eof = 1; | ||
732 | 687 | ||
733 | return p - buf; | 688 | return 0; |
734 | } | 689 | } |
735 | 690 | ||
736 | 691 | ||
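The pktgen hunks above are a textbook conversion from the legacy read_proc/write_proc callbacks, with their hand-rolled buffer, *eof and offset bookkeeping, to the seq_file interface: a show() routine prints through seq_printf()/seq_puts(), and single_open() takes care of sizing, offsets and partial reads. A minimal read-only instance of the pattern, with hypothetical foo names:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct foo_state { unsigned long count; };	/* hypothetical payload */

static int foo_show(struct seq_file *seq, void *v)
{
	struct foo_state *fs = seq->private;	/* third arg of single_open() */

	seq_printf(seq, "count: %lu\n", fs->count);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is whatever was stored in pe->data */
	return single_open(file, foo_show, PDE(inode)->data);
}

static struct file_operations foo_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};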
@@ -802,13 +757,14 @@ done_str: | |||
802 | return i; | 757 | return i; |
803 | } | 758 | } |
804 | 759 | ||
805 | static int proc_if_write(struct file *file, const char __user *user_buffer, | 760 | static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer, |
806 | unsigned long count, void *data) | 761 | size_t count, loff_t *offset) |
807 | { | 762 | { |
763 | struct seq_file *seq = (struct seq_file *) file->private_data; | ||
764 | struct pktgen_dev *pkt_dev = seq->private; | ||
808 | int i = 0, max, len; | 765 | int i = 0, max, len; |
809 | char name[16], valstr[32]; | 766 | char name[16], valstr[32]; |
810 | unsigned long value = 0; | 767 | unsigned long value = 0; |
811 | struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); | ||
812 | char* pg_result = NULL; | 768 | char* pg_result = NULL; |
813 | int tmp = 0; | 769 | int tmp = 0; |
814 | char buf[128]; | 770 | char buf[128]; |
@@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, | |||
849 | if (copy_from_user(tb, user_buffer, count)) | 805 | if (copy_from_user(tb, user_buffer, count)) |
850 | return -EFAULT; | 806 | return -EFAULT; |
851 | tb[count] = 0; | 807 | tb[count] = 0; |
852 | printk("pktgen: %s,%lu buffer -:%s:-\n", name, count, tb); | 808 | printk("pktgen: %s,%lu buffer -:%s:-\n", name, |
809 | (unsigned long) count, tb); | ||
853 | } | 810 | } |
854 | 811 | ||
855 | if (!strcmp(name, "min_pkt_size")) { | 812 | if (!strcmp(name, "min_pkt_size")) { |
@@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, | |||
1335 | return -EINVAL; | 1292 | return -EINVAL; |
1336 | } | 1293 | } |
1337 | 1294 | ||
1338 | static int proc_thread_read(char *buf , char **start, off_t offset, | 1295 | static int pktgen_if_open(struct inode *inode, struct file *file) |
1339 | int len, int *eof, void *data) | ||
1340 | { | 1296 | { |
1341 | char *p; | 1297 | return single_open(file, pktgen_if_show, PDE(inode)->data); |
1342 | struct pktgen_thread *t = (struct pktgen_thread*)(data); | 1298 | } |
1343 | struct pktgen_dev *pkt_dev = NULL; | ||
1344 | 1299 | ||
1300 | static struct file_operations pktgen_if_fops = { | ||
1301 | .owner = THIS_MODULE, | ||
1302 | .open = pktgen_if_open, | ||
1303 | .read = seq_read, | ||
1304 | .llseek = seq_lseek, | ||
1305 | .write = pktgen_if_write, | ||
1306 | .release = single_release, | ||
1307 | }; | ||
1345 | 1308 | ||
1346 | if (!t) { | 1309 | static int pktgen_thread_show(struct seq_file *seq, void *v) |
1347 | printk("pktgen: ERROR: could not find thread in proc_thread_read\n"); | 1310 | { |
1348 | return -EINVAL; | 1311 | struct pktgen_thread *t = seq->private; |
1349 | } | 1312 | struct pktgen_dev *pkt_dev = NULL; |
1313 | |||
1314 | BUG_ON(!t); | ||
1350 | 1315 | ||
1351 | p = buf; | 1316 | seq_printf(seq, "Name: %s max_before_softirq: %d\n", |
1352 | p += sprintf(p, "Name: %s max_before_softirq: %d\n", | ||
1353 | t->name, t->max_before_softirq); | 1317 | t->name, t->max_before_softirq); |
1354 | 1318 | ||
1355 | p += sprintf(p, "Running: "); | 1319 | seq_printf(seq, "Running: "); |
1356 | 1320 | ||
1357 | if_lock(t); | 1321 | if_lock(t); |
1358 | for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) | 1322 | for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) |
1359 | if(pkt_dev->running) | 1323 | if(pkt_dev->running) |
1360 | p += sprintf(p, "%s ", pkt_dev->ifname); | 1324 | seq_printf(seq, "%s ", pkt_dev->ifname); |
1361 | 1325 | ||
1362 | p += sprintf(p, "\nStopped: "); | 1326 | seq_printf(seq, "\nStopped: "); |
1363 | 1327 | ||
1364 | for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) | 1328 | for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) |
1365 | if(!pkt_dev->running) | 1329 | if(!pkt_dev->running) |
1366 | p += sprintf(p, "%s ", pkt_dev->ifname); | 1330 | seq_printf(seq, "%s ", pkt_dev->ifname); |
1367 | 1331 | ||
1368 | if (t->result[0]) | 1332 | if (t->result[0]) |
1369 | p += sprintf(p, "\nResult: %s\n", t->result); | 1333 | seq_printf(seq, "\nResult: %s\n", t->result); |
1370 | else | 1334 | else |
1371 | p += sprintf(p, "\nResult: NA\n"); | 1335 | seq_printf(seq, "\nResult: NA\n"); |
1372 | |||
1373 | *eof = 1; | ||
1374 | 1336 | ||
1375 | if_unlock(t); | 1337 | if_unlock(t); |
1376 | 1338 | ||
1377 | return p - buf; | 1339 | return 0; |
1378 | } | 1340 | } |
1379 | 1341 | ||
1380 | static int proc_thread_write(struct file *file, const char __user *user_buffer, | 1342 | static ssize_t pktgen_thread_write(struct file *file, |
1381 | unsigned long count, void *data) | 1343 | const char __user *user_buffer, |
1344 | size_t count, loff_t *offset) | ||
1382 | { | 1345 | { |
1346 | struct seq_file *seq = (struct seq_file *) file->private_data; | ||
1347 | struct pktgen_thread *t = seq->private; | ||
1383 | int i = 0, max, len, ret; | 1348 | int i = 0, max, len, ret; |
1384 | char name[40]; | 1349 | char name[40]; |
1385 | struct pktgen_thread *t; | ||
1386 | char *pg_result; | 1350 | char *pg_result; |
1387 | unsigned long value = 0; | 1351 | unsigned long value = 0; |
1388 | 1352 | ||
1389 | if (count < 1) { | 1353 | if (count < 1) { |
1390 | // sprintf(pg_result, "Wrong command format"); | 1354 | // sprintf(pg_result, "Wrong command format"); |
1391 | return -EINVAL; | 1355 | return -EINVAL; |
1392 | } | 1356 | } |
1393 | 1357 | ||
1394 | max = count - i; | 1358 | max = count - i; |
1395 | len = count_trail_chars(&user_buffer[i], max); | 1359 | len = count_trail_chars(&user_buffer[i], max); |
1396 | if (len < 0) | 1360 | if (len < 0) |
1397 | return len; | 1361 | return len; |
1398 | 1362 | ||
1399 | i += len; | 1363 | i += len; |
1400 | 1364 | ||
1401 | /* Read variable name */ | 1365 | /* Read variable name */ |
1402 | 1366 | ||
1403 | len = strn_len(&user_buffer[i], sizeof(name) - 1); | 1367 | len = strn_len(&user_buffer[i], sizeof(name) - 1); |
1404 | if (len < 0) | 1368 | if (len < 0) |
1405 | return len; | 1369 | return len; |
1406 | 1370 | ||
1407 | memset(name, 0, sizeof(name)); | 1371 | memset(name, 0, sizeof(name)); |
1408 | if (copy_from_user(name, &user_buffer[i], len)) | 1372 | if (copy_from_user(name, &user_buffer[i], len)) |
1409 | return -EFAULT; | 1373 | return -EFAULT; |
1410 | i += len; | 1374 | i += len; |
1411 | 1375 | ||
1412 | max = count -i; | 1376 | max = count -i; |
1413 | len = count_trail_chars(&user_buffer[i], max); | 1377 | len = count_trail_chars(&user_buffer[i], max); |
1414 | if (len < 0) | 1378 | if (len < 0) |
1415 | return len; | 1379 | return len; |
1416 | 1380 | ||
1417 | i += len; | 1381 | i += len; |
1418 | 1382 | ||
1419 | if (debug) | 1383 | if (debug) |
1420 | printk("pktgen: t=%s, count=%lu\n", name, count); | 1384 | printk("pktgen: t=%s, count=%lu\n", name, |
1421 | 1385 | (unsigned long) count); | |
1422 | 1386 | ||
1423 | t = (struct pktgen_thread*)(data); | ||
1424 | if(!t) { | 1387 | if(!t) { |
1425 | printk("pktgen: ERROR: No thread\n"); | 1388 | printk("pktgen: ERROR: No thread\n"); |
1426 | ret = -EINVAL; | 1389 | ret = -EINVAL; |
@@ -1474,21 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer, | |||
1474 | return ret; | 1437 | return ret; |
1475 | } | 1438 | } |
1476 | 1439 | ||
1477 | static int create_proc_dir(void) | 1440 | static int pktgen_thread_open(struct inode *inode, struct file *file) |
1478 | { | 1441 | { |
1479 | pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL); | 1442 | return single_open(file, pktgen_thread_show, PDE(inode)->data); |
1480 | |||
1481 | if (!pg_proc_dir) | ||
1482 | return -ENODEV; | ||
1483 | |||
1484 | return 0; | ||
1485 | } | 1443 | } |
1486 | 1444 | ||
1487 | static int remove_proc_dir(void) | 1445 | static struct file_operations pktgen_thread_fops = { |
1488 | { | 1446 | .owner = THIS_MODULE, |
1489 | remove_proc_entry(PG_PROC_DIR, NULL); | 1447 | .open = pktgen_thread_open, |
1490 | return 0; | 1448 | .read = seq_read, |
1491 | } | 1449 | .llseek = seq_lseek, |
1450 | .write = pktgen_thread_write, | ||
1451 | .release = single_release, | ||
1452 | }; | ||
1492 | 1453 | ||
1493 | /* Think find or remove for NN */ | 1454 | /* Think find or remove for NN */ |
1494 | static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) | 1455 | static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) |
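The write side has one wrinkle: single_open() stashes the seq_file in file->private_data, so a write handler reaches its per-entry object through two hops instead of the old void *data argument, exactly as pktgen_if_write() and pktgen_thread_write() now do. In sketch form (same hypothetical foo names as above):

static ssize_t foo_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;	/* set by single_open() */
	struct foo_state *fs = seq->private;		/* the pe->data pointer */
	char kbuf[128];

	if (count >= sizeof(kbuf))
		count = sizeof(kbuf) - 1;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';
	/* ... parse the command and update *fs ... */
	return count;
}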
@@ -1702,7 +1663,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) | |||
1702 | start = now = getCurUs(); | 1663 | start = now = getCurUs(); |
1703 | printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); | 1664 | printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); |
1704 | while (now < spin_until_us) { | 1665 | while (now < spin_until_us) { |
1705 | /* TODO: optimise sleeping behavior */ | 1666 | /* TODO: optimize sleeping behavior */ |
1706 | if (spin_until_us - now > jiffies_to_usecs(1)+1) | 1667 | if (spin_until_us - now > jiffies_to_usecs(1)+1) |
1707 | schedule_timeout_interruptible(1); | 1668 | schedule_timeout_interruptible(1); |
1708 | else if (spin_until_us - now > 100) { | 1669 | else if (spin_until_us - now > 100) { |
@@ -2361,7 +2322,7 @@ static void pktgen_stop_all_threads_ifs(void) | |||
2361 | pktgen_stop(t); | 2322 | pktgen_stop(t); |
2362 | t = t->next; | 2323 | t = t->next; |
2363 | } | 2324 | } |
2364 | thread_unlock(); | 2325 | thread_unlock(); |
2365 | } | 2326 | } |
2366 | 2327 | ||
2367 | static int thread_is_running(struct pktgen_thread *t ) | 2328 | static int thread_is_running(struct pktgen_thread *t ) |
@@ -2552,10 +2513,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t) | |||
2552 | 2513 | ||
2553 | struct pktgen_thread *tmp = pktgen_threads; | 2514 | struct pktgen_thread *tmp = pktgen_threads; |
2554 | 2515 | ||
2555 | if (strlen(t->fname)) | 2516 | remove_proc_entry(t->name, pg_proc_dir); |
2556 | remove_proc_entry(t->fname, NULL); | ||
2557 | 2517 | ||
2558 | thread_lock(); | 2518 | thread_lock(); |
2559 | 2519 | ||
2560 | if (tmp == t) | 2520 | if (tmp == t) |
2561 | pktgen_threads = tmp->next; | 2521 | pktgen_threads = tmp->next; |
@@ -2825,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i | |||
2825 | if_lock(t); | 2785 | if_lock(t); |
2826 | 2786 | ||
2827 | for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) { | 2787 | for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) { |
2828 | if (strcmp(pkt_dev->ifname, ifname) == 0) { | 2788 | if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) { |
2829 | break; | 2789 | break; |
2830 | } | 2790 | } |
2831 | } | 2791 | } |
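This strncmp() pairs with the earlier change of ifname to char[IFNAMSIZ] and the strncpy(..., IFNAMSIZ) in pktgen_add_device(): strncpy bounded by the buffer size does not guarantee NUL termination for a maximal-length name, so the lookup must bound its comparison by the same constant rather than rely on a terminator:

/* Copy and compare with the same bound; a full-length name may be
 * unterminated, which the bounded strncmp() tolerates. */
strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
/* ... later, during lookup ... */
if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0)
	return pkt_dev;	/* found */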
@@ -2864,74 +2824,70 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev | |||
2864 | static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) | 2824 | static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) |
2865 | { | 2825 | { |
2866 | struct pktgen_dev *pkt_dev; | 2826 | struct pktgen_dev *pkt_dev; |
2827 | struct proc_dir_entry *pe; | ||
2867 | 2828 | ||
2868 | /* We don't allow a device to be on several threads */ | 2829 | /* We don't allow a device to be on several threads */ |
2869 | 2830 | ||
2870 | if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) { | 2831 | pkt_dev = __pktgen_NN_threads(ifname, FIND); |
2871 | 2832 | if (pkt_dev) { | |
2872 | pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL); | 2833 | printk("pktgen: ERROR: interface already used.\n"); |
2873 | if (!pkt_dev) | 2834 | return -EBUSY; |
2874 | return -ENOMEM; | 2835 | } |
2875 | 2836 | ||
2876 | memset(pkt_dev, 0, sizeof(struct pktgen_dev)); | 2837 | pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); |
2838 | if (!pkt_dev) | ||
2839 | return -ENOMEM; | ||
2877 | 2840 | ||
2878 | pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); | 2841 | pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); |
2879 | if (pkt_dev->flows == NULL) { | 2842 | if (pkt_dev->flows == NULL) { |
2880 | kfree(pkt_dev); | 2843 | kfree(pkt_dev); |
2881 | return -ENOMEM; | 2844 | return -ENOMEM; |
2882 | } | 2845 | } |
2883 | memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); | 2846 | memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); |
2884 | |||
2885 | pkt_dev->min_pkt_size = ETH_ZLEN; | ||
2886 | pkt_dev->max_pkt_size = ETH_ZLEN; | ||
2887 | pkt_dev->nfrags = 0; | ||
2888 | pkt_dev->clone_skb = pg_clone_skb_d; | ||
2889 | pkt_dev->delay_us = pg_delay_d / 1000; | ||
2890 | pkt_dev->delay_ns = pg_delay_d % 1000; | ||
2891 | pkt_dev->count = pg_count_d; | ||
2892 | pkt_dev->sofar = 0; | ||
2893 | pkt_dev->udp_src_min = 9; /* sink port */ | ||
2894 | pkt_dev->udp_src_max = 9; | ||
2895 | pkt_dev->udp_dst_min = 9; | ||
2896 | pkt_dev->udp_dst_max = 9; | ||
2897 | |||
2898 | strncpy(pkt_dev->ifname, ifname, 31); | ||
2899 | sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname); | ||
2900 | |||
2901 | if (! pktgen_setup_dev(pkt_dev)) { | ||
2902 | printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); | ||
2903 | if (pkt_dev->flows) | ||
2904 | vfree(pkt_dev->flows); | ||
2905 | kfree(pkt_dev); | ||
2906 | return -ENODEV; | ||
2907 | } | ||
2908 | 2847 | ||
2909 | pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL); | 2848 | pkt_dev->min_pkt_size = ETH_ZLEN; |
2910 | if (!pkt_dev->proc_ent) { | 2849 | pkt_dev->max_pkt_size = ETH_ZLEN; |
2911 | printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname); | 2850 | pkt_dev->nfrags = 0; |
2912 | if (pkt_dev->flows) | 2851 | pkt_dev->clone_skb = pg_clone_skb_d; |
2913 | vfree(pkt_dev->flows); | 2852 | pkt_dev->delay_us = pg_delay_d / 1000; |
2914 | kfree(pkt_dev); | 2853 | pkt_dev->delay_ns = pg_delay_d % 1000; |
2915 | return -EINVAL; | 2854 | pkt_dev->count = pg_count_d; |
2916 | } | 2855 | pkt_dev->sofar = 0; |
2917 | pkt_dev->proc_ent->read_proc = proc_if_read; | 2856 | pkt_dev->udp_src_min = 9; /* sink port */ |
2918 | pkt_dev->proc_ent->write_proc = proc_if_write; | 2857 | pkt_dev->udp_src_max = 9; |
2919 | pkt_dev->proc_ent->data = (void*)(pkt_dev); | 2858 | pkt_dev->udp_dst_min = 9; |
2920 | pkt_dev->proc_ent->owner = THIS_MODULE; | 2859 | pkt_dev->udp_dst_max = 9; |
2860 | |||
2861 | strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); | ||
2862 | |||
2863 | if (! pktgen_setup_dev(pkt_dev)) { | ||
2864 | printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); | ||
2865 | if (pkt_dev->flows) | ||
2866 | vfree(pkt_dev->flows); | ||
2867 | kfree(pkt_dev); | ||
2868 | return -ENODEV; | ||
2869 | } | ||
2870 | |||
2871 | pe = create_proc_entry(ifname, 0600, pg_proc_dir); | ||
2872 | if (!pe) { | ||
2873 | printk("pktgen: cannot create %s/%s procfs entry.\n", | ||
2874 | PG_PROC_DIR, ifname); | ||
2875 | if (pkt_dev->flows) | ||
2876 | vfree(pkt_dev->flows); | ||
2877 | kfree(pkt_dev); | ||
2878 | return -EINVAL; | ||
2879 | } | ||
2880 | pe->proc_fops = &pktgen_if_fops; | ||
2881 | pe->data = pkt_dev; | ||
2921 | 2882 | ||
2922 | return add_dev_to_thread(t, pkt_dev); | 2883 | return add_dev_to_thread(t, pkt_dev); |
2923 | } | ||
2924 | else { | ||
2925 | printk("pktgen: ERROR: interface already used.\n"); | ||
2926 | return -EBUSY; | ||
2927 | } | ||
2928 | } | 2884 | } |
2929 | 2885 | ||
2930 | static struct pktgen_thread *pktgen_find_thread(const char* name) | 2886 | static struct pktgen_thread *pktgen_find_thread(const char* name) |
2931 | { | 2887 | { |
2932 | struct pktgen_thread *t = NULL; | 2888 | struct pktgen_thread *t = NULL; |
2933 | 2889 | ||
2934 | thread_lock(); | 2890 | thread_lock(); |
2935 | 2891 | ||
2936 | t = pktgen_threads; | 2892 | t = pktgen_threads; |
2937 | while (t) { | 2893 | while (t) { |
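Two cleanups in pktgen_add_device() above are worth noting: the busy case is now rejected first, so the main body no longer nests inside an else branch, and kzalloc() replaces the kmalloc()+memset() pair. In outline:

/* Sketch: fail fast, then allocate pre-zeroed memory. */
if (__pktgen_NN_threads(ifname, FIND)) {
	printk("pktgen: ERROR: interface already used.\n");
	return -EBUSY;
}

pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
if (!pkt_dev)
	return -ENOMEM;	/* kzalloc == kmalloc + memset(0) in one call */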
@@ -2947,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name) | |||
2947 | static int pktgen_create_thread(const char* name, int cpu) | 2903 | static int pktgen_create_thread(const char* name, int cpu) |
2948 | { | 2904 | { |
2949 | struct pktgen_thread *t = NULL; | 2905 | struct pktgen_thread *t = NULL; |
2906 | struct proc_dir_entry *pe; | ||
2950 | 2907 | ||
2951 | if (strlen(name) > 31) { | 2908 | if (strlen(name) > 31) { |
2952 | printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); | 2909 | printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); |
@@ -2958,28 +2915,26 @@ static int pktgen_create_thread(const char* name, int cpu) | |||
2958 | return -EINVAL; | 2915 | return -EINVAL; |
2959 | } | 2916 | } |
2960 | 2917 | ||
2961 | t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL)); | 2918 | t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); |
2962 | if (!t) { | 2919 | if (!t) { |
2963 | printk("pktgen: ERROR: out of memory, can't create new thread.\n"); | 2920 | printk("pktgen: ERROR: out of memory, can't create new thread.\n"); |
2964 | return -ENOMEM; | 2921 | return -ENOMEM; |
2965 | } | 2922 | } |
2966 | 2923 | ||
2967 | memset(t, 0, sizeof(struct pktgen_thread)); | ||
2968 | strcpy(t->name, name); | 2924 | strcpy(t->name, name); |
2969 | spin_lock_init(&t->if_lock); | 2925 | spin_lock_init(&t->if_lock); |
2970 | t->cpu = cpu; | 2926 | t->cpu = cpu; |
2971 | 2927 | ||
2972 | sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name); | 2928 | pe = create_proc_entry(t->name, 0600, pg_proc_dir); |
2973 | t->proc_ent = create_proc_entry(t->fname, 0600, NULL); | 2929 | if (!pe) { |
2974 | if (!t->proc_ent) { | 2930 | printk("pktgen: cannot create %s/%s procfs entry.\n", |
2975 | printk("pktgen: cannot create %s procfs entry.\n", t->fname); | 2931 | PG_PROC_DIR, t->name); |
2976 | kfree(t); | 2932 | kfree(t); |
2977 | return -EINVAL; | 2933 | return -EINVAL; |
2978 | } | 2934 | } |
2979 | t->proc_ent->read_proc = proc_thread_read; | 2935 | |
2980 | t->proc_ent->write_proc = proc_thread_write; | 2936 | pe->proc_fops = &pktgen_thread_fops; |
2981 | t->proc_ent->data = (void*)(t); | 2937 | pe->data = t; |
2982 | t->proc_ent->owner = THIS_MODULE; | ||
2983 | 2938 | ||
2984 | t->next = pktgen_threads; | 2939 | t->next = pktgen_threads; |
2985 | pktgen_threads = t; | 2940 | pktgen_threads = t; |
@@ -3034,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ | |||
3034 | 2989 | ||
3035 | /* Clean up proc file system */ | 2990 | /* Clean up proc file system */ |
3036 | 2991 | ||
3037 | if (strlen(pkt_dev->fname)) | 2992 | remove_proc_entry(pkt_dev->ifname, pg_proc_dir); |
3038 | remove_proc_entry(pkt_dev->fname, NULL); | ||
3039 | 2993 | ||
3040 | if (pkt_dev->flows) | 2994 | if (pkt_dev->flows) |
3041 | vfree(pkt_dev->flows); | 2995 | vfree(pkt_dev->flows); |
@@ -3046,31 +3000,31 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ | |||
3046 | static int __init pg_init(void) | 3000 | static int __init pg_init(void) |
3047 | { | 3001 | { |
3048 | int cpu; | 3002 | int cpu; |
3049 | printk(version); | 3003 | struct proc_dir_entry *pe; |
3050 | 3004 | ||
3051 | module_fname[0] = 0; | 3005 | printk(version); |
3052 | 3006 | ||
3053 | create_proc_dir(); | 3007 | pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); |
3008 | if (!pg_proc_dir) | ||
3009 | return -ENODEV; | ||
3010 | pg_proc_dir->owner = THIS_MODULE; | ||
3054 | 3011 | ||
3055 | sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR); | 3012 | pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); |
3056 | module_proc_ent = create_proc_entry(module_fname, 0600, NULL); | 3013 | if (pe == NULL) { |
3057 | if (!module_proc_ent) { | 3014 | printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL); |
3058 | printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname); | 3015 | proc_net_remove(PG_PROC_DIR); |
3059 | return -EINVAL; | 3016 | return -EINVAL; |
3060 | } | 3017 | } |
3061 | 3018 | ||
3062 | module_proc_ent->proc_fops = &pktgen_fops; | 3019 | pe->proc_fops = &pktgen_fops; |
3063 | module_proc_ent->data = NULL; | 3020 | pe->data = NULL; |
3064 | 3021 | ||
3065 | /* Register us to receive netdevice events */ | 3022 | /* Register us to receive netdevice events */ |
3066 | register_netdevice_notifier(&pktgen_notifier_block); | 3023 | register_netdevice_notifier(&pktgen_notifier_block); |
3067 | 3024 | ||
3068 | for (cpu = 0; cpu < NR_CPUS ; cpu++) { | 3025 | for_each_online_cpu(cpu) { |
3069 | char buf[30]; | 3026 | char buf[30]; |
3070 | 3027 | ||
3071 | if (!cpu_online(cpu)) | ||
3072 | continue; | ||
3073 | |||
3074 | sprintf(buf, "kpktgend_%i", cpu); | 3028 | sprintf(buf, "kpktgend_%i", cpu); |
3075 | pktgen_create_thread(buf, cpu); | 3029 | pktgen_create_thread(buf, cpu); |
3076 | } | 3030 | } |
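With every pktgen file now parented on pg_proc_dir, setup and teardown pair up cleanly: the directory is created under proc_net, so everything lives in /proc/net/pktgen/, and the cleanup path (next hunk) removes the control entry before the directory. Roughly:

/* Sketch of the lifecycle; error handling trimmed. */
pg_proc_dir = proc_mkdir("pktgen", proc_net);	/* /proc/net/pktgen */
pe = create_proc_entry("pgctrl", 0600, pg_proc_dir);
pe->proc_fops = &pktgen_fops;

/* ... module lifetime ... */

remove_proc_entry("pgctrl", pg_proc_dir);	/* children first */
proc_net_remove("pktgen");			/* then the directory */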
@@ -3095,10 +3049,8 @@ static void __exit pg_cleanup(void) | |||
3095 | unregister_netdevice_notifier(&pktgen_notifier_block); | 3049 | unregister_netdevice_notifier(&pktgen_notifier_block); |
3096 | 3050 | ||
3097 | /* Clean up proc file system */ | 3051 | /* Clean up proc file system */ |
3098 | 3052 | remove_proc_entry(PGCTRL, pg_proc_dir); | |
3099 | remove_proc_entry(module_fname, NULL); | 3053 | proc_net_remove(PG_PROC_DIR); |
3100 | |||
3101 | remove_proc_dir(); | ||
3102 | } | 3054 | } |
3103 | 3055 | ||
3104 | 3056 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index af9b1516e21f..ef9d46b91eb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |||
122 | * __alloc_skb - allocate a network buffer | 122 | * __alloc_skb - allocate a network buffer |
123 | * @size: size to allocate | 123 | * @size: size to allocate |
124 | * @gfp_mask: allocation mask | 124 | * @gfp_mask: allocation mask |
125 | * @fclone: allocate from fclone cache instead of head cache | ||
126 | * and allocate a cloned (child) skb | ||
125 | * | 127 | * |
126 | * Allocate a new &sk_buff. The returned buffer has no headroom and a | 128 | * Allocate a new &sk_buff. The returned buffer has no headroom and a |
127 | * tail room of size bytes. The object has a reference count of one. | 129 | * tail room of size bytes. The object has a reference count of one. |
@@ -410,6 +412,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
410 | C(nfct); | 412 | C(nfct); |
411 | nf_conntrack_get(skb->nfct); | 413 | nf_conntrack_get(skb->nfct); |
412 | C(nfctinfo); | 414 | C(nfctinfo); |
415 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | ||
416 | C(ipvs_property); | ||
417 | #endif | ||
413 | #ifdef CONFIG_BRIDGE_NETFILTER | 418 | #ifdef CONFIG_BRIDGE_NETFILTER |
414 | C(nf_bridge); | 419 | C(nf_bridge); |
415 | nf_bridge_get(skb->nf_bridge); | 420 | nf_bridge_get(skb->nf_bridge); |
@@ -467,6 +472,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
467 | new->nfct = old->nfct; | 472 | new->nfct = old->nfct; |
468 | nf_conntrack_get(old->nfct); | 473 | nf_conntrack_get(old->nfct); |
469 | new->nfctinfo = old->nfctinfo; | 474 | new->nfctinfo = old->nfctinfo; |
475 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | ||
476 | new->ipvs_property = old->ipvs_property; | ||
477 | #endif | ||
470 | #ifdef CONFIG_BRIDGE_NETFILTER | 478 | #ifdef CONFIG_BRIDGE_NETFILTER |
471 | new->nf_bridge = old->nf_bridge; | 479 | new->nf_bridge = old->nf_bridge; |
472 | nf_bridge_get(old->nf_bridge); | 480 | nf_bridge_get(old->nf_bridge); |
diff --git a/net/core/sock.c b/net/core/sock.c
index 1c52fe809eda..9602ceb3bac9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -940,7 +940,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk, | |||
940 | int noblock, int *errcode) | 940 | int noblock, int *errcode) |
941 | { | 941 | { |
942 | struct sk_buff *skb; | 942 | struct sk_buff *skb; |
943 | unsigned int gfp_mask; | 943 | gfp_t gfp_mask; |
944 | long timeo; | 944 | long timeo; |
945 | int err; | 945 | int err; |
946 | 946 | ||
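This one-liner, like the prio change in net/dccp/output.c further down, belongs to the tree-wide gfp_t annotation effort: allocation masks get their own sparse-checkable type, so mixing a mask with an ordinary integer shows up at static-analysis time. For instance:

/* Declaring the mask as unsigned int compiles fine but defeats
 * sparse's __bitwise checking; gfp_t keeps the checker honest. */
gfp_t gfp_mask = sk->sk_allocation;
skb = alloc_skb(size, gfp_mask);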
diff --git a/net/core/wireless.c b/net/core/wireless.c
index d17f1583ea3e..271ddb35b0b2 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -455,10 +455,15 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev) | |||
455 | 455 | ||
456 | /* Old location, field to be removed in next WE */ | 456 | /* Old location, field to be removed in next WE */ |
457 | if(dev->get_wireless_stats) { | 457 | if(dev->get_wireless_stats) { |
458 | printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n", | 458 | static int printed_message; |
459 | dev->name); | 459 | |
460 | if (!printed_message++) | ||
461 | printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n", | ||
462 | dev->name); | ||
463 | |||
460 | return dev->get_wireless_stats(dev); | 464 | return dev->get_wireless_stats(dev); |
461 | } | 465 | } |
466 | |||
462 | /* Not found */ | 467 | /* Not found */ |
463 | return (struct iw_statistics *) NULL; | 468 | return (struct iw_statistics *) NULL; |
464 | } | 469 | } |
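The wireless.c change throttles a per-call debug nag down to a single occurrence with a function-local static counter; the post-increment means only the first caller observes zero. The idiom in isolation:

/* Log a deprecation warning exactly once. */
static int printed_message;

if (!printed_message++)
	printk(KERN_DEBUG "%s: driver uses a deprecated interface\n",
	       dev->name);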
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae088d1347af..6298cf58ff9e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | |||
463 | if (skb != NULL) { | 463 | if (skb != NULL) { |
464 | const struct inet_request_sock *ireq = inet_rsk(req); | 464 | const struct inet_request_sock *ireq = inet_rsk(req); |
465 | 465 | ||
466 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
466 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 467 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
467 | ireq->rmt_addr, | 468 | ireq->rmt_addr, |
468 | ireq->opt); | 469 | ireq->opt); |
@@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code) | |||
647 | if (skb != NULL) { | 648 | if (skb != NULL) { |
648 | const struct inet_sock *inet = inet_sk(sk); | 649 | const struct inet_sock *inet = inet_sk(sk); |
649 | 650 | ||
651 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
650 | err = ip_build_and_send_pkt(skb, sk, | 652 | err = ip_build_and_send_pkt(skb, sk, |
651 | inet->saddr, inet->daddr, NULL); | 653 | inet->saddr, inet->daddr, NULL); |
652 | if (err == NET_XMIT_CN) | 654 | if (err == NET_XMIT_CN) |
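The two memset() additions here (and the one in net/dccp/output.c below) defend against stale control-block state: IPCB(skb) is simply the generic skb->cb scratch area viewed as struct inet_skb_parm, and an skb crossing from the receive path to the transmit path can still carry another layer's leftovers there, which the IP output routines would misread as IP options. Hence, before handing the skb down:

/* IPCB() overlays IP metadata on the generic control block:
 *   #define IPCB(skb)  ((struct inet_skb_parm *)((skb)->cb))
 * Zero the options area of a possibly-recycled skb first. */
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
			    ireq->rmt_addr, ireq->opt);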
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 4786bdcddcc9..d59f86f7ceab 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
62 | 62 | ||
63 | skb->h.raw = skb_push(skb, dccp_header_size); | 63 | skb->h.raw = skb_push(skb, dccp_header_size); |
64 | dh = dccp_hdr(skb); | 64 | dh = dccp_hdr(skb); |
65 | /* | 65 | |
66 | * Data packets are not cloned as they are never retransmitted | 66 | if (!skb->sk) |
67 | */ | ||
68 | if (skb_cloned(skb)) | ||
69 | skb_set_owner_w(skb, sk); | 67 | skb_set_owner_w(skb, sk); |
70 | 68 | ||
71 | /* Build DCCP header and checksum it. */ | 69 | /* Build DCCP header and checksum it. */ |
@@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
102 | 100 | ||
103 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 101 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
104 | 102 | ||
103 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
105 | err = ip_queue_xmit(skb, 0); | 104 | err = ip_queue_xmit(skb, 0); |
106 | if (err <= 0) | 105 | if (err <= 0) |
107 | return err; | 106 | return err; |
@@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo) | |||
243 | 242 | ||
244 | err = dccp_transmit_skb(sk, skb); | 243 | err = dccp_transmit_skb(sk, skb); |
245 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); | 244 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); |
246 | } | 245 | } else |
246 | kfree_skb(skb); | ||
247 | 247 | ||
248 | return err; | 248 | return err; |
249 | } | 249 | } |
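The added else branch in dccp_write_xmit() plugs an skb leak: a buffer handed to the function must be consumed on every path, and previously the not-sent branch simply dropped the pointer on the floor. The shape of the fix (hypothetical can_send standing in for the actual gating condition):

if (can_send) {
	err = dccp_transmit_skb(sk, skb);	/* transmit path consumes it */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
} else
	kfree_skb(skb);				/* was leaked before this patch */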
@@ -495,7 +495,7 @@ void dccp_send_close(struct sock *sk, const int active) | |||
495 | { | 495 | { |
496 | struct dccp_sock *dp = dccp_sk(sk); | 496 | struct dccp_sock *dp = dccp_sk(sk); |
497 | struct sk_buff *skb; | 497 | struct sk_buff *skb; |
498 | const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC; | 498 | const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC; |
499 | 499 | ||
500 | skb = alloc_skb(sk->sk_prot->max_header, prio); | 500 | skb = alloc_skb(sk->sk_prot->max_header, prio); |
501 | if (skb == NULL) | 501 | if (skb == NULL) |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a1cfd0e9e3bc..a021c3422f67 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
402 | * This bug was _quickly_ found & fixed by just looking at an OSTRA | 402 | * This bug was _quickly_ found & fixed by just looking at an OSTRA |
403 | * generated callgraph 8) -acme | 403 | * generated callgraph 8) -acme |
404 | */ | 404 | */ |
405 | if (rc != 0) | ||
406 | goto out_discard; | ||
407 | out_release: | 405 | out_release: |
408 | release_sock(sk); | 406 | release_sock(sk); |
409 | return rc ? : len; | 407 | return rc ? : len; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 1186dc44cdff..3f25cadccddd 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
719 | if (saddr->sdn_flags & ~SDF_WILD) | 719 | if (saddr->sdn_flags & ~SDF_WILD) |
720 | return -EINVAL; | 720 | return -EINVAL; |
721 | 721 | ||
722 | #if 1 | ||
723 | if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || | 722 | if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || |
724 | (saddr->sdn_flags & SDF_WILD))) | 723 | (saddr->sdn_flags & SDF_WILD))) |
725 | return -EACCES; | 724 | return -EACCES; |
726 | #else | ||
727 | /* | ||
728 | * Maybe put the default actions in the default security ops for | ||
729 | * dn_prot_sock ? Would be nice if the capable call would go there | ||
730 | * too. | ||
731 | */ | ||
732 | if (security_dn_prot_sock(saddr) && | ||
733 | !capable(CAP_NET_BIND_SERVICE) || | ||
734 | saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD)) | ||
735 | return -EACCES; | ||
736 | #endif | ||
737 | |||
738 | 725 | ||
739 | if (!(saddr->sdn_flags & SDF_WILD)) { | 726 | if (!(saddr->sdn_flags & SDF_WILD)) { |
740 | if (dn_ntohs(saddr->sdn_nodeaddrl)) { | 727 | if (dn_ntohs(saddr->sdn_nodeaddrl)) { |
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile index a6ccac5baea8..f988417121da 100644 --- a/net/ieee80211/Makefile +++ b/net/ieee80211/Makefile | |||
@@ -7,5 +7,6 @@ ieee80211-objs := \ | |||
7 | ieee80211_module.o \ | 7 | ieee80211_module.o \ |
8 | ieee80211_tx.o \ | 8 | ieee80211_tx.o \ |
9 | ieee80211_rx.o \ | 9 | ieee80211_rx.o \ |
10 | ieee80211_wx.o | 10 | ieee80211_wx.o \ |
11 | ieee80211_geo.o | ||
11 | 12 | ||
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index 61a9d92e455b..f3b6aa3be638 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c | |||
@@ -41,6 +41,12 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) | |||
41 | { | 41 | { |
42 | struct list_head *ptr, *n; | 42 | struct list_head *ptr, *n; |
43 | struct ieee80211_crypt_data *entry; | 43 | struct ieee80211_crypt_data *entry; |
44 | unsigned long flags; | ||
45 | |||
46 | spin_lock_irqsave(&ieee->lock, flags); | ||
47 | |||
48 | if (list_empty(&ieee->crypt_deinit_list)) | ||
49 | goto unlock; | ||
44 | 50 | ||
45 | for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; | 51 | for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; |
46 | ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { | 52 | ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { |
@@ -57,6 +63,18 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) | |||
57 | } | 63 | } |
58 | kfree(entry); | 64 | kfree(entry); |
59 | } | 65 | } |
66 | unlock: | ||
67 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
68 | } | ||
69 | |||
70 | /* After this, crypt_deinit_list won't accept new members */ | ||
71 | void ieee80211_crypt_quiescing(struct ieee80211_device *ieee) | ||
72 | { | ||
73 | unsigned long flags; | ||
74 | |||
75 | spin_lock_irqsave(&ieee->lock, flags); | ||
76 | ieee->crypt_quiesced = 1; | ||
77 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
60 | } | 78 | } |
61 | 79 | ||
62 | void ieee80211_crypt_deinit_handler(unsigned long data) | 80 | void ieee80211_crypt_deinit_handler(unsigned long data) |
@@ -64,16 +82,16 @@ void ieee80211_crypt_deinit_handler(unsigned long data) | |||
64 | struct ieee80211_device *ieee = (struct ieee80211_device *)data; | 82 | struct ieee80211_device *ieee = (struct ieee80211_device *)data; |
65 | unsigned long flags; | 83 | unsigned long flags; |
66 | 84 | ||
67 | spin_lock_irqsave(&ieee->lock, flags); | ||
68 | ieee80211_crypt_deinit_entries(ieee, 0); | 85 | ieee80211_crypt_deinit_entries(ieee, 0); |
69 | if (!list_empty(&ieee->crypt_deinit_list)) { | 86 | |
87 | spin_lock_irqsave(&ieee->lock, flags); | ||
88 | if (!list_empty(&ieee->crypt_deinit_list) && !ieee->crypt_quiesced) { | ||
70 | printk(KERN_DEBUG "%s: entries remaining in delayed crypt " | 89 | printk(KERN_DEBUG "%s: entries remaining in delayed crypt " |
71 | "deletion list\n", ieee->dev->name); | 90 | "deletion list\n", ieee->dev->name); |
72 | ieee->crypt_deinit_timer.expires = jiffies + HZ; | 91 | ieee->crypt_deinit_timer.expires = jiffies + HZ; |
73 | add_timer(&ieee->crypt_deinit_timer); | 92 | add_timer(&ieee->crypt_deinit_timer); |
74 | } | 93 | } |
75 | spin_unlock_irqrestore(&ieee->lock, flags); | 94 | spin_unlock_irqrestore(&ieee->lock, flags); |
76 | |||
77 | } | 95 | } |
78 | 96 | ||
79 | void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, | 97 | void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, |
@@ -93,10 +111,12 @@ void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, | |||
93 | * locking. */ | 111 | * locking. */ |
94 | 112 | ||
95 | spin_lock_irqsave(&ieee->lock, flags); | 113 | spin_lock_irqsave(&ieee->lock, flags); |
96 | list_add(&tmp->list, &ieee->crypt_deinit_list); | 114 | if (!ieee->crypt_quiesced) { |
97 | if (!timer_pending(&ieee->crypt_deinit_timer)) { | 115 | list_add(&tmp->list, &ieee->crypt_deinit_list); |
98 | ieee->crypt_deinit_timer.expires = jiffies + HZ; | 116 | if (!timer_pending(&ieee->crypt_deinit_timer)) { |
99 | add_timer(&ieee->crypt_deinit_timer); | 117 | ieee->crypt_deinit_timer.expires = jiffies + HZ; |
118 | add_timer(&ieee->crypt_deinit_timer); | ||
119 | } | ||
100 | } | 120 | } |
101 | spin_unlock_irqrestore(&ieee->lock, flags); | 121 | spin_unlock_irqrestore(&ieee->lock, flags); |
102 | } | 122 | } |
@@ -191,18 +211,18 @@ static void ieee80211_crypt_null_deinit(void *priv) | |||
191 | } | 211 | } |
192 | 212 | ||
193 | static struct ieee80211_crypto_ops ieee80211_crypt_null = { | 213 | static struct ieee80211_crypto_ops ieee80211_crypt_null = { |
194 | .name = "NULL", | 214 | .name = "NULL", |
195 | .init = ieee80211_crypt_null_init, | 215 | .init = ieee80211_crypt_null_init, |
196 | .deinit = ieee80211_crypt_null_deinit, | 216 | .deinit = ieee80211_crypt_null_deinit, |
197 | .encrypt_mpdu = NULL, | 217 | .encrypt_mpdu = NULL, |
198 | .decrypt_mpdu = NULL, | 218 | .decrypt_mpdu = NULL, |
199 | .encrypt_msdu = NULL, | 219 | .encrypt_msdu = NULL, |
200 | .decrypt_msdu = NULL, | 220 | .decrypt_msdu = NULL, |
201 | .set_key = NULL, | 221 | .set_key = NULL, |
202 | .get_key = NULL, | 222 | .get_key = NULL, |
203 | .extra_prefix_len = 0, | 223 | .extra_mpdu_prefix_len = 0, |
204 | .extra_postfix_len = 0, | 224 | .extra_mpdu_postfix_len = 0, |
205 | .owner = THIS_MODULE, | 225 | .owner = THIS_MODULE, |
206 | }; | 226 | }; |
207 | 227 | ||
208 | static int __init ieee80211_crypto_init(void) | 228 | static int __init ieee80211_crypto_init(void) |
@@ -249,6 +269,7 @@ static void __exit ieee80211_crypto_deinit(void) | |||
249 | EXPORT_SYMBOL(ieee80211_crypt_deinit_entries); | 269 | EXPORT_SYMBOL(ieee80211_crypt_deinit_entries); |
250 | EXPORT_SYMBOL(ieee80211_crypt_deinit_handler); | 270 | EXPORT_SYMBOL(ieee80211_crypt_deinit_handler); |
251 | EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit); | 271 | EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit); |
272 | EXPORT_SYMBOL(ieee80211_crypt_quiescing); | ||
252 | 273 | ||
253 | EXPORT_SYMBOL(ieee80211_register_crypto_ops); | 274 | EXPORT_SYMBOL(ieee80211_register_crypto_ops); |
254 | EXPORT_SYMBOL(ieee80211_unregister_crypto_ops); | 275 | EXPORT_SYMBOL(ieee80211_unregister_crypto_ops); |
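Together these hunks close a teardown race: ieee80211_crypt_deinit_entries() now takes ieee->lock itself, and the new quiesced state stops the machinery from restarting once shutdown begins, since ieee80211_crypt_delayed_deinit() refuses new entries and the timer handler no longer re-arms. The teardown sequence, as wired into free_ieee80211() in the ieee80211_module.c hunk further down:

    ieee80211_crypt_quiescing(ieee);           /* no new delayed-deinit work */
    del_timer_sync(&ieee->crypt_deinit_timer); /* timer cannot re-arm now */
    ieee80211_crypt_deinit_entries(ieee, 1);   /* force-free what remains */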
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 8fc13f45971e..05a853c13012 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -119,7 +119,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | static void ccmp_init_blocks(struct crypto_tfm *tfm, | 121 | static void ccmp_init_blocks(struct crypto_tfm *tfm, |
122 | struct ieee80211_hdr *hdr, | 122 | struct ieee80211_hdr_4addr *hdr, |
123 | u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) | 123 | u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) |
124 | { | 124 | { |
125 | u8 *pos, qc = 0; | 125 | u8 *pos, qc = 0; |
@@ -191,26 +191,18 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, | |||
191 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); | 191 | ieee80211_ccmp_aes_encrypt(tfm, b0, s0); |
192 | } | 192 | } |
193 | 193 | ||
194 | static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | 194 | static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) |
195 | { | 195 | { |
196 | struct ieee80211_ccmp_data *key = priv; | 196 | struct ieee80211_ccmp_data *key = priv; |
197 | int data_len, i, blocks, last, len; | 197 | int i; |
198 | u8 *pos, *mic; | 198 | u8 *pos; |
199 | struct ieee80211_hdr *hdr; | ||
200 | u8 *b0 = key->tx_b0; | ||
201 | u8 *b = key->tx_b; | ||
202 | u8 *e = key->tx_e; | ||
203 | u8 *s0 = key->tx_s0; | ||
204 | 199 | ||
205 | if (skb_headroom(skb) < CCMP_HDR_LEN || | 200 | if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) |
206 | skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) | ||
207 | return -1; | 201 | return -1; |
208 | 202 | ||
209 | data_len = skb->len - hdr_len; | ||
210 | pos = skb_push(skb, CCMP_HDR_LEN); | 203 | pos = skb_push(skb, CCMP_HDR_LEN); |
211 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); | 204 | memmove(pos, pos + CCMP_HDR_LEN, hdr_len); |
212 | pos += hdr_len; | 205 | pos += hdr_len; |
213 | mic = skb_put(skb, CCMP_MIC_LEN); | ||
214 | 206 | ||
215 | i = CCMP_PN_LEN - 1; | 207 | i = CCMP_PN_LEN - 1; |
216 | while (i >= 0) { | 208 | while (i >= 0) { |
@@ -229,7 +221,31 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
229 | *pos++ = key->tx_pn[1]; | 221 | *pos++ = key->tx_pn[1]; |
230 | *pos++ = key->tx_pn[0]; | 222 | *pos++ = key->tx_pn[0]; |
231 | 223 | ||
232 | hdr = (struct ieee80211_hdr *)skb->data; | 224 | return CCMP_HDR_LEN; |
225 | } | ||
226 | |||
227 | static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | ||
228 | { | ||
229 | struct ieee80211_ccmp_data *key = priv; | ||
230 | int data_len, i, blocks, last, len; | ||
231 | u8 *pos, *mic; | ||
232 | struct ieee80211_hdr_4addr *hdr; | ||
233 | u8 *b0 = key->tx_b0; | ||
234 | u8 *b = key->tx_b; | ||
235 | u8 *e = key->tx_e; | ||
236 | u8 *s0 = key->tx_s0; | ||
237 | |||
238 | if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) | ||
239 | return -1; | ||
240 | |||
241 | data_len = skb->len - hdr_len; | ||
242 | len = ieee80211_ccmp_hdr(skb, hdr_len, priv); | ||
243 | if (len < 0) | ||
244 | return -1; | ||
245 | |||
246 | pos = skb->data + hdr_len + CCMP_HDR_LEN; | ||
247 | mic = skb_put(skb, CCMP_MIC_LEN); | ||
248 | hdr = (struct ieee80211_hdr_4addr *)skb->data; | ||
233 | ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); | 249 | ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); |
234 | 250 | ||
235 | blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; | 251 | blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; |
@@ -258,7 +274,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
258 | { | 274 | { |
259 | struct ieee80211_ccmp_data *key = priv; | 275 | struct ieee80211_ccmp_data *key = priv; |
260 | u8 keyidx, *pos; | 276 | u8 keyidx, *pos; |
261 | struct ieee80211_hdr *hdr; | 277 | struct ieee80211_hdr_4addr *hdr; |
262 | u8 *b0 = key->rx_b0; | 278 | u8 *b0 = key->rx_b0; |
263 | u8 *b = key->rx_b; | 279 | u8 *b = key->rx_b; |
264 | u8 *a = key->rx_a; | 280 | u8 *a = key->rx_a; |
@@ -272,7 +288,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
272 | return -1; | 288 | return -1; |
273 | } | 289 | } |
274 | 290 | ||
275 | hdr = (struct ieee80211_hdr *)skb->data; | 291 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
276 | pos = skb->data + hdr_len; | 292 | pos = skb->data + hdr_len; |
277 | keyidx = pos[3]; | 293 | keyidx = pos[3]; |
278 | if (!(keyidx & (1 << 5))) { | 294 | if (!(keyidx & (1 << 5))) { |
@@ -426,19 +442,20 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv) | |||
426 | } | 442 | } |
427 | 443 | ||
428 | static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { | 444 | static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { |
429 | .name = "CCMP", | 445 | .name = "CCMP", |
430 | .init = ieee80211_ccmp_init, | 446 | .init = ieee80211_ccmp_init, |
431 | .deinit = ieee80211_ccmp_deinit, | 447 | .deinit = ieee80211_ccmp_deinit, |
432 | .encrypt_mpdu = ieee80211_ccmp_encrypt, | 448 | .build_iv = ieee80211_ccmp_hdr, |
433 | .decrypt_mpdu = ieee80211_ccmp_decrypt, | 449 | .encrypt_mpdu = ieee80211_ccmp_encrypt, |
434 | .encrypt_msdu = NULL, | 450 | .decrypt_mpdu = ieee80211_ccmp_decrypt, |
435 | .decrypt_msdu = NULL, | 451 | .encrypt_msdu = NULL, |
436 | .set_key = ieee80211_ccmp_set_key, | 452 | .decrypt_msdu = NULL, |
437 | .get_key = ieee80211_ccmp_get_key, | 453 | .set_key = ieee80211_ccmp_set_key, |
438 | .print_stats = ieee80211_ccmp_print_stats, | 454 | .get_key = ieee80211_ccmp_get_key, |
439 | .extra_prefix_len = CCMP_HDR_LEN, | 455 | .print_stats = ieee80211_ccmp_print_stats, |
440 | .extra_postfix_len = CCMP_MIC_LEN, | 456 | .extra_mpdu_prefix_len = CCMP_HDR_LEN, |
441 | .owner = THIS_MODULE, | 457 | .extra_mpdu_postfix_len = CCMP_MIC_LEN, |
458 | .owner = THIS_MODULE, | ||
442 | }; | 459 | }; |
443 | 460 | ||
444 | static int __init ieee80211_crypto_ccmp_init(void) | 461 | static int __init ieee80211_crypto_ccmp_init(void) |
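The CCMP encrypt path is split so that building the 8-byte CCMP header (packet number plus key index) is usable on its own through the new .build_iv hook, for hardware that performs the AES-CCM work but leaves IV construction to the stack. The resulting layering, condensed from the hunk above:

    len = ieee80211_ccmp_hdr(skb, hdr_len, priv); /* prepend CCMP_HDR_LEN bytes */
    if (len < 0)
            return -1;
    pos = skb->data + hdr_len + CCMP_HDR_LEN;     /* start of payload to encrypt */
    mic = skb_put(skb, CCMP_MIC_LEN);             /* reserve the 8-byte MIC */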
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index d4f9164be1a1..2e34f29b7956 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -59,8 +59,24 @@ struct ieee80211_tkip_data { | |||
59 | 59 | ||
60 | /* scratch buffers for virt_to_page() (crypto API) */ | 60 | /* scratch buffers for virt_to_page() (crypto API) */ |
61 | u8 rx_hdr[16], tx_hdr[16]; | 61 | u8 rx_hdr[16], tx_hdr[16]; |
62 | |||
63 | unsigned long flags; | ||
62 | }; | 64 | }; |
63 | 65 | ||
66 | static unsigned long ieee80211_tkip_set_flags(unsigned long flags, void *priv) | ||
67 | { | ||
68 | struct ieee80211_tkip_data *_priv = priv; | ||
69 | unsigned long old_flags = _priv->flags; | ||
70 | _priv->flags = flags; | ||
71 | return old_flags; | ||
72 | } | ||
73 | |||
74 | static unsigned long ieee80211_tkip_get_flags(void *priv) | ||
75 | { | ||
76 | struct ieee80211_tkip_data *_priv = priv; | ||
77 | return _priv->flags; | ||
78 | } | ||
79 | |||
64 | static void *ieee80211_tkip_init(int key_idx) | 80 | static void *ieee80211_tkip_init(int key_idx) |
65 | { | 81 | { |
66 | struct ieee80211_tkip_data *priv; | 82 | struct ieee80211_tkip_data *priv; |
@@ -69,6 +85,7 @@ static void *ieee80211_tkip_init(int key_idx) | |||
69 | if (priv == NULL) | 85 | if (priv == NULL) |
70 | goto fail; | 86 | goto fail; |
71 | memset(priv, 0, sizeof(*priv)); | 87 | memset(priv, 0, sizeof(*priv)); |
88 | |||
72 | priv->key_idx = key_idx; | 89 | priv->key_idx = key_idx; |
73 | 90 | ||
74 | priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); | 91 | priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); |
@@ -255,25 +272,27 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, | |||
255 | #endif | 272 | #endif |
256 | } | 273 | } |
257 | 274 | ||
258 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | 275 | static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) |
259 | { | 276 | { |
260 | struct ieee80211_tkip_data *tkey = priv; | 277 | struct ieee80211_tkip_data *tkey = priv; |
261 | int len; | 278 | int len; |
262 | u8 rc4key[16], *pos, *icv; | 279 | u8 *rc4key, *pos, *icv; |
263 | struct ieee80211_hdr *hdr; | 280 | struct ieee80211_hdr_4addr *hdr; |
264 | u32 crc; | 281 | u32 crc; |
265 | struct scatterlist sg; | ||
266 | 282 | ||
267 | if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || | 283 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
268 | skb->len < hdr_len) | 284 | |
269 | return -1; | 285 | if (skb_headroom(skb) < 8 || skb->len < hdr_len) |
286 | return NULL; | ||
270 | 287 | ||
271 | hdr = (struct ieee80211_hdr *)skb->data; | ||
272 | if (!tkey->tx_phase1_done) { | 288 | if (!tkey->tx_phase1_done) { |
273 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, | 289 | tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, |
274 | tkey->tx_iv32); | 290 | tkey->tx_iv32); |
275 | tkey->tx_phase1_done = 1; | 291 | tkey->tx_phase1_done = 1; |
276 | } | 292 | } |
293 | rc4key = kmalloc(16, GFP_ATOMIC); | ||
294 | if (!rc4key) | ||
295 | return NULL; | ||
277 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); | 296 | tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); |
278 | 297 | ||
279 | len = skb->len - hdr_len; | 298 | len = skb->len - hdr_len; |
@@ -282,9 +301,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
282 | pos += hdr_len; | 301 | pos += hdr_len; |
283 | icv = skb_put(skb, 4); | 302 | icv = skb_put(skb, 4); |
284 | 303 | ||
285 | *pos++ = rc4key[0]; | 304 | *pos++ = *rc4key; |
286 | *pos++ = rc4key[1]; | 305 | *pos++ = *(rc4key + 1); |
287 | *pos++ = rc4key[2]; | 306 | *pos++ = *(rc4key + 2); |
288 | *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; | 307 | *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; |
289 | *pos++ = tkey->tx_iv32 & 0xff; | 308 | *pos++ = tkey->tx_iv32 & 0xff; |
290 | *pos++ = (tkey->tx_iv32 >> 8) & 0xff; | 309 | *pos++ = (tkey->tx_iv32 >> 8) & 0xff; |
@@ -297,6 +316,38 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
297 | icv[2] = crc >> 16; | 316 | icv[2] = crc >> 16; |
298 | icv[3] = crc >> 24; | 317 | icv[3] = crc >> 24; |
299 | 318 | ||
319 | return rc4key; | ||
320 | } | ||
321 | |||
322 | static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | ||
323 | { | ||
324 | struct ieee80211_tkip_data *tkey = priv; | ||
325 | int len; | ||
326 | const u8 *rc4key; | ||
327 | u8 *pos; | ||
328 | struct scatterlist sg; | ||
329 | |||
330 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | ||
331 | if (net_ratelimit()) { | ||
332 | struct ieee80211_hdr_4addr *hdr = | ||
333 | (struct ieee80211_hdr_4addr *)skb->data; | ||
334 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | ||
335 | "TX packet to " MAC_FMT "\n", | ||
336 | MAC_ARG(hdr->addr1)); | ||
337 | } | ||
338 | return -1; | ||
339 | } | ||
340 | |||
341 | if (skb_tailroom(skb) < 4 || skb->len < hdr_len) | ||
342 | return -1; | ||
343 | |||
344 | len = skb->len - hdr_len; | ||
345 | pos = skb->data + hdr_len; | ||
346 | |||
347 | rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); | ||
348 | if (!rc4key) | ||
349 | return -1; | ||
350 | |||
300 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); | 351 | crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); |
301 | sg.page = virt_to_page(pos); | 352 | sg.page = virt_to_page(pos); |
302 | sg.offset = offset_in_page(pos); | 353 | sg.offset = offset_in_page(pos); |
@@ -319,16 +370,26 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
319 | u8 keyidx, *pos; | 370 | u8 keyidx, *pos; |
320 | u32 iv32; | 371 | u32 iv32; |
321 | u16 iv16; | 372 | u16 iv16; |
322 | struct ieee80211_hdr *hdr; | 373 | struct ieee80211_hdr_4addr *hdr; |
323 | u8 icv[4]; | 374 | u8 icv[4]; |
324 | u32 crc; | 375 | u32 crc; |
325 | struct scatterlist sg; | 376 | struct scatterlist sg; |
326 | int plen; | 377 | int plen; |
327 | 378 | ||
379 | hdr = (struct ieee80211_hdr_4addr *)skb->data; | ||
380 | |||
381 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | ||
382 | if (net_ratelimit()) { | ||
383 | printk(KERN_DEBUG "TKIP countermeasures: dropped " | ||
384 | "received packet from " MAC_FMT "\n", | ||
385 | MAC_ARG(hdr->addr2)); | ||
386 | } | ||
387 | return -1; | ||
388 | } | ||
389 | |||
328 | if (skb->len < hdr_len + 8 + 4) | 390 | if (skb->len < hdr_len + 8 + 4) |
329 | return -1; | 391 | return -1; |
330 | 392 | ||
331 | hdr = (struct ieee80211_hdr *)skb->data; | ||
332 | pos = skb->data + hdr_len; | 393 | pos = skb->data + hdr_len; |
333 | keyidx = pos[3]; | 394 | keyidx = pos[3]; |
334 | if (!(keyidx & (1 << 5))) { | 395 | if (!(keyidx & (1 << 5))) { |
@@ -441,9 +502,9 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, | |||
441 | 502 | ||
442 | static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) | 503 | static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) |
443 | { | 504 | { |
444 | struct ieee80211_hdr *hdr11; | 505 | struct ieee80211_hdr_4addr *hdr11; |
445 | 506 | ||
446 | hdr11 = (struct ieee80211_hdr *)skb->data; | 507 | hdr11 = (struct ieee80211_hdr_4addr *)skb->data; |
447 | switch (le16_to_cpu(hdr11->frame_ctl) & | 508 | switch (le16_to_cpu(hdr11->frame_ctl) & |
448 | (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { | 509 | (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { |
449 | case IEEE80211_FCTL_TODS: | 510 | case IEEE80211_FCTL_TODS: |
@@ -490,9 +551,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, | |||
490 | return 0; | 551 | return 0; |
491 | } | 552 | } |
492 | 553 | ||
493 | #if WIRELESS_EXT >= 18 | ||
494 | static void ieee80211_michael_mic_failure(struct net_device *dev, | 554 | static void ieee80211_michael_mic_failure(struct net_device *dev, |
495 | struct ieee80211_hdr *hdr, int keyidx) | 555 | struct ieee80211_hdr_4addr *hdr, |
556 | int keyidx) | ||
496 | { | 557 | { |
497 | union iwreq_data wrqu; | 558 | union iwreq_data wrqu; |
498 | struct iw_michaelmicfailure ev; | 559 | struct iw_michaelmicfailure ev; |
@@ -510,28 +571,6 @@ static void ieee80211_michael_mic_failure(struct net_device *dev, | |||
510 | wrqu.data.length = sizeof(ev); | 571 | wrqu.data.length = sizeof(ev); |
511 | wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); | 572 | wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); |
512 | } | 573 | } |
513 | #elif WIRELESS_EXT >= 15 | ||
514 | static void ieee80211_michael_mic_failure(struct net_device *dev, | ||
515 | struct ieee80211_hdr *hdr, int keyidx) | ||
516 | { | ||
517 | union iwreq_data wrqu; | ||
518 | char buf[128]; | ||
519 | |||
520 | /* TODO: needed parameters: count, keyid, key type, TSC */ | ||
521 | sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=" | ||
522 | MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", | ||
523 | MAC_ARG(hdr->addr2)); | ||
524 | memset(&wrqu, 0, sizeof(wrqu)); | ||
525 | wrqu.data.length = strlen(buf); | ||
526 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | ||
527 | } | ||
528 | #else /* WIRELESS_EXT >= 15 */ | ||
529 | static inline void ieee80211_michael_mic_failure(struct net_device *dev, | ||
530 | struct ieee80211_hdr *hdr, | ||
531 | int keyidx) | ||
532 | { | ||
533 | } | ||
534 | #endif /* WIRELESS_EXT >= 15 */ | ||
535 | 574 | ||
536 | static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, | 575 | static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, |
537 | int hdr_len, void *priv) | 576 | int hdr_len, void *priv) |
@@ -547,8 +586,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, | |||
547 | skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) | 586 | skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) |
548 | return -1; | 587 | return -1; |
549 | if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { | 588 | if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { |
550 | struct ieee80211_hdr *hdr; | 589 | struct ieee80211_hdr_4addr *hdr; |
551 | hdr = (struct ieee80211_hdr *)skb->data; | 590 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
552 | printk(KERN_DEBUG "%s: Michael MIC verification failed for " | 591 | printk(KERN_DEBUG "%s: Michael MIC verification failed for " |
553 | "MSDU from " MAC_FMT " keyidx=%d\n", | 592 | "MSDU from " MAC_FMT " keyidx=%d\n", |
554 | skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), | 593 | skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), |
@@ -654,19 +693,22 @@ static char *ieee80211_tkip_print_stats(char *p, void *priv) | |||
654 | } | 693 | } |
655 | 694 | ||
656 | static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { | 695 | static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { |
657 | .name = "TKIP", | 696 | .name = "TKIP", |
658 | .init = ieee80211_tkip_init, | 697 | .init = ieee80211_tkip_init, |
659 | .deinit = ieee80211_tkip_deinit, | 698 | .deinit = ieee80211_tkip_deinit, |
660 | .encrypt_mpdu = ieee80211_tkip_encrypt, | 699 | .encrypt_mpdu = ieee80211_tkip_encrypt, |
661 | .decrypt_mpdu = ieee80211_tkip_decrypt, | 700 | .decrypt_mpdu = ieee80211_tkip_decrypt, |
662 | .encrypt_msdu = ieee80211_michael_mic_add, | 701 | .encrypt_msdu = ieee80211_michael_mic_add, |
663 | .decrypt_msdu = ieee80211_michael_mic_verify, | 702 | .decrypt_msdu = ieee80211_michael_mic_verify, |
664 | .set_key = ieee80211_tkip_set_key, | 703 | .set_key = ieee80211_tkip_set_key, |
665 | .get_key = ieee80211_tkip_get_key, | 704 | .get_key = ieee80211_tkip_get_key, |
666 | .print_stats = ieee80211_tkip_print_stats, | 705 | .print_stats = ieee80211_tkip_print_stats, |
667 | .extra_prefix_len = 4 + 4, /* IV + ExtIV */ | 706 | .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ |
668 | .extra_postfix_len = 8 + 4, /* MIC + ICV */ | 707 | .extra_mpdu_postfix_len = 4, /* ICV */ |
669 | .owner = THIS_MODULE, | 708 | .extra_msdu_postfix_len = 8, /* MIC */ |
709 | .get_flags = ieee80211_tkip_get_flags, | ||
710 | .set_flags = ieee80211_tkip_set_flags, | ||
711 | .owner = THIS_MODULE, | ||
670 | }; | 712 | }; |
671 | 713 | ||
672 | static int __init ieee80211_crypto_tkip_init(void) | 714 | static int __init ieee80211_crypto_tkip_init(void) |
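TKIP countermeasures move out of the generic receive path (see the ieee80211_rx.c hunk below, which drops the old tkip_countermeasures check) and into the cipher itself, driven by a per-key flags word exposed through the new get_flags/set_flags ops. A hypothetical driver-side toggle, using only names that appear in the hunk above; the crypt pointer is assumed to be the active ieee80211_crypt_data:

    /* enable countermeasures; encrypt and decrypt will then drop
     * all TKIP traffic, rate-limited, until the flag is cleared */
    if (crypt->ops->set_flags)
            crypt->ops->set_flags(IEEE80211_CRYPTO_TKIP_COUNTERMEASURES,
                                  crypt->priv);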
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index b4d2514a0902..7c08ed2f2628 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
@@ -229,19 +229,19 @@ static char *prism2_wep_print_stats(char *p, void *priv) | |||
229 | } | 229 | } |
230 | 230 | ||
231 | static struct ieee80211_crypto_ops ieee80211_crypt_wep = { | 231 | static struct ieee80211_crypto_ops ieee80211_crypt_wep = { |
232 | .name = "WEP", | 232 | .name = "WEP", |
233 | .init = prism2_wep_init, | 233 | .init = prism2_wep_init, |
234 | .deinit = prism2_wep_deinit, | 234 | .deinit = prism2_wep_deinit, |
235 | .encrypt_mpdu = prism2_wep_encrypt, | 235 | .encrypt_mpdu = prism2_wep_encrypt, |
236 | .decrypt_mpdu = prism2_wep_decrypt, | 236 | .decrypt_mpdu = prism2_wep_decrypt, |
237 | .encrypt_msdu = NULL, | 237 | .encrypt_msdu = NULL, |
238 | .decrypt_msdu = NULL, | 238 | .decrypt_msdu = NULL, |
239 | .set_key = prism2_wep_set_key, | 239 | .set_key = prism2_wep_set_key, |
240 | .get_key = prism2_wep_get_key, | 240 | .get_key = prism2_wep_get_key, |
241 | .print_stats = prism2_wep_print_stats, | 241 | .print_stats = prism2_wep_print_stats, |
242 | .extra_prefix_len = 4, /* IV */ | 242 | .extra_mpdu_prefix_len = 4, /* IV */ |
243 | .extra_postfix_len = 4, /* ICV */ | 243 | .extra_mpdu_postfix_len = 4, /* ICV */ |
244 | .owner = THIS_MODULE, | 244 | .owner = THIS_MODULE, |
245 | }; | 245 | }; |
246 | 246 | ||
247 | static int __init ieee80211_crypto_wep_init(void) | 247 | static int __init ieee80211_crypto_wep_init(void) |
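Across WEP, TKIP and CCMP the extra_prefix_len/extra_postfix_len fields are renamed into per-MPDU and per-MSDU variants. TKIP is the case that forces the split: its 4-byte ICV is added to every MPDU (every fragment), while the 8-byte Michael MIC is added once per MSDU. A sketch of how a transmit path could size its headroom and tailroom from the split fields; the local variables are illustrative, and ciphers that declare no MSDU postfix are assumed to leave that field zero:

    int head = crypt->ops->extra_mpdu_prefix_len;
    int tail = crypt->ops->extra_mpdu_postfix_len +
               crypt->ops->extra_msdu_postfix_len;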
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c new file mode 100644 index 000000000000..c4b54ef8f6d5 --- /dev/null +++ b/net/ieee80211/ieee80211_geo.c | |||
@@ -0,0 +1,141 @@ | |||
1 | /****************************************************************************** | ||
2 | |||
3 | Copyright(c) 2005 Intel Corporation. All rights reserved. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify it | ||
6 | under the terms of version 2 of the GNU General Public License as | ||
7 | published by the Free Software Foundation. | ||
8 | |||
9 | This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | more details. | ||
13 | |||
14 | You should have received a copy of the GNU General Public License along with | ||
15 | this program; if not, write to the Free Software Foundation, Inc., 59 | ||
16 | Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | |||
18 | The full GNU General Public License is included in this distribution in the | ||
19 | file called LICENSE. | ||
20 | |||
21 | Contact Information: | ||
22 | James P. Ketrenos <ipw2100-admin@linux.intel.com> | ||
23 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | |||
25 | ******************************************************************************/ | ||
26 | #include <linux/compiler.h> | ||
27 | #include <linux/config.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/if_arp.h> | ||
30 | #include <linux/in6.h> | ||
31 | #include <linux/in.h> | ||
32 | #include <linux/ip.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/proc_fs.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/types.h> | ||
41 | #include <linux/version.h> | ||
42 | #include <linux/wireless.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <asm/uaccess.h> | ||
45 | |||
46 | #include <net/ieee80211.h> | ||
47 | |||
48 | int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) | ||
49 | { | ||
50 | int i; | ||
51 | |||
52 | /* Driver needs to initialize the geography map before using | ||
53 | * these helper functions */ | ||
54 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
55 | |||
56 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
57 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
58 | /* NOTE: If G mode is currently supported but | ||
59 | * this is a B only channel, we don't see it | ||
60 | * as valid. */ | ||
61 | if ((ieee->geo.bg[i].channel == channel) && | ||
62 | (!(ieee->mode & IEEE_G) || | ||
63 | !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) | ||
64 | return IEEE80211_24GHZ_BAND; | ||
65 | |||
66 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
67 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
68 | if (ieee->geo.a[i].channel == channel) | ||
69 | return IEEE80211_52GHZ_BAND; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel) | ||
75 | { | ||
76 | int i; | ||
77 | |||
78 | /* Driver needs to initialize the geography map before using | ||
79 | * these helper functions */ | ||
80 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
81 | |||
82 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
83 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
84 | if (ieee->geo.bg[i].channel == channel) | ||
85 | return i; | ||
86 | |||
87 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
88 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
89 | if (ieee->geo.a[i].channel == channel) | ||
90 | return i; | ||
91 | |||
92 | return -1; | ||
93 | } | ||
94 | |||
95 | u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq) | ||
96 | { | ||
97 | int i; | ||
98 | |||
99 | /* Driver needs to initialize the geography map before using | ||
100 | * these helper functions */ | ||
101 | BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); | ||
102 | |||
103 | freq /= 100000; | ||
104 | |||
105 | if (ieee->freq_band & IEEE80211_24GHZ_BAND) | ||
106 | for (i = 0; i < ieee->geo.bg_channels; i++) | ||
107 | if (ieee->geo.bg[i].freq == freq) | ||
108 | return ieee->geo.bg[i].channel; | ||
109 | |||
110 | if (ieee->freq_band & IEEE80211_52GHZ_BAND) | ||
111 | for (i = 0; i < ieee->geo.a_channels; i++) | ||
112 | if (ieee->geo.a[i].freq == freq) | ||
113 | return ieee->geo.a[i].channel; | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | int ieee80211_set_geo(struct ieee80211_device *ieee, | ||
119 | const struct ieee80211_geo *geo) | ||
120 | { | ||
121 | memcpy(ieee->geo.name, geo->name, 3); | ||
122 | ieee->geo.name[3] = '\0'; | ||
123 | ieee->geo.bg_channels = geo->bg_channels; | ||
124 | ieee->geo.a_channels = geo->a_channels; | ||
125 | memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * | ||
126 | sizeof(struct ieee80211_channel)); | ||
127 | memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * | ||
128 | sizeof(struct ieee80211_channel)); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee) | ||
133 | { | ||
134 | return &ieee->geo; | ||
135 | } | ||
136 | |||
137 | EXPORT_SYMBOL(ieee80211_is_valid_channel); | ||
138 | EXPORT_SYMBOL(ieee80211_freq_to_channel); | ||
139 | EXPORT_SYMBOL(ieee80211_channel_to_index); | ||
140 | EXPORT_SYMBOL(ieee80211_set_geo); | ||
141 | EXPORT_SYMBOL(ieee80211_get_geo); | ||
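The new ieee80211_geo.c gives drivers a shared regulatory channel map: register it once with ieee80211_set_geo(), then validate channels and translate frequencies through the helpers, all of which BUG_ON() an uninitialized map, so registration must precede any query. A hypothetical registration, with the field names taken from ieee80211_set_geo() above and the channel/frequency values purely illustrative:

    static const struct ieee80211_geo us_geo = {
            .name         = "US",
            .bg_channels  = 11,
            .bg           = { [0] = { .channel = 1, .freq = 2412 },
                              /* remaining 2.4 GHz channels elided */ },
    };

    ieee80211_set_geo(ieee, &us_geo);
    if (!ieee80211_is_valid_channel(ieee, channel))
            return -EINVAL;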
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 6059e9e37123..f66d792cd204 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Copyright(c) 2004 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2004-2005 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | Portions of this file are based on the WEP enablement code provided by the | 5 | Portions of this file are based on the WEP enablement code provided by the |
6 | Host AP project hostap-drivers v0.1.3 | 6 | Host AP project hostap-drivers v0.1.3 |
@@ -53,12 +53,15 @@ | |||
53 | 53 | ||
54 | #include <net/ieee80211.h> | 54 | #include <net/ieee80211.h> |
55 | 55 | ||
56 | MODULE_DESCRIPTION("802.11 data/management/control stack"); | 56 | #define DRV_DESCRIPTION "802.11 data/management/control stack" |
57 | MODULE_AUTHOR | 57 | #define DRV_NAME "ieee80211" |
58 | ("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>"); | 58 | #define DRV_VERSION IEEE80211_VERSION |
59 | MODULE_LICENSE("GPL"); | 59 | #define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>" |
60 | 60 | ||
61 | #define DRV_NAME "ieee80211" | 61 | MODULE_VERSION(DRV_VERSION); |
62 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
63 | MODULE_AUTHOR(DRV_COPYRIGHT); | ||
64 | MODULE_LICENSE("GPL"); | ||
62 | 65 | ||
63 | static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) | 66 | static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) |
64 | { | 67 | { |
@@ -126,26 +129,34 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
126 | 129 | ||
127 | /* Default fragmentation threshold is maximum payload size */ | 130 | /* Default fragmentation threshold is maximum payload size */ |
128 | ieee->fts = DEFAULT_FTS; | 131 | ieee->fts = DEFAULT_FTS; |
132 | ieee->rts = DEFAULT_FTS; | ||
129 | ieee->scan_age = DEFAULT_MAX_SCAN_AGE; | 133 | ieee->scan_age = DEFAULT_MAX_SCAN_AGE; |
130 | ieee->open_wep = 1; | 134 | ieee->open_wep = 1; |
131 | 135 | ||
132 | /* Default to enabling full open WEP with host based encrypt/decrypt */ | 136 | /* Default to enabling full open WEP with host based encrypt/decrypt */ |
133 | ieee->host_encrypt = 1; | 137 | ieee->host_encrypt = 1; |
134 | ieee->host_decrypt = 1; | 138 | ieee->host_decrypt = 1; |
139 | ieee->host_mc_decrypt = 1; | ||
140 | |||
141 | /* Host fragmentation in Open mode. Default is enabled. | ||
142 | * Note: host fragmentation is always enabled if host encryption | ||
143 | * is enabled. For cards that can do hardware encryption, they must do | ||
144 | * hardware fragmentation as well. So we don't need a variable | ||
145 | * like host_enc_frag. */ | ||

146 | ieee->host_open_frag = 1; | ||
135 | ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ | 147 | ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ |
136 | 148 | ||
137 | INIT_LIST_HEAD(&ieee->crypt_deinit_list); | 149 | INIT_LIST_HEAD(&ieee->crypt_deinit_list); |
138 | init_timer(&ieee->crypt_deinit_timer); | 150 | init_timer(&ieee->crypt_deinit_timer); |
139 | ieee->crypt_deinit_timer.data = (unsigned long)ieee; | 151 | ieee->crypt_deinit_timer.data = (unsigned long)ieee; |
140 | ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler; | 152 | ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler; |
153 | ieee->crypt_quiesced = 0; | ||
141 | 154 | ||
142 | spin_lock_init(&ieee->lock); | 155 | spin_lock_init(&ieee->lock); |
143 | 156 | ||
144 | ieee->wpa_enabled = 0; | 157 | ieee->wpa_enabled = 0; |
145 | ieee->tkip_countermeasures = 0; | ||
146 | ieee->drop_unencrypted = 0; | 158 | ieee->drop_unencrypted = 0; |
147 | ieee->privacy_invoked = 0; | 159 | ieee->privacy_invoked = 0; |
148 | ieee->ieee802_1x = 1; | ||
149 | 160 | ||
150 | return dev; | 161 | return dev; |
151 | 162 | ||
@@ -161,6 +172,7 @@ void free_ieee80211(struct net_device *dev) | |||
161 | 172 | ||
162 | int i; | 173 | int i; |
163 | 174 | ||
175 | ieee80211_crypt_quiescing(ieee); | ||
164 | del_timer_sync(&ieee->crypt_deinit_timer); | 176 | del_timer_sync(&ieee->crypt_deinit_timer); |
165 | ieee80211_crypt_deinit_entries(ieee, 1); | 177 | ieee80211_crypt_deinit_entries(ieee, 1); |
166 | 178 | ||
@@ -195,38 +207,26 @@ static int show_debug_level(char *page, char **start, off_t offset, | |||
195 | static int store_debug_level(struct file *file, const char __user * buffer, | 207 | static int store_debug_level(struct file *file, const char __user * buffer, |
196 | unsigned long count, void *data) | 208 | unsigned long count, void *data) |
197 | { | 209 | { |
198 | char buf[] = "0x00000000"; | 210 | char buf[] = "0x00000000\n"; |
199 | char *p = (char *)buf; | 211 | unsigned long len = min((unsigned long)sizeof(buf) - 1, count); |
200 | unsigned long val; | 212 | unsigned long val; |
201 | 213 | ||
202 | if (count > sizeof(buf) - 1) | 214 | if (copy_from_user(buf, buffer, len)) |
203 | count = sizeof(buf) - 1; | ||
204 | |||
205 | if (copy_from_user(buf, buffer, count)) | ||
206 | return count; | 215 | return count; |
207 | buf[count] = 0; | 216 | buf[len] = 0; |
208 | /* | 217 | if (sscanf(buf, "%li", &val) != 1) |
209 | * what a FPOS... What, sscanf(buf, "%i", &val) would be too | ||
210 | * scary? | ||
211 | */ | ||
212 | if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { | ||
213 | p++; | ||
214 | if (p[0] == 'x' || p[0] == 'X') | ||
215 | p++; | ||
216 | val = simple_strtoul(p, &p, 16); | ||
217 | } else | ||
218 | val = simple_strtoul(p, &p, 10); | ||
219 | if (p == buf) | ||
220 | printk(KERN_INFO DRV_NAME | 218 | printk(KERN_INFO DRV_NAME |
221 | ": %s is not in hex or decimal form.\n", buf); | 219 | ": %s is not in hex or decimal form.\n", buf); |
222 | else | 220 | else |
223 | ieee80211_debug_level = val; | 221 | ieee80211_debug_level = val; |
224 | 222 | ||
225 | return strlen(buf); | 223 | return strnlen(buf, len); |
226 | } | 224 | } |
225 | #endif /* CONFIG_IEEE80211_DEBUG */ | ||
227 | 226 | ||
228 | static int __init ieee80211_init(void) | 227 | static int __init ieee80211_init(void) |
229 | { | 228 | { |
229 | #ifdef CONFIG_IEEE80211_DEBUG | ||
230 | struct proc_dir_entry *e; | 230 | struct proc_dir_entry *e; |
231 | 231 | ||
232 | ieee80211_debug_level = debug; | 232 | ieee80211_debug_level = debug; |
@@ -246,26 +246,33 @@ static int __init ieee80211_init(void) | |||
246 | e->read_proc = show_debug_level; | 246 | e->read_proc = show_debug_level; |
247 | e->write_proc = store_debug_level; | 247 | e->write_proc = store_debug_level; |
248 | e->data = NULL; | 248 | e->data = NULL; |
249 | #endif /* CONFIG_IEEE80211_DEBUG */ | ||
250 | |||
251 | printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); | ||
252 | printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); | ||
249 | 253 | ||
250 | return 0; | 254 | return 0; |
251 | } | 255 | } |
252 | 256 | ||
253 | static void __exit ieee80211_exit(void) | 257 | static void __exit ieee80211_exit(void) |
254 | { | 258 | { |
259 | #ifdef CONFIG_IEEE80211_DEBUG | ||
255 | if (ieee80211_proc) { | 260 | if (ieee80211_proc) { |
256 | remove_proc_entry("debug_level", ieee80211_proc); | 261 | remove_proc_entry("debug_level", ieee80211_proc); |
257 | remove_proc_entry(DRV_NAME, proc_net); | 262 | remove_proc_entry(DRV_NAME, proc_net); |
258 | ieee80211_proc = NULL; | 263 | ieee80211_proc = NULL; |
259 | } | 264 | } |
265 | #endif /* CONFIG_IEEE80211_DEBUG */ | ||
260 | } | 266 | } |
261 | 267 | ||
268 | #ifdef CONFIG_IEEE80211_DEBUG | ||
262 | #include <linux/moduleparam.h> | 269 | #include <linux/moduleparam.h> |
263 | module_param(debug, int, 0444); | 270 | module_param(debug, int, 0444); |
264 | MODULE_PARM_DESC(debug, "debug output mask"); | 271 | MODULE_PARM_DESC(debug, "debug output mask"); |
272 | #endif /* CONFIG_IEEE80211_DEBUG */ | ||
265 | 273 | ||
266 | module_exit(ieee80211_exit); | 274 | module_exit(ieee80211_exit); |
267 | module_init(ieee80211_init); | 275 | module_init(ieee80211_init); |
268 | #endif | ||
269 | 276 | ||
270 | const char *escape_essid(const char *essid, u8 essid_len) | 277 | const char *escape_essid(const char *essid, u8 essid_len) |
271 | { | 278 | { |
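The proc write handler sheds its hand-rolled hex/decimal parser: sscanf()'s "%li" conversion has strtol semantics, so a leading 0x or 0X selects base 16 automatically, and the rewritten handler also bounds the user copy with min() and NUL-terminates the buffer. The two accepted input forms, as a quick illustration:

    unsigned long val;

    sscanf("0x1f", "%li", &val);  /* hex form:     val = 31 */
    sscanf("31", "%li", &val);    /* decimal form: val = 31 */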
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index f7dcd854139e..ce694cf5c160 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen | 5 | * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen |
6 | * <jkmaline@cc.hut.fi> | 6 | * <jkmaline@cc.hut.fi> |
7 | * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> | 7 | * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> |
8 | * Copyright (c) 2004, Intel Corporation | 8 | * Copyright (c) 2004-2005, Intel Corporation |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -87,7 +87,7 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct | |||
87 | 87 | ||
88 | /* Called only as a tasklet (software IRQ) */ | 88 | /* Called only as a tasklet (software IRQ) */ |
89 | static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, | 89 | static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, |
90 | struct ieee80211_hdr *hdr) | 90 | struct ieee80211_hdr_4addr *hdr) |
91 | { | 91 | { |
92 | struct sk_buff *skb = NULL; | 92 | struct sk_buff *skb = NULL; |
93 | u16 sc; | 93 | u16 sc; |
@@ -101,7 +101,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, | |||
101 | if (frag == 0) { | 101 | if (frag == 0) { |
102 | /* Reserve enough space to fit maximum frame length */ | 102 | /* Reserve enough space to fit maximum frame length */ |
103 | skb = dev_alloc_skb(ieee->dev->mtu + | 103 | skb = dev_alloc_skb(ieee->dev->mtu + |
104 | sizeof(struct ieee80211_hdr) + | 104 | sizeof(struct ieee80211_hdr_4addr) + |
105 | 8 /* LLC */ + | 105 | 8 /* LLC */ + |
106 | 2 /* alignment */ + | 106 | 2 /* alignment */ + |
107 | 8 /* WEP */ + ETH_ALEN /* WDS */ ); | 107 | 8 /* WEP */ + ETH_ALEN /* WDS */ ); |
@@ -138,7 +138,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, | |||
138 | 138 | ||
139 | /* Called only as a tasklet (software IRQ) */ | 139 | /* Called only as a tasklet (software IRQ) */ |
140 | static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, | 140 | static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, |
141 | struct ieee80211_hdr *hdr) | 141 | struct ieee80211_hdr_4addr *hdr) |
142 | { | 142 | { |
143 | u16 sc; | 143 | u16 sc; |
144 | unsigned int seq; | 144 | unsigned int seq; |
@@ -176,7 +176,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
176 | ieee->dev->name); | 176 | ieee->dev->name); |
177 | return 0; | 177 | return 0; |
178 | /* | 178 | /* |
179 | hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *) | 179 | hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *) |
180 | skb->data);*/ | 180 | skb->data);*/ |
181 | } | 181 | } |
182 | 182 | ||
@@ -232,13 +232,13 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee, | |||
232 | { | 232 | { |
233 | struct net_device *dev = ieee->dev; | 233 | struct net_device *dev = ieee->dev; |
234 | u16 fc, ethertype; | 234 | u16 fc, ethertype; |
235 | struct ieee80211_hdr *hdr; | 235 | struct ieee80211_hdr_3addr *hdr; |
236 | u8 *pos; | 236 | u8 *pos; |
237 | 237 | ||
238 | if (skb->len < 24) | 238 | if (skb->len < 24) |
239 | return 0; | 239 | return 0; |
240 | 240 | ||
241 | hdr = (struct ieee80211_hdr *)skb->data; | 241 | hdr = (struct ieee80211_hdr_3addr *)skb->data; |
242 | fc = le16_to_cpu(hdr->frame_ctl); | 242 | fc = le16_to_cpu(hdr->frame_ctl); |
243 | 243 | ||
244 | /* check that the frame is unicast frame to us */ | 244 | /* check that the frame is unicast frame to us */ |
@@ -271,26 +271,15 @@ static inline int | |||
271 | ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, | 271 | ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, |
272 | struct ieee80211_crypt_data *crypt) | 272 | struct ieee80211_crypt_data *crypt) |
273 | { | 273 | { |
274 | struct ieee80211_hdr *hdr; | 274 | struct ieee80211_hdr_3addr *hdr; |
275 | int res, hdrlen; | 275 | int res, hdrlen; |
276 | 276 | ||
277 | if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) | 277 | if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) |
278 | return 0; | 278 | return 0; |
279 | 279 | ||
280 | hdr = (struct ieee80211_hdr *)skb->data; | 280 | hdr = (struct ieee80211_hdr_3addr *)skb->data; |
281 | hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); | 281 | hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); |
282 | 282 | ||
283 | #ifdef CONFIG_IEEE80211_CRYPT_TKIP | ||
284 | if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { | ||
285 | if (net_ratelimit()) { | ||
286 | printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " | ||
287 | "received packet from " MAC_FMT "\n", | ||
288 | ieee->dev->name, MAC_ARG(hdr->addr2)); | ||
289 | } | ||
290 | return -1; | ||
291 | } | ||
292 | #endif | ||
293 | |||
294 | atomic_inc(&crypt->refcnt); | 283 | atomic_inc(&crypt->refcnt); |
295 | res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); | 284 | res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); |
296 | atomic_dec(&crypt->refcnt); | 285 | atomic_dec(&crypt->refcnt); |
@@ -314,13 +303,13 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, | |||
314 | struct sk_buff *skb, int keyidx, | 303 | struct sk_buff *skb, int keyidx, |
315 | struct ieee80211_crypt_data *crypt) | 304 | struct ieee80211_crypt_data *crypt) |
316 | { | 305 | { |
317 | struct ieee80211_hdr *hdr; | 306 | struct ieee80211_hdr_3addr *hdr; |
318 | int res, hdrlen; | 307 | int res, hdrlen; |
319 | 308 | ||
320 | if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) | 309 | if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) |
321 | return 0; | 310 | return 0; |
322 | 311 | ||
323 | hdr = (struct ieee80211_hdr *)skb->data; | 312 | hdr = (struct ieee80211_hdr_3addr *)skb->data; |
324 | hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); | 313 | hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); |
325 | 314 | ||
326 | atomic_inc(&crypt->refcnt); | 315 | atomic_inc(&crypt->refcnt); |
@@ -343,7 +332,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
343 | struct ieee80211_rx_stats *rx_stats) | 332 | struct ieee80211_rx_stats *rx_stats) |
344 | { | 333 | { |
345 | struct net_device *dev = ieee->dev; | 334 | struct net_device *dev = ieee->dev; |
346 | struct ieee80211_hdr *hdr; | 335 | struct ieee80211_hdr_4addr *hdr; |
347 | size_t hdrlen; | 336 | size_t hdrlen; |
348 | u16 fc, type, stype, sc; | 337 | u16 fc, type, stype, sc; |
349 | struct net_device_stats *stats; | 338 | struct net_device_stats *stats; |
@@ -363,7 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
363 | struct ieee80211_crypt_data *crypt = NULL; | 352 | struct ieee80211_crypt_data *crypt = NULL; |
364 | int keyidx = 0; | 353 | int keyidx = 0; |
365 | 354 | ||
366 | hdr = (struct ieee80211_hdr *)skb->data; | 355 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
367 | stats = &ieee->stats; | 356 | stats = &ieee->stats; |
368 | 357 | ||
369 | if (skb->len < 10) { | 358 | if (skb->len < 10) { |
@@ -378,35 +367,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
378 | frag = WLAN_GET_SEQ_FRAG(sc); | 367 | frag = WLAN_GET_SEQ_FRAG(sc); |
379 | hdrlen = ieee80211_get_hdrlen(fc); | 368 | hdrlen = ieee80211_get_hdrlen(fc); |
380 | 369 | ||
381 | #ifdef NOT_YET | ||
382 | #if WIRELESS_EXT > 15 | ||
383 | /* Put this code here so that we avoid duplicating it in all | 370 | /* Put this code here so that we avoid duplicating it in all |
384 | * Rx paths. - Jean II */ | 371 | * Rx paths. - Jean II */ |
385 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ | 372 | #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ |
386 | /* If spy monitoring on */ | 373 | /* If spy monitoring on */ |
387 | if (iface->spy_data.spy_number > 0) { | 374 | if (ieee->spy_data.spy_number > 0) { |
388 | struct iw_quality wstats; | 375 | struct iw_quality wstats; |
389 | wstats.level = rx_stats->signal; | 376 | |
390 | wstats.noise = rx_stats->noise; | 377 | wstats.updated = 0; |
391 | wstats.updated = 6; /* No qual value */ | 378 | if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { |
379 | wstats.level = rx_stats->rssi; | ||
380 | wstats.updated |= IW_QUAL_LEVEL_UPDATED; | ||
381 | } else | ||
382 | wstats.updated |= IW_QUAL_LEVEL_INVALID; | ||
383 | |||
384 | if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { | ||
385 | wstats.noise = rx_stats->noise; | ||
386 | wstats.updated |= IW_QUAL_NOISE_UPDATED; | ||
387 | } else | ||
388 | wstats.updated |= IW_QUAL_NOISE_INVALID; | ||
389 | |||
390 | if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { | ||
391 | wstats.qual = rx_stats->signal; | ||
392 | wstats.updated |= IW_QUAL_QUAL_UPDATED; | ||
393 | } else | ||
394 | wstats.updated |= IW_QUAL_QUAL_INVALID; | ||
395 | |||
392 | /* Update spy records */ | 396 | /* Update spy records */ |
393 | wireless_spy_update(dev, hdr->addr2, &wstats); | 397 | wireless_spy_update(ieee->dev, hdr->addr2, &wstats); |
394 | } | 398 | } |
395 | #endif /* IW_WIRELESS_SPY */ | 399 | #endif /* IW_WIRELESS_SPY */ |
396 | #endif /* WIRELESS_EXT > 15 */ | 400 | |
401 | #ifdef NOT_YET | ||
397 | hostap_update_rx_stats(local->ap, hdr, rx_stats); | 402 | hostap_update_rx_stats(local->ap, hdr, rx_stats); |
398 | #endif | 403 | #endif |
399 | 404 | ||
400 | #if WIRELESS_EXT > 15 | ||
401 | if (ieee->iw_mode == IW_MODE_MONITOR) { | 405 | if (ieee->iw_mode == IW_MODE_MONITOR) { |
402 | ieee80211_monitor_rx(ieee, skb, rx_stats); | 406 | ieee80211_monitor_rx(ieee, skb, rx_stats); |
403 | stats->rx_packets++; | 407 | stats->rx_packets++; |
404 | stats->rx_bytes += skb->len; | 408 | stats->rx_bytes += skb->len; |
405 | return 1; | 409 | return 1; |
406 | } | 410 | } |
407 | #endif | ||
408 | 411 | ||
409 | if (ieee->host_decrypt) { | 412 | if ((is_multicast_ether_addr(hdr->addr1) || |
413 | is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : | ||
414 | ieee->host_decrypt) { | ||
410 | int idx = 0; | 415 | int idx = 0; |
411 | if (skb->len >= hdrlen + 3) | 416 | if (skb->len >= hdrlen + 3) |
412 | idx = skb->data[hdrlen + 3] >> 6; | 417 | idx = skb->data[hdrlen + 3] >> 6; |
@@ -531,6 +536,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
531 | 536 | ||
532 | /* Nullfunc frames may have PS-bit set, so they must be passed to | 537 | /* Nullfunc frames may have PS-bit set, so they must be passed to |
533 | * hostap_handle_sta_rx() before being dropped here. */ | 538 | * hostap_handle_sta_rx() before being dropped here. */ |
539 | |||
540 | stype &= ~IEEE80211_STYPE_QOS_DATA; | ||
541 | |||
534 | if (stype != IEEE80211_STYPE_DATA && | 542 | if (stype != IEEE80211_STYPE_DATA && |
535 | stype != IEEE80211_STYPE_DATA_CFACK && | 543 | stype != IEEE80211_STYPE_DATA_CFACK && |
536 | stype != IEEE80211_STYPE_DATA_CFPOLL && | 544 | stype != IEEE80211_STYPE_DATA_CFPOLL && |
@@ -549,7 +557,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
549 | (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) | 557 | (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) |
550 | goto rx_dropped; | 558 | goto rx_dropped; |
551 | 559 | ||
552 | hdr = (struct ieee80211_hdr *)skb->data; | 560 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
553 | 561 | ||
554 | /* skb: hdr + (possibly fragmented) plaintext payload */ | 562 | /* skb: hdr + (possibly fragmented) plaintext payload */ |
555 | // PR: FIXME: hostap has additional conditions in the "if" below: | 563 | // PR: FIXME: hostap has additional conditions in the "if" below: |
@@ -603,7 +611,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
603 | /* this was the last fragment and the frame will be | 611 | /* this was the last fragment and the frame will be |
604 | * delivered, so remove skb from fragment cache */ | 612 | * delivered, so remove skb from fragment cache */ |
605 | skb = frag_skb; | 613 | skb = frag_skb; |
606 | hdr = (struct ieee80211_hdr *)skb->data; | 614 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
607 | ieee80211_frag_cache_invalidate(ieee, hdr); | 615 | ieee80211_frag_cache_invalidate(ieee, hdr); |
608 | } | 616 | } |
609 | 617 | ||
@@ -613,7 +621,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
613 | ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) | 621 | ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) |
614 | goto rx_dropped; | 622 | goto rx_dropped; |
615 | 623 | ||
616 | hdr = (struct ieee80211_hdr *)skb->data; | 624 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
617 | if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { | 625 | if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { |
618 | if ( /*ieee->ieee802_1x && */ | 626 | if ( /*ieee->ieee802_1x && */ |
619 | ieee80211_is_eapol_frame(ieee, skb)) { | 627 | ieee80211_is_eapol_frame(ieee, skb)) { |
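Two behavioral changes in the receive path above are easy to miss. QoS data frames are now accepted by masking IEEE80211_STYPE_QOS_DATA out of the subtype before the existing DATA-subtype checks, and host decryption is decided per destination class, with multicast/broadcast frames following the new host_mc_decrypt flag while unicast frames keep host_decrypt. Condensed from the hunks above; the local variable is illustrative:

    stype &= ~IEEE80211_STYPE_QOS_DATA;  /* treat QoS data like plain data */

    int do_host_decrypt = (is_multicast_ether_addr(hdr->addr1) ||
                           is_broadcast_ether_addr(hdr->addr2)) ?
                                  ieee->host_mc_decrypt : ieee->host_decrypt;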
@@ -755,69 +763,179 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
755 | 763 | ||
756 | #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 | 764 | #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 |
757 | 765 | ||
758 | static inline int ieee80211_is_ofdm_rate(u8 rate) | 766 | static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; |
767 | |||
768 | /* | ||
769 | * Make sure the structure we read from the beacon packet has | ||
770 | * the right values | ||
771 | */ | ||
772 | static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element | ||
773 | *info_element, int sub_type) | ||
759 | { | 774 | { |
760 | switch (rate & ~IEEE80211_BASIC_RATE_MASK) { | 775 | |
761 | case IEEE80211_OFDM_RATE_6MB: | 776 | if (info_element->qui_subtype != sub_type) |
762 | case IEEE80211_OFDM_RATE_9MB: | 777 | return -1; |
763 | case IEEE80211_OFDM_RATE_12MB: | 778 | if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) |
764 | case IEEE80211_OFDM_RATE_18MB: | 779 | return -1; |
765 | case IEEE80211_OFDM_RATE_24MB: | 780 | if (info_element->qui_type != QOS_OUI_TYPE) |
766 | case IEEE80211_OFDM_RATE_36MB: | 781 | return -1; |
767 | case IEEE80211_OFDM_RATE_48MB: | 782 | if (info_element->version != QOS_VERSION_1) |
768 | case IEEE80211_OFDM_RATE_54MB: | 783 | return -1; |
769 | return 1; | 784 | |
770 | } | ||
771 | return 0; | 785 | return 0; |
772 | } | 786 | } |
773 | 787 | ||
774 | static inline int ieee80211_network_init(struct ieee80211_device *ieee, | 788 | /* |
775 | struct ieee80211_probe_response | 789 | * Parse a QoS parameter element |
776 | *beacon, | 790 | */ |
777 | struct ieee80211_network *network, | 791 | static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info |
778 | struct ieee80211_rx_stats *stats) | 792 | *element_param, struct ieee80211_info_element |
793 | *info_element) | ||
779 | { | 794 | { |
780 | #ifdef CONFIG_IEEE80211_DEBUG | 795 | int ret = 0; |
781 | char rates_str[64]; | 796 | u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2; |
782 | char *p; | ||
783 | #endif | ||
784 | struct ieee80211_info_element *info_element; | ||
785 | u16 left; | ||
786 | u8 i; | ||
787 | 797 | ||
788 | /* Pull out fixed field data */ | 798 | if ((info_element == NULL) || (element_param == NULL)) |
789 | memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); | 799 | return -1; |
790 | network->capability = beacon->capability; | ||
791 | network->last_scanned = jiffies; | ||
792 | network->time_stamp[0] = beacon->time_stamp[0]; | ||
793 | network->time_stamp[1] = beacon->time_stamp[1]; | ||
794 | network->beacon_interval = beacon->beacon_interval; | ||
795 | /* Where to pull this? beacon->listen_interval; */ | ||
796 | network->listen_interval = 0x0A; | ||
797 | network->rates_len = network->rates_ex_len = 0; | ||
798 | network->last_associate = 0; | ||
799 | network->ssid_len = 0; | ||
800 | network->flags = 0; | ||
801 | network->atim_window = 0; | ||
802 | 800 | ||
803 | if (stats->freq == IEEE80211_52GHZ_BAND) { | 801 | if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { |
804 | /* for A band (No DS info) */ | 802 | memcpy(element_param->info_element.qui, info_element->data, |
805 | network->channel = stats->received_channel; | 803 | info_element->len); |
804 | element_param->info_element.elementID = info_element->id; | ||
805 | element_param->info_element.length = info_element->len; | ||
806 | } else | 806 | } else |
807 | network->flags |= NETWORK_HAS_CCK; | 807 | ret = -1; |
808 | if (ret == 0) | ||
809 | ret = ieee80211_verify_qos_info(&element_param->info_element, | ||
810 | QOS_OUI_PARAM_SUB_TYPE); | ||
811 | return ret; | ||
812 | } | ||
808 | 813 | ||
809 | network->wpa_ie_len = 0; | 814 | /* |
810 | network->rsn_ie_len = 0; | 815 | * Parse a QoS information element |
816 | */ | ||
817 | static int ieee80211_read_qos_info_element(struct | ||
818 | ieee80211_qos_information_element | ||
819 | *element_info, struct ieee80211_info_element | ||
820 | *info_element) | ||
821 | { | ||
822 | int ret = 0; | ||
823 | u16 size = sizeof(struct ieee80211_qos_information_element) - 2; | ||
824 | |||
825 | if (element_info == NULL) | ||
826 | return -1; | ||
827 | if (info_element == NULL) | ||
828 | return -1; | ||
829 | |||
830 | if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { | ||
831 | memcpy(element_info->qui, info_element->data, | ||
832 | info_element->len); | ||
833 | element_info->elementID = info_element->id; | ||
834 | element_info->length = info_element->len; | ||
835 | } else | ||
836 | ret = -1; | ||
837 | |||
838 | if (ret == 0) | ||
839 | ret = ieee80211_verify_qos_info(element_info, | ||
840 | QOS_OUI_INFO_SUB_TYPE); | ||
841 | return ret; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * Write QoS parameters from the ac parameters. | ||
846 | */ | ||
847 | static int ieee80211_qos_convert_ac_to_parameters(struct | ||
848 | ieee80211_qos_parameter_info | ||
849 | *param_elm, struct | ||
850 | ieee80211_qos_parameters | ||
851 | *qos_param) | ||
852 | { | ||
853 | int rc = 0; | ||
854 | int i; | ||
855 | struct ieee80211_qos_ac_parameter *ac_params; | ||
856 | u32 txop; | ||
857 | u8 cw_min; | ||
858 | u8 cw_max; | ||
859 | |||
860 | for (i = 0; i < QOS_QUEUE_NUM; i++) { | ||
861 | ac_params = &(param_elm->ac_params_record[i]); | ||
862 | |||
863 | qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F; | ||
864 | qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2; | ||
865 | |||
866 | cw_min = ac_params->ecw_min_max & 0x0F; | ||
867 | qos_param->cw_min[i] = (u16) ((1 << cw_min) - 1); | ||
868 | |||
869 | cw_max = (ac_params->ecw_min_max & 0xF0) >> 4; | ||
870 | qos_param->cw_max[i] = (u16) ((1 << cw_max) - 1); | ||
871 | |||
872 | qos_param->flag[i] = | ||
873 | (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00; | ||
874 | |||
875 | txop = le16_to_cpu(ac_params->tx_op_limit) * 32; | ||
876 | qos_param->tx_op_limit[i] = (u16) txop; | ||
877 | } | ||
878 | return rc; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * We have a generic data element which may contain a QoS information | ||
883 | * or parameters element. Check the information element length to | ||
884 | * decide which type to read | ||
885 | */ | ||
886 | static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element | ||
887 | *info_element, | ||
888 | struct ieee80211_network *network) | ||
889 | { | ||
890 | int rc = 0; | ||
891 | struct ieee80211_qos_parameters *qos_param = NULL; | ||
892 | struct ieee80211_qos_information_element qos_info_element; | ||
893 | |||
894 | rc = ieee80211_read_qos_info_element(&qos_info_element, info_element); | ||
895 | |||
896 | if (rc == 0) { | ||
897 | network->qos_data.param_count = qos_info_element.ac_info & 0x0F; | ||
898 | network->flags |= NETWORK_HAS_QOS_INFORMATION; | ||
899 | } else { | ||
900 | struct ieee80211_qos_parameter_info param_element; | ||
901 | |||
902 | rc = ieee80211_read_qos_param_element(¶m_element, | ||
903 | info_element); | ||
904 | if (rc == 0) { | ||
905 | qos_param = &(network->qos_data.parameters); | ||
906 | ieee80211_qos_convert_ac_to_parameters(¶m_element, | ||
907 | qos_param); | ||
908 | network->flags |= NETWORK_HAS_QOS_PARAMETERS; | ||
909 | network->qos_data.param_count = | ||
910 | param_element.info_element.ac_info & 0x0F; | ||
911 | } | ||
912 | } | ||
913 | |||
914 | if (rc == 0) { | ||
915 | IEEE80211_DEBUG_QOS("QoS is supported\n"); | ||
916 | network->qos_data.supported = 1; | ||
917 | } | ||
918 | return rc; | ||
919 | } | ||
920 | |||
921 | static int ieee80211_parse_info_param(struct ieee80211_info_element | ||
922 | *info_element, u16 length, | ||
923 | struct ieee80211_network *network) | ||
924 | { | ||
925 | u8 i; | ||
926 | #ifdef CONFIG_IEEE80211_DEBUG | ||
927 | char rates_str[64]; | ||
928 | char *p; | ||
929 | #endif | ||
811 | 930 | ||
812 | info_element = &beacon->info_element; | 931 | while (length >= sizeof(*info_element)) { |
813 | left = stats->len - ((void *)info_element - (void *)beacon); | 932 | if (sizeof(*info_element) + info_element->len > length) { |
814 | while (left >= sizeof(struct ieee80211_info_element_hdr)) { | 933 | IEEE80211_DEBUG_MGMT("Info elem: parse failed: " |
815 | if (sizeof(struct ieee80211_info_element_hdr) + | 934 | "info_element->len + 2 > left : " |
816 | info_element->len > left) { | 935 | "info_element->len+2=%zd left=%d, id=%d.\n", |
817 | IEEE80211_DEBUG_SCAN | 936 | info_element->len + |
818 | ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", | 937 | sizeof(*info_element), |
819 | info_element->len + | 938 | length, info_element->id); |
820 | sizeof(struct ieee80211_info_element), left); | ||
821 | return 1; | 939 | return 1; |
822 | } | 940 | } |
823 | 941 | ||
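
The AC-parameter conversion above (ieee80211_qos_convert_ac_to_parameters) unpacks the WMM encoding: AIFSN sits in the low nibble of aci_aifsn, the contention windows are stored as exponents in ecw_min_max (CW = 2^ECW - 1), and the TXOP limit is counted in 32 microsecond units. A minimal userspace sketch of the same bit-twiddling; the struct layout and sample values here are illustrative, not the kernel's definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative, reduced AC parameter record (not the kernel struct). */
struct ac_param {
	uint8_t  aci_aifsn;   /* bits 0-3: AIFSN, bit 4: admission control */
	uint8_t  ecw_min_max; /* bits 0-3: ECWmin, bits 4-7: ECWmax */
	uint16_t tx_op_limit; /* in units of 32 microseconds */
};

int main(void)
{
	struct ac_param ac = { 0x13, 0x74, 94 }; /* sample values */

	unsigned aifsn = ac.aci_aifsn & 0x0F;
	unsigned cw_min = (1u << (ac.ecw_min_max & 0x0F)) - 1; /* 2^ECWmin - 1 */
	unsigned cw_max = (1u << (ac.ecw_min_max >> 4)) - 1;   /* 2^ECWmax - 1 */
	unsigned long txop_us = (unsigned long)ac.tx_op_limit * 32;

	printf("AIFSN=%u CWmin=%u CWmax=%u TXOP=%luus ACM=%u\n",
	       aifsn, cw_min, cw_max, txop_us,
	       (ac.aci_aifsn & 0x10) ? 1u : 0u);
	return 0;
}
```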
@@ -837,7 +955,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
837 | memset(network->ssid + network->ssid_len, 0, | 955 | memset(network->ssid + network->ssid_len, 0, |
838 | IW_ESSID_MAX_SIZE - network->ssid_len); | 956 | IW_ESSID_MAX_SIZE - network->ssid_len); |
839 | 957 | ||
840 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n", | 958 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", |
841 | network->ssid, network->ssid_len); | 959 | network->ssid, network->ssid_len); |
842 | break; | 960 | break; |
843 | 961 | ||
@@ -845,15 +963,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
845 | #ifdef CONFIG_IEEE80211_DEBUG | 963 | #ifdef CONFIG_IEEE80211_DEBUG |
846 | p = rates_str; | 964 | p = rates_str; |
847 | #endif | 965 | #endif |
848 | network->rates_len = | 966 | network->rates_len = min(info_element->len, |
849 | min(info_element->len, MAX_RATES_LENGTH); | 967 | MAX_RATES_LENGTH); |
850 | for (i = 0; i < network->rates_len; i++) { | 968 | for (i = 0; i < network->rates_len; i++) { |
851 | network->rates[i] = info_element->data[i]; | 969 | network->rates[i] = info_element->data[i]; |
852 | #ifdef CONFIG_IEEE80211_DEBUG | 970 | #ifdef CONFIG_IEEE80211_DEBUG |
853 | p += snprintf(p, | 971 | p += snprintf(p, sizeof(rates_str) - |
854 | sizeof(rates_str) - (p - | 972 | (p - rates_str), "%02X ", |
855 | rates_str), | 973 | network->rates[i]); |
856 | "%02X ", network->rates[i]); | ||
857 | #endif | 974 | #endif |
858 | if (ieee80211_is_ofdm_rate | 975 | if (ieee80211_is_ofdm_rate |
859 | (info_element->data[i])) { | 976 | (info_element->data[i])) { |
@@ -865,7 +982,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
865 | } | 982 | } |
866 | } | 983 | } |
867 | 984 | ||
868 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n", | 985 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", |
869 | rates_str, network->rates_len); | 986 | rates_str, network->rates_len); |
870 | break; | 987 | break; |
871 | 988 | ||
@@ -873,15 +990,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
873 | #ifdef CONFIG_IEEE80211_DEBUG | 990 | #ifdef CONFIG_IEEE80211_DEBUG |
874 | p = rates_str; | 991 | p = rates_str; |
875 | #endif | 992 | #endif |
876 | network->rates_ex_len = | 993 | network->rates_ex_len = min(info_element->len, |
877 | min(info_element->len, MAX_RATES_EX_LENGTH); | 994 | MAX_RATES_EX_LENGTH); |
878 | for (i = 0; i < network->rates_ex_len; i++) { | 995 | for (i = 0; i < network->rates_ex_len; i++) { |
879 | network->rates_ex[i] = info_element->data[i]; | 996 | network->rates_ex[i] = info_element->data[i]; |
880 | #ifdef CONFIG_IEEE80211_DEBUG | 997 | #ifdef CONFIG_IEEE80211_DEBUG |
881 | p += snprintf(p, | 998 | p += snprintf(p, sizeof(rates_str) - |
882 | sizeof(rates_str) - (p - | 999 | (p - rates_str), "%02X ", |
883 | rates_str), | 1000 | network->rates_ex[i]); |
884 | "%02X ", network->rates[i]); | ||
885 | #endif | 1001 | #endif |
886 | if (ieee80211_is_ofdm_rate | 1002 | if (ieee80211_is_ofdm_rate |
887 | (info_element->data[i])) { | 1003 | (info_element->data[i])) { |
@@ -893,40 +1009,51 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
893 | } | 1009 | } |
894 | } | 1010 | } |
895 | 1011 | ||
896 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n", | 1012 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", |
897 | rates_str, network->rates_ex_len); | 1013 | rates_str, network->rates_ex_len); |
898 | break; | 1014 | break; |
899 | 1015 | ||
900 | case MFIE_TYPE_DS_SET: | 1016 | case MFIE_TYPE_DS_SET: |
901 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n", | 1017 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", |
902 | info_element->data[0]); | 1018 | info_element->data[0]); |
903 | if (stats->freq == IEEE80211_24GHZ_BAND) | 1019 | network->channel = info_element->data[0]; |
904 | network->channel = info_element->data[0]; | ||
905 | break; | 1020 | break; |
906 | 1021 | ||
907 | case MFIE_TYPE_FH_SET: | 1022 | case MFIE_TYPE_FH_SET: |
908 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n"); | 1023 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); |
909 | break; | 1024 | break; |
910 | 1025 | ||
911 | case MFIE_TYPE_CF_SET: | 1026 | case MFIE_TYPE_CF_SET: |
912 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n"); | 1027 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); |
913 | break; | 1028 | break; |
914 | 1029 | ||
915 | case MFIE_TYPE_TIM: | 1030 | case MFIE_TYPE_TIM: |
916 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n"); | 1031 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); |
1032 | break; | ||
1033 | |||
1034 | case MFIE_TYPE_ERP_INFO: | ||
1035 | network->erp_value = info_element->data[0]; | ||
1036 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", | ||
1037 | network->erp_value); | ||
917 | break; | 1038 | break; |
918 | 1039 | ||
919 | case MFIE_TYPE_IBSS_SET: | 1040 | case MFIE_TYPE_IBSS_SET: |
920 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n"); | 1041 | network->atim_window = info_element->data[0]; |
1042 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", | ||
1043 | network->atim_window); | ||
921 | break; | 1044 | break; |
922 | 1045 | ||
923 | case MFIE_TYPE_CHALLENGE: | 1046 | case MFIE_TYPE_CHALLENGE: |
924 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n"); | 1047 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); |
925 | break; | 1048 | break; |
926 | 1049 | ||
927 | case MFIE_TYPE_GENERIC: | 1050 | case MFIE_TYPE_GENERIC: |
928 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", | 1051 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", |
929 | info_element->len); | 1052 | info_element->len); |
1053 | if (!ieee80211_parse_qos_info_param_IE(info_element, | ||
1054 | network)) | ||
1055 | break; | ||
1056 | |||
930 | if (info_element->len >= 4 && | 1057 | if (info_element->len >= 4 && |
931 | info_element->data[0] == 0x00 && | 1058 | info_element->data[0] == 0x00 && |
932 | info_element->data[1] == 0x50 && | 1059 | info_element->data[1] == 0x50 && |
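
MFIE_TYPE_GENERIC is a grab-bag: the code above first offers the element to the QoS parser and only on failure matches it against the WPA OUI bytes 00:50:F2 with OUI type 1. A standalone sketch of that discrimination by OUI and OUI-type; the WMM type value used here (2) is an assumption, since the diff only shows QOS_OUI_TYPE symbolically:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t wfa_oui[3] = { 0x00, 0x50, 0xF2 };

/* data points at the IE payload, after the two id/len bytes. */
static const char *classify_generic_ie(const uint8_t *data, int len)
{
	if (len < 4 || memcmp(data, wfa_oui, 3) != 0)
		return "unknown vendor IE";
	switch (data[3]) {	/* OUI type byte */
	case 0x01:
		return "WPA information element";
	case 0x02:		/* assumed value of QOS_OUI_TYPE (WMM) */
		return "WMM/QoS element";
	default:
		return "other WFA element";
	}
}

int main(void)
{
	const uint8_t wpa_ie[] = { 0x00, 0x50, 0xF2, 0x01, 0x01, 0x00 };

	puts(classify_generic_ie(wpa_ie, sizeof(wpa_ie)));
	return 0;
}
```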
@@ -940,7 +1067,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
940 | break; | 1067 | break; |
941 | 1068 | ||
942 | case MFIE_TYPE_RSN: | 1069 | case MFIE_TYPE_RSN: |
943 | IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n", | 1070 | IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", |
944 | info_element->len); | 1071 | info_element->len); |
945 | network->rsn_ie_len = min(info_element->len + 2, | 1072 | network->rsn_ie_len = min(info_element->len + 2, |
946 | MAX_WPA_IE_LEN); | 1073 | MAX_WPA_IE_LEN); |
@@ -948,18 +1075,127 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, | |||
948 | network->rsn_ie_len); | 1075 | network->rsn_ie_len); |
949 | break; | 1076 | break; |
950 | 1077 | ||
1078 | case MFIE_TYPE_QOS_PARAMETER: | ||
1079 | printk(KERN_ERR | ||
1080 | "QoS Error need to parse QOS_PARAMETER IE\n"); | ||
1081 | break; | ||
1082 | |||
951 | default: | 1083 | default: |
952 | IEEE80211_DEBUG_SCAN("unsupported IE %d\n", | 1084 | IEEE80211_DEBUG_MGMT("unsupported IE %d\n", |
953 | info_element->id); | 1085 | info_element->id); |
954 | break; | 1086 | break; |
955 | } | 1087 | } |
956 | 1088 | ||
957 | left -= sizeof(struct ieee80211_info_element_hdr) + | 1089 | length -= sizeof(*info_element) + info_element->len; |
958 | info_element->len; | 1090 | info_element = |
959 | info_element = (struct ieee80211_info_element *) | 1091 | (struct ieee80211_info_element *)&info_element-> |
960 | &info_element->data[info_element->len]; | 1092 | data[info_element->len]; |
1093 | } | ||
1094 | |||
1095 | return 0; | ||
1096 | } | ||
1097 | |||
1098 | static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response | ||
1099 | *frame, struct ieee80211_rx_stats *stats) | ||
1100 | { | ||
1101 | struct ieee80211_network network_resp; | ||
1102 | struct ieee80211_network *network = &network_resp; | ||
1103 | struct net_device *dev = ieee->dev; | ||
1104 | |||
1105 | network->flags = 0; | ||
1106 | network->qos_data.active = 0; | ||
1107 | network->qos_data.supported = 0; | ||
1108 | network->qos_data.param_count = 0; | ||
1109 | network->qos_data.old_param_count = 0; | ||
1110 | |||
1111 | //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); | ||
1112 | network->atim_window = le16_to_cpu(frame->aid); | ||
1113 | network->listen_interval = le16_to_cpu(frame->status); | ||
1114 | memcpy(network->bssid, frame->header.addr3, ETH_ALEN); | ||
1115 | network->capability = le16_to_cpu(frame->capability); | ||
1116 | network->last_scanned = jiffies; | ||
1117 | network->rates_len = network->rates_ex_len = 0; | ||
1118 | network->last_associate = 0; | ||
1119 | network->ssid_len = 0; | ||
1120 | network->erp_value = | ||
1121 | (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0; | ||
1122 | |||
1123 | if (stats->freq == IEEE80211_52GHZ_BAND) { | ||
1124 | /* for A band (No DS info) */ | ||
1125 | network->channel = stats->received_channel; | ||
1126 | } else | ||
1127 | network->flags |= NETWORK_HAS_CCK; | ||
1128 | |||
1129 | network->wpa_ie_len = 0; | ||
1130 | network->rsn_ie_len = 0; | ||
1131 | |||
1132 | if (ieee80211_parse_info_param | ||
1133 | (frame->info_element, stats->len - sizeof(*frame), network)) | ||
1134 | return 1; | ||
1135 | |||
1136 | network->mode = 0; | ||
1137 | if (stats->freq == IEEE80211_52GHZ_BAND) | ||
1138 | network->mode = IEEE_A; | ||
1139 | else { | ||
1140 | if (network->flags & NETWORK_HAS_OFDM) | ||
1141 | network->mode |= IEEE_G; | ||
1142 | if (network->flags & NETWORK_HAS_CCK) | ||
1143 | network->mode |= IEEE_B; | ||
961 | } | 1144 | } |
962 | 1145 | ||
1146 | if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) | ||
1147 | network->flags |= NETWORK_EMPTY_ESSID; | ||
1148 | |||
1149 | memcpy(&network->stats, stats, sizeof(network->stats)); | ||
1150 | |||
1151 | if (ieee->handle_assoc_response != NULL) | ||
1152 | ieee->handle_assoc_response(dev, frame, network); | ||
1153 | |||
1154 | return 0; | ||
1155 | } | ||
1156 | |||
1157 | /***************************************************/ | ||
1158 | |||
1159 | static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response | ||
1160 | *beacon, | ||
1161 | struct ieee80211_network *network, | ||
1162 | struct ieee80211_rx_stats *stats) | ||
1163 | { | ||
1164 | network->qos_data.active = 0; | ||
1165 | network->qos_data.supported = 0; | ||
1166 | network->qos_data.param_count = 0; | ||
1167 | network->qos_data.old_param_count = 0; | ||
1168 | |||
1169 | /* Pull out fixed field data */ | ||
1170 | memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); | ||
1171 | network->capability = le16_to_cpu(beacon->capability); | ||
1172 | network->last_scanned = jiffies; | ||
1173 | network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); | ||
1174 | network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); | ||
1175 | network->beacon_interval = le16_to_cpu(beacon->beacon_interval); | ||
1176 | /* Where to pull this? beacon->listen_interval; */ | ||
1177 | network->listen_interval = 0x0A; | ||
1178 | network->rates_len = network->rates_ex_len = 0; | ||
1179 | network->last_associate = 0; | ||
1180 | network->ssid_len = 0; | ||
1181 | network->flags = 0; | ||
1182 | network->atim_window = 0; | ||
1183 | network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? | ||
1184 | 0x3 : 0x0; | ||
1185 | |||
1186 | if (stats->freq == IEEE80211_52GHZ_BAND) { | ||
1187 | /* for A band (No DS info) */ | ||
1188 | network->channel = stats->received_channel; | ||
1189 | } else | ||
1190 | network->flags |= NETWORK_HAS_CCK; | ||
1191 | |||
1192 | network->wpa_ie_len = 0; | ||
1193 | network->rsn_ie_len = 0; | ||
1194 | |||
1195 | if (ieee80211_parse_info_param | ||
1196 | (beacon->info_element, stats->len - sizeof(*beacon), network)) | ||
1197 | return 1; | ||
1198 | |||
963 | network->mode = 0; | 1199 | network->mode = 0; |
964 | if (stats->freq == IEEE80211_52GHZ_BAND) | 1200 | if (stats->freq == IEEE80211_52GHZ_BAND) |
965 | network->mode = IEEE_A; | 1201 | network->mode = IEEE_A; |
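
Both ieee80211_handle_assoc_resp and ieee80211_network_init end with the same mode derivation: a 5.2 GHz frame implies 802.11a outright, otherwise the OFDM/CCK flags collected while parsing the rates IEs map onto g and b. A compact sketch of that decision; the flag and mode constants are stand-ins for the NETWORK_HAS_*/IEEE_* values in net/ieee80211.h:

```c
#include <stdio.h>

/* Stand-in constants; the real values live in net/ieee80211.h. */
#define HAS_OFDM 0x01
#define HAS_CCK  0x02
#define MODE_A   0x01
#define MODE_B   0x02
#define MODE_G   0x04

static int derive_mode(int is_52ghz, unsigned flags)
{
	int mode = 0;

	if (is_52ghz)
		return MODE_A;	/* A band: no DS parameter set, no CCK */
	if (flags & HAS_OFDM)
		mode |= MODE_G;	/* OFDM rates seen in the rates IEs */
	if (flags & HAS_CCK)
		mode |= MODE_B;	/* CCK rates seen in the rates IEs */
	return mode;
}

int main(void)
{
	printf("2.4 GHz, OFDM+CCK -> %#x (b/g)\n",
	       derive_mode(0, HAS_OFDM | HAS_CCK));
	return 0;
}
```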
@@ -1002,6 +1238,9 @@ static inline int is_same_network(struct ieee80211_network *src, | |||
1002 | static inline void update_network(struct ieee80211_network *dst, | 1238 | static inline void update_network(struct ieee80211_network *dst, |
1003 | struct ieee80211_network *src) | 1239 | struct ieee80211_network *src) |
1004 | { | 1240 | { |
1241 | int qos_active; | ||
1242 | u8 old_param; | ||
1243 | |||
1005 | memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); | 1244 | memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); |
1006 | dst->capability = src->capability; | 1245 | dst->capability = src->capability; |
1007 | memcpy(dst->rates, src->rates, src->rates_len); | 1246 | memcpy(dst->rates, src->rates, src->rates_len); |
@@ -1017,6 +1256,7 @@ static inline void update_network(struct ieee80211_network *dst, | |||
1017 | dst->beacon_interval = src->beacon_interval; | 1256 | dst->beacon_interval = src->beacon_interval; |
1018 | dst->listen_interval = src->listen_interval; | 1257 | dst->listen_interval = src->listen_interval; |
1019 | dst->atim_window = src->atim_window; | 1258 | dst->atim_window = src->atim_window; |
1259 | dst->erp_value = src->erp_value; | ||
1020 | 1260 | ||
1021 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); | 1261 | memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); |
1022 | dst->wpa_ie_len = src->wpa_ie_len; | 1262 | dst->wpa_ie_len = src->wpa_ie_len; |
@@ -1024,22 +1264,48 @@ static inline void update_network(struct ieee80211_network *dst, | |||
1024 | dst->rsn_ie_len = src->rsn_ie_len; | 1264 | dst->rsn_ie_len = src->rsn_ie_len; |
1025 | 1265 | ||
1026 | dst->last_scanned = jiffies; | 1266 | dst->last_scanned = jiffies; |
1267 | qos_active = src->qos_data.active; | ||
1268 | old_param = dst->qos_data.old_param_count; | ||
1269 | if (dst->flags & NETWORK_HAS_QOS_MASK) | ||
1270 | memcpy(&dst->qos_data, &src->qos_data, | ||
1271 | sizeof(struct ieee80211_qos_data)); | ||
1272 | else { | ||
1273 | dst->qos_data.supported = src->qos_data.supported; | ||
1274 | dst->qos_data.param_count = src->qos_data.param_count; | ||
1275 | } | ||
1276 | |||
1277 | if (dst->qos_data.supported == 1) { | ||
1278 | if (dst->ssid_len) | ||
1279 | IEEE80211_DEBUG_QOS | ||
1280 | ("QoS the network %s is QoS supported\n", | ||
1281 | dst->ssid); | ||
1282 | else | ||
1283 | IEEE80211_DEBUG_QOS | ||
1284 | ("QoS the network is QoS supported\n"); | ||
1285 | } | ||
1286 | dst->qos_data.active = qos_active; | ||
1287 | dst->qos_data.old_param_count = old_param; | ||
1288 | |||
1027 | /* dst->last_associate is not overwritten */ | 1289 | /* dst->last_associate is not overwritten */ |
1028 | } | 1290 | } |
1029 | 1291 | ||
1292 | static inline int is_beacon(int fc) | ||
1293 | { | ||
1294 | return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); | ||
1295 | } | ||
1296 | |||
1030 | static inline void ieee80211_process_probe_response(struct ieee80211_device | 1297 | static inline void ieee80211_process_probe_response(struct ieee80211_device |
1031 | *ieee, | 1298 | *ieee, struct |
1032 | struct | ||
1033 | ieee80211_probe_response | 1299 | ieee80211_probe_response |
1034 | *beacon, | 1300 | *beacon, struct ieee80211_rx_stats |
1035 | struct ieee80211_rx_stats | ||
1036 | *stats) | 1301 | *stats) |
1037 | { | 1302 | { |
1303 | struct net_device *dev = ieee->dev; | ||
1038 | struct ieee80211_network network; | 1304 | struct ieee80211_network network; |
1039 | struct ieee80211_network *target; | 1305 | struct ieee80211_network *target; |
1040 | struct ieee80211_network *oldest = NULL; | 1306 | struct ieee80211_network *oldest = NULL; |
1041 | #ifdef CONFIG_IEEE80211_DEBUG | 1307 | #ifdef CONFIG_IEEE80211_DEBUG |
1042 | struct ieee80211_info_element *info_element = &beacon->info_element; | 1308 | struct ieee80211_info_element *info_element = beacon->info_element; |
1043 | #endif | 1309 | #endif |
1044 | unsigned long flags; | 1310 | unsigned long flags; |
1045 | 1311 | ||
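
Note the save/restore dance in update_network: qos_data.active and old_param_count are driver-side runtime state, so they are captured before the beacon-derived qos_data is copied over and written back afterwards. The pattern in isolation, with a hypothetical reduced struct:

```c
#include <string.h>

/* Hypothetical reduced qos_data, for illustration only. */
struct qos_state {
	int active;          /* runtime: owned by the driver        */
	int old_param_count; /* runtime: last parameter set applied */
	int supported;       /* advertised: refreshed from beacons  */
	int param_count;     /* advertised: refreshed from beacons  */
};

static void merge_qos(struct qos_state *dst, const struct qos_state *src)
{
	int active = dst->active;            /* save runtime state */
	int old_param = dst->old_param_count;

	memcpy(dst, src, sizeof(*dst));      /* take the advertised data */

	dst->active = active;                /* put runtime state back */
	dst->old_param_count = old_param;
}

int main(void)
{
	struct qos_state cur = { 1, 3, 1, 3 };
	struct qos_state fresh = { 0, 0, 1, 4 };

	merge_qos(&cur, &fresh);
	return (cur.active == 1 && cur.old_param_count == 3) ? 0 : 1;
}
```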
@@ -1070,10 +1336,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device | |||
1070 | escape_essid(info_element->data, | 1336 | escape_essid(info_element->data, |
1071 | info_element->len), | 1337 | info_element->len), |
1072 | MAC_ARG(beacon->header.addr3), | 1338 | MAC_ARG(beacon->header.addr3), |
1073 | WLAN_FC_GET_STYPE(beacon->header. | 1339 | is_beacon(le16_to_cpu |
1074 | frame_ctl) == | 1340 | (beacon->header. |
1075 | IEEE80211_STYPE_PROBE_RESP ? | 1341 | frame_ctl)) ? |
1076 | "PROBE RESPONSE" : "BEACON"); | 1342 | "BEACON" : "PROBE RESPONSE"); |
1077 | return; | 1343 | return; |
1078 | } | 1344 | } |
1079 | 1345 | ||
@@ -1122,10 +1388,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device | |||
1122 | escape_essid(network.ssid, | 1388 | escape_essid(network.ssid, |
1123 | network.ssid_len), | 1389 | network.ssid_len), |
1124 | MAC_ARG(network.bssid), | 1390 | MAC_ARG(network.bssid), |
1125 | WLAN_FC_GET_STYPE(beacon->header. | 1391 | is_beacon(le16_to_cpu |
1126 | frame_ctl) == | 1392 | (beacon->header. |
1127 | IEEE80211_STYPE_PROBE_RESP ? | 1393 | frame_ctl)) ? |
1128 | "PROBE RESPONSE" : "BEACON"); | 1394 | "BEACON" : "PROBE RESPONSE"); |
1129 | #endif | 1395 | #endif |
1130 | memcpy(target, &network, sizeof(*target)); | 1396 | memcpy(target, &network, sizeof(*target)); |
1131 | list_add_tail(&target->list, &ieee->network_list); | 1397 | list_add_tail(&target->list, &ieee->network_list); |
@@ -1134,34 +1400,60 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device | |||
1134 | escape_essid(target->ssid, | 1400 | escape_essid(target->ssid, |
1135 | target->ssid_len), | 1401 | target->ssid_len), |
1136 | MAC_ARG(target->bssid), | 1402 | MAC_ARG(target->bssid), |
1137 | WLAN_FC_GET_STYPE(beacon->header. | 1403 | is_beacon(le16_to_cpu |
1138 | frame_ctl) == | 1404 | (beacon->header. |
1139 | IEEE80211_STYPE_PROBE_RESP ? | 1405 | frame_ctl)) ? |
1140 | "PROBE RESPONSE" : "BEACON"); | 1406 | "BEACON" : "PROBE RESPONSE"); |
1141 | update_network(target, &network); | 1407 | update_network(target, &network); |
1142 | } | 1408 | } |
1143 | 1409 | ||
1144 | spin_unlock_irqrestore(&ieee->lock, flags); | 1410 | spin_unlock_irqrestore(&ieee->lock, flags); |
1411 | |||
1412 | if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) { | ||
1413 | if (ieee->handle_beacon != NULL) | ||
1414 | ieee->handle_beacon(dev, beacon, &network); | ||
1415 | } else { | ||
1416 | if (ieee->handle_probe_response != NULL) | ||
1417 | ieee->handle_probe_response(dev, beacon, &network); | ||
1418 | } | ||
1145 | } | 1419 | } |
1146 | 1420 | ||
1147 | void ieee80211_rx_mgt(struct ieee80211_device *ieee, | 1421 | void ieee80211_rx_mgt(struct ieee80211_device *ieee, |
1148 | struct ieee80211_hdr *header, | 1422 | struct ieee80211_hdr_4addr *header, |
1149 | struct ieee80211_rx_stats *stats) | 1423 | struct ieee80211_rx_stats *stats) |
1150 | { | 1424 | { |
1151 | switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { | 1425 | switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) { |
1152 | case IEEE80211_STYPE_ASSOC_RESP: | 1426 | case IEEE80211_STYPE_ASSOC_RESP: |
1153 | IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", | 1427 | IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", |
1154 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1428 | WLAN_FC_GET_STYPE(le16_to_cpu |
1429 | (header->frame_ctl))); | ||
1430 | ieee80211_handle_assoc_resp(ieee, | ||
1431 | (struct ieee80211_assoc_response *) | ||
1432 | header, stats); | ||
1155 | break; | 1433 | break; |
1156 | 1434 | ||
1157 | case IEEE80211_STYPE_REASSOC_RESP: | 1435 | case IEEE80211_STYPE_REASSOC_RESP: |
1158 | IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n", | 1436 | IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n", |
1159 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1437 | WLAN_FC_GET_STYPE(le16_to_cpu |
1438 | (header->frame_ctl))); | ||
1439 | break; | ||
1440 | |||
1441 | case IEEE80211_STYPE_PROBE_REQ: | ||
1442 | IEEE80211_DEBUG_MGMT("received probe request (%d)\n", | ||
1443 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1444 | (header->frame_ctl))); | ||
1445 | |||
1446 | if (ieee->handle_probe_request != NULL) | ||
1447 | ieee->handle_probe_request(ieee->dev, | ||
1448 | (struct | ||
1449 | ieee80211_probe_request *) | ||
1450 | header, stats); | ||
1160 | break; | 1451 | break; |
1161 | 1452 | ||
1162 | case IEEE80211_STYPE_PROBE_RESP: | 1453 | case IEEE80211_STYPE_PROBE_RESP: |
1163 | IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", | 1454 | IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", |
1164 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1455 | WLAN_FC_GET_STYPE(le16_to_cpu |
1456 | (header->frame_ctl))); | ||
1165 | IEEE80211_DEBUG_SCAN("Probe response\n"); | 1457 | IEEE80211_DEBUG_SCAN("Probe response\n"); |
1166 | ieee80211_process_probe_response(ieee, | 1458 | ieee80211_process_probe_response(ieee, |
1167 | (struct | 1459 | (struct |
@@ -1171,20 +1463,46 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1171 | 1463 | ||
1172 | case IEEE80211_STYPE_BEACON: | 1464 | case IEEE80211_STYPE_BEACON: |
1173 | IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", | 1465 | IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", |
1174 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1466 | WLAN_FC_GET_STYPE(le16_to_cpu |
1467 | (header->frame_ctl))); | ||
1175 | IEEE80211_DEBUG_SCAN("Beacon\n"); | 1468 | IEEE80211_DEBUG_SCAN("Beacon\n"); |
1176 | ieee80211_process_probe_response(ieee, | 1469 | ieee80211_process_probe_response(ieee, |
1177 | (struct | 1470 | (struct |
1178 | ieee80211_probe_response *) | 1471 | ieee80211_probe_response *) |
1179 | header, stats); | 1472 | header, stats); |
1180 | break; | 1473 | break; |
1474 | case IEEE80211_STYPE_AUTH: | ||
1181 | 1475 | ||
1476 | IEEE80211_DEBUG_MGMT("received auth (%d)\n", | ||
1477 | WLAN_FC_GET_STYPE(le16_to_cpu | ||
1478 | (header->frame_ctl))); | ||
1479 | |||
1480 | if (ieee->handle_auth != NULL) | ||
1481 | ieee->handle_auth(ieee->dev, | ||
1482 | (struct ieee80211_auth *)header); | ||
1483 | break; | ||
1484 | |||
1485 | case IEEE80211_STYPE_DISASSOC: | ||
1486 | if (ieee->handle_disassoc != NULL) | ||
1487 | ieee->handle_disassoc(ieee->dev, | ||
1488 | (struct ieee80211_disassoc *) | ||
1489 | header); | ||
1490 | break; | ||
1491 | |||
1492 | case IEEE80211_STYPE_DEAUTH: | ||
1493 | printk(KERN_DEBUG "DEAUTH from AP\n"); | ||
1494 | if (ieee->handle_deauth != NULL) | ||
1495 | ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *) | ||
1496 | header); | ||
1497 | break; | ||
1182 | default: | 1498 | default: |
1183 | IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", | 1499 | IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", |
1184 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1500 | WLAN_FC_GET_STYPE(le16_to_cpu |
1501 | (header->frame_ctl))); | ||
1185 | IEEE80211_WARNING("%s: Unknown management packet: %d\n", | 1502 | IEEE80211_WARNING("%s: Unknown management packet: %d\n", |
1186 | ieee->dev->name, | 1503 | ieee->dev->name, |
1187 | WLAN_FC_GET_STYPE(header->frame_ctl)); | 1504 | WLAN_FC_GET_STYPE(le16_to_cpu |
1505 | (header->frame_ctl))); | ||
1188 | break; | 1506 | break; |
1189 | } | 1507 | } |
1190 | } | 1508 | } |
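
A recurring fix in this hunk is byte order: frame_ctl arrives little-endian on the wire, so every WLAN_FC_GET_STYPE call now goes through le16_to_cpu first. A userspace analogue of the subtype dispatch; the mask and subtype constants follow the standard 802.11 frame-control encoding, and the conversion helper is local to this sketch:

```c
#include <stdint.h>
#include <stdio.h>

#define FCTL_STYPE       0x00F0
#define STYPE_ASSOC_RESP 0x0010
#define STYPE_PROBE_RESP 0x0050
#define STYPE_BEACON     0x0080

/* Stand-in for le16_to_cpu: assemble a host value from LE bytes. */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t frame_ctl[2] = { 0x80, 0x00 }; /* mgmt frame, subtype beacon */

	switch (le16_to_host(frame_ctl) & FCTL_STYPE) {
	case STYPE_BEACON:
		puts("beacon");
		break;
	case STYPE_PROBE_RESP:
		puts("probe response");
		break;
	case STYPE_ASSOC_RESP:
		puts("association response");
		break;
	default:
		puts("other mgmt subtype");
	}
	return 0;
}
```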
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index eed07bbbe6b6..95ccbadbf55b 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | This program is free software; you can redistribute it and/or modify it | 5 | This program is free software; you can redistribute it and/or modify it |
6 | under the terms of version 2 of the GNU General Public License as | 6 | under the terms of version 2 of the GNU General Public License as |
@@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes. | |||
128 | static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; | 128 | static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; |
129 | static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; | 129 | static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; |
130 | 130 | ||
131 | static inline int ieee80211_put_snap(u8 * data, u16 h_proto) | 131 | static inline int ieee80211_copy_snap(u8 * data, u16 h_proto) |
132 | { | 132 | { |
133 | struct ieee80211_snap_hdr *snap; | 133 | struct ieee80211_snap_hdr *snap; |
134 | u8 *oui; | 134 | u8 *oui; |
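
The renamed ieee80211_copy_snap writes the 802.2 LLC/SNAP encapsulation (AA AA 03, a three-byte OUI, then the big-endian ethertype) in front of the payload; the two OUI arrays above select 802.1H bridge-tunnel versus RFC 1042 encapsulation. A standalone sketch covering only the common RFC 1042 case:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build an LLC/SNAP header followed by the ethertype, as done when
 * converting an Ethernet payload into an 802.11 data frame. */
static int put_snap(uint8_t *data, uint16_t h_proto)
{
	static const uint8_t rfc1042_oui[3] = { 0x00, 0x00, 0x00 };

	data[0] = 0xAA;              /* DSAP: SNAP */
	data[1] = 0xAA;              /* SSAP: SNAP */
	data[2] = 0x03;              /* control: unnumbered information */
	memcpy(&data[3], rfc1042_oui, 3);
	data[6] = h_proto >> 8;      /* ethertype, big-endian on the wire */
	data[7] = h_proto & 0xFF;
	return 8;                    /* SNAP_SIZE (6) + sizeof(u16) */
}

int main(void)
{
	uint8_t hdr[8];
	int i, n = put_snap(hdr, 0x0800); /* IPv4 */

	for (i = 0; i < n; i++)
		printf("%02X ", hdr[i]);
	printf("\n");
	return 0;
}
```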
@@ -157,31 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, | |||
157 | struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; | 157 | struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; |
158 | int res; | 158 | int res; |
159 | 159 | ||
160 | #ifdef CONFIG_IEEE80211_CRYPT_TKIP | 160 | if (crypt == NULL) |
161 | struct ieee80211_hdr *header; | ||
162 | |||
163 | if (ieee->tkip_countermeasures && | ||
164 | crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { | ||
165 | header = (struct ieee80211_hdr *)frag->data; | ||
166 | if (net_ratelimit()) { | ||
167 | printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " | ||
168 | "TX packet to " MAC_FMT "\n", | ||
169 | ieee->dev->name, MAC_ARG(header->addr1)); | ||
170 | } | ||
171 | return -1; | 161 | return -1; |
172 | } | 162 | |
173 | #endif | ||
174 | /* To encrypt, frame format is: | 163 | /* To encrypt, frame format is: |
175 | * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ | 164 | * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ |
176 | |||
177 | // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption. | ||
178 | /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so | ||
179 | * call both MSDU and MPDU encryption functions from here. */ | ||
180 | atomic_inc(&crypt->refcnt); | 165 | atomic_inc(&crypt->refcnt); |
181 | res = 0; | 166 | res = 0; |
182 | if (crypt->ops->encrypt_msdu) | 167 | if (crypt->ops && crypt->ops->encrypt_mpdu) |
183 | res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); | ||
184 | if (res == 0 && crypt->ops->encrypt_mpdu) | ||
185 | res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); | 168 | res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); |
186 | 169 | ||
187 | atomic_dec(&crypt->refcnt); | 170 | atomic_dec(&crypt->refcnt); |
@@ -207,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) | |||
207 | } | 190 | } |
208 | 191 | ||
209 | static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, | 192 | static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, |
210 | gfp_t gfp_mask) | 193 | int headroom, gfp_t gfp_mask) |
211 | { | 194 | { |
212 | struct ieee80211_txb *txb; | 195 | struct ieee80211_txb *txb; |
213 | int i; | 196 | int i; |
@@ -221,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, | |||
221 | txb->frag_size = txb_size; | 204 | txb->frag_size = txb_size; |
222 | 205 | ||
223 | for (i = 0; i < nr_frags; i++) { | 206 | for (i = 0; i < nr_frags; i++) { |
224 | txb->fragments[i] = dev_alloc_skb(txb_size); | 207 | txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, |
208 | gfp_mask); | ||
225 | if (unlikely(!txb->fragments[i])) { | 209 | if (unlikely(!txb->fragments[i])) { |
226 | i--; | 210 | i--; |
227 | break; | 211 | break; |
228 | } | 212 | } |
213 | skb_reserve(txb->fragments[i], headroom); | ||
229 | } | 214 | } |
230 | if (unlikely(i != nr_frags)) { | 215 | if (unlikely(i != nr_frags)) { |
231 | while (i >= 0) | 216 | while (i >= 0) |
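
ieee80211_alloc_txb now over-allocates each fragment by a caller-supplied headroom and immediately reserves it, so a driver can later push its hardware TX descriptor without a copy. The idiom on its own, as it might appear in driver code; the function and its parameters are hypothetical, while __dev_alloc_skb and skb_reserve are the real kernel API:

```c
#include <linux/skbuff.h>

/* Sketch: allocate one fragment with driver headroom reserved, as
 * ieee80211_alloc_txb now does for every entry in the txb. */
static struct sk_buff *alloc_frag_skb(int frag_size, int headroom,
				      gfp_t gfp_mask)
{
	struct sk_buff *skb = __dev_alloc_skb(frag_size + headroom, gfp_mask);

	if (unlikely(!skb))
		return NULL;
	/* Advance skb->data so the driver can later skb_push() its own
	 * TX descriptor in front of the 802.11 header. */
	skb_reserve(skb, headroom);
	return skb;
}
```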
@@ -236,25 +221,31 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, | |||
236 | return txb; | 221 | return txb; |
237 | } | 222 | } |
238 | 223 | ||
239 | /* SKBs are added to the ieee->tx_queue. */ | 224 | /* Incoming skb is converted to a txb which consists of |
225 | * a block of 802.11 fragment packets (stored as skbs) */ | ||
240 | int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | 226 | int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) |
241 | { | 227 | { |
242 | struct ieee80211_device *ieee = netdev_priv(dev); | 228 | struct ieee80211_device *ieee = netdev_priv(dev); |
243 | struct ieee80211_txb *txb = NULL; | 229 | struct ieee80211_txb *txb = NULL; |
244 | struct ieee80211_hdr *frag_hdr; | 230 | struct ieee80211_hdr_3addr *frag_hdr; |
245 | int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; | 231 | int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, |
232 | rts_required; | ||
246 | unsigned long flags; | 233 | unsigned long flags; |
247 | struct net_device_stats *stats = &ieee->stats; | 234 | struct net_device_stats *stats = &ieee->stats; |
248 | int ether_type, encrypt; | 235 | int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; |
249 | int bytes, fc, hdr_len; | 236 | int bytes, fc, hdr_len; |
250 | struct sk_buff *skb_frag; | 237 | struct sk_buff *skb_frag; |
251 | struct ieee80211_hdr header = { /* Ensure zero initialized */ | 238 | struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ |
252 | .duration_id = 0, | 239 | .duration_id = 0, |
253 | .seq_ctl = 0 | 240 | .seq_ctl = 0 |
254 | }; | 241 | }; |
255 | u8 dest[ETH_ALEN], src[ETH_ALEN]; | 242 | u8 dest[ETH_ALEN], src[ETH_ALEN]; |
256 | |||
257 | struct ieee80211_crypt_data *crypt; | 243 | struct ieee80211_crypt_data *crypt; |
244 | int priority = skb->priority; | ||
245 | int snapped = 0; | ||
246 | |||
247 | if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) | ||
248 | return NETDEV_TX_BUSY; | ||
258 | 249 | ||
259 | spin_lock_irqsave(&ieee->lock, flags); | 250 | spin_lock_irqsave(&ieee->lock, flags); |
260 | 251 | ||
@@ -276,7 +267,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
276 | crypt = ieee->crypt[ieee->tx_keyidx]; | 267 | crypt = ieee->crypt[ieee->tx_keyidx]; |
277 | 268 | ||
278 | encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && | 269 | encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && |
279 | ieee->host_encrypt && crypt && crypt->ops; | 270 | ieee->sec.encrypt; |
271 | |||
272 | host_encrypt = ieee->host_encrypt && encrypt && crypt; | ||
273 | host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; | ||
274 | host_build_iv = ieee->host_build_iv && encrypt && crypt; | ||
280 | 275 | ||
281 | if (!encrypt && ieee->ieee802_1x && | 276 | if (!encrypt && ieee->ieee802_1x && |
282 | ieee->drop_unencrypted && ether_type != ETH_P_PAE) { | 277 | ieee->drop_unencrypted && ether_type != ETH_P_PAE) { |
@@ -285,8 +280,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
285 | } | 280 | } |
286 | 281 | ||
287 | /* Save source and destination addresses */ | 282 | /* Save source and destination addresses */ |
288 | memcpy(&dest, skb->data, ETH_ALEN); | 283 | memcpy(dest, skb->data, ETH_ALEN); |
289 | memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN); | 284 | memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); |
290 | 285 | ||
291 | /* Advance the SKB to the start of the payload */ | 286 | /* Advance the SKB to the start of the payload */ |
292 | skb_pull(skb, sizeof(struct ethhdr)); | 287 | skb_pull(skb, sizeof(struct ethhdr)); |
@@ -294,7 +289,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
294 | /* Determine total amount of storage required for TXB packets */ | 289 | /* Determine total amount of storage required for TXB packets */ |
295 | bytes = skb->len + SNAP_SIZE + sizeof(u16); | 290 | bytes = skb->len + SNAP_SIZE + sizeof(u16); |
296 | 291 | ||
297 | if (encrypt) | 292 | if (host_encrypt) |
298 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | | 293 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | |
299 | IEEE80211_FCTL_PROTECTED; | 294 | IEEE80211_FCTL_PROTECTED; |
300 | else | 295 | else |
@@ -302,70 +297,144 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
302 | 297 | ||
303 | if (ieee->iw_mode == IW_MODE_INFRA) { | 298 | if (ieee->iw_mode == IW_MODE_INFRA) { |
304 | fc |= IEEE80211_FCTL_TODS; | 299 | fc |= IEEE80211_FCTL_TODS; |
305 | /* To DS: Addr1 = BSSID, Addr2 = SA, | 300 | /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ |
306 | Addr3 = DA */ | 301 | memcpy(header.addr1, ieee->bssid, ETH_ALEN); |
307 | memcpy(&header.addr1, ieee->bssid, ETH_ALEN); | 302 | memcpy(header.addr2, src, ETH_ALEN); |
308 | memcpy(&header.addr2, &src, ETH_ALEN); | 303 | memcpy(header.addr3, dest, ETH_ALEN); |
309 | memcpy(&header.addr3, &dest, ETH_ALEN); | ||
310 | } else if (ieee->iw_mode == IW_MODE_ADHOC) { | 304 | } else if (ieee->iw_mode == IW_MODE_ADHOC) { |
311 | /* not From/To DS: Addr1 = DA, Addr2 = SA, | 305 | /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ |
312 | Addr3 = BSSID */ | 306 | memcpy(header.addr1, dest, ETH_ALEN); |
313 | memcpy(&header.addr1, dest, ETH_ALEN); | 307 | memcpy(header.addr2, src, ETH_ALEN); |
314 | memcpy(&header.addr2, src, ETH_ALEN); | 308 | memcpy(header.addr3, ieee->bssid, ETH_ALEN); |
315 | memcpy(&header.addr3, ieee->bssid, ETH_ALEN); | ||
316 | } | 309 | } |
317 | header.frame_ctl = cpu_to_le16(fc); | 310 | header.frame_ctl = cpu_to_le16(fc); |
318 | hdr_len = IEEE80211_3ADDR_LEN; | 311 | hdr_len = IEEE80211_3ADDR_LEN; |
319 | 312 | ||
320 | /* Determine fragmentation size based on destination (multicast | 313 | /* Encrypt msdu first on the whole data packet. */ |
321 | * and broadcast are not fragmented) */ | 314 | if ((host_encrypt || host_encrypt_msdu) && |
322 | if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest)) | 315 | crypt && crypt->ops && crypt->ops->encrypt_msdu) { |
323 | frag_size = MAX_FRAG_THRESHOLD; | 316 | int res = 0; |
324 | else | 317 | int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + |
325 | frag_size = ieee->fts; | 318 | crypt->ops->extra_msdu_postfix_len; |
319 | struct sk_buff *skb_new = dev_alloc_skb(len); | ||
320 | |||
321 | if (unlikely(!skb_new)) | ||
322 | goto failed; | ||
323 | |||
324 | skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); | ||
325 | memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); | ||
326 | snapped = 1; | ||
327 | ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), | ||
328 | ether_type); | ||
329 | memcpy(skb_put(skb_new, skb->len), skb->data, skb->len); | ||
330 | res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); | ||
331 | if (res < 0) { | ||
332 | IEEE80211_ERROR("msdu encryption failed\n"); | ||
333 | dev_kfree_skb_any(skb_new); | ||
334 | goto failed; | ||
335 | } | ||
336 | dev_kfree_skb_any(skb); | ||
337 | skb = skb_new; | ||
338 | bytes += crypt->ops->extra_msdu_prefix_len + | ||
339 | crypt->ops->extra_msdu_postfix_len; | ||
340 | skb_pull(skb, hdr_len); | ||
341 | } | ||
326 | 342 | ||
327 | /* Determine amount of payload per fragment. Regardless of if | 343 | if (host_encrypt || ieee->host_open_frag) { |
328 | * this stack is providing the full 802.11 header, one will | 344 | /* Determine fragmentation size based on destination (multicast |
329 | * eventually be affixed to this fragment -- so we must account for | 345 | * and broadcast are not fragmented) */ |
330 | * it when determining the amount of payload space. */ | 346 | if (is_multicast_ether_addr(dest) || |
331 | bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; | 347 | is_broadcast_ether_addr(dest)) |
332 | if (ieee->config & | 348 | frag_size = MAX_FRAG_THRESHOLD; |
333 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | 349 | else |
334 | bytes_per_frag -= IEEE80211_FCS_LEN; | 350 | frag_size = ieee->fts; |
335 | 351 | ||
336 | /* Each fragment may need to have room for encryption pre/postfix */ | 352 | /* Determine amount of payload per fragment. Regardless of if |
337 | if (encrypt) | 353 | * this stack is providing the full 802.11 header, one will |
338 | bytes_per_frag -= crypt->ops->extra_prefix_len + | 354 | * eventually be affixed to this fragment -- so we must account |
339 | crypt->ops->extra_postfix_len; | 355 | * for it when determining the amount of payload space. */ |
340 | 356 | bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; | |
341 | /* Number of fragments is the total bytes_per_frag / | 357 | if (ieee->config & |
342 | * payload_per_fragment */ | 358 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) |
343 | nr_frags = bytes / bytes_per_frag; | 359 | bytes_per_frag -= IEEE80211_FCS_LEN; |
344 | bytes_last_frag = bytes % bytes_per_frag; | 360 | |
345 | if (bytes_last_frag) | 361 | /* Each fragment may need to have room for encryption |
362 | * pre/postfix */ | ||
363 | if (host_encrypt) | ||
364 | bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + | ||
365 | crypt->ops->extra_mpdu_postfix_len; | ||
366 | |||
367 | /* Number of fragments is the total | ||
368 | * bytes / payload_per_fragment */ | ||
369 | nr_frags = bytes / bytes_per_frag; | ||
370 | bytes_last_frag = bytes % bytes_per_frag; | ||
371 | if (bytes_last_frag) | ||
372 | nr_frags++; | ||
373 | else | ||
374 | bytes_last_frag = bytes_per_frag; | ||
375 | } else { | ||
376 | nr_frags = 1; | ||
377 | bytes_per_frag = bytes_last_frag = bytes; | ||
378 | frag_size = bytes + IEEE80211_3ADDR_LEN; | ||
379 | } | ||
380 | |||
381 | rts_required = (frag_size > ieee->rts | ||
382 | && ieee->config & CFG_IEEE80211_RTS); | ||
383 | if (rts_required) | ||
346 | nr_frags++; | 384 | nr_frags++; |
347 | else | ||
348 | bytes_last_frag = bytes_per_frag; | ||
349 | 385 | ||
350 | /* When we allocate the TXB we allocate enough space for the reserve | 386 | /* When we allocate the TXB we allocate enough space for the reserve |
351 | * and full fragment bytes (bytes_per_frag doesn't include prefix, | 387 | * and full fragment bytes (bytes_per_frag doesn't include prefix, |
352 | * postfix, header, FCS, etc.) */ | 388 | * postfix, header, FCS, etc.) */ |
353 | txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC); | 389 | txb = ieee80211_alloc_txb(nr_frags, frag_size, |
390 | ieee->tx_headroom, GFP_ATOMIC); | ||
354 | if (unlikely(!txb)) { | 391 | if (unlikely(!txb)) { |
355 | printk(KERN_WARNING "%s: Could not allocate TXB\n", | 392 | printk(KERN_WARNING "%s: Could not allocate TXB\n", |
356 | ieee->dev->name); | 393 | ieee->dev->name); |
357 | goto failed; | 394 | goto failed; |
358 | } | 395 | } |
359 | txb->encrypted = encrypt; | 396 | txb->encrypted = encrypt; |
360 | txb->payload_size = bytes; | 397 | if (host_encrypt) |
398 | txb->payload_size = frag_size * (nr_frags - 1) + | ||
399 | bytes_last_frag; | ||
400 | else | ||
401 | txb->payload_size = bytes; | ||
402 | |||
403 | if (rts_required) { | ||
404 | skb_frag = txb->fragments[0]; | ||
405 | frag_hdr = | ||
406 | (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); | ||
407 | |||
408 | /* | ||
409 | * Set header frame_ctl to the RTS. | ||
410 | */ | ||
411 | header.frame_ctl = | ||
412 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); | ||
413 | memcpy(frag_hdr, &header, hdr_len); | ||
361 | 414 | ||
362 | for (i = 0; i < nr_frags; i++) { | 415 | /* |
416 | * Restore header frame_ctl to the original data setting. | ||
417 | */ | ||
418 | header.frame_ctl = cpu_to_le16(fc); | ||
419 | |||
420 | if (ieee->config & | ||
421 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | ||
422 | skb_put(skb_frag, 4); | ||
423 | |||
424 | txb->rts_included = 1; | ||
425 | i = 1; | ||
426 | } else | ||
427 | i = 0; | ||
428 | |||
429 | for (; i < nr_frags; i++) { | ||
363 | skb_frag = txb->fragments[i]; | 430 | skb_frag = txb->fragments[i]; |
364 | 431 | ||
365 | if (encrypt) | 432 | if (host_encrypt || host_build_iv) |
366 | skb_reserve(skb_frag, crypt->ops->extra_prefix_len); | 433 | skb_reserve(skb_frag, |
434 | crypt->ops->extra_mpdu_prefix_len); | ||
367 | 435 | ||
368 | frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len); | 436 | frag_hdr = |
437 | (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); | ||
369 | memcpy(frag_hdr, &header, hdr_len); | 438 | memcpy(frag_hdr, &header, hdr_len); |
370 | 439 | ||
371 | /* If this is not the last fragment, then add the MOREFRAGS | 440 | /* If this is not the last fragment, then add the MOREFRAGS |
@@ -379,11 +448,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
379 | bytes = bytes_last_frag; | 448 | bytes = bytes_last_frag; |
380 | } | 449 | } |
381 | 450 | ||
382 | /* Put a SNAP header on the first fragment */ | 451 | if (i == 0 && !snapped) { |
383 | if (i == 0) { | 452 | ieee80211_copy_snap(skb_put |
384 | ieee80211_put_snap(skb_put | 453 | (skb_frag, SNAP_SIZE + sizeof(u16)), |
385 | (skb_frag, SNAP_SIZE + sizeof(u16)), | 454 | ether_type); |
386 | ether_type); | ||
387 | bytes -= SNAP_SIZE + sizeof(u16); | 455 | bytes -= SNAP_SIZE + sizeof(u16); |
388 | } | 456 | } |
389 | 457 | ||
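
The fragmentation arithmetic above is ceil(bytes / bytes_per_frag) spelled out as quotient plus remainder, where bytes_per_frag is the threshold minus the 24-byte 3-address header and any FCS or per-MPDU crypto overhead. A quick numeric check of that accounting; the threshold and payload sizes are made up:

```c
#include <stdio.h>

int main(void)
{
	int frag_size = 1500;  /* illustrative fragmentation threshold */
	int hdr = 24;          /* IEEE80211_3ADDR_LEN */
	int bytes = 4200;      /* payload incl. SNAP + ethertype */

	int bytes_per_frag = frag_size - hdr;
	int nr_frags = bytes / bytes_per_frag;
	int bytes_last_frag = bytes % bytes_per_frag;

	if (bytes_last_frag)
		nr_frags++;                      /* partial tail fragment */
	else
		bytes_last_frag = bytes_per_frag; /* tail is a full fragment */

	printf("%d fragments, %d bytes in the last\n",
	       nr_frags, bytes_last_frag);
	return 0;
}
```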
@@ -394,8 +462,19 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
394 | 462 | ||
395 | /* Encryption routine will move the header forward in order | 463 | /* Encryption routine will move the header forward in order |
396 | * to insert the IV between the header and the payload */ | 464 | * to insert the IV between the header and the payload */ |
397 | if (encrypt) | 465 | if (host_encrypt) |
398 | ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); | 466 | ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); |
467 | else if (host_build_iv) { | ||
468 | struct ieee80211_crypt_data *crypt; | ||
469 | |||
470 | crypt = ieee->crypt[ieee->tx_keyidx]; | ||
471 | atomic_inc(&crypt->refcnt); | ||
472 | if (crypt->ops->build_iv) | ||
473 | crypt->ops->build_iv(skb_frag, hdr_len, | ||
474 | crypt->priv); | ||
475 | atomic_dec(&crypt->refcnt); | ||
476 | } | ||
477 | |||
399 | if (ieee->config & | 478 | if (ieee->config & |
400 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | 479 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) |
401 | skb_put(skb_frag, 4); | 480 | skb_put(skb_frag, 4); |
@@ -407,11 +486,20 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
407 | dev_kfree_skb_any(skb); | 486 | dev_kfree_skb_any(skb); |
408 | 487 | ||
409 | if (txb) { | 488 | if (txb) { |
410 | if ((*ieee->hard_start_xmit) (txb, dev) == 0) { | 489 | int ret = (*ieee->hard_start_xmit) (txb, dev, priority); |
490 | if (ret == 0) { | ||
411 | stats->tx_packets++; | 491 | stats->tx_packets++; |
412 | stats->tx_bytes += txb->payload_size; | 492 | stats->tx_bytes += txb->payload_size; |
413 | return 0; | 493 | return 0; |
414 | } | 494 | } |
495 | |||
496 | if (ret == NETDEV_TX_BUSY) { | ||
497 | printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; " | ||
498 | "driver should report queue full via " | ||
499 | "ieee_device->is_queue_full.\n", | ||
500 | ieee->dev->name); | ||
501 | } | ||
502 | |||
415 | ieee80211_txb_free(txb); | 503 | ieee80211_txb_free(txb); |
416 | } | 504 | } |
417 | 505 | ||
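
The error message above documents the new contract: a driver should report congestion up front through ieee->is_queue_full so that ieee80211_xmit can bail out before building and encrypting a txb, instead of returning NETDEV_TX_BUSY from hard_start_xmit after the work is done. A sketch of what such a callback might look like; the private structure and its fields are hypothetical:

```c
#include <linux/netdevice.h>

/* Hypothetical driver private data for this sketch. */
struct ex_priv {
	int tx_pending[4];	/* frames queued per priority ring */
	int tx_ring_size;
};

/* Candidate for ieee->is_queue_full: a non-zero return makes
 * ieee80211_xmit return NETDEV_TX_BUSY before any txb is allocated
 * or encrypted. */
static int ex_is_queue_full(struct net_device *dev, int priority)
{
	struct ex_priv *priv = netdev_priv(dev);

	return priv->tx_pending[priority] >= priv->tx_ring_size;
}
```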
@@ -422,7 +510,72 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
422 | netif_stop_queue(dev); | 510 | netif_stop_queue(dev); |
423 | stats->tx_errors++; | 511 | stats->tx_errors++; |
424 | return 1; | 512 | return 1; |
513 | } | ||
514 | |||
515 | /* Incoming 802.11 structure is converted to a TXB, | ||
516 | * a block of 802.11 fragment packets (stored as skbs) */ | ||
517 | int ieee80211_tx_frame(struct ieee80211_device *ieee, | ||
518 | struct ieee80211_hdr *frame, int len) | ||
519 | { | ||
520 | struct ieee80211_txb *txb = NULL; | ||
521 | unsigned long flags; | ||
522 | struct net_device_stats *stats = &ieee->stats; | ||
523 | struct sk_buff *skb_frag; | ||
524 | int priority = -1; | ||
525 | |||
526 | spin_lock_irqsave(&ieee->lock, flags); | ||
425 | 527 | ||
528 | /* If there is no driver handler to take the TXB, don't bother | ||
529 | * creating it... */ | ||
530 | if (!ieee->hard_start_xmit) { | ||
531 | printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); | ||
532 | goto success; | ||
533 | } | ||
534 | |||
535 | if (unlikely(len < 24)) { | ||
536 | printk(KERN_WARNING "%s: skb too small (%d).\n", | ||
537 | ieee->dev->name, len); | ||
538 | goto success; | ||
539 | } | ||
540 | |||
541 | /* When we allocate the TXB we allocate enough space for the reserve | ||
542 | * and full fragment bytes (bytes_per_frag doesn't include prefix, | ||
543 | * postfix, header, FCS, etc.) */ | ||
544 | txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC); | ||
545 | if (unlikely(!txb)) { | ||
546 | printk(KERN_WARNING "%s: Could not allocate TXB\n", | ||
547 | ieee->dev->name); | ||
548 | goto failed; | ||
549 | } | ||
550 | txb->encrypted = 0; | ||
551 | txb->payload_size = len; | ||
552 | |||
553 | skb_frag = txb->fragments[0]; | ||
554 | |||
555 | memcpy(skb_put(skb_frag, len), frame, len); | ||
556 | |||
557 | if (ieee->config & | ||
558 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | ||
559 | skb_put(skb_frag, 4); | ||
560 | |||
561 | success: | ||
562 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
563 | |||
564 | if (txb) { | ||
565 | if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) { | ||
566 | stats->tx_packets++; | ||
567 | stats->tx_bytes += txb->payload_size; | ||
568 | return 0; | ||
569 | } | ||
570 | ieee80211_txb_free(txb); | ||
571 | } | ||
572 | return 0; | ||
573 | |||
574 | failed: | ||
575 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
576 | stats->tx_errors++; | ||
577 | return 1; | ||
426 | } | 578 | } |
427 | 579 | ||
580 | EXPORT_SYMBOL(ieee80211_tx_frame); | ||
428 | EXPORT_SYMBOL(ieee80211_txb_free); | 581 | EXPORT_SYMBOL(ieee80211_txb_free); |
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 94882f39b072..1ce7af9bec35 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | 2 | ||
3 | Copyright(c) 2004 Intel Corporation. All rights reserved. | 3 | Copyright(c) 2004-2005 Intel Corporation. All rights reserved. |
4 | 4 | ||
5 | Portions of this file are based on the WEP enablement code provided by the | 5 | Portions of this file are based on the WEP enablement code provided by the |
6 | Host AP project hostap-drivers v0.1.3 | 6 | Host AP project hostap-drivers v0.1.3 |
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | #include <linux/kmod.h> | 33 | #include <linux/kmod.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/jiffies.h> | ||
35 | 36 | ||
36 | #include <net/ieee80211.h> | 37 | #include <net/ieee80211.h> |
37 | #include <linux/wireless.h> | 38 | #include <linux/wireless.h> |
@@ -140,18 +141,41 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
140 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 141 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
141 | 142 | ||
142 | /* Add quality statistics */ | 143 | /* Add quality statistics */ |
143 | /* TODO: Fix these values... */ | ||
144 | iwe.cmd = IWEVQUAL; | 144 | iwe.cmd = IWEVQUAL; |
145 | iwe.u.qual.qual = network->stats.signal; | 145 | iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | |
146 | iwe.u.qual.level = network->stats.rssi; | 146 | IW_QUAL_NOISE_UPDATED; |
147 | iwe.u.qual.noise = network->stats.noise; | 147 | |
148 | iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK; | 148 | if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) { |
149 | if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) | 149 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | |
150 | iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; | 150 | IW_QUAL_LEVEL_INVALID; |
151 | if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) | 151 | iwe.u.qual.qual = 0; |
152 | iwe.u.qual.level = 0; | ||
153 | } else { | ||
154 | iwe.u.qual.level = network->stats.rssi; | ||
155 | if (ieee->perfect_rssi == ieee->worst_rssi) | ||
156 | iwe.u.qual.qual = 100; | ||
157 | else | ||
158 | iwe.u.qual.qual = | ||
159 | (100 * | ||
160 | (ieee->perfect_rssi - ieee->worst_rssi) * | ||
161 | (ieee->perfect_rssi - ieee->worst_rssi) - | ||
162 | (ieee->perfect_rssi - network->stats.rssi) * | ||
163 | (15 * (ieee->perfect_rssi - ieee->worst_rssi) + | ||
164 | 62 * (ieee->perfect_rssi - network->stats.rssi))) / | ||
165 | ((ieee->perfect_rssi - ieee->worst_rssi) * | ||
166 | (ieee->perfect_rssi - ieee->worst_rssi)); | ||
167 | if (iwe.u.qual.qual > 100) | ||
168 | iwe.u.qual.qual = 100; | ||
169 | else if (iwe.u.qual.qual < 1) | ||
170 | iwe.u.qual.qual = 0; | ||
171 | } | ||
172 | |||
173 | if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) { | ||
152 | iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; | 174 | iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; |
153 | if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) | 175 | iwe.u.qual.noise = 0; |
154 | iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID; | 176 | } else { |
177 | iwe.u.qual.noise = network->stats.noise; | ||
178 | } | ||
155 | 179 | ||
156 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); | 180 | start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); |
157 | 181 | ||
@@ -162,7 +186,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
162 | if (iwe.u.data.length) | 186 | if (iwe.u.data.length) |
163 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 187 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
164 | 188 | ||
165 | if (ieee->wpa_enabled && network->wpa_ie_len) { | 189 | if (network->wpa_ie_len) { |
166 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 190 | char buf[MAX_WPA_IE_LEN * 2 + 30]; |
167 | 191 | ||
168 | u8 *p = buf; | 192 | u8 *p = buf; |
@@ -177,7 +201,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
177 | start = iwe_stream_add_point(start, stop, &iwe, buf); | 201 | start = iwe_stream_add_point(start, stop, &iwe, buf); |
178 | } | 202 | } |
179 | 203 | ||
180 | if (ieee->wpa_enabled && network->rsn_ie_len) { | 204 | if (network->rsn_ie_len) { |
181 | char buf[MAX_WPA_IE_LEN * 2 + 30]; | 205 | char buf[MAX_WPA_IE_LEN * 2 + 30]; |
182 | 206 | ||
183 | u8 *p = buf; | 207 | u8 *p = buf; |
@@ -197,8 +221,8 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
197 | iwe.cmd = IWEVCUSTOM; | 221 | iwe.cmd = IWEVCUSTOM; |
198 | p = custom; | 222 | p = custom; |
199 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), | 223 | p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), |
200 | " Last beacon: %lums ago", | 224 | " Last beacon: %dms ago", |
201 | (jiffies - network->last_scanned) / (HZ / 100)); | 225 | jiffies_to_msecs(jiffies - network->last_scanned)); |
202 | iwe.u.data.length = p - custom; | 226 | iwe.u.data.length = p - custom; |
203 | if (iwe.u.data.length) | 227 | if (iwe.u.data.length) |
204 | start = iwe_stream_add_point(start, stop, &iwe, custom); | 228 | start = iwe_stream_add_point(start, stop, &iwe, custom); |
@@ -228,13 +252,13 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | |||
228 | ev = ipw2100_translate_scan(ieee, ev, stop, network); | 252 | ev = ipw2100_translate_scan(ieee, ev, stop, network); |
229 | else | 253 | else |
230 | IEEE80211_DEBUG_SCAN("Not showing network '%s (" | 254 | IEEE80211_DEBUG_SCAN("Not showing network '%s (" |
231 | MAC_FMT ")' due to age (%lums).\n", | 255 | MAC_FMT ")' due to age (%dms).\n", |
232 | escape_essid(network->ssid, | 256 | escape_essid(network->ssid, |
233 | network->ssid_len), | 257 | network->ssid_len), |
234 | MAC_ARG(network->bssid), | 258 | MAC_ARG(network->bssid), |
235 | (jiffies - | 259 | jiffies_to_msecs(jiffies - |
236 | network->last_scanned) / (HZ / | 260 | network-> |
237 | 100)); | 261 | last_scanned)); |
238 | } | 262 | } |
239 | 263 | ||
240 | spin_unlock_irqrestore(&ieee->lock, flags); | 264 | spin_unlock_irqrestore(&ieee->lock, flags); |
@@ -258,6 +282,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, | |||
258 | }; | 282 | }; |
259 | int i, key, key_provided, len; | 283 | int i, key, key_provided, len; |
260 | struct ieee80211_crypt_data **crypt; | 284 | struct ieee80211_crypt_data **crypt; |
285 | int host_crypto = ieee->host_encrypt || ieee->host_decrypt; | ||
261 | 286 | ||
262 | IEEE80211_DEBUG_WX("SET_ENCODE\n"); | 287 | IEEE80211_DEBUG_WX("SET_ENCODE\n"); |
263 | 288 | ||
@@ -298,15 +323,17 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, | |||
298 | 323 | ||
299 | if (i == WEP_KEYS) { | 324 | if (i == WEP_KEYS) { |
300 | sec.enabled = 0; | 325 | sec.enabled = 0; |
326 | sec.encrypt = 0; | ||
301 | sec.level = SEC_LEVEL_0; | 327 | sec.level = SEC_LEVEL_0; |
302 | sec.flags |= SEC_ENABLED | SEC_LEVEL; | 328 | sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT; |
303 | } | 329 | } |
304 | 330 | ||
305 | goto done; | 331 | goto done; |
306 | } | 332 | } |
307 | 333 | ||
308 | sec.enabled = 1; | 334 | sec.enabled = 1; |
309 | sec.flags |= SEC_ENABLED; | 335 | sec.encrypt = 1; |
336 | sec.flags |= SEC_ENABLED | SEC_ENCRYPT; | ||
310 | 337 | ||
311 | if (*crypt != NULL && (*crypt)->ops != NULL && | 338 | if (*crypt != NULL && (*crypt)->ops != NULL && |
312 | strcmp((*crypt)->ops->name, "WEP") != 0) { | 339 | strcmp((*crypt)->ops->name, "WEP") != 0) { |
@@ -315,7 +342,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, | |||
315 | ieee80211_crypt_delayed_deinit(ieee, crypt); | 342 | ieee80211_crypt_delayed_deinit(ieee, crypt); |
316 | } | 343 | } |
317 | 344 | ||
318 | if (*crypt == NULL) { | 345 | if (*crypt == NULL && host_crypto) { |
319 | struct ieee80211_crypt_data *new_crypt; | 346 | struct ieee80211_crypt_data *new_crypt; |
320 | 347 | ||
321 | /* take WEP into use */ | 348 | /* take WEP into use */ |
@@ -355,49 +382,56 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, | |||
355 | key, escape_essid(sec.keys[key], len), | 382 | key, escape_essid(sec.keys[key], len), |
356 | erq->length, len); | 383 | erq->length, len); |
357 | sec.key_sizes[key] = len; | 384 | sec.key_sizes[key] = len; |
358 | (*crypt)->ops->set_key(sec.keys[key], len, NULL, | 385 | if (*crypt) |
359 | (*crypt)->priv); | 386 | (*crypt)->ops->set_key(sec.keys[key], len, NULL, |
387 | (*crypt)->priv); | ||
360 | sec.flags |= (1 << key); | 388 | sec.flags |= (1 << key); |
361 | /* This ensures a key will be activated if no key is | 389 | /* This ensures a key will be activated if no key is |
362 | * explicitly set */ | 390 | * explicitly set */ |
363 | if (key == sec.active_key) | 391 | if (key == sec.active_key) |
364 | sec.flags |= SEC_ACTIVE_KEY; | 392 | sec.flags |= SEC_ACTIVE_KEY; |
393 | |||
365 | } else { | 394 | } else { |
366 | len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, | 395 | if (host_crypto) { |
367 | NULL, (*crypt)->priv); | 396 | len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, |
368 | if (len == 0) { | 397 | NULL, (*crypt)->priv); |
369 | /* Set a default key of all 0 */ | 398 | if (len == 0) { |
370 | IEEE80211_DEBUG_WX("Setting key %d to all zero.\n", | 399 | /* Set a default key of all 0 */ |
371 | key); | 400 | IEEE80211_DEBUG_WX("Setting key %d to all " |
372 | memset(sec.keys[key], 0, 13); | 401 | "zero.\n", key); |
373 | (*crypt)->ops->set_key(sec.keys[key], 13, NULL, | 402 | memset(sec.keys[key], 0, 13); |
374 | (*crypt)->priv); | 403 | (*crypt)->ops->set_key(sec.keys[key], 13, NULL, |
375 | sec.key_sizes[key] = 13; | 404 | (*crypt)->priv); |
376 | sec.flags |= (1 << key); | 405 | sec.key_sizes[key] = 13; |
406 | sec.flags |= (1 << key); | ||
407 | } | ||
377 | } | 408 | } |
378 | |||
379 | /* No key data - just set the default TX key index */ | 409 | /* No key data - just set the default TX key index */ |
380 | if (key_provided) { | 410 | if (key_provided) { |
381 | IEEE80211_DEBUG_WX | 411 | IEEE80211_DEBUG_WX("Setting key %d to default Tx " |
382 | ("Setting key %d to default Tx key.\n", key); | 412 | "key.\n", key); |
383 | ieee->tx_keyidx = key; | 413 | ieee->tx_keyidx = key; |
384 | sec.active_key = key; | 414 | sec.active_key = key; |
385 | sec.flags |= SEC_ACTIVE_KEY; | 415 | sec.flags |= SEC_ACTIVE_KEY; |
386 | } | 416 | } |
387 | } | 417 | } |
388 | 418 | if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) { | |
389 | done: | 419 | ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); |
390 | ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); | 420 | sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : |
391 | sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; | 421 | WLAN_AUTH_SHARED_KEY; |
392 | sec.flags |= SEC_AUTH_MODE; | 422 | sec.flags |= SEC_AUTH_MODE; |
393 | IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ? | 423 | IEEE80211_DEBUG_WX("Auth: %s\n", |
394 | "OPEN" : "SHARED KEY"); | 424 | sec.auth_mode == WLAN_AUTH_OPEN ? |
425 | "OPEN" : "SHARED KEY"); | ||
426 | } | ||
395 | 427 | ||
396 | /* For now we just support WEP, so only set that security level... | 428 | /* For now we just support WEP, so only set that security level... |
397 | * TODO: When WPA is added this is one place that needs to change */ | 429 | * TODO: When WPA is added this is one place that needs to change */ |
398 | sec.flags |= SEC_LEVEL; | 430 | sec.flags |= SEC_LEVEL; |
399 | sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ | 431 | sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ |
432 | sec.encode_alg[key] = SEC_ALG_WEP; | ||
400 | 433 | ||
434 | done: | ||
401 | if (ieee->set_security) | 435 | if (ieee->set_security) |
402 | ieee->set_security(dev, &sec); | 436 | ieee->set_security(dev, &sec); |
403 | 437 | ||
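
A side effect worth noting in the hunks above: every sec field assignment is now paired with a bit in sec.flags (SEC_ENCRYPT joins SEC_ENABLED, and auth_mode is flagged only when the caller actually passed IW_ENCODE_OPEN or IW_ENCODE_RESTRICTED), because the driver's set_security() callback is expected to act only on flagged fields. A self-contained mock of that consumer side; the bit values and demo_* names are illustrative, the real SEC_* masks live in net/ieee80211.h:

	#include <stdio.h>

	enum {	/* illustrative bit assignments only */
		SEC_ACTIVE_KEY = 1 << 0, SEC_AUTH_MODE = 1 << 1,
		SEC_ENABLED = 1 << 2, SEC_ENCRYPT = 1 << 3, SEC_LEVEL = 1 << 4,
	};

	struct demo_sec { unsigned flags; int encrypt, level, active_key; };

	static void demo_set_security(const struct demo_sec *sec)
	{
		/* only flagged fields are meaningful to the driver */
		if (sec->flags & SEC_ENCRYPT)
			printf("hw: encryption %s\n", sec->encrypt ? "on" : "off");
		if (sec->flags & SEC_LEVEL)
			printf("hw: security level %d\n", sec->level);
		if (sec->flags & SEC_ACTIVE_KEY)
			printf("hw: tx key %d\n", sec->active_key);
	}

	int main(void)
	{
		struct demo_sec sec = { .flags = SEC_ENCRYPT | SEC_LEVEL,
					.encrypt = 1, .level = 1 };
		demo_set_security(&sec);
		return 0;
	}
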
@@ -422,6 +456,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, | |||
422 | struct iw_point *erq = &(wrqu->encoding); | 456 | struct iw_point *erq = &(wrqu->encoding); |
423 | int len, key; | 457 | int len, key; |
424 | struct ieee80211_crypt_data *crypt; | 458 | struct ieee80211_crypt_data *crypt; |
459 | struct ieee80211_security *sec = &ieee->sec; | ||
425 | 460 | ||
426 | IEEE80211_DEBUG_WX("GET_ENCODE\n"); | 461 | IEEE80211_DEBUG_WX("GET_ENCODE\n"); |
427 | 462 | ||
@@ -436,23 +471,16 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, | |||
436 | crypt = ieee->crypt[key]; | 471 | crypt = ieee->crypt[key]; |
437 | erq->flags = key + 1; | 472 | erq->flags = key + 1; |
438 | 473 | ||
439 | if (crypt == NULL || crypt->ops == NULL) { | 474 | if (!sec->enabled) { |
440 | erq->length = 0; | 475 | erq->length = 0; |
441 | erq->flags |= IW_ENCODE_DISABLED; | 476 | erq->flags |= IW_ENCODE_DISABLED; |
442 | return 0; | 477 | return 0; |
443 | } | 478 | } |
444 | 479 | ||
445 | if (strcmp(crypt->ops->name, "WEP") != 0) { | 480 | len = sec->key_sizes[key]; |
446 | /* only WEP is supported with wireless extensions, so just | 481 | memcpy(keybuf, sec->keys[key], len); |
447 | * report that encryption is used */ | ||
448 | erq->length = 0; | ||
449 | erq->flags |= IW_ENCODE_ENABLED; | ||
450 | return 0; | ||
451 | } | ||
452 | 482 | ||
453 | len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv); | ||
454 | erq->length = (len >= 0 ? len : 0); | 483 | erq->length = (len >= 0 ? len : 0); |
455 | |||
456 | erq->flags |= IW_ENCODE_ENABLED; | 484 | erq->flags |= IW_ENCODE_ENABLED; |
457 | 485 | ||
458 | if (ieee->open_wep) | 486 | if (ieee->open_wep) |
@@ -463,6 +491,240 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, | |||
463 | return 0; | 491 | return 0; |
464 | } | 492 | } |
465 | 493 | ||
494 | int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, | ||
495 | struct iw_request_info *info, | ||
496 | union iwreq_data *wrqu, char *extra) | ||
497 | { | ||
498 | struct net_device *dev = ieee->dev; | ||
499 | struct iw_point *encoding = &wrqu->encoding; | ||
500 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
501 | int i, idx, ret = 0; | ||
502 | int group_key = 0; | ||
503 | const char *alg, *module; | ||
504 | struct ieee80211_crypto_ops *ops; | ||
505 | struct ieee80211_crypt_data **crypt; | ||
506 | |||
507 | struct ieee80211_security sec = { | ||
508 | .flags = 0, | ||
509 | }; | ||
510 | |||
511 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
512 | if (idx) { | ||
513 | if (idx < 1 || idx > WEP_KEYS) | ||
514 | return -EINVAL; | ||
515 | idx--; | ||
516 | } else | ||
517 | idx = ieee->tx_keyidx; | ||
518 | |||
519 | if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { | ||
520 | crypt = &ieee->crypt[idx]; | ||
521 | group_key = 1; | ||
522 | } else { | ||
523 | if (idx != 0) | ||
524 | return -EINVAL; | ||
525 | if (ieee->iw_mode == IW_MODE_INFRA) | ||
526 | crypt = &ieee->crypt[idx]; | ||
527 | else | ||
528 | return -EINVAL; | ||
529 | } | ||
530 | |||
531 | sec.flags |= SEC_ENABLED | SEC_ENCRYPT; | ||
532 | if ((encoding->flags & IW_ENCODE_DISABLED) || | ||
533 | ext->alg == IW_ENCODE_ALG_NONE) { | ||
534 | if (*crypt) | ||
535 | ieee80211_crypt_delayed_deinit(ieee, crypt); | ||
536 | |||
537 | for (i = 0; i < WEP_KEYS; i++) | ||
538 | if (ieee->crypt[i] != NULL) | ||
539 | break; | ||
540 | |||
541 | if (i == WEP_KEYS) { | ||
542 | sec.enabled = 0; | ||
543 | sec.encrypt = 0; | ||
544 | sec.level = SEC_LEVEL_0; | ||
545 | sec.flags |= SEC_LEVEL; | ||
546 | } | ||
547 | goto done; | ||
548 | } | ||
549 | |||
550 | sec.enabled = 1; | ||
551 | sec.encrypt = 1; | ||
552 | |||
553 | if (group_key ? !ieee->host_mc_decrypt : | ||
554 | !(ieee->host_encrypt || ieee->host_decrypt || | ||
555 | ieee->host_encrypt_msdu)) | ||
556 | goto skip_host_crypt; | ||
557 | |||
558 | switch (ext->alg) { | ||
559 | case IW_ENCODE_ALG_WEP: | ||
560 | alg = "WEP"; | ||
561 | module = "ieee80211_crypt_wep"; | ||
562 | break; | ||
563 | case IW_ENCODE_ALG_TKIP: | ||
564 | alg = "TKIP"; | ||
565 | module = "ieee80211_crypt_tkip"; | ||
566 | break; | ||
567 | case IW_ENCODE_ALG_CCMP: | ||
568 | alg = "CCMP"; | ||
569 | module = "ieee80211_crypt_ccmp"; | ||
570 | break; | ||
571 | default: | ||
572 | IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", | ||
573 | dev->name, ext->alg); | ||
574 | ret = -EINVAL; | ||
575 | goto done; | ||
576 | } | ||
577 | |||
578 | ops = ieee80211_get_crypto_ops(alg); | ||
579 | if (ops == NULL) { | ||
580 | request_module(module); | ||
581 | ops = ieee80211_get_crypto_ops(alg); | ||
582 | } | ||
583 | if (ops == NULL) { | ||
584 | IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", | ||
585 | dev->name, ext->alg); | ||
586 | ret = -EINVAL; | ||
587 | goto done; | ||
588 | } | ||
589 | |||
590 | if (*crypt == NULL || (*crypt)->ops != ops) { | ||
591 | struct ieee80211_crypt_data *new_crypt; | ||
592 | |||
593 | ieee80211_crypt_delayed_deinit(ieee, crypt); | ||
594 | |||
595 | new_crypt = (struct ieee80211_crypt_data *) | ||
596 | kmalloc(sizeof(*new_crypt), GFP_KERNEL); | ||
597 | if (new_crypt == NULL) { | ||
598 | ret = -ENOMEM; | ||
599 | goto done; | ||
600 | } | ||
601 | memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); | ||
602 | new_crypt->ops = ops; | ||
603 | if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) | ||
604 | new_crypt->priv = new_crypt->ops->init(idx); | ||
605 | if (new_crypt->priv == NULL) { | ||
606 | kfree(new_crypt); | ||
607 | ret = -EINVAL; | ||
608 | goto done; | ||
609 | } | ||
610 | *crypt = new_crypt; | ||
611 | } | ||
612 | |||
613 | if (ext->key_len > 0 && (*crypt)->ops->set_key && | ||
614 | (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, | ||
615 | (*crypt)->priv) < 0) { | ||
616 | IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name); | ||
617 | ret = -EINVAL; | ||
618 | goto done; | ||
619 | } | ||
620 | |||
621 | skip_host_crypt: | ||
622 | if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { | ||
623 | ieee->tx_keyidx = idx; | ||
624 | sec.active_key = idx; | ||
625 | sec.flags |= SEC_ACTIVE_KEY; | ||
626 | } | ||
627 | |||
628 | if (ext->alg != IW_ENCODE_ALG_NONE) { | ||
629 | memcpy(sec.keys[idx], ext->key, ext->key_len); | ||
630 | sec.key_sizes[idx] = ext->key_len; | ||
631 | sec.flags |= (1 << idx); | ||
632 | if (ext->alg == IW_ENCODE_ALG_WEP) { | ||
633 | sec.encode_alg[idx] = SEC_ALG_WEP; | ||
634 | sec.flags |= SEC_LEVEL; | ||
635 | sec.level = SEC_LEVEL_1; | ||
636 | } else if (ext->alg == IW_ENCODE_ALG_TKIP) { | ||
637 | sec.encode_alg[idx] = SEC_ALG_TKIP; | ||
638 | sec.flags |= SEC_LEVEL; | ||
639 | sec.level = SEC_LEVEL_2; | ||
640 | } else if (ext->alg == IW_ENCODE_ALG_CCMP) { | ||
641 | sec.encode_alg[idx] = SEC_ALG_CCMP; | ||
642 | sec.flags |= SEC_LEVEL; | ||
643 | sec.level = SEC_LEVEL_3; | ||
644 | } | ||
645 | /* Don't set sec level for group keys. */ | ||
646 | if (group_key) | ||
647 | sec.flags &= ~SEC_LEVEL; | ||
648 | } | ||
649 | done: | ||
650 | if (ieee->set_security) | ||
651 | ieee->set_security(ieee->dev, &sec); | ||
652 | |||
653 | /* | ||
654 | * Do not reset port if card is in Managed mode since resetting will | ||
655 | * generate a new IEEE 802.11 authentication, which may end up looping |||
656 | * with IEEE 802.1X. If your hardware requires a reset after WEP | ||
657 | * configuration (for example... Prism2), implement the reset_port in | ||
658 | * the callbacks structures used to initialize the 802.11 stack. | ||
659 | */ | ||
660 | if (ieee->reset_on_keychange && | ||
661 | ieee->iw_mode != IW_MODE_INFRA && | ||
662 | ieee->reset_port && ieee->reset_port(dev)) { | ||
663 | IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name); | ||
664 | return -EINVAL; | ||
665 | } | ||
666 | |||
667 | return ret; | ||
668 | } | ||
669 | |||
670 | int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, | ||
671 | struct iw_request_info *info, | ||
672 | union iwreq_data *wrqu, char *extra) | ||
673 | { | ||
674 | struct iw_point *encoding = &wrqu->encoding; | ||
675 | struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; | ||
676 | struct ieee80211_security *sec = &ieee->sec; | ||
677 | int idx, max_key_len; | ||
678 | |||
679 | max_key_len = encoding->length - sizeof(*ext); | ||
680 | if (max_key_len < 0) | ||
681 | return -EINVAL; | ||
682 | |||
683 | idx = encoding->flags & IW_ENCODE_INDEX; | ||
684 | if (idx) { | ||
685 | if (idx < 1 || idx > WEP_KEYS) | ||
686 | return -EINVAL; | ||
687 | idx--; | ||
688 | } else | ||
689 | idx = ieee->tx_keyidx; | ||
690 | |||
691 | if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)) |||
692 | if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA) | ||
693 | return -EINVAL; | ||
694 | |||
695 | encoding->flags = idx + 1; | ||
696 | memset(ext, 0, sizeof(*ext)); | ||
697 | |||
698 | if (!sec->enabled) { | ||
699 | ext->alg = IW_ENCODE_ALG_NONE; | ||
700 | ext->key_len = 0; | ||
701 | encoding->flags |= IW_ENCODE_DISABLED; | ||
702 | } else { | ||
703 | if (sec->encode_alg[idx] == SEC_ALG_WEP) | ||
704 | ext->alg = IW_ENCODE_ALG_WEP; | ||
705 | else if (sec->encode_alg[idx] == SEC_ALG_TKIP) | ||
706 | ext->alg = IW_ENCODE_ALG_TKIP; | ||
707 | else if (sec->encode_alg[idx] == SEC_ALG_CCMP) | ||
708 | ext->alg = IW_ENCODE_ALG_CCMP; | ||
709 | else | ||
710 | return -EINVAL; | ||
711 | |||
712 | ext->key_len = sec->key_sizes[idx]; | ||
713 | memcpy(ext->key, sec->keys[idx], ext->key_len); | ||
714 | encoding->flags |= IW_ENCODE_ENABLED; | ||
715 | if (ext->key_len && | ||
716 | (ext->alg == IW_ENCODE_ALG_TKIP || | ||
717 | ext->alg == IW_ENCODE_ALG_CCMP)) | ||
718 | ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; | ||
719 | |||
720 | } | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | EXPORT_SYMBOL(ieee80211_wx_set_encodeext); | ||
726 | EXPORT_SYMBOL(ieee80211_wx_get_encodeext); | ||
727 | |||
466 | EXPORT_SYMBOL(ieee80211_wx_get_scan); | 728 | EXPORT_SYMBOL(ieee80211_wx_get_scan); |
467 | EXPORT_SYMBOL(ieee80211_wx_set_encode); | 729 | EXPORT_SYMBOL(ieee80211_wx_set_encode); |
468 | EXPORT_SYMBOL(ieee80211_wx_get_encode); | 730 | EXPORT_SYMBOL(ieee80211_wx_get_encode); |
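
The two new handlers above implement the WE-18 extended-encoding ioctls. A hedged userspace sketch of driving ieee80211_wx_set_encodeext() through SIOCSIWENCODEEXT, assuming a WE-18 linux/wireless.h; the interface name and key are placeholders. Note the 1-based key index in encoding.flags, which the handler decrements, and that pairwise keys must land on index 0:

	#include <string.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/wireless.h>

	static int set_pairwise_tkip_key(int sock, const char *ifname,
					 const unsigned char *key,
					 unsigned short key_len)
	{
		struct iw_encode_ext *ext;
		struct iwreq wrq;
		int ret;

		ext = calloc(1, sizeof(*ext) + key_len);
		if (!ext)
			return -1;
		ext->alg = IW_ENCODE_ALG_TKIP;
		ext->ext_flags = IW_ENCODE_EXT_SET_TX_KEY; /* pairwise + TX */
		ext->key_len = key_len;
		memcpy(ext->key, key, key_len);

		memset(&wrq, 0, sizeof(wrq));
		strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);
		wrq.u.encoding.flags = 1;	/* key index 1 -> idx 0 */
		wrq.u.encoding.pointer = ext;
		wrq.u.encoding.length = sizeof(*ext) + key_len;

		ret = ioctl(sock, SIOCSIWENCODEEXT, &wrq);
		free(ext);
		return ret;
	}
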
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 74f2207e131a..4ec4b2ca6ab1 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) | |||
715 | break; | 715 | break; |
716 | ret = 0; | 716 | ret = 0; |
717 | if (ifa->ifa_mask != sin->sin_addr.s_addr) { | 717 | if (ifa->ifa_mask != sin->sin_addr.s_addr) { |
718 | u32 old_mask = ifa->ifa_mask; | ||
718 | inet_del_ifa(in_dev, ifap, 0); | 719 | inet_del_ifa(in_dev, ifap, 0); |
719 | ifa->ifa_mask = sin->sin_addr.s_addr; | 720 | ifa->ifa_mask = sin->sin_addr.s_addr; |
720 | ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); | 721 | ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); |
@@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) | |||
728 | if ((dev->flags & IFF_BROADCAST) && | 729 | if ((dev->flags & IFF_BROADCAST) && |
729 | (ifa->ifa_prefixlen < 31) && | 730 | (ifa->ifa_prefixlen < 31) && |
730 | (ifa->ifa_broadcast == | 731 | (ifa->ifa_broadcast == |
731 | (ifa->ifa_local|~ifa->ifa_mask))) { | 732 | (ifa->ifa_local|~old_mask))) { |
732 | ifa->ifa_broadcast = (ifa->ifa_local | | 733 | ifa->ifa_broadcast = (ifa->ifa_local | |
733 | ~sin->sin_addr.s_addr); | 734 | ~sin->sin_addr.s_addr); |
734 | } | 735 | } |
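
The devinet fix above is subtle: the "is the broadcast address derived from the netmask?" test ran after ifa_mask had already been overwritten, so it compared against the new mask and essentially never fired. Snapshotting old_mask restores the intended behaviour, which a few lines of userspace arithmetic illustrate; the addresses are examples only:

	#include <stdio.h>
	#include <arpa/inet.h>

	int main(void)
	{
		in_addr_t local = inet_addr("192.168.1.5");
		in_addr_t old_mask = inet_addr("255.255.255.0");
		in_addr_t new_mask = inet_addr("255.255.0.0");
		in_addr_t brd = inet_addr("192.168.1.255");

		/* must test against the mask brd was derived from;
		 * against new_mask the condition would be false */
		if (brd == (local | ~old_mask))
			brd = local | ~new_mask;

		struct in_addr a = { .s_addr = brd };
		printf("%s\n", inet_ntoa(a));	/* 192.168.255.255 */
		return 0;
	}
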
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 0093ea08c7f5..66247f38b371 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) | |||
2404 | prefix = htonl(l->key); | 2404 | prefix = htonl(l->key); |
2405 | 2405 | ||
2406 | list_for_each_entry_rcu(fa, &li->falh, fa_list) { | 2406 | list_for_each_entry_rcu(fa, &li->falh, fa_list) { |
2407 | const struct fib_info *fi = rcu_dereference(fa->fa_info); | 2407 | const struct fib_info *fi = fa->fa_info; |
2408 | unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); | 2408 | unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); |
2409 | 2409 | ||
2410 | if (fa->fa_type == RTN_BROADCAST | 2410 | if (fa->fa_type == RTN_BROADCAST |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 90dca711ac9f..175e093ec564 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops) | |||
1108 | struct inet_sock *inet; | 1108 | struct inet_sock *inet; |
1109 | int i; | 1109 | int i; |
1110 | 1110 | ||
1111 | for (i = 0; i < NR_CPUS; i++) { | 1111 | for_each_cpu(i) { |
1112 | int err; | 1112 | int err; |
1113 | 1113 | ||
1114 | if (!cpu_possible(i)) | ||
1115 | continue; | ||
1116 | |||
1117 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, | 1114 | err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, |
1118 | &per_cpu(__icmp_socket, i)); | 1115 | &per_cpu(__icmp_socket, i)); |
1119 | 1116 | ||
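
The icmp_init() hunk is the first of several identical conversions in this series (net/ipv4/proc.c, net/ipv6/icmp.c, net/ipv6/proc.c and net/sctp/proc.c below get the same treatment). Assuming the cpumask API of this era, for_each_cpu(i) expanded to for_each_cpu_mask(i, cpu_possible_map), so the open-coded

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		/* ... */
	}

collapses to

	for_each_cpu(i) {
		/* ... */
	}

visiting exactly the same set of CPUs with less boilerplate; the macro was later renamed for_each_possible_cpu().
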
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3f1a263e1249..87e350069abb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -391,6 +391,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
391 | to->nfct = from->nfct; | 391 | to->nfct = from->nfct; |
392 | nf_conntrack_get(to->nfct); | 392 | nf_conntrack_get(to->nfct); |
393 | to->nfctinfo = from->nfctinfo; | 393 | to->nfctinfo = from->nfctinfo; |
394 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | ||
395 | to->ipvs_property = from->ipvs_property; | ||
396 | #endif | ||
394 | #ifdef CONFIG_BRIDGE_NETFILTER | 397 | #ifdef CONFIG_BRIDGE_NETFILTER |
395 | nf_bridge_put(to->nf_bridge); | 398 | nf_bridge_put(to->nf_bridge); |
396 | to->nf_bridge = from->nf_bridge; | 399 | to->nf_bridge = from->nf_bridge; |
@@ -1020,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1020 | int alloclen; | 1023 | int alloclen; |
1021 | 1024 | ||
1022 | skb_prev = skb; | 1025 | skb_prev = skb; |
1023 | if (skb_prev) | 1026 | fraggap = skb_prev->len - maxfraglen; |
1024 | fraggap = skb_prev->len - maxfraglen; | ||
1025 | else | ||
1026 | fraggap = 0; | ||
1027 | 1027 | ||
1028 | alloclen = fragheaderlen + hh_len + fraggap + 15; | 1028 | alloclen = fragheaderlen + hh_len + fraggap + 15; |
1029 | skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); | 1029 | skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); |
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 07a80b56e8dc..422ab68ee7fb 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include <linux/netfilter_ipv4/ip_conntrack_core.h> | 50 | #include <linux/netfilter_ipv4/ip_conntrack_core.h> |
51 | #include <linux/netfilter_ipv4/listhelp.h> | 51 | #include <linux/netfilter_ipv4/listhelp.h> |
52 | 52 | ||
53 | #define IP_CONNTRACK_VERSION "2.3" | 53 | #define IP_CONNTRACK_VERSION "2.4" |
54 | 54 | ||
55 | #if 0 | 55 | #if 0 |
56 | #define DEBUGP printk | 56 | #define DEBUGP printk |
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); | |||
148 | static int ip_conntrack_hash_rnd_initted; | 148 | static int ip_conntrack_hash_rnd_initted; |
149 | static unsigned int ip_conntrack_hash_rnd; | 149 | static unsigned int ip_conntrack_hash_rnd; |
150 | 150 | ||
151 | static u_int32_t | 151 | static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, |
152 | hash_conntrack(const struct ip_conntrack_tuple *tuple) | 152 | unsigned int size, unsigned int rnd) |
153 | { | 153 | { |
154 | #if 0 | ||
155 | dump_tuple(tuple); | ||
156 | #endif | ||
157 | return (jhash_3words(tuple->src.ip, | 154 | return (jhash_3words(tuple->src.ip, |
158 | (tuple->dst.ip ^ tuple->dst.protonum), | 155 | (tuple->dst.ip ^ tuple->dst.protonum), |
159 | (tuple->src.u.all | (tuple->dst.u.all << 16)), | 156 | (tuple->src.u.all | (tuple->dst.u.all << 16)), |
160 | ip_conntrack_hash_rnd) % ip_conntrack_htable_size); | 157 | rnd) % size); |
158 | } | ||
159 | |||
160 | static u_int32_t | ||
161 | hash_conntrack(const struct ip_conntrack_tuple *tuple) | ||
162 | { | ||
163 | return __hash_conntrack(tuple, ip_conntrack_htable_size, | ||
164 | ip_conntrack_hash_rnd); | ||
161 | } | 165 | } |
162 | 166 | ||
163 | int | 167 | int |
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data) | |||
1341 | return 1; | 1345 | return 1; |
1342 | } | 1346 | } |
1343 | 1347 | ||
1344 | static void free_conntrack_hash(void) | 1348 | static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size) |
1345 | { | 1349 | { |
1346 | if (ip_conntrack_vmalloc) | 1350 | if (vmalloced) |
1347 | vfree(ip_conntrack_hash); | 1351 | vfree(hash); |
1348 | else | 1352 | else |
1349 | free_pages((unsigned long)ip_conntrack_hash, | 1353 | free_pages((unsigned long)hash, |
1350 | get_order(sizeof(struct list_head) | 1354 | get_order(sizeof(struct list_head) * size)); |
1351 | * ip_conntrack_htable_size)); | ||
1352 | } | 1355 | } |
1353 | 1356 | ||
1354 | void ip_conntrack_flush() | 1357 | void ip_conntrack_flush() |
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void) | |||
1378 | ip_conntrack_flush(); | 1381 | ip_conntrack_flush(); |
1379 | kmem_cache_destroy(ip_conntrack_cachep); | 1382 | kmem_cache_destroy(ip_conntrack_cachep); |
1380 | kmem_cache_destroy(ip_conntrack_expect_cachep); | 1383 | kmem_cache_destroy(ip_conntrack_expect_cachep); |
1381 | free_conntrack_hash(); | 1384 | free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, |
1385 | ip_conntrack_htable_size); | ||
1382 | nf_unregister_sockopt(&so_getorigdst); | 1386 | nf_unregister_sockopt(&so_getorigdst); |
1383 | } | 1387 | } |
1384 | 1388 | ||
1385 | static int hashsize; | 1389 | static struct list_head *alloc_hashtable(int size, int *vmalloced) |
1386 | module_param(hashsize, int, 0400); | 1390 | { |
1391 | struct list_head *hash; | ||
1392 | unsigned int i; | ||
1393 | |||
1394 | *vmalloced = 0; | ||
1395 | hash = (void*)__get_free_pages(GFP_KERNEL, | ||
1396 | get_order(sizeof(struct list_head) | ||
1397 | * size)); | ||
1398 | if (!hash) { | ||
1399 | *vmalloced = 1; | ||
1400 | printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); | ||
1401 | hash = vmalloc(sizeof(struct list_head) * size); | ||
1402 | } | ||
1403 | |||
1404 | if (hash) | ||
1405 | for (i = 0; i < size; i++) | ||
1406 | INIT_LIST_HEAD(&hash[i]); | ||
1407 | |||
1408 | return hash; | ||
1409 | } | ||
1410 | |||
1411 | int set_hashsize(const char *val, struct kernel_param *kp) | ||
1412 | { | ||
1413 | int i, bucket, hashsize, vmalloced; | ||
1414 | int old_vmalloced, old_size; | ||
1415 | int rnd; | ||
1416 | struct list_head *hash, *old_hash; | ||
1417 | struct ip_conntrack_tuple_hash *h; | ||
1418 | |||
1419 | /* On boot, we can set this without any fancy locking. */ | ||
1420 | if (!ip_conntrack_htable_size) | ||
1421 | return param_set_int(val, kp); | ||
1422 | |||
1423 | hashsize = simple_strtol(val, NULL, 0); | ||
1424 | if (!hashsize) | ||
1425 | return -EINVAL; | ||
1426 | |||
1427 | hash = alloc_hashtable(hashsize, &vmalloced); | ||
1428 | if (!hash) | ||
1429 | return -ENOMEM; | ||
1430 | |||
1431 | /* We have to rehash for the new table anyway, so we can also |||
1432 | * use a new random seed */ | ||
1433 | get_random_bytes(&rnd, 4); | ||
1434 | |||
1435 | write_lock_bh(&ip_conntrack_lock); | ||
1436 | for (i = 0; i < ip_conntrack_htable_size; i++) { | ||
1437 | while (!list_empty(&ip_conntrack_hash[i])) { | ||
1438 | h = list_entry(ip_conntrack_hash[i].next, | ||
1439 | struct ip_conntrack_tuple_hash, list); | ||
1440 | list_del(&h->list); | ||
1441 | bucket = __hash_conntrack(&h->tuple, hashsize, rnd); | ||
1442 | list_add_tail(&h->list, &hash[bucket]); | ||
1443 | } | ||
1444 | } | ||
1445 | old_size = ip_conntrack_htable_size; | ||
1446 | old_vmalloced = ip_conntrack_vmalloc; | ||
1447 | old_hash = ip_conntrack_hash; | ||
1448 | |||
1449 | ip_conntrack_htable_size = hashsize; | ||
1450 | ip_conntrack_vmalloc = vmalloced; | ||
1451 | ip_conntrack_hash = hash; | ||
1452 | ip_conntrack_hash_rnd = rnd; | ||
1453 | write_unlock_bh(&ip_conntrack_lock); | ||
1454 | |||
1455 | free_conntrack_hash(old_hash, old_vmalloced, old_size); | ||
1456 | return 0; | ||
1457 | } | ||
1458 | |||
1459 | module_param_call(hashsize, set_hashsize, param_get_uint, | ||
1460 | &ip_conntrack_htable_size, 0600); | ||
1387 | 1461 | ||
1388 | int __init ip_conntrack_init(void) | 1462 | int __init ip_conntrack_init(void) |
1389 | { | 1463 | { |
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void) | |||
1392 | 1466 | ||
1393 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB | 1467 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB |
1394 | * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ | 1468 | * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ |
1395 | if (hashsize) { | 1469 | if (!ip_conntrack_htable_size) { |
1396 | ip_conntrack_htable_size = hashsize; | ||
1397 | } else { | ||
1398 | ip_conntrack_htable_size | 1470 | ip_conntrack_htable_size |
1399 | = (((num_physpages << PAGE_SHIFT) / 16384) | 1471 | = (((num_physpages << PAGE_SHIFT) / 16384) |
1400 | / sizeof(struct list_head)); | 1472 | / sizeof(struct list_head)); |
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void) | |||
1416 | return ret; | 1488 | return ret; |
1417 | } | 1489 | } |
1418 | 1490 | ||
1419 | /* AK: the hash table is twice as big than needed because it | 1491 | ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size, |
1420 | uses list_head. it would be much nicer to caches to use a | 1492 | &ip_conntrack_vmalloc); |
1421 | single pointer list head here. */ | ||
1422 | ip_conntrack_vmalloc = 0; | ||
1423 | ip_conntrack_hash | ||
1424 | =(void*)__get_free_pages(GFP_KERNEL, | ||
1425 | get_order(sizeof(struct list_head) | ||
1426 | *ip_conntrack_htable_size)); | ||
1427 | if (!ip_conntrack_hash) { | ||
1428 | ip_conntrack_vmalloc = 1; | ||
1429 | printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n"); | ||
1430 | ip_conntrack_hash = vmalloc(sizeof(struct list_head) | ||
1431 | * ip_conntrack_htable_size); | ||
1432 | } | ||
1433 | if (!ip_conntrack_hash) { | 1493 | if (!ip_conntrack_hash) { |
1434 | printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); | 1494 | printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); |
1435 | goto err_unreg_sockopt; | 1495 | goto err_unreg_sockopt; |
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void) | |||
1461 | ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; | 1521 | ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; |
1462 | write_unlock_bh(&ip_conntrack_lock); | 1522 | write_unlock_bh(&ip_conntrack_lock); |
1463 | 1523 | ||
1464 | for (i = 0; i < ip_conntrack_htable_size; i++) | ||
1465 | INIT_LIST_HEAD(&ip_conntrack_hash[i]); | ||
1466 | |||
1467 | /* For use by ipt_REJECT */ | 1524 | /* For use by ipt_REJECT */ |
1468 | ip_ct_attach = ip_conntrack_attach; | 1525 | ip_ct_attach = ip_conntrack_attach; |
1469 | 1526 | ||
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void) | |||
1478 | err_free_conntrack_slab: | 1535 | err_free_conntrack_slab: |
1479 | kmem_cache_destroy(ip_conntrack_cachep); | 1536 | kmem_cache_destroy(ip_conntrack_cachep); |
1480 | err_free_hash: | 1537 | err_free_hash: |
1481 | free_conntrack_hash(); | 1538 | free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, |
1539 | ip_conntrack_htable_size); | ||
1482 | err_unreg_sockopt: | 1540 | err_unreg_sockopt: |
1483 | nf_unregister_sockopt(&so_getorigdst); | 1541 | nf_unregister_sockopt(&so_getorigdst); |
1484 | 1542 | ||
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index f7943ba1f43c..a65e508fbd40 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -90,9 +90,7 @@ fold_field(void *mib[], int offt) | |||
90 | unsigned long res = 0; | 90 | unsigned long res = 0; |
91 | int i; | 91 | int i; |
92 | 92 | ||
93 | for (i = 0; i < NR_CPUS; i++) { | 93 | for_each_cpu(i) { |
94 | if (!cpu_possible(i)) | ||
95 | continue; | ||
96 | res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); | 94 | res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); |
97 | res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); | 95 | res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); |
98 | } | 96 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 677419d0c9ad..3e98b57578dc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2239,6 +2239,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, | |||
2239 | /* Note, it is the only place, where | 2239 | /* Note, it is the only place, where |
2240 | * fast path is recovered for sending TCP. | 2240 | * fast path is recovered for sending TCP. |
2241 | */ | 2241 | */ |
2242 | tp->pred_flags = 0; | ||
2242 | tcp_fast_path_check(sk, tp); | 2243 | tcp_fast_path_check(sk, tp); |
2243 | 2244 | ||
2244 | if (nwin > tp->max_window) { | 2245 | if (nwin > tp->max_window) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7114031fdc70..b907456a79f4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -435,17 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss | |||
435 | int nsize, old_factor; | 435 | int nsize, old_factor; |
436 | u16 flags; | 436 | u16 flags; |
437 | 437 | ||
438 | if (unlikely(len >= skb->len)) { | 438 | BUG_ON(len > skb->len); |
439 | if (net_ratelimit()) { | ||
440 | printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, " | ||
441 | "end_seq=%u, skb->len=%u.\n", len, mss_now, | ||
442 | TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, | ||
443 | skb->len); | ||
444 | WARN_ON(1); | ||
445 | } | ||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | nsize = skb_headlen(skb) - len; | 439 | nsize = skb_headlen(skb) - len; |
450 | if (nsize < 0) | 440 | if (nsize < 0) |
451 | nsize = 0; | 441 | nsize = 0; |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b7185fb3377c..23e540365a14 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops) | |||
700 | struct sock *sk; | 700 | struct sock *sk; |
701 | int err, i, j; | 701 | int err, i, j; |
702 | 702 | ||
703 | for (i = 0; i < NR_CPUS; i++) { | 703 | for_each_cpu(i) { |
704 | if (!cpu_possible(i)) | ||
705 | continue; | ||
706 | |||
707 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, | 704 | err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, |
708 | &per_cpu(__icmpv6_socket, i)); | 705 | &per_cpu(__icmpv6_socket, i)); |
709 | if (err < 0) { | 706 | if (err < 0) { |
@@ -749,9 +746,7 @@ void icmpv6_cleanup(void) | |||
749 | { | 746 | { |
750 | int i; | 747 | int i; |
751 | 748 | ||
752 | for (i = 0; i < NR_CPUS; i++) { | 749 | for_each_cpu(i) { |
753 | if (!cpu_possible(i)) | ||
754 | continue; | ||
755 | sock_release(per_cpu(__icmpv6_socket, i)); | 750 | sock_release(per_cpu(__icmpv6_socket, i)); |
756 | } | 751 | } |
757 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); | 752 | inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f841bde30c18..bbbe80cdaf72 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -483,7 +483,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) | |||
483 | goto done; | 483 | goto done; |
484 | } | 484 | } |
485 | fl1 = sfl->fl; | 485 | fl1 = sfl->fl; |
486 | atomic_inc(&fl->users); | 486 | atomic_inc(&fl1->users); |
487 | break; | 487 | break; |
488 | } | 488 | } |
489 | } | 489 | } |
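
The one-character ip6_flowlabel fix deserves a gloss: when a matching shared label is found, the reference must be taken on fl1, the object actually handed back to the socket, not on the caller's request template fl. The old code left fl1's user count one short on every shared attach, so a later release could drop it to zero and free a label still in use.
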
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 334a5967831e..50a13e75d70e 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -140,9 +140,7 @@ fold_field(void *mib[], int offt) | |||
140 | unsigned long res = 0; | 140 | unsigned long res = 0; |
141 | int i; | 141 | int i; |
142 | 142 | ||
143 | for (i = 0; i < NR_CPUS; i++) { | 143 | for_each_cpu(i) { |
144 | if (!cpu_possible(i)) | ||
145 | continue; | ||
146 | res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); | 144 | res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); |
147 | res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); | 145 | res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); |
148 | } | 146 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 678c3f2c0d0b..5ca283537bc6 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t | |||
740 | 740 | ||
741 | int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) | 741 | int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) |
742 | { | 742 | { |
743 | struct netlink_sock *nlk; | ||
744 | int len = skb->len; | 743 | int len = skb->len; |
745 | 744 | ||
746 | nlk = nlk_sk(sk); | ||
747 | |||
748 | skb_queue_tail(&sk->sk_receive_queue, skb); | 745 | skb_queue_tail(&sk->sk_receive_queue, skb); |
749 | sk->sk_data_ready(sk, len); | 746 | sk->sk_data_ready(sk, len); |
750 | sock_put(sk); | 747 | sock_put(sk); |
@@ -827,7 +824,7 @@ struct netlink_broadcast_data { | |||
827 | int failure; | 824 | int failure; |
828 | int congested; | 825 | int congested; |
829 | int delivered; | 826 | int delivered; |
830 | unsigned int allocation; | 827 | gfp_t allocation; |
831 | struct sk_buff *skb, *skb2; | 828 | struct sk_buff *skb, *skb2; |
832 | }; | 829 | }; |
833 | 830 | ||
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index e556d92c0bc4..b18fe5043019 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg) | |||
727 | } | 727 | } |
728 | if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ | 728 | if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ |
729 | return -EINVAL; | 729 | return -EINVAL; |
730 | if (rose_route.ndigis > 8) /* No more than 8 digipeats */ | 730 | if (rose_route.ndigis > AX25_MAX_DIGIS) |
731 | return -EINVAL; | 731 | return -EINVAL; |
732 | err = rose_add_node(&rose_route, dev); | 732 | err = rose_add_node(&rose_route, dev); |
733 | dev_put(dev); | 733 | dev_put(dev); |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index b74f7772b576..6e4dc28874d7 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -69,9 +69,7 @@ fold_field(void *mib[], int nr) | |||
69 | unsigned long res = 0; | 69 | unsigned long res = 0; |
70 | int i; | 70 | int i; |
71 | 71 | ||
72 | for (i = 0; i < NR_CPUS; i++) { | 72 | for_each_cpu(i) { |
73 | if (!cpu_possible(i)) | ||
74 | continue; | ||
75 | res += | 73 | res += |
76 | *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + | 74 | *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + |
77 | sizeof (unsigned long) * nr)); | 75 | sizeof (unsigned long) * nr)); |
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 46a2ce00a29b..cdcab9ca4c60 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile | |||
@@ -6,7 +6,7 @@ | |||
6 | obj-$(CONFIG_SUNRPC) += sunrpc.o | 6 | obj-$(CONFIG_SUNRPC) += sunrpc.o |
7 | obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ | 7 | obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ |
8 | 8 | ||
9 | sunrpc-y := clnt.o xprt.o sched.o \ | 9 | sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ |
10 | auth.o auth_null.o auth_unix.o \ | 10 | auth.o auth_null.o auth_unix.o \ |
11 | svc.o svcsock.o svcauth.o svcauth_unix.o \ | 11 | svc.o svcsock.o svcauth.o svcauth_unix.o \ |
12 | pmap_clnt.o timer.o xdr.o \ | 12 | pmap_clnt.o timer.o xdr.o \ |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 505e2d4b3d62..a415d99c394d 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/socket.h> | ||
15 | #include <linux/sunrpc/clnt.h> | 14 | #include <linux/sunrpc/clnt.h> |
16 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
17 | 16 | ||
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index fe1b874084bc..f3431a7e33da 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile | |||
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ | |||
10 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o | 10 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o |
11 | 11 | ||
12 | rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ | 12 | rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ |
13 | gss_krb5_seqnum.o | 13 | gss_krb5_seqnum.o gss_krb5_wrap.o |
14 | 14 | ||
15 | obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o | 15 | obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o |
16 | 16 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 2f7b867161d2..f44f46f1d8e0 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -42,9 +42,8 @@ | |||
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/types.h> | 43 | #include <linux/types.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | #include <linux/socket.h> | ||
46 | #include <linux/in.h> | ||
47 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
46 | #include <linux/pagemap.h> | ||
48 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
49 | #include <linux/sunrpc/auth.h> | 48 | #include <linux/sunrpc/auth.h> |
50 | #include <linux/sunrpc/auth_gss.h> | 49 | #include <linux/sunrpc/auth_gss.h> |
@@ -846,10 +845,8 @@ gss_marshal(struct rpc_task *task, u32 *p) | |||
846 | 845 | ||
847 | /* We compute the checksum for the verifier over the xdr-encoded bytes | 846 | /* We compute the checksum for the verifier over the xdr-encoded bytes |
848 | * starting with the xid and ending at the end of the credential: */ | 847 | * starting with the xid and ending at the end of the credential: */ |
849 | iov.iov_base = req->rq_snd_buf.head[0].iov_base; | 848 | iov.iov_base = xprt_skip_transport_header(task->tk_xprt, |
850 | if (task->tk_client->cl_xprt->stream) | 849 | req->rq_snd_buf.head[0].iov_base); |
851 | /* See clnt.c:call_header() */ | ||
852 | iov.iov_base += 4; | ||
853 | iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; | 850 | iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; |
854 | xdr_buf_from_iov(&iov, &verf_buf); | 851 | xdr_buf_from_iov(&iov, &verf_buf); |
855 | 852 | ||
@@ -857,9 +854,7 @@ gss_marshal(struct rpc_task *task, u32 *p) | |||
857 | *p++ = htonl(RPC_AUTH_GSS); | 854 | *p++ = htonl(RPC_AUTH_GSS); |
858 | 855 | ||
859 | mic.data = (u8 *)(p + 1); | 856 | mic.data = (u8 *)(p + 1); |
860 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, | 857 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
861 | GSS_C_QOP_DEFAULT, | ||
862 | &verf_buf, &mic); | ||
863 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { | 858 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) { |
864 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 859 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; |
865 | } else if (maj_stat != 0) { | 860 | } else if (maj_stat != 0) { |
@@ -890,10 +885,8 @@ static u32 * | |||
890 | gss_validate(struct rpc_task *task, u32 *p) | 885 | gss_validate(struct rpc_task *task, u32 *p) |
891 | { | 886 | { |
892 | struct rpc_cred *cred = task->tk_msg.rpc_cred; | 887 | struct rpc_cred *cred = task->tk_msg.rpc_cred; |
893 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | ||
894 | gc_base); | ||
895 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 888 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
896 | u32 seq, qop_state; | 889 | u32 seq; |
897 | struct kvec iov; | 890 | struct kvec iov; |
898 | struct xdr_buf verf_buf; | 891 | struct xdr_buf verf_buf; |
899 | struct xdr_netobj mic; | 892 | struct xdr_netobj mic; |
@@ -914,23 +907,14 @@ gss_validate(struct rpc_task *task, u32 *p) | |||
914 | mic.data = (u8 *)p; | 907 | mic.data = (u8 *)p; |
915 | mic.len = len; | 908 | mic.len = len; |
916 | 909 | ||
917 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state); | 910 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); |
918 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 911 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
919 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 912 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; |
920 | if (maj_stat) | 913 | if (maj_stat) |
921 | goto out_bad; | 914 | goto out_bad; |
922 | switch (gss_cred->gc_service) { | 915 | /* We leave it to unwrap to calculate au_rslack. For now we just |
923 | case RPC_GSS_SVC_NONE: | 916 | * calculate the length of the verifier: */ |
924 | /* verifier data, flavor, length: */ | 917 | task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; |
925 | task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2; | ||
926 | break; | ||
927 | case RPC_GSS_SVC_INTEGRITY: | ||
928 | /* verifier data, flavor, length, length, sequence number: */ | ||
929 | task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4; | ||
930 | break; | ||
931 | case RPC_GSS_SVC_PRIVACY: | ||
932 | goto out_bad; | ||
933 | } | ||
934 | gss_put_ctx(ctx); | 918 | gss_put_ctx(ctx); |
935 | dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", | 919 | dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", |
936 | task->tk_pid); | 920 | task->tk_pid); |
@@ -975,8 +959,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
975 | p = iov->iov_base + iov->iov_len; | 959 | p = iov->iov_base + iov->iov_len; |
976 | mic.data = (u8 *)(p + 1); | 960 | mic.data = (u8 *)(p + 1); |
977 | 961 | ||
978 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, | 962 | maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
979 | GSS_C_QOP_DEFAULT, &integ_buf, &mic); | ||
980 | status = -EIO; /* XXX? */ | 963 | status = -EIO; /* XXX? */ |
981 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 964 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
982 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 965 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; |
@@ -990,6 +973,113 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
990 | return 0; | 973 | return 0; |
991 | } | 974 | } |
992 | 975 | ||
976 | static void | ||
977 | priv_release_snd_buf(struct rpc_rqst *rqstp) | ||
978 | { | ||
979 | int i; | ||
980 | |||
981 | for (i=0; i < rqstp->rq_enc_pages_num; i++) | ||
982 | __free_page(rqstp->rq_enc_pages[i]); | ||
983 | kfree(rqstp->rq_enc_pages); | ||
984 | } | ||
985 | |||
986 | static int | ||
987 | alloc_enc_pages(struct rpc_rqst *rqstp) | ||
988 | { | ||
989 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; | ||
990 | int first, last, i; | ||
991 | |||
992 | if (snd_buf->page_len == 0) { | ||
993 | rqstp->rq_enc_pages_num = 0; | ||
994 | return 0; | ||
995 | } | ||
996 | |||
997 | first = snd_buf->page_base >> PAGE_CACHE_SHIFT; | ||
998 | last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; | ||
999 | rqstp->rq_enc_pages_num = last - first + 1 + 1; | ||
1000 | rqstp->rq_enc_pages | ||
1001 | = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), | ||
1002 | GFP_NOFS); | ||
1003 | if (!rqstp->rq_enc_pages) | ||
1004 | goto out; | ||
1005 | for (i=0; i < rqstp->rq_enc_pages_num; i++) { | ||
1006 | rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); | ||
1007 | if (rqstp->rq_enc_pages[i] == NULL) | ||
1008 | goto out_free; | ||
1009 | } | ||
1010 | rqstp->rq_release_snd_buf = priv_release_snd_buf; | ||
1011 | return 0; | ||
1012 | out_free: | ||
1013 | for (i--; i >= 0; i--) { | ||
1014 | __free_page(rqstp->rq_enc_pages[i]); | ||
1015 | } | ||
1016 | out: | ||
1017 | return -EAGAIN; | ||
1018 | } | ||
1019 | |||
1020 | static inline int | ||
1021 | gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | ||
1022 | kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) | ||
1023 | { | ||
1024 | struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; | ||
1025 | u32 offset; | ||
1026 | u32 maj_stat; | ||
1027 | int status; | ||
1028 | u32 *opaque_len; | ||
1029 | struct page **inpages; | ||
1030 | int first; | ||
1031 | int pad; | ||
1032 | struct kvec *iov; | ||
1033 | char *tmp; | ||
1034 | |||
1035 | opaque_len = p++; | ||
1036 | offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; | ||
1037 | *p++ = htonl(rqstp->rq_seqno); | ||
1038 | |||
1039 | status = encode(rqstp, p, obj); | ||
1040 | if (status) | ||
1041 | return status; | ||
1042 | |||
1043 | status = alloc_enc_pages(rqstp); | ||
1044 | if (status) | ||
1045 | return status; | ||
1046 | first = snd_buf->page_base >> PAGE_CACHE_SHIFT; | ||
1047 | inpages = snd_buf->pages + first; | ||
1048 | snd_buf->pages = rqstp->rq_enc_pages; | ||
1049 | snd_buf->page_base -= first << PAGE_CACHE_SHIFT; | ||
1050 | /* Give the tail its own page, in case we need extra space in the | ||
1051 | * head when wrapping: */ | ||
1052 | if (snd_buf->page_len || snd_buf->tail[0].iov_len) { | ||
1053 | tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); | ||
1054 | memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); | ||
1055 | snd_buf->tail[0].iov_base = tmp; | ||
1056 | } | ||
1057 | maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); | ||
1058 | /* RPC_SLACK_SPACE should prevent this from ever happening: */ |||
1059 | BUG_ON(snd_buf->len > snd_buf->buflen); | ||
1060 | status = -EIO; | ||
1061 | /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was | ||
1062 | * done anyway, so it's safe to put the request on the wire: */ | ||
1063 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | ||
1064 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | ||
1065 | else if (maj_stat) | ||
1066 | return status; | ||
1067 | |||
1068 | *opaque_len = htonl(snd_buf->len - offset); | ||
1069 | /* guess whether we're in the head or the tail: */ | ||
1070 | if (snd_buf->page_len || snd_buf->tail[0].iov_len) | ||
1071 | iov = snd_buf->tail; | ||
1072 | else | ||
1073 | iov = snd_buf->head; | ||
1074 | p = iov->iov_base + iov->iov_len; | ||
1075 | pad = 3 - ((snd_buf->len - offset - 1) & 3); | ||
1076 | memset(p, 0, pad); | ||
1077 | iov->iov_len += pad; | ||
1078 | snd_buf->len += pad; | ||
1079 | |||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
993 | static int | 1083 | static int |
994 | gss_wrap_req(struct rpc_task *task, | 1084 | gss_wrap_req(struct rpc_task *task, |
995 | kxdrproc_t encode, void *rqstp, u32 *p, void *obj) | 1085 | kxdrproc_t encode, void *rqstp, u32 *p, void *obj) |
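
One detail of gss_wrap_req_priv() worth checking is the XDR padding step: pad = 3 - ((snd_buf->len - offset - 1) & 3) rounds the wrapped token up to the 4-byte boundary that opaque data requires. A tiny program confirms it matches the conventional (4 - n % 4) % 4 form:

	#include <assert.h>

	int main(void)
	{
		/* n stands in for snd_buf->len - offset in the hunk above */
		for (unsigned int n = 1; n < 64; n++)
			assert(3 - ((n - 1) & 3) == (4 - n % 4) % 4);
		return 0;
	}
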
@@ -1017,6 +1107,8 @@ gss_wrap_req(struct rpc_task *task, | |||
1017 | rqstp, p, obj); | 1107 | rqstp, p, obj); |
1018 | break; | 1108 | break; |
1019 | case RPC_GSS_SVC_PRIVACY: | 1109 | case RPC_GSS_SVC_PRIVACY: |
1110 | status = gss_wrap_req_priv(cred, ctx, encode, | ||
1111 | rqstp, p, obj); | ||
1020 | break; | 1112 | break; |
1021 | } | 1113 | } |
1022 | out: | 1114 | out: |
@@ -1054,8 +1146,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1054 | if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) | 1146 | if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) |
1055 | return status; | 1147 | return status; |
1056 | 1148 | ||
1057 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, | 1149 | maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); |
1058 | &mic, NULL); | ||
1059 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | 1150 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) |
1060 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | 1151 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; |
1061 | if (maj_stat != GSS_S_COMPLETE) | 1152 | if (maj_stat != GSS_S_COMPLETE) |
@@ -1063,6 +1154,35 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | |||
1063 | return 0; | 1154 | return 0; |
1064 | } | 1155 | } |
1065 | 1156 | ||
1157 | static inline int | ||
1158 | gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, | ||
1159 | struct rpc_rqst *rqstp, u32 **p) | ||
1160 | { | ||
1161 | struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; | ||
1162 | u32 offset; | ||
1163 | u32 opaque_len; | ||
1164 | u32 maj_stat; | ||
1165 | int status = -EIO; | ||
1166 | |||
1167 | opaque_len = ntohl(*(*p)++); | ||
1168 | offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; | ||
1169 | if (offset + opaque_len > rcv_buf->len) | ||
1170 | return status; | ||
1171 | /* remove padding: */ | ||
1172 | rcv_buf->len = offset + opaque_len; | ||
1173 | |||
1174 | maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); | ||
1175 | if (maj_stat == GSS_S_CONTEXT_EXPIRED) | ||
1176 | cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; | ||
1177 | if (maj_stat != GSS_S_COMPLETE) | ||
1178 | return status; | ||
1179 | if (ntohl(*(*p)++) != rqstp->rq_seqno) | ||
1180 | return status; | ||
1181 | |||
1182 | return 0; | ||
1183 | } | ||
1184 | |||
1185 | |||
1066 | static int | 1186 | static int |
1067 | gss_unwrap_resp(struct rpc_task *task, | 1187 | gss_unwrap_resp(struct rpc_task *task, |
1068 | kxdrproc_t decode, void *rqstp, u32 *p, void *obj) | 1188 | kxdrproc_t decode, void *rqstp, u32 *p, void *obj) |
@@ -1071,6 +1191,9 @@ gss_unwrap_resp(struct rpc_task *task, | |||
1071 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, | 1191 | struct gss_cred *gss_cred = container_of(cred, struct gss_cred, |
1072 | gc_base); | 1192 | gc_base); |
1073 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1193 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
1194 | u32 *savedp = p; | ||
1195 | struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; | ||
1196 | int savedlen = head->iov_len; | ||
1074 | int status = -EIO; | 1197 | int status = -EIO; |
1075 | 1198 | ||
1076 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) | 1199 | if (ctx->gc_proc != RPC_GSS_PROC_DATA) |
@@ -1084,8 +1207,14 @@ gss_unwrap_resp(struct rpc_task *task, | |||
1084 | goto out; | 1207 | goto out; |
1085 | break; | 1208 | break; |
1086 | case RPC_GSS_SVC_PRIVACY: | 1209 | case RPC_GSS_SVC_PRIVACY: |
1210 | status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); | ||
1211 | if (status) | ||
1212 | goto out; | ||
1087 | break; | 1213 | break; |
1088 | } | 1214 | } |
1215 | /* take into account extra slack for integrity and privacy cases: */ | ||
1216 | task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) | ||
1217 | + (savedlen - head->iov_len); | ||
1089 | out_decode: | 1218 | out_decode: |
1090 | status = decode(rqstp, p, obj); | 1219 | status = decode(rqstp, p, obj); |
1091 | out: | 1220 | out: |
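
This au_rslack computation is what lets the per-service switch disappear from gss_validate() above: au_verfsize counts the verifier words, (p - savedp) counts whatever wrapper words the integrity or privacy unwrap consumed, and (savedlen - head->iov_len) accounts for bytes the unwrap trimmed from the head. The old hard-coded constants (XDR_QUADLEN(len) + 2 for the plain case, + 4 for integrity) fall out as au_verfsize plus zero or two consumed words, and privacy, which the old code simply rejected, needs no constant at all.
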
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ee6ae74cd1b2..3f3d5437f02d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -139,17 +139,91 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) { | |||
139 | sg->length = len; | 139 | sg->length = len; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int | ||
143 | process_xdr_buf(struct xdr_buf *buf, int offset, int len, | ||
144 | int (*actor)(struct scatterlist *, void *), void *data) | ||
145 | { | ||
146 | int i, page_len, thislen, page_offset, ret = 0; | ||
147 | struct scatterlist sg[1]; | ||
148 | |||
149 | if (offset >= buf->head[0].iov_len) { | ||
150 | offset -= buf->head[0].iov_len; | ||
151 | } else { | ||
152 | thislen = buf->head[0].iov_len - offset; | ||
153 | if (thislen > len) | ||
154 | thislen = len; | ||
155 | buf_to_sg(sg, buf->head[0].iov_base + offset, thislen); | ||
156 | ret = actor(sg, data); | ||
157 | if (ret) | ||
158 | goto out; | ||
159 | offset = 0; | ||
160 | len -= thislen; | ||
161 | } | ||
162 | if (len == 0) | ||
163 | goto out; | ||
164 | |||
165 | if (offset >= buf->page_len) { | ||
166 | offset -= buf->page_len; | ||
167 | } else { | ||
168 | page_len = buf->page_len - offset; | ||
169 | if (page_len > len) | ||
170 | page_len = len; | ||
171 | len -= page_len; | ||
172 | page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); | ||
173 | i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; | ||
174 | thislen = PAGE_CACHE_SIZE - page_offset; | ||
175 | do { | ||
176 | if (thislen > page_len) | ||
177 | thislen = page_len; | ||
178 | sg->page = buf->pages[i]; | ||
179 | sg->offset = page_offset; | ||
180 | sg->length = thislen; | ||
181 | ret = actor(sg, data); | ||
182 | if (ret) | ||
183 | goto out; | ||
184 | page_len -= thislen; | ||
185 | i++; | ||
186 | page_offset = 0; | ||
187 | thislen = PAGE_CACHE_SIZE; | ||
188 | } while (page_len != 0); | ||
189 | offset = 0; | ||
190 | } | ||
191 | if (len == 0) | ||
192 | goto out; | ||
193 | |||
194 | if (offset < buf->tail[0].iov_len) { | ||
195 | thislen = buf->tail[0].iov_len - offset; | ||
196 | if (thislen > len) | ||
197 | thislen = len; | ||
198 | buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen); | ||
199 | ret = actor(sg, data); | ||
200 | len -= thislen; | ||
201 | } | ||
202 | if (len != 0) | ||
203 | ret = -EINVAL; | ||
204 | out: | ||
205 | return ret; | ||
206 | } | ||
207 | |||
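process_xdr_buf() is the new generic walker for the three-part xdr_buf -- head kvec, page array, tail kvec -- invoking an actor callback on each contiguous segment so that checksumming and encryption can share one traversal. A runnable user-space model of the same walk, a sketch only: the page array is collapsed into a single flat region and struct page handling is omitted.

#include <stdio.h>

struct seg { const char *base; int len; };

static int walk(struct seg part[3], int offset, int len,
                int (*actor)(const char *, int, void *), void *data)
{
    int i, thislen, ret = 0;

    for (i = 0; i < 3 && len > 0; i++) {
        if (offset >= part[i].len) {        /* segment entirely skipped */
            offset -= part[i].len;
            continue;
        }
        thislen = part[i].len - offset;
        if (thislen > len)
            thislen = len;
        ret = actor(part[i].base + offset, thislen, data);
        if (ret)
            return ret;
        offset = 0;
        len -= thislen;
    }
    return len ? -1 : ret;                  /* -1 ~ -EINVAL: ran past the buffer */
}

static int dump(const char *p, int n, void *unused)
{
    (void)unused;
    printf("segment: %.*s\n", n, p);
    return 0;
}

int main(void)
{
    struct seg buf[3] = { {"head", 4}, {"pages", 5}, {"tail", 4} };
    return walk(buf, 2, 9, dump, NULL);     /* skips "he", covers "ad"+"pages"+"ta" */
}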
208 | static int | ||
209 | checksummer(struct scatterlist *sg, void *data) | ||
210 | { | ||
211 | struct crypto_tfm *tfm = (struct crypto_tfm *)data; | ||
212 | |||
213 | crypto_digest_update(tfm, sg, 1); | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
142 | /* checksum the plaintext data and hdrlen bytes of the token header */ | 218 | /* checksum the plaintext data and hdrlen bytes of the token header */ |
143 | s32 | 219 | s32 |
144 | make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, | 220 | make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, |
145 | struct xdr_netobj *cksum) | 221 | int body_offset, struct xdr_netobj *cksum) |
146 | { | 222 | { |
147 | char *cksumname; | 223 | char *cksumname; |
148 | struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ | 224 | struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ |
149 | struct scatterlist sg[1]; | 225 | struct scatterlist sg[1]; |
150 | u32 code = GSS_S_FAILURE; | 226 | u32 code = GSS_S_FAILURE; |
151 | int len, thislen, offset; | ||
152 | int i; | ||
153 | 227 | ||
154 | switch (cksumtype) { | 228 | switch (cksumtype) { |
155 | case CKSUMTYPE_RSA_MD5: | 229 | case CKSUMTYPE_RSA_MD5: |
@@ -169,33 +243,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, | |||
169 | crypto_digest_init(tfm); | 243 | crypto_digest_init(tfm); |
170 | buf_to_sg(sg, header, hdrlen); | 244 | buf_to_sg(sg, header, hdrlen); |
171 | crypto_digest_update(tfm, sg, 1); | 245 | crypto_digest_update(tfm, sg, 1); |
172 | if (body->head[0].iov_len) { | 246 | process_xdr_buf(body, body_offset, body->len - body_offset, |
173 | buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len); | 247 | checksummer, tfm); |
174 | crypto_digest_update(tfm, sg, 1); | ||
175 | } | ||
176 | |||
177 | len = body->page_len; | ||
178 | if (len != 0) { | ||
179 | offset = body->page_base & (PAGE_CACHE_SIZE - 1); | ||
180 | i = body->page_base >> PAGE_CACHE_SHIFT; | ||
181 | thislen = PAGE_CACHE_SIZE - offset; | ||
182 | do { | ||
183 | if (thislen > len) | ||
184 | thislen = len; | ||
185 | sg->page = body->pages[i]; | ||
186 | sg->offset = offset; | ||
187 | sg->length = thislen; | ||
188 | crypto_digest_update(tfm, sg, 1); | ||
189 | len -= thislen; | ||
190 | i++; | ||
191 | offset = 0; | ||
192 | thislen = PAGE_CACHE_SIZE; | ||
193 | } while(len != 0); | ||
194 | } | ||
195 | if (body->tail[0].iov_len) { | ||
196 | buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len); | ||
197 | crypto_digest_update(tfm, sg, 1); | ||
198 | } | ||
199 | crypto_digest_final(tfm, cksum->data); | 248 | crypto_digest_final(tfm, cksum->data); |
200 | code = 0; | 249 | code = 0; |
201 | out: | 250 | out: |
@@ -204,3 +253,154 @@ out: | |||
204 | } | 253 | } |
205 | 254 | ||
206 | EXPORT_SYMBOL(make_checksum); | 255 | EXPORT_SYMBOL(make_checksum); |
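The new body_offset argument lets callers start the digest partway into the body; the wrap path uses it to skip the token header that now sits at the front of the buffer. A user-space sketch of the call shape, with a trivial additive "digest" standing in for MD5 -- names and values here are illustrative, not from the patch:

#include <stdio.h>
#include <stddef.h>

static unsigned digest(unsigned h, const char *p, size_t n)
{
    while (n--)
        h = h * 31 + (unsigned char)*p++;
    return h;
}

int main(void)
{
    const char hdr[8] = "KRB5HDR";          /* token header, hdrlen = 8 */
    const char body[] = "tokenhdr+payload";
    size_t body_offset = 9;                 /* skip the in-buffer token header */

    /* checksum = H(header bytes || body from body_offset to end) */
    unsigned h = digest(0, hdr, 8);
    h = digest(h, body + body_offset, sizeof(body) - 1 - body_offset);
    printf("cksum = %08x\n", h);
    return 0;
}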
256 | |||
257 | struct encryptor_desc { | ||
258 | u8 iv[8]; /* XXX hard-coded blocksize */ | ||
259 | struct crypto_tfm *tfm; | ||
260 | int pos; | ||
261 | struct xdr_buf *outbuf; | ||
262 | struct page **pages; | ||
263 | struct scatterlist infrags[4]; | ||
264 | struct scatterlist outfrags[4]; | ||
265 | int fragno; | ||
266 | int fraglen; | ||
267 | }; | ||
268 | |||
269 | static int | ||
270 | encryptor(struct scatterlist *sg, void *data) | ||
271 | { | ||
272 | struct encryptor_desc *desc = data; | ||
273 | struct xdr_buf *outbuf = desc->outbuf; | ||
274 | struct page *in_page; | ||
275 | int thislen = desc->fraglen + sg->length; | ||
276 | int fraglen, ret; | ||
277 | int page_pos; | ||
278 | |||
279 | /* Worst case is 4 fragments: head, end of page 1, start | ||
280 | * of page 2, tail. Anything more is a bug. */ | ||
281 | BUG_ON(desc->fragno > 3); | ||
282 | desc->infrags[desc->fragno] = *sg; | ||
283 | desc->outfrags[desc->fragno] = *sg; | ||
284 | |||
285 | page_pos = desc->pos - outbuf->head[0].iov_len; | ||
286 | if (page_pos >= 0 && page_pos < outbuf->page_len) { | ||
287 | /* pages are not in place: */ | ||
288 | int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; | ||
289 | in_page = desc->pages[i]; | ||
290 | } else { | ||
291 | in_page = sg->page; | ||
292 | } | ||
293 | desc->infrags[desc->fragno].page = in_page; | ||
294 | desc->fragno++; | ||
295 | desc->fraglen += sg->length; | ||
296 | desc->pos += sg->length; | ||
297 | |||
298 | fraglen = thislen & 7; /* XXX hardcoded blocksize */ | ||
299 | thislen -= fraglen; | ||
300 | |||
301 | if (thislen == 0) | ||
302 | return 0; | ||
303 | |||
304 | ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, | ||
305 | thislen, desc->iv); | ||
306 | if (ret) | ||
307 | return ret; | ||
308 | if (fraglen) { | ||
309 | desc->outfrags[0].page = sg->page; | ||
310 | desc->outfrags[0].offset = sg->offset + sg->length - fraglen; | ||
311 | desc->outfrags[0].length = fraglen; | ||
312 | desc->infrags[0] = desc->outfrags[0]; | ||
313 | desc->infrags[0].page = in_page; | ||
314 | desc->fragno = 1; | ||
315 | desc->fraglen = fraglen; | ||
316 | } else { | ||
317 | desc->fragno = 0; | ||
318 | desc->fraglen = 0; | ||
319 | } | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | int | ||
324 | gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, | ||
325 | struct page **pages) | ||
326 | { | ||
327 | int ret; | ||
328 | struct encryptor_desc desc; | ||
329 | |||
330 | BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); | ||
331 | |||
332 | memset(desc.iv, 0, sizeof(desc.iv)); | ||
333 | desc.tfm = tfm; | ||
334 | desc.pos = offset; | ||
335 | desc.outbuf = buf; | ||
336 | desc.pages = pages; | ||
337 | desc.fragno = 0; | ||
338 | desc.fraglen = 0; | ||
339 | |||
340 | ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc); | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | EXPORT_SYMBOL(gss_encrypt_xdr_buf); | ||
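encryptor() batches scatterlist fragments until it holds a whole number of cipher blocks, pushes those through the CBC transform, and carries any sub-block remainder into the next call -- hence the 4-entry fragment arrays. A sketch of just that carry arithmetic, with a block counter standing in for the cipher:

#include <stdio.h>

static int fraglen, blocks;

static void feed(int seglen)
{
    int thislen = fraglen + seglen;
    int rem = thislen & 7;              /* sub-block remainder, 8-byte blocks */

    blocks += (thislen - rem) / 8;      /* whole blocks flushed to the cipher */
    fraglen = rem;                      /* carried into the next fragment */
}

int main(void)
{
    int segs[] = { 20, 3, 9, 16 };      /* e.g. head, page end, page start, tail */
    int i;

    for (i = 0; i < 4; i++)
        feed(segs[i]);
    printf("blocks=%d carried=%d\n", blocks, fraglen);  /* 6 blocks, 0 left */
    return 0;
}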
345 | |||
346 | struct decryptor_desc { | ||
347 | u8 iv[8]; /* XXX hard-coded blocksize */ | ||
348 | struct crypto_tfm *tfm; | ||
349 | struct scatterlist frags[4]; | ||
350 | int fragno; | ||
351 | int fraglen; | ||
352 | }; | ||
353 | |||
354 | static int | ||
355 | decryptor(struct scatterlist *sg, void *data) | ||
356 | { | ||
357 | struct decryptor_desc *desc = data; | ||
358 | int thislen = desc->fraglen + sg->length; | ||
359 | int fraglen, ret; | ||
360 | |||
361 | /* Worst case is 4 fragments: head, end of page 1, start | ||
362 | * of page 2, tail. Anything more is a bug. */ | ||
363 | BUG_ON(desc->fragno > 3); | ||
364 | desc->frags[desc->fragno] = *sg; | ||
365 | desc->fragno++; | ||
366 | desc->fraglen += sg->length; | ||
367 | |||
368 | fraglen = thislen & 7; /* XXX hardcoded blocksize */ | ||
369 | thislen -= fraglen; | ||
370 | |||
371 | if (thislen == 0) | ||
372 | return 0; | ||
373 | |||
374 | ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, | ||
375 | thislen, desc->iv); | ||
376 | if (ret) | ||
377 | return ret; | ||
378 | if (fraglen) { | ||
379 | desc->frags[0].page = sg->page; | ||
380 | desc->frags[0].offset = sg->offset + sg->length - fraglen; | ||
381 | desc->frags[0].length = fraglen; | ||
382 | desc->fragno = 1; | ||
383 | desc->fraglen = fraglen; | ||
384 | } else { | ||
385 | desc->fragno = 0; | ||
386 | desc->fraglen = 0; | ||
387 | } | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | int | ||
392 | gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) | ||
393 | { | ||
394 | struct decryptor_desc desc; | ||
395 | |||
396 | /* XXXJBF: */ | ||
397 | BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); | ||
398 | |||
399 | memset(desc.iv, 0, sizeof(desc.iv)); | ||
400 | desc.tfm = tfm; | ||
401 | desc.fragno = 0; | ||
402 | desc.fraglen = 0; | ||
403 | return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); | ||
404 | } | ||
405 | |||
406 | EXPORT_SYMBOL(gss_decrypt_xdr_buf); | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 606a8a82cafb..5f1f806a0b11 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/sunrpc/auth.h> | 41 | #include <linux/sunrpc/auth.h> |
42 | #include <linux/in.h> | ||
43 | #include <linux/sunrpc/gss_krb5.h> | 42 | #include <linux/sunrpc/gss_krb5.h> |
44 | #include <linux/sunrpc/xdr.h> | 43 | #include <linux/sunrpc/xdr.h> |
45 | #include <linux/crypto.h> | 44 | #include <linux/crypto.h> |
@@ -191,43 +190,12 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { | |||
191 | kfree(kctx); | 190 | kfree(kctx); |
192 | } | 191 | } |
193 | 192 | ||
194 | static u32 | ||
195 | gss_verify_mic_kerberos(struct gss_ctx *ctx, | ||
196 | struct xdr_buf *message, | ||
197 | struct xdr_netobj *mic_token, | ||
198 | u32 *qstate) { | ||
199 | u32 maj_stat = 0; | ||
200 | int qop_state; | ||
201 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | ||
202 | |||
203 | maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state, | ||
204 | KG_TOK_MIC_MSG); | ||
205 | if (!maj_stat && qop_state) | ||
206 | *qstate = qop_state; | ||
207 | |||
208 | dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat); | ||
209 | return maj_stat; | ||
210 | } | ||
211 | |||
212 | static u32 | ||
213 | gss_get_mic_kerberos(struct gss_ctx *ctx, | ||
214 | u32 qop, | ||
215 | struct xdr_buf *message, | ||
216 | struct xdr_netobj *mic_token) { | ||
217 | u32 err = 0; | ||
218 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | ||
219 | |||
220 | err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG); | ||
221 | |||
222 | dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); | ||
223 | |||
224 | return err; | ||
225 | } | ||
226 | |||
227 | static struct gss_api_ops gss_kerberos_ops = { | 193 | static struct gss_api_ops gss_kerberos_ops = { |
228 | .gss_import_sec_context = gss_import_sec_context_kerberos, | 194 | .gss_import_sec_context = gss_import_sec_context_kerberos, |
229 | .gss_get_mic = gss_get_mic_kerberos, | 195 | .gss_get_mic = gss_get_mic_kerberos, |
230 | .gss_verify_mic = gss_verify_mic_kerberos, | 196 | .gss_verify_mic = gss_verify_mic_kerberos, |
197 | .gss_wrap = gss_wrap_kerberos, | ||
198 | .gss_unwrap = gss_unwrap_kerberos, | ||
231 | .gss_delete_sec_context = gss_delete_sec_context_kerberos, | 199 | .gss_delete_sec_context = gss_delete_sec_context_kerberos, |
232 | }; | 200 | }; |
233 | 201 | ||
@@ -242,6 +210,11 @@ static struct pf_desc gss_kerberos_pfs[] = { | |||
242 | .service = RPC_GSS_SVC_INTEGRITY, | 210 | .service = RPC_GSS_SVC_INTEGRITY, |
243 | .name = "krb5i", | 211 | .name = "krb5i", |
244 | }, | 212 | }, |
213 | [2] = { | ||
214 | .pseudoflavor = RPC_AUTH_GSS_KRB5P, | ||
215 | .service = RPC_GSS_SVC_PRIVACY, | ||
216 | .name = "krb5p", | ||
217 | }, | ||
245 | }; | 218 | }; |
246 | 219 | ||
247 | static struct gss_api_mech gss_kerberos_mech = { | 220 | static struct gss_api_mech gss_kerberos_mech = { |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index afeeb8715a77..13f8ae979454 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
@@ -70,22 +70,13 @@ | |||
70 | # define RPCDBG_FACILITY RPCDBG_AUTH | 70 | # define RPCDBG_FACILITY RPCDBG_AUTH |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | static inline int | ||
74 | gss_krb5_padding(int blocksize, int length) { | ||
75 | /* Most of the code is block-size independent but in practice we | ||
76 | * use only 8: */ | ||
77 | BUG_ON(blocksize != 8); | ||
78 | return 8 - (length & 7); | ||
79 | } | ||
80 | |||
81 | u32 | 73 | u32 |
82 | krb5_make_token(struct krb5_ctx *ctx, int qop_req, | 74 | gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, |
83 | struct xdr_buf *text, struct xdr_netobj *token, | 75 | struct xdr_netobj *token) |
84 | int toktype) | ||
85 | { | 76 | { |
77 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | ||
86 | s32 checksum_type; | 78 | s32 checksum_type; |
87 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; | 79 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; |
88 | int blocksize = 0, tmsglen; | ||
89 | unsigned char *ptr, *krb5_hdr, *msg_start; | 80 | unsigned char *ptr, *krb5_hdr, *msg_start; |
90 | s32 now; | 81 | s32 now; |
91 | 82 | ||
@@ -93,9 +84,6 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, | |||
93 | 84 | ||
94 | now = get_seconds(); | 85 | now = get_seconds(); |
95 | 86 | ||
96 | if (qop_req != 0) | ||
97 | goto out_err; | ||
98 | |||
99 | switch (ctx->signalg) { | 87 | switch (ctx->signalg) { |
100 | case SGN_ALG_DES_MAC_MD5: | 88 | case SGN_ALG_DES_MAC_MD5: |
101 | checksum_type = CKSUMTYPE_RSA_MD5; | 89 | checksum_type = CKSUMTYPE_RSA_MD5; |
@@ -111,21 +99,13 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, | |||
111 | goto out_err; | 99 | goto out_err; |
112 | } | 100 | } |
113 | 101 | ||
114 | if (toktype == KG_TOK_WRAP_MSG) { | 102 | token->len = g_token_size(&ctx->mech_used, 22); |
115 | blocksize = crypto_tfm_alg_blocksize(ctx->enc); | ||
116 | tmsglen = blocksize + text->len | ||
117 | + gss_krb5_padding(blocksize, blocksize + text->len); | ||
118 | } else { | ||
119 | tmsglen = 0; | ||
120 | } | ||
121 | |||
122 | token->len = g_token_size(&ctx->mech_used, 22 + tmsglen); | ||
123 | 103 | ||
124 | ptr = token->data; | 104 | ptr = token->data; |
125 | g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr); | 105 | g_make_token_header(&ctx->mech_used, 22, &ptr); |
126 | 106 | ||
127 | *ptr++ = (unsigned char) ((toktype>>8)&0xff); | 107 | *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff); |
128 | *ptr++ = (unsigned char) (toktype&0xff); | 108 | *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff); |
129 | 109 | ||
130 | /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ | 110 | /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ |
131 | krb5_hdr = ptr - 2; | 111 | krb5_hdr = ptr - 2; |
@@ -133,17 +113,9 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, | |||
133 | 113 | ||
134 | *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); | 114 | *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); |
135 | memset(krb5_hdr + 4, 0xff, 4); | 115 | memset(krb5_hdr + 4, 0xff, 4); |
136 | if (toktype == KG_TOK_WRAP_MSG) | ||
137 | *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg); | ||
138 | 116 | ||
139 | if (toktype == KG_TOK_WRAP_MSG) { | 117 | if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) |
140 | /* XXX removing support for now */ | ||
141 | goto out_err; | ||
142 | } else { /* Sign only. */ | ||
143 | if (make_checksum(checksum_type, krb5_hdr, 8, text, | ||
144 | &md5cksum)) | ||
145 | goto out_err; | 118 | goto out_err; |
146 | } | ||
147 | 119 | ||
148 | switch (ctx->signalg) { | 120 | switch (ctx->signalg) { |
149 | case SGN_ALG_DES_MAC_MD5: | 121 | case SGN_ALG_DES_MAC_MD5: |
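A layout sketch of the token header that gss_get_mic_kerberos() assembles by pointer arithmetic, following RFC 1964 sections 1.2.1 (MIC) and 1.2.2 (WRAP); the struct itself is illustrative, not from the patch, and packs without padding since every field is a byte array:

#include <stdio.h>

struct krb5_token_hdr {
    unsigned char tok_id[2];    /* +0:  01 01 = MIC, 02 01 = WRAP      */
    unsigned char sgn_alg[2];   /* +2:  e.g. SGN_ALG_DES_MAC_MD5       */
    unsigned char seal_alg[2];  /* +4:  ff ff for MIC tokens           */
    unsigned char filler[2];    /* +6:  ff ff                          */
    unsigned char snd_seq[8];   /* +8:  encrypted sequence number      */
    unsigned char sgn_cksum[8]; /* +16: trailing 8 bytes of checksum   */
};                              /* WRAP: confounder + data follow      */

int main(void)
{
    printf("header+seq+cksum = %zu bytes\n", sizeof(struct krb5_token_hdr));
    return 0;
}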
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 8767fc53183d..2030475d98ed 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c | |||
@@ -68,21 +68,14 @@ | |||
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | 70 | ||
71 | /* message_buffer is an input if toktype is MIC and an output if it is WRAP: | 71 | /* read_token is a mic token, and message_buffer is the data that the mic was |
72 | * If toktype is MIC: read_token is a mic token, and message_buffer is the | 72 | * supposedly taken over. */ |
73 | * data that the mic was supposedly taken over. | ||
74 | * If toktype is WRAP: read_token is a wrap token, and message_buffer is used | ||
75 | * to return the decrypted data. | ||
76 | */ | ||
77 | 73 | ||
78 | /* XXX will need to change prototype and/or just split into a separate function | ||
79 | * when we add privacy (because read_token will be in pages too). */ | ||
80 | u32 | 74 | u32 |
81 | krb5_read_token(struct krb5_ctx *ctx, | 75 | gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, |
82 | struct xdr_netobj *read_token, | 76 | struct xdr_buf *message_buffer, struct xdr_netobj *read_token) |
83 | struct xdr_buf *message_buffer, | ||
84 | int *qop_state, int toktype) | ||
85 | { | 77 | { |
78 | struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; | ||
86 | int signalg; | 79 | int signalg; |
87 | int sealalg; | 80 | int sealalg; |
88 | s32 checksum_type; | 81 | s32 checksum_type; |
@@ -100,16 +93,12 @@ krb5_read_token(struct krb5_ctx *ctx, | |||
100 | read_token->len)) | 93 | read_token->len)) |
101 | goto out; | 94 | goto out; |
102 | 95 | ||
103 | if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff))) | 96 | if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) || |
97 | (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) ) | ||
104 | goto out; | 98 | goto out; |
105 | 99 | ||
106 | /* XXX sanity-check bodysize?? */ | 100 | /* XXX sanity-check bodysize?? */ |
107 | 101 | ||
108 | if (toktype == KG_TOK_WRAP_MSG) { | ||
109 | /* XXX gone */ | ||
110 | goto out; | ||
111 | } | ||
112 | |||
113 | /* get the sign and seal algorithms */ | 102 | /* get the sign and seal algorithms */ |
114 | 103 | ||
115 | signalg = ptr[0] + (ptr[1] << 8); | 104 | signalg = ptr[0] + (ptr[1] << 8); |
@@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx, | |||
120 | if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) | 109 | if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) |
121 | goto out; | 110 | goto out; |
122 | 111 | ||
123 | if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) || | 112 | if (sealalg != 0xffff) |
124 | ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff))) | ||
125 | goto out; | ||
126 | |||
127 | /* in the current spec, there is only one valid seal algorithm per | ||
128 | key type, so a simple comparison is ok */ | ||
129 | |||
130 | if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg)) | ||
131 | goto out; | 113 | goto out; |
132 | 114 | ||
133 | /* there are several mappings of seal algorithms to sign algorithms, | 115 | /* there are several mappings of seal algorithms to sign algorithms, |
@@ -154,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx, | |||
154 | switch (signalg) { | 136 | switch (signalg) { |
155 | case SGN_ALG_DES_MAC_MD5: | 137 | case SGN_ALG_DES_MAC_MD5: |
156 | ret = make_checksum(checksum_type, ptr - 2, 8, | 138 | ret = make_checksum(checksum_type, ptr - 2, 8, |
157 | message_buffer, &md5cksum); | 139 | message_buffer, 0, &md5cksum); |
158 | if (ret) | 140 | if (ret) |
159 | goto out; | 141 | goto out; |
160 | 142 | ||
@@ -175,9 +157,6 @@ krb5_read_token(struct krb5_ctx *ctx, | |||
175 | 157 | ||
176 | /* it got through unscathed. Make sure the context is unexpired */ | 158 | /* it got through unscathed. Make sure the context is unexpired */ |
177 | 159 | ||
178 | if (qop_state) | ||
179 | *qop_state = GSS_C_QOP_DEFAULT; | ||
180 | |||
181 | now = get_seconds(); | 160 | now = get_seconds(); |
182 | 161 | ||
183 | ret = GSS_S_CONTEXT_EXPIRED; | 162 | ret = GSS_S_CONTEXT_EXPIRED; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c new file mode 100644 index 000000000000..af777cf9f251 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
@@ -0,0 +1,363 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/slab.h> | ||
3 | #include <linux/jiffies.h> | ||
4 | #include <linux/sunrpc/gss_krb5.h> | ||
5 | #include <linux/random.h> | ||
6 | #include <linux/pagemap.h> | ||
7 | #include <asm/scatterlist.h> | ||
8 | #include <linux/crypto.h> | ||
9 | |||
10 | #ifdef RPC_DEBUG | ||
11 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
12 | #endif | ||
13 | |||
14 | static inline int | ||
15 | gss_krb5_padding(int blocksize, int length) | ||
16 | { | ||
17 | /* Most of the code is block-size independent but currently we | ||
18 | * use only 8: */ | ||
19 | BUG_ON(blocksize != 8); | ||
20 | return 8 - (length & 7); | ||
21 | } | ||
22 | |||
23 | static inline void | ||
24 | gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) | ||
25 | { | ||
26 | int padding = gss_krb5_padding(blocksize, buf->len - offset); | ||
27 | char *p; | ||
28 | struct kvec *iov; | ||
29 | |||
30 | if (buf->page_len || buf->tail[0].iov_len) | ||
31 | iov = &buf->tail[0]; | ||
32 | else | ||
33 | iov = &buf->head[0]; | ||
34 | p = iov->iov_base + iov->iov_len; | ||
35 | iov->iov_len += padding; | ||
36 | buf->len += padding; | ||
37 | memset(p, padding, padding); | ||
38 | } | ||
39 | |||
40 | static inline int | ||
41 | gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) | ||
42 | { | ||
43 | u8 *ptr; | ||
44 | u8 pad; | ||
45 | int len = buf->len; | ||
46 | |||
47 | if (len <= buf->head[0].iov_len) { | ||
48 | pad = *(u8 *)(buf->head[0].iov_base + len - 1); | ||
49 | if (pad > buf->head[0].iov_len) | ||
50 | return -EINVAL; | ||
51 | buf->head[0].iov_len -= pad; | ||
52 | goto out; | ||
53 | } else | ||
54 | len -= buf->head[0].iov_len; | ||
55 | if (len <= buf->page_len) { | ||
56 | int last = (buf->page_base + len - 1) | ||
57 | >>PAGE_CACHE_SHIFT; | ||
58 | int offset = (buf->page_base + len - 1) | ||
59 | & (PAGE_CACHE_SIZE - 1); | ||
60 | ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA); | ||
61 | pad = *(ptr + offset); | ||
62 | kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA); | ||
63 | goto out; | ||
64 | } else | ||
65 | len -= buf->page_len; | ||
66 | BUG_ON(len > buf->tail[0].iov_len); | ||
67 | pad = *(u8 *)(buf->tail[0].iov_base + len - 1); | ||
68 | out: | ||
69 | /* XXX: NOTE: we do not adjust the page lengths--they represent | ||
70 | * a range of data in the real filesystem page cache, and we need | ||
71 | * to know that range so the xdr code can properly place read data. | ||
72 | * However adjusting the head length, as we do above, is harmless. | ||
73 | * In the case of a request that fits into a single page, the server | ||
74 | * also uses length and head length together to determine the original | ||
75 | * start of the request to copy the request for deferral; so it's | ||
76 | * easier on the server if we adjust head and tail length in tandem. | ||
77 | * It's not really a problem that we don't fool with the page and | ||
78 | * tail lengths, though--at worst badly formed xdr might lead the | ||
79 | * server to attempt to parse the padding. | ||
80 | * XXX: Document all these weird requirements for gss mechanism | ||
81 | * wrap/unwrap functions. */ | ||
82 | if (pad > blocksize) | ||
83 | return -EINVAL; | ||
84 | if (buf->len > pad) | ||
85 | buf->len -= pad; | ||
86 | else | ||
87 | return -EINVAL; | ||
88 | return 0; | ||
89 | } | ||
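The padding pair above is the usual pad-value-equals-pad-length scheme for an 8-byte block: 1..8 pad bytes are appended, each holding the pad count, so removal only has to read the last byte. A runnable sketch of both directions:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char buf[32] = "0123456789";   /* 10 payload bytes */
    int len = 10;
    int pad = 8 - (len & 7);                /* = 6, always in 1..8 */

    memset(buf + len, pad, pad);            /* each pad byte holds the count */
    len += pad;                             /* 16: now a multiple of 8 */

    len -= buf[len - 1];                    /* removal: read last byte, strip */
    printf("payload restored to %d bytes\n", len);  /* 10 */
    return 0;
}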
90 | |||
91 | static inline void | ||
92 | make_confounder(char *p, int blocksize) | ||
93 | { | ||
94 | static u64 i = 0; | ||
95 | u64 *q = (u64 *)p; | ||
96 | |||
97 | /* rfc1964 claims this should be "random". But all that's really | ||
98 | * necessary is that it be unique. And not even that is necessary in | ||
99 | * our case since our "gssapi" implementation exists only to support | ||
100 | * rpcsec_gss, so we know that the only buffers we will ever encrypt | ||
101 | * already begin with a unique sequence number. Just to hedge my bets | ||
102 | * I'll make a half-hearted attempt at something unique, but ensuring | ||
103 | * uniqueness would mean worrying about atomicity and rollover, and I | ||
104 | * don't care enough. */ | ||
105 | |||
106 | BUG_ON(blocksize != 8); | ||
107 | *q = i++; | ||
108 | } | ||
109 | |||
110 | /* Assumptions: the head and tail of inbuf are ours to play with. | ||
111 | * The pages, however, may be real pages in the page cache and we replace | ||
112 | * them with scratch pages from **pages before writing to them. */ | ||
113 | /* XXX: obviously the above should be documentation of wrap interface, | ||
114 | * and shouldn't be in this kerberos-specific file. */ | ||
115 | |||
116 | /* XXX factor out common code with seal/unseal. */ | ||
117 | |||
118 | u32 | ||
119 | gss_wrap_kerberos(struct gss_ctx *ctx, int offset, | ||
120 | struct xdr_buf *buf, struct page **pages) | ||
121 | { | ||
122 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | ||
123 | s32 checksum_type; | ||
124 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; | ||
125 | int blocksize = 0, plainlen; | ||
126 | unsigned char *ptr, *krb5_hdr, *msg_start; | ||
127 | s32 now; | ||
128 | int headlen; | ||
129 | struct page **tmp_pages; | ||
130 | |||
131 | dprintk("RPC: gss_wrap_kerberos\n"); | ||
132 | |||
133 | now = get_seconds(); | ||
134 | |||
135 | switch (kctx->signalg) { | ||
136 | case SGN_ALG_DES_MAC_MD5: | ||
137 | checksum_type = CKSUMTYPE_RSA_MD5; | ||
138 | break; | ||
139 | default: | ||
140 | dprintk("RPC: gss_krb5_seal: kctx->signalg %d not" | ||
141 | " supported\n", kctx->signalg); | ||
142 | goto out_err; | ||
143 | } | ||
144 | if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) { | ||
145 | dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n", | ||
146 | kctx->sealalg); | ||
147 | goto out_err; | ||
148 | } | ||
149 | |||
150 | blocksize = crypto_tfm_alg_blocksize(kctx->enc); | ||
151 | gss_krb5_add_padding(buf, offset, blocksize); | ||
152 | BUG_ON((buf->len - offset) % blocksize); | ||
153 | plainlen = blocksize + buf->len - offset; | ||
154 | |||
155 | headlen = g_token_size(&kctx->mech_used, 22 + plainlen) - | ||
156 | (buf->len - offset); | ||
157 | |||
158 | ptr = buf->head[0].iov_base + offset; | ||
159 | /* shift data to make room for header. */ | ||
160 | /* XXX Would be cleverer to encrypt while copying. */ | ||
161 | /* XXX bounds checking, slack, etc. */ | ||
162 | memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); | ||
163 | buf->head[0].iov_len += headlen; | ||
164 | buf->len += headlen; | ||
165 | BUG_ON((buf->len - offset - headlen) % blocksize); | ||
166 | |||
167 | g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr); | ||
168 | |||
169 | |||
170 | *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff); | ||
171 | *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff); | ||
172 | |||
173 | /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ | ||
174 | krb5_hdr = ptr - 2; | ||
175 | msg_start = krb5_hdr + 24; | ||
176 | /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize); | ||
177 | |||
178 | *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg); | ||
179 | memset(krb5_hdr + 4, 0xff, 4); | ||
180 | *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg); | ||
181 | |||
182 | make_confounder(msg_start, blocksize); | ||
183 | |||
184 | /* XXXJBF: UGH!: */ | ||
185 | tmp_pages = buf->pages; | ||
186 | buf->pages = pages; | ||
187 | if (make_checksum(checksum_type, krb5_hdr, 8, buf, | ||
188 | offset + headlen - blocksize, &md5cksum)) | ||
189 | goto out_err; | ||
190 | buf->pages = tmp_pages; | ||
191 | |||
192 | switch (kctx->signalg) { | ||
193 | case SGN_ALG_DES_MAC_MD5: | ||
194 | if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, | ||
195 | md5cksum.data, md5cksum.len)) | ||
196 | goto out_err; | ||
197 | memcpy(krb5_hdr + 16, | ||
198 | md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH, | ||
199 | KRB5_CKSUM_LENGTH); | ||
200 | |||
201 | dprintk("RPC: make_seal_token: cksum data: \n"); | ||
202 | print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0); | ||
203 | break; | ||
204 | default: | ||
205 | BUG(); | ||
206 | } | ||
207 | |||
208 | kfree(md5cksum.data); | ||
209 | |||
210 | /* XXX would probably be more efficient to compute checksum | ||
211 | * and encrypt at the same time: */ | ||
212 | if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, | ||
213 | kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) | ||
214 | goto out_err; | ||
215 | |||
216 | if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, | ||
217 | pages)) | ||
218 | goto out_err; | ||
219 | |||
220 | kctx->seq_send++; | ||
221 | |||
222 | return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); | ||
223 | out_err: | ||
224 | if (md5cksum.data) kfree(md5cksum.data); | ||
225 | return GSS_S_FAILURE; | ||
226 | } | ||
227 | |||
228 | u32 | ||
229 | gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) | ||
230 | { | ||
231 | struct krb5_ctx *kctx = ctx->internal_ctx_id; | ||
232 | int signalg; | ||
233 | int sealalg; | ||
234 | s32 checksum_type; | ||
235 | struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; | ||
236 | s32 now; | ||
237 | int direction; | ||
238 | s32 seqnum; | ||
239 | unsigned char *ptr; | ||
240 | int bodysize; | ||
241 | u32 ret = GSS_S_DEFECTIVE_TOKEN; | ||
242 | void *data_start, *orig_start; | ||
243 | int data_len; | ||
244 | int blocksize; | ||
245 | |||
246 | dprintk("RPC: gss_unwrap_kerberos\n"); | ||
247 | |||
248 | ptr = (u8 *)buf->head[0].iov_base + offset; | ||
249 | if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, | ||
250 | buf->len - offset)) | ||
251 | goto out; | ||
252 | |||
253 | if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) || | ||
254 | (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) ) | ||
255 | goto out; | ||
256 | |||
257 | /* XXX sanity-check bodysize?? */ | ||
258 | |||
259 | /* get the sign and seal algorithms */ | ||
260 | |||
261 | signalg = ptr[0] + (ptr[1] << 8); | ||
262 | sealalg = ptr[2] + (ptr[3] << 8); | ||
263 | |||
264 | /* Sanity checks */ | ||
265 | |||
266 | if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) | ||
267 | goto out; | ||
268 | |||
269 | if (sealalg == 0xffff) | ||
270 | goto out; | ||
271 | |||
272 | /* in the current spec, there is only one valid seal algorithm per | ||
273 | key type, so a simple comparison is ok */ | ||
274 | |||
275 | if (sealalg != kctx->sealalg) | ||
276 | goto out; | ||
277 | |||
278 | /* there are several mappings of seal algorithms to sign algorithms, | ||
279 | but few enough that we can try them all. */ | ||
280 | |||
281 | if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) || | ||
282 | (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || | ||
283 | (kctx->sealalg == SEAL_ALG_DES3KD && | ||
284 | signalg != SGN_ALG_HMAC_SHA1_DES3_KD)) | ||
285 | goto out; | ||
286 | |||
287 | if (gss_decrypt_xdr_buf(kctx->enc, buf, | ||
288 | ptr + 22 - (unsigned char *)buf->head[0].iov_base)) | ||
289 | goto out; | ||
290 | |||
291 | /* compute the checksum of the message */ | ||
292 | |||
293 | /* initialize the cksum */ | ||
294 | switch (signalg) { | ||
295 | case SGN_ALG_DES_MAC_MD5: | ||
296 | checksum_type = CKSUMTYPE_RSA_MD5; | ||
297 | break; | ||
298 | default: | ||
299 | ret = GSS_S_DEFECTIVE_TOKEN; | ||
300 | goto out; | ||
301 | } | ||
302 | |||
303 | switch (signalg) { | ||
304 | case SGN_ALG_DES_MAC_MD5: | ||
305 | ret = make_checksum(checksum_type, ptr - 2, 8, buf, | ||
306 | ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum); | ||
307 | if (ret) | ||
308 | goto out; | ||
309 | |||
310 | ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data, | ||
311 | md5cksum.data, md5cksum.len); | ||
312 | if (ret) | ||
313 | goto out; | ||
314 | |||
315 | if (memcmp(md5cksum.data + 8, ptr + 14, 8)) { | ||
316 | ret = GSS_S_BAD_SIG; | ||
317 | goto out; | ||
318 | } | ||
319 | break; | ||
320 | default: | ||
321 | ret = GSS_S_DEFECTIVE_TOKEN; | ||
322 | goto out; | ||
323 | } | ||
324 | |||
325 | /* it got through unscathed. Make sure the context is unexpired */ | ||
326 | |||
327 | now = get_seconds(); | ||
328 | |||
329 | ret = GSS_S_CONTEXT_EXPIRED; | ||
330 | if (now > kctx->endtime) | ||
331 | goto out; | ||
332 | |||
333 | /* do sequencing checks */ | ||
334 | |||
335 | ret = GSS_S_BAD_SIG; | ||
336 | if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction, | ||
337 | &seqnum))) | ||
338 | goto out; | ||
339 | |||
340 | if ((kctx->initiate && direction != 0xff) || | ||
341 | (!kctx->initiate && direction != 0)) | ||
342 | goto out; | ||
343 | |||
344 | /* Copy the data back to the right position. XXX: Would probably be | ||
345 | * better to copy and encrypt at the same time. */ | ||
346 | |||
347 | blocksize = crypto_tfm_alg_blocksize(kctx->enc); | ||
348 | data_start = ptr + 22 + blocksize; | ||
349 | orig_start = buf->head[0].iov_base + offset; | ||
350 | data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; | ||
351 | memmove(orig_start, data_start, data_len); | ||
352 | buf->head[0].iov_len -= (data_start - orig_start); | ||
353 | buf->len -= (data_start - orig_start); | ||
354 | |||
355 | ret = GSS_S_DEFECTIVE_TOKEN; | ||
356 | if (gss_krb5_remove_padding(buf, blocksize)) | ||
357 | goto out; | ||
358 | |||
359 | ret = GSS_S_COMPLETE; | ||
360 | out: | ||
361 | if (md5cksum.data) kfree(md5cksum.data); | ||
362 | return ret; | ||
363 | } | ||
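A user-space model of how the tail of gss_unwrap_kerberos() restores the caller's buffer: after decryption the head holds [token header][confounder][data][pad], and the data is slid back over the header and confounder before the pad is stripped. Lengths and contents here are made up for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* 4-byte "header", 8-byte confounder, 10 data bytes, 6 pad bytes */
    char buf[64] = "HDR!confound0123456789\6\6\6\6\6\6";
    int hdrlen = 4, blocksize = 8, len = 4 + 8 + 10 + 6;

    char *data_start = buf + hdrlen + blocksize;
    int data_len = len - (hdrlen + blocksize);

    memmove(buf, data_start, data_len);     /* slide data back to offset 0 */
    len = data_len;
    len -= buf[len - 1];                    /* strip pad, as remove_padding does */
    printf("%.*s\n", len, buf);             /* 0123456789 */
    return 0;
}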
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 9dfb68377d69..b048bf672da2 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
@@ -35,7 +35,6 @@ | |||
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/socket.h> | ||
39 | #include <linux/module.h> | 38 | #include <linux/module.h> |
40 | #include <linux/sunrpc/msg_prot.h> | 39 | #include <linux/sunrpc/msg_prot.h> |
41 | #include <linux/sunrpc/gss_asn1.h> | 40 | #include <linux/sunrpc/gss_asn1.h> |
@@ -251,13 +250,11 @@ gss_import_sec_context(const void *input_token, size_t bufsize, | |||
251 | 250 | ||
252 | u32 | 251 | u32 |
253 | gss_get_mic(struct gss_ctx *context_handle, | 252 | gss_get_mic(struct gss_ctx *context_handle, |
254 | u32 qop, | ||
255 | struct xdr_buf *message, | 253 | struct xdr_buf *message, |
256 | struct xdr_netobj *mic_token) | 254 | struct xdr_netobj *mic_token) |
257 | { | 255 | { |
258 | return context_handle->mech_type->gm_ops | 256 | return context_handle->mech_type->gm_ops |
259 | ->gss_get_mic(context_handle, | 257 | ->gss_get_mic(context_handle, |
260 | qop, | ||
261 | message, | 258 | message, |
262 | mic_token); | 259 | mic_token); |
263 | } | 260 | } |
@@ -267,16 +264,34 @@ gss_get_mic(struct gss_ctx *context_handle, | |||
267 | u32 | 264 | u32 |
268 | gss_verify_mic(struct gss_ctx *context_handle, | 265 | gss_verify_mic(struct gss_ctx *context_handle, |
269 | struct xdr_buf *message, | 266 | struct xdr_buf *message, |
270 | struct xdr_netobj *mic_token, | 267 | struct xdr_netobj *mic_token) |
271 | u32 *qstate) | ||
272 | { | 268 | { |
273 | return context_handle->mech_type->gm_ops | 269 | return context_handle->mech_type->gm_ops |
274 | ->gss_verify_mic(context_handle, | 270 | ->gss_verify_mic(context_handle, |
275 | message, | 271 | message, |
276 | mic_token, | 272 | mic_token); |
277 | qstate); | ||
278 | } | 273 | } |
279 | 274 | ||
275 | u32 | ||
276 | gss_wrap(struct gss_ctx *ctx_id, | ||
277 | int offset, | ||
278 | struct xdr_buf *buf, | ||
279 | struct page **inpages) | ||
280 | { | ||
281 | return ctx_id->mech_type->gm_ops | ||
282 | ->gss_wrap(ctx_id, offset, buf, inpages); | ||
283 | } | ||
284 | |||
285 | u32 | ||
286 | gss_unwrap(struct gss_ctx *ctx_id, | ||
287 | int offset, | ||
288 | struct xdr_buf *buf) | ||
289 | { | ||
290 | return ctx_id->mech_type->gm_ops | ||
291 | ->gss_unwrap(ctx_id, offset, buf); | ||
292 | } | ||
293 | |||
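The new gss_wrap()/gss_unwrap() entry points are thin dispatchers through the mechanism's gm_ops table, mirroring gss_get_mic()/gss_verify_mic(). A minimal user-space sketch of the pattern; all names and signatures are illustrative, not the kernel's:

#include <stdio.h>

struct ops {
    int (*wrap)(int offset);
    int (*unwrap)(int offset);
};

static int krb5_wrap(int offset)   { printf("wrap@%d\n", offset);   return 0; }
static int krb5_unwrap(int offset) { printf("unwrap@%d\n", offset); return 0; }

static const struct ops krb5_ops = { krb5_wrap, krb5_unwrap };

struct ctx { const struct ops *gm_ops; };   /* one mech per context */

static int gss_wrap(struct ctx *c, int offset)   { return c->gm_ops->wrap(offset); }
static int gss_unwrap(struct ctx *c, int offset) { return c->gm_ops->unwrap(offset); }

int main(void)
{
    struct ctx c = { &krb5_ops };
    gss_wrap(&c, 16);
    return gss_unwrap(&c, 16);
}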
294 | |||
280 | /* gss_delete_sec_context: free all resources associated with context_handle. | 295 | /* gss_delete_sec_context: free all resources associated with context_handle. |
281 | * Note this differs from the RFC 2744-specified prototype in that we don't | 296 | * Note this differs from the RFC 2744-specified prototype in that we don't |
282 | * bother returning an output token, since it would never be used anyway. */ | 297 | * bother returning an output token, since it would never be used anyway. */ |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 6c97d61baa9b..39b3edc14694 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -224,18 +224,13 @@ gss_delete_sec_context_spkm3(void *internal_ctx) { | |||
224 | static u32 | 224 | static u32 |
225 | gss_verify_mic_spkm3(struct gss_ctx *ctx, | 225 | gss_verify_mic_spkm3(struct gss_ctx *ctx, |
226 | struct xdr_buf *signbuf, | 226 | struct xdr_buf *signbuf, |
227 | struct xdr_netobj *checksum, | 227 | struct xdr_netobj *checksum) |
228 | u32 *qstate) { | 228 | { |
229 | u32 maj_stat = 0; | 229 | u32 maj_stat = 0; |
230 | int qop_state = 0; | ||
231 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; | 230 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; |
232 | 231 | ||
233 | dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); | 232 | dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); |
234 | maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state, | 233 | maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); |
235 | SPKM_MIC_TOK); | ||
236 | |||
237 | if (!maj_stat && qop_state) | ||
238 | *qstate = qop_state; | ||
239 | 234 | ||
240 | dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); | 235 | dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); |
241 | return maj_stat; | 236 | return maj_stat; |
@@ -243,15 +238,15 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx, | |||
243 | 238 | ||
244 | static u32 | 239 | static u32 |
245 | gss_get_mic_spkm3(struct gss_ctx *ctx, | 240 | gss_get_mic_spkm3(struct gss_ctx *ctx, |
246 | u32 qop, | ||
247 | struct xdr_buf *message_buffer, | 241 | struct xdr_buf *message_buffer, |
248 | struct xdr_netobj *message_token) { | 242 | struct xdr_netobj *message_token) |
243 | { | ||
249 | u32 err = 0; | 244 | u32 err = 0; |
250 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; | 245 | struct spkm3_ctx *sctx = ctx->internal_ctx_id; |
251 | 246 | ||
252 | dprintk("RPC: gss_get_mic_spkm3\n"); | 247 | dprintk("RPC: gss_get_mic_spkm3\n"); |
253 | 248 | ||
254 | err = spkm3_make_token(sctx, qop, message_buffer, | 249 | err = spkm3_make_token(sctx, message_buffer, |
255 | message_token, SPKM_MIC_TOK); | 250 | message_token, SPKM_MIC_TOK); |
256 | return err; | 251 | return err; |
257 | } | 252 | } |
@@ -264,8 +259,8 @@ static struct gss_api_ops gss_spkm3_ops = { | |||
264 | }; | 259 | }; |
265 | 260 | ||
266 | static struct pf_desc gss_spkm3_pfs[] = { | 261 | static struct pf_desc gss_spkm3_pfs[] = { |
267 | {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"}, | 262 | {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"}, |
268 | {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, | 263 | {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, |
269 | }; | 264 | }; |
270 | 265 | ||
271 | static struct gss_api_mech gss_spkm3_mech = { | 266 | static struct gss_api_mech gss_spkm3_mech = { |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index 25339868d462..148201e929d0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
@@ -51,7 +51,7 @@ | |||
51 | */ | 51 | */ |
52 | 52 | ||
53 | u32 | 53 | u32 |
54 | spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, | 54 | spkm3_make_token(struct spkm3_ctx *ctx, |
55 | struct xdr_buf * text, struct xdr_netobj * token, | 55 | struct xdr_buf * text, struct xdr_netobj * token, |
56 | int toktype) | 56 | int toktype) |
57 | { | 57 | { |
@@ -68,8 +68,6 @@ spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, | |||
68 | dprintk("RPC: spkm3_make_token\n"); | 68 | dprintk("RPC: spkm3_make_token\n"); |
69 | 69 | ||
70 | now = jiffies; | 70 | now = jiffies; |
71 | if (qop_req != 0) | ||
72 | goto out_err; | ||
73 | 71 | ||
74 | if (ctx->ctx_id.len != 16) { | 72 | if (ctx->ctx_id.len != 16) { |
75 | dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", | 73 | dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 65ce81bf0bc4..c3c0d9586103 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c | |||
@@ -52,7 +52,7 @@ u32 | |||
52 | spkm3_read_token(struct spkm3_ctx *ctx, | 52 | spkm3_read_token(struct spkm3_ctx *ctx, |
53 | struct xdr_netobj *read_token, /* checksum */ | 53 | struct xdr_netobj *read_token, /* checksum */ |
54 | struct xdr_buf *message_buffer, /* signbuf */ | 54 | struct xdr_buf *message_buffer, /* signbuf */ |
55 | int *qop_state, int toktype) | 55 | int toktype) |
56 | { | 56 | { |
57 | s32 code; | 57 | s32 code; |
58 | struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; | 58 | struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e3308195374e..e4ada15ed856 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -566,8 +566,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, | |||
566 | 566 | ||
567 | if (rqstp->rq_deferred) /* skip verification of revisited request */ | 567 | if (rqstp->rq_deferred) /* skip verification of revisited request */ |
568 | return SVC_OK; | 568 | return SVC_OK; |
569 | if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL) | 569 | if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) { |
570 | != GSS_S_COMPLETE) { | ||
571 | *authp = rpcsec_gsserr_credproblem; | 570 | *authp = rpcsec_gsserr_credproblem; |
572 | return SVC_DENIED; | 571 | return SVC_DENIED; |
573 | } | 572 | } |
@@ -604,7 +603,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) | |||
604 | xdr_buf_from_iov(&iov, &verf_data); | 603 | xdr_buf_from_iov(&iov, &verf_data); |
605 | p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; | 604 | p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; |
606 | mic.data = (u8 *)(p + 1); | 605 | mic.data = (u8 *)(p + 1); |
607 | maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic); | 606 | maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); |
608 | if (maj_stat != GSS_S_COMPLETE) | 607 | if (maj_stat != GSS_S_COMPLETE) |
609 | return -1; | 608 | return -1; |
610 | *p++ = htonl(mic.len); | 609 | *p++ = htonl(mic.len); |
@@ -710,7 +709,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) | |||
710 | goto out; | 709 | goto out; |
711 | if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) | 710 | if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) |
712 | goto out; | 711 | goto out; |
713 | maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL); | 712 | maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); |
714 | if (maj_stat != GSS_S_COMPLETE) | 713 | if (maj_stat != GSS_S_COMPLETE) |
715 | goto out; | 714 | goto out; |
716 | if (ntohl(svc_getu32(&buf->head[0])) != seq) | 715 | if (ntohl(svc_getu32(&buf->head[0])) != seq) |
@@ -1012,7 +1011,7 @@ svcauth_gss_release(struct svc_rqst *rqstp) | |||
1012 | resv = &resbuf->tail[0]; | 1011 | resv = &resbuf->tail[0]; |
1013 | } | 1012 | } |
1014 | mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; | 1013 | mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; |
1015 | if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic)) | 1014 | if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic)) |
1016 | goto out_err; | 1015 | goto out_err; |
1017 | svc_putu32(resv, htonl(mic.len)); | 1016 | svc_putu32(resv, htonl(mic.len)); |
1018 | memset(mic.data + mic.len, 0, | 1017 | memset(mic.data + mic.len, 0, |
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 9b72d3abf823..f56767aaa927 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c | |||
@@ -7,9 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/socket.h> | ||
11 | #include <linux/module.h> | 10 | #include <linux/module.h> |
12 | #include <linux/in.h> | ||
13 | #include <linux/utsname.h> | 11 | #include <linux/utsname.h> |
14 | #include <linux/sunrpc/clnt.h> | 12 | #include <linux/sunrpc/clnt.h> |
15 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 4ff297a9b15b..890fb5ea0dcb 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -9,8 +9,6 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/socket.h> | ||
13 | #include <linux/in.h> | ||
14 | #include <linux/sunrpc/clnt.h> | 12 | #include <linux/sunrpc/clnt.h> |
15 | #include <linux/sunrpc/auth.h> | 13 | #include <linux/sunrpc/auth.h> |
16 | 14 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f17e6153b688..702ede309b06 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/rpcclnt.c | 2 | * linux/net/sunrpc/clnt.c |
3 | * | 3 | * |
4 | * This file contains the high-level RPC interface. | 4 | * This file contains the high-level RPC interface. |
5 | * It is modeled as a finite state machine to support both synchronous | 5 | * It is modeled as a finite state machine to support both synchronous |
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/in.h> | ||
31 | #include <linux/utsname.h> | 30 | #include <linux/utsname.h> |
32 | 31 | ||
33 | #include <linux/sunrpc/clnt.h> | 32 | #include <linux/sunrpc/clnt.h> |
@@ -53,6 +52,7 @@ static void call_allocate(struct rpc_task *task); | |||
53 | static void call_encode(struct rpc_task *task); | 52 | static void call_encode(struct rpc_task *task); |
54 | static void call_decode(struct rpc_task *task); | 53 | static void call_decode(struct rpc_task *task); |
55 | static void call_bind(struct rpc_task *task); | 54 | static void call_bind(struct rpc_task *task); |
55 | static void call_bind_status(struct rpc_task *task); | ||
56 | static void call_transmit(struct rpc_task *task); | 56 | static void call_transmit(struct rpc_task *task); |
57 | static void call_status(struct rpc_task *task); | 57 | static void call_status(struct rpc_task *task); |
58 | static void call_refresh(struct rpc_task *task); | 58 | static void call_refresh(struct rpc_task *task); |
@@ -517,15 +517,8 @@ void | |||
517 | rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) | 517 | rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) |
518 | { | 518 | { |
519 | struct rpc_xprt *xprt = clnt->cl_xprt; | 519 | struct rpc_xprt *xprt = clnt->cl_xprt; |
520 | 520 | if (xprt->ops->set_buffer_size) | |
521 | xprt->sndsize = 0; | 521 | xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); |
522 | if (sndsize) | ||
523 | xprt->sndsize = sndsize + RPC_SLACK_SPACE; | ||
524 | xprt->rcvsize = 0; | ||
525 | if (rcvsize) | ||
526 | xprt->rcvsize = rcvsize + RPC_SLACK_SPACE; | ||
527 | if (xprt_connected(xprt)) | ||
528 | xprt_sock_setbufsize(xprt); | ||
529 | } | 522 | } |
530 | 523 | ||
531 | /* | 524 | /* |
@@ -685,13 +678,11 @@ call_allocate(struct rpc_task *task) | |||
685 | static void | 678 | static void |
686 | call_encode(struct rpc_task *task) | 679 | call_encode(struct rpc_task *task) |
687 | { | 680 | { |
688 | struct rpc_clnt *clnt = task->tk_client; | ||
689 | struct rpc_rqst *req = task->tk_rqstp; | 681 | struct rpc_rqst *req = task->tk_rqstp; |
690 | struct xdr_buf *sndbuf = &req->rq_snd_buf; | 682 | struct xdr_buf *sndbuf = &req->rq_snd_buf; |
691 | struct xdr_buf *rcvbuf = &req->rq_rcv_buf; | 683 | struct xdr_buf *rcvbuf = &req->rq_rcv_buf; |
692 | unsigned int bufsiz; | 684 | unsigned int bufsiz; |
693 | kxdrproc_t encode; | 685 | kxdrproc_t encode; |
694 | int status; | ||
695 | u32 *p; | 686 | u32 *p; |
696 | 687 | ||
697 | dprintk("RPC: %4d call_encode (status %d)\n", | 688 | dprintk("RPC: %4d call_encode (status %d)\n", |
@@ -719,11 +710,15 @@ call_encode(struct rpc_task *task) | |||
719 | rpc_exit(task, -EIO); | 710 | rpc_exit(task, -EIO); |
720 | return; | 711 | return; |
721 | } | 712 | } |
722 | if (encode && (status = rpcauth_wrap_req(task, encode, req, p, | 713 | if (encode == NULL) |
723 | task->tk_msg.rpc_argp)) < 0) { | 714 | return; |
724 | printk(KERN_WARNING "%s: can't encode arguments: %d\n", | 715 | |
725 | clnt->cl_protname, -status); | 716 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, |
726 | rpc_exit(task, status); | 717 | task->tk_msg.rpc_argp); |
718 | if (task->tk_status == -ENOMEM) { | ||
719 | /* XXX: Is this sane? */ | ||
720 | rpc_delay(task, 3*HZ); | ||
721 | task->tk_status = -EAGAIN; | ||
727 | } | 722 | } |
728 | } | 723 | } |
729 | 724 | ||
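The call_encode change above stops treating an encode failure as a printk-and-exit event: -ENOMEM now becomes a three-second delay followed by -EAGAIN so the request is retried, while other errors propagate through tk_status. A trivial user-space model of that conversion, with illustrative names:

#include <stdio.h>
#include <errno.h>

static int tk_status;

static void handle_encode_status(int status)
{
    tk_status = status;
    if (tk_status == -ENOMEM) {
        /* rpc_delay(task, 3*HZ) in the patch */
        printf("sleep 3s, then retry\n");
        tk_status = -EAGAIN;
    }
}

int main(void)
{
    handle_encode_status(-ENOMEM);
    printf("tk_status=%d (EAGAIN=%d)\n", tk_status, -EAGAIN);
    return 0;
}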
@@ -734,43 +729,95 @@ static void | |||
734 | call_bind(struct rpc_task *task) | 729 | call_bind(struct rpc_task *task) |
735 | { | 730 | { |
736 | struct rpc_clnt *clnt = task->tk_client; | 731 | struct rpc_clnt *clnt = task->tk_client; |
737 | struct rpc_xprt *xprt = clnt->cl_xprt; | ||
738 | |||
739 | dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid, | ||
740 | xprt, (xprt_connected(xprt) ? "is" : "is not")); | ||
741 | 732 | ||
742 | task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect; | 733 | dprintk("RPC: %4d call_bind (status %d)\n", |
734 | task->tk_pid, task->tk_status); | ||
743 | 735 | ||
736 | task->tk_action = call_connect; | ||
744 | if (!clnt->cl_port) { | 737 | if (!clnt->cl_port) { |
745 | task->tk_action = call_connect; | 738 | task->tk_action = call_bind_status; |
746 | task->tk_timeout = RPC_CONNECT_TIMEOUT; | 739 | task->tk_timeout = task->tk_xprt->bind_timeout; |
747 | rpc_getport(task, clnt); | 740 | rpc_getport(task, clnt); |
748 | } | 741 | } |
749 | } | 742 | } |
750 | 743 | ||
751 | /* | 744 | /* |
752 | * 4a. Connect to the RPC server (TCP case) | 745 | * 4a. Sort out bind result |
746 | */ | ||
747 | static void | ||
748 | call_bind_status(struct rpc_task *task) | ||
749 | { | ||
750 | int status = -EACCES; | ||
751 | |||
752 | if (task->tk_status >= 0) { | ||
753 | dprintk("RPC: %4d call_bind_status (status %d)\n", | ||
754 | task->tk_pid, task->tk_status); | ||
755 | task->tk_status = 0; | ||
756 | task->tk_action = call_connect; | ||
757 | return; | ||
758 | } | ||
759 | |||
760 | switch (task->tk_status) { | ||
761 | case -EACCES: | ||
762 | dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", | ||
763 | task->tk_pid); | ||
764 | rpc_delay(task, 3*HZ); | ||
765 | goto retry_bind; | ||
766 | case -ETIMEDOUT: | ||
767 | dprintk("RPC: %4d rpcbind request timed out\n", | ||
768 | task->tk_pid); | ||
769 | if (RPC_IS_SOFT(task)) { | ||
770 | status = -EIO; | ||
771 | break; | ||
772 | } | ||
773 | goto retry_bind; | ||
774 | case -EPFNOSUPPORT: | ||
775 | dprintk("RPC: %4d remote rpcbind service unavailable\n", | ||
776 | task->tk_pid); | ||
777 | break; | ||
778 | case -EPROTONOSUPPORT: | ||
779 | dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", | ||
780 | task->tk_pid); | ||
781 | break; | ||
782 | default: | ||
783 | dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", | ||
784 | task->tk_pid, -task->tk_status); | ||
785 | status = -EIO; | ||
786 | break; | ||
787 | } | ||
788 | |||
789 | rpc_exit(task, status); | ||
790 | return; | ||
791 | |||
792 | retry_bind: | ||
793 | task->tk_status = 0; | ||
794 | task->tk_action = call_bind; | ||
795 | return; | ||
796 | } | ||
797 | |||
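The retry policy that call_bind_status() encodes, reduced to a table for reference; cases and outcomes mirror the function above, with "soft" standing for RPC_IS_SOFT(task):

#include <stdio.h>
#include <errno.h>

static const char *bind_policy(int status, int soft)
{
    switch (status) {
    case -EACCES:          return "delay 3s, then rebind";      /* prog/vers unavailable */
    case -ETIMEDOUT:       return soft ? "fail -EIO" : "rebind";
    case -EPFNOSUPPORT:    return "fail -EACCES";               /* rpcbind unavailable */
    case -EPROTONOSUPPORT: return "fail -EACCES";               /* rpcbind v2 unavailable */
    default:               return "fail -EIO";
    }
}

int main(void)
{
    printf("%s\n", bind_policy(-ETIMEDOUT, 0));  /* rebind */
    printf("%s\n", bind_policy(-ETIMEDOUT, 1));  /* fail -EIO */
    return 0;
}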
798 | /* | ||
799 | * 4b. Connect to the RPC server | ||
753 | */ | 800 | */ |
754 | static void | 801 | static void |
755 | call_connect(struct rpc_task *task) | 802 | call_connect(struct rpc_task *task) |
756 | { | 803 | { |
757 | struct rpc_clnt *clnt = task->tk_client; | 804 | struct rpc_xprt *xprt = task->tk_xprt; |
758 | 805 | ||
759 | dprintk("RPC: %4d call_connect status %d\n", | 806 | dprintk("RPC: %4d call_connect xprt %p %s connected\n", |
760 | task->tk_pid, task->tk_status); | 807 | task->tk_pid, xprt, |
808 | (xprt_connected(xprt) ? "is" : "is not")); | ||
761 | 809 | ||
762 | if (xprt_connected(clnt->cl_xprt)) { | 810 | task->tk_action = call_transmit; |
763 | task->tk_action = call_transmit; | 811 | if (!xprt_connected(xprt)) { |
764 | return; | 812 | task->tk_action = call_connect_status; |
813 | if (task->tk_status < 0) | ||
814 | return; | ||
815 | xprt_connect(task); | ||
765 | } | 816 | } |
766 | task->tk_action = call_connect_status; | ||
767 | if (task->tk_status < 0) | ||
768 | return; | ||
769 | xprt_connect(task); | ||
770 | } | 817 | } |
771 | 818 | ||
772 | /* | 819 | /* |
773 | * 4b. Sort out connect result | 820 | * 4c. Sort out connect result |
774 | */ | 821 | */ |
775 | static void | 822 | static void |
776 | call_connect_status(struct rpc_task *task) | 823 | call_connect_status(struct rpc_task *task) |
@@ -778,6 +825,9 @@ call_connect_status(struct rpc_task *task) | |||
778 | struct rpc_clnt *clnt = task->tk_client; | 825 | struct rpc_clnt *clnt = task->tk_client; |
779 | int status = task->tk_status; | 826 | int status = task->tk_status; |
780 | 827 | ||
828 | dprintk("RPC: %5u call_connect_status (status %d)\n", | ||
829 | task->tk_pid, task->tk_status); | ||
830 | |||
781 | task->tk_status = 0; | 831 | task->tk_status = 0; |
782 | if (status >= 0) { | 832 | if (status >= 0) { |
783 | clnt->cl_stats->netreconn++; | 833 | clnt->cl_stats->netreconn++; |
@@ -785,17 +835,19 @@ call_connect_status(struct rpc_task *task) | |||
785 | return; | 835 | return; |
786 | } | 836 | } |
787 | 837 | ||
788 | /* Something failed: we may have to rebind */ | 838 | /* Something failed: remote service port may have changed */ |
789 | if (clnt->cl_autobind) | 839 | if (clnt->cl_autobind) |
790 | clnt->cl_port = 0; | 840 | clnt->cl_port = 0; |
841 | |||
791 | switch (status) { | 842 | switch (status) { |
792 | case -ENOTCONN: | 843 | case -ENOTCONN: |
793 | case -ETIMEDOUT: | 844 | case -ETIMEDOUT: |
794 | case -EAGAIN: | 845 | case -EAGAIN: |
795 | task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect; | 846 | task->tk_action = call_bind; |
796 | break; | 847 | break; |
797 | default: | 848 | default: |
798 | rpc_exit(task, -EIO); | 849 | rpc_exit(task, -EIO); |
850 | break; | ||
799 | } | 851 | } |
800 | } | 852 | } |
801 | 853 | ||
@@ -815,10 +867,12 @@ call_transmit(struct rpc_task *task) | |||
815 | if (task->tk_status != 0) | 867 | if (task->tk_status != 0) |
816 | return; | 868 | return; |
817 | /* Encode here so that rpcsec_gss can use correct sequence number. */ | 869 | /* Encode here so that rpcsec_gss can use correct sequence number. */ |
818 | if (!task->tk_rqstp->rq_bytes_sent) | 870 | if (task->tk_rqstp->rq_bytes_sent == 0) { |
819 | call_encode(task); | 871 | call_encode(task); |
820 | if (task->tk_status < 0) | 872 | /* Did the encode result in an error condition? */ |
821 | return; | 873 | if (task->tk_status != 0) |
874 | goto out_nosend; | ||
875 | } | ||
822 | xprt_transmit(task); | 876 | xprt_transmit(task); |
823 | if (task->tk_status < 0) | 877 | if (task->tk_status < 0) |
824 | return; | 878 | return; |
@@ -826,6 +880,10 @@ call_transmit(struct rpc_task *task) | |||
826 | task->tk_action = NULL; | 880 | task->tk_action = NULL; |
827 | rpc_wake_up_task(task); | 881 | rpc_wake_up_task(task); |
828 | } | 882 | } |
883 | return; | ||
884 | out_nosend: | ||
885 | /* release socket write lock before attempting to handle error */ | ||
886 | xprt_abort_transmit(task); | ||
829 | } | 887 | } |
830 | 888 | ||
831 | /* | 889 | /* |
@@ -1020,13 +1078,12 @@ static u32 * | |||
1020 | call_header(struct rpc_task *task) | 1078 | call_header(struct rpc_task *task) |
1021 | { | 1079 | { |
1022 | struct rpc_clnt *clnt = task->tk_client; | 1080 | struct rpc_clnt *clnt = task->tk_client; |
1023 | struct rpc_xprt *xprt = clnt->cl_xprt; | ||
1024 | struct rpc_rqst *req = task->tk_rqstp; | 1081 | struct rpc_rqst *req = task->tk_rqstp; |
1025 | u32 *p = req->rq_svec[0].iov_base; | 1082 | u32 *p = req->rq_svec[0].iov_base; |
1026 | 1083 | ||
1027 | /* FIXME: check buffer size? */ | 1084 | /* FIXME: check buffer size? */ |
1028 | if (xprt->stream) | 1085 | |
1029 | *p++ = 0; /* fill in later */ | 1086 | p = xprt_skip_transport_header(task->tk_xprt, p); |
1030 | *p++ = req->rq_xid; /* XID */ | 1087 | *p++ = req->rq_xid; /* XID */ |
1031 | *p++ = htonl(RPC_CALL); /* CALL */ | 1088 | *p++ = htonl(RPC_CALL); /* CALL */ |
1032 | *p++ = htonl(RPC_VERSION); /* RPC version */ | 1089 | *p++ = htonl(RPC_VERSION); /* RPC version */ |
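The open-coded record-marker reservation ("if (xprt->stream) *p++ = 0;") becomes a transport method here. Its definition is not part of this hunk; judging from the call site, a plausible shape, assuming a per-transport header-size field (the tsh_size name is a guess), is:

    /* Sketch inferred from the call site above: a stream transport
     * reserves one 32-bit word for the TCP record marker (filled in at
     * send time), a datagram transport reserves nothing. The tsh_size
     * field name is an assumption. */
    static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p)
    {
            return p + xprt->tsh_size;      /* words to skip: 1 for TCP, 0 for UDP */
    }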
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index 4e81f2766923..a398575f94b8 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #define PMAP_GETPORT 3 | 26 | #define PMAP_GETPORT 3 |
27 | 27 | ||
28 | static struct rpc_procinfo pmap_procedures[]; | 28 | static struct rpc_procinfo pmap_procedures[]; |
29 | static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int); | 29 | static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); |
30 | static void pmap_getport_done(struct rpc_task *); | 30 | static void pmap_getport_done(struct rpc_task *); |
31 | static struct rpc_program pmap_program; | 31 | static struct rpc_program pmap_program; |
32 | static DEFINE_SPINLOCK(pmap_lock); | 32 | static DEFINE_SPINLOCK(pmap_lock); |
@@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt) | |||
65 | map->pm_binding = 1; | 65 | map->pm_binding = 1; |
66 | spin_unlock(&pmap_lock); | 66 | spin_unlock(&pmap_lock); |
67 | 67 | ||
68 | pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot); | 68 | pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0); |
69 | if (IS_ERR(pmap_clnt)) { | 69 | if (IS_ERR(pmap_clnt)) { |
70 | task->tk_status = PTR_ERR(pmap_clnt); | 70 | task->tk_status = PTR_ERR(pmap_clnt); |
71 | goto bailout; | 71 | goto bailout; |
@@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) | |||
112 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); | 112 | NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); |
113 | 113 | ||
114 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); | 114 | sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); |
115 | pmap_clnt = pmap_create(hostname, sin, prot); | 115 | pmap_clnt = pmap_create(hostname, sin, prot, 0); |
116 | if (IS_ERR(pmap_clnt)) | 116 | if (IS_ERR(pmap_clnt)) |
117 | return PTR_ERR(pmap_clnt); | 117 | return PTR_ERR(pmap_clnt); |
118 | 118 | ||
@@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
171 | 171 | ||
172 | sin.sin_family = AF_INET; | 172 | sin.sin_family = AF_INET; |
173 | sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); | 173 | sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); |
174 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP); | 174 | pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); |
175 | if (IS_ERR(pmap_clnt)) { | 175 | if (IS_ERR(pmap_clnt)) { |
176 | error = PTR_ERR(pmap_clnt); | 176 | error = PTR_ERR(pmap_clnt); |
177 | dprintk("RPC: couldn't create pmap client. Error = %d\n", error); | 177 | dprintk("RPC: couldn't create pmap client. Error = %d\n", error); |
@@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) | |||
198 | } | 198 | } |
199 | 199 | ||
200 | static struct rpc_clnt * | 200 | static struct rpc_clnt * |
201 | pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) | 201 | pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged) |
202 | { | 202 | { |
203 | struct rpc_xprt *xprt; | 203 | struct rpc_xprt *xprt; |
204 | struct rpc_clnt *clnt; | 204 | struct rpc_clnt *clnt; |
@@ -208,6 +208,8 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) | |||
208 | if (IS_ERR(xprt)) | 208 | if (IS_ERR(xprt)) |
209 | return (struct rpc_clnt *)xprt; | 209 | return (struct rpc_clnt *)xprt; |
210 | xprt->addr.sin_port = htons(RPC_PMAP_PORT); | 210 | xprt->addr.sin_port = htons(RPC_PMAP_PORT); |
211 | if (!privileged) | ||
212 | xprt->resvport = 0; | ||
211 | 213 | ||
212 | /* printk("pmap: create clnt\n"); */ | 214 | /* printk("pmap: create clnt\n"); */ |
213 | clnt = rpc_new_client(xprt, hostname, | 215 | clnt = rpc_new_client(xprt, hostname, |
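The policy the new flag encodes, spelled out as a comment; this is inferred from the callers in the hunks above, not stated anywhere in the patch:

    /*
     * 'privileged' flag, as used by callers in this file:
     *
     *   pmap_create(..., 0)  GETPORT lookup: any source port will do,
     *                        so don't tie up a scarce reserved port.
     *   pmap_create(..., 1)  rpc_register(): the local portmapper only
     *                        honors SET/UNSET from a privileged port.
     */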
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ded6c63f11ec..4f188d0a5d11 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -76,25 +76,35 @@ int | |||
76 | rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | 76 | rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) |
77 | { | 77 | { |
78 | struct rpc_inode *rpci = RPC_I(inode); | 78 | struct rpc_inode *rpci = RPC_I(inode); |
79 | int res = 0; | 79 | int res = -EPIPE; |
80 | 80 | ||
81 | down(&inode->i_sem); | 81 | down(&inode->i_sem); |
82 | if (rpci->ops == NULL) | ||
83 | goto out; | ||
82 | if (rpci->nreaders) { | 84 | if (rpci->nreaders) { |
83 | list_add_tail(&msg->list, &rpci->pipe); | 85 | list_add_tail(&msg->list, &rpci->pipe); |
84 | rpci->pipelen += msg->len; | 86 | rpci->pipelen += msg->len; |
87 | res = 0; | ||
85 | } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { | 88 | } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { |
86 | if (list_empty(&rpci->pipe)) | 89 | if (list_empty(&rpci->pipe)) |
87 | schedule_delayed_work(&rpci->queue_timeout, | 90 | schedule_delayed_work(&rpci->queue_timeout, |
88 | RPC_UPCALL_TIMEOUT); | 91 | RPC_UPCALL_TIMEOUT); |
89 | list_add_tail(&msg->list, &rpci->pipe); | 92 | list_add_tail(&msg->list, &rpci->pipe); |
90 | rpci->pipelen += msg->len; | 93 | rpci->pipelen += msg->len; |
91 | } else | 94 | res = 0; |
92 | res = -EPIPE; | 95 | } |
96 | out: | ||
93 | up(&inode->i_sem); | 97 | up(&inode->i_sem); |
94 | wake_up(&rpci->waitq); | 98 | wake_up(&rpci->waitq); |
95 | return res; | 99 | return res; |
96 | } | 100 | } |
97 | 101 | ||
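With rpci->ops checked under i_sem, a queued upcall now fails fast once the pipe has been shut down, not only when no reader has opened it. A hedged sketch of a producer consuming the tightened return contract (the helper name is hypothetical; the errno field is part of the existing struct rpc_pipe_msg):

    /* Hypothetical producer: queue an upcall and record the failure in
     * the message so a synchronous waiter can observe it. */
    static int example_queue(struct inode *inode, struct rpc_pipe_msg *msg)
    {
            int err = rpc_queue_upcall(inode, msg);

            if (err < 0)
                    msg->errno = err;       /* -EPIPE: pipe closed or no reader */
            return err;
    }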
102 | static inline void | ||
103 | rpc_inode_setowner(struct inode *inode, void *private) | ||
104 | { | ||
105 | RPC_I(inode)->private = private; | ||
106 | } | ||
107 | |||
98 | static void | 108 | static void |
99 | rpc_close_pipes(struct inode *inode) | 109 | rpc_close_pipes(struct inode *inode) |
100 | { | 110 | { |
@@ -111,15 +121,10 @@ rpc_close_pipes(struct inode *inode) | |||
111 | rpci->ops->release_pipe(inode); | 121 | rpci->ops->release_pipe(inode); |
112 | rpci->ops = NULL; | 122 | rpci->ops = NULL; |
113 | } | 123 | } |
124 | rpc_inode_setowner(inode, NULL); | ||
114 | up(&inode->i_sem); | 125 | up(&inode->i_sem); |
115 | } | 126 | } |
116 | 127 | ||
117 | static inline void | ||
118 | rpc_inode_setowner(struct inode *inode, void *private) | ||
119 | { | ||
120 | RPC_I(inode)->private = private; | ||
121 | } | ||
122 | |||
123 | static struct inode * | 128 | static struct inode * |
124 | rpc_alloc_inode(struct super_block *sb) | 129 | rpc_alloc_inode(struct super_block *sb) |
125 | { | 130 | { |
@@ -501,7 +506,6 @@ repeat: | |||
501 | dentry = dvec[--n]; | 506 | dentry = dvec[--n]; |
502 | if (dentry->d_inode) { | 507 | if (dentry->d_inode) { |
503 | rpc_close_pipes(dentry->d_inode); | 508 | rpc_close_pipes(dentry->d_inode); |
504 | rpc_inode_setowner(dentry->d_inode, NULL); | ||
505 | simple_unlink(dir, dentry); | 509 | simple_unlink(dir, dentry); |
506 | } | 510 | } |
507 | dput(dentry); | 511 | dput(dentry); |
@@ -576,10 +580,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) | |||
576 | int error; | 580 | int error; |
577 | 581 | ||
578 | shrink_dcache_parent(dentry); | 582 | shrink_dcache_parent(dentry); |
579 | if (dentry->d_inode) { | 583 | if (dentry->d_inode) |
580 | rpc_close_pipes(dentry->d_inode); | 584 | rpc_close_pipes(dentry->d_inode); |
581 | rpc_inode_setowner(dentry->d_inode, NULL); | ||
582 | } | ||
583 | if ((error = simple_rmdir(dir, dentry)) != 0) | 585 | if ((error = simple_rmdir(dir, dentry)) != 0) |
584 | return error; | 586 | return error; |
585 | if (!error) { | 587 | if (!error) { |
@@ -732,7 +734,6 @@ rpc_unlink(char *path) | |||
732 | d_drop(dentry); | 734 | d_drop(dentry); |
733 | if (dentry->d_inode) { | 735 | if (dentry->d_inode) { |
734 | rpc_close_pipes(dentry->d_inode); | 736 | rpc_close_pipes(dentry->d_inode); |
735 | rpc_inode_setowner(dentry->d_inode, NULL); | ||
736 | error = simple_unlink(dir, dentry); | 737 | error = simple_unlink(dir, dentry); |
737 | } | 738 | } |
738 | dput(dentry); | 739 | dput(dentry); |
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c new file mode 100644 index 000000000000..8f97e90f36c8 --- /dev/null +++ b/net/sunrpc/socklib.c | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * linux/net/sunrpc/socklib.c | ||
3 | * | ||
4 | * Common socket helper routines for RPC client and server | ||
5 | * | ||
6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/pagemap.h> | ||
11 | #include <linux/udp.h> | ||
12 | #include <linux/sunrpc/xdr.h> | ||
13 | |||
14 | |||
15 | /** | ||
16 | * skb_read_bits - copy some data bits from skb to internal buffer | ||
17 | * @desc: sk_buff copy helper | ||
18 | * @to: copy destination | ||
19 | * @len: number of bytes to copy | ||
20 | * | ||
21 | * Possibly called several times to iterate over an sk_buff and copy | ||
22 | * data out of it. | ||
23 | */ | ||
24 | static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) | ||
25 | { | ||
26 | if (len > desc->count) | ||
27 | len = desc->count; | ||
28 | if (skb_copy_bits(desc->skb, desc->offset, to, len)) | ||
29 | return 0; | ||
30 | desc->count -= len; | ||
31 | desc->offset += len; | ||
32 | return len; | ||
33 | } | ||
34 | |||
35 | /** | ||
36 | * skb_read_and_csum_bits - copy and checksum from skb to buffer | ||
37 | * @desc: sk_buff copy helper | ||
38 | * @to: copy destination | ||
39 | * @len: number of bytes to copy | ||
40 | * | ||
41 | * Same as skb_read_bits, but calculate a checksum at the same time. | ||
42 | */ | ||
43 | static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | ||
44 | { | ||
45 | unsigned int csum2, pos; | ||
46 | |||
47 | if (len > desc->count) | ||
48 | len = desc->count; | ||
49 | pos = desc->offset; | ||
50 | csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); | ||
51 | desc->csum = csum_block_add(desc->csum, csum2, pos); | ||
52 | desc->count -= len; | ||
53 | desc->offset += len; | ||
54 | return len; | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * xdr_partial_copy_from_skb - copy data out of an skb | ||
59 | * @xdr: target XDR buffer | ||
60 | * @base: starting offset | ||
61 | * @desc: sk_buff copy helper | ||
62 | * @copy_actor: virtual method for copying data | ||
63 | * | ||
64 | */ | ||
65 | ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) | ||
66 | { | ||
67 | struct page **ppage = xdr->pages; | ||
68 | unsigned int len, pglen = xdr->page_len; | ||
69 | ssize_t copied = 0; | ||
70 | int ret; | ||
71 | |||
72 | len = xdr->head[0].iov_len; | ||
73 | if (base < len) { | ||
74 | len -= base; | ||
75 | ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); | ||
76 | copied += ret; | ||
77 | if (ret != len || !desc->count) | ||
78 | goto out; | ||
79 | base = 0; | ||
80 | } else | ||
81 | base -= len; | ||
82 | |||
83 | if (unlikely(pglen == 0)) | ||
84 | goto copy_tail; | ||
85 | if (unlikely(base >= pglen)) { | ||
86 | base -= pglen; | ||
87 | goto copy_tail; | ||
88 | } | ||
89 | if (base || xdr->page_base) { | ||
90 | pglen -= base; | ||
91 | base += xdr->page_base; | ||
92 | ppage += base >> PAGE_CACHE_SHIFT; | ||
93 | base &= ~PAGE_CACHE_MASK; | ||
94 | } | ||
95 | do { | ||
96 | char *kaddr; | ||
97 | |||
98 | /* ACL likes to be lazy in allocating pages - ACLs | ||
99 | * are small by default but can get huge. */ | ||
100 | if (unlikely(*ppage == NULL)) { | ||
101 | *ppage = alloc_page(GFP_ATOMIC); | ||
102 | if (unlikely(*ppage == NULL)) { | ||
103 | if (copied == 0) | ||
104 | copied = -ENOMEM; | ||
105 | goto out; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | len = PAGE_CACHE_SIZE; | ||
110 | kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); | ||
111 | if (base) { | ||
112 | len -= base; | ||
113 | if (pglen < len) | ||
114 | len = pglen; | ||
115 | ret = copy_actor(desc, kaddr + base, len); | ||
116 | base = 0; | ||
117 | } else { | ||
118 | if (pglen < len) | ||
119 | len = pglen; | ||
120 | ret = copy_actor(desc, kaddr, len); | ||
121 | } | ||
122 | flush_dcache_page(*ppage); | ||
123 | kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); | ||
124 | copied += ret; | ||
125 | if (ret != len || !desc->count) | ||
126 | goto out; | ||
127 | ppage++; | ||
128 | } while ((pglen -= len) != 0); | ||
129 | copy_tail: | ||
130 | len = xdr->tail[0].iov_len; | ||
131 | if (base < len) | ||
132 | copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); | ||
133 | out: | ||
134 | return copied; | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * csum_partial_copy_to_xdr - checksum and copy data | ||
139 | * @xdr: target XDR buffer | ||
140 | * @skb: source skb | ||
141 | * | ||
142 | * We have set things up such that we perform the checksum of the UDP | ||
143 | * packet in parallel with the copies into the RPC client iovec. -DaveM | ||
144 | */ | ||
145 | int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | ||
146 | { | ||
147 | skb_reader_t desc; | ||
148 | |||
149 | desc.skb = skb; | ||
150 | desc.offset = sizeof(struct udphdr); | ||
151 | desc.count = skb->len - desc.offset; | ||
152 | |||
153 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
154 | goto no_checksum; | ||
155 | |||
156 | desc.csum = csum_partial(skb->data, desc.offset, skb->csum); | ||
157 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) | ||
158 | return -1; | ||
159 | if (desc.offset != skb->len) { | ||
160 | unsigned int csum2; | ||
161 | csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); | ||
162 | desc.csum = csum_block_add(desc.csum, csum2, desc.offset); | ||
163 | } | ||
164 | if (desc.count) | ||
165 | return -1; | ||
166 | if ((unsigned short)csum_fold(desc.csum)) | ||
167 | return -1; | ||
168 | return 0; | ||
169 | no_checksum: | ||
170 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) | ||
171 | return -1; | ||
172 | if (desc.count) | ||
173 | return -1; | ||
174 | return 0; | ||
175 | } | ||
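For orientation, a sketch of the intended caller in a UDP receive path; the function name is illustrative, while rq_private_buf is the existing reply-buffer field in struct rpc_rqst:

    /* Illustrative caller: copy a UDP reply into the request's receive
     * buffer, verifying the checksum in the same pass. A non-zero
     * return means the datagram was corrupt and should be dropped. */
    static int example_udp_copy(struct rpc_rqst *req, struct sk_buff *skb)
    {
            if (csum_partial_copy_to_xdr(&req->rq_private_buf, skb))
                    return -EAGAIN; /* let RPC retransmit logic recover */
            return 0;
    }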
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index ed48ff022d35..2387e7b823ff 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/socket.h> | ||
14 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
15 | #include <linux/uio.h> | 14 | #include <linux/uio.h> |
16 | #include <linux/unistd.h> | 15 | #include <linux/unistd.h> |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 30ec3efc48a6..f16e7cdd6150 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -548,9 +548,6 @@ svc_write_space(struct sock *sk) | |||
548 | /* | 548 | /* |
549 | * Receive a datagram from a UDP socket. | 549 | * Receive a datagram from a UDP socket. |
550 | */ | 550 | */ |
551 | extern int | ||
552 | csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb); | ||
553 | |||
554 | static int | 551 | static int |
555 | svc_udp_recvfrom(struct svc_rqst *rqstp) | 552 | svc_udp_recvfrom(struct svc_rqst *rqstp) |
556 | { | 553 | { |
@@ -587,7 +584,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
587 | struct timeval tv; | 584 | struct timeval tv; |
588 | 585 | ||
589 | tv.tv_sec = xtime.tv_sec; | 586 | tv.tv_sec = xtime.tv_sec; |
590 | tv.tv_usec = xtime.tv_nsec * 1000; | 587 | tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; |
591 | skb_set_timestamp(skb, &tv); | 588 | skb_set_timestamp(skb, &tv); |
592 | /* Don't enable netstamp, sunrpc doesn't | 589 | /* Don't enable netstamp, sunrpc doesn't |
593 | need that much accuracy */ | 590 | need that much accuracy */ |
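The old expression multiplied nanoseconds by 1000 and so stored a value a million times too large in tv_usec; microseconds are nanoseconds divided by 1000. The fix in miniature, as a hypothetical standalone helper:

    /* 1 usec == 1000 nsec, so converting nsec -> usec divides, never
     * multiplies. */
    static inline long nsec_to_usec(long nsec)
    {
            return nsec / NSEC_PER_USEC;    /* NSEC_PER_USEC == 1000L */
    }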
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 1b9616a12e24..d0c9f460e411 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c | |||
@@ -119,8 +119,18 @@ done: | |||
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; | ||
123 | unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; | ||
124 | unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | ||
125 | EXPORT_SYMBOL(xprt_min_resvport); | ||
126 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | ||
127 | EXPORT_SYMBOL(xprt_max_resvport); | ||
128 | |||
129 | |||
122 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | 130 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; |
123 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | 131 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; |
132 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | ||
133 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | ||
124 | 134 | ||
125 | static ctl_table debug_table[] = { | 135 | static ctl_table debug_table[] = { |
126 | { | 136 | { |
@@ -177,6 +187,28 @@ static ctl_table debug_table[] = { | |||
177 | .extra1 = &min_slot_table_size, | 187 | .extra1 = &min_slot_table_size, |
178 | .extra2 = &max_slot_table_size | 188 | .extra2 = &max_slot_table_size |
179 | }, | 189 | }, |
190 | { | ||
191 | .ctl_name = CTL_MIN_RESVPORT, | ||
192 | .procname = "min_resvport", | ||
193 | .data = &xprt_min_resvport, | ||
194 | .maxlen = sizeof(unsigned int), | ||
195 | .mode = 0644, | ||
196 | .proc_handler = &proc_dointvec_minmax, | ||
197 | .strategy = &sysctl_intvec, | ||
198 | .extra1 = &xprt_min_resvport_limit, | ||
199 | .extra2 = &xprt_max_resvport_limit | ||
200 | }, | ||
201 | { | ||
202 | .ctl_name = CTL_MAX_RESVPORT, | ||
203 | .procname = "max_resvport", | ||
204 | .data = &xprt_max_resvport, | ||
205 | .maxlen = sizeof(unsigned int), | ||
206 | .mode = 0644, | ||
207 | .proc_handler = &proc_dointvec_minmax, | ||
208 | .strategy = &sysctl_intvec, | ||
209 | .extra1 = &xprt_min_resvport_limit, | ||
210 | .extra2 = &xprt_max_resvport_limit | ||
211 | }, | ||
180 | { .ctl_name = 0 } | 212 | { .ctl_name = 0 } |
181 | }; | 213 | }; |
182 | 214 | ||
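The two new sysctls make the reserved source-port search range tunable at runtime. A hypothetical consumer, sketched under the assumption that a transport walks the range downward when binding; only the two sysctl variables come from this file:

    /* Hypothetical bind loop: try each port in the tunable range until
     * one is free. */
    static int example_bindresvport(struct socket *sock, struct sockaddr_in *myaddr)
    {
            unsigned int port = xprt_max_resvport;
            int err = -EADDRINUSE;

            do {
                    myaddr->sin_port = htons(port);
                    err = sock->ops->bind(sock, (struct sockaddr *)myaddr,
                                          sizeof(*myaddr));
                    if (err == 0)
                            break;
            } while (--port >= xprt_min_resvport);
            return err;
    }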
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index fde16f40a581..32df43372ee9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -6,15 +6,12 @@ | |||
6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | 6 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | ||
9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
10 | #include <linux/socket.h> | ||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/pagemap.h> | 13 | #include <linux/pagemap.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/in.h> | ||
16 | #include <linux/net.h> | ||
17 | #include <net/sock.h> | ||
18 | #include <linux/sunrpc/xdr.h> | 15 | #include <linux/sunrpc/xdr.h> |
19 | #include <linux/sunrpc/msg_prot.h> | 16 | #include <linux/sunrpc/msg_prot.h> |
20 | 17 | ||
@@ -176,178 +173,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, | |||
176 | xdr->buflen += len; | 173 | xdr->buflen += len; |
177 | } | 174 | } |
178 | 175 | ||
179 | ssize_t | ||
180 | xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, | ||
181 | skb_reader_t *desc, | ||
182 | skb_read_actor_t copy_actor) | ||
183 | { | ||
184 | struct page **ppage = xdr->pages; | ||
185 | unsigned int len, pglen = xdr->page_len; | ||
186 | ssize_t copied = 0; | ||
187 | int ret; | ||
188 | |||
189 | len = xdr->head[0].iov_len; | ||
190 | if (base < len) { | ||
191 | len -= base; | ||
192 | ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); | ||
193 | copied += ret; | ||
194 | if (ret != len || !desc->count) | ||
195 | goto out; | ||
196 | base = 0; | ||
197 | } else | ||
198 | base -= len; | ||
199 | |||
200 | if (pglen == 0) | ||
201 | goto copy_tail; | ||
202 | if (base >= pglen) { | ||
203 | base -= pglen; | ||
204 | goto copy_tail; | ||
205 | } | ||
206 | if (base || xdr->page_base) { | ||
207 | pglen -= base; | ||
208 | base += xdr->page_base; | ||
209 | ppage += base >> PAGE_CACHE_SHIFT; | ||
210 | base &= ~PAGE_CACHE_MASK; | ||
211 | } | ||
212 | do { | ||
213 | char *kaddr; | ||
214 | |||
215 | /* ACL likes to be lazy in allocating pages - ACLs | ||
216 | * are small by default but can get huge. */ | ||
217 | if (unlikely(*ppage == NULL)) { | ||
218 | *ppage = alloc_page(GFP_ATOMIC); | ||
219 | if (unlikely(*ppage == NULL)) { | ||
220 | if (copied == 0) | ||
221 | copied = -ENOMEM; | ||
222 | goto out; | ||
223 | } | ||
224 | } | ||
225 | |||
226 | len = PAGE_CACHE_SIZE; | ||
227 | kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); | ||
228 | if (base) { | ||
229 | len -= base; | ||
230 | if (pglen < len) | ||
231 | len = pglen; | ||
232 | ret = copy_actor(desc, kaddr + base, len); | ||
233 | base = 0; | ||
234 | } else { | ||
235 | if (pglen < len) | ||
236 | len = pglen; | ||
237 | ret = copy_actor(desc, kaddr, len); | ||
238 | } | ||
239 | flush_dcache_page(*ppage); | ||
240 | kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); | ||
241 | copied += ret; | ||
242 | if (ret != len || !desc->count) | ||
243 | goto out; | ||
244 | ppage++; | ||
245 | } while ((pglen -= len) != 0); | ||
246 | copy_tail: | ||
247 | len = xdr->tail[0].iov_len; | ||
248 | if (base < len) | ||
249 | copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); | ||
250 | out: | ||
251 | return copied; | ||
252 | } | ||
253 | |||
254 | |||
255 | int | ||
256 | xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, | ||
257 | struct xdr_buf *xdr, unsigned int base, int msgflags) | ||
258 | { | ||
259 | struct page **ppage = xdr->pages; | ||
260 | unsigned int len, pglen = xdr->page_len; | ||
261 | int err, ret = 0; | ||
262 | ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); | ||
263 | |||
264 | len = xdr->head[0].iov_len; | ||
265 | if (base < len || (addr != NULL && base == 0)) { | ||
266 | struct kvec iov = { | ||
267 | .iov_base = xdr->head[0].iov_base + base, | ||
268 | .iov_len = len - base, | ||
269 | }; | ||
270 | struct msghdr msg = { | ||
271 | .msg_name = addr, | ||
272 | .msg_namelen = addrlen, | ||
273 | .msg_flags = msgflags, | ||
274 | }; | ||
275 | if (xdr->len > len) | ||
276 | msg.msg_flags |= MSG_MORE; | ||
277 | |||
278 | if (iov.iov_len != 0) | ||
279 | err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | ||
280 | else | ||
281 | err = kernel_sendmsg(sock, &msg, NULL, 0, 0); | ||
282 | if (ret == 0) | ||
283 | ret = err; | ||
284 | else if (err > 0) | ||
285 | ret += err; | ||
286 | if (err != iov.iov_len) | ||
287 | goto out; | ||
288 | base = 0; | ||
289 | } else | ||
290 | base -= len; | ||
291 | |||
292 | if (pglen == 0) | ||
293 | goto copy_tail; | ||
294 | if (base >= pglen) { | ||
295 | base -= pglen; | ||
296 | goto copy_tail; | ||
297 | } | ||
298 | if (base || xdr->page_base) { | ||
299 | pglen -= base; | ||
300 | base += xdr->page_base; | ||
301 | ppage += base >> PAGE_CACHE_SHIFT; | ||
302 | base &= ~PAGE_CACHE_MASK; | ||
303 | } | ||
304 | |||
305 | sendpage = sock->ops->sendpage ? : sock_no_sendpage; | ||
306 | do { | ||
307 | int flags = msgflags; | ||
308 | |||
309 | len = PAGE_CACHE_SIZE; | ||
310 | if (base) | ||
311 | len -= base; | ||
312 | if (pglen < len) | ||
313 | len = pglen; | ||
314 | |||
315 | if (pglen != len || xdr->tail[0].iov_len != 0) | ||
316 | flags |= MSG_MORE; | ||
317 | |||
318 | /* Hmm... We might be dealing with highmem pages */ | ||
319 | if (PageHighMem(*ppage)) | ||
320 | sendpage = sock_no_sendpage; | ||
321 | err = sendpage(sock, *ppage, base, len, flags); | ||
322 | if (ret == 0) | ||
323 | ret = err; | ||
324 | else if (err > 0) | ||
325 | ret += err; | ||
326 | if (err != len) | ||
327 | goto out; | ||
328 | base = 0; | ||
329 | ppage++; | ||
330 | } while ((pglen -= len) != 0); | ||
331 | copy_tail: | ||
332 | len = xdr->tail[0].iov_len; | ||
333 | if (base < len) { | ||
334 | struct kvec iov = { | ||
335 | .iov_base = xdr->tail[0].iov_base + base, | ||
336 | .iov_len = len - base, | ||
337 | }; | ||
338 | struct msghdr msg = { | ||
339 | .msg_flags = msgflags, | ||
340 | }; | ||
341 | err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | ||
342 | if (ret == 0) | ||
343 | ret = err; | ||
344 | else if (err > 0) | ||
345 | ret += err; | ||
346 | } | ||
347 | out: | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | 176 | ||
352 | /* | 177 | /* |
353 | * Helper routines for doing 'memmove' like operations on a struct xdr_buf | 178 | * Helper routines for doing 'memmove' like operations on a struct xdr_buf |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3c654e06b084..6dda3860351f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -10,12 +10,12 @@ | |||
10 | * one is available. Otherwise, it sleeps on the backlog queue | 10 | * one is available. Otherwise, it sleeps on the backlog queue |
11 | * (xprt_reserve). | 11 | * (xprt_reserve). |
12 | * - Next, the caller puts together the RPC message, stuffs it into | 12 | * - Next, the caller puts together the RPC message, stuffs it into |
13 | * the request struct, and calls xprt_call(). | 13 | * the request struct, and calls xprt_transmit(). |
14 | * - xprt_call transmits the message and installs the caller on the | 14 | * - xprt_transmit sends the message and installs the caller on the |
15 | * socket's wait list. At the same time, it installs a timer that | 15 | * transport's wait list. At the same time, it installs a timer that |
16 | * is run after the packet's timeout has expired. | 16 | * is run after the packet's timeout has expired. |
17 | * - When a packet arrives, the data_ready handler walks the list of | 17 | * - When a packet arrives, the data_ready handler walks the list of |
18 | * pending requests for that socket. If a matching XID is found, the | 18 | * pending requests for that transport. If a matching XID is found, the |
19 | * caller is woken up, and the timer removed. | 19 | * caller is woken up, and the timer removed. |
20 | * - When no reply arrives within the timeout interval, the timer is | 20 | * - When no reply arrives within the timeout interval, the timer is |
21 | * fired by the kernel and runs xprt_timer(). It either adjusts the | 21 | * fired by the kernel and runs xprt_timer(). It either adjusts the |
@@ -33,36 +33,17 @@ | |||
33 | * | 33 | * |
34 | * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> | 34 | * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> |
35 | * | 35 | * |
36 | * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com> | 36 | * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> |
37 | * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com> | ||
38 | * TCP NFS related read + write fixes | ||
39 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> | ||
40 | * | ||
41 | * Rewrite of large parts of the code in order to stabilize TCP stuff. | ||
42 | * Fix behaviour when socket buffer is full. | ||
43 | * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no> | ||
44 | */ | 37 | */ |
45 | 38 | ||
39 | #include <linux/module.h> | ||
40 | |||
46 | #include <linux/types.h> | 41 | #include <linux/types.h> |
47 | #include <linux/slab.h> | 42 | #include <linux/interrupt.h> |
48 | #include <linux/capability.h> | ||
49 | #include <linux/sched.h> | ||
50 | #include <linux/errno.h> | ||
51 | #include <linux/socket.h> | ||
52 | #include <linux/in.h> | ||
53 | #include <linux/net.h> | ||
54 | #include <linux/mm.h> | ||
55 | #include <linux/udp.h> | ||
56 | #include <linux/tcp.h> | ||
57 | #include <linux/sunrpc/clnt.h> | ||
58 | #include <linux/file.h> | ||
59 | #include <linux/workqueue.h> | 43 | #include <linux/workqueue.h> |
60 | #include <linux/random.h> | 44 | #include <linux/random.h> |
61 | 45 | ||
62 | #include <net/sock.h> | 46 | #include <linux/sunrpc/clnt.h> |
63 | #include <net/checksum.h> | ||
64 | #include <net/udp.h> | ||
65 | #include <net/tcp.h> | ||
66 | 47 | ||
67 | /* | 48 | /* |
68 | * Local variables | 49 | * Local variables |
@@ -73,81 +54,90 @@ | |||
73 | # define RPCDBG_FACILITY RPCDBG_XPRT | 54 | # define RPCDBG_FACILITY RPCDBG_XPRT |
74 | #endif | 55 | #endif |
75 | 56 | ||
76 | #define XPRT_MAX_BACKOFF (8) | ||
77 | #define XPRT_IDLE_TIMEOUT (5*60*HZ) | ||
78 | #define XPRT_MAX_RESVPORT (800) | ||
79 | |||
80 | /* | 57 | /* |
81 | * Local functions | 58 | * Local functions |
82 | */ | 59 | */ |
83 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); | 60 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); |
84 | static inline void do_xprt_reserve(struct rpc_task *); | 61 | static inline void do_xprt_reserve(struct rpc_task *); |
85 | static void xprt_disconnect(struct rpc_xprt *); | ||
86 | static void xprt_connect_status(struct rpc_task *task); | 62 | static void xprt_connect_status(struct rpc_task *task); |
87 | static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap, | ||
88 | struct rpc_timeout *to); | ||
89 | static struct socket *xprt_create_socket(struct rpc_xprt *, int, int); | ||
90 | static void xprt_bind_socket(struct rpc_xprt *, struct socket *); | ||
91 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); | 63 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); |
92 | 64 | ||
93 | static int xprt_clear_backlog(struct rpc_xprt *xprt); | ||
94 | |||
95 | #ifdef RPC_DEBUG_DATA | ||
96 | /* | 65 | /* |
97 | * Print the buffer contents (first 128 bytes only--just enough for | 66 | * The transport code maintains an estimate of the maximum number of out-
98 | * diropres return). | 67 | * standing RPC requests, using a smoothed version of the congestion |
68 | * avoidance implemented in 44BSD. This is basically the Van Jacobson | ||
69 | * congestion algorithm: If a retransmit occurs, the congestion window is | ||
70 | * halved; otherwise, it is incremented by 1/cwnd when | ||
71 | * | ||
72 | * - a reply is received and | ||
73 | * - a full number of requests are outstanding and | ||
74 | * - the congestion window hasn't been updated recently. | ||
99 | */ | 75 | */ |
100 | static void | 76 | #define RPC_CWNDSHIFT (8U) |
101 | xprt_pktdump(char *msg, u32 *packet, unsigned int count) | 77 | #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) |
102 | { | 78 | #define RPC_INITCWND RPC_CWNDSCALE |
103 | u8 *buf = (u8 *) packet; | 79 | #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) |
104 | int j; | ||
105 | |||
106 | dprintk("RPC: %s\n", msg); | ||
107 | for (j = 0; j < count && j < 128; j += 4) { | ||
108 | if (!(j & 31)) { | ||
109 | if (j) | ||
110 | dprintk("\n"); | ||
111 | dprintk("0x%04x ", j); | ||
112 | } | ||
113 | dprintk("%02x%02x%02x%02x ", | ||
114 | buf[j], buf[j+1], buf[j+2], buf[j+3]); | ||
115 | } | ||
116 | dprintk("\n"); | ||
117 | } | ||
118 | #else | ||
119 | static inline void | ||
120 | xprt_pktdump(char *msg, u32 *packet, unsigned int count) | ||
121 | { | ||
122 | /* NOP */ | ||
123 | } | ||
124 | #endif | ||
125 | 80 | ||
126 | /* | 81 | #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) |
127 | * Look up RPC transport given an INET socket | 82 | |
83 | /** | ||
84 | * xprt_reserve_xprt - serialize write access to transports | ||
85 | * @task: task that is requesting access to the transport | ||
86 | * | ||
87 | * This prevents mixing the payload of separate requests, and prevents | ||
88 | * transport connects from colliding with writes. No congestion control | ||
89 | * is provided. | ||
128 | */ | 90 | */ |
129 | static inline struct rpc_xprt * | 91 | int xprt_reserve_xprt(struct rpc_task *task) |
130 | xprt_from_sock(struct sock *sk) | ||
131 | { | 92 | { |
132 | return (struct rpc_xprt *) sk->sk_user_data; | 93 | struct rpc_xprt *xprt = task->tk_xprt; |
94 | struct rpc_rqst *req = task->tk_rqstp; | ||
95 | |||
96 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | ||
97 | if (task == xprt->snd_task) | ||
98 | return 1; | ||
99 | if (task == NULL) | ||
100 | return 0; | ||
101 | goto out_sleep; | ||
102 | } | ||
103 | xprt->snd_task = task; | ||
104 | if (req) { | ||
105 | req->rq_bytes_sent = 0; | ||
106 | req->rq_ntrans++; | ||
107 | } | ||
108 | return 1; | ||
109 | |||
110 | out_sleep: | ||
111 | dprintk("RPC: %4d failed to lock transport %p\n", | ||
112 | task->tk_pid, xprt); | ||
113 | task->tk_timeout = 0; | ||
114 | task->tk_status = -EAGAIN; | ||
115 | if (req && req->rq_ntrans) | ||
116 | rpc_sleep_on(&xprt->resend, task, NULL, NULL); | ||
117 | else | ||
118 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); | ||
119 | return 0; | ||
133 | } | 120 | } |
134 | 121 | ||
135 | /* | 122 | /**
136 | * Serialize write access to sockets, in order to prevent different | 123 | * xprt_reserve_xprt_cong - serialize write access to transports |
137 | * requests from interfering with each other. | 124 | * @task: task that is requesting access to the transport |
138 | * Also prevents TCP socket connects from colliding with writes. | 125 | * |
126 | * Same as xprt_reserve_xprt, but Van Jacobson congestion control is | ||
127 | * integrated into the decision of whether a request is allowed to be | ||
128 | * woken up and given access to the transport. | ||
139 | */ | 129 | */ |
140 | static int | 130 | int xprt_reserve_xprt_cong(struct rpc_task *task) |
141 | __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | ||
142 | { | 131 | { |
132 | struct rpc_xprt *xprt = task->tk_xprt; | ||
143 | struct rpc_rqst *req = task->tk_rqstp; | 133 | struct rpc_rqst *req = task->tk_rqstp; |
144 | 134 | ||
145 | if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) { | 135 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
146 | if (task == xprt->snd_task) | 136 | if (task == xprt->snd_task) |
147 | return 1; | 137 | return 1; |
148 | goto out_sleep; | 138 | goto out_sleep; |
149 | } | 139 | } |
150 | if (xprt->nocong || __xprt_get_cong(xprt, task)) { | 140 | if (__xprt_get_cong(xprt, task)) { |
151 | xprt->snd_task = task; | 141 | xprt->snd_task = task; |
152 | if (req) { | 142 | if (req) { |
153 | req->rq_bytes_sent = 0; | 143 | req->rq_bytes_sent = 0; |
@@ -156,10 +146,10 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | |||
156 | return 1; | 146 | return 1; |
157 | } | 147 | } |
158 | smp_mb__before_clear_bit(); | 148 | smp_mb__before_clear_bit(); |
159 | clear_bit(XPRT_LOCKED, &xprt->sockstate); | 149 | clear_bit(XPRT_LOCKED, &xprt->state); |
160 | smp_mb__after_clear_bit(); | 150 | smp_mb__after_clear_bit(); |
161 | out_sleep: | 151 | out_sleep: |
162 | dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt); | 152 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); |
163 | task->tk_timeout = 0; | 153 | task->tk_timeout = 0; |
164 | task->tk_status = -EAGAIN; | 154 | task->tk_status = -EAGAIN; |
165 | if (req && req->rq_ntrans) | 155 | if (req && req->rq_ntrans) |
@@ -169,26 +159,52 @@ out_sleep: | |||
169 | return 0; | 159 | return 0; |
170 | } | 160 | } |
171 | 161 | ||
172 | static inline int | 162 | static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) |
173 | xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | ||
174 | { | 163 | { |
175 | int retval; | 164 | int retval; |
176 | 165 | ||
177 | spin_lock_bh(&xprt->sock_lock); | 166 | spin_lock_bh(&xprt->transport_lock); |
178 | retval = __xprt_lock_write(xprt, task); | 167 | retval = xprt->ops->reserve_xprt(task); |
179 | spin_unlock_bh(&xprt->sock_lock); | 168 | spin_unlock_bh(&xprt->transport_lock); |
180 | return retval; | 169 | return retval; |
181 | } | 170 | } |
182 | 171 | ||
172 | static void __xprt_lock_write_next(struct rpc_xprt *xprt) | ||
173 | { | ||
174 | struct rpc_task *task; | ||
175 | struct rpc_rqst *req; | ||
183 | 176 | ||
184 | static void | 177 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
185 | __xprt_lock_write_next(struct rpc_xprt *xprt) | 178 | return; |
179 | |||
180 | task = rpc_wake_up_next(&xprt->resend); | ||
181 | if (!task) { | ||
182 | task = rpc_wake_up_next(&xprt->sending); | ||
183 | if (!task) | ||
184 | goto out_unlock; | ||
185 | } | ||
186 | |||
187 | req = task->tk_rqstp; | ||
188 | xprt->snd_task = task; | ||
189 | if (req) { | ||
190 | req->rq_bytes_sent = 0; | ||
191 | req->rq_ntrans++; | ||
192 | } | ||
193 | return; | ||
194 | |||
195 | out_unlock: | ||
196 | smp_mb__before_clear_bit(); | ||
197 | clear_bit(XPRT_LOCKED, &xprt->state); | ||
198 | smp_mb__after_clear_bit(); | ||
199 | } | ||
200 | |||
201 | static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) | ||
186 | { | 202 | { |
187 | struct rpc_task *task; | 203 | struct rpc_task *task; |
188 | 204 | ||
189 | if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) | 205 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
190 | return; | 206 | return; |
191 | if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) | 207 | if (RPCXPRT_CONGESTED(xprt)) |
192 | goto out_unlock; | 208 | goto out_unlock; |
193 | task = rpc_wake_up_next(&xprt->resend); | 209 | task = rpc_wake_up_next(&xprt->resend); |
194 | if (!task) { | 210 | if (!task) { |
@@ -196,7 +212,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) | |||
196 | if (!task) | 212 | if (!task) |
197 | goto out_unlock; | 213 | goto out_unlock; |
198 | } | 214 | } |
199 | if (xprt->nocong || __xprt_get_cong(xprt, task)) { | 215 | if (__xprt_get_cong(xprt, task)) { |
200 | struct rpc_rqst *req = task->tk_rqstp; | 216 | struct rpc_rqst *req = task->tk_rqstp; |
201 | xprt->snd_task = task; | 217 | xprt->snd_task = task; |
202 | if (req) { | 218 | if (req) { |
@@ -207,87 +223,52 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) | |||
207 | } | 223 | } |
208 | out_unlock: | 224 | out_unlock: |
209 | smp_mb__before_clear_bit(); | 225 | smp_mb__before_clear_bit(); |
210 | clear_bit(XPRT_LOCKED, &xprt->sockstate); | 226 | clear_bit(XPRT_LOCKED, &xprt->state); |
211 | smp_mb__after_clear_bit(); | 227 | smp_mb__after_clear_bit(); |
212 | } | 228 | } |
213 | 229 | ||
214 | /* | 230 | /** |
215 | * Releases the socket for use by other requests. | 231 | * xprt_release_xprt - allow other requests to use a transport |
232 | * @xprt: transport with other tasks potentially waiting | ||
233 | * @task: task that is releasing access to the transport | ||
234 | * | ||
235 | * Note that "task" can be NULL. No congestion control is provided. | ||
216 | */ | 236 | */ |
217 | static void | 237 | void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) |
218 | __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) | ||
219 | { | 238 | { |
220 | if (xprt->snd_task == task) { | 239 | if (xprt->snd_task == task) { |
221 | xprt->snd_task = NULL; | 240 | xprt->snd_task = NULL; |
222 | smp_mb__before_clear_bit(); | 241 | smp_mb__before_clear_bit(); |
223 | clear_bit(XPRT_LOCKED, &xprt->sockstate); | 242 | clear_bit(XPRT_LOCKED, &xprt->state); |
224 | smp_mb__after_clear_bit(); | 243 | smp_mb__after_clear_bit(); |
225 | __xprt_lock_write_next(xprt); | 244 | __xprt_lock_write_next(xprt); |
226 | } | 245 | } |
227 | } | 246 | } |
228 | 247 | ||
229 | static inline void | 248 | /** |
230 | xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) | 249 | * xprt_release_xprt_cong - allow other requests to use a transport |
231 | { | 250 | * @xprt: transport with other tasks potentially waiting |
232 | spin_lock_bh(&xprt->sock_lock); | 251 | * @task: task that is releasing access to the transport |
233 | __xprt_release_write(xprt, task); | 252 | * |
234 | spin_unlock_bh(&xprt->sock_lock); | 253 | * Note that "task" can be NULL. Another task is awoken to use the |
235 | } | 254 | * transport if the transport's congestion window allows it. |
236 | |||
237 | /* | ||
238 | * Write data to socket. | ||
239 | */ | 255 | */ |
240 | static inline int | 256 | void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) |
241 | xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) | ||
242 | { | 257 | { |
243 | struct socket *sock = xprt->sock; | 258 | if (xprt->snd_task == task) { |
244 | struct xdr_buf *xdr = &req->rq_snd_buf; | 259 | xprt->snd_task = NULL; |
245 | struct sockaddr *addr = NULL; | 260 | smp_mb__before_clear_bit(); |
246 | int addrlen = 0; | 261 | clear_bit(XPRT_LOCKED, &xprt->state); |
247 | unsigned int skip; | 262 | smp_mb__after_clear_bit(); |
248 | int result; | 263 | __xprt_lock_write_next_cong(xprt); |
249 | |||
250 | if (!sock) | ||
251 | return -ENOTCONN; | ||
252 | |||
253 | xprt_pktdump("packet data:", | ||
254 | req->rq_svec->iov_base, | ||
255 | req->rq_svec->iov_len); | ||
256 | |||
257 | /* For UDP, we need to provide an address */ | ||
258 | if (!xprt->stream) { | ||
259 | addr = (struct sockaddr *) &xprt->addr; | ||
260 | addrlen = sizeof(xprt->addr); | ||
261 | } | 264 | } |
262 | /* Don't repeat bytes */ | 265 | }
263 | skip = req->rq_bytes_sent; | ||
264 | |||
265 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | ||
266 | result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); | ||
267 | |||
268 | dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); | ||
269 | |||
270 | if (result >= 0) | ||
271 | return result; | ||
272 | 266 | ||
273 | switch (result) { | 267 | static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) |
274 | case -ECONNREFUSED: | 268 | { |
275 | /* When the server has died, an ICMP port unreachable message | 269 | spin_lock_bh(&xprt->transport_lock); |
276 | * prompts ECONNREFUSED. | 270 | xprt->ops->release_xprt(xprt, task); |
277 | */ | 271 | spin_unlock_bh(&xprt->transport_lock); |
278 | case -EAGAIN: | ||
279 | break; | ||
280 | case -ECONNRESET: | ||
281 | case -ENOTCONN: | ||
282 | case -EPIPE: | ||
283 | /* connection broken */ | ||
284 | if (xprt->stream) | ||
285 | result = -ENOTCONN; | ||
286 | break; | ||
287 | default: | ||
288 | printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); | ||
289 | } | ||
290 | return result; | ||
291 | } | 272 | } |
292 | 273 | ||
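Both reservation/release pairs are now chosen through the rpc_xprt_ops switch rather than the old nocong flag. Illustrative wiring only; the reserve_xprt and release_xprt field names appear in the dispatch code above, everything else is a guess:

    /* How a transport might select its locking discipline under the
     * new switch. */
    static struct rpc_xprt_ops example_udp_ops = {
            .reserve_xprt   = xprt_reserve_xprt_cong,   /* VJ congestion control */
            .release_xprt   = xprt_release_xprt_cong,
    };

    static struct rpc_xprt_ops example_tcp_ops = {
            .reserve_xprt   = xprt_reserve_xprt,        /* plain write lock */
            .release_xprt   = xprt_release_xprt,
    };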
293 | /* | 274 | /* |
@@ -321,26 +302,40 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) | |||
321 | return; | 302 | return; |
322 | req->rq_cong = 0; | 303 | req->rq_cong = 0; |
323 | xprt->cong -= RPC_CWNDSCALE; | 304 | xprt->cong -= RPC_CWNDSCALE; |
324 | __xprt_lock_write_next(xprt); | 305 | __xprt_lock_write_next_cong(xprt); |
325 | } | 306 | } |
326 | 307 | ||
327 | /* | 308 | /** |
328 | * Adjust RPC congestion window | 309 | * xprt_release_rqst_cong - housekeeping when request is complete |
310 | * @task: RPC request that recently completed | ||
311 | * | ||
312 | * Useful for transports that require congestion control. | ||
313 | */ | ||
314 | void xprt_release_rqst_cong(struct rpc_task *task) | ||
315 | { | ||
316 | __xprt_put_cong(task->tk_xprt, task->tk_rqstp); | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * xprt_adjust_cwnd - adjust transport congestion window | ||
321 | * @task: recently completed RPC request used to adjust window | ||
322 | * @result: result code of completed RPC request | ||
323 | * | ||
329 | * We use a time-smoothed congestion estimator to avoid heavy oscillation. | 324 | * We use a time-smoothed congestion estimator to avoid heavy oscillation. |
330 | */ | 325 | */ |
331 | static void | 326 | void xprt_adjust_cwnd(struct rpc_task *task, int result) |
332 | xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) | ||
333 | { | 327 | { |
334 | unsigned long cwnd; | 328 | struct rpc_rqst *req = task->tk_rqstp; |
329 | struct rpc_xprt *xprt = task->tk_xprt; | ||
330 | unsigned long cwnd = xprt->cwnd; | ||
335 | 331 | ||
336 | cwnd = xprt->cwnd; | ||
337 | if (result >= 0 && cwnd <= xprt->cong) { | 332 | if (result >= 0 && cwnd <= xprt->cong) { |
338 | /* The (cwnd >> 1) term makes sure | 333 | /* The (cwnd >> 1) term makes sure |
339 | * the result gets rounded properly. */ | 334 | * the result gets rounded properly. */ |
340 | cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; | 335 | cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; |
341 | if (cwnd > RPC_MAXCWND(xprt)) | 336 | if (cwnd > RPC_MAXCWND(xprt)) |
342 | cwnd = RPC_MAXCWND(xprt); | 337 | cwnd = RPC_MAXCWND(xprt); |
343 | __xprt_lock_write_next(xprt); | 338 | __xprt_lock_write_next_cong(xprt); |
344 | } else if (result == -ETIMEDOUT) { | 339 | } else if (result == -ETIMEDOUT) { |
345 | cwnd >>= 1; | 340 | cwnd >>= 1; |
346 | if (cwnd < RPC_CWNDSCALE) | 341 | if (cwnd < RPC_CWNDSCALE) |
@@ -349,11 +344,89 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) | |||
349 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", | 344 | dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", |
350 | xprt->cong, xprt->cwnd, cwnd); | 345 | xprt->cong, xprt->cwnd, cwnd); |
351 | xprt->cwnd = cwnd; | 346 | xprt->cwnd = cwnd; |
347 | __xprt_put_cong(xprt, req); | ||
348 | } | ||
349 | |||
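The numbers in the update above are easier to see with the fixed-point scale made explicit: RPC_CWNDSHIFT is 8, so one request slot is 256 cwnd units. A standalone user-space check of the arithmetic (constants copied from this file):

    #include <stdio.h>

    #define RPC_CWNDSHIFT  (8U)
    #define RPC_CWNDSCALE  (1U << RPC_CWNDSHIFT)

    int main(void)
    {
            unsigned long cwnd = 2 * RPC_CWNDSCALE;   /* two request slots */

            /* reply received with a full window outstanding: +1/cwnd slots */
            cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
            printf("after reply:   %lu (%.2f slots)\n",
                   cwnd, cwnd / (double)RPC_CWNDSCALE);   /* 640, 2.50 */

            /* retransmit timeout: halve, but never below one slot */
            cwnd >>= 1;
            if (cwnd < RPC_CWNDSCALE)
                    cwnd = RPC_CWNDSCALE;
            printf("after timeout: %lu (%.2f slots)\n",
                   cwnd, cwnd / (double)RPC_CWNDSCALE);   /* 320, 1.25 */
            return 0;
    }

Additive increase of roughly 1/cwnd slots per reply, multiplicative decrease by half on timeout: the Van Jacobson scheme named in the comment.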
350 | /** | ||
351 | * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue | ||
352 | * @xprt: transport with waiting tasks | ||
353 | * @status: result code to plant in each task before waking it | ||
354 | * | ||
355 | */ | ||
356 | void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) | ||
357 | { | ||
358 | if (status < 0) | ||
359 | rpc_wake_up_status(&xprt->pending, status); | ||
360 | else | ||
361 | rpc_wake_up(&xprt->pending); | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * xprt_wait_for_buffer_space - wait for transport output buffer to clear | ||
366 | * @task: task to be put to sleep | ||
367 | * | ||
368 | */ | ||
369 | void xprt_wait_for_buffer_space(struct rpc_task *task) | ||
370 | { | ||
371 | struct rpc_rqst *req = task->tk_rqstp; | ||
372 | struct rpc_xprt *xprt = req->rq_xprt; | ||
373 | |||
374 | task->tk_timeout = req->rq_timeout; | ||
375 | rpc_sleep_on(&xprt->pending, task, NULL, NULL); | ||
376 | } | ||
377 | |||
378 | /** | ||
379 | * xprt_write_space - wake the task waiting for transport output buffer space | ||
380 | * @xprt: transport with waiting tasks | ||
381 | * | ||
382 | * Can be called in a soft IRQ context, so xprt_write_space never sleeps. | ||
383 | */ | ||
384 | void xprt_write_space(struct rpc_xprt *xprt) | ||
385 | { | ||
386 | if (unlikely(xprt->shutdown)) | ||
387 | return; | ||
388 | |||
389 | spin_lock_bh(&xprt->transport_lock); | ||
390 | if (xprt->snd_task) { | ||
391 | dprintk("RPC: write space: waking waiting task on xprt %p\n", | ||
392 | xprt); | ||
393 | rpc_wake_up_task(xprt->snd_task); | ||
394 | } | ||
395 | spin_unlock_bh(&xprt->transport_lock); | ||
396 | } | ||
397 | |||
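xprt_write_space() is the wake half of a sleep/wake pair with xprt_wait_for_buffer_space(). A hedged sketch of the socket-level hook that would call it; the name and wiring are assumptions, though sk_user_data holding the xprt follows the convention visible in the code removed above:

    /* Hypothetical sk->sk_write_space replacement installed by a socket
     * transport: wake the sender once the socket drains. */
    static void example_write_space(struct sock *sk)
    {
            struct rpc_xprt *xprt = sk->sk_user_data;

            if (xprt != NULL && sock_writeable(sk))
                    xprt_write_space(xprt); /* never sleeps; softirq-safe */
    }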
398 | /** | ||
399 | * xprt_set_retrans_timeout_def - set a request's retransmit timeout | ||
400 | * @task: task whose timeout is to be set | ||
401 | * | ||
402 | * Set a request's retransmit timeout based on the transport's | ||
403 | * default timeout parameters. Used by transports that don't adjust | ||
404 | * the retransmit timeout based on round-trip time estimation. | ||
405 | */ | ||
406 | void xprt_set_retrans_timeout_def(struct rpc_task *task) | ||
407 | { | ||
408 | task->tk_timeout = task->tk_rqstp->rq_timeout; | ||
352 | } | 409 | } |
353 | 410 | ||
354 | /* | 411 | /**
355 | * Reset the major timeout value | 412 | * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout |
413 | * @task: task whose timeout is to be set | ||
414 | * | ||
415 | * Set a request's retransmit timeout using the RTT estimator. | ||
356 | */ | 416 | */ |
417 | void xprt_set_retrans_timeout_rtt(struct rpc_task *task) | ||
418 | { | ||
419 | int timer = task->tk_msg.rpc_proc->p_timer; | ||
420 | struct rpc_rtt *rtt = task->tk_client->cl_rtt; | ||
421 | struct rpc_rqst *req = task->tk_rqstp; | ||
422 | unsigned long max_timeout = req->rq_xprt->timeout.to_maxval; | ||
423 | |||
424 | task->tk_timeout = rpc_calc_rto(rtt, timer); | ||
425 | task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; | ||
426 | if (task->tk_timeout > max_timeout || task->tk_timeout == 0) | ||
427 | task->tk_timeout = max_timeout; | ||
428 | } | ||
429 | |||
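The backoff schedule above is worth tracing once: the estimator supplies a base RTO, then each prior timeout for this procedure class (rpc_ntimeo) plus each retry doubles it, capped at to_maxval. A user-space sketch with assumed values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rto = 25;          /* base RTO in jiffies (assumed) */
            unsigned long maxval = 6000;     /* to_maxval (assumed) */
            unsigned int ntimeo = 1, retries;

            for (retries = 0; retries < 8; retries++) {
                    unsigned long t = rto << (ntimeo + retries);

                    if (t > maxval || t == 0)   /* t == 0 guards shift overflow */
                            t = maxval;
                    printf("retry %u: timeout %lu jiffies\n", retries, t);
            }
            return 0;   /* 50, 100, 200, ... 3200, then capped at 6000 */
    }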
357 | static void xprt_reset_majortimeo(struct rpc_rqst *req) | 430 | static void xprt_reset_majortimeo(struct rpc_rqst *req) |
358 | { | 431 | { |
359 | struct rpc_timeout *to = &req->rq_xprt->timeout; | 432 | struct rpc_timeout *to = &req->rq_xprt->timeout; |
@@ -368,8 +441,10 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req) | |||
368 | req->rq_majortimeo += jiffies; | 441 | req->rq_majortimeo += jiffies; |
369 | } | 442 | } |
370 | 443 | ||
371 | /* | 444 | /** |
372 | * Adjust timeout values etc for next retransmit | 445 | * xprt_adjust_timeout - adjust timeout values for next retransmit |
446 | * @req: RPC request containing parameters to use for the adjustment | ||
447 | * | ||
373 | */ | 448 | */ |
374 | int xprt_adjust_timeout(struct rpc_rqst *req) | 449 | int xprt_adjust_timeout(struct rpc_rqst *req) |
375 | { | 450 | { |
@@ -391,9 +466,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
391 | req->rq_retries = 0; | 466 | req->rq_retries = 0; |
392 | xprt_reset_majortimeo(req); | 467 | xprt_reset_majortimeo(req); |
393 | /* Reset the RTT counters == "slow start" */ | 468 | /* Reset the RTT counters == "slow start" */ |
394 | spin_lock_bh(&xprt->sock_lock); | 469 | spin_lock_bh(&xprt->transport_lock); |
395 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); | 470 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); |
396 | spin_unlock_bh(&xprt->sock_lock); | 471 | spin_unlock_bh(&xprt->transport_lock); |
397 | pprintk("RPC: %lu timeout\n", jiffies); | 472 | pprintk("RPC: %lu timeout\n", jiffies); |
398 | status = -ETIMEDOUT; | 473 | status = -ETIMEDOUT; |
399 | } | 474 | } |
@@ -405,133 +480,52 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
405 | return status; | 480 | return status; |
406 | } | 481 | } |
407 | 482 | ||
408 | /* | 483 | static void xprt_autoclose(void *args) |
409 | * Close down a transport socket | ||
410 | */ | ||
411 | static void | ||
412 | xprt_close(struct rpc_xprt *xprt) | ||
413 | { | ||
414 | struct socket *sock = xprt->sock; | ||
415 | struct sock *sk = xprt->inet; | ||
416 | |||
417 | if (!sk) | ||
418 | return; | ||
419 | |||
420 | write_lock_bh(&sk->sk_callback_lock); | ||
421 | xprt->inet = NULL; | ||
422 | xprt->sock = NULL; | ||
423 | |||
424 | sk->sk_user_data = NULL; | ||
425 | sk->sk_data_ready = xprt->old_data_ready; | ||
426 | sk->sk_state_change = xprt->old_state_change; | ||
427 | sk->sk_write_space = xprt->old_write_space; | ||
428 | write_unlock_bh(&sk->sk_callback_lock); | ||
429 | |||
430 | sk->sk_no_check = 0; | ||
431 | |||
432 | sock_release(sock); | ||
433 | } | ||
434 | |||
435 | static void | ||
436 | xprt_socket_autoclose(void *args) | ||
437 | { | 484 | { |
438 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 485 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; |
439 | 486 | ||
440 | xprt_disconnect(xprt); | 487 | xprt_disconnect(xprt); |
441 | xprt_close(xprt); | 488 | xprt->ops->close(xprt); |
442 | xprt_release_write(xprt, NULL); | 489 | xprt_release_write(xprt, NULL); |
443 | } | 490 | } |
444 | 491 | ||
445 | /* | 492 | /** |
446 | * Mark a transport as disconnected | 493 | * xprt_disconnect - mark a transport as disconnected |
494 | * @xprt: transport to flag for disconnect | ||
495 | * | ||
447 | */ | 496 | */ |
448 | static void | 497 | void xprt_disconnect(struct rpc_xprt *xprt) |
449 | xprt_disconnect(struct rpc_xprt *xprt) | ||
450 | { | 498 | { |
451 | dprintk("RPC: disconnected transport %p\n", xprt); | 499 | dprintk("RPC: disconnected transport %p\n", xprt); |
452 | spin_lock_bh(&xprt->sock_lock); | 500 | spin_lock_bh(&xprt->transport_lock); |
453 | xprt_clear_connected(xprt); | 501 | xprt_clear_connected(xprt); |
454 | rpc_wake_up_status(&xprt->pending, -ENOTCONN); | 502 | xprt_wake_pending_tasks(xprt, -ENOTCONN); |
455 | spin_unlock_bh(&xprt->sock_lock); | 503 | spin_unlock_bh(&xprt->transport_lock); |
456 | } | 504 | } |
457 | 505 | ||
458 | /* | ||
459 | * Used to allow disconnection when we've been idle | ||
460 | */ | ||
461 | static void | 506 | static void |
462 | xprt_init_autodisconnect(unsigned long data) | 507 | xprt_init_autodisconnect(unsigned long data) |
463 | { | 508 | { |
464 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; | 509 | struct rpc_xprt *xprt = (struct rpc_xprt *)data; |
465 | 510 | ||
466 | spin_lock(&xprt->sock_lock); | 511 | spin_lock(&xprt->transport_lock); |
467 | if (!list_empty(&xprt->recv) || xprt->shutdown) | 512 | if (!list_empty(&xprt->recv) || xprt->shutdown) |
468 | goto out_abort; | 513 | goto out_abort; |
469 | if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) | 514 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
470 | goto out_abort; | 515 | goto out_abort; |
471 | spin_unlock(&xprt->sock_lock); | 516 | spin_unlock(&xprt->transport_lock); |
472 | /* Let keventd close the socket */ | 517 | if (xprt_connecting(xprt)) |
473 | if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0) | ||
474 | xprt_release_write(xprt, NULL); | 518 | xprt_release_write(xprt, NULL); |
475 | else | 519 | else |
476 | schedule_work(&xprt->task_cleanup); | 520 | schedule_work(&xprt->task_cleanup); |
477 | return; | 521 | return; |
478 | out_abort: | 522 | out_abort: |
479 | spin_unlock(&xprt->sock_lock); | 523 | spin_unlock(&xprt->transport_lock); |
480 | } | ||
481 | |||
482 | static void xprt_socket_connect(void *args) | ||
483 | { | ||
484 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | ||
485 | struct socket *sock = xprt->sock; | ||
486 | int status = -EIO; | ||
487 | |||
488 | if (xprt->shutdown || xprt->addr.sin_port == 0) | ||
489 | goto out; | ||
490 | |||
491 | /* | ||
492 | * Start by resetting any existing state | ||
493 | */ | ||
494 | xprt_close(xprt); | ||
495 | sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); | ||
496 | if (sock == NULL) { | ||
497 | /* couldn't create socket or bind to reserved port; | ||
498 | * this is likely a permanent error, so cause an abort */ | ||
499 | goto out; | ||
500 | } | ||
501 | xprt_bind_socket(xprt, sock); | ||
502 | xprt_sock_setbufsize(xprt); | ||
503 | |||
504 | status = 0; | ||
505 | if (!xprt->stream) | ||
506 | goto out; | ||
507 | |||
508 | /* | ||
509 | * Tell the socket layer to start connecting... | ||
510 | */ | ||
511 | status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, | ||
512 | sizeof(xprt->addr), O_NONBLOCK); | ||
513 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", | ||
514 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); | ||
515 | if (status < 0) { | ||
516 | switch (status) { | ||
517 | case -EINPROGRESS: | ||
518 | case -EALREADY: | ||
519 | goto out_clear; | ||
520 | } | ||
521 | } | ||
522 | out: | ||
523 | if (status < 0) | ||
524 | rpc_wake_up_status(&xprt->pending, status); | ||
525 | else | ||
526 | rpc_wake_up(&xprt->pending); | ||
527 | out_clear: | ||
528 | smp_mb__before_clear_bit(); | ||
529 | clear_bit(XPRT_CONNECTING, &xprt->sockstate); | ||
530 | smp_mb__after_clear_bit(); | ||
531 | } | 524 | } |
532 | 525 | ||
533 | /* | 526 | /** |
534 | * Attempt to connect a TCP socket. | 527 | * xprt_connect - schedule a transport connect operation |
528 | * @task: RPC task that is requesting the connect | ||
535 | * | 529 | * |
536 | */ | 530 | */ |
537 | void xprt_connect(struct rpc_task *task) | 531 | void xprt_connect(struct rpc_task *task) |
@@ -552,37 +546,19 @@ void xprt_connect(struct rpc_task *task) | |||
552 | if (!xprt_lock_write(xprt, task)) | 546 | if (!xprt_lock_write(xprt, task)) |
553 | return; | 547 | return; |
554 | if (xprt_connected(xprt)) | 548 | if (xprt_connected(xprt)) |
555 | goto out_write; | 549 | xprt_release_write(xprt, task); |
550 | else { | ||
551 | if (task->tk_rqstp) | ||
552 | task->tk_rqstp->rq_bytes_sent = 0; | ||
556 | 553 | ||
557 | if (task->tk_rqstp) | 554 | task->tk_timeout = xprt->connect_timeout; |
558 | task->tk_rqstp->rq_bytes_sent = 0; | 555 | rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); |
559 | 556 | xprt->ops->connect(task); | |
560 | task->tk_timeout = RPC_CONNECT_TIMEOUT; | ||
561 | rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); | ||
562 | if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { | ||
563 | /* Note: if we are here due to a dropped connection | ||
564 | * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ | ||
565 | * seconds | ||
566 | */ | ||
567 | if (xprt->sock != NULL) | ||
568 | schedule_delayed_work(&xprt->sock_connect, | ||
569 | RPC_REESTABLISH_TIMEOUT); | ||
570 | else { | ||
571 | schedule_work(&xprt->sock_connect); | ||
572 | if (!RPC_IS_ASYNC(task)) | ||
573 | flush_scheduled_work(); | ||
574 | } | ||
575 | } | 557 | } |
576 | return; | 558 | return; |
577 | out_write: | ||
578 | xprt_release_write(xprt, task); | ||
579 | } | 559 | } |
580 | 560 | ||
581 | /* | 561 | static void xprt_connect_status(struct rpc_task *task) |
582 | * We arrive here when awoken from waiting on connection establishment. | ||
583 | */ | ||
584 | static void | ||
585 | xprt_connect_status(struct rpc_task *task) | ||
586 | { | 562 | { |
587 | struct rpc_xprt *xprt = task->tk_xprt; | 563 | struct rpc_xprt *xprt = task->tk_xprt; |
588 | 564 | ||
@@ -592,31 +568,42 @@ xprt_connect_status(struct rpc_task *task) | |||
592 | return; | 568 | return; |
593 | } | 569 | } |
594 | 570 | ||
595 | /* if soft mounted, just cause this RPC to fail */ | ||
596 | if (RPC_IS_SOFT(task)) | ||
597 | task->tk_status = -EIO; | ||
598 | |||
599 | switch (task->tk_status) { | 571 | switch (task->tk_status) { |
600 | case -ECONNREFUSED: | 572 | case -ECONNREFUSED: |
601 | case -ECONNRESET: | 573 | case -ECONNRESET: |
574 | dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", | ||
575 | task->tk_pid, task->tk_client->cl_server); | ||
576 | break; | ||
602 | case -ENOTCONN: | 577 | case -ENOTCONN: |
603 | return; | 578 | dprintk("RPC: %4d xprt_connect_status: connection broken\n", |
579 | task->tk_pid); | ||
580 | break; | ||
604 | case -ETIMEDOUT: | 581 | case -ETIMEDOUT: |
605 | dprintk("RPC: %4d xprt_connect_status: timed out\n", | 582 | dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", |
606 | task->tk_pid); | 583 | task->tk_pid); |
607 | break; | 584 | break; |
608 | default: | 585 | default: |
609 | printk(KERN_ERR "RPC: error %d connecting to server %s\n", | 586 | dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", |
610 | -task->tk_status, task->tk_client->cl_server); | 587 | task->tk_pid, -task->tk_status, task->tk_client->cl_server); |
588 | xprt_release_write(xprt, task); | ||
589 | task->tk_status = -EIO; | ||
590 | return; | ||
591 | } | ||
592 | |||
593 | /* if soft mounted, just cause this RPC to fail */ | ||
594 | if (RPC_IS_SOFT(task)) { | ||
595 | xprt_release_write(xprt, task); | ||
596 | task->tk_status = -EIO; | ||
611 | } | 597 | } |
612 | xprt_release_write(xprt, task); | ||
613 | } | 598 | } |
614 | 599 | ||
615 | /* | 600 | /** |
616 | * Look up the RPC request corresponding to a reply, and then lock it. | 601 | * xprt_lookup_rqst - find an RPC request corresponding to an XID |
602 | * @xprt: transport on which the original request was transmitted | ||
603 | * @xid: RPC XID of incoming reply | ||
604 | * | ||
617 | */ | 605 | */ |
618 | static inline struct rpc_rqst * | 606 | struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) |
619 | xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) | ||
620 | { | 607 | { |
621 | struct list_head *pos; | 608 | struct list_head *pos; |
622 | struct rpc_rqst *req = NULL; | 609 | struct rpc_rqst *req = NULL; |
@@ -631,556 +618,68 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) | |||
631 | return req; | 618 | return req; |
632 | } | 619 | } |
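xprt_lookup_rqst() matches an incoming reply to its original call by walking the transport's receive list and comparing the 32-bit XID copied out of the reply. A self-contained sketch of the idea (illustrative names, not the kernel structures):

    #include <stdint.h>
    #include <stddef.h>

    struct pending_req {
        uint32_t xid;               /* XID assigned when the call was sent */
        struct pending_req *next;
    };

    static struct pending_req *lookup_rqst(struct pending_req *head, uint32_t xid)
    {
        struct pending_req *p;

        for (p = head; p != NULL; p = p->next)
            if (p->xid == xid)      /* match reply to original call */
                return p;
        return NULL;                /* unknown XID: drop the reply */
    }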
633 | 620 | ||
634 | /* | 621 | /** |
635 | * Complete reply received. | 622 | * xprt_update_rtt - update an RPC client's RTT state after receiving a reply |
636 | * The TCP code relies on us to remove the request from xprt->pending. | 623 | * @task: RPC request that recently completed |
637 | */ | 624 | * |
638 | static void | ||
639 | xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) | ||
640 | { | ||
641 | struct rpc_task *task = req->rq_task; | ||
642 | struct rpc_clnt *clnt = task->tk_client; | ||
643 | |||
644 | /* Adjust congestion window */ | ||
645 | if (!xprt->nocong) { | ||
646 | unsigned timer = task->tk_msg.rpc_proc->p_timer; | ||
647 | xprt_adjust_cwnd(xprt, copied); | ||
648 | __xprt_put_cong(xprt, req); | ||
649 | if (timer) { | ||
650 | if (req->rq_ntrans == 1) | ||
651 | rpc_update_rtt(clnt->cl_rtt, timer, | ||
652 | (long)jiffies - req->rq_xtime); | ||
653 | rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1); | ||
654 | } | ||
655 | } | ||
656 | |||
657 | #ifdef RPC_PROFILE | ||
658 | /* Profile only reads for now */ | ||
659 | if (copied > 1024) { | ||
660 | static unsigned long nextstat; | ||
661 | static unsigned long pkt_rtt, pkt_len, pkt_cnt; | ||
662 | |||
663 | pkt_cnt++; | ||
664 | pkt_len += req->rq_slen + copied; | ||
665 | pkt_rtt += jiffies - req->rq_xtime; | ||
666 | if (time_before(nextstat, jiffies)) { | ||
667 | printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd); | ||
668 | printk("RPC: %ld %ld %ld %ld stat\n", | ||
669 | jiffies, pkt_cnt, pkt_len, pkt_rtt); | ||
670 | pkt_rtt = pkt_len = pkt_cnt = 0; | ||
671 | nextstat = jiffies + 5 * HZ; | ||
672 | } | ||
673 | } | ||
674 | #endif | ||
675 | |||
676 | dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied); | ||
677 | list_del_init(&req->rq_list); | ||
678 | req->rq_received = req->rq_private_buf.len = copied; | ||
679 | |||
680 | /* ... and wake up the process. */ | ||
681 | rpc_wake_up_task(task); | ||
682 | return; | ||
683 | } | ||
684 | |||
685 | static size_t | ||
686 | skb_read_bits(skb_reader_t *desc, void *to, size_t len) | ||
687 | { | ||
688 | if (len > desc->count) | ||
689 | len = desc->count; | ||
690 | if (skb_copy_bits(desc->skb, desc->offset, to, len)) | ||
691 | return 0; | ||
692 | desc->count -= len; | ||
693 | desc->offset += len; | ||
694 | return len; | ||
695 | } | ||
696 | |||
697 | static size_t | ||
698 | skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | ||
699 | { | ||
700 | unsigned int csum2, pos; | ||
701 | |||
702 | if (len > desc->count) | ||
703 | len = desc->count; | ||
704 | pos = desc->offset; | ||
705 | csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); | ||
706 | desc->csum = csum_block_add(desc->csum, csum2, pos); | ||
707 | desc->count -= len; | ||
708 | desc->offset += len; | ||
709 | return len; | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * We have set things up such that we perform the checksum of the UDP | ||
714 | * packet in parallel with the copies into the RPC client iovec. -DaveM | ||
715 | */ | ||
716 | int | ||
717 | csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | ||
718 | { | ||
719 | skb_reader_t desc; | ||
720 | |||
721 | desc.skb = skb; | ||
722 | desc.offset = sizeof(struct udphdr); | ||
723 | desc.count = skb->len - desc.offset; | ||
724 | |||
725 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
726 | goto no_checksum; | ||
727 | |||
728 | desc.csum = csum_partial(skb->data, desc.offset, skb->csum); | ||
729 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) | ||
730 | return -1; | ||
731 | if (desc.offset != skb->len) { | ||
732 | unsigned int csum2; | ||
733 | csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); | ||
734 | desc.csum = csum_block_add(desc.csum, csum2, desc.offset); | ||
735 | } | ||
736 | if (desc.count) | ||
737 | return -1; | ||
738 | if ((unsigned short)csum_fold(desc.csum)) | ||
739 | return -1; | ||
740 | return 0; | ||
741 | no_checksum: | ||
742 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) | ||
743 | return -1; | ||
744 | if (desc.count) | ||
745 | return -1; | ||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * Input handler for RPC replies. Called from a bottom half and hence | ||
751 | * atomic. | ||
752 | */ | ||
753 | static void | ||
754 | udp_data_ready(struct sock *sk, int len) | ||
755 | { | ||
756 | struct rpc_task *task; | ||
757 | struct rpc_xprt *xprt; | ||
758 | struct rpc_rqst *rovr; | ||
759 | struct sk_buff *skb; | ||
760 | int err, repsize, copied; | ||
761 | u32 _xid, *xp; | ||
762 | |||
763 | read_lock(&sk->sk_callback_lock); | ||
764 | dprintk("RPC: udp_data_ready...\n"); | ||
765 | if (!(xprt = xprt_from_sock(sk))) { | ||
766 | printk("RPC: udp_data_ready request not found!\n"); | ||
767 | goto out; | ||
768 | } | ||
769 | |||
770 | dprintk("RPC: udp_data_ready client %p\n", xprt); | ||
771 | |||
772 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) | ||
773 | goto out; | ||
774 | |||
775 | if (xprt->shutdown) | ||
776 | goto dropit; | ||
777 | |||
778 | repsize = skb->len - sizeof(struct udphdr); | ||
779 | if (repsize < 4) { | ||
780 | printk("RPC: impossible RPC reply size %d!\n", repsize); | ||
781 | goto dropit; | ||
782 | } | ||
783 | |||
784 | /* Copy the XID from the skb... */ | ||
785 | xp = skb_header_pointer(skb, sizeof(struct udphdr), | ||
786 | sizeof(_xid), &_xid); | ||
787 | if (xp == NULL) | ||
788 | goto dropit; | ||
789 | |||
790 | /* Look up and lock the request corresponding to the given XID */ | ||
791 | spin_lock(&xprt->sock_lock); | ||
792 | rovr = xprt_lookup_rqst(xprt, *xp); | ||
793 | if (!rovr) | ||
794 | goto out_unlock; | ||
795 | task = rovr->rq_task; | ||
796 | |||
797 | dprintk("RPC: %4d received reply\n", task->tk_pid); | ||
798 | |||
799 | if ((copied = rovr->rq_private_buf.buflen) > repsize) | ||
800 | copied = repsize; | ||
801 | |||
802 | /* Suck it into the iovec, verify checksum if not done by hw. */ | ||
803 | if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) | ||
804 | goto out_unlock; | ||
805 | |||
806 | /* Something worked... */ | ||
807 | dst_confirm(skb->dst); | ||
808 | |||
809 | xprt_complete_rqst(xprt, rovr, copied); | ||
810 | |||
811 | out_unlock: | ||
812 | spin_unlock(&xprt->sock_lock); | ||
813 | dropit: | ||
814 | skb_free_datagram(sk, skb); | ||
815 | out: | ||
816 | read_unlock(&sk->sk_callback_lock); | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | * Copy from an skb into memory and shrink the skb. | ||
821 | */ | ||
822 | static inline size_t | ||
823 | tcp_copy_data(skb_reader_t *desc, void *p, size_t len) | ||
824 | { | ||
825 | if (len > desc->count) | ||
826 | len = desc->count; | ||
827 | if (skb_copy_bits(desc->skb, desc->offset, p, len)) { | ||
828 | dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", | ||
829 | len, desc->count); | ||
830 | return 0; | ||
831 | } | ||
832 | desc->offset += len; | ||
833 | desc->count -= len; | ||
834 | dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", | ||
835 | len, desc->count); | ||
836 | return len; | ||
837 | } | ||
838 | |||
839 | /* | ||
840 | * TCP read fragment marker | ||
841 | */ | ||
842 | static inline void | ||
843 | tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
844 | { | ||
845 | size_t len, used; | ||
846 | char *p; | ||
847 | |||
848 | p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; | ||
849 | len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; | ||
850 | used = tcp_copy_data(desc, p, len); | ||
851 | xprt->tcp_offset += used; | ||
852 | if (used != len) | ||
853 | return; | ||
854 | xprt->tcp_reclen = ntohl(xprt->tcp_recm); | ||
855 | if (xprt->tcp_reclen & 0x80000000) | ||
856 | xprt->tcp_flags |= XPRT_LAST_FRAG; | ||
857 | else | ||
858 | xprt->tcp_flags &= ~XPRT_LAST_FRAG; | ||
859 | xprt->tcp_reclen &= 0x7fffffff; | ||
860 | xprt->tcp_flags &= ~XPRT_COPY_RECM; | ||
861 | xprt->tcp_offset = 0; | ||
862 | /* Sanity check of the record length */ | ||
863 | if (xprt->tcp_reclen < 4) { | ||
864 | printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); | ||
865 | xprt_disconnect(xprt); | ||
866 | } | ||
867 | dprintk("RPC: reading TCP record fragment of length %d\n", | ||
868 | xprt->tcp_reclen); | ||
869 | } | ||
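tcp_read_fraghdr() reassembles the 4-byte RPC record marker (RFC 1831 record marking): the value arrives big-endian, the high bit flags the last fragment of a record, and the low 31 bits carry the fragment length. A minimal sketch of the decode step once all four bytes are in hand:

    #include <stdint.h>
    #include <arpa/inet.h>

    struct frag_hdr {
        uint32_t len;   /* fragment length: low 31 bits of the marker */
        int last;       /* nonzero if this is the record's final fragment */
    };

    static struct frag_hdr decode_marker(uint32_t marker)
    {
        uint32_t m = ntohl(marker);         /* marker is sent big-endian */
        struct frag_hdr h = {
            .len  = m & 0x7fffffff,
            .last = (m & 0x80000000) != 0,
        };
        return h;
    }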
870 | |||
871 | static void | ||
872 | tcp_check_recm(struct rpc_xprt *xprt) | ||
873 | { | ||
874 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", | ||
875 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); | ||
876 | if (xprt->tcp_offset == xprt->tcp_reclen) { | ||
877 | xprt->tcp_flags |= XPRT_COPY_RECM; | ||
878 | xprt->tcp_offset = 0; | ||
879 | if (xprt->tcp_flags & XPRT_LAST_FRAG) { | ||
880 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
881 | xprt->tcp_flags |= XPRT_COPY_XID; | ||
882 | xprt->tcp_copied = 0; | ||
883 | } | ||
884 | } | ||
885 | } | ||
886 | |||
887 | /* | ||
888 | * TCP read xid | ||
889 | */ | ||
890 | static inline void | ||
891 | tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
892 | { | ||
893 | size_t len, used; | ||
894 | char *p; | ||
895 | |||
896 | len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; | ||
897 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | ||
898 | p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; | ||
899 | used = tcp_copy_data(desc, p, len); | ||
900 | xprt->tcp_offset += used; | ||
901 | if (used != len) | ||
902 | return; | ||
903 | xprt->tcp_flags &= ~XPRT_COPY_XID; | ||
904 | xprt->tcp_flags |= XPRT_COPY_DATA; | ||
905 | xprt->tcp_copied = 4; | ||
906 | dprintk("RPC: reading reply for XID %08x\n", | ||
907 | ntohl(xprt->tcp_xid)); | ||
908 | tcp_check_recm(xprt); | ||
909 | } | ||
910 | |||
911 | /* | ||
912 | * TCP read and complete request | ||
913 | */ | ||
914 | static inline void | ||
915 | tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
916 | { | ||
917 | struct rpc_rqst *req; | ||
918 | struct xdr_buf *rcvbuf; | ||
919 | size_t len; | ||
920 | ssize_t r; | ||
921 | |||
922 | /* Find and lock the request corresponding to this xid */ | ||
923 | spin_lock(&xprt->sock_lock); | ||
924 | req = xprt_lookup_rqst(xprt, xprt->tcp_xid); | ||
925 | if (!req) { | ||
926 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
927 | dprintk("RPC: XID %08x request not found!\n", | ||
928 | ntohl(xprt->tcp_xid)); | ||
929 | spin_unlock(&xprt->sock_lock); | ||
930 | return; | ||
931 | } | ||
932 | |||
933 | rcvbuf = &req->rq_private_buf; | ||
934 | len = desc->count; | ||
935 | if (len > xprt->tcp_reclen - xprt->tcp_offset) { | ||
936 | skb_reader_t my_desc; | ||
937 | |||
938 | len = xprt->tcp_reclen - xprt->tcp_offset; | ||
939 | memcpy(&my_desc, desc, sizeof(my_desc)); | ||
940 | my_desc.count = len; | ||
941 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | ||
942 | &my_desc, tcp_copy_data); | ||
943 | desc->count -= r; | ||
944 | desc->offset += r; | ||
945 | } else | ||
946 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | ||
947 | desc, tcp_copy_data); | ||
948 | |||
949 | if (r > 0) { | ||
950 | xprt->tcp_copied += r; | ||
951 | xprt->tcp_offset += r; | ||
952 | } | ||
953 | if (r != len) { | ||
954 | /* Error when copying to the receive buffer, | ||
955 | * usually because we weren't able to allocate | ||
956 | * additional buffer pages. All we can do now | ||
957 | * is turn off XPRT_COPY_DATA, so the request | ||
958 | * will not receive any additional updates, | ||
959 | * and time out. | ||
960 | * Any remaining data from this record will | ||
961 | * be discarded. | ||
962 | */ | ||
963 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
964 | dprintk("RPC: XID %08x truncated request\n", | ||
965 | ntohl(xprt->tcp_xid)); | ||
966 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | ||
967 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | ||
968 | goto out; | ||
969 | } | ||
970 | |||
971 | dprintk("RPC: XID %08x read %Zd bytes\n", | ||
972 | ntohl(xprt->tcp_xid), r); | ||
973 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | ||
974 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | ||
975 | |||
976 | if (xprt->tcp_copied == req->rq_private_buf.buflen) | ||
977 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
978 | else if (xprt->tcp_offset == xprt->tcp_reclen) { | ||
979 | if (xprt->tcp_flags & XPRT_LAST_FRAG) | ||
980 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
981 | } | ||
982 | |||
983 | out: | ||
984 | if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { | ||
985 | dprintk("RPC: %4d received reply complete\n", | ||
986 | req->rq_task->tk_pid); | ||
987 | xprt_complete_rqst(xprt, req, xprt->tcp_copied); | ||
988 | } | ||
989 | spin_unlock(&xprt->sock_lock); | ||
990 | tcp_check_recm(xprt); | ||
991 | } | ||
992 | |||
993 | /* | ||
994 | * TCP discard extra bytes from a short read | ||
995 | */ | ||
996 | static inline void | ||
997 | tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
998 | { | ||
999 | size_t len; | ||
1000 | |||
1001 | len = xprt->tcp_reclen - xprt->tcp_offset; | ||
1002 | if (len > desc->count) | ||
1003 | len = desc->count; | ||
1004 | desc->count -= len; | ||
1005 | desc->offset += len; | ||
1006 | xprt->tcp_offset += len; | ||
1007 | dprintk("RPC: discarded %Zu bytes\n", len); | ||
1008 | tcp_check_recm(xprt); | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * TCP record receive routine | ||
1013 | * We first have to grab the record marker, then the XID, then the data. | ||
1014 | */ | 625 | */ |
1015 | static int | 626 | void xprt_update_rtt(struct rpc_task *task) |
1016 | tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, | ||
1017 | unsigned int offset, size_t len) | ||
1018 | { | ||
1019 | struct rpc_xprt *xprt = rd_desc->arg.data; | ||
1020 | skb_reader_t desc = { | ||
1021 | .skb = skb, | ||
1022 | .offset = offset, | ||
1023 | .count = len, | ||
1024 | .csum = 0 | ||
1025 | }; | ||
1026 | |||
1027 | dprintk("RPC: tcp_data_recv\n"); | ||
1028 | do { | ||
1029 | /* Read in a new fragment marker if necessary */ | ||
1030 | /* Can we ever really expect to get completely empty fragments? */ | ||
1031 | if (xprt->tcp_flags & XPRT_COPY_RECM) { | ||
1032 | tcp_read_fraghdr(xprt, &desc); | ||
1033 | continue; | ||
1034 | } | ||
1035 | /* Read in the xid if necessary */ | ||
1036 | if (xprt->tcp_flags & XPRT_COPY_XID) { | ||
1037 | tcp_read_xid(xprt, &desc); | ||
1038 | continue; | ||
1039 | } | ||
1040 | /* Read in the request data */ | ||
1041 | if (xprt->tcp_flags & XPRT_COPY_DATA) { | ||
1042 | tcp_read_request(xprt, &desc); | ||
1043 | continue; | ||
1044 | } | ||
1045 | /* Skip over any trailing bytes on short reads */ | ||
1046 | tcp_read_discard(xprt, &desc); | ||
1047 | } while (desc.count); | ||
1048 | dprintk("RPC: tcp_data_recv done\n"); | ||
1049 | return len - desc.count; | ||
1050 | } | ||
1051 | |||
1052 | static void tcp_data_ready(struct sock *sk, int bytes) | ||
1053 | { | 627 | { |
1054 | struct rpc_xprt *xprt; | 628 | struct rpc_rqst *req = task->tk_rqstp; |
1055 | read_descriptor_t rd_desc; | 629 | struct rpc_rtt *rtt = task->tk_client->cl_rtt; |
1056 | 630 | unsigned timer = task->tk_msg.rpc_proc->p_timer; | |
1057 | read_lock(&sk->sk_callback_lock); | ||
1058 | dprintk("RPC: tcp_data_ready...\n"); | ||
1059 | if (!(xprt = xprt_from_sock(sk))) { | ||
1060 | printk("RPC: tcp_data_ready socket info not found!\n"); | ||
1061 | goto out; | ||
1062 | } | ||
1063 | if (xprt->shutdown) | ||
1064 | goto out; | ||
1065 | |||
1066 | /* We use rd_desc to pass struct xprt to tcp_data_recv */ | ||
1067 | rd_desc.arg.data = xprt; | ||
1068 | rd_desc.count = 65536; | ||
1069 | tcp_read_sock(sk, &rd_desc, tcp_data_recv); | ||
1070 | out: | ||
1071 | read_unlock(&sk->sk_callback_lock); | ||
1072 | } | ||
1073 | |||
1074 | static void | ||
1075 | tcp_state_change(struct sock *sk) | ||
1076 | { | ||
1077 | struct rpc_xprt *xprt; | ||
1078 | 631 | ||
1079 | read_lock(&sk->sk_callback_lock); | 632 | if (timer) { |
1080 | if (!(xprt = xprt_from_sock(sk))) | 633 | if (req->rq_ntrans == 1) |
1081 | goto out; | 634 | rpc_update_rtt(rtt, timer, |
1082 | dprintk("RPC: tcp_state_change client %p...\n", xprt); | 635 | (long)jiffies - req->rq_xtime); |
1083 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | 636 | rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); |
1084 | sk->sk_state, xprt_connected(xprt), | ||
1085 | sock_flag(sk, SOCK_DEAD), | ||
1086 | sock_flag(sk, SOCK_ZAPPED)); | ||
1087 | |||
1088 | switch (sk->sk_state) { | ||
1089 | case TCP_ESTABLISHED: | ||
1090 | spin_lock_bh(&xprt->sock_lock); | ||
1091 | if (!xprt_test_and_set_connected(xprt)) { | ||
1092 | /* Reset TCP record info */ | ||
1093 | xprt->tcp_offset = 0; | ||
1094 | xprt->tcp_reclen = 0; | ||
1095 | xprt->tcp_copied = 0; | ||
1096 | xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; | ||
1097 | rpc_wake_up(&xprt->pending); | ||
1098 | } | ||
1099 | spin_unlock_bh(&xprt->sock_lock); | ||
1100 | break; | ||
1101 | case TCP_SYN_SENT: | ||
1102 | case TCP_SYN_RECV: | ||
1103 | break; | ||
1104 | default: | ||
1105 | xprt_disconnect(xprt); | ||
1106 | break; | ||
1107 | } | 637 | } |
1108 | out: | ||
1109 | read_unlock(&sk->sk_callback_lock); | ||
1110 | } | 638 | } |
1111 | 639 | ||
1112 | /* | 640 | /** |
1113 | * Called when more output buffer space is available for this socket. | 641 | * xprt_complete_rqst - called when reply processing is complete |
1114 | * We try not to wake our writers until they can make "significant" | 642 | * @task: RPC request that recently completed |
1115 | * progress, otherwise we'll waste resources thrashing sock_sendmsg | 643 | * @copied: actual number of bytes received from the transport |
1116 | * with a bunch of small requests. | 644 | * |
645 | * Caller holds transport lock. | ||
1117 | */ | 646 | */ |
1118 | static void | 647 | void xprt_complete_rqst(struct rpc_task *task, int copied) |
1119 | xprt_write_space(struct sock *sk) | ||
1120 | { | 648 | { |
1121 | struct rpc_xprt *xprt; | 649 | struct rpc_rqst *req = task->tk_rqstp; |
1122 | struct socket *sock; | ||
1123 | |||
1124 | read_lock(&sk->sk_callback_lock); | ||
1125 | if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) | ||
1126 | goto out; | ||
1127 | if (xprt->shutdown) | ||
1128 | goto out; | ||
1129 | |||
1130 | /* Wait until we have enough socket memory */ | ||
1131 | if (xprt->stream) { | ||
1132 | /* from net/core/stream.c:sk_stream_write_space */ | ||
1133 | if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) | ||
1134 | goto out; | ||
1135 | } else { | ||
1136 | /* from net/core/sock.c:sock_def_write_space */ | ||
1137 | if (!sock_writeable(sk)) | ||
1138 | goto out; | ||
1139 | } | ||
1140 | 650 | ||
1141 | if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) | 651 | dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", |
1142 | goto out; | 652 | task->tk_pid, ntohl(req->rq_xid), copied); |
1143 | 653 | ||
1144 | spin_lock_bh(&xprt->sock_lock); | 654 | list_del_init(&req->rq_list); |
1145 | if (xprt->snd_task) | 655 | req->rq_received = req->rq_private_buf.len = copied; |
1146 | rpc_wake_up_task(xprt->snd_task); | 656 | rpc_wake_up_task(task); |
1147 | spin_unlock_bh(&xprt->sock_lock); | ||
1148 | out: | ||
1149 | read_unlock(&sk->sk_callback_lock); | ||
1150 | } | 657 | } |
1151 | 658 | ||
1152 | /* | 659 | static void xprt_timer(struct rpc_task *task) |
1153 | * RPC receive timeout handler. | ||
1154 | */ | ||
1155 | static void | ||
1156 | xprt_timer(struct rpc_task *task) | ||
1157 | { | 660 | { |
1158 | struct rpc_rqst *req = task->tk_rqstp; | 661 | struct rpc_rqst *req = task->tk_rqstp; |
1159 | struct rpc_xprt *xprt = req->rq_xprt; | 662 | struct rpc_xprt *xprt = req->rq_xprt; |
1160 | 663 | ||
1161 | spin_lock(&xprt->sock_lock); | 664 | dprintk("RPC: %4d xprt_timer\n", task->tk_pid); |
1162 | if (req->rq_received) | ||
1163 | goto out; | ||
1164 | |||
1165 | xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT); | ||
1166 | __xprt_put_cong(xprt, req); | ||
1167 | 665 | ||
1168 | dprintk("RPC: %4d xprt_timer (%s request)\n", | 666 | spin_lock(&xprt->transport_lock); |
1169 | task->tk_pid, req ? "pending" : "backlogged"); | 667 | if (!req->rq_received) { |
1170 | 668 | if (xprt->ops->timer) | |
1171 | task->tk_status = -ETIMEDOUT; | 669 | xprt->ops->timer(task); |
1172 | out: | 670 | task->tk_status = -ETIMEDOUT; |
671 | } | ||
1173 | task->tk_timeout = 0; | 672 | task->tk_timeout = 0; |
1174 | rpc_wake_up_task(task); | 673 | rpc_wake_up_task(task); |
1175 | spin_unlock(&xprt->sock_lock); | 674 | spin_unlock(&xprt->transport_lock); |
1176 | } | 675 | } |
1177 | 676 | ||
1178 | /* | 677 | /** |
1179 | * Place the actual RPC call. | 678 | * xprt_prepare_transmit - reserve the transport before sending a request |
1180 | * We have to copy the iovec because sendmsg fiddles with its contents. | 679 | * @task: RPC task about to send a request |
680 | * | ||
1181 | */ | 681 | */ |
1182 | int | 682 | int xprt_prepare_transmit(struct rpc_task *task) |
1183 | xprt_prepare_transmit(struct rpc_task *task) | ||
1184 | { | 683 | { |
1185 | struct rpc_rqst *req = task->tk_rqstp; | 684 | struct rpc_rqst *req = task->tk_rqstp; |
1186 | struct rpc_xprt *xprt = req->rq_xprt; | 685 | struct rpc_xprt *xprt = req->rq_xprt; |
@@ -1191,12 +690,12 @@ xprt_prepare_transmit(struct rpc_task *task) | |||
1191 | if (xprt->shutdown) | 690 | if (xprt->shutdown) |
1192 | return -EIO; | 691 | return -EIO; |
1193 | 692 | ||
1194 | spin_lock_bh(&xprt->sock_lock); | 693 | spin_lock_bh(&xprt->transport_lock); |
1195 | if (req->rq_received && !req->rq_bytes_sent) { | 694 | if (req->rq_received && !req->rq_bytes_sent) { |
1196 | err = req->rq_received; | 695 | err = req->rq_received; |
1197 | goto out_unlock; | 696 | goto out_unlock; |
1198 | } | 697 | } |
1199 | if (!__xprt_lock_write(xprt, task)) { | 698 | if (!xprt->ops->reserve_xprt(task)) { |
1200 | err = -EAGAIN; | 699 | err = -EAGAIN; |
1201 | goto out_unlock; | 700 | goto out_unlock; |
1202 | } | 701 | } |
@@ -1206,39 +705,42 @@ xprt_prepare_transmit(struct rpc_task *task) | |||
1206 | goto out_unlock; | 705 | goto out_unlock; |
1207 | } | 706 | } |
1208 | out_unlock: | 707 | out_unlock: |
1209 | spin_unlock_bh(&xprt->sock_lock); | 708 | spin_unlock_bh(&xprt->transport_lock); |
1210 | return err; | 709 | return err; |
1211 | } | 710 | } |
1212 | 711 | ||
1213 | void | 712 | void |
1214 | xprt_transmit(struct rpc_task *task) | 713 | xprt_abort_transmit(struct rpc_task *task) |
714 | { | ||
715 | struct rpc_xprt *xprt = task->tk_xprt; | ||
716 | |||
717 | xprt_release_write(xprt, task); | ||
718 | } | ||
719 | |||
720 | /** | ||
721 | * xprt_transmit - send an RPC request on a transport | ||
722 | * @task: controlling RPC task | ||
723 | * | ||
724 | * We have to copy the iovec because sendmsg fiddles with its contents. | ||
725 | */ | ||
726 | void xprt_transmit(struct rpc_task *task) | ||
1215 | { | 727 | { |
1216 | struct rpc_clnt *clnt = task->tk_client; | ||
1217 | struct rpc_rqst *req = task->tk_rqstp; | 728 | struct rpc_rqst *req = task->tk_rqstp; |
1218 | struct rpc_xprt *xprt = req->rq_xprt; | 729 | struct rpc_xprt *xprt = req->rq_xprt; |
1219 | int status, retry = 0; | 730 | int status; |
1220 | |||
1221 | 731 | ||
1222 | dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); | 732 | dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); |
1223 | 733 | ||
1224 | /* set up everything as needed. */ | ||
1225 | /* Write the record marker */ | ||
1226 | if (xprt->stream) { | ||
1227 | u32 *marker = req->rq_svec[0].iov_base; | ||
1228 | |||
1229 | *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); | ||
1230 | } | ||
1231 | |||
1232 | smp_rmb(); | 734 | smp_rmb(); |
1233 | if (!req->rq_received) { | 735 | if (!req->rq_received) { |
1234 | if (list_empty(&req->rq_list)) { | 736 | if (list_empty(&req->rq_list)) { |
1235 | spin_lock_bh(&xprt->sock_lock); | 737 | spin_lock_bh(&xprt->transport_lock); |
1236 | /* Update the softirq receive buffer */ | 738 | /* Update the softirq receive buffer */ |
1237 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, | 739 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, |
1238 | sizeof(req->rq_private_buf)); | 740 | sizeof(req->rq_private_buf)); |
1239 | /* Add request to the receive list */ | 741 | /* Add request to the receive list */ |
1240 | list_add_tail(&req->rq_list, &xprt->recv); | 742 | list_add_tail(&req->rq_list, &xprt->recv); |
1241 | spin_unlock_bh(&xprt->sock_lock); | 743 | spin_unlock_bh(&xprt->transport_lock); |
1242 | xprt_reset_majortimeo(req); | 744 | xprt_reset_majortimeo(req); |
1243 | /* Turn off autodisconnect */ | 745 | /* Turn off autodisconnect */ |
1244 | del_singleshot_timer_sync(&xprt->timer); | 746 | del_singleshot_timer_sync(&xprt->timer); |
@@ -1246,40 +748,19 @@ xprt_transmit(struct rpc_task *task) | |||
1246 | } else if (!req->rq_bytes_sent) | 748 | } else if (!req->rq_bytes_sent) |
1247 | return; | 749 | return; |
1248 | 750 | ||
1249 | /* Continue transmitting the packet/record. We must be careful | 751 | status = xprt->ops->send_request(task); |
1250 | * to cope with writespace callbacks arriving _after_ we have | 752 | if (status == 0) { |
1251 | * called xprt_sendmsg(). | 753 | dprintk("RPC: %4d xmit complete\n", task->tk_pid); |
1252 | */ | 754 | spin_lock_bh(&xprt->transport_lock); |
1253 | while (1) { | 755 | xprt->ops->set_retrans_timeout(task); |
1254 | req->rq_xtime = jiffies; | 756 | /* Don't race with disconnect */ |
1255 | status = xprt_sendmsg(xprt, req); | 757 | if (!xprt_connected(xprt)) |
1256 | 758 | task->tk_status = -ENOTCONN; | |
1257 | if (status < 0) | 759 | else if (!req->rq_received) |
1258 | break; | 760 | rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); |
1259 | 761 | xprt->ops->release_xprt(xprt, task); | |
1260 | if (xprt->stream) { | 762 | spin_unlock_bh(&xprt->transport_lock); |
1261 | req->rq_bytes_sent += status; | 763 | return; |
1262 | |||
1263 | /* If we've sent the entire packet, immediately | ||
1264 | * reset the count of bytes sent. */ | ||
1265 | if (req->rq_bytes_sent >= req->rq_slen) { | ||
1266 | req->rq_bytes_sent = 0; | ||
1267 | goto out_receive; | ||
1268 | } | ||
1269 | } else { | ||
1270 | if (status >= req->rq_slen) | ||
1271 | goto out_receive; | ||
1272 | status = -EAGAIN; | ||
1273 | break; | ||
1274 | } | ||
1275 | |||
1276 | dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", | ||
1277 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | ||
1278 | req->rq_slen); | ||
1279 | |||
1280 | status = -EAGAIN; | ||
1281 | if (retry++ > 50) | ||
1282 | break; | ||
1283 | } | 764 | } |
1284 | 765 | ||
1285 | /* Note: at this point, task->tk_sleeping has not yet been set, | 766 | /* Note: at this point, task->tk_sleeping has not yet been set, |
@@ -1289,60 +770,19 @@ xprt_transmit(struct rpc_task *task) | |||
1289 | task->tk_status = status; | 770 | task->tk_status = status; |
1290 | 771 | ||
1291 | switch (status) { | 772 | switch (status) { |
1292 | case -EAGAIN: | ||
1293 | if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { | ||
1294 | /* Protect against races with xprt_write_space */ | ||
1295 | spin_lock_bh(&xprt->sock_lock); | ||
1296 | /* Don't race with disconnect */ | ||
1297 | if (!xprt_connected(xprt)) | ||
1298 | task->tk_status = -ENOTCONN; | ||
1299 | else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { | ||
1300 | task->tk_timeout = req->rq_timeout; | ||
1301 | rpc_sleep_on(&xprt->pending, task, NULL, NULL); | ||
1302 | } | ||
1303 | spin_unlock_bh(&xprt->sock_lock); | ||
1304 | return; | ||
1305 | } | ||
1306 | /* Keep holding the socket if it is blocked */ | ||
1307 | rpc_delay(task, HZ>>4); | ||
1308 | return; | ||
1309 | case -ECONNREFUSED: | 773 | case -ECONNREFUSED: |
1310 | task->tk_timeout = RPC_REESTABLISH_TIMEOUT; | ||
1311 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); | 774 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); |
775 | case -EAGAIN: | ||
1312 | case -ENOTCONN: | 776 | case -ENOTCONN: |
1313 | return; | 777 | return; |
1314 | default: | 778 | default: |
1315 | if (xprt->stream) | 779 | break; |
1316 | xprt_disconnect(xprt); | ||
1317 | } | 780 | } |
1318 | xprt_release_write(xprt, task); | 781 | xprt_release_write(xprt, task); |
1319 | return; | 782 | return; |
1320 | out_receive: | ||
1321 | dprintk("RPC: %4d xmit complete\n", task->tk_pid); | ||
1322 | /* Set the task's receive timeout value */ | ||
1323 | spin_lock_bh(&xprt->sock_lock); | ||
1324 | if (!xprt->nocong) { | ||
1325 | int timer = task->tk_msg.rpc_proc->p_timer; | ||
1326 | task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer); | ||
1327 | task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries; | ||
1328 | if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0) | ||
1329 | task->tk_timeout = xprt->timeout.to_maxval; | ||
1330 | } else | ||
1331 | task->tk_timeout = req->rq_timeout; | ||
1332 | /* Don't race with disconnect */ | ||
1333 | if (!xprt_connected(xprt)) | ||
1334 | task->tk_status = -ENOTCONN; | ||
1335 | else if (!req->rq_received) | ||
1336 | rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); | ||
1337 | __xprt_release_write(xprt, task); | ||
1338 | spin_unlock_bh(&xprt->sock_lock); | ||
1339 | } | 783 | } |
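The rewritten xprt_transmit() no longer drives the socket directly; it calls through xprt->ops, so UDP and TCP transports can supply their own send and retransmit-timeout policies. A self-contained sketch of this ops-table indirection (illustrative names, not the kernel's struct rpc_xprt_ops):

    #include <stdio.h>

    struct xprt;

    struct xprt_ops {
        int  (*send_request)(struct xprt *x);
        void (*set_retrans_timeout)(struct xprt *x);
    };

    struct xprt {
        const struct xprt_ops *ops;
        const char *name;
    };

    static int udp_send(struct xprt *x)
    {
        printf("%s: sending datagram\n", x->name);
        return 0;
    }

    static void udp_timeo(struct xprt *x)
    {
        printf("%s: RTT-estimated retransmit timeout\n", x->name);
    }

    static const struct xprt_ops udp_ops = {
        .send_request        = udp_send,
        .set_retrans_timeout = udp_timeo,
    };

    int main(void)
    {
        struct xprt x = { .ops = &udp_ops, .name = "udp" };

        if (x.ops->send_request(&x) == 0)       /* generic code ... */
            x.ops->set_retrans_timeout(&x);     /* ... transport policy */
        return 0;
    }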
1340 | 784 | ||
1341 | /* | 785 | static inline void do_xprt_reserve(struct rpc_task *task) |
1342 | * Reserve an RPC call slot. | ||
1343 | */ | ||
1344 | static inline void | ||
1345 | do_xprt_reserve(struct rpc_task *task) | ||
1346 | { | 786 | { |
1347 | struct rpc_xprt *xprt = task->tk_xprt; | 787 | struct rpc_xprt *xprt = task->tk_xprt; |
1348 | 788 | ||
@@ -1362,22 +802,25 @@ do_xprt_reserve(struct rpc_task *task) | |||
1362 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); | 802 | rpc_sleep_on(&xprt->backlog, task, NULL, NULL); |
1363 | } | 803 | } |
1364 | 804 | ||
1365 | void | 805 | /** |
1366 | xprt_reserve(struct rpc_task *task) | 806 | * xprt_reserve - allocate an RPC request slot |
807 | * @task: RPC task requesting a slot allocation | ||
808 | * | ||
809 | * If no more slots are available, place the task on the transport's | ||
810 | * backlog queue. | ||
811 | */ | ||
812 | void xprt_reserve(struct rpc_task *task) | ||
1367 | { | 813 | { |
1368 | struct rpc_xprt *xprt = task->tk_xprt; | 814 | struct rpc_xprt *xprt = task->tk_xprt; |
1369 | 815 | ||
1370 | task->tk_status = -EIO; | 816 | task->tk_status = -EIO; |
1371 | if (!xprt->shutdown) { | 817 | if (!xprt->shutdown) { |
1372 | spin_lock(&xprt->xprt_lock); | 818 | spin_lock(&xprt->reserve_lock); |
1373 | do_xprt_reserve(task); | 819 | do_xprt_reserve(task); |
1374 | spin_unlock(&xprt->xprt_lock); | 820 | spin_unlock(&xprt->reserve_lock); |
1375 | } | 821 | } |
1376 | } | 822 | } |
1377 | 823 | ||
1378 | /* | ||
1379 | * Allocate a 'unique' XID | ||
1380 | */ | ||
1381 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) | 824 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) |
1382 | { | 825 | { |
1383 | return xprt->xid++; | 826 | return xprt->xid++; |
@@ -1388,11 +831,7 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt) | |||
1388 | get_random_bytes(&xprt->xid, sizeof(xprt->xid)); | 831 | get_random_bytes(&xprt->xid, sizeof(xprt->xid)); |
1389 | } | 832 | } |
1390 | 833 | ||
1391 | /* | 834 | static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) |
1392 | * Initialize RPC request | ||
1393 | */ | ||
1394 | static void | ||
1395 | xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | ||
1396 | { | 835 | { |
1397 | struct rpc_rqst *req = task->tk_rqstp; | 836 | struct rpc_rqst *req = task->tk_rqstp; |
1398 | 837 | ||
@@ -1400,128 +839,104 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
1400 | req->rq_task = task; | 839 | req->rq_task = task; |
1401 | req->rq_xprt = xprt; | 840 | req->rq_xprt = xprt; |
1402 | req->rq_xid = xprt_alloc_xid(xprt); | 841 | req->rq_xid = xprt_alloc_xid(xprt); |
842 | req->rq_release_snd_buf = NULL; | ||
1403 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, | 843 | dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, |
1404 | req, ntohl(req->rq_xid)); | 844 | req, ntohl(req->rq_xid)); |
1405 | } | 845 | } |
1406 | 846 | ||
1407 | /* | 847 | /** |
1408 | * Release an RPC call slot | 848 | * xprt_release - release an RPC request slot |
849 | * @task: task which is finished with the slot | ||
850 | * | ||
1409 | */ | 851 | */ |
1410 | void | 852 | void xprt_release(struct rpc_task *task) |
1411 | xprt_release(struct rpc_task *task) | ||
1412 | { | 853 | { |
1413 | struct rpc_xprt *xprt = task->tk_xprt; | 854 | struct rpc_xprt *xprt = task->tk_xprt; |
1414 | struct rpc_rqst *req; | 855 | struct rpc_rqst *req; |
1415 | 856 | ||
1416 | if (!(req = task->tk_rqstp)) | 857 | if (!(req = task->tk_rqstp)) |
1417 | return; | 858 | return; |
1418 | spin_lock_bh(&xprt->sock_lock); | 859 | spin_lock_bh(&xprt->transport_lock); |
1419 | __xprt_release_write(xprt, task); | 860 | xprt->ops->release_xprt(xprt, task); |
1420 | __xprt_put_cong(xprt, req); | 861 | if (xprt->ops->release_request) |
862 | xprt->ops->release_request(task); | ||
1421 | if (!list_empty(&req->rq_list)) | 863 | if (!list_empty(&req->rq_list)) |
1422 | list_del(&req->rq_list); | 864 | list_del(&req->rq_list); |
1423 | xprt->last_used = jiffies; | 865 | xprt->last_used = jiffies; |
1424 | if (list_empty(&xprt->recv) && !xprt->shutdown) | 866 | if (list_empty(&xprt->recv) && !xprt->shutdown) |
1425 | mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT); | 867 | mod_timer(&xprt->timer, |
1426 | spin_unlock_bh(&xprt->sock_lock); | 868 | xprt->last_used + xprt->idle_timeout); |
869 | spin_unlock_bh(&xprt->transport_lock); | ||
1427 | task->tk_rqstp = NULL; | 870 | task->tk_rqstp = NULL; |
871 | if (req->rq_release_snd_buf) | ||
872 | req->rq_release_snd_buf(req); | ||
1428 | memset(req, 0, sizeof(*req)); /* mark unused */ | 873 | memset(req, 0, sizeof(*req)); /* mark unused */ |
1429 | 874 | ||
1430 | dprintk("RPC: %4d release request %p\n", task->tk_pid, req); | 875 | dprintk("RPC: %4d release request %p\n", task->tk_pid, req); |
1431 | 876 | ||
1432 | spin_lock(&xprt->xprt_lock); | 877 | spin_lock(&xprt->reserve_lock); |
1433 | list_add(&req->rq_list, &xprt->free); | 878 | list_add(&req->rq_list, &xprt->free); |
1434 | xprt_clear_backlog(xprt); | 879 | rpc_wake_up_next(&xprt->backlog); |
1435 | spin_unlock(&xprt->xprt_lock); | 880 | spin_unlock(&xprt->reserve_lock); |
1436 | } | ||
1437 | |||
1438 | /* | ||
1439 | * Set default timeout parameters | ||
1440 | */ | ||
1441 | static void | ||
1442 | xprt_default_timeout(struct rpc_timeout *to, int proto) | ||
1443 | { | ||
1444 | if (proto == IPPROTO_UDP) | ||
1445 | xprt_set_timeout(to, 5, 5 * HZ); | ||
1446 | else | ||
1447 | xprt_set_timeout(to, 5, 60 * HZ); | ||
1448 | } | 881 | } |
1449 | 882 | ||
1450 | /* | 883 | /** |
1451 | * Set constant timeout | 884 | * xprt_set_timeout - set constant RPC timeout |
885 | * @to: RPC timeout parameters to set up | ||
886 | * @retr: number of retries | ||
887 | * @incr: amount of increase after each retry | ||
888 | * | ||
1452 | */ | 889 | */ |
1453 | void | 890 | void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) |
1454 | xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) | ||
1455 | { | 891 | { |
1456 | to->to_initval = | 892 | to->to_initval = |
1457 | to->to_increment = incr; | 893 | to->to_increment = incr; |
1458 | to->to_maxval = incr * retr; | 894 | to->to_maxval = to->to_initval + (incr * retr); |
1459 | to->to_retries = retr; | 895 | to->to_retries = retr; |
1460 | to->to_exponential = 0; | 896 | to->to_exponential = 0; |
1461 | } | 897 | } |
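Note the fix in the new version: to_maxval now includes the initial value, i.e. to_maxval = to_initval + incr * retr, so the final retry is no longer cut short. A small worked example of the resulting (non-exponential) timeout series, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int retr = 3, incr = 5;    /* illustrative: 3 retries, 5s step */
        unsigned int initval = incr;
        unsigned int maxval = initval + incr * retr;    /* 5 + 5*3 = 20s */
        unsigned int t = initval, i;

        /* Waits grow linearly: 5, 10, 15, 20 -- capped at maxval. */
        for (i = 0; i <= retr; i++) {
            printf("attempt %u: wait %us\n", i + 1, t);
            t += incr;
            if (t > maxval)
                t = maxval;
        }
        return 0;
    }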
1462 | 898 | ||
1463 | unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; | 899 | static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) |
1464 | unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; | ||
1465 | |||
1466 | /* | ||
1467 | * Initialize an RPC client | ||
1468 | */ | ||
1469 | static struct rpc_xprt * | ||
1470 | xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) | ||
1471 | { | 900 | { |
901 | int result; | ||
1472 | struct rpc_xprt *xprt; | 902 | struct rpc_xprt *xprt; |
1473 | unsigned int entries; | ||
1474 | size_t slot_table_size; | ||
1475 | struct rpc_rqst *req; | 903 | struct rpc_rqst *req; |
1476 | 904 | ||
1477 | dprintk("RPC: setting up %s transport...\n", | ||
1478 | proto == IPPROTO_UDP? "UDP" : "TCP"); | ||
1479 | |||
1480 | entries = (proto == IPPROTO_TCP)? | ||
1481 | xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries; | ||
1482 | |||
1483 | if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) | 905 | if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) |
1484 | return ERR_PTR(-ENOMEM); | 906 | return ERR_PTR(-ENOMEM); |
1485 | memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ | 907 | memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ |
1486 | xprt->max_reqs = entries; | ||
1487 | slot_table_size = entries * sizeof(xprt->slot[0]); | ||
1488 | xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); | ||
1489 | if (xprt->slot == NULL) { | ||
1490 | kfree(xprt); | ||
1491 | return ERR_PTR(-ENOMEM); | ||
1492 | } | ||
1493 | memset(xprt->slot, 0, slot_table_size); | ||
1494 | 908 | ||
1495 | xprt->addr = *ap; | 909 | xprt->addr = *ap; |
1496 | xprt->prot = proto; | 910 | |
1497 | xprt->stream = (proto == IPPROTO_TCP)? 1 : 0; | 911 | switch (proto) { |
1498 | if (xprt->stream) { | 912 | case IPPROTO_UDP: |
1499 | xprt->cwnd = RPC_MAXCWND(xprt); | 913 | result = xs_setup_udp(xprt, to); |
1500 | xprt->nocong = 1; | 914 | break; |
1501 | xprt->max_payload = (1U << 31) - 1; | 915 | case IPPROTO_TCP: |
1502 | } else { | 916 | result = xs_setup_tcp(xprt, to); |
1503 | xprt->cwnd = RPC_INITCWND; | 917 | break; |
1504 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 918 | default: |
919 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", | ||
920 | proto); | ||
921 | result = -EIO; | ||
922 | break; | ||
923 | } | ||
924 | if (result) { | ||
925 | kfree(xprt); | ||
926 | return ERR_PTR(result); | ||
1505 | } | 927 | } |
1506 | spin_lock_init(&xprt->sock_lock); | 928 | |
1507 | spin_lock_init(&xprt->xprt_lock); | 929 | spin_lock_init(&xprt->transport_lock); |
1508 | init_waitqueue_head(&xprt->cong_wait); | 930 | spin_lock_init(&xprt->reserve_lock); |
1509 | 931 | ||
1510 | INIT_LIST_HEAD(&xprt->free); | 932 | INIT_LIST_HEAD(&xprt->free); |
1511 | INIT_LIST_HEAD(&xprt->recv); | 933 | INIT_LIST_HEAD(&xprt->recv); |
1512 | INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); | 934 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); |
1513 | INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt); | ||
1514 | init_timer(&xprt->timer); | 935 | init_timer(&xprt->timer); |
1515 | xprt->timer.function = xprt_init_autodisconnect; | 936 | xprt->timer.function = xprt_init_autodisconnect; |
1516 | xprt->timer.data = (unsigned long) xprt; | 937 | xprt->timer.data = (unsigned long) xprt; |
1517 | xprt->last_used = jiffies; | 938 | xprt->last_used = jiffies; |
1518 | xprt->port = XPRT_MAX_RESVPORT; | 939 | xprt->cwnd = RPC_INITCWND; |
1519 | |||
1520 | /* Set timeout parameters */ | ||
1521 | if (to) { | ||
1522 | xprt->timeout = *to; | ||
1523 | } else | ||
1524 | xprt_default_timeout(&xprt->timeout, xprt->prot); | ||
1525 | 940 | ||
1526 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); | 941 | rpc_init_wait_queue(&xprt->pending, "xprt_pending"); |
1527 | rpc_init_wait_queue(&xprt->sending, "xprt_sending"); | 942 | rpc_init_wait_queue(&xprt->sending, "xprt_sending"); |
@@ -1529,139 +944,25 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) | |||
1529 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); | 944 | rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); |
1530 | 945 | ||
1531 | /* initialize free list */ | 946 | /* initialize free list */ |
1532 | for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--) | 947 | for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) |
1533 | list_add(&req->rq_list, &xprt->free); | 948 | list_add(&req->rq_list, &xprt->free); |
1534 | 949 | ||
1535 | xprt_init_xid(xprt); | 950 | xprt_init_xid(xprt); |
1536 | 951 | ||
1537 | /* Check whether we want to use a reserved port */ | ||
1538 | xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; | ||
1539 | |||
1540 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 952 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1541 | xprt->max_reqs); | 953 | xprt->max_reqs); |
1542 | 954 | ||
1543 | return xprt; | 955 | return xprt; |
1544 | } | 956 | } |
1545 | 957 | ||
1546 | /* | 958 | /** |
1547 | * Bind to a reserved port | 959 | * xprt_create_proto - create an RPC client transport |
1548 | */ | 960 | * @proto: requested transport protocol |
1549 | static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | 961 | * @sap: remote peer's address |
1550 | { | 962 | * @to: timeout parameters for new transport |
1551 | struct sockaddr_in myaddr = { | 963 | * |
1552 | .sin_family = AF_INET, | ||
1553 | }; | ||
1554 | int err, port; | ||
1555 | |||
1556 | /* Were we already bound to a given port? Try to reuse it */ | ||
1557 | port = xprt->port; | ||
1558 | do { | ||
1559 | myaddr.sin_port = htons(port); | ||
1560 | err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, | ||
1561 | sizeof(myaddr)); | ||
1562 | if (err == 0) { | ||
1563 | xprt->port = port; | ||
1564 | return 0; | ||
1565 | } | ||
1566 | if (--port == 0) | ||
1567 | port = XPRT_MAX_RESVPORT; | ||
1568 | } while (err == -EADDRINUSE && port != xprt->port); | ||
1569 | |||
1570 | printk("RPC: Can't bind to reserved port (%d).\n", -err); | ||
1571 | return err; | ||
1572 | } | ||
1573 | |||
1574 | static void | ||
1575 | xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) | ||
1576 | { | ||
1577 | struct sock *sk = sock->sk; | ||
1578 | |||
1579 | if (xprt->inet) | ||
1580 | return; | ||
1581 | |||
1582 | write_lock_bh(&sk->sk_callback_lock); | ||
1583 | sk->sk_user_data = xprt; | ||
1584 | xprt->old_data_ready = sk->sk_data_ready; | ||
1585 | xprt->old_state_change = sk->sk_state_change; | ||
1586 | xprt->old_write_space = sk->sk_write_space; | ||
1587 | if (xprt->prot == IPPROTO_UDP) { | ||
1588 | sk->sk_data_ready = udp_data_ready; | ||
1589 | sk->sk_no_check = UDP_CSUM_NORCV; | ||
1590 | xprt_set_connected(xprt); | ||
1591 | } else { | ||
1592 | tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ | ||
1593 | sk->sk_data_ready = tcp_data_ready; | ||
1594 | sk->sk_state_change = tcp_state_change; | ||
1595 | xprt_clear_connected(xprt); | ||
1596 | } | ||
1597 | sk->sk_write_space = xprt_write_space; | ||
1598 | |||
1599 | /* Reset to new socket */ | ||
1600 | xprt->sock = sock; | ||
1601 | xprt->inet = sk; | ||
1602 | write_unlock_bh(&sk->sk_callback_lock); | ||
1603 | |||
1604 | return; | ||
1605 | } | ||
1606 | |||
1607 | /* | ||
1608 | * Set socket buffer length | ||
1609 | */ | ||
1610 | void | ||
1611 | xprt_sock_setbufsize(struct rpc_xprt *xprt) | ||
1612 | { | ||
1613 | struct sock *sk = xprt->inet; | ||
1614 | |||
1615 | if (xprt->stream) | ||
1616 | return; | ||
1617 | if (xprt->rcvsize) { | ||
1618 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | ||
1619 | sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; | ||
1620 | } | ||
1621 | if (xprt->sndsize) { | ||
1622 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | ||
1623 | sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; | ||
1624 | sk->sk_write_space(sk); | ||
1625 | } | ||
1626 | } | ||
1627 | |||
1628 | /* | ||
 1629 | * Datagram sockets are created here, but xprt_connect will create | ||
1630 | * and connect stream sockets. | ||
1631 | */ | ||
1632 | static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) | ||
1633 | { | ||
1634 | struct socket *sock; | ||
1635 | int type, err; | ||
1636 | |||
1637 | dprintk("RPC: xprt_create_socket(%s %d)\n", | ||
1638 | (proto == IPPROTO_UDP)? "udp" : "tcp", proto); | ||
1639 | |||
1640 | type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; | ||
1641 | |||
1642 | if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { | ||
1643 | printk("RPC: can't create socket (%d).\n", -err); | ||
1644 | return NULL; | ||
1645 | } | ||
1646 | |||
1647 | /* If the caller has the capability, bind to a reserved port */ | ||
1648 | if (resvport && xprt_bindresvport(xprt, sock) < 0) { | ||
1649 | printk("RPC: can't bind to reserved port.\n"); | ||
1650 | goto failed; | ||
1651 | } | ||
1652 | |||
1653 | return sock; | ||
1654 | |||
1655 | failed: | ||
1656 | sock_release(sock); | ||
1657 | return NULL; | ||
1658 | } | ||
1659 | |||
1660 | /* | ||
1661 | * Create an RPC client transport given the protocol and peer address. | ||
1662 | */ | 964 | */ |
1663 | struct rpc_xprt * | 965 | struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) |
1664 | xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) | ||
1665 | { | 966 | { |
1666 | struct rpc_xprt *xprt; | 967 | struct rpc_xprt *xprt; |
1667 | 968 | ||
@@ -1673,46 +974,26 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) | |||
1673 | return xprt; | 974 | return xprt; |
1674 | } | 975 | } |
1675 | 976 | ||
1676 | /* | 977 | static void xprt_shutdown(struct rpc_xprt *xprt) |
1677 | * Prepare for transport shutdown. | ||
1678 | */ | ||
1679 | static void | ||
1680 | xprt_shutdown(struct rpc_xprt *xprt) | ||
1681 | { | 978 | { |
1682 | xprt->shutdown = 1; | 979 | xprt->shutdown = 1; |
1683 | rpc_wake_up(&xprt->sending); | 980 | rpc_wake_up(&xprt->sending); |
1684 | rpc_wake_up(&xprt->resend); | 981 | rpc_wake_up(&xprt->resend); |
1685 | rpc_wake_up(&xprt->pending); | 982 | xprt_wake_pending_tasks(xprt, -EIO); |
1686 | rpc_wake_up(&xprt->backlog); | 983 | rpc_wake_up(&xprt->backlog); |
1687 | wake_up(&xprt->cong_wait); | ||
1688 | del_timer_sync(&xprt->timer); | 984 | del_timer_sync(&xprt->timer); |
1689 | |||
1690 | /* synchronously wait for connect worker to finish */ | ||
1691 | cancel_delayed_work(&xprt->sock_connect); | ||
1692 | flush_scheduled_work(); | ||
1693 | } | 985 | } |
1694 | 986 | ||
1695 | /* | 987 | /** |
1696 | * Clear the xprt backlog queue | 988 | * xprt_destroy - destroy an RPC transport, killing off all requests. |
1697 | */ | 989 | * @xprt: transport to destroy |
1698 | static int | 990 | * |
1699 | xprt_clear_backlog(struct rpc_xprt *xprt) { | ||
1700 | rpc_wake_up_next(&xprt->backlog); | ||
1701 | wake_up(&xprt->cong_wait); | ||
1702 | return 1; | ||
1703 | } | ||
1704 | |||
1705 | /* | ||
1706 | * Destroy an RPC transport, killing off all requests. | ||
1707 | */ | 991 | */ |
1708 | int | 992 | int xprt_destroy(struct rpc_xprt *xprt) |
1709 | xprt_destroy(struct rpc_xprt *xprt) | ||
1710 | { | 993 | { |
1711 | dprintk("RPC: destroying transport %p\n", xprt); | 994 | dprintk("RPC: destroying transport %p\n", xprt); |
1712 | xprt_shutdown(xprt); | 995 | xprt_shutdown(xprt); |
1713 | xprt_disconnect(xprt); | 996 | xprt->ops->destroy(xprt); |
1714 | xprt_close(xprt); | ||
1715 | kfree(xprt->slot); | ||
1716 | kfree(xprt); | 997 | kfree(xprt); |
1717 | 998 | ||
1718 | return 0; | 999 | return 0; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c new file mode 100644 index 000000000000..2e1529217e65 --- /dev/null +++ b/net/sunrpc/xprtsock.c | |||
@@ -0,0 +1,1252 @@ | |||
1 | /* | ||
2 | * linux/net/sunrpc/xprtsock.c | ||
3 | * | ||
4 | * Client-side transport implementation for sockets. | ||
5 | * | ||
 6 | * TCP callback race fixes (C) 1998 Red Hat Software <alan@redhat.com> | ||
7 | * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com> | ||
8 | * TCP NFS related read + write fixes | ||
9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> | ||
10 | * | ||
 11 | * Rewrite of large parts of the code in order to stabilize TCP stuff. | ||
12 | * Fix behaviour when socket buffer is full. | ||
13 | * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no> | ||
14 | * | ||
15 | * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com> | ||
16 | */ | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/capability.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/pagemap.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/socket.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/net.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/udp.h> | ||
29 | #include <linux/tcp.h> | ||
30 | #include <linux/sunrpc/clnt.h> | ||
31 | #include <linux/file.h> | ||
32 | |||
33 | #include <net/sock.h> | ||
34 | #include <net/checksum.h> | ||
35 | #include <net/udp.h> | ||
36 | #include <net/tcp.h> | ||
37 | |||
38 | /* | ||
39 | * How many times to try sending a request on a socket before waiting | ||
40 | * for the socket buffer to clear. | ||
41 | */ | ||
42 | #define XS_SENDMSG_RETRY (10U) | ||
43 | |||
44 | /* | ||
45 | * Time out for an RPC UDP socket connect. UDP socket connects are | ||
46 | * synchronous, but we set a timeout anyway in case of resource | ||
47 | * exhaustion on the local host. | ||
48 | */ | ||
49 | #define XS_UDP_CONN_TO (5U * HZ) | ||
50 | |||
51 | /* | ||
52 | * Wait duration for an RPC TCP connection to be established. Solaris | ||
53 | * NFS over TCP uses 60 seconds, for example, which is in line with how | ||
54 | * long a server takes to reboot. | ||
55 | */ | ||
56 | #define XS_TCP_CONN_TO (60U * HZ) | ||
57 | |||
58 | /* | ||
59 | * Wait duration for a reply from the RPC portmapper. | ||
60 | */ | ||
61 | #define XS_BIND_TO (60U * HZ) | ||
62 | |||
63 | /* | ||
64 | * Delay if a UDP socket connect error occurs. This is most likely some | ||
65 | * kind of resource problem on the local host. | ||
66 | */ | ||
67 | #define XS_UDP_REEST_TO (2U * HZ) | ||
68 | |||
69 | /* | ||
70 | * The reestablish timeout allows clients to delay for a bit before attempting | ||
71 | * to reconnect to a server that just dropped our connection. | ||
72 | * | ||
73 | * We implement an exponential backoff when trying to reestablish a TCP | ||
74 | * transport connection with the server. Some servers like to drop a TCP | ||
75 | * connection when they are overworked, so we start with a short timeout and | ||
76 | * increase over time if the server is down or not responding. | ||
77 | */ | ||
78 | #define XS_TCP_INIT_REEST_TO (3U * HZ) | ||
79 | #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) | ||
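The backoff described above can be realized by doubling the delay after each failed reconnect and clamping at the maximum; with these constants the series runs 3s, 6s, 12s, ... up to the 300s cap. A sketch of one such doubling-with-clamp scheme (plain seconds instead of jiffies; the actual reconnect logic lives elsewhere in this file):

    #include <stdio.h>

    #define INIT_REEST_TO 3u            /* XS_TCP_INIT_REEST_TO / HZ */
    #define MAX_REEST_TO  (5u * 60)     /* XS_TCP_MAX_REEST_TO / HZ */

    int main(void)
    {
        unsigned int to = INIT_REEST_TO;
        int attempt;

        for (attempt = 1; attempt <= 9; attempt++) {
            printf("reconnect attempt %d: back off %us\n", attempt, to);
            to <<= 1;                   /* double after each failure ... */
            if (to > MAX_REEST_TO)      /* ... but never beyond the cap */
                to = MAX_REEST_TO;
        }
        return 0;
    }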
80 | |||
81 | /* | ||
82 | * TCP idle timeout; client drops the transport socket if it is idle | ||
83 | * for this long. Note that we also timeout UDP sockets to prevent | ||
84 | * holding port numbers when there is no RPC traffic. | ||
85 | */ | ||
86 | #define XS_IDLE_DISC_TO (5U * 60 * HZ) | ||
87 | |||
88 | #ifdef RPC_DEBUG | ||
89 | # undef RPC_DEBUG_DATA | ||
90 | # define RPCDBG_FACILITY RPCDBG_TRANS | ||
91 | #endif | ||
92 | |||
93 | #ifdef RPC_DEBUG_DATA | ||
94 | static void xs_pktdump(char *msg, u32 *packet, unsigned int count) | ||
95 | { | ||
96 | u8 *buf = (u8 *) packet; | ||
97 | int j; | ||
98 | |||
99 | dprintk("RPC: %s\n", msg); | ||
100 | for (j = 0; j < count && j < 128; j += 4) { | ||
101 | if (!(j & 31)) { | ||
102 | if (j) | ||
103 | dprintk("\n"); | ||
104 | dprintk("0x%04x ", j); | ||
105 | } | ||
106 | dprintk("%02x%02x%02x%02x ", | ||
107 | buf[j], buf[j+1], buf[j+2], buf[j+3]); | ||
108 | } | ||
109 | dprintk("\n"); | ||
110 | } | ||
111 | #else | ||
112 | static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) | ||
113 | { | ||
114 | /* NOP */ | ||
115 | } | ||
116 | #endif | ||
117 | |||
118 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) | ||
119 | |||
120 | static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) | ||
121 | { | ||
122 | struct kvec iov = { | ||
123 | .iov_base = xdr->head[0].iov_base + base, | ||
124 | .iov_len = len - base, | ||
125 | }; | ||
126 | struct msghdr msg = { | ||
127 | .msg_name = addr, | ||
128 | .msg_namelen = addrlen, | ||
129 | .msg_flags = XS_SENDMSG_FLAGS, | ||
130 | }; | ||
131 | |||
132 | if (xdr->len > len) | ||
133 | msg.msg_flags |= MSG_MORE; | ||
134 | |||
135 | if (likely(iov.iov_len)) | ||
136 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | ||
137 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); | ||
138 | } | ||
139 | |||
140 | static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) | ||
141 | { | ||
142 | struct kvec iov = { | ||
143 | .iov_base = xdr->tail[0].iov_base + base, | ||
144 | .iov_len = len - base, | ||
145 | }; | ||
146 | struct msghdr msg = { | ||
147 | .msg_flags = XS_SENDMSG_FLAGS, | ||
148 | }; | ||
149 | |||
150 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * xs_sendpages - write pages directly to a socket | ||
155 | * @sock: socket to send on | ||
156 | * @addr: UDP only -- address of destination | ||
157 | * @addrlen: UDP only -- length of destination address | ||
158 | * @xdr: buffer containing this request | ||
159 | * @base: starting position in the buffer | ||
160 | * | ||
161 | */ | ||
162 | static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) | ||
163 | { | ||
164 | struct page **ppage = xdr->pages; | ||
165 | unsigned int len, pglen = xdr->page_len; | ||
166 | int err, ret = 0; | ||
167 | ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); | ||
168 | |||
169 | if (unlikely(!sock)) | ||
170 | return -ENOTCONN; | ||
171 | |||
172 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | ||
173 | |||
174 | len = xdr->head[0].iov_len; | ||
175 | if (base < len || (addr != NULL && base == 0)) { | ||
176 | err = xs_send_head(sock, addr, addrlen, xdr, base, len); | ||
177 | if (ret == 0) | ||
178 | ret = err; | ||
179 | else if (err > 0) | ||
180 | ret += err; | ||
181 | if (err != (len - base)) | ||
182 | goto out; | ||
183 | base = 0; | ||
184 | } else | ||
185 | base -= len; | ||
186 | |||
187 | if (unlikely(pglen == 0)) | ||
188 | goto copy_tail; | ||
189 | if (unlikely(base >= pglen)) { | ||
190 | base -= pglen; | ||
191 | goto copy_tail; | ||
192 | } | ||
193 | if (base || xdr->page_base) { | ||
194 | pglen -= base; | ||
195 | base += xdr->page_base; | ||
196 | ppage += base >> PAGE_CACHE_SHIFT; | ||
197 | base &= ~PAGE_CACHE_MASK; | ||
198 | } | ||
199 | |||
200 | sendpage = sock->ops->sendpage ? : sock_no_sendpage; | ||
201 | do { | ||
202 | int flags = XS_SENDMSG_FLAGS; | ||
203 | |||
204 | len = PAGE_CACHE_SIZE; | ||
205 | if (base) | ||
206 | len -= base; | ||
207 | if (pglen < len) | ||
208 | len = pglen; | ||
209 | |||
210 | if (pglen != len || xdr->tail[0].iov_len != 0) | ||
211 | flags |= MSG_MORE; | ||
212 | |||
213 | /* Hmm... We might be dealing with highmem pages */ | ||
214 | if (PageHighMem(*ppage)) | ||
215 | sendpage = sock_no_sendpage; | ||
216 | err = sendpage(sock, *ppage, base, len, flags); | ||
217 | if (ret == 0) | ||
218 | ret = err; | ||
219 | else if (err > 0) | ||
220 | ret += err; | ||
221 | if (err != len) | ||
222 | goto out; | ||
223 | base = 0; | ||
224 | ppage++; | ||
225 | } while ((pglen -= len) != 0); | ||
226 | copy_tail: | ||
227 | len = xdr->tail[0].iov_len; | ||
228 | if (base < len) { | ||
229 | err = xs_send_tail(sock, xdr, base, len); | ||
230 | if (ret == 0) | ||
231 | ret = err; | ||
232 | else if (err > 0) | ||
233 | ret += err; | ||
234 | } | ||
235 | out: | ||
236 | return ret; | ||
237 | } | ||
238 | |||
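[Editor's note: xs_sendpages above treats the request as one logical byte stream laid out head, then page data, then tail, with base as a resume offset into that stream after a partial send. A hedged userspace sketch of just the offset bookkeeping; struct fake_xdr and locate are illustrative stand-ins, not kernel types.]

#include <stdio.h>

/* Simplified stand-in for struct xdr_buf: three consecutive segments. */
struct fake_xdr {
	unsigned int head_len;
	unsigned int page_len;
	unsigned int tail_len;
};

/* Report which segment a resume offset 'base' falls into, mirroring
 * the head -> pages -> tail walk in xs_sendpages. */
static void locate(const struct fake_xdr *xdr, unsigned int base)
{
	if (base < xdr->head_len) {
		printf("resume in head at offset %u\n", base);
		return;
	}
	base -= xdr->head_len;
	if (base < xdr->page_len) {
		printf("resume in pages at offset %u\n", base);
		return;
	}
	base -= xdr->page_len;
	printf("resume in tail at offset %u\n", base);
}

int main(void)
{
	struct fake_xdr xdr = { .head_len = 128, .page_len = 4096, .tail_len = 16 };

	locate(&xdr, 100);	/* lands in head */
	locate(&xdr, 700);	/* lands in pages, offset 572 */
	locate(&xdr, 4230);	/* lands in tail, offset 6 */
	return 0;
}
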
239 | /** | ||
240 | * xs_nospace - place task on wait queue if transmit was incomplete | ||
241 | * @task: task to put to sleep | ||
242 | * | ||
243 | */ | ||
244 | static void xs_nospace(struct rpc_task *task) | ||
245 | { | ||
246 | struct rpc_rqst *req = task->tk_rqstp; | ||
247 | struct rpc_xprt *xprt = req->rq_xprt; | ||
248 | |||
249 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", | ||
250 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | ||
251 | req->rq_slen); | ||
252 | |||
253 | if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { | ||
254 | /* Protect against races with write_space */ | ||
255 | spin_lock_bh(&xprt->transport_lock); | ||
256 | |||
257 | /* Don't race with disconnect */ | ||
258 | if (!xprt_connected(xprt)) | ||
259 | task->tk_status = -ENOTCONN; | ||
260 | else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) | ||
261 | xprt_wait_for_buffer_space(task); | ||
262 | |||
263 | spin_unlock_bh(&xprt->transport_lock); | ||
264 | } else | ||
265 | /* Keep holding the socket if it is blocked */ | ||
266 | rpc_delay(task, HZ>>4); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * xs_udp_send_request - write an RPC request to a UDP socket | ||
271 | * @task: address of RPC task that manages the state of an RPC request | ||
272 | * | ||
273 | * Return values: | ||
274 | * 0: The request has been sent | ||
275 | * EAGAIN: The socket was blocked; please call again later to | ||
276 | * complete the request | ||
277 | * ENOTCONN: Caller needs to invoke connect logic then call again | ||
278 | * other: Some other error occurred, the request was not sent | ||
279 | */ | ||
280 | static int xs_udp_send_request(struct rpc_task *task) | ||
281 | { | ||
282 | struct rpc_rqst *req = task->tk_rqstp; | ||
283 | struct rpc_xprt *xprt = req->rq_xprt; | ||
284 | struct xdr_buf *xdr = &req->rq_snd_buf; | ||
285 | int status; | ||
286 | |||
287 | xs_pktdump("packet data:", | ||
288 | req->rq_svec->iov_base, | ||
289 | req->rq_svec->iov_len); | ||
290 | |||
291 | req->rq_xtime = jiffies; | ||
292 | status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, | ||
293 | sizeof(xprt->addr), xdr, req->rq_bytes_sent); | ||
294 | |||
295 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | ||
296 | xdr->len - req->rq_bytes_sent, status); | ||
297 | |||
298 | if (likely(status >= (int) req->rq_slen)) | ||
299 | return 0; | ||
300 | |||
301 | /* Still some bytes left; set up for a retry later. */ | ||
302 | if (status > 0) | ||
303 | status = -EAGAIN; | ||
304 | |||
305 | switch (status) { | ||
306 | case -ENETUNREACH: | ||
307 | case -EPIPE: | ||
308 | case -ECONNREFUSED: | ||
309 | /* When the server has died, an ICMP port unreachable message | ||
310 | * prompts ECONNREFUSED. */ | ||
311 | break; | ||
312 | case -EAGAIN: | ||
313 | xs_nospace(task); | ||
314 | break; | ||
315 | default: | ||
316 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | ||
317 | -status); | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | return status; | ||
322 | } | ||
323 | |||
324 | static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf) | ||
325 | { | ||
326 | u32 reclen = buf->len - sizeof(rpc_fraghdr); | ||
327 | rpc_fraghdr *base = buf->head[0].iov_base; | ||
328 | *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen); | ||
329 | } | ||
330 | |||
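[Editor's note: the marker written above is standard RFC 1831 record marking: one 32-bit big-endian word per fragment, top bit set on the final fragment of a record, low 31 bits giving the fragment length. xs_tcp_read_fraghdr below undoes the same encoding. A standalone sketch of both directions; the two constants mirror RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK.]

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT		0x80000000U	/* mirrors RPC_LAST_STREAM_FRAGMENT */
#define FRAGMENT_SIZE_MASK	0x7fffffffU	/* mirrors RPC_FRAGMENT_SIZE_MASK */

/* Build the on-the-wire marker for a fragment of 'len' bytes. */
static uint32_t encode_marker(uint32_t len, int last)
{
	return htonl((last ? LAST_FRAGMENT : 0) | (len & FRAGMENT_SIZE_MASK));
}

/* Split a received marker back into (length, last-fragment flag). */
static void decode_marker(uint32_t wire, uint32_t *len, int *last)
{
	uint32_t host = ntohl(wire);

	*last = (host & LAST_FRAGMENT) != 0;
	*len = host & FRAGMENT_SIZE_MASK;
}

int main(void)
{
	uint32_t wire = encode_marker(472, 1);
	uint32_t len;
	int last;

	decode_marker(wire, &len, &last);
	printf("fragment length %u, last=%d\n", len, last);
	return 0;
}
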
331 | /** | ||
332 | * xs_tcp_send_request - write an RPC request to a TCP socket | ||
333 | * @task: address of RPC task that manages the state of an RPC request | ||
334 | * | ||
335 | * Return values: | ||
336 | * 0: The request has been sent | ||
337 | * EAGAIN: The socket was blocked; please call again later to | ||
338 | * complete the request | ||
339 | * ENOTCONN: Caller needs to invoke connect logic then call again | ||
340 | * other: Some other error occurred, the request was not sent | ||
341 | * | ||
342 | * XXX: In the case of soft timeouts, should we eventually give up | ||
343 | * if sendmsg is not able to make progress? | ||
344 | */ | ||
345 | static int xs_tcp_send_request(struct rpc_task *task) | ||
346 | { | ||
347 | struct rpc_rqst *req = task->tk_rqstp; | ||
348 | struct rpc_xprt *xprt = req->rq_xprt; | ||
349 | struct xdr_buf *xdr = &req->rq_snd_buf; | ||
350 | int status, retry = 0; | ||
351 | |||
352 | xs_encode_tcp_record_marker(&req->rq_snd_buf); | ||
353 | |||
354 | xs_pktdump("packet data:", | ||
355 | req->rq_svec->iov_base, | ||
356 | req->rq_svec->iov_len); | ||
357 | |||
358 | /* Continue transmitting the packet/record. We must be careful | ||
359 | * to cope with writespace callbacks arriving _after_ we have | ||
360 | * called sendmsg(). */ | ||
361 | while (1) { | ||
362 | req->rq_xtime = jiffies; | ||
363 | status = xs_sendpages(xprt->sock, NULL, 0, xdr, | ||
364 | req->rq_bytes_sent); | ||
365 | |||
366 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | ||
367 | xdr->len - req->rq_bytes_sent, status); | ||
368 | |||
369 | if (unlikely(status < 0)) | ||
370 | break; | ||
371 | |||
372 | /* If we've sent the entire packet, immediately | ||
373 | * reset the count of bytes sent. */ | ||
374 | req->rq_bytes_sent += status; | ||
375 | if (likely(req->rq_bytes_sent >= req->rq_slen)) { | ||
376 | req->rq_bytes_sent = 0; | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | status = -EAGAIN; | ||
381 | if (retry++ > XS_SENDMSG_RETRY) | ||
382 | break; | ||
383 | } | ||
384 | |||
385 | switch (status) { | ||
386 | case -EAGAIN: | ||
387 | xs_nospace(task); | ||
388 | break; | ||
389 | case -ECONNREFUSED: | ||
390 | case -ECONNRESET: | ||
391 | case -ENOTCONN: | ||
392 | case -EPIPE: | ||
393 | status = -ENOTCONN; | ||
394 | break; | ||
395 | default: | ||
396 | dprintk("RPC: sendmsg returned unrecognized error %d\n", | ||
397 | -status); | ||
398 | xprt_disconnect(xprt); | ||
399 | break; | ||
400 | } | ||
401 | |||
402 | return status; | ||
403 | } | ||
404 | |||
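[Editor's note: the send loop above is resumable: req->rq_bytes_sent records how far into the marked record transmission got, so a retry after EAGAIN resumes mid-record instead of resending from the marker. A hedged sketch of the same pattern over a plain nonblocking socket; send_resumable is illustrative and error handling is trimmed.]

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Try to push the unsent remainder of 'buf'; '*sent' persists across
 * calls, like rq_bytes_sent. Returns 0 when done, -EAGAIN to retry
 * later, or a negative errno on a hard error. */
static int send_resumable(int fd, const char *buf, size_t len, size_t *sent)
{
	while (*sent < len) {
		ssize_t n = send(fd, buf + *sent, len - *sent,
				 MSG_DONTWAIT | MSG_NOSIGNAL);
		if (n < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				return -EAGAIN;	/* caller waits for write space */
			return -errno;		/* connection-level failure */
		}
		*sent += n;			/* resume point for the next call */
	}
	*sent = 0;				/* whole record went out */
	return 0;
}
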
405 | /** | ||
406 | * xs_close - close a socket | ||
407 | * @xprt: transport | ||
408 | * | ||
409 | * This is used when all requests are complete; i.e., no DRC state that we | ||
410 | * want to save remains on the server. | ||
411 | */ | ||
412 | static void xs_close(struct rpc_xprt *xprt) | ||
413 | { | ||
414 | struct socket *sock = xprt->sock; | ||
415 | struct sock *sk = xprt->inet; | ||
416 | |||
417 | if (!sk) | ||
418 | return; | ||
419 | |||
420 | dprintk("RPC: xs_close xprt %p\n", xprt); | ||
421 | |||
422 | write_lock_bh(&sk->sk_callback_lock); | ||
423 | xprt->inet = NULL; | ||
424 | xprt->sock = NULL; | ||
425 | |||
426 | sk->sk_user_data = NULL; | ||
427 | sk->sk_data_ready = xprt->old_data_ready; | ||
428 | sk->sk_state_change = xprt->old_state_change; | ||
429 | sk->sk_write_space = xprt->old_write_space; | ||
430 | write_unlock_bh(&sk->sk_callback_lock); | ||
431 | |||
432 | sk->sk_no_check = 0; | ||
433 | |||
434 | sock_release(sock); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * xs_destroy - prepare to shutdown a transport | ||
439 | * @xprt: doomed transport | ||
440 | * | ||
441 | */ | ||
442 | static void xs_destroy(struct rpc_xprt *xprt) | ||
443 | { | ||
444 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | ||
445 | |||
446 | cancel_delayed_work(&xprt->connect_worker); | ||
447 | flush_scheduled_work(); | ||
448 | |||
449 | xprt_disconnect(xprt); | ||
450 | xs_close(xprt); | ||
451 | kfree(xprt->slot); | ||
452 | } | ||
453 | |||
454 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | ||
455 | { | ||
456 | return (struct rpc_xprt *) sk->sk_user_data; | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * xs_udp_data_ready - "data ready" callback for UDP sockets | ||
461 | * @sk: socket with data to read | ||
462 | * @len: how much data to read | ||
463 | * | ||
464 | */ | ||
465 | static void xs_udp_data_ready(struct sock *sk, int len) | ||
466 | { | ||
467 | struct rpc_task *task; | ||
468 | struct rpc_xprt *xprt; | ||
469 | struct rpc_rqst *rovr; | ||
470 | struct sk_buff *skb; | ||
471 | int err, repsize, copied; | ||
472 | u32 _xid, *xp; | ||
473 | |||
474 | read_lock(&sk->sk_callback_lock); | ||
475 | dprintk("RPC: xs_udp_data_ready...\n"); | ||
476 | if (!(xprt = xprt_from_sock(sk))) | ||
477 | goto out; | ||
478 | |||
479 | if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) | ||
480 | goto out; | ||
481 | |||
482 | if (xprt->shutdown) | ||
483 | goto dropit; | ||
484 | |||
485 | repsize = skb->len - sizeof(struct udphdr); | ||
486 | if (repsize < 4) { | ||
487 | dprintk("RPC: impossible RPC reply size %d!\n", repsize); | ||
488 | goto dropit; | ||
489 | } | ||
490 | |||
491 | /* Copy the XID from the skb... */ | ||
492 | xp = skb_header_pointer(skb, sizeof(struct udphdr), | ||
493 | sizeof(_xid), &_xid); | ||
494 | if (xp == NULL) | ||
495 | goto dropit; | ||
496 | |||
497 | /* Look up and lock the request corresponding to the given XID */ | ||
498 | spin_lock(&xprt->transport_lock); | ||
499 | rovr = xprt_lookup_rqst(xprt, *xp); | ||
500 | if (!rovr) | ||
501 | goto out_unlock; | ||
502 | task = rovr->rq_task; | ||
503 | |||
504 | if ((copied = rovr->rq_private_buf.buflen) > repsize) | ||
505 | copied = repsize; | ||
506 | |||
507 | /* Suck it into the iovec, verify checksum if not done by hw. */ | ||
508 | if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) | ||
509 | goto out_unlock; | ||
510 | |||
511 | /* Something worked... */ | ||
512 | dst_confirm(skb->dst); | ||
513 | |||
514 | xprt_adjust_cwnd(task, copied); | ||
515 | xprt_update_rtt(task); | ||
516 | xprt_complete_rqst(task, copied); | ||
517 | |||
518 | out_unlock: | ||
519 | spin_unlock(&xprt->transport_lock); | ||
520 | dropit: | ||
521 | skb_free_datagram(sk, skb); | ||
522 | out: | ||
523 | read_unlock(&sk->sk_callback_lock); | ||
524 | } | ||
525 | |||
526 | static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) | ||
527 | { | ||
528 | if (len > desc->count) | ||
529 | len = desc->count; | ||
530 | if (skb_copy_bits(desc->skb, desc->offset, p, len)) { | ||
531 | dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", | ||
532 | len, desc->count); | ||
533 | return 0; | ||
534 | } | ||
535 | desc->offset += len; | ||
536 | desc->count -= len; | ||
537 | dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", | ||
538 | len, desc->count); | ||
539 | return len; | ||
540 | } | ||
541 | |||
542 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
543 | { | ||
544 | size_t len, used; | ||
545 | char *p; | ||
546 | |||
547 | p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; | ||
548 | len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; | ||
549 | used = xs_tcp_copy_data(desc, p, len); | ||
550 | xprt->tcp_offset += used; | ||
551 | if (used != len) | ||
552 | return; | ||
553 | |||
554 | xprt->tcp_reclen = ntohl(xprt->tcp_recm); | ||
555 | if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) | ||
556 | xprt->tcp_flags |= XPRT_LAST_FRAG; | ||
557 | else | ||
558 | xprt->tcp_flags &= ~XPRT_LAST_FRAG; | ||
559 | xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; | ||
560 | |||
561 | xprt->tcp_flags &= ~XPRT_COPY_RECM; | ||
562 | xprt->tcp_offset = 0; | ||
563 | |||
564 | /* Sanity check of the record length */ | ||
565 | if (unlikely(xprt->tcp_reclen < 4)) { | ||
566 | dprintk("RPC: invalid TCP record fragment length\n"); | ||
567 | xprt_disconnect(xprt); | ||
568 | return; | ||
569 | } | ||
570 | dprintk("RPC: reading TCP record fragment of length %d\n", | ||
571 | xprt->tcp_reclen); | ||
572 | } | ||
573 | |||
574 | static void xs_tcp_check_recm(struct rpc_xprt *xprt) | ||
575 | { | ||
576 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", | ||
577 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); | ||
578 | if (xprt->tcp_offset == xprt->tcp_reclen) { | ||
579 | xprt->tcp_flags |= XPRT_COPY_RECM; | ||
580 | xprt->tcp_offset = 0; | ||
581 | if (xprt->tcp_flags & XPRT_LAST_FRAG) { | ||
582 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
583 | xprt->tcp_flags |= XPRT_COPY_XID; | ||
584 | xprt->tcp_copied = 0; | ||
585 | } | ||
586 | } | ||
587 | } | ||
588 | |||
589 | static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
590 | { | ||
591 | size_t len, used; | ||
592 | char *p; | ||
593 | |||
594 | len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; | ||
595 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | ||
596 | p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; | ||
597 | used = xs_tcp_copy_data(desc, p, len); | ||
598 | xprt->tcp_offset += used; | ||
599 | if (used != len) | ||
600 | return; | ||
601 | xprt->tcp_flags &= ~XPRT_COPY_XID; | ||
602 | xprt->tcp_flags |= XPRT_COPY_DATA; | ||
603 | xprt->tcp_copied = 4; | ||
604 | dprintk("RPC: reading reply for XID %08x\n", | ||
605 | ntohl(xprt->tcp_xid)); | ||
606 | xs_tcp_check_recm(xprt); | ||
607 | } | ||
608 | |||
609 | static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
610 | { | ||
611 | struct rpc_rqst *req; | ||
612 | struct xdr_buf *rcvbuf; | ||
613 | size_t len; | ||
614 | ssize_t r; | ||
615 | |||
616 | /* Find and lock the request corresponding to this xid */ | ||
617 | spin_lock(&xprt->transport_lock); | ||
618 | req = xprt_lookup_rqst(xprt, xprt->tcp_xid); | ||
619 | if (!req) { | ||
620 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
621 | dprintk("RPC: XID %08x request not found!\n", | ||
622 | ntohl(xprt->tcp_xid)); | ||
623 | spin_unlock(&xprt->transport_lock); | ||
624 | return; | ||
625 | } | ||
626 | |||
627 | rcvbuf = &req->rq_private_buf; | ||
628 | len = desc->count; | ||
629 | if (len > xprt->tcp_reclen - xprt->tcp_offset) { | ||
630 | skb_reader_t my_desc; | ||
631 | |||
632 | len = xprt->tcp_reclen - xprt->tcp_offset; | ||
633 | memcpy(&my_desc, desc, sizeof(my_desc)); | ||
634 | my_desc.count = len; | ||
635 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | ||
636 | &my_desc, xs_tcp_copy_data); | ||
637 | desc->count -= r; | ||
638 | desc->offset += r; | ||
639 | } else | ||
640 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | ||
641 | desc, xs_tcp_copy_data); | ||
642 | |||
643 | if (r > 0) { | ||
644 | xprt->tcp_copied += r; | ||
645 | xprt->tcp_offset += r; | ||
646 | } | ||
647 | if (r != len) { | ||
648 | /* Error when copying to the receive buffer, | ||
649 | * usually because we weren't able to allocate | ||
650 | * additional buffer pages. All we can do now | ||
651 | * is turn off XPRT_COPY_DATA, so the request | ||
652 | * will not receive any additional updates, | ||
653 | * and time out. | ||
654 | * Any remaining data from this record will | ||
655 | * be discarded. | ||
656 | */ | ||
657 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
658 | dprintk("RPC: XID %08x truncated request\n", | ||
659 | ntohl(xprt->tcp_xid)); | ||
660 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | ||
661 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | ||
662 | goto out; | ||
663 | } | ||
664 | |||
665 | dprintk("RPC: XID %08x read %Zd bytes\n", | ||
666 | ntohl(xprt->tcp_xid), r); | ||
667 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | ||
668 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | ||
669 | |||
670 | if (xprt->tcp_copied == req->rq_private_buf.buflen) | ||
671 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
672 | else if (xprt->tcp_offset == xprt->tcp_reclen) { | ||
673 | if (xprt->tcp_flags & XPRT_LAST_FRAG) | ||
674 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | ||
675 | } | ||
676 | |||
677 | out: | ||
678 | if (!(xprt->tcp_flags & XPRT_COPY_DATA)) | ||
679 | xprt_complete_rqst(req->rq_task, xprt->tcp_copied); | ||
680 | spin_unlock(&xprt->transport_lock); | ||
681 | xs_tcp_check_recm(xprt); | ||
682 | } | ||
683 | |||
684 | static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
685 | { | ||
686 | size_t len; | ||
687 | |||
688 | len = xprt->tcp_reclen - xprt->tcp_offset; | ||
689 | if (len > desc->count) | ||
690 | len = desc->count; | ||
691 | desc->count -= len; | ||
692 | desc->offset += len; | ||
693 | xprt->tcp_offset += len; | ||
694 | dprintk("RPC: discarded %Zu bytes\n", len); | ||
695 | xs_tcp_check_recm(xprt); | ||
696 | } | ||
697 | |||
698 | static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) | ||
699 | { | ||
700 | struct rpc_xprt *xprt = rd_desc->arg.data; | ||
701 | skb_reader_t desc = { | ||
702 | .skb = skb, | ||
703 | .offset = offset, | ||
704 | .count = len, | ||
705 | .csum = 0 | ||
706 | }; | ||
707 | |||
708 | dprintk("RPC: xs_tcp_data_recv started\n"); | ||
709 | do { | ||
710 | /* Read in a new fragment marker if necessary */ | ||
711 | /* Can we ever really expect to get completely empty fragments? */ | ||
712 | if (xprt->tcp_flags & XPRT_COPY_RECM) { | ||
713 | xs_tcp_read_fraghdr(xprt, &desc); | ||
714 | continue; | ||
715 | } | ||
716 | /* Read in the xid if necessary */ | ||
717 | if (xprt->tcp_flags & XPRT_COPY_XID) { | ||
718 | xs_tcp_read_xid(xprt, &desc); | ||
719 | continue; | ||
720 | } | ||
721 | /* Read in the request data */ | ||
722 | if (xprt->tcp_flags & XPRT_COPY_DATA) { | ||
723 | xs_tcp_read_request(xprt, &desc); | ||
724 | continue; | ||
725 | } | ||
726 | /* Skip over any trailing bytes on short reads */ | ||
727 | xs_tcp_read_discard(xprt, &desc); | ||
728 | } while (desc.count); | ||
729 | dprintk("RPC: xs_tcp_data_recv done\n"); | ||
730 | return len - desc.count; | ||
731 | } | ||
732 | |||
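[Editor's note: the dispatch loop above is a per-connection state machine: assemble the 4-byte record marker, then the 4-byte XID, then copy payload, then discard whatever is left of the fragment, with every object allowed to straddle skb boundaries. A compact userspace sketch of that reassembly; struct rm_stream and rm_feed are illustrative, and real code copies payload into the request's receive buffer instead of skipping it.]

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Parse state for one stream, mirroring xprt->tcp_offset/tcp_flags. */
struct rm_stream {
	enum { S_MARKER, S_XID, S_PAYLOAD } state;
	uint8_t hdr[4];		/* marker or XID being assembled */
	unsigned int off;	/* bytes of it received so far */
	uint32_t left;		/* bytes left in the current fragment */
};

/* Consume one chunk; objects may be split anywhere, as on real TCP. */
static void rm_feed(struct rm_stream *s, const uint8_t *p, size_t len)
{
	while (len) {
		size_t take;
		uint32_t v;

		if (s->state == S_PAYLOAD) {
			take = len < s->left ? len : s->left;
			/* kernel code copies into the xdr_buf here */
			s->left -= take; p += take; len -= take;
			if (!s->left)
				s->state = S_MARKER;
			continue;
		}
		take = (4 - s->off) < len ? (4 - s->off) : len;
		memcpy(s->hdr + s->off, p, take);
		s->off += take; p += take; len -= take;
		if (s->off < 4)
			continue;	/* wait for the rest of the word */
		s->off = 0;
		memcpy(&v, s->hdr, 4);
		v = ntohl(v);
		if (s->state == S_MARKER) {
			s->left = v & 0x7fffffffU;
			printf("fragment, %u bytes%s\n", s->left,
			       (v & 0x80000000U) ? " (last)" : "");
			s->state = S_XID;
		} else {
			printf("XID %08x\n", v);
			s->left -= 4;	/* XID counts against the fragment */
			s->state = s->left ? S_PAYLOAD : S_MARKER;
		}
	}
}

int main(void)
{
	struct rm_stream s = { .state = S_MARKER };
	uint32_t marker = htonl(0x80000000U | 8), xid = htonl(0x11223344U);
	uint8_t buf[12] = { 0 };

	memcpy(buf, &marker, 4);
	memcpy(buf + 4, &xid, 4);
	rm_feed(&s, buf, 7);		/* split deliberately mid-XID */
	rm_feed(&s, buf + 7, 5);
	return 0;
}
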
733 | /** | ||
734 | * xs_tcp_data_ready - "data ready" callback for TCP sockets | ||
735 | * @sk: socket with data to read | ||
736 | * @bytes: how much data to read | ||
737 | * | ||
738 | */ | ||
739 | static void xs_tcp_data_ready(struct sock *sk, int bytes) | ||
740 | { | ||
741 | struct rpc_xprt *xprt; | ||
742 | read_descriptor_t rd_desc; | ||
743 | |||
744 | read_lock(&sk->sk_callback_lock); | ||
745 | dprintk("RPC: xs_tcp_data_ready...\n"); | ||
746 | if (!(xprt = xprt_from_sock(sk))) | ||
747 | goto out; | ||
748 | if (xprt->shutdown) | ||
749 | goto out; | ||
750 | |||
751 | /* We use rd_desc to pass struct rpc_xprt to xs_tcp_data_recv */ | ||
752 | rd_desc.arg.data = xprt; | ||
753 | rd_desc.count = 65536; | ||
754 | tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); | ||
755 | out: | ||
756 | read_unlock(&sk->sk_callback_lock); | ||
757 | } | ||
758 | |||
759 | /** | ||
760 | * xs_tcp_state_change - callback to handle TCP socket state changes | ||
761 | * @sk: socket whose state has changed | ||
762 | * | ||
763 | */ | ||
764 | static void xs_tcp_state_change(struct sock *sk) | ||
765 | { | ||
766 | struct rpc_xprt *xprt; | ||
767 | |||
768 | read_lock(&sk->sk_callback_lock); | ||
769 | if (!(xprt = xprt_from_sock(sk))) | ||
770 | goto out; | ||
771 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | ||
772 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | ||
773 | sk->sk_state, xprt_connected(xprt), | ||
774 | sock_flag(sk, SOCK_DEAD), | ||
775 | sock_flag(sk, SOCK_ZAPPED)); | ||
776 | |||
777 | switch (sk->sk_state) { | ||
778 | case TCP_ESTABLISHED: | ||
779 | spin_lock_bh(&xprt->transport_lock); | ||
780 | if (!xprt_test_and_set_connected(xprt)) { | ||
781 | /* Reset TCP record info */ | ||
782 | xprt->tcp_offset = 0; | ||
783 | xprt->tcp_reclen = 0; | ||
784 | xprt->tcp_copied = 0; | ||
785 | xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; | ||
786 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | ||
787 | xprt_wake_pending_tasks(xprt, 0); | ||
788 | } | ||
789 | spin_unlock_bh(&xprt->transport_lock); | ||
790 | break; | ||
791 | case TCP_SYN_SENT: | ||
792 | case TCP_SYN_RECV: | ||
793 | break; | ||
794 | default: | ||
795 | xprt_disconnect(xprt); | ||
796 | break; | ||
797 | } | ||
798 | out: | ||
799 | read_unlock(&sk->sk_callback_lock); | ||
800 | } | ||
801 | |||
802 | /** | ||
803 | * xs_udp_write_space - callback invoked when socket buffer space | ||
804 | * becomes available | ||
805 | * @sk: socket whose state has changed | ||
806 | * | ||
807 | * Called when more output buffer space is available for this socket. | ||
808 | * We try not to wake our writers until they can make "significant" | ||
809 | * progress; otherwise we'll waste resources thrashing kernel_sendmsg | ||
810 | * with a bunch of small requests. | ||
811 | */ | ||
812 | static void xs_udp_write_space(struct sock *sk) | ||
813 | { | ||
814 | read_lock(&sk->sk_callback_lock); | ||
815 | |||
816 | /* from net/core/sock.c:sock_def_write_space */ | ||
817 | if (sock_writeable(sk)) { | ||
818 | struct socket *sock; | ||
819 | struct rpc_xprt *xprt; | ||
820 | |||
821 | if (unlikely(!(sock = sk->sk_socket))) | ||
822 | goto out; | ||
823 | if (unlikely(!(xprt = xprt_from_sock(sk)))) | ||
824 | goto out; | ||
825 | if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) | ||
826 | goto out; | ||
827 | |||
828 | xprt_write_space(xprt); | ||
829 | } | ||
830 | |||
831 | out: | ||
832 | read_unlock(&sk->sk_callback_lock); | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * xs_tcp_write_space - callback invoked when socket buffer space | ||
837 | * becomes available | ||
838 | * @sk: socket whose state has changed | ||
839 | * | ||
840 | * Called when more output buffer space is available for this socket. | ||
841 | * We try not to wake our writers until they can make "significant" | ||
842 | * progress; otherwise we'll waste resources thrashing kernel_sendmsg | ||
843 | * with a bunch of small requests. | ||
844 | */ | ||
845 | static void xs_tcp_write_space(struct sock *sk) | ||
846 | { | ||
847 | read_lock(&sk->sk_callback_lock); | ||
848 | |||
849 | /* from net/core/stream.c:sk_stream_write_space */ | ||
850 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { | ||
851 | struct socket *sock; | ||
852 | struct rpc_xprt *xprt; | ||
853 | |||
854 | if (unlikely(!(sock = sk->sk_socket))) | ||
855 | goto out; | ||
856 | if (unlikely(!(xprt = xprt_from_sock(sk)))) | ||
857 | goto out; | ||
858 | if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) | ||
859 | goto out; | ||
860 | |||
861 | xprt_write_space(xprt); | ||
862 | } | ||
863 | |||
864 | out: | ||
865 | read_unlock(&sk->sk_callback_lock); | ||
866 | } | ||
867 | |||
868 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | ||
869 | { | ||
870 | struct sock *sk = xprt->inet; | ||
871 | |||
872 | if (xprt->rcvsize) { | ||
873 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | ||
874 | sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; | ||
875 | } | ||
876 | if (xprt->sndsize) { | ||
877 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | ||
878 | sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; | ||
879 | sk->sk_write_space(sk); | ||
880 | } | ||
881 | } | ||
882 | |||
883 | /** | ||
884 | * xs_udp_set_buffer_size - set send and receive limits | ||
885 | * @xprt: generic transport | ||
886 | * @sndsize: requested size of send buffer, in bytes | ||
887 | * @rcvsize: requested size of receive buffer, in bytes | ||
888 | * | ||
889 | * Set socket send and receive buffer size limits. | ||
890 | */ | ||
891 | static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) | ||
892 | { | ||
893 | xprt->sndsize = 0; | ||
894 | if (sndsize) | ||
895 | xprt->sndsize = sndsize + 1024; | ||
896 | xprt->rcvsize = 0; | ||
897 | if (rcvsize) | ||
898 | xprt->rcvsize = rcvsize + 1024; | ||
899 | |||
900 | xs_udp_do_set_buffer_size(xprt); | ||
901 | } | ||
902 | |||
903 | /** | ||
904 | * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport | ||
905 | * @task: task that timed out | ||
906 | * | ||
907 | * Adjust the congestion window after a retransmit timeout has occurred. | ||
908 | */ | ||
909 | static void xs_udp_timer(struct rpc_task *task) | ||
910 | { | ||
911 | xprt_adjust_cwnd(task, -ETIMEDOUT); | ||
912 | } | ||
913 | |||
914 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | ||
915 | { | ||
916 | struct sockaddr_in myaddr = { | ||
917 | .sin_family = AF_INET, | ||
918 | }; | ||
919 | int err; | ||
920 | unsigned short port = xprt->port; | ||
921 | |||
922 | do { | ||
923 | myaddr.sin_port = htons(port); | ||
924 | err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, | ||
925 | sizeof(myaddr)); | ||
926 | if (err == 0) { | ||
927 | xprt->port = port; | ||
928 | dprintk("RPC: xs_bindresvport bound to port %u\n", | ||
929 | port); | ||
930 | return 0; | ||
931 | } | ||
932 | if (port <= xprt_min_resvport) | ||
933 | port = xprt_max_resvport; | ||
934 | else | ||
935 | port--; | ||
936 | } while (err == -EADDRINUSE && port != xprt->port); | ||
937 | |||
938 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | ||
939 | return err; | ||
940 | } | ||
941 | |||
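[Editor's note: the bind loop above walks downward through the reserved-port range, wrapping from xprt_min_resvport back up to xprt_max_resvport, and gives up once it comes back around to the port it started from. The search order in isolation, assuming all ports are in use; the range endpoints are assumed defaults and the program is illustrative.]

#include <stdio.h>

static unsigned short min_resvport = 665;	/* assumed default */
static unsigned short max_resvport = 1023;	/* assumed default */

int main(void)
{
	unsigned short start = 800, port = start;

	/* Same walk as xs_bindresvport, minus the actual bind(). */
	do {
		printf("try port %u\n", port);
		if (port <= min_resvport)
			port = max_resvport;	/* wrap to top of range */
		else
			port--;
	} while (port != start);
	printf("range exhausted\n");
	return 0;
}
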
942 | /** | ||
943 | * xs_udp_connect_worker - set up a UDP socket | ||
944 | * @args: RPC transport to connect | ||
945 | * | ||
946 | * Invoked from a work queue. | ||
947 | */ | ||
948 | static void xs_udp_connect_worker(void *args) | ||
949 | { | ||
950 | struct rpc_xprt *xprt = (struct rpc_xprt *) args; | ||
951 | struct socket *sock = xprt->sock; | ||
952 | int err, status = -EIO; | ||
953 | |||
954 | if (xprt->shutdown || xprt->addr.sin_port == 0) | ||
955 | goto out; | ||
956 | |||
957 | dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); | ||
958 | |||
959 | /* Start by resetting any existing state */ | ||
960 | xs_close(xprt); | ||
961 | |||
962 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { | ||
963 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | ||
964 | goto out; | ||
965 | } | ||
966 | |||
967 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | ||
968 | sock_release(sock); | ||
969 | goto out; | ||
970 | } | ||
971 | |||
972 | if (!xprt->inet) { | ||
973 | struct sock *sk = sock->sk; | ||
974 | |||
975 | write_lock_bh(&sk->sk_callback_lock); | ||
976 | |||
977 | sk->sk_user_data = xprt; | ||
978 | xprt->old_data_ready = sk->sk_data_ready; | ||
979 | xprt->old_state_change = sk->sk_state_change; | ||
980 | xprt->old_write_space = sk->sk_write_space; | ||
981 | sk->sk_data_ready = xs_udp_data_ready; | ||
982 | sk->sk_write_space = xs_udp_write_space; | ||
983 | sk->sk_no_check = UDP_CSUM_NORCV; | ||
984 | |||
985 | xprt_set_connected(xprt); | ||
986 | |||
987 | /* Reset to new socket */ | ||
988 | xprt->sock = sock; | ||
989 | xprt->inet = sk; | ||
990 | |||
991 | write_unlock_bh(&sk->sk_callback_lock); | ||
992 | } | ||
993 | xs_udp_do_set_buffer_size(xprt); | ||
994 | status = 0; | ||
995 | out: | ||
996 | xprt_wake_pending_tasks(xprt, status); | ||
997 | xprt_clear_connecting(xprt); | ||
998 | } | ||
999 | |||
1000 | /* | ||
1001 | * We need to preserve the port number so the reply cache on the server can | ||
1002 | * find our cached RPC replies when we get around to reconnecting. | ||
1003 | */ | ||
1004 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | ||
1005 | { | ||
1006 | int result; | ||
1007 | struct socket *sock = xprt->sock; | ||
1008 | struct sockaddr any; | ||
1009 | |||
1010 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | ||
1011 | |||
1012 | /* | ||
1013 | * Disconnect the transport socket by doing a connect operation | ||
1014 | * with AF_UNSPEC. This should return immediately... | ||
1015 | */ | ||
1016 | memset(&any, 0, sizeof(any)); | ||
1017 | any.sa_family = AF_UNSPEC; | ||
1018 | result = sock->ops->connect(sock, &any, sizeof(any), 0); | ||
1019 | if (result) | ||
1020 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | ||
1021 | result); | ||
1022 | } | ||
1023 | |||
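[Editor's note: connecting with AF_UNSPEC is the standard way to dissolve a socket's current association without releasing its local port, and the same trick works from userspace. The kernel code pins its autobound port with SOCK_BINDPORT_LOCK (set in xs_tcp_connect_worker below); a userspace sketch gets the same guarantee from an explicit bind(). Shown here on UDP so the effect is visible without a live peer; the port number is illustrative.]

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in local = {
		.sin_family = AF_INET,
		.sin_port = htons(40123),	/* explicit local port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port = htons(2049),
	};
	struct sockaddr any;
	socklen_t alen = sizeof(local);

	inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);
	bind(fd, (struct sockaddr *)&local, sizeof(local));
	connect(fd, (struct sockaddr *)&peer, sizeof(peer));

	/* Dissolve the association; the explicitly bound port survives. */
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	connect(fd, &any, sizeof(any));

	getsockname(fd, (struct sockaddr *)&local, &alen);
	printf("still bound to port %u\n", ntohs(local.sin_port));
	return 0;
}
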
1024 | /** | ||
1025 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint | ||
1026 | * @args: RPC transport to connect | ||
1027 | * | ||
1028 | * Invoked from a work queue. | ||
1029 | */ | ||
1030 | static void xs_tcp_connect_worker(void *args) | ||
1031 | { | ||
1032 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | ||
1033 | struct socket *sock = xprt->sock; | ||
1034 | int err, status = -EIO; | ||
1035 | |||
1036 | if (xprt->shutdown || xprt->addr.sin_port == 0) | ||
1037 | goto out; | ||
1038 | |||
1039 | dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); | ||
1040 | |||
1041 | if (!xprt->sock) { | ||
1042 | /* start from scratch */ | ||
1043 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | ||
1044 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | ||
1045 | goto out; | ||
1046 | } | ||
1047 | |||
1048 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | ||
1049 | sock_release(sock); | ||
1050 | goto out; | ||
1051 | } | ||
1052 | } else | ||
1053 | /* "close" the socket, preserving the local port */ | ||
1054 | xs_tcp_reuse_connection(xprt); | ||
1055 | |||
1056 | if (!xprt->inet) { | ||
1057 | struct sock *sk = sock->sk; | ||
1058 | |||
1059 | write_lock_bh(&sk->sk_callback_lock); | ||
1060 | |||
1061 | sk->sk_user_data = xprt; | ||
1062 | xprt->old_data_ready = sk->sk_data_ready; | ||
1063 | xprt->old_state_change = sk->sk_state_change; | ||
1064 | xprt->old_write_space = sk->sk_write_space; | ||
1065 | sk->sk_data_ready = xs_tcp_data_ready; | ||
1066 | sk->sk_state_change = xs_tcp_state_change; | ||
1067 | sk->sk_write_space = xs_tcp_write_space; | ||
1068 | |||
1069 | /* socket options */ | ||
1070 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | ||
1071 | sock_reset_flag(sk, SOCK_LINGER); | ||
1072 | tcp_sk(sk)->linger2 = 0; | ||
1073 | tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; | ||
1074 | |||
1075 | xprt_clear_connected(xprt); | ||
1076 | |||
1077 | /* Reset to new socket */ | ||
1078 | xprt->sock = sock; | ||
1079 | xprt->inet = sk; | ||
1080 | |||
1081 | write_unlock_bh(&sk->sk_callback_lock); | ||
1082 | } | ||
1083 | |||
1084 | /* Tell the socket layer to start connecting... */ | ||
1085 | status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, | ||
1086 | sizeof(xprt->addr), O_NONBLOCK); | ||
1087 | dprintk("RPC: %p connect status %d connected %d sock state %d\n", | ||
1088 | xprt, -status, xprt_connected(xprt), sock->sk->sk_state); | ||
1089 | if (status < 0) { | ||
1090 | switch (status) { | ||
1091 | case -EINPROGRESS: | ||
1092 | case -EALREADY: | ||
1093 | goto out_clear; | ||
1094 | case -ECONNREFUSED: | ||
1095 | case -ECONNRESET: | ||
1096 | /* retry with existing socket, after a delay */ | ||
1097 | break; | ||
1098 | default: | ||
1099 | /* get rid of existing socket, and retry */ | ||
1100 | xs_close(xprt); | ||
1101 | break; | ||
1102 | } | ||
1103 | } | ||
1104 | out: | ||
1105 | xprt_wake_pending_tasks(xprt, status); | ||
1106 | out_clear: | ||
1107 | xprt_clear_connecting(xprt); | ||
1108 | } | ||
1109 | |||
1110 | /** | ||
1111 | * xs_connect - connect a socket to a remote endpoint | ||
1112 | * @task: address of RPC task that manages state of connect request | ||
1113 | * | ||
1114 | * TCP: If the remote end dropped the connection, delay reconnecting. | ||
1115 | * | ||
1116 | * UDP socket connects are synchronous, but we use a work queue anyway | ||
1117 | * to guarantee that even unprivileged user processes can set up a | ||
1118 | * socket on a privileged port. | ||
1119 | * | ||
1120 | * If a UDP socket connect fails, the delay behavior here prevents | ||
1121 | * retry floods (hard mounts). | ||
1122 | */ | ||
1123 | static void xs_connect(struct rpc_task *task) | ||
1124 | { | ||
1125 | struct rpc_xprt *xprt = task->tk_xprt; | ||
1126 | |||
1127 | if (xprt_test_and_set_connecting(xprt)) | ||
1128 | return; | ||
1129 | |||
1130 | if (xprt->sock != NULL) { | ||
1131 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", | ||
1132 | xprt, xprt->reestablish_timeout / HZ); | ||
1133 | schedule_delayed_work(&xprt->connect_worker, | ||
1134 | xprt->reestablish_timeout); | ||
1135 | xprt->reestablish_timeout <<= 1; | ||
1136 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | ||
1137 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | ||
1138 | } else { | ||
1139 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | ||
1140 | schedule_work(&xprt->connect_worker); | ||
1141 | |||
1142 | /* flush_scheduled_work can sleep... */ | ||
1143 | if (!RPC_IS_ASYNC(task)) | ||
1144 | flush_scheduled_work(); | ||
1145 | } | ||
1146 | } | ||
1147 | |||
1148 | static struct rpc_xprt_ops xs_udp_ops = { | ||
1149 | .set_buffer_size = xs_udp_set_buffer_size, | ||
1150 | .reserve_xprt = xprt_reserve_xprt_cong, | ||
1151 | .release_xprt = xprt_release_xprt_cong, | ||
1152 | .connect = xs_connect, | ||
1153 | .send_request = xs_udp_send_request, | ||
1154 | .set_retrans_timeout = xprt_set_retrans_timeout_rtt, | ||
1155 | .timer = xs_udp_timer, | ||
1156 | .release_request = xprt_release_rqst_cong, | ||
1157 | .close = xs_close, | ||
1158 | .destroy = xs_destroy, | ||
1159 | }; | ||
1160 | |||
1161 | static struct rpc_xprt_ops xs_tcp_ops = { | ||
1162 | .reserve_xprt = xprt_reserve_xprt, | ||
1163 | .release_xprt = xprt_release_xprt, | ||
1164 | .connect = xs_connect, | ||
1165 | .send_request = xs_tcp_send_request, | ||
1166 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | ||
1167 | .close = xs_close, | ||
1168 | .destroy = xs_destroy, | ||
1169 | }; | ||
1170 | |||
1171 | /** | ||
1172 | * xs_setup_udp - Set up transport to use a UDP socket | ||
1173 | * @xprt: transport to set up | ||
1174 | * @to: timeout parameters | ||
1175 | * | ||
1176 | */ | ||
1177 | int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | ||
1178 | { | ||
1179 | size_t slot_table_size; | ||
1180 | |||
1181 | dprintk("RPC: setting up udp-ipv4 transport...\n"); | ||
1182 | |||
1183 | xprt->max_reqs = xprt_udp_slot_table_entries; | ||
1184 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | ||
1185 | xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); | ||
1186 | if (xprt->slot == NULL) | ||
1187 | return -ENOMEM; | ||
1188 | memset(xprt->slot, 0, slot_table_size); | ||
1189 | |||
1190 | xprt->prot = IPPROTO_UDP; | ||
1191 | xprt->port = xprt_max_resvport; | ||
1192 | xprt->tsh_size = 0; | ||
1193 | xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; | ||
1194 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | ||
1195 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | ||
1196 | |||
1197 | INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); | ||
1198 | xprt->bind_timeout = XS_BIND_TO; | ||
1199 | xprt->connect_timeout = XS_UDP_CONN_TO; | ||
1200 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | ||
1201 | xprt->idle_timeout = XS_IDLE_DISC_TO; | ||
1202 | |||
1203 | xprt->ops = &xs_udp_ops; | ||
1204 | |||
1205 | if (to) | ||
1206 | xprt->timeout = *to; | ||
1207 | else | ||
1208 | xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); | ||
1209 | |||
1210 | return 0; | ||
1211 | } | ||
1212 | |||
1213 | /** | ||
1214 | * xs_setup_tcp - Set up transport to use a TCP socket | ||
1215 | * @xprt: transport to set up | ||
1216 | * @to: timeout parameters | ||
1217 | * | ||
1218 | */ | ||
1219 | int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | ||
1220 | { | ||
1221 | size_t slot_table_size; | ||
1222 | |||
1223 | dprintk("RPC: setting up tcp-ipv4 transport...\n"); | ||
1224 | |||
1225 | xprt->max_reqs = xprt_tcp_slot_table_entries; | ||
1226 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | ||
1227 | xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); | ||
1228 | if (xprt->slot == NULL) | ||
1229 | return -ENOMEM; | ||
1230 | memset(xprt->slot, 0, slot_table_size); | ||
1231 | |||
1232 | xprt->prot = IPPROTO_TCP; | ||
1233 | xprt->port = xprt_max_resvport; | ||
1234 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | ||
1235 | xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; | ||
1236 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | ||
1237 | |||
1238 | INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); | ||
1239 | xprt->bind_timeout = XS_BIND_TO; | ||
1240 | xprt->connect_timeout = XS_TCP_CONN_TO; | ||
1241 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | ||
1242 | xprt->idle_timeout = XS_IDLE_DISC_TO; | ||
1243 | |||
1244 | xprt->ops = &xs_tcp_ops; | ||
1245 | |||
1246 | if (to) | ||
1247 | xprt->timeout = *to; | ||
1248 | else | ||
1249 | xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); | ||
1250 | |||
1251 | return 0; | ||
1252 | } | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cbb0ba34a600..0db9e57013fd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family) | |||
1192 | 1192 | ||
1193 | EXPORT_SYMBOL(xfrm_bundle_ok); | 1193 | EXPORT_SYMBOL(xfrm_bundle_ok); |
1194 | 1194 | ||
1195 | /* Well... that's _TASK_. We need to scan through transformation | ||
1196 | * list and figure out what mss tcp should generate in order to | ||
1197 | * final datagram fit to mtu. Mama mia... :-) | ||
1198 | * | ||
1199 | * Apparently, some easy way exists, but we used to choose the most | ||
1200 | * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta. | ||
1201 | * | ||
1202 | * Consider this function as something like dark humour. :-) | ||
1203 | */ | ||
1204 | static int xfrm_get_mss(struct dst_entry *dst, u32 mtu) | ||
1205 | { | ||
1206 | int res = mtu - dst->header_len; | ||
1207 | |||
1208 | for (;;) { | ||
1209 | struct dst_entry *d = dst; | ||
1210 | int m = res; | ||
1211 | |||
1212 | do { | ||
1213 | struct xfrm_state *x = d->xfrm; | ||
1214 | if (x) { | ||
1215 | spin_lock_bh(&x->lock); | ||
1216 | if (x->km.state == XFRM_STATE_VALID && | ||
1217 | x->type && x->type->get_max_size) | ||
1218 | m = x->type->get_max_size(d->xfrm, m); | ||
1219 | else | ||
1220 | m += x->props.header_len; | ||
1221 | spin_unlock_bh(&x->lock); | ||
1222 | } | ||
1223 | } while ((d = d->child) != NULL); | ||
1224 | |||
1225 | if (m <= mtu) | ||
1226 | break; | ||
1227 | res -= (m - mtu); | ||
1228 | if (res < 88) | ||
1229 | return mtu; | ||
1230 | } | ||
1231 | |||
1232 | return res + dst->header_len; | ||
1233 | } | ||
1234 | |||
1235 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | 1195 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) |
1236 | { | 1196 | { |
1237 | int err = 0; | 1197 | int err = 0; |
@@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1252 | dst_ops->negative_advice = xfrm_negative_advice; | 1212 | dst_ops->negative_advice = xfrm_negative_advice; |
1253 | if (likely(dst_ops->link_failure == NULL)) | 1213 | if (likely(dst_ops->link_failure == NULL)) |
1254 | dst_ops->link_failure = xfrm_link_failure; | 1214 | dst_ops->link_failure = xfrm_link_failure; |
1255 | if (likely(dst_ops->get_mss == NULL)) | ||
1256 | dst_ops->get_mss = xfrm_get_mss; | ||
1257 | if (likely(afinfo->garbage_collect == NULL)) | 1215 | if (likely(afinfo->garbage_collect == NULL)) |
1258 | afinfo->garbage_collect = __xfrm_garbage_collect; | 1216 | afinfo->garbage_collect = __xfrm_garbage_collect; |
1259 | xfrm_policy_afinfo[afinfo->family] = afinfo; | 1217 | xfrm_policy_afinfo[afinfo->family] = afinfo; |
@@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1281 | dst_ops->check = NULL; | 1239 | dst_ops->check = NULL; |
1282 | dst_ops->negative_advice = NULL; | 1240 | dst_ops->negative_advice = NULL; |
1283 | dst_ops->link_failure = NULL; | 1241 | dst_ops->link_failure = NULL; |
1284 | dst_ops->get_mss = NULL; | ||
1285 | afinfo->garbage_collect = NULL; | 1242 | afinfo->garbage_collect = NULL; |
1286 | } | 1243 | } |
1287 | } | 1244 | } |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9d206c282cf1..8b9a4747417d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) | |||
1026 | } | 1026 | } |
1027 | EXPORT_SYMBOL(xfrm_state_delete_tunnel); | 1027 | EXPORT_SYMBOL(xfrm_state_delete_tunnel); |
1028 | 1028 | ||
1029 | /* | ||
1030 | * This function is NOT optimal. For example, with ESP it will give an | ||
1031 | * MTU that's usually two bytes short of being optimal. However, it will | ||
1032 | * usually give an answer that's a multiple of 4 provided the input is | ||
1033 | * also a multiple of 4. | ||
1034 | */ | ||
1029 | int xfrm_state_mtu(struct xfrm_state *x, int mtu) | 1035 | int xfrm_state_mtu(struct xfrm_state *x, int mtu) |
1030 | { | 1036 | { |
1031 | int res = mtu; | 1037 | int res = mtu; |